import chainer
from chainer import training
from chainer.training import extensions


# Define the neural network
class MLP(chainer.Chain):
    def __init__(self, n_in, n_units, n_out):
        super(MLP, self).__init__()
        with self.init_scope():
            self.link1 = chainer.links.Linear(n_in, n_units)
            self.link2 = chainer.links.Linear(None, n_out)

    def __call__(self, x):
        h1 = chainer.functions.relu(self.link1(x))
        return self.link2(h1)


nyuron = 400
for count in range(1, 401):
    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist()

    # Define the mini-batch size
    batch_size = 1024

    # Create the iterators
    train_iter = chainer.iterators.SerialIterator(train, batch_size)
    test_iter = chainer.iterators.SerialIterator(test, batch_size, repeat=False, shuffle=False)

    # Define the classification model
    model = chainer.links.Classifier(MLP(784, nyuron, 10))

    # Set up the optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    max_epoch = 400
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (max_epoch, 'epoch'))
    trainer.extend(extensions.Evaluator(test_iter, model))
    trainer.extend(extensions.LogReport(log_name="saishu_result_mnist_4_" + str(nyuron) + ".log", trigger=(1, 'epoch')))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy', 'validation/main/loss', 'validation/main/accuracy', 'elapsed_time']))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], x_key='epoch', file_name="saishu_loss_mnist_4_" + str(nyuron) + ".png"))
    trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], x_key='epoch', file_name="saishu_accuracy_mnist_4_" + str(nyuron) + ".png"))

    # Run training
    trainer.run()

    # Decrease the number of hidden units for the next run
    nyuron -= 1