import numpy as np
import chainer
from chainer import cuda, Variable, FunctionSet, optimizers
import chainer.functions as F
import chainer.links as L

# Load the MNIST dataset and split it into image/label arrays
train, test = chainer.datasets.get_mnist()
x_train = []
x_test = []
y_train = []
y_test = []
for data in train:
    x_train.append(data[0])
    y_train.append(data[1])
for data in test:
    x_test.append(data[0])
    y_test.append(data[1])
x_train = np.array(x_train).astype(np.float32)
y_train = np.array(y_train).astype(np.int32)
x_test = np.array(x_test).astype(np.float32)
y_test = np.array(y_test).astype(np.int32)

# Number of training samples and number of test samples
N = len(y_train)
N_test = len(y_test)

# --- Quoted section begins here ---

# Mini-batch size for one step of stochastic gradient descent
batchsize = 100

# Number of training epochs
n_epoch = 20

# Number of units in each hidden layer
n_units = 1000

# Forward pass of the neural network
def forward(x_data, y_data, train=True):
    x, t = Variable(x_data), Variable(y_data)
    h1 = F.dropout(F.relu(model.l1(x)), train=train)
    h2 = F.dropout(F.relu(model.l2(h1)), train=train)
    y = model.l3(h2)
    # Multi-class classification, so use softmax cross-entropy
    # as the loss function
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)

# Prepare the multi-layer perceptron model:
# 784-dimensional input, 10-dimensional output
model = FunctionSet(l1=L.Linear(784, n_units),
                    l2=L.Linear(n_units, n_units),
                    l3=L.Linear(n_units, 10))

# Setup optimizer
optimizer = optimizers.Adam()
optimizer.setup(model)

train_loss = []
train_acc = []
test_loss = []
test_acc = []

l1_W = []
l2_W = []
l3_W = []

# Learning loop
for epoch in range(1, n_epoch + 1):
    print('epoch', epoch)

    # training
    # Randomly permute the order of the N training samples
    perm = np.random.permutation(N)
    sum_accuracy = 0
    sum_loss = 0

    # Train on the shuffled data one mini-batch at a time
    for i in range(0, N, batchsize):
        x_batch = x_train[perm[i:i + batchsize]]
        y_batch = y_train[perm[i:i + batchsize]]

        # Reset gradients to zero
        optimizer.zero_grads()

        # Forward pass to compute loss and accuracy
        loss, acc = forward(x_batch, y_batch)

        # Backpropagate to compute gradients, then update parameters
        loss.backward()
        optimizer.update()

        sum_loss += float(cuda.to_cpu(loss.data)) * batchsize
        sum_accuracy += float(cuda.to_cpu(acc.data)) * batchsize

    # Report mean loss and accuracy on the training data
    print('train mean loss={}, accuracy={}'.format(sum_loss / N, sum_accuracy / N))
    train_loss.append(sum_loss / N)
    train_acc.append(sum_accuracy / N)

    # evaluation
    # Compute loss and accuracy on the test data to check generalization
    sum_accuracy = 0
    sum_loss = 0
    for i in range(0, N_test, batchsize):
        x_batch = x_test[i:i + batchsize]
        y_batch = y_test[i:i + batchsize]

        # Forward pass to compute loss and accuracy (dropout disabled)
        loss, acc = forward(x_batch, y_batch, train=False)

        sum_loss += float(cuda.to_cpu(loss.data)) * batchsize
        sum_accuracy += float(cuda.to_cpu(acc.data)) * batchsize

    # Report mean loss and accuracy on the test data
    print('test mean loss={}, accuracy={}'.format(sum_loss / N_test, sum_accuracy / N_test))
    test_loss.append(sum_loss / N_test)
    test_acc.append(sum_accuracy / N_test)

    # Store a snapshot of the learned weight matrices for this epoch
    # (copy the arrays: appending model.l1.W itself would append the same
    # object every time, so every entry would end up holding the final weights)
    l1_W.append(model.l1.W.data.copy())
    l2_W.append(model.l2.W.data.copy())
    l3_W.append(model.l3.W.data.copy())

    # --- End of quoted section ---

    # Checkpoint the model and optimizer after every epoch
    chainer.serializers.save_npz('model_tegaki_{}'.format(epoch), model)
    chainer.serializers.save_npz('optimizer_tegaki_{}'.format(epoch), optimizer)

print('ok')
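
# --- Usage sketch (not from the quoted article) ---
# A minimal illustration of how the per-epoch checkpoints written above could
# be restored and used for inference. This assumes Chainer 1.x (where
# FunctionSet and the Variable-based forward pass above are available), the
# same `model` definition, and the file name produced by the final epoch;
# adjust the name if you want a different checkpoint.
chainer.serializers.load_npz('model_tegaki_{}'.format(n_epoch), model)

# Classify one test image with a plain forward pass (no dropout at test time)
x = Variable(x_test[:1])
h1 = F.relu(model.l1(x))
h2 = F.relu(model.l2(h1))
y = model.l3(h2)
print('predicted:', int(np.argmax(y.data)), 'actual:', int(y_test[0]))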