import numpy as np
import chainer
from chainer import cuda, Variable, FunctionSet, optimizers
import chainer.functions as F
import chainer.links as L
import os
import cv2
import logging
import datetime

log_name = 'train_log.txt'
logging.basicConfig(filename=log_name, level=logging.INFO)
logging.info('***************')
logging.info(datetime.datetime.today())
logging.info('Tegaki Log')

x_train = []
x_test = []
y_train = []
y_test = []

# Load the image data
path = './data/images/'
IMGSIZE = 28
train_images = os.listdir(path + 'train')
test_images = os.listdir(path + 'test')

for image in train_images:
    src = cv2.imread(path + 'train/' + image, 1)
    src = cv2.resize(src, (IMGSIZE * 3, IMGSIZE))
    src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    src = cv2.bitwise_not(src)
    # Binarize: any nonzero pixel becomes 255
    ret, src = cv2.threshold(src, 0, 255, cv2.THRESH_BINARY)
    # File names are assumed to be of the form '<id>_<label>.<ext>'
    file = image.split('.')[0]
    x_train.append(src)
    y_train.append(file.split('_')[1])

for image in test_images:
    src = cv2.imread(path + 'test/' + image, 1)
    src = cv2.resize(src, (IMGSIZE * 3, IMGSIZE))
    src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    src = cv2.bitwise_not(src)
    ret, src = cv2.threshold(src, 0, 255, cv2.THRESH_BINARY)
    file = image.split('.')[0]
    x_test.append(src)
    y_test.append(file.split('_')[1])

# Note: cv2.resize takes (width, height), so each array has shape
# (IMGSIZE, IMGSIZE * 3); the reshape below matches that layout.
x_train = np.array(x_train).astype(np.float32).reshape(
    (len(x_train), 1, IMGSIZE, IMGSIZE * 3)) / 255
y_train = np.array(y_train).astype(np.int32)
x_test = np.array(x_test).astype(np.float32).reshape(
    (len(x_test), 1, IMGSIZE, IMGSIZE * 3)) / 255
y_test = np.array(y_test).astype(np.int32)

N = len(y_train)
N_test = len(y_test)

# Quoted section begins below
# Mini-batch size for one step of stochastic gradient descent
batchsize = 100
# Number of training epochs
n_epoch = 100
# Number of units in each hidden layer
n_units = 1000

# Definition of the neural network
def forward(x_data, y_data, train=True):
    x, t = Variable(x_data), Variable(y_data)
    h1 = F.dropout(F.relu(model.l1(x)), train=train)
    h2 = F.dropout(F.relu(model.l2(h1)), train=train)
    y = model.l3(h2)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)

# Input is 1 x 28 x 84 = 2352 pixels; output is one of 19 classes
model = FunctionSet(l1=F.Linear(2352, n_units),
                    l2=F.Linear(n_units, n_units),
                    l3=F.Linear(n_units, 19))

optimizer = optimizers.Adam()
optimizer.setup(model)

train_loss = []
train_acc = []
test_loss = []
test_acc = []
l1_W = []
l2_W = []
l3_W = []

# Learning loop
bestloss = 10  # sentinel larger than any plausible initial loss
bestepoch = 0
for epoch in range(1, n_epoch + 1):
    print('epoch', epoch)
    logging.info('epoch:{}'.format(epoch))

    # training
    # Randomly shuffle the order of the N training samples
    perm = np.random.permutation(N)
    sum_accuracy = 0
    sum_loss = 0
    # Sweep the data from 0 to N in mini-batches
    for i in range(0, N, batchsize):
        x_batch = x_train[perm[i:i + batchsize]]
        y_batch = y_train[perm[i:i + batchsize]]

        # Reset the gradients
        optimizer.zero_grads()
        # Forward pass: compute the loss and accuracy
        loss, acc = forward(x_batch, y_batch)
        # Backpropagation computes the gradients
        loss.backward()
        optimizer.update()

        # Weight by the actual batch length so a smaller final batch
        # does not skew the mean
        sum_loss += float(cuda.to_cpu(loss.data)) * len(y_batch)
        sum_accuracy += float(cuda.to_cpu(acc.data)) * len(y_batch)

    # Report the loss and accuracy on the training data
    print('train mean loss={}, accuracy={}'.format(sum_loss / N, sum_accuracy / N))
    logging.info('train mean loss={}, accuracy={}'.format(sum_loss / N, sum_accuracy / N))
    train_loss.append(sum_loss / N)
    train_acc.append(sum_accuracy / N)

    # evaluation
    # Evaluate loss and accuracy on the test data to check generalization
    sum_accuracy = 0
    sum_loss = 0
    for i in range(0, N_test, batchsize):
        x_batch = x_test[i:i + batchsize]
        y_batch = y_test[i:i + batchsize]

        # Forward pass only: compute the loss and accuracy
        loss, acc = forward(x_batch, y_batch, train=False)

        sum_loss += float(cuda.to_cpu(loss.data)) * len(y_batch)
        sum_accuracy += float(cuda.to_cpu(acc.data)) * len(y_batch)

    # Report the loss and accuracy on the test data
    print('test mean loss={}, accuracy={}'.format(sum_loss / N_test, sum_accuracy / N_test))
    logging.info('test mean loss={}, accuracy={}'.format(sum_loss / N_test, sum_accuracy / N_test))
    test_loss.append(sum_loss / N_test)
    test_acc.append(sum_accuracy / N_test)

    # Remember the epoch with the lowest test loss
    if bestloss > sum_loss / N_test:
        bestloss = sum_loss / N_test
        bestepoch = epoch

    # Keep the learned parameters of each layer
    l1_W.append(model.l1.W)
    l2_W.append(model.l2.W)
    l3_W.append(model.l3.W)
    # End of quoted section

    # Snapshot the model and optimizer state after every epoch
    chainer.serializers.save_npz('data/model_tegaki_{}'.format(epoch), model)
    chainer.serializers.save_npz('data/optimizer_tegaki_{}'.format(epoch), optimizer)

print('bestepoch:', bestepoch)
print('ok')
logging.info('***************')
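
# A small sketch for visualizing the learning curves collected above,
# assuming matplotlib is available; it writes the figure to 'loss_curve.png'
# (a hypothetical output path).
import matplotlib.pyplot as plt

plt.plot(range(1, n_epoch + 1), train_loss, label='train loss')
plt.plot(range(1, n_epoch + 1), test_loss, label='test loss')
plt.xlabel('epoch')
plt.ylabel('mean loss')
plt.legend()
plt.savefig('loss_curve.png')

# A minimal inference sketch: restore the snapshot saved at a given epoch and
# classify one image. 'sample.png' is a hypothetical input path; the
# preprocessing mirrors the training pipeline above, and dropout is skipped
# at prediction time.
def predict(epoch, image_path):
    chainer.serializers.load_npz('data/model_tegaki_{}'.format(epoch), model)
    src = cv2.imread(image_path, 1)
    src = cv2.resize(src, (IMGSIZE * 3, IMGSIZE))
    src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    src = cv2.bitwise_not(src)
    ret, src = cv2.threshold(src, 0, 255, cv2.THRESH_BINARY)
    x = Variable(src.astype(np.float32).reshape((1, 1, IMGSIZE, IMGSIZE * 3)) / 255)
    h1 = F.relu(model.l1(x))
    h2 = F.relu(model.l2(h1))
    y = model.l3(h2)
    return int(np.argmax(y.data))  # predicted class index (0-18)

print(predict(bestepoch, 'sample.png'))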