# coding: utf-8
import datetime
import logging
import os

import cv2
import numpy as np
import six

import chainer
import chainer.functions as F
import chainer.links as L
from chainer import optimizers
from chainer import serializers
from chainer import training
from chainer.training import extensions

from modeling2 import model2

log_name = 'train_log.txt'
logging.basicConfig(filename=log_name, level=logging.INFO)
logging.info('***************')
logging.info(datetime.datetime.today())
logging.info('Train Log')


# ------ make data ------
def getData():
    x_train = []
    x_test = []
    y_train = []
    y_test = []
    for i in range(1, 15):
        # Specify the directory that holds the dataset (one subdirectory per class).
        path = "//"
        imageList = os.listdir(path + str(i))
        imageNum = len(imageList)
        # Use roughly 4/5 of each class for training and the rest for testing
        # (integer division keeps the split index an int on Python 3 as well).
        Maximage = imageNum - imageNum // 5
        for j in range(len(imageList)):
            imageSrc = cv2.imread(path + str(i) + "/" + imageList[j])
            if imageSrc is None:
                continue
            if j < Maximage:
                x_train.append(imageSrc)
                y_train.append(i)
            else:
                x_test.append(imageSrc)
                y_test.append(i)
    return x_train, y_train, x_test, y_test


def train():
    Maxepoch = 50
    Maxbatch = 120
    epoch = 1

    model = model2()
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    x_train, y_train, x_test, y_test = getData()
    # Images are assumed to be 64x64 BGR as read by cv2. Convert from NHWC to
    # NCHW with a transpose (a plain reshape would scramble channels and pixels)
    # and scale the values to [0, 1].
    x_train = np.array(x_train).astype(np.float32).transpose(0, 3, 1, 2) / 255
    y_train = np.array(y_train).astype(np.int32)
    x_test = np.array(x_test).astype(np.float32).transpose(0, 3, 1, 2) / 255
    y_test = np.array(y_test).astype(np.int32)

    # train and test
    while epoch <= Maxepoch:
        print("epoch: {}".format(epoch))
        logging.info('epoch:{}'.format(epoch))
        print(datetime.datetime.now())
        trainImgNum = len(y_train)
        testImgNum = len(y_test)

        # --- train ---
        sumAcr = 0
        sumLoss = 0
        perm = np.random.permutation(trainImgNum)
        for i in six.moves.range(0, trainImgNum, Maxbatch):
            x_batch = x_train[perm[i:i + Maxbatch]]
            y_batch = y_train[perm[i:i + Maxbatch]]
            model.cleargrads()  # clear gradients (replaces the deprecated optimizer.zero_grads())
            loss, acc = model.forward(x_batch, y_batch)
            loss.backward()
            optimizer.update()
            sumLoss += float(loss.data) * len(y_batch)
            sumAcr += float(acc.data) * len(y_batch)
        print('train loss={}, accuracy={}'.format(sumLoss / trainImgNum, sumAcr / trainImgNum))
        logging.info('train loss={}, accuracy={}'.format(sumLoss / trainImgNum, sumAcr / trainImgNum))

        # --- test ---
        sumAcr = 0
        sumLoss = 0
        perm = np.random.permutation(testImgNum)
        for i in six.moves.range(0, testImgNum, Maxbatch):
            x_batch = x_test[perm[i:i + Maxbatch]]
            y_batch = y_test[perm[i:i + Maxbatch]]
            loss, acc = model.forward(x_batch, y_batch, train=False)
            sumLoss += float(loss.data) * len(y_batch)
            sumAcr += float(acc.data) * len(y_batch)
        print('test loss={}, accuracy={}'.format(sumLoss / testImgNum, sumAcr / testImgNum))
        logging.info('test loss={}, accuracy={}'.format(sumLoss / testImgNum, sumAcr / testImgNum))

        # Save the model after every epoch (specify the directory where trained models are stored).
        serializers.save_hdf5('./model' + str(epoch), model)
        epoch += 1


if __name__ == '__main__':
    train()
    logging.info('***************')
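

# ----------------------------------------------------------------------
# For reference: a minimal sketch of the interface this script assumes from
# modeling2.model2 -- a Chainer (v1-style) Chain whose forward(x, y, train=...)
# returns (softmax cross-entropy loss, accuracy) for 64x64 RGB inputs in NCHW
# layout. The class name and layer sizes below are illustrative assumptions,
# not the actual contents of modeling2.py.
class Model2Sketch(chainer.Chain):

    def __init__(self):
        super(Model2Sketch, self).__init__(
            conv1=L.Convolution2D(3, 32, 5, pad=2),
            conv2=L.Convolution2D(32, 64, 5, pad=2),
            fc1=L.Linear(64 * 16 * 16, 256),
            fc2=L.Linear(256, 15),  # labels are 1..14, so index 0 stays unused
        )

    def forward(self, x, y, train=True):
        x = chainer.Variable(np.asarray(x, dtype=np.float32))
        t = chainer.Variable(np.asarray(y, dtype=np.int32))
        h = F.max_pooling_2d(F.relu(self.conv1(x)), 2)   # 64x64 -> 32x32
        h = F.max_pooling_2d(F.relu(self.conv2(h)), 2)   # 32x32 -> 16x16
        h = F.dropout(F.relu(self.fc1(h)), train=train)  # v1-style train flag, matching the call above
        h = self.fc2(h)
        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)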