This blog normally covers all sorts of topics related to training, but for certain reasons I am posting an article like this one.
I am currently taking a program called Rabbit Challenge in order to obtain the E資格 (E certification) offered by the Japan Deep Learning Association. Since the program requires publishing reports, I am posting mine on this blog.
Code Exercises
The previous report got long, so I am writing the continuation here.
Vanishing Gradients
Using MNIST, let's observe how the gradients vanish.
import numpy as np
from common import layers
from collections import OrderedDict
from common import functions
from data.mnist import load_mnist
import matplotlib.pyplot as plt

# Load MNIST
(x_train, d_train), (x_test, d_test) = load_mnist(normalize=True, one_hot_label=True)
train_size = len(x_train)
print("Data loaded")

# Scaling factor for the initial weights
weight_init = 0.01
# Input layer size
input_layer_size = 784
# Hidden layer sizes
hidden_layer_1_size = 40
hidden_layer_2_size = 20
# Output layer size
output_layer_size = 10
# Number of iterations
iters_num = 2000
# Mini-batch size
batch_size = 100
# Learning rate
learning_rate = 0.1
# Plotting interval
plot_interval = 10

# Initialize the network parameters
def init_network():
    network = {}
    network['W1'] = weight_init * np.random.randn(input_layer_size, hidden_layer_1_size)
    network['W2'] = weight_init * np.random.randn(hidden_layer_1_size, hidden_layer_2_size)
    network['W3'] = weight_init * np.random.randn(hidden_layer_2_size, output_layer_size)
    network['b1'] = np.zeros(hidden_layer_1_size)
    network['b2'] = np.zeros(hidden_layer_2_size)
    network['b3'] = np.zeros(output_layer_size)
    return network

# Forward propagation
def forward(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    hidden_f = functions.sigmoid
    u1 = np.dot(x, W1) + b1
    z1 = hidden_f(u1)
    u2 = np.dot(z1, W2) + b2
    z2 = hidden_f(u2)
    u3 = np.dot(z2, W3) + b3
    y = functions.softmax(u3)
    return z1, z2, y

# Backpropagation
def backward(x, d, z1, z2, y):
    grad = {}
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    hidden_d_f = functions.d_sigmoid
    last_d_f = functions.d_softmax_with_loss
    # Delta at the output layer
    delta3 = last_d_f(d, y)
    # Gradient of b3
    grad['b3'] = np.sum(delta3, axis=0)
    # Gradient of W3
    grad['W3'] = np.dot(z2.T, delta3)
    # Delta at layer 2
    delta2 = np.dot(delta3, W3.T) * hidden_d_f(z2)
    # Gradient of b2
    grad['b2'] = np.sum(delta2, axis=0)
    # Gradient of W2
    grad['W2'] = np.dot(z1.T, delta2)
    # Delta at layer 1
    delta1 = np.dot(delta2, W2.T) * hidden_d_f(z1)
    # Gradient of b1
    grad['b1'] = np.sum(delta1, axis=0)
    # Gradient of W1
    grad['W1'] = np.dot(x.T, delta1)
    return grad

# Initialize the parameters
network = init_network()
accuracies_train = []
accuracies_test = []

# Accuracy
def accuracy(x, d):
    z1, z2, y = forward(network, x)
    y = np.argmax(y, axis=1)
    if d.ndim != 1:
        d = np.argmax(d, axis=1)
    accuracy = np.sum(y == d) / float(x.shape[0])
    return accuracy

for i in range(iters_num):
    # Sample a random mini-batch
    batch_mask = np.random.choice(train_size, batch_size)
    # Training images for the mini-batch
    x_batch = x_train[batch_mask]
    # Training labels for the mini-batch
    d_batch = d_train[batch_mask]

    z1, z2, y = forward(network, x_batch)
    grad = backward(x_batch, d_batch, z1, z2, y)

    if (i + 1) % plot_interval == 0:
        accr_test = accuracy(x_test, d_test)
        accuracies_test.append(accr_test)
        accr_train = accuracy(x_batch, d_batch)
        accuracies_train.append(accr_train)
        print('Generation: ' + str(i+1) + '. accuracy (train) = ' + str(accr_train))
        print('          : ' + str(i+1) + '. accuracy (test)  = ' + str(accr_test))

    # Apply the gradients to the parameters
    for key in ('W1', 'W2', 'W3', 'b1', 'b2', 'b3'):
        network[key] -= learning_rate * grad[key]

lists = range(0, iters_num, plot_interval)
plt.plot(lists, accuracies_train, label="training set")
plt.plot(lists, accuracies_test, label="test set")
plt.legend(loc="lower right")
plt.title("accuracy")
plt.xlabel("count")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
# Show the plot
plt.show()
The training uses mini-batch gradient descent. With the sigmoid activation function, learning does not progress at all; this is the effect of vanishing gradients.
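The reason is easy to see numerically: the derivative of the sigmoid, sigmoid(u) * (1 - sigmoid(u)), is at most 0.25, and the weights above are initialized with a standard deviation of only 0.01, so every layer the delta passes through shrinks it drastically. A very rough back-of-the-envelope sketch (standalone numpy, not the common library, and ignoring fan-in):

import numpy as np

def sigmoid(u):
    return 1.0 / (1.0 + np.exp(-u))

# The sigmoid derivative sigmoid(u) * (1 - sigmoid(u)) never exceeds 0.25
u = np.linspace(-5, 5, 101)
print(np.max(sigmoid(u) * (1.0 - sigmoid(u))))  # 0.25

# Very rough per-unit shrink factor for a delta crossing 3 such layers
# with weights of standard deviation 0.01 (fan-in ignored on purpose)
print((0.25 * 0.01) ** 3)  # about 1.6e-08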
If we change the activation function in the code above to ReLU, we get the following. Since ReLU does not make the gradient vanish at the activation, learning proceeds properly.
hidden_f = functions.relu
hidden_d_f = functions.d_relu
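For reference, relu and d_relu in the common module presumably look something like the following (my own sketch, not the course library's actual code):

import numpy as np

def relu(x):
    # Positive inputs pass through unchanged, negative inputs become 0
    return np.maximum(0, x)

def d_relu(x):
    # The gradient is 1 where the input is positive and 0 elsewhere
    return np.where(x > 0, 1.0, 0.0)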
With Xavier initialization, the network learns even with the sigmoid activation. Initial values matter.
# Xavier initialization
network['W1'] = np.random.randn(input_layer_size, hidden_layer_1_size) / np.sqrt(input_layer_size)
network['W2'] = np.random.randn(hidden_layer_1_size, hidden_layer_2_size) / np.sqrt(hidden_layer_1_size)
network['W3'] = np.random.randn(hidden_layer_2_size, output_layer_size) / np.sqrt(hidden_layer_2_size)
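A quick way to convince yourself why the initial values matter so much is to stack a few sigmoid layers and compare how much spread the activations keep. This is a standalone sketch with dummy data, not the MNIST network above:

import numpy as np

def sigmoid(u):
    return 1.0 / (1.0 + np.exp(-u))

np.random.seed(0)
a = np.random.randn(100, 100)  # dummy activations instead of MNIST images

for name in ("0.01 * randn", "Xavier"):
    z = a
    for _ in range(5):  # five sigmoid layers of width 100
        if name == "Xavier":
            W = np.random.randn(100, 100) / np.sqrt(100)
        else:
            W = 0.01 * np.random.randn(100, 100)
        z = sigmoid(np.dot(z, W))
    # With the 0.01 init the spread collapses toward 0; Xavier keeps it usable
    print(name, z.std())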
CNN
I usually write VGG and ResNet in TensorFlow, and naturally never write them in numpy, so I gave it a try.
First of all, I was surprised by how im2col and col2im work, but I will not go into the details here.
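Still, a toy sketch of the idea may help (my own illustration of the shapes, not the library's implementation): im2col unfolds every filter-sized patch of the input into one row of a 2D matrix, so the convolution turns into a single matrix product.

import numpy as np

# Toy case: 1 image, 1 channel, 4x4 input, 3x3 filter, stride 1, no padding
x = np.arange(16).reshape(1, 1, 4, 4)
FH = FW = 3
out_h = out_w = 4 - 3 + 1  # 2

# Unfold each 3x3 patch into one row (this is what im2col does conceptually)
rows = []
for i in range(out_h):
    for j in range(out_w):
        rows.append(x[0, :, i:i+FH, j:j+FW].ravel())
col = np.array(rows)
print(col.shape)  # (4, 9): out_h * out_w rows, C * FH * FW columns

# The convolution is then one matrix product with the flattened filter
W = np.random.randn(1, 1, 3, 3)
out = np.dot(col, W.reshape(1, -1).T).reshape(1, 1, out_h, out_w)
print(out.shape)  # (1, 1, 2, 2)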
The convolution layer is implemented as follows.
class Convolution:
    # W: filter, b: bias
    def __init__(self, W, b, stride=1, pad=0):
        self.W = W
        self.b = b
        self.stride = stride
        self.pad = pad

        # Intermediate data (used in backward)
        self.x = None
        self.col = None
        self.col_W = None

        # Gradients of the filter and bias parameters
        self.dW = None
        self.db = None

    def forward(self, x):
        # FN: filter_number, C: channel, FH: filter_height, FW: filter_width
        FN, C, FH, FW = self.W.shape
        N, C, H, W = x.shape
        # Output height and width
        out_h = 1 + int((H + 2 * self.pad - FH) / self.stride)
        out_w = 1 + int((W + 2 * self.pad - FW) / self.stride)

        # Convert x into a 2D matrix
        col = im2col(x, FH, FW, self.stride, self.pad)
        # Convert the filter into a matrix that matches x
        col_W = self.W.reshape(FN, -1).T
        out = np.dot(col, col_W) + self.b
        # Restore the shape that was changed for the computation
        out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)

        self.x = x
        self.col = col
        self.col_W = col_W

        return out

    def backward(self, dout):
        FN, C, FH, FW = self.W.shape
        dout = dout.transpose(0, 2, 3, 1).reshape(-1, FN)

        self.db = np.sum(dout, axis=0)
        self.dW = np.dot(self.col.T, dout)
        self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW)

        dcol = np.dot(dout, self.col_W.T)
        # Convert dcol back into image data
        dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad)

        return dx
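A quick shape check of the forward pass, with random dummy filters and inputs (assuming im2col is available in the same environment as the class above):

W = np.random.randn(30, 1, 5, 5)    # 30 filters of shape 1x5x5
b = np.zeros(30)
conv = Convolution(W, b, stride=1, pad=0)

x = np.random.randn(10, 1, 28, 28)  # 10 MNIST-sized images
out = conv.forward(x)
print(out.shape)  # (10, 30, 24, 24) since (28 - 5) / 1 + 1 = 24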
Next, the pooling layer. Backpropagation is properly implemented here as well.
class Pooling:
    def __init__(self, pool_h, pool_w, stride=1, pad=0):
        self.pool_h = pool_h
        self.pool_w = pool_w
        self.stride = stride
        self.pad = pad

        self.x = None
        self.arg_max = None

    def forward(self, x):
        N, C, H, W = x.shape
        out_h = int(1 + (H - self.pool_h) / self.stride)
        out_w = int(1 + (W - self.pool_w) / self.stride)

        # Convert x into a 2D matrix
        col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
        # Resize to match the pooling window
        col = col.reshape(-1, self.pool_h * self.pool_w)

        # Take the maximum of each row
        arg_max = np.argmax(col, axis=1)
        out = np.max(col, axis=1)
        # Reshape
        out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)

        self.x = x
        self.arg_max = arg_max

        return out

    def backward(self, dout):
        dout = dout.transpose(0, 2, 3, 1)

        pool_size = self.pool_h * self.pool_w
        dmax = np.zeros((dout.size, pool_size))
        dmax[np.arange(self.arg_max.size), self.arg_max.flatten()] = dout.flatten()
        dmax = dmax.reshape(dout.shape + (pool_size,))

        dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1)
        dx = col2im(dcol, self.x.shape, self.pool_h, self.pool_w, self.stride, self.pad)

        return dx
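Again a quick shape check, this time on a dummy feature map (for example, the convolution output from the sketch above):

pool = Pooling(pool_h=2, pool_w=2, stride=2)
fmap = np.random.randn(10, 30, 24, 24)  # e.g. the convolution output above
pooled = pool.forward(fmap)
print(pooled.shape)  # (10, 30, 12, 12): height and width are halved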
Using these, I build a simple network with one convolution layer and one pooling layer.
class SimpleConvNet:
    # conv - relu - pool - affine - relu - affine - softmax
    def __init__(self, input_dim=(1, 28, 28),
                 conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                 hidden_size=100, output_size=10, weight_init_std=0.01):
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        conv_output_size = (input_size - filter_size + 2 * filter_pad) / filter_stride + 1
        pool_output_size = int(filter_num * (conv_output_size / 2) * (conv_output_size / 2))

        # Initialize the weights
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)
        self.params['W2'] = weight_init_std * np.random.randn(pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)
        self.params['W3'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        # Build the layers
        self.layers = OrderedDict()
        self.layers['Conv1'] = layers.Convolution(self.params['W1'], self.params['b1'], conv_param['stride'], conv_param['pad'])
        self.layers['Relu1'] = layers.Relu()
        self.layers['Pool1'] = layers.Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = layers.Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = layers.Relu()
        self.layers['Affine2'] = layers.Affine(self.params['W3'], self.params['b3'])

        self.last_layer = layers.SoftmaxWithLoss()

    def predict(self, x):
        for key in self.layers.keys():
            x = self.layers[key].forward(x)
        return x

    def loss(self, x, d):
        y = self.predict(x)
        return self.last_layer.forward(y, d)

    def accuracy(self, x, d, batch_size=100):
        if d.ndim != 1:
            d = np.argmax(d, axis=1)

        acc = 0.0

        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i * batch_size:(i + 1) * batch_size]
            td = d[i * batch_size:(i + 1) * batch_size]
            y = self.predict(tx)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == td)

        return acc / x.shape[0]

    def gradient(self, x, d):
        # forward
        self.loss(x, d)

        # backward
        dout = 1
        dout = self.last_layer.backward(dout)
        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            dout = layer.backward(dout)

        # Collect the gradients
        grad = {}
        grad['W1'], grad['b1'] = self.layers['Conv1'].dW, self.layers['Conv1'].db
        grad['W2'], grad['b2'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
        grad['W3'], grad['b3'] = self.layers['Affine2'].dW, self.layers['Affine2'].db

        return grad
Now apply it to MNIST. The optimizer is Adam and the number of iterations is 1000. It can be run almost exactly the same way as with TensorFlow.
from common import optimizer

# Load the data
(x_train, d_train), (x_test, d_test) = load_mnist(flatten=False)
print("Data loaded")

# Reduce the data if processing takes too long
x_train, d_train = x_train[:5000], d_train[:5000]
x_test, d_test = x_test[:1000], d_test[:1000]

network = SimpleConvNet(input_dim=(1, 28, 28),
                        conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                        hidden_size=100, output_size=10, weight_init_std=0.01)

optimizer = optimizer.Adam()

iters_num = 1000
train_size = x_train.shape[0]
batch_size = 100

train_loss_list = []
accuracies_train = []
accuracies_test = []

plot_interval = 10

for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    d_batch = d_train[batch_mask]

    grad = network.gradient(x_batch, d_batch)
    optimizer.update(network.params, grad)

    loss = network.loss(x_batch, d_batch)
    train_loss_list.append(loss)

    if (i + 1) % plot_interval == 0:
        accr_train = network.accuracy(x_train, d_train)
        accr_test = network.accuracy(x_test, d_test)
        accuracies_train.append(accr_train)
        accuracies_test.append(accr_test)
        print('Generation: ' + str(i+1) + '. accuracy (train) = ' + str(accr_train))
        print('          : ' + str(i+1) + '. accuracy (test)  = ' + str(accr_test))

lists = range(0, iters_num, plot_interval)
plt.plot(lists, accuracies_train, label="training set")
plt.plot(lists, accuracies_test, label="test set")
plt.legend(loc="lower right")
plt.title("accuracy")
plt.xlabel("count")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
# Show the plot
plt.show()
We can see that the network learns properly.