
MNIST Neural Network Practice

kevin.lee
2024.07.18
#NN

Implementing sigmoid

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))
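
A quick sanity check (the inputs below are arbitrary test values): sigmoid works element-wise on NumPy arrays and squashes any real number into the (0, 1) range.

x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))  # [0.26894142 0.5        0.73105858]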

3-layer neural network

def init_network():
  network = {}
  network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
  network['B1'] = np.array([0.1, 0.2, 0.3])
  network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
  network['B2'] = np.array([0.1, 0.2])
  network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])
  network['B3'] = np.array([0.1, 0.2])

  return network

def identity_function(x):  # identity output layer, used here instead of softmax
  return x

def forward(network, x):
  W1, W2, W3 = network['W1'], network['W2'], network['W3']
  B1, B2, B3 = network['B1'], network['B2'], network['B3']

  a1 = np.dot(x, W1) + B1
  z1 = sigmoid(a1)
  a2 = np.dot(z1, W2) + B2
  z2 = sigmoid(a2)
  a3 = np.dot(z2, W3) + B3
  y = identity_function(a3)

  return y

network = init_network()
x = np.array([1.0, 0.5])
y = forward(network, x)
print(y)  # [0.31682708 0.69627909]
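
Since np.dot also handles 2-D inputs, the same forward function works on a whole batch at once; a minimal sketch with two made-up input rows:

x_batch = np.array([[1.0, 0.5],
                    [0.2, 0.9]])
print(forward(network, x_batch))  # one output row per input row, shape (2, 2)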

Implementing softmax

def softmax(x):  # simple 1-D version
  return np.exp(x - np.max(x)) / np.sum(np.exp(x - np.max(x)))

def softmax(x):  # batch-aware version; overrides the 1-D version above
    if x.ndim == 2:
        x = x.T
        x = x - np.max(x, axis=0)
        y = np.exp(x) / np.sum(np.exp(x), axis=0)
        return y.T

    x = x - np.max(x)
    return np.exp(x) / np.sum(np.exp(x))
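
Subtracting np.max(x) before exponentiating does not change the result but prevents overflow for large inputs, and the outputs always sum to 1 so they can be read as probabilities. A quick check with values that would overflow a naive implementation:

a = np.array([1010, 1000, 990])
print(softmax(a))          # [9.99954600e-01 4.53978686e-05 2.06106005e-09]
print(np.sum(softmax(a)))  # 1.0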

Setting up the loss functions

def MSE(y, t):  # sum-of-squares error
  return 0.5 * np.sum((y - t) ** 2)

def CEE(y, t, delta=1e-4):  # cross-entropy error for a single one-hot sample; delta avoids log(0)
  return -np.sum(t * np.log(y + delta))

def CEE2(y, t, delta=1e-4):  # batch version: averages the cross-entropy over the mini-batch
  if y.ndim == 1:
    t = t.reshape(1, t.size)
    y = y.reshape(1, y.size)

  batch_size = y.shape[0]
  return -np.sum(t * np.log(y + delta)) / batch_size
def cross_entropy_error(y, t):  # accepts either one-hot or integer labels
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)

    # if t is one-hot, convert it to integer class labels
    if t.size == y.size:
        t = t.argmax(axis=1)

    batch_size = y.shape[0]
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
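
A small check with a made-up 10-class prediction whose correct class is 2:

t = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
y = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
print(CEE(y, t))                  # about 0.51: low loss for a fairly confident, correct prediction
print(cross_entropy_error(y, t))  # about the same value, computed from the label index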

Computing gradients

def numerical_diff(function, x):  # central-difference numerical derivative
  h = 1e-4
  return (function(x + h) - function(x - h)) / (2 * h)

def sigmoid_grad(x):  # analytic derivative of the sigmoid
    return (1.0 - sigmoid(x)) * sigmoid(x)
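
sigmoid_grad is the analytic derivative s(x) * (1 - s(x)), so it should agree closely with the central-difference estimate from numerical_diff; a quick check at x = 0.5:

print(numerical_diff(sigmoid, 0.5))  # ~0.2350 (numerical estimate)
print(sigmoid_grad(0.5))             # ~0.2350 (analytic value)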

Partial derivatives (numerical gradient)

def numerical_gradient(f, x):  # numerical gradient of f for a 1-D array x
  h = 1e-4
  grad = np.zeros_like(x)

  for idx in range(x.size):
    tmp_val = x[idx]
    x[idx] = tmp_val + h
    fxh1 = f(x)

    x[idx] = tmp_val - h
    fxh2 = f(x)

    grad[idx] = (fxh1 - fxh2) / (2 * h)
    x[idx] = tmp_val

  return grad

def numerical_gradient_2d(f, x):  # same idea, element by element over a 2-D weight matrix
  h = 1e-4
  grad = np.zeros_like(x)

  for row in range(x.shape[0]):
    for col in range(x.shape[1]):
      tmp_val = x[row][col]
      x[row][col] = tmp_val + h
      fxh1 = f(x)

      x[row][col] = tmp_val - h
      fxh2 = f(x)

      grad[row][col] = (fxh1 - fxh2) / (2 * h)
      x[row][col]= tmp_val

  return grad
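
As a check, applying numerical_gradient to a simple sum-of-squares function (a hypothetical test function, analogous to function_2 defined below) recovers the analytic gradient (2*x0, 2*x1):

def sum_of_squares(x):
  return np.sum(x ** 2)

print(numerical_gradient(sum_of_squares, np.array([3.0, 4.0])))  # [6. 8.]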

Gradient descent

def gradient_descent(f, init_x, lr=0.01, step_num=100):
  x = init_x

  for i in range(step_num):
    grad = numerical_gradient(f, x)
    x -= lr * grad

  return x

def function_2(x):  # simple test function: f(x0, x1) = x0**2 + x1**2
  return x[0] ** 2 + x[1] ** 2

init_x = np.array([-3.0, 4.0])
gradient_descent(function_2, init_x=init_x, lr=0.1, step_num=100)
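
Printing the returned value shows gradient descent driving the point from (-3.0, 4.0) to (almost) the minimum of function_2 at the origin:

x_min = gradient_descent(function_2, init_x=np.array([-3.0, 4.0]), lr=0.1, step_num=100)
print(x_min)  # roughly [-6.1e-10  8.1e-10], i.e. essentially (0, 0)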

Dataset preprocessing (MNIST)
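
The preprocessing below assumes x_train, t_train, x_test, t_test already exist as NumPy arrays of 28x28 images and integer labels. The post does not show the loading step; one common way to get arrays of the expected shape (an assumption, not necessarily what was used here) is Keras's built-in loader:

from tensorflow.keras.datasets import mnist  # assumed data source

(x_train, t_train), (x_test, t_test) = mnist.load_data()
print(x_train.shape, t_train.shape)  # (60000, 28, 28) (60000,)
print(x_test.shape, t_test.shape)    # (10000, 28, 28) (10000,)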

from sklearn.preprocessing import MinMaxScaler

# fit the scaler on the training images only, then apply it to both train and test
scaler01 = MinMaxScaler()
scaler01.fit(x_train.reshape(-1, 784))
x_train_scaled = scaler01.transform(x_train.reshape(-1, 784))
x_test_scaled = scaler01.transform(x_test.reshape(-1, 784))

# one-hot encode the integer labels into length-10 vectors
t_train_onehot = [np.zeros(10) for _ in range(len(t_train))]
for i in range(len(t_train)):
  t_train_onehot[i][t_train[i]] = 1

t_test_onehot = [np.zeros(10) for _ in range(len(t_test))]
for i in range(len(t_test)):
  t_test_onehot[i][t_test[i]] = 1

t_train_onehot = np.array(t_train_onehot)
t_test_onehot = np.array(t_test_onehot)
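
The same one-hot encoding can be written in one vectorized line with an identity-matrix lookup (an equivalent alternative, not the code used above):

t_train_onehot = np.eye(10)[t_train]  # row i has a 1 at column t_train[i]
t_test_onehot = np.eye(10)[t_test]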

Neural network

class TwoLayerNet:

    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # initialize weights and biases
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    def predict(self, x):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        return y

    # x: input data, t: target labels
    def loss(self, x, t):
        y = self.predict(x)

        return cross_entropy_error(y, t)

    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        t = np.argmax(t, axis=1)

        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    # x: input data, t: target labels
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient_2d(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient_2d(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])

        return grads

    def gradient(self, x, t):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}

        batch_num = x.shape[0]

        # forward
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        # backward
        dy = (y - t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)

        da1 = np.dot(dy, W2.T)
        dz1 = sigmoid_grad(a1) * da1
        grads['W1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)

        return grads
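
Before training on the full dataset, the analytic backpropagation in gradient() can be checked against the slower numerical_gradient() on a tiny, made-up problem (a gradient check; the network size and random data below are arbitrary):

net = TwoLayerNet(input_size=4, hidden_size=3, output_size=2)
x_check = np.random.rand(3, 4)                        # 3 random samples
t_check = np.eye(2)[np.random.randint(0, 2, size=3)]  # random one-hot targets

grad_backprop = net.gradient(x_check, t_check)
grad_numerical = net.numerical_gradient(x_check, t_check)
for key in ('W1', 'b1', 'W2', 'b2'):
    print(key, np.mean(np.abs(grad_backprop[key] - grad_numerical[key])))  # each should be tiny (~1e-7 or less)
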
import matplotlib.pyplot as plt

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

# hyperparameters
iters_num = 10000  # set the number of iterations appropriately
train_size = x_train.shape[0]
batch_size = 100   # mini-batch size
learning_rate = 0.1

train_loss_list = []
train_acc_list = []
test_acc_list = []

# iterations per epoch
iter_per_epoch = max(train_size / batch_size, 1)

for i in range(iters_num):
    # sample a mini-batch
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train_scaled[batch_mask]
    t_batch = t_train_onehot[batch_mask]

    # compute gradients
    #grad = network.numerical_gradient(x_batch, t_batch)  # numerical version (very slow)
    grad = network.gradient(x_batch, t_batch)

    # update parameters
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    # record training progress
    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    # compute accuracy once per epoch
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train_scaled, t_train_onehot)
        test_acc = network.accuracy(x_test_scaled, t_test_onehot)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("train acc, test acc, loss | " + str(train_acc) + ", " + str(test_acc)+ ", " + str(loss))

# plot the accuracy curves
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, label='train acc')
plt.plot(x, test_acc_list, label='test acc', linestyle='--')
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
train acc, test acc, loss | 0.10441666666666667, 0.1028, 2.286588992995258
train acc, test acc, loss | 0.7857166666666666, 0.791, 0.9047985413912452
train acc, test acc, loss | 0.8752666666666666, 0.8786, 0.44186938285149496
train acc, test acc, loss | 0.8979833333333334, 0.9018, 0.3491395135458403
train acc, test acc, loss | 0.908, 0.9112, 0.3731433893888561
train acc, test acc, loss | 0.9140666666666667, 0.9165, 0.21534141029339696
train acc, test acc, loss | 0.9195333333333333, 0.9206, 0.19480093095448292
train acc, test acc, loss | 0.92315, 0.9265, 0.24049965226019737
train acc, test acc, loss | 0.9277166666666666, 0.9281, 0.3343950189586797
train acc, test acc, loss | 0.9313666666666667, 0.9318, 0.27178398452013863
train acc, test acc, loss | 0.9343833333333333, 0.9348, 0.21778347282966176
train acc, test acc, loss | 0.9376666666666666, 0.9385, 0.19026524998499306
train acc, test acc, loss | 0.9398666666666666, 0.9409, 0.1500719971086648
train acc, test acc, loss | 0.9423166666666667, 0.941, 0.15538171972432768
train acc, test acc, loss | 0.94485, 0.9432, 0.2783876171985013
train acc, test acc, loss | 0.9462666666666667, 0.9457, 0.14533801425457885
train acc, test acc, loss | 0.9481833333333334, 0.9466, 0.13587008909628817

[Figure: train/test accuracy per epoch]

iterations = range(1, len(train_loss_list) + 1)

# plot the loss curve (train_loss_list has one entry per training iteration, not per epoch)
plt.plot(iterations, train_loss_list, label='Training Loss')
plt.title('Training Loss over Iterations')
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.legend()

# show the plot
plt.show()

[Figure: training loss per iteration]
