Using the History Feature
# 1. Import the required packages
from keras.utils import np_utils
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# 2. Create the dataset
# 2.1 Load the training and test datasets
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
# 2.2 Split a validation dataset off the training dataset
X_val = X_train[50000:]
Y_val = Y_train[50000:]
X_train = X_train[:50000]
Y_train = Y_train[:50000]
# 2.3 Preprocess the datasets
X_train = X_train.reshape(50000, 784).astype('float32') / 255.0
X_val = X_val.reshape(10000, 784).astype('float32') / 255.0
X_test = X_test.reshape(10000, 784).astype('float32') / 255.0
# 2.4 Sample subsets of the training and validation datasets
train_rand_idxs = np.random.choice(50000, 700)
val_rand_idxs = np.random.choice(10000, 300)
X_train = X_train[train_rand_idxs]
Y_train = Y_train[train_rand_idxs]
X_val = X_val[val_rand_idxs]
Y_val = Y_val[val_rand_idxs]
# 2.5 One-hot encode the label data
Y_train = np_utils.to_categorical(Y_train)
Y_val = np_utils.to_categorical(Y_val)
Y_test = np_utils.to_categorical(Y_test)
# 3. Build the model
model = Sequential()
model.add(Dense(units=2, input_dim=28*28, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
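# (Illustrative check, not in the original listing.) Printing a summary confirms
# the layer output shapes and parameter counts of the small two-layer network above.
model.summary()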
# 4. Configure the model training process
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# 5. Train the model
hist = model.fit(X_train, Y_train, epochs=1000, batch_size=10, validation_data=(X_val, Y_val))
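# (Illustrative check, not in the original listing.) fit() returns a History object
# whose .history attribute is a dict of per-epoch metric lists; with this Keras
# version the keys are expected to be 'loss', 'acc', 'val_loss', and 'val_acc'.
print(hist.history.keys())
print(len(hist.history["loss"]))  # one entry per epoch, so 1000 values here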
# 6. Plot the training history
fig, loss_ax = plt.subplots()
acc_ax = loss_ax.twinx()
# Loss curves go on the left axis, accuracy curves on the right (twin) axis
loss_ax.plot(hist.history["loss"], "yellow", label="train loss")
loss_ax.plot(hist.history["val_loss"], "red", label="val loss")
acc_ax.plot(hist.history["acc"], "blue", label="train acc")
acc_ax.plot(hist.history["val_acc"], "green", label="val acc")
loss_ax.set_xlabel("epoch")
loss_ax.set_ylabel("loss")
acc_ax.set_ylabel("accuracy")
loss_ax.legend(loc="upper left")
acc_ax.legend(loc="lower left")
plt.show()
[Source] 블록과 함께하는 파이썬 딥러닝 케라스, by 김태영, DigitalBooks, pp. 47-50