Multi-class classification model for image input (a deep convolutional neural network model)
# 1. Import the packages we will use
import numpy as np
import matplotlib.pyplot as plt
from keras.utils import np_utils
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import Conv2D, MaxPooling2D, Flatten
from keras.layers import Dropout
%matplotlib inline
# 2. Prepare the dataset
width = 28
height = 28
# Load the training and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, width, height, 1).astype('float32') / 255.0
x_test = x_test.reshape(10000, width, height, 1).astype('float32') / 255.0
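# The reshape adds the single-channel (grayscale) dimension that Conv2D expects,
# and dividing by 255 scales the pixel values from the 0-255 range down to 0-1.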
# Split off a validation set from the training set
x_val = x_train[50000:]
y_val = y_train[50000:]
x_train = x_train[:50000]
y_train = y_train[:50000]
# Preprocess the labels: one-hot encoding
y_train = np_utils.to_categorical(y_train)
y_val = np_utils.to_categorical(y_val)
y_test = np_utils.to_categorical(y_test)
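# to_categorical converts an integer label into a one-hot vector; for example,
# label 3 becomes [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], which matches the 10-unit
# softmax output and the categorical_crossentropy loss used below.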
# 3. Build the model
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(width, height, 1)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))
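# Optional (not in the book's listing): print the layer output shapes and
# parameter counts to check the architecture before training.
model.summary()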
# 4. Configure the learning process
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
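# categorical_crossentropy matches the one-hot labels and the 10-way softmax output;
# the 'sgd' string uses Keras' default SGD settings, and accuracy is tracked each epoch.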
# 5. Train the model
hist = model.fit(x_train, y_train, epochs=30, batch_size=32, validation_data=(x_val, y_val))
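# fit() returns a History object; hist.history holds the per-epoch metrics under the
# keys 'loss', 'val_loss', 'acc' and 'val_acc' in the Keras version used in the book
# (newer tf.keras versions name them 'accuracy' and 'val_accuracy' instead).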
# 6. Plot the training history
fig, loss_ax = plt.subplots()
acc_ax = loss_ax.twinx()
loss_ax.plot(hist.history['loss'], 'blue', label='train loss')
loss_ax.plot(hist.history['val_loss'], 'red', label='val loss')
loss_ax.set_ylim([0.0, 0.5])
acc_ax.plot(hist.history['acc'], 'purple', label='train acc')
acc_ax.plot(hist.history['val_acc'], 'green', label='val acc')
acc_ax.set_ylim([0.8, 1.0])
loss_ax.set_xlabel('epoch')
loss_ax.set_ylabel('loss')
acc_ax.set_ylabel('accuracy')
loss_ax.legend(loc='upper left')
acc_ax.legend(loc='lower left')
plt.show()
# 7. Evaluate the model
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=32)
print(loss_and_metrics)
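# Optional: label the two returned values (test loss and test accuracy) when printing.
print('test loss: %.4f, test accuracy: %.4f' % (loss_and_metrics[0], loss_and_metrics[1]))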
# 8. Use the model
yhat_test = model.predict(x_test, batch_size=32)
plt_row = 5
plt_col = 5
plt.rcParams["figure.figsize"] = (10,10)
f, axarr = plt.subplots(plt_row, plt_col)
cnt = 0
i = 0
# Show only the misclassified test images: R is the real label, P is the prediction.
while cnt < (plt_row * plt_col):
    # Skip samples the model classified correctly
    if np.argmax(y_test[i]) == np.argmax(yhat_test[i]):
        i = i + 1
        continue
    sub_plt = axarr[cnt // plt_col, cnt % plt_col]
    sub_plt.imshow(x_test[i].reshape(width, height))
    sub_plt_title = 'R: ' + str(np.argmax(y_test[i])) + ' P: ' + str(np.argmax(yhat_test[i]))
    sub_plt.set_title(sub_plt_title)
    i = i + 1
    cnt = cnt + 1
plt.show()
[Source] 블록과 함께하는 파이썬 딥러닝 케라스 (Python Deep Learning Keras with Blocks), by 김태영 (Kim Taeyoung), DigitalBooks, pp. 248-252