ABOUT ME

-

Today
-
Yesterday
-
Total
-
  • 시계열수치입력 수치 예측 모델 레시피(상태유지 스택 순환신경망 모델)
    Keras 2018. 1. 3. 22:09

    # 1. 사용할 패키지 불러오기

    import keras
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense, LSTM, Dropout
    from sklearn.preprocessing import MinMaxScaler
    import matplotlib.pyplot as plt
    %matplotlib inline


    # 2.함수와 클래스 만들기
    def create_dataset(signal_data, look_back=1):
        """Slice a (n, 1) signal into supervised-learning windows.

        Each sample is `look_back` consecutive values taken from column 0,
        and the target is the single value immediately after the window.
        (The pasted original had broken indentation — empty `for` body and a
        dedented `return` — this restores the intended sliding-window logic.)

        Args:
            signal_data: 2-D array of shape (n, 1).
            look_back: window length, i.e. number of past steps per sample.

        Returns:
            Tuple (X, y) where X has shape (n - look_back, look_back)
            and y has shape (n - look_back,).
        """
        data_x, data_y = [], []
        for i in range(len(signal_data) - look_back):
            data_x.append(signal_data[i:(i + look_back), 0])
            data_y.append(signal_data[i + look_back, 0])
        return np.array(data_x), np.array(data_y)


    class CustomHistory(keras.callbacks.Callback):
        """Callback that accumulates losses across multiple fit() calls.

        Keras's built-in History resets on every fit(); this one keeps
        appending, which matters here because stateful training below runs
        fit() once per epoch in a manual loop.
        (The pasted original had `on_epoch_end` dedented out of the class —
        it would never have been invoked as a callback method; restored.)
        """

        def init(self):
            # Deliberately a plain method, not __init__: the script calls
            # custom_hist.init() once before the 200-iteration training loop
            # so the loss lists survive every fit() invocation.
            self.train_loss = []   # per-epoch training loss values
            self.val_loss = []     # per-epoch validation loss values

        def on_epoch_end(self, batch, logs=None):
            # Avoid the mutable-default pitfall of the original `logs={}`.
            logs = logs or {}
            self.train_loss.append(logs.get('loss'))
            self.val_loss.append(logs.get('val_loss'))


    look_back = 40



    # 3. Build the dataset: a cosine wave sampled 1600 times, shaped as a
    # single-feature column vector.
    signal_data = np.cos(np.arange(1600) * (20 * np.pi / 1000))[:, None]


    # 3.1 Preprocess: squash the signal into the [0, 1] range.
    scaler = MinMaxScaler(feature_range=(0, 1))
    signal_data = scaler.fit_transform(signal_data)


    # 3.2 Split into contiguous train / validation / test segments.
    train = signal_data[:800]
    val = signal_data[800:1200]
    test = signal_data[1200:]


    # 3.3 Turn each segment into (window, next-value) supervised pairs.
    x_train, y_train = create_dataset(train, look_back)
    x_val, y_val = create_dataset(val, look_back)
    x_test, y_test = create_dataset(test, look_back)


    # 3.4 LSTM input is (samples, timesteps, features): add a feature axis.
    x_train = x_train[:, :, None]
    x_val = x_val[:, :, None]
    x_test = x_test[:, :, None]


    # 4. Define the model: two stacked stateful LSTMs that return full
    # sequences, one final stateful LSTM, dropout after each, and a single
    # linear output unit. batch_input_shape pins the batch size to 1, as
    # required for stateful training.
    model = Sequential()

    for _ in range(2):
        model.add(LSTM(32, batch_input_shape=(1, look_back, 1),
                       stateful=True, return_sequences=True))
        model.add(Dropout(0.3))

    model.add(LSTM(32, batch_input_shape=(1, look_back, 1), stateful=True))
    model.add(Dropout(0.3))
    model.add(Dense(1))

    자동 대체 텍스트를 사용할 수 없습니다.


    # 5. Configure learning: mean-squared-error loss with the Adam optimizer.
    model.compile(loss='mean_squared_error', optimizer='adam')


    # 6. Train. Stateful LSTMs carry cell state between batches, so each
    # epoch is a separate fit() call with shuffling disabled, followed by an
    # explicit state reset. CustomHistory accumulates losses across calls.
    custom_hist = CustomHistory()
    custom_hist.init()


    for epoch in range(200):
        model.fit(x_train, y_train, epochs=1, batch_size=1, shuffle=False,
                  callbacks=[custom_hist], validation_data=(x_val, y_val))
        model.reset_states()


    # 7. Plot per-epoch training and validation loss curves.
    plt.plot(custom_hist.train_loss)
    plt.plot(custom_hist.val_loss)
    plt.ylim(0.0, 0.15)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()

    자동 대체 텍스트를 사용할 수 없습니다.


    # 8. Evaluate on each split, resetting the LSTM state between splits so
    # carried-over state from one evaluation cannot leak into the next.
    trainScore = model.evaluate(x_train, y_train, batch_size=1, verbose=0)
    model.reset_states()
    print('Train Score: ', trainScore)

    valScore = model.evaluate(x_val, y_val, batch_size=1, verbose=0)
    model.reset_states()
    # Fixed typo in user-facing output: 'Validataion' -> 'Validation'.
    print('Validation Score: ', valScore)

    testScore = model.evaluate(x_test, y_test, batch_size=1, verbose=0)
    model.reset_states()
    print('Test Score: ', testScore)

    자동 대체 텍스트를 사용할 수 없습니다.


    # 9. Free-running forecast: seed with the first test window, then feed
    # each prediction back in as the newest timestep of the next window.
    look_ahead = 250
    xhat = x_test[0]
    predictions = np.zeros((look_ahead, 1))

    for step in range(look_ahead):
        yhat = model.predict(np.array([xhat]), batch_size=1)
        predictions[step] = yhat
        # Slide the window: drop the oldest value, append the new prediction.
        xhat = np.vstack([xhat[1:], yhat])


    # Compare the free-running forecast against the true test signal.
    plt.figure(figsize=(12,5))
    steps = np.arange(look_ahead)
    plt.plot(steps, predictions, 'r', label="prediction")
    plt.plot(steps, y_test[:look_ahead], label="test function")
    plt.legend()
    plt.show()

    자동 대체 텍스트를 사용할 수 없습니다.


    [출처] 블록과 함께하는 파이썬 딥러닝 케라스, 김태영 지음, DigitalBooks, p265~268


Designed by Tistory.