4. Time Series 1: 따릉이 (Seoul Public Bike)
---
layout: single
title: "Converting a Jupyter Notebook!"
categories: coding
tag: [python, blog, jekyll]
toc: true
author_profile: false
---
Problem: predict 따릉이 (Seoul bike) demand two hours ahead
import warnings
warnings.filterwarnings(action='ignore')
1. The scikit-learn package is an excellent Python package for learning machine learning.
Write and run code that imports scikit-learn under the alias sk.
# Write your answer code here.
import sklearn as sk
2. Pandas is a Python library widely used for data analysis.
Import Pandas so it can be used under the alias pd.
# Write your answer code here.
import pandas as pd
3. We want to read in the data file to analyze and process for modeling.
Write code that reads the data file with a Pandas function and assigns it to a DataFrame variable named df.
path = 'https://raw.githubusercontent.com/DA4BAM/dataset/master/SeoulBikeData2.csv'
df = pd.read_csv(path)
df['DateTime'] = pd.to_datetime(df['DateTime'])
df.head()
|  | DateTime | Count | Temperature | Humidity | WindSpeed | Visibility | Solar | Rainfall | Snowfall | Seasons | Holiday | FuncDay |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 0 | 2017-12-01 00:00:00 | 254 | -5.2 | 37 | 2.2 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes |
| 1 | 2017-12-01 01:00:00 | 204 | -5.5 | 38 | 0.8 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes |
| 2 | 2017-12-01 02:00:00 | 173 | -6.0 | 39 | 1.0 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes |
| 3 | 2017-12-01 03:00:00 | 107 | -6.2 | 40 | 0.9 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes |
| 4 | 2017-12-01 04:00:00 | 78 | -6.0 | 36 | 2.3 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes |
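As a side note, the datetime parsing can be folded into the read itself; an equivalent one-liner:

# Equivalent: parse the timestamp column while reading the CSV
df = pd.read_csv(path, parse_dates=['DateTime'])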
Check for missing values
# Write your answer code here.
print(df.isnull().sum())
DateTime       0
Count          0
Temperature    0
Humidity       0
WindSpeed      0
Visibility     0
Solar          0
Rainfall       0
Snowfall       0
Seasons        0
Holiday        0
FuncDay        0
dtype: int64
Variable descriptions

DateTime : year-month-day hh:mi:ss
Count : hourly demand
Temperature : temperature (°C)
Humidity : humidity (%)
WindSpeed : wind speed (m/s)
Rainfall : rainfall (mm)
Snowfall : snowfall (cm)
Seasons : Winter, Spring, Summer, Autumn
Holiday : Holiday / No holiday
FuncDay : functioning day, Yes / No
Visibility : visibility
Solar : solar radiation
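A quick structural check confirms the column types match these descriptions:

# Column dtypes and non-null counts at a glance
df.info()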
# Data range
df.DateTime.min(), df.DateTime.max()
(Timestamp('2017-12-01 00:00:00'), Timestamp('2018-11-30 23:00:00'))
# Write your answer code here.
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['font.family'] = 'Malgun Gothic'  # Korean-capable font for plot labels
# Let's look at hourly demand over 14 days.
temp = df[:24*14]
plt.figure(figsize = (20,8))
plt.plot('DateTime', 'Count', data = temp)
plt.grid()
plt.show()
# There is a clear hourly pattern: this is time series data, so the time features will play a key role.
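To make that daily cycle explicit, here is a small sketch that averages demand by hour of day (DateTime is still a regular column at this point):

# Average demand per hour of day; the commute peaks stand out
df.groupby(df['DateTime'].dt.hour)['Count'].mean().plot(kind='bar', figsize=(12, 4))
plt.xlabel('Hour')
plt.ylabel('Mean Count')
plt.show()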
# Write your answer code here.
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats as st
graph = sns.jointplot(x=df['Temperature'], y=df['Count'], kind='scatter')
graph.set_axis_labels(xlabel='Temperature', ylabel='Count')
plt.show()
# When the temperature is low, ridership drops.
# Write your answer code here.
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats as st
graph = sns.jointplot(x=df['WindSpeed'], y=df['Count'], kind='scatter')
graph.set_axis_labels(xlabel='WindSpeed', ylabel='Count')
plt.show()
# Strong wind also discourages riding.
# Write your answer code here.
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats as st
graph = sns.jointplot(x=df['Visibility'], y=df['Count'], kind='scatter')
graph.set_axis_labels(xlabel='Visibility', ylabel='Count')
plt.show()
# Poor visibility comes with lower ridership.
# Write your answer code here.
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats as st
graph = sns.jointplot(x=df['Rainfall'], y=df['Count'], kind='scatter')
graph.set_axis_labels(xlabel='Rainfall', ylabel='Count')
plt.show()
# People ride less when it rains.
# Write your answer code here.
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats as st
graph = sns.jointplot(x=df['Snowfall'], y=df['Count'], kind='scatter')
graph.set_axis_labels(xlabel='Snowfall', ylabel='Count')
plt.show()
# People ride less when it snows.
data_agg = df.groupby(['Seasons'])['Count'].agg(['min','max','mean'])
data_agg
| Seasons | min | max | mean |
| --- | --- | --- | --- |
| Autumn | 0 | 3298 | 819.597985 |
| Spring | 0 | 3251 | 730.031250 |
| Summer | 9 | 3556 | 1034.073370 |
| Winter | 3 | 937 | 225.541204 |
plt.figure(figsize=(10, 6))
data_agg['mean'].plot(kind='bar')
plt.title('Mean Value by Season')
plt.xlabel('Seasons')
plt.ylabel('Mean')
plt.xticks(rotation=45)
plt.show()
data_agg = df.groupby(['Holiday'])['Count'].agg(['min','max','mean'])
data_agg
| Holiday | min | max | mean |
| --- | --- | --- | --- |
| Holiday | 0 | 2400 | 499.756944 |
| No Holiday | 0 | 3556 | 715.228026 |
plt.figure(figsize=(10, 6))
data_agg['mean'].plot(kind='bar')
plt.title('Mean Value by Holiday')
plt.xlabel('Holiday')
plt.ylabel('Mean')
plt.xticks(rotation=45)
plt.show()
data_agg = df.groupby(['FuncDay'])['Count'].agg(['min','max','mean'])
data_agg
| FuncDay | min | max | mean |
| --- | --- | --- | --- |
| No | 0 | 0 | 0.000000 |
| Yes | 2 | 3556 | 729.156999 |
df['FuncDay'].value_counts()
FuncDay
Yes    8465
No      295
Name: count, dtype: int64
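The aggregation above shows that Count is always 0 when FuncDay is 'No': the system is simply closed on non-functioning days. A one-line check:

# Maximum demand on non-functioning days; should print 0
print(df.loc[df['FuncDay'] == 'No', 'Count'].max())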
plt.figure(figsize=(10, 6))
data_agg['mean'].plot(kind='bar')
plt.title('Mean Value by FuncDay')
plt.xlabel('FuncDay')
plt.ylabel('Mean')
plt.xticks(rotation=45)
plt.show()
# Add month, day-of-week, hour, and weekend features
df['Month'] = df['DateTime'].dt.month        # month
df['DayOfWeek'] = df['DateTime'].dt.weekday  # day of week (Mon=0)
df['Hour'] = df['DateTime'].dt.hour
df['IsWeekend'] = (df['DateTime'].dt.dayofweek >= 5).astype(int)  # 1 if weekend, else 0
df.head()
|  | DateTime | Count | Temperature | Humidity | WindSpeed | Visibility | Solar | Rainfall | Snowfall | Seasons | Holiday | FuncDay | Month | DayOfWeek | Hour | IsWeekend |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 0 | 2017-12-01 00:00:00 | 254 | -5.2 | 37 | 2.2 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 0 | 0 |
| 1 | 2017-12-01 01:00:00 | 204 | -5.5 | 38 | 0.8 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 1 | 0 |
| 2 | 2017-12-01 02:00:00 | 173 | -6.0 | 39 | 1.0 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 2 | 0 |
| 3 | 2017-12-01 03:00:00 | 107 | -6.2 | 40 | 0.9 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 3 | 0 |
| 4 | 2017-12-01 04:00:00 | 78 | -6.0 | 36 | 2.3 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 4 | 0 |
df.set_index('DateTime', inplace=True)
import scipy.stats as spst
import statsmodels.api as sm
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
period = 7
decomp = sm.tsa.seasonal_decompose(df['Count'], model = 'additive', period = period)
result = pd.DataFrame({'observed':decomp.observed, 'trend':decomp.trend, 'seasonal':decomp.seasonal, 'residual':decomp.resid, 'DateTime':df.index})
# The moving-average trend (and hence the residual) has NaNs at both ends; fill them forward and backward
result['trend'] = result['trend'].ffill().bfill()
result['residual'] = result['residual'].ffill().bfill()
result.head()
| DateTime | observed | trend | seasonal | residual | DateTime |
| --- | --- | --- | --- | --- | --- |
| 2017-12-01 00:00:00 | 254.0 | 156.714286 | -36.975688 | -34.186551 | 2017-12-01 00:00:00 |
| 2017-12-01 01:00:00 | 204.0 | 156.714286 | 12.863055 | -34.186551 | 2017-12-01 01:00:00 |
| 2017-12-01 02:00:00 | 173.0 | 156.714286 | 18.299855 | -34.186551 | 2017-12-01 02:00:00 |
| 2017-12-01 03:00:00 | 107.0 | 156.714286 | -15.527735 | -34.186551 | 2017-12-01 03:00:00 |
| 2017-12-01 04:00:00 | 78.0 | 186.142857 | -6.214272 | -101.928586 | 2017-12-01 04:00:00 |
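For a visual sanity check, the result object returned by seasonal_decompose can plot all four components directly:

# Plot the observed, trend, seasonal, and residual panels
fig = decomp.plot()
fig.set_size_inches(12, 8)
plt.show()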
df.head()
| DateTime | Count | Temperature | Humidity | WindSpeed | Visibility | Solar | Rainfall | Snowfall | Seasons | Holiday | FuncDay | Month | DayOfWeek | Hour | IsWeekend |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 2017-12-01 00:00:00 | 254 | -5.2 | 37 | 2.2 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 0 | 0 |
| 2017-12-01 01:00:00 | 204 | -5.5 | 38 | 0.8 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 1 | 0 |
| 2017-12-01 02:00:00 | 173 | -6.0 | 39 | 1.0 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 2 | 0 |
| 2017-12-01 03:00:00 | 107 | -6.2 | 40 | 0.9 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 3 | 0 |
| 2017-12-01 04:00:00 | 78 | -6.0 | 36 | 2.3 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 4 | 0 |
df.reset_index(inplace=True)
result.reset_index(drop=True, inplace=True)
dataset = pd.merge(df, result, on='DateTime', how='left')
dataset.head()
|  | DateTime | Count | Temperature | Humidity | WindSpeed | Visibility | Solar | Rainfall | Snowfall | Seasons | Holiday | FuncDay | Month | DayOfWeek | Hour | IsWeekend | observed | trend | seasonal | residual |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 0 | 2017-12-01 00:00:00 | 254 | -5.2 | 37 | 2.2 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 0 | 0 | 254.0 | 156.714286 | -36.975688 | -34.186551 |
| 1 | 2017-12-01 01:00:00 | 204 | -5.5 | 38 | 0.8 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 1 | 0 | 204.0 | 156.714286 | 12.863055 | -34.186551 |
| 2 | 2017-12-01 02:00:00 | 173 | -6.0 | 39 | 1.0 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 2 | 0 | 173.0 | 156.714286 | 18.299855 | -34.186551 |
| 3 | 2017-12-01 03:00:00 | 107 | -6.2 | 40 | 0.9 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 3 | 0 | 107.0 | 156.714286 | -15.527735 | -34.186551 |
| 4 | 2017-12-01 04:00:00 | 78 | -6.0 | 36 | 2.3 | 2000 | 0.0 | 0.0 | 0.0 | Winter | No Holiday | Yes | 12 | 4 | 4 | 0 | 78.0 | 186.142857 | -6.214272 | -101.928586 |
selected = ['Month', 'DayOfWeek', 'Hour', 'IsWeekend', 'Temperature', 'WindSpeed', 'Visibility', 'Rainfall', 'Snowfall', 'Seasons', 'Holiday', 'trend', 'seasonal', 'Count']
df_del = dataset[selected]
obj_cols = list(df_del.columns[df_del.dtypes=='object'])
df_preset = pd.get_dummies(df_del, columns=obj_cols, drop_first=True, dtype=int)  # drop_first drops one dummy level per category
df_preset.head()
|  | Month | DayOfWeek | Hour | IsWeekend | Temperature | WindSpeed | Visibility | Rainfall | Snowfall | trend | seasonal | Count | Seasons_Spring | Seasons_Summer | Seasons_Winter | Holiday_No Holiday |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 0 | 12 | 4 | 0 | 0 | -5.2 | 2.2 | 2000 | 0.0 | 0.0 | 156.714286 | -36.975688 | 254 | 0 | 0 | 1 | 1 |
| 1 | 12 | 4 | 1 | 0 | -5.5 | 0.8 | 2000 | 0.0 | 0.0 | 156.714286 | 12.863055 | 204 | 0 | 0 | 1 | 1 |
| 2 | 12 | 4 | 2 | 0 | -6.0 | 1.0 | 2000 | 0.0 | 0.0 | 156.714286 | 18.299855 | 173 | 0 | 0 | 1 | 1 |
| 3 | 12 | 4 | 3 | 0 | -6.2 | 0.9 | 2000 | 0.0 | 0.0 | 156.714286 | -15.527735 | 107 | 0 | 0 | 1 | 1 |
| 4 | 12 | 4 | 4 | 0 | -6.0 | 2.3 | 2000 | 0.0 | 0.0 | 186.142857 | -6.214272 | 78 | 0 | 0 | 1 | 1 |
# Label: demand two hours ahead; the last two rows have no label, so drop them
df_preset['y'] = df_preset['Count'].shift(-2)
df_preset = df_preset.iloc[:-2]
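A quick sanity check on the alignment: each row's label should equal the Count two rows (two hours) later, which the head() below also shows (row 0's y is 173.0, the Count of row 2).

# The label in row 0 equals the Count two hours later
assert df_preset['y'].iloc[0] == df_preset['Count'].iloc[2]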
df_preset.head()
|  | Month | DayOfWeek | Hour | IsWeekend | Temperature | WindSpeed | Visibility | Rainfall | Snowfall | trend | seasonal | Count | Seasons_Spring | Seasons_Summer | Seasons_Winter | Holiday_No Holiday | y |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 0 | 12 | 4 | 0 | 0 | -5.2 | 2.2 | 2000 | 0.0 | 0.0 | 156.714286 | -36.975688 | 254 | 0 | 0 | 1 | 1 | 173.0 |
| 1 | 12 | 4 | 1 | 0 | -5.5 | 0.8 | 2000 | 0.0 | 0.0 | 156.714286 | 12.863055 | 204 | 0 | 0 | 1 | 1 | 107.0 |
| 2 | 12 | 4 | 2 | 0 | -6.0 | 1.0 | 2000 | 0.0 | 0.0 | 156.714286 | 18.299855 | 173 | 0 | 0 | 1 | 1 | 78.0 |
| 3 | 12 | 4 | 3 | 0 | -6.2 | 0.9 | 2000 | 0.0 | 0.0 | 156.714286 | -15.527735 | 107 | 0 | 0 | 1 | 1 | 100.0 |
| 4 | 12 | 4 | 4 | 0 | -6.0 | 2.3 | 2000 | 0.0 | 0.0 | 186.142857 | -6.214272 | 78 | 0 | 0 | 1 | 1 | 181.0 |
# Write your answer code here.
import seaborn as sns
import matplotlib.pyplot as plt
# Correlation heatmap
plt.figure(figsize=(12,12))
sns.heatmap(df_preset.corr(numeric_only=True),
            annot=True,
            cmap='Blues',
            cbar=False,           # drop the color bar
            square=True,
            fmt='.3f',            # three decimal places
            annot_kws={'size':9}
            )
plt.show()
10. We want to split the data into training and validation sets.
Assign the y column (demand two hours ahead) as the label y and the remaining columns as the features X, then split them into training and validation datasets.
Target DataFrame: df_preset
Train/validation split
Training label: y_train, training features: X_train
Validation label: y_valid, validation features: X_valid
Training-to-validation ratio of 80:20
random_state: 42
Use scikit-learn's train_test_split function.
Scaling
Use the MinMaxScaler from sklearn.preprocessing
Scale the training features with MinMaxScaler's fit_transform and assign the result to X_train
Scale the validation features with MinMaxScaler's transform and assign the result to X_valid
(A literal answer to this prompt is sketched right below; the time series solution that follows instead keeps the rows in time order and scales the 3-D windowed data.)
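A minimal sketch of the literal answer, assuming the plain random split the prompt asks for:

# Literal answer to the prompt: random 80:20 split plus Min-Max scaling
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

X = df_preset.drop('y', axis=1)
y = df_preset['y']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=42)

scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)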
import numpy as np
def temporalize(x, y, timestep):
    # Slide a window of length `timestep` over the rows of x;
    # each window is labeled with the y aligned to its last row.
    output_X = []
    output_y = []
    for i in range(len(x) - timestep + 1):
        output_X.append(x.iloc[i:i+timestep])
        output_y.append(y.iloc[i+timestep-1])
    return np.array(output_X), np.array(output_y)

def flatten(X):
    # Collapse (samples, timesteps, features) to 2-D by keeping
    # only the last timestep of each sample.
    flattened_X = np.empty((X.shape[0], X.shape[2]))
    for i in range(X.shape[0]):
        flattened_X[i] = X[i, (X.shape[1]-1), :]
    return flattened_X

def scale(X, scaler):
    # Apply a fitted 2-D scaler to every sample of a 3-D array.
    for i in range(X.shape[0]):
        X[i, :, :] = scaler.transform(X[i, :, :])
    return X
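A tiny usage example (toy data, for illustration only) makes the windowing concrete:

# 5 rows, 2 features, window of 3 -> 3 windows; each label is y at the window's last row
demo_x = pd.DataFrame({'a': range(5), 'b': range(5)})
demo_y = pd.Series(range(5))
dx, dy = temporalize(demo_x, demo_y, 3)
print(dx.shape, dy)  # (3, 3, 2) [2 3 4]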
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
def Normalize(x_train, x_val, y_train, y_val):
    # Apply the scaler to the 3-D sequence data
    scaler_x = MinMaxScaler().fit(flatten(x_train))  # fit on a 2-D view of the training windows
    x_train_s = scale(x_train, scaler_x)
    x_val_s = scale(x_val, scaler_x)
    # Scale y as well (it helps optimization); y_val is returned on its original scale
    scaler_y = MinMaxScaler()
    y_train_s = scaler_y.fit_transform(y_train.reshape(-1,1))
    return x_train_s, x_val_s, y_train_s, y_val, scaler_x, scaler_y
# Write your answer code here.
from sklearn.model_selection import train_test_split
target = 'y'
x = df_preset.drop(target, axis=1)
y = df_preset[target]
timesteps = 7
x2, y2 = temporalize(x, y, timesteps)  # x: (8758, 16) -> x2: (8752, 7, 16)
test_times = 10 * 24  # hold out the last 10 days (24 hours each) for validation
X_train, X_valid, y_train, y_valid = train_test_split(x2, y2, test_size=test_times, shuffle=False)  # shuffle=False keeps time order
print(X_train.shape, X_valid.shape, y_train.shape, y_valid.shape)
(8512, 7, 16) (240, 7, 16) (8512,) (240,)
x_train_s, x_val_s, y_train_s, y_val, scaler_x, scaler_y = Normalize(X_train, X_valid, y_train, y_valid)
x_train_s.shape
(8512, 7, 16)
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam
tf.random.set_seed(1)
from tensorflow.keras.layers import Dense, LSTM, Flatten, TimeDistributed, RepeatVector, Bidirectional, Lambda, Conv1D
n_features = x_train_s.shape[2]
model = Sequential([
    LSTM(16, input_shape=(timesteps, n_features), return_sequences=True),  # emit the full hidden-state sequence
    Flatten(),
    Dense(1)
])
model.compile(optimizer=Adam(learning_rate=0.01), loss='mae', metrics=['mae','mse'])
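Inspecting the layer shapes confirms the design: the LSTM emits a (7, 16) sequence, Flatten turns it into a 112-dimensional vector, and Dense(1) maps that to a single prediction:

model.summary()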
# es = EarlyStopping(monitor='val_loss', patience=4, mode='min', verbose=1)  # earlier, stricter setting
es = EarlyStopping(monitor='val_loss',
                   min_delta=0,      # minimum change that counts as an improvement
                   patience=100,     # how many epochs to wait without improvement
                   mode='min',
                   verbose=1,
                   restore_best_weights=True)
lr_reduction = ReduceLROnPlateau(monitor='val_loss',
                                 patience=10,
                                 verbose=1,
                                 factor=0.5,       # multiply the current lr by 0.5
                                 min_lr=0.000001)  # lower bound on the lr
history = model.fit(x_train_s, y_train_s,
                    epochs=100,
                    validation_split=.2,
                    callbacks=[es, lr_reduction],
                    verbose=1).history
Epoch 1/100   213/213 ━━━━ 2s 3ms/step - loss: 0.0994 - val_loss: 0.0506
Epoch 2/100   213/213 ━━━━ 0s 2ms/step - loss: 0.0314 - val_loss: 0.0324
...
Epoch 46: ReduceLROnPlateau reducing learning rate to 0.004999999888241291.
...
Epoch 76: ReduceLROnPlateau reducing learning rate to 0.0024999999441206455.
...
Epoch 95: ReduceLROnPlateau reducing learning rate to 0.0012499999720603228.
...
Epoch 98/100  213/213 ━━━━ 0s 2ms/step - loss: 0.0015 - val_loss: 0.0018
Epoch 99/100  213/213 ━━━━ 0s 2ms/step - loss: 0.0014 - val_loss: 0.0020
Epoch 100/100 213/213 ━━━━ 0s 2ms/step - loss: 0.0014 - val_loss: 0.0027
Restoring model weights from the end of the best epoch: 98.
# Wrap the history plotting in a reusable function.
def dl_history_plot(history):
    plt.figure(figsize=(16,4))
    plt.subplot(1,2,1)
    plt.plot(history['loss'], label='loss', marker='.')
    plt.plot(history['val_loss'], label='val_loss', marker='.')
    plt.ylabel('Loss')
    plt.xlabel('Epochs')
    plt.legend()
    plt.grid()
    plt.subplot(1,2,2)
    plt.plot(history['mse'], label='mse', marker='.')
    plt.plot(history['val_mse'], label='val_mse', marker='.')
    plt.ylabel('MSE')
    plt.xlabel('Epochs')
    plt.legend()
    plt.grid()
    plt.show()
# Visualize the prediction results
def plot_model_result(y_train, y_val, pred):
    y_train = pd.Series(y_train)
    y_val = pd.Series(y_val)
    y_val.index = range(len(y_train), len(y_train) + len(y_val))
    pred = pd.Series(pred.reshape(-1,), index=y_val.index)
    # Full series on top; validation window only below
    plt.figure(figsize=(12,8))
    plt.subplot(2,1,1)
    plt.plot(y_train, label='train')
    plt.plot(y_val, label='val')
    plt.plot(pred, label='pred')
    plt.legend()
    plt.grid()
    plt.subplot(2,1,2)
    plt.plot(y_val, label='val')
    plt.plot(pred, label='pred')
    plt.legend()
    plt.grid()
    plt.show()
dl_history_plot(history)
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error, r2_score
pred = model.predict(x_val_s)
# Bring the predictions back to the original scale
pred = scaler_y.inverse_transform(pred)
pred = pred.reshape(-1,)
# Evaluation
print('Total demand: ', int(y_val.sum()))
print('MAE : {:.2f}'.format(mean_absolute_error(y_val, pred)))
print('MAPE: {:.2f} %'.format(mean_absolute_percentage_error(y_val, pred)*100))
print('R2  : {:.4f}'.format(r2_score(y_val, pred)))
plot_model_result(y_train, y_val, pred)
8/8 ━━━━ 0s 16ms/step
Total demand:  148919
MAE : 5.85
MAPE: 1.55 %
R2  : 0.9996