Training a CNN Subclassing Model¶
Source: https://github.com/deeplearningzerotoall/TensorFlow (모두의 딥러닝)¶
In [0]:
# Runtime -> Change runtime type -> set the hardware accelerator to TPU
%tensorflow_version 2.x
# Runtime -> Restart runtime
In [0]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
1. Importing Libraries¶
In [0]:
import tensorflow.compat.v1 as tf # exposes the TensorFlow 1.x-style API on top of TF 2
from tensorflow import keras
from tensorflow.keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
import os
print(tf.__version__)    # check the TensorFlow version (Colab's default is 1.15.0) --> switched to 2.x via "%tensorflow_version 2.x"
print(keras.__version__) # check the Keras version
2. Enable Eager Mode¶
In [0]:
# Switch from graph mode to eager execution, so operations run immediately
tf.enable_eager_execution()
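As a quick sanity check (a minimal sketch, not part of the original notebook), tf.executing_eagerly() reports whether eager mode is actually on:
In [0]:
# Verify eager execution is enabled; should print True after the call above
print(tf.executing_eagerly())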
3. Hyper Parameters¶
In [0]:
learning_rate = 0.001   # learning rate
training_epochs = 15    # number of epochs
batch_size = 100        # batch size
tf.set_random_seed(777) # fix the random seed for reproducibility
4. Creating a Checkpoint Directory¶
In [0]:
cur_dir = os.getcwd()                 # current working directory
ckpt_dir_name = 'checkpoints'         # checkpoint directory name
model_dir_name = 'mnist_cnn_subclass' # model name
checkpoint_dir = os.path.join(cur_dir, ckpt_dir_name, model_dir_name) # full checkpoint path
os.makedirs(checkpoint_dir, exist_ok=True)                            # create the directory
checkpoint_prefix = os.path.join(checkpoint_dir, model_dir_name)      # prefix for the saved files
5. MNIST/Fashion MNIST Data¶
In [0]:
## MNIST Dataset #########################################################
mnist = keras.datasets.mnist
class_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
##########################################################################
## Fashion MNIST Dataset #################################################
#mnist = keras.datasets.fashion_mnist
#class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
##########################################################################
6. Datasets¶
In [0]:
# Load the MNIST images (train, test)
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the pixel values from the 0~255 range down to [0, 1]
train_images = train_images.astype(np.float32) / 255.
test_images = test_images.astype(np.float32) / 255.
# np.expand_dims adds a channel dimension: (N, 28, 28) -> (N, 28, 28, 1)
train_images = np.expand_dims(train_images, axis=-1)
test_images = np.expand_dims(test_images, axis=-1)
# One-hot encode the labels
train_labels = to_categorical(train_labels, 10)
test_labels = to_categorical(test_labels, 10)
# Build the dataset instances
train_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(
    buffer_size=100000).batch(batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(batch_size)
# from_tensor_slices : slices the arrays along the first axis into (image, label) pairs
# batch : groups the examples into batches of batch_size
# shuffle : reshuffles up to buffer_size examples each epoch, which helps reduce overfitting
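To confirm what the pipeline yields, one quick check (a sketch, not in the original notebook) is to pull a single batch and print its shapes; with batch_size = 100 the expected shapes are (100, 28, 28, 1) and (100, 10):
In [0]:
# Inspect one batch from the training pipeline
for images, labels in train_dataset.take(1):
    print(images.shape)  # (100, 28, 28, 1)
    print(labels.shape)  # (100, 10)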
7. Model Class¶
In [0]:
# Model class implementation
class MNISTModel(tf.keras.Model): # subclass tf.keras.Model
    def __init__(self): # define the layers the network is built from
        super(MNISTModel, self).__init__()
        self.conv1 = keras.layers.Conv2D(filters=32, kernel_size=[3, 3], padding='SAME', activation=tf.nn.relu)
        self.pool1 = keras.layers.MaxPool2D(padding='SAME')
        self.conv2 = keras.layers.Conv2D(filters=64, kernel_size=[3, 3], padding='SAME', activation=tf.nn.relu)
        self.pool2 = keras.layers.MaxPool2D(padding='SAME')
        self.conv3 = keras.layers.Conv2D(filters=128, kernel_size=[3, 3], padding='SAME', activation=tf.nn.relu)
        self.pool3 = keras.layers.MaxPool2D(padding='SAME')
        self.pool3_flat = keras.layers.Flatten()
        self.dense4 = keras.layers.Dense(units=256, activation=tf.nn.relu)
        self.drop4 = keras.layers.Dropout(rate=0.4)
        self.dense5 = keras.layers.Dense(units=10)
    def call(self, inputs, training=False): # wire the layers from __init__ into the forward pass
        net = self.conv1(inputs)
        net = self.pool1(net)
        net = self.conv2(net)
        net = self.pool2(net)
        net = self.conv3(net)
        net = self.pool3(net)
        net = self.pool3_flat(net)
        net = self.dense4(net)
        net = self.drop4(net, training=training) # pass training explicitly so dropout is disabled at inference
        net = self.dense5(net)
        return net
In [0]:
model = MNISTModel() # instantiate the model
temp_inputs = keras.Input(shape=(28, 28, 1)) # symbolic input matching the image size
model(temp_inputs)   # call once to build the model so summary() can report shapes
model.summary()      # print a summary of the model
8. Loss Function¶
In [0]:
def loss_fn(model, images, labels):
    logits = model(images, training=True)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2( # applies softmax internally
        logits=logits, labels=labels))
    return loss
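For reference, a minimal sketch (not part of the original notebook) computing the same quantity with the Keras helper; from_logits=True again makes the softmax internal:
In [0]:
# Equivalent loss via keras.losses.categorical_crossentropy
def loss_fn_keras(model, images, labels):
    logits = model(images, training=True)
    return tf.reduce_mean(
        keras.losses.categorical_crossentropy(labels, logits, from_logits=True))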
9. Calculating Gradient¶
In [0]:
def grad(model, images, labels):
    with tf.GradientTape() as tape: # records every executed operation so it can be differentiated automatically
        loss = loss_fn(model, images, labels)
    return tape.gradient(loss, model.variables)
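To see what GradientTape does in isolation, here is a minimal sketch (not from the original notebook) differentiating y = x² at x = 3, which gives dy/dx = 6:
In [0]:
# Record y = x * x on the tape and differentiate with respect to x
x = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch(x) # constants must be watched explicitly (variables are watched automatically)
    y = x * x
print(tape.gradient(y, x)) # tf.Tensor(6.0, ...)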
10. Calculating Model's Accuracy¶
In [0]:
def evaluate(model, images, labels):
    logits = model(images, training=False)
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1)) # compare predicted and true classes
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))        # fraction of matching labels
    return accuracy
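A toy example (hypothetical values, not from the original notebook) makes the argmax comparison concrete; two of three predictions match, so the accuracy is about 0.667:
In [0]:
# Toy accuracy computation: rows are samples, columns are class scores
logits = tf.constant([[2.0, 0.1], [0.3, 1.5], [1.2, 0.8]])
labels = tf.constant([[1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1)) # [True, True, False]
print(tf.reduce_mean(tf.cast(correct, tf.float32)))            # ~0.6667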
11. Optimizer¶
In [0]:
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
12. Creating a Checkpoint¶
In [0]:
checkpoint = tf.train.Checkpoint(cnn=model) # groups the objects to save so they can be restored later
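The saved state can later be restored into a fresh model from the same directory; a minimal sketch (assuming the training loop below has already written at least one checkpoint):
In [0]:
# Restore the most recent checkpoint into a newly built model
restored_model = MNISTModel()
restore_ckpt = tf.train.Checkpoint(cnn=restored_model)
restore_ckpt.restore(tf.train.latest_checkpoint(checkpoint_dir))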
13. Training¶
In [0]:
# Train the model
print('Learning started. It takes some time.')
for epoch in range(training_epochs): # set by the hyperparameter above (training_epochs = 15)
    # Reset the running statistics
    avg_loss = 0.
    avg_train_acc = 0.
    avg_test_acc = 0.
    train_step = 0
    test_step = 0
    # Training
    for images, labels in train_dataset:
        grads = grad(model, images, labels)                    # gradients computed via GradientTape
        optimizer.apply_gradients(zip(grads, model.variables)) # apply the update
        loss = loss_fn(model, images, labels)                  # loss for this batch
        acc = evaluate(model, images, labels)                  # accuracy for this batch
        avg_loss = avg_loss + loss                             # accumulate the loss
        avg_train_acc = avg_train_acc + acc                    # accumulate the accuracy
        train_step += 1                                        # count one step per batch
    avg_loss = avg_loss / train_step                           # average loss over the epoch
    avg_train_acc = avg_train_acc / train_step                 # average training accuracy
    # Test
    for images, labels in test_dataset:
        acc = evaluate(model, images, labels)                  # accuracy for this batch
        avg_test_acc = avg_test_acc + acc                      # accumulate the accuracy
        test_step += 1                                         # count one step per batch
    avg_test_acc = avg_test_acc / test_step                    # average test accuracy
    # Print the loss and accuracy for each epoch
    print('Epoch:', '{}'.format(epoch + 1), 'loss =', '{:.8f}'.format(avg_loss),
          'train accuracy = ', '{:.4f}'.format(avg_train_acc),
          'test accuracy = ', '{:.4f}'.format(avg_test_acc))
    # Save the model's state for this epoch
    checkpoint.save(file_prefix=checkpoint_prefix)
print('Learning Finished!')
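matplotlib was imported above but never used; as a hypothetical follow-up sketch (not in the original notebook), a single test image can be shown alongside the model's prediction:
In [0]:
# Visualize one test image with its predicted and true class (run after training)
idx = 0 # arbitrary test-set index
logits = model(test_images[idx:idx + 1], training=False)
pred = class_names[np.argmax(logits)]
true = class_names[np.argmax(test_labels[idx])]
plt.imshow(test_images[idx, :, :, 0], cmap='gray')
plt.title('predicted: {} / true: {}'.format(pred, true))
plt.show()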