Search Results for 'Transferlearning'

1 POSTS

  1. 2024.10.29 [keras] 이미지 분류 예제 코드 (데이터분리, CNN모델, 전이학습)

 

  • 이미지 파일을 읽어 라벨링하여 학습, 검증 데이터 생성
  • CNN 모델 생성하여 이미지 분류기 학습
  • Transfer learning 기반 이미지 분류기 학습

 

----------------------------------------------------------------------------------------------------

- 데이터 불러오기, 라벨링, 학습/검증 data 분리

 

# Module imports
import os
import pathlib

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.layers import (
    Conv2D,
    Dense,
    Dropout,
    Flatten,
    MaxPooling2D,
    Rescaling,
)
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import image

 

 

# Load the data: download the archive from dataset_url, un-tar it, and cache it
# in the Keras cache directory under 'data_folder_name'.
dataset_url = "데이터 url"  # NOTE(review): placeholder — replace with a real dataset URL before running

data_dir = tf.keras.utils.get_file('data_folder_name', origin=dataset_url, untar=True)

# Wrap the returned path string in pathlib.Path for convenient path handling
data_dir = pathlib.Path(data_dir)

 

 

# ---- Train / validation split ----------------------------------------------
# Hyperparameters
input_shape = (224, 224, 3)  # (height, width, channels) fed to the models below
batch_size = 32
num_classes = 5

# Derive the loader's image size from input_shape so there is a single source
# of truth (previously (224, 224) was hard-coded in both loader calls).
image_size = input_shape[:2]

# Training subset (80% of the images).
# The directory layout is one sub-folder per class; labels are inferred from it.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    directory=str(data_dir),
    label_mode="categorical",  # one-hot labels ("binary" for 2-class problems)
    batch_size=batch_size,
    image_size=image_size,
    seed=42,                   # must match the validation call so the subsets don't overlap
    shuffle=True,
    validation_split=0.2,
    subset="training",
)

# Validation subset (the remaining 20%)
test_ds = tf.keras.preprocessing.image_dataset_from_directory(
    directory=str(data_dir),
    label_mode="categorical",
    batch_size=batch_size,
    image_size=image_size,
    seed=42,
    validation_split=0.2,
    subset="validation",
)

# Class names are taken from the sub-directory names
print(f"Class 이름: {train_ds.class_names} \n")

# Inspect the shape of one training batch (images and one-hot labels)
batch_img, batch_label = next(iter(train_ds))
print(f"이미지의 차원: {batch_img.shape} \n레이블의 차원: {batch_label.shape}")

----------------------------------------------------------------------------------------------------

 

----------------------------------------------------------------------------------------------------

- CNN 모델 생성, 학습

 

# ---- CNN model: build, compile, train ---------------------------------------
# Hyperparameters
num_epochs = 10
batch_size = 32
learning_rate = 0.001
dropout_rate = 0.5  # NOTE(review): unused — the dropout layers below use 0.2 / 0.3
input_shape = (224, 224, 3)  # input image size
num_classes = 5

# Small CNN: four conv+pool stages, then a dense classifier head.
model = Sequential()
model.add(Rescaling(1. / 255))  # scale pixels from [0, 255] to [0, 1]
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same',
                 activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Fix: the original had these two adds on one line — a SyntaxError in Python.
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))  # was a hard-coded 5

# Compile: Adam + categorical cross-entropy (labels are one-hot encoded).
# Fix: the original's closing ')' was swallowed by an inline comment.
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate),
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)

# Stop training when val_loss has not improved for 3 consecutive epochs
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)

# Train (use the num_epochs hyperparameter instead of a hard-coded 10)
history = model.fit(
    train_ds,
    validation_data=test_ds,
    epochs=num_epochs,
    callbacks=[es],
)

 

----------------------------------------------------------------------------------------------------

 

 

----------------------------------------------------------------------------------------------------

- Transfer learning으로 분류기 학습

 

# List the pretrained architectures available for transfer learning
print(dir(tf.keras.applications))  # shows which model families keras.applications provides

# Load the pretrained backbone:
# - VGG16 with weights pretrained on ImageNet
# - include_top=False drops VGG16's original 1000-class classifier head,
#   leaving only the convolutional feature extractor
base_model = tf.keras.applications.VGG16(input_shape=(224, 224, 3), weights='imagenet', include_top=False)
# Freeze the pretrained weights so only the new classifier head is trained
base_model.trainable = False

 

 

 

# Assemble the new classifier on top of the frozen backbone (functional API).
inputs = tf.keras.Input(shape=(224, 224, 3))

# VGG16's ImageNet weights expect inputs in [-1, 1]
h = tf.keras.layers.Rescaling(1. / 127.5, offset=-1)(inputs)

# training=False keeps the frozen backbone in inference mode
h = base_model(h, training=False)

# Collapse the spatial feature map to one vector per image
h = tf.keras.layers.GlobalAveragePooling2D()(h)

# Classifier head: 5 target classes
output = tf.keras.layers.Dense(5, activation='softmax')(h)

model = tf.keras.Model(inputs=inputs, outputs=output)
model.summary()

 

 

 

# Compile the transfer-learning model: Adam + one-hot cross-entropy loss
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate),
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

# Halt training once val_loss stops improving for 3 consecutive epochs
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)

# Fit on the training split, validating on the held-out split
history = model.fit(train_ds, validation_data=test_ds, epochs=2, callbacks=[es])

 

 

# Plot training vs. validation accuracy across epochs
for metric, label in (('accuracy', 'Accuracy'), ('val_accuracy', 'Val Accuracy')):
    plt.plot(history.history[metric], label=label)
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.title('Model Accuracy')
plt.show()