本文分享了我在silent speech 项目过程中实现的基于嘴唇图像数据集的autoencoder自编码器。输入输出都是64×64的嘴唇灰度图。自编码器由编码解码两个部分构成,同时实现了利用checkpoint在每个epoch运算时,自动保存测试集loss更小的模型。
数据集共包含84679张图片,其中前68728张图片作为训练集,后15951张图片作为测试集。
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow import keras
import numpy as np
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras import optimizers
from matplotlib import pyplot as plt
from tensorflow.keras import Input
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten, Conv2DTranspose, UpSampling2D
from tensorflow.keras.models import Model


def autoencoder_lips():
    """Build a convolutional autoencoder for 64x64 grayscale lip images.

    Architecture: two conv blocks with max-pooling (encoder, 64x64 -> 16x16)
    mirrored by two conv blocks with up-sampling (decoder, 16x16 -> 64x64);
    a final 1-filter sigmoid conv layer reconstructs the single-channel image.

    Returns:
        An uncompiled ``keras.Model`` mapping ``(64, 64, 1)`` inputs to
        ``(64, 64, 1)`` outputs in ``[0, 1]``.
    """
    input_img = Input(shape=(64, 64, 1))

    # ----- Encoder: 64x64 -> 32x32 -> 16x16 -----
    conv1 = Conv2D(filters=16, kernel_size=(5, 5), activation='relu',
                   padding='same', name='lip_conv1')(input_img)
    conv2 = Conv2D(filters=16, kernel_size=(5, 5), activation='relu',
                   padding='same', name='lip_conv2')(conv1)
    pooling1 = MaxPooling2D(pool_size=(2, 2), name='lip_pooling1')(conv2)
    conv3 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu',
                   padding='same', name='lip_conv3')(pooling1)
    conv4 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu',
                   padding='same', name='lip_conv4')(conv3)
    pooling2 = MaxPooling2D(pool_size=(2, 2), name='lip_pooling2')(conv4)

    # ----- Decoder: 16x16 -> 32x32 -> 64x64 -----
    conv5 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu',
                   padding='same', name='lip_conv5')(pooling2)
    conv6 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu',
                   padding='same', name='lip_conv6')(conv5)
    upsample1 = UpSampling2D(size=(2, 2), name='lip_upsample1')(conv6)
    conv7 = Conv2D(filters=16, kernel_size=(5, 5), activation='relu',
                   padding='same', name='lip_conv7')(upsample1)
    conv8 = Conv2D(filters=16, kernel_size=(5, 5), activation='relu',
                   padding='same', name='lip_conv8')(conv7)
    upsample2 = UpSampling2D(size=(2, 2), name='lip_upsample2')(conv8)
    # Sigmoid keeps pixel predictions in [0, 1], matching the /255 input
    # scaling and the binary cross-entropy loss used in the script below.
    decoded = Conv2D(filters=1, kernel_size=(5, 5), activation='sigmoid',
                     padding='same', name='lip_decoded')(upsample2)

    autoencoder_lip = Model(input_img, decoded, name='autoencoder_lips')
    autoencoder_lip.summary()
    return autoencoder_lip


if __name__ == "__main__":
    import os

    # Full dataset of lip images; per the accompanying text, the last
    # chapter (15951 images) is held out as the validation/test set.
    X = np.load("lips_all_chapiters.npy")
    nb_images_chapiter7 = 15951

    # Normalise pixel values to [0, 1] to match the sigmoid output layer.
    X = X / 255.0

    # Chapters 1-6 for training, chapter 7 for validation.
    # (Fix: use the named constant instead of repeating the magic 15951.)
    X_train = X[:-nb_images_chapiter7]
    X_test = X[-nb_images_chapiter7:]

    model = autoencoder_lips()
    my_optimizer = keras.optimizers.Adam(learning_rate=0.0001, epsilon=1e-8)
    model.compile(optimizer=my_optimizer, loss='binary_crossentropy')

    # Fix: ModelCheckpoint does not create missing directories — ensure the
    # target folder exists before training, or the first save would fail.
    os.makedirs("autoencoder_lips", exist_ok=True)
    filepath = "autoencoder_lips/autoencoder_lips-{epoch:02d}-{val_loss:.8f}.h5"
    # Save a checkpoint only when the validation *loss* improves (the
    # original comment said "accuracy", but val_loss is what is monitored).
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
                                 save_best_only=True, mode='auto')
    callbacks_list = [checkpoint]

    # The autoencoder is trained to reconstruct its own input (x == y).
    history = model.fit(x=X_train, y=X_train, batch_size=256, epochs=100,
                        callbacks=callbacks_list,
                        validation_data=(X_test, X_test))
训练好的自编码器最终在测试集上的表现如下 :
第一行是原始输入的图像,第二行是自编码器输出的对应图像。我们可以发现虽然自编码器生成的图像与原图像相比损失了清晰度,但是整体上还原了图像原本的形态。我们因此可以利用自编码器的中间层作为压缩后的图像信息,或称features特征。自编码器往往可以用于压缩信息,或作为进一步进行学习的中间标签。