MNIST with AutoEncoder: Classification on Google Colab
Prerequisites
Install the following packages:
! pip3 install cloudmesh-installer
! pip3 install cloudmesh-common
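TensorFlow, Keras, NumPy, and matplotlib come preinstalled on Google Colab, so only the cloudmesh packages above need to be installed. As an optional check (not part of the original notebook), you can confirm which TensorFlow version your runtime provides:

import tensorflow as tf
print(tf.__version__)  # e.g. a 2.x release on a current Colab runtime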
Import Libraries
import tensorflow as tf
from keras.layers import Dense, Input
from keras.layers import Conv2D, Flatten           # Conv2D is not used in this dense model
from keras.layers import Reshape, Conv2DTranspose  # Conv2DTranspose is likewise unused
from keras.models import Model
from keras.datasets import mnist
from keras.utils import plot_model
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
Download Data and Pre-Process
(x_train, y_train), (x_test, y_test) = mnist.load_data()
image_size = x_train.shape[1]  # 28
x_train = np.reshape(x_train, [-1, image_size, image_size, 1])  # add a channel dimension
x_test = np.reshape(x_test, [-1, image_size, image_size, 1])
x_train = x_train.astype('float32') / 255  # scale pixel values to [0, 1]
x_test = x_test.astype('float32') / 255
input_shape = (image_size, image_size, 1)
batch_size = 32
kernel_size = 3  # unused in this dense model
latent_dim = 16
hidden_units = [32, 64]
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
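Before building the model, it can help to verify the preprocessed tensors. A minimal, optional sanity check (not in the original notebook):

print(x_train.shape)  # (60000, 28, 28, 1)
print(x_test.shape)   # (10000, 28, 28, 1)
print(x_train.min(), x_train.max())  # 0.0 1.0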
Define Model
inputs = Input(shape=input_shape, name='encoder_input')
x = inputs
# Dense layers applied to a 4-D tensor act on the last (channel) axis
x = Dense(hidden_units[0], activation='relu')(x)
x = Dense(hidden_units[1], activation='relu')(x)
# remember the shape (None, 28, 28, 64) so the decoder can restore it
shape = K.int_shape(x)
# generate latent vector
x = Flatten()(x)
latent = Dense(latent_dim, name='latent_vector')(x)
# instantiate encoder model
encoder = Model(inputs,
                latent,
                name='encoder')
encoder.summary()
plot_model(encoder,
           to_file='encoder.png',
           show_shapes=True)
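The encoder compresses each 28x28 image into a 16-dimensional latent vector. As an optional sketch (not in the original notebook), you can pass a few test images through the still-untrained encoder to confirm the output shape:

codes = encoder.predict(x_test[:4])
print(codes.shape)  # (4, 16)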
latent_inputs = Input(shape=(latent_dim,), name='decoder_input')
# expand the latent vector back to the flattened encoder shape
x = Dense(shape[1] * shape[2] * shape[3])(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)
x = Dense(hidden_units[0], activation='relu')(x)
x = Dense(hidden_units[1], activation='relu')(x)
# reconstruct a single-channel image
outputs = Dense(1, activation='relu')(x)
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='decoder.png', show_shapes=True)
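Symmetrically, the decoder maps a latent vector back to a 28x28x1 image. A hypothetical check (not in the original notebook) that the shapes line up:

z = np.random.normal(size=(1, latent_dim))
print(decoder.predict(z).shape)  # (1, 28, 28, 1)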
autoencoder = Model(inputs,
                    decoder(encoder(inputs)),
                    name='autoencoder')
autoencoder.summary()
plot_model(autoencoder,
           to_file='autoencoder.png',
           show_shapes=True)
autoencoder.compile(loss='mse', optimizer='adam')
Model: "encoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
encoder_input (InputLayer) [(None, 28, 28, 1)] 0
_________________________________________________________________
dense_2 (Dense) (None, 28, 28, 32) 64
_________________________________________________________________
dense_3 (Dense) (None, 28, 28, 64) 2112
_________________________________________________________________
flatten_1 (Flatten) (None, 50176) 0
_________________________________________________________________
latent_vector (Dense) (None, 16) 802832
=================================================================
Total params: 805,008
Trainable params: 805,008
Non-trainable params: 0
_________________________________________________________________
Model: "decoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
decoder_input (InputLayer) [(None, 16)] 0
_________________________________________________________________
dense_4 (Dense) (None, 50176) 852992
_________________________________________________________________
reshape (Reshape) (None, 28, 28, 64) 0
_________________________________________________________________
dense_5 (Dense) (None, 28, 28, 32) 2080
_________________________________________________________________
dense_6 (Dense) (None, 28, 28, 64) 2112
_________________________________________________________________
dense_7 (Dense) (None, 28, 28, 1) 65
=================================================================
Total params: 857,249
Trainable params: 857,249
Non-trainable params: 0
_________________________________________________________________
Model: "autoencoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
encoder_input (InputLayer) [(None, 28, 28, 1)] 0
_________________________________________________________________
encoder (Functional) (None, 16) 805008
_________________________________________________________________
decoder (Functional) (None, 28, 28, 1) 857249
=================================================================
Total params: 1,662,257
Trainable params: 1,662,257
Non-trainable params: 0
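The compile call above configures training to minimize the mean squared per-pixel difference between the input and its reconstruction. As an illustrative check (an addition, not part of the original notebook), the same quantity can be recomputed with NumPy for one batch:

batch = x_test[:batch_size]
recon = autoencoder.predict(batch)
print(np.mean((batch - recon) ** 2))  # equals the 'mse' loss on this batch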
Train
autoencoder.fit(x_train,
                x_train,
                validation_data=(x_test, x_test),
                epochs=1,
                batch_size=batch_size)
1875/1875 [==============================] - 112s 60ms/step - loss: 0.0268 - val_loss: 0.0131
<tensorflow.python.keras.callbacks.History at 0x7f3ecb2e0be0>
Test
x_decoded = autoencoder.predict(x_test)
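Since the model was compiled with an MSE loss, evaluate gives a quantitative reconstruction score on the test set. An optional sketch (not in the original notebook):

mse = autoencoder.evaluate(x_test, x_test, batch_size=batch_size, verbose=0)
print('Test reconstruction MSE: {:.4f}'.format(mse))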
Visualize
imgs = np.concatenate([x_test[:8], x_decoded[:8]])   # 8 originals + 8 reconstructions
imgs = imgs.reshape((4, 4, image_size, image_size))  # arrange as a 4x4 grid
imgs = np.vstack([np.hstack(i) for i in imgs])       # stitch the grid into one image
plt.figure()
plt.axis('off')
plt.title('Input: 1st 2 rows, Decoded: last 2 rows')
plt.imshow(imgs, interpolation='none', cmap='gray')
plt.savefig('input_and_decoded.png')
plt.show()
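Beyond the side-by-side grid, you can rank test digits by reconstruction error to see where the model struggles. A hypothetical extension using the x_decoded predictions from above:

errors = np.mean((x_test - x_decoded) ** 2, axis=(1, 2, 3))
worst = np.argsort(errors)[-8:]  # indices of the 8 worst reconstructions
print(worst, errors[worst])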