diff --git a/fashion-mnist.ipynb b/fashion-mnist.ipynb
new file mode 100644
index 0000000..0d27667
--- /dev/null
+++ b/fashion-mnist.ipynb
@@ -0,0 +1,654 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Kaggle Challenge - https://www.kaggle.com/zalando-research/fashionmnist \n",
+ "import numpy as np \n",
+ "import pandas as pd "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " label | \n",
+ " pixel1 | \n",
+ " pixel2 | \n",
+ " pixel3 | \n",
+ " pixel4 | \n",
+ " pixel5 | \n",
+ " pixel6 | \n",
+ " pixel7 | \n",
+ " pixel8 | \n",
+ " pixel9 | \n",
+ " ... | \n",
+ " pixel775 | \n",
+ " pixel776 | \n",
+ " pixel777 | \n",
+ " pixel778 | \n",
+ " pixel779 | \n",
+ " pixel780 | \n",
+ " pixel781 | \n",
+ " pixel782 | \n",
+ " pixel783 | \n",
+ " pixel784 | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " ... | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 9 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " ... | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 6 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 5 | \n",
+ " 0 | \n",
+ " ... | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 30 | \n",
+ " 43 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " ... | \n",
+ " 3 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 3 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " ... | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 5 | \n",
+ " 4 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 5 | \n",
+ " 4 | \n",
+ " 5 | \n",
+ " 5 | \n",
+ " 3 | \n",
+ " 5 | \n",
+ " ... | \n",
+ " 7 | \n",
+ " 8 | \n",
+ " 7 | \n",
+ " 4 | \n",
+ " 3 | \n",
+ " 7 | \n",
+ " 5 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 6 | \n",
+ " 4 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " ... | \n",
+ " 14 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 7 | \n",
+ " 5 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " ... | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 8 | \n",
+ " 4 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 3 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " ... | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 9 | \n",
+ " 8 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " ... | \n",
+ " 203 | \n",
+ " 214 | \n",
+ " 166 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
10 rows × 785 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " label pixel1 pixel2 pixel3 pixel4 pixel5 pixel6 pixel7 pixel8 \\\n",
+ "0 2 0 0 0 0 0 0 0 0 \n",
+ "1 9 0 0 0 0 0 0 0 0 \n",
+ "2 6 0 0 0 0 0 0 0 5 \n",
+ "3 0 0 0 0 1 2 0 0 0 \n",
+ "4 3 0 0 0 0 0 0 0 0 \n",
+ "5 4 0 0 0 5 4 5 5 3 \n",
+ "6 4 0 0 0 0 0 0 0 0 \n",
+ "7 5 0 0 0 0 0 0 0 0 \n",
+ "8 4 0 0 0 0 0 0 3 2 \n",
+ "9 8 0 0 0 0 0 0 0 0 \n",
+ "\n",
+ " pixel9 ... pixel775 pixel776 pixel777 pixel778 pixel779 \\\n",
+ "0 0 ... 0 0 0 0 0 \n",
+ "1 0 ... 0 0 0 0 0 \n",
+ "2 0 ... 0 0 0 30 43 \n",
+ "3 0 ... 3 0 0 0 0 \n",
+ "4 0 ... 0 0 0 0 0 \n",
+ "5 5 ... 7 8 7 4 3 \n",
+ "6 0 ... 14 0 0 0 0 \n",
+ "7 0 ... 0 0 0 0 0 \n",
+ "8 0 ... 1 0 0 0 0 \n",
+ "9 0 ... 203 214 166 0 0 \n",
+ "\n",
+ " pixel780 pixel781 pixel782 pixel783 pixel784 \n",
+ "0 0 0 0 0 0 \n",
+ "1 0 0 0 0 0 \n",
+ "2 0 0 0 0 0 \n",
+ "3 1 0 0 0 0 \n",
+ "4 0 0 0 0 0 \n",
+ "5 7 5 0 0 0 \n",
+ "6 0 0 0 0 0 \n",
+ "7 0 0 0 0 0 \n",
+ "8 0 0 0 0 0 \n",
+ "9 0 0 0 0 0 \n",
+ "\n",
+ "[10 rows x 785 columns]"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from keras.utils import to_categorical\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "\n",
+ "data_train = pd.read_csv('data/fashion-mnist/fashion-mnist_train.csv')\n",
+ "data_test = pd.read_csv('data/fashion-mnist/fashion-mnist_test.csv')\n",
+ "\n",
+ "# input data format\n",
+ "data_train.iloc[:10, :]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# first column of each row consists of label of the image, image of size 28x28 is flattened as a row of size 784\n",
+ "X = np.array(data_train.iloc[:, 1:])\n",
+ "y = to_categorical(np.array(data_train.iloc[:, 0]))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "img_rows, img_cols = 28, 28\n",
+ "input_shape = (img_rows, img_cols, 1)\n",
+ "\n",
+ "X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=13)\n",
+ "\n",
+ "#Test data\n",
+ "X_test = np.array(data_test.iloc[:, 1:])\n",
+ "y_test = to_categorical(np.array(data_test.iloc[:, 0]))\n",
+ "\n",
+ "X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)\n",
+ "X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n",
+ "X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, 1)\n",
+ "\n",
+ "X_train = X_train.astype('float32')\n",
+ "X_test = X_test.astype('float32')\n",
+ "X_val = X_val.astype('float32')\n",
+ "X_train /= 255\n",
+ "X_test /= 255\n",
+ "X_val /= 255"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import keras\n",
+ "from keras.models import Sequential\n",
+ "from keras.layers import Dense, Dropout, Flatten\n",
+ "from keras.layers import Conv2D, MaxPooling2D\n",
+ "from keras.layers.normalization import BatchNormalization\n",
+ "\n",
+ "batch_size = 256\n",
+ "num_classes = 10\n",
+ "epochs = 50\n",
+ "\n",
+ "#input image dimensions\n",
+ "img_rows, img_cols = 28, 28\n",
+ "\n",
+ "model = Sequential()\n",
+ "model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal', input_shape=input_shape))\n",
+ "model.add(MaxPooling2D((2, 2)))\n",
+ "model.add(Dropout(0.25))\n",
+ "model.add(Conv2D(64, (3, 3), activation='relu'))\n",
+ "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
+ "model.add(Dropout(0.25))\n",
+ "model.add(Conv2D(128, (3, 3), activation='relu'))\n",
+ "model.add(Dropout(0.4))\n",
+ "model.add(Flatten())\n",
+ "model.add(Dense(128, activation='relu'))\n",
+ "model.add(Dropout(0.3))\n",
+ "model.add(Dense(num_classes, activation='softmax'))\n",
+ "\n",
+ "model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "_________________________________________________________________\n",
+ "Layer (type) Output Shape Param # \n",
+ "=================================================================\n",
+ "conv2d_1 (Conv2D) (None, 26, 26, 32) 320 \n",
+ "_________________________________________________________________\n",
+ "max_pooling2d_1 (MaxPooling2 (None, 13, 13, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "dropout_1 (Dropout) (None, 13, 13, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "conv2d_2 (Conv2D) (None, 11, 11, 64) 18496 \n",
+ "_________________________________________________________________\n",
+ "max_pooling2d_2 (MaxPooling2 (None, 5, 5, 64) 0 \n",
+ "_________________________________________________________________\n",
+ "dropout_2 (Dropout) (None, 5, 5, 64) 0 \n",
+ "_________________________________________________________________\n",
+ "conv2d_3 (Conv2D) (None, 3, 3, 128) 73856 \n",
+ "_________________________________________________________________\n",
+ "dropout_3 (Dropout) (None, 3, 3, 128) 0 \n",
+ "_________________________________________________________________\n",
+ "flatten_1 (Flatten) (None, 1152) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_1 (Dense) (None, 128) 147584 \n",
+ "_________________________________________________________________\n",
+ "dropout_4 (Dropout) (None, 128) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_2 (Dense) (None, 10) 1290 \n",
+ "=================================================================\n",
+ "Total params: 241,546\n",
+ "Trainable params: 241,546\n",
+ "Non-trainable params: 0\n",
+ "_________________________________________________________________\n"
+ ]
+ }
+ ],
+ "source": [
+ "model.summary()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Train on 48000 samples, validate on 12000 samples\n",
+ "Epoch 1/50\n",
+ "48000/48000 [==============================] - 5s 112us/step - loss: 0.8480 - acc: 0.6839 - val_loss: 0.5149 - val_acc: 0.8129\n",
+ "Epoch 2/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.5158 - acc: 0.8090 - val_loss: 0.4042 - val_acc: 0.8529\n",
+ "Epoch 3/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.4456 - acc: 0.8356 - val_loss: 0.3815 - val_acc: 0.8588\n",
+ "Epoch 4/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.4026 - acc: 0.8531 - val_loss: 0.3349 - val_acc: 0.8757\n",
+ "Epoch 5/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.3708 - acc: 0.8657 - val_loss: 0.3147 - val_acc: 0.8832\n",
+ "Epoch 6/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.3517 - acc: 0.8711 - val_loss: 0.2991 - val_acc: 0.8912\n",
+ "Epoch 7/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.3313 - acc: 0.8800 - val_loss: 0.2985 - val_acc: 0.8887\n",
+ "Epoch 8/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.3194 - acc: 0.8826 - val_loss: 0.2780 - val_acc: 0.8974\n",
+ "Epoch 9/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.3095 - acc: 0.8873 - val_loss: 0.2763 - val_acc: 0.8993\n",
+ "Epoch 10/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.3015 - acc: 0.8894 - val_loss: 0.2671 - val_acc: 0.9008\n",
+ "Epoch 11/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2938 - acc: 0.8925 - val_loss: 0.2605 - val_acc: 0.9025\n",
+ "Epoch 12/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2862 - acc: 0.8943 - val_loss: 0.2576 - val_acc: 0.9018\n",
+ "Epoch 13/50\n",
+ "48000/48000 [==============================] - 3s 63us/step - loss: 0.2780 - acc: 0.8973 - val_loss: 0.2533 - val_acc: 0.9033\n",
+ "Epoch 14/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2707 - acc: 0.8995 - val_loss: 0.2484 - val_acc: 0.9073\n",
+ "Epoch 15/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2684 - acc: 0.9021 - val_loss: 0.2472 - val_acc: 0.9068\n",
+ "Epoch 16/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2622 - acc: 0.9040 - val_loss: 0.2452 - val_acc: 0.9074\n",
+ "Epoch 17/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2581 - acc: 0.9049 - val_loss: 0.2383 - val_acc: 0.9108\n",
+ "Epoch 18/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2511 - acc: 0.9062 - val_loss: 0.2371 - val_acc: 0.9117\n",
+ "Epoch 19/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2450 - acc: 0.9085 - val_loss: 0.2406 - val_acc: 0.9121\n",
+ "Epoch 20/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2425 - acc: 0.9085 - val_loss: 0.2306 - val_acc: 0.9148\n",
+ "Epoch 21/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2409 - acc: 0.9093 - val_loss: 0.2303 - val_acc: 0.9133\n",
+ "Epoch 22/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2393 - acc: 0.9115 - val_loss: 0.2337 - val_acc: 0.9096\n",
+ "Epoch 23/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2334 - acc: 0.9126 - val_loss: 0.2322 - val_acc: 0.9122\n",
+ "Epoch 24/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2322 - acc: 0.9126 - val_loss: 0.2312 - val_acc: 0.9117\n",
+ "Epoch 25/50\n",
+ "48000/48000 [==============================] - 3s 61us/step - loss: 0.2283 - acc: 0.9140 - val_loss: 0.2299 - val_acc: 0.9137\n",
+ "Epoch 26/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2228 - acc: 0.9167 - val_loss: 0.2290 - val_acc: 0.9149\n",
+ "Epoch 27/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2221 - acc: 0.9167 - val_loss: 0.2218 - val_acc: 0.9164\n",
+ "Epoch 28/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2209 - acc: 0.9165 - val_loss: 0.2266 - val_acc: 0.9162\n",
+ "Epoch 29/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2187 - acc: 0.9187 - val_loss: 0.2201 - val_acc: 0.9168\n",
+ "Epoch 30/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2141 - acc: 0.9190 - val_loss: 0.2233 - val_acc: 0.9178\n",
+ "Epoch 31/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2153 - acc: 0.9187 - val_loss: 0.2200 - val_acc: 0.9176\n",
+ "Epoch 32/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2105 - acc: 0.9212 - val_loss: 0.2187 - val_acc: 0.9168\n",
+ "Epoch 33/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2092 - acc: 0.9216 - val_loss: 0.2248 - val_acc: 0.9149\n",
+ "Epoch 34/50\n",
+ "48000/48000 [==============================] - 3s 63us/step - loss: 0.2075 - acc: 0.9216 - val_loss: 0.2240 - val_acc: 0.9158\n",
+ "Epoch 35/50\n",
+ "48000/48000 [==============================] - 3s 63us/step - loss: 0.2082 - acc: 0.9228 - val_loss: 0.2183 - val_acc: 0.9180\n",
+ "Epoch 36/50\n",
+ "48000/48000 [==============================] - 3s 63us/step - loss: 0.2059 - acc: 0.9234 - val_loss: 0.2202 - val_acc: 0.9180\n",
+ "Epoch 37/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2046 - acc: 0.9236 - val_loss: 0.2191 - val_acc: 0.9199\n",
+ "Epoch 38/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.2024 - acc: 0.9236 - val_loss: 0.2226 - val_acc: 0.9183\n",
+ "Epoch 39/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.1984 - acc: 0.9253 - val_loss: 0.2220 - val_acc: 0.9167\n",
+ "Epoch 40/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.1997 - acc: 0.9249 - val_loss: 0.2201 - val_acc: 0.9197\n",
+ "Epoch 41/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.1966 - acc: 0.9251 - val_loss: 0.2172 - val_acc: 0.9195\n",
+ "Epoch 42/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.1958 - acc: 0.9277 - val_loss: 0.2172 - val_acc: 0.9191\n",
+ "Epoch 43/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.1913 - acc: 0.9280 - val_loss: 0.2126 - val_acc: 0.9214\n",
+ "Epoch 44/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.1926 - acc: 0.9272 - val_loss: 0.2113 - val_acc: 0.9222\n",
+ "Epoch 45/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.1944 - acc: 0.9265 - val_loss: 0.2165 - val_acc: 0.9193\n",
+ "Epoch 46/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.1883 - acc: 0.9282 - val_loss: 0.2146 - val_acc: 0.9202\n",
+ "Epoch 47/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.1886 - acc: 0.9283 - val_loss: 0.2147 - val_acc: 0.9187\n",
+ "Epoch 48/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.1919 - acc: 0.9270 - val_loss: 0.2152 - val_acc: 0.9197\n",
+ "Epoch 49/50\n",
+ "48000/48000 [==============================] - 3s 62us/step - loss: 0.1874 - acc: 0.9300 - val_loss: 0.2137 - val_acc: 0.9212\n",
+ "Epoch 50/50\n",
+ "48000/48000 [==============================] - 3s 63us/step - loss: 0.1851 - acc: 0.9314 - val_loss: 0.2136 - val_acc: 0.9219\n"
+ ]
+ }
+ ],
+ "source": [
+ "model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_val, y_val))\n",
+ "score = model.evaluate(X_test, y_test, verbose=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Test loss: 0.1976667128264904\n",
+ "Test accuracy: 0.9266\n"
+ ]
+ }
+ ],
+ "source": [
+ "print('Test loss:', score[0])\n",
+ "print('Test accuracy:', score[1])"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}