diff --git a/your-code/challenge-1.ipynb b/your-code/challenge-1.ipynb
index 2487c5f..f950193 100644
--- a/your-code/challenge-1.ipynb
+++ b/your-code/challenge-1.ipynb
@@ -34,11 +34,344 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 85,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " TL | \n",
+ " TM | \n",
+ " TR | \n",
+ " ML | \n",
+ " MM | \n",
+ " MR | \n",
+ " BL | \n",
+ " BM | \n",
+ " BR | \n",
+ " class | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " b | \n",
+ " b | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " b | \n",
+ " o | \n",
+ " b | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ "
\n",
+ " \n",
+ " 953 | \n",
+ " o | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " x | \n",
+ " False | \n",
+ "
\n",
+ " \n",
+ " 954 | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " x | \n",
+ " False | \n",
+ "
\n",
+ " \n",
+ " 955 | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " x | \n",
+ " False | \n",
+ "
\n",
+ " \n",
+ " 956 | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " x | \n",
+ " False | \n",
+ "
\n",
+ " \n",
+ " 957 | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " x | \n",
+ " False | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
958 rows × 10 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " TL TM TR ML MM MR BL BM BR class\n",
+ "0 x x x x o o x o o True\n",
+ "1 x x x x o o o x o True\n",
+ "2 x x x x o o o o x True\n",
+ "3 x x x x o o o b b True\n",
+ "4 x x x x o o b o b True\n",
+ ".. .. .. .. .. .. .. .. .. .. ...\n",
+ "953 o x x x o o o x x False\n",
+ "954 o x o x x o x o x False\n",
+ "955 o x o x o x x o x False\n",
+ "956 o x o o x x x o x False\n",
+ "957 o o x x x o o x x False\n",
+ "\n",
+ "[958 rows x 10 columns]"
+ ]
+ },
+ "execution_count": 85,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# your code here\n",
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "\n",
+ "data = pd.read_csv('tic-tac-toe.csv')\n",
+ "data.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 86,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "TL 0\n",
+ "TM 0\n",
+ "TR 0\n",
+ "ML 0\n",
+ "MM 0\n",
+ "MR 0\n",
+ "BL 0\n",
+ "BM 0\n",
+ "BR 0\n",
+ "class 0\n",
+ "dtype: int64"
+ ]
+ },
+ "execution_count": 86,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "print('shape:', data.shape)\n",
+ "display(data.describe())\n",
+ "data.isnull().sum()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 87,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " TL TM TR ML MM MR BL BM BR class\n",
+ "0 1 1 1 1 0 0 1 0 0 1\n",
+ "1 1 1 1 1 0 0 0 1 0 1\n",
+ "2 1 1 1 1 0 0 0 0 1 1\n",
+ "3 1 1 1 1 0 0 0 2 2 1\n",
+ "4 1 1 1 1 0 0 2 0 2 1\n",
+ ".. .. .. .. .. .. .. .. .. .. ...\n",
+ "953 0 1 1 1 0 0 0 1 1 0\n",
+ "954 0 1 0 1 1 0 1 0 1 0\n",
+ "955 0 1 0 1 0 1 1 0 1 0\n",
+ "956 0 1 0 0 1 1 1 0 1 0\n",
+ "957 0 0 1 1 1 0 0 1 1 0\n",
+ "\n",
+ "[958 rows x 10 columns]\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/var/folders/h4/hbwp12nd2pv6bypw4ypm6q280000gn/T/ipykernel_23158/773542222.py:4: FutureWarning: Downcasting behavior in `replace` is deprecated and will be removed in a future version. To retain the old behavior, explicitly call `result.infer_objects(copy=False)`. To opt-in to the future behavior, set `pd.set_option('future.no_silent_downcasting', True)`\n",
+ " df = df.replace(mapping)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Convert the categorical values to numeric in all columns.\n",
+ "df = pd.DataFrame(data)\n",
+ "mapping = {'x': 1, 'o': 0, 'b': 2}\n",
+ "df = df.replace(mapping).infer_objects(copy=False)\n",
+ "\n",
+ "df['class'] = df['class'].astype(int)\n",
+ "\n",
+ "print(df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 88,
"metadata": {},
"outputs": [],
"source": [
- "# your code here"
+ "# Separate the inputs and output.\n",
+ "X = df.drop(columns=['class'])\n",
+ "y = df['class']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 89,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " TL TM TR ML MM MR BL \\\n",
+ "0 0.183675 0.10789 0.183675 0.10789 -1.163573 -1.184095 0.183675 \n",
+ "1 0.183675 0.10789 0.183675 0.10789 -1.163573 -1.184095 -1.169866 \n",
+ "2 0.183675 0.10789 0.183675 0.10789 -1.163573 -1.184095 -1.169866 \n",
+ "3 0.183675 0.10789 0.183675 0.10789 -1.163573 -1.184095 -1.169866 \n",
+ "4 0.183675 0.10789 0.183675 0.10789 -1.163573 -1.184095 1.537216 \n",
+ "\n",
+ " BM BR \n",
+ "0 -1.184095 -1.169866 \n",
+ "1 0.107890 -1.169866 \n",
+ "2 -1.184095 0.183675 \n",
+ "3 1.399876 1.537216 \n",
+ "4 -1.184095 1.537216 \n"
+ ]
+ }
+ ],
+ "source": [
+ "# Normalize the input data.\n",
+ "X_norm = (X - X.mean()) / X.std()\n",
+ "print(X_norm.head())\n",
+ "\n",
+ "# from sklearn.preprocessing import MinMaxScaler\n",
+ "# scaler = MinMaxScaler()\n",
+ "# X_norm = scaler.fit_transform(X)\n",
+ "# print(X_norm)"
]
},
{
@@ -60,11 +393,190 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 90,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "9"
+ ]
+ },
+ "execution_count": 90,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "n_cols = X_norm.shape[1]\n",
+ "n_cols\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 91,
"metadata": {},
"outputs": [],
"source": [
- "# your code here"
+ "# your code here\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X_norm, y, test_size= 0.3, random_state=12)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 92,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/opt/anaconda3/lib/python3.12/site-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
+ " super().__init__(activity_regularizer=activity_regularizer, **kwargs)\n"
+ ]
+ }
+ ],
+ "source": [
+ "from tensorflow import keras\n",
+ "from keras.models import Sequential\n",
+ "from keras.layers import Dense\n",
+ "\n",
+ "model = Sequential()\n",
+ "\n",
+ "model.add(Dense(64, activation='relu', input_shape=(n_cols,)))\n",
+ "model.add(Dense(32, activation='relu'))\n",
+ "model.add(Dense(16, activation='relu'))\n",
+ "\n",
+ "# Softmax activation in the output layer for multi-class classification\n",
+ "model.add(Dense(2, activation='softmax'))\n",
+ "\n",
+ "model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 93,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Epoch 1/20\n",
+ "21/21 - 0s - 15ms/step - accuracy: 0.6507 - loss: 0.6508\n",
+ "Epoch 2/20\n",
+ "21/21 - 0s - 760us/step - accuracy: 0.6657 - loss: 0.6055\n",
+ "Epoch 3/20\n",
+ "21/21 - 0s - 704us/step - accuracy: 0.6731 - loss: 0.5735\n",
+ "Epoch 4/20\n",
+ "21/21 - 0s - 683us/step - accuracy: 0.6955 - loss: 0.5412\n",
+ "Epoch 5/20\n",
+ "21/21 - 0s - 647us/step - accuracy: 0.7537 - loss: 0.5054\n",
+ "Epoch 6/20\n",
+ "21/21 - 0s - 631us/step - accuracy: 0.7687 - loss: 0.4679\n",
+ "Epoch 7/20\n",
+ "21/21 - 0s - 622us/step - accuracy: 0.8090 - loss: 0.4332\n",
+ "Epoch 8/20\n",
+ "21/21 - 0s - 620us/step - accuracy: 0.8522 - loss: 0.3923\n",
+ "Epoch 9/20\n",
+ "21/21 - 0s - 625us/step - accuracy: 0.8657 - loss: 0.3558\n",
+ "Epoch 10/20\n",
+ "21/21 - 0s - 617us/step - accuracy: 0.8716 - loss: 0.3236\n",
+ "Epoch 11/20\n",
+ "21/21 - 0s - 624us/step - accuracy: 0.8985 - loss: 0.2920\n",
+ "Epoch 12/20\n",
+ "21/21 - 0s - 630us/step - accuracy: 0.8970 - loss: 0.2774\n",
+ "Epoch 13/20\n",
+ "21/21 - 0s - 625us/step - accuracy: 0.9090 - loss: 0.2506\n",
+ "Epoch 14/20\n",
+ "21/21 - 0s - 644us/step - accuracy: 0.9045 - loss: 0.2339\n",
+ "Epoch 15/20\n",
+ "21/21 - 0s - 651us/step - accuracy: 0.9224 - loss: 0.2162\n",
+ "Epoch 16/20\n",
+ "21/21 - 0s - 641us/step - accuracy: 0.9209 - loss: 0.2012\n",
+ "Epoch 17/20\n",
+ "21/21 - 0s - 650us/step - accuracy: 0.9403 - loss: 0.1830\n",
+ "Epoch 18/20\n",
+ "21/21 - 0s - 630us/step - accuracy: 0.9433 - loss: 0.1684\n",
+ "Epoch 19/20\n",
+ "21/21 - 0s - 632us/step - accuracy: 0.9493 - loss: 0.1528\n",
+ "Epoch 20/20\n",
+ "21/21 - 0s - 616us/step - accuracy: 0.9597 - loss: 0.1449\n"
+ ]
+ }
+ ],
+ "source": [
+ "history = model.fit(X_train, y_train, epochs=20, verbose=2)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 94,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[1m9/9\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 400us/step - accuracy: 0.8624 - loss: 0.3150\n",
+ "test loss 0.32062458992004395\n",
+ "test accuracy 0.8611111044883728\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Evaluate the model with the test data\n",
+ "test_loss, test_accuracy = model.evaluate(X_test, y_test)\n",
+ "print('test loss', test_loss)\n",
+ "print('test accuracy', test_accuracy)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 95,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "INFO:tensorflow:Assets written to: tic-tac-toe.model/assets\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "INFO:tensorflow:Assets written to: tic-tac-toe.model/assets\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Saved artifact at 'tic-tac-toe.model'. The following endpoints are available:\n",
+ "\n",
+ "* Endpoint 'serve'\n",
+ " args_0 (POSITIONAL_ONLY): TensorSpec(shape=(None, 9), dtype=tf.float32, name='keras_tensor_291')\n",
+ "Output Type:\n",
+ " TensorSpec(shape=(None, 2), dtype=tf.float32, name=None)\n",
+ "Captures:\n",
+ " 14021117136: TensorSpec(shape=(), dtype=tf.resource, name=None)\n",
+ " 14021118288: TensorSpec(shape=(), dtype=tf.resource, name=None)\n",
+ " 14021118672: TensorSpec(shape=(), dtype=tf.resource, name=None)\n",
+ " 14021116944: TensorSpec(shape=(), dtype=tf.resource, name=None)\n",
+ " 14021127120: TensorSpec(shape=(), dtype=tf.resource, name=None)\n",
+ " 14021118480: TensorSpec(shape=(), dtype=tf.resource, name=None)\n",
+ " 14021118096: TensorSpec(shape=(), dtype=tf.resource, name=None)\n",
+ " 14021126160: TensorSpec(shape=(), dtype=tf.resource, name=None)\n"
+ ]
+ }
+ ],
+ "source": [
+ "model.save('tic-tac-toe.keras')\n",
+ "model.export('tic-tac-toe.model')"
]
},
{
@@ -78,11 +590,41 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 96,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 17ms/step\n",
+ "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 63ms/step - accuracy: 1.0000 - loss: 0.0878\n",
+ "loss 0.08782690763473511\n",
+ "accuracy 1.0\n"
+ ]
+ }
+ ],
"source": [
- "# your code here"
+ "# your code here\n",
+ "from keras.models import load_model\n",
+ "\n",
+ "saved_model = load_model('tic-tac-toe.keras')\n",
+ "\n",
+ "# Randomly select a few rows from the test set\n",
+ "num_samples = 5\n",
+ "random_indices = np.random.default_rng(42).choice(X_test.shape[0], num_samples, replace=False)\n",
+ "\n",
+ "# Get the corresponding input and actual labels\n",
+ "X_sample = X_test.iloc[random_indices].values\n",
+ "y_actual = y_test.iloc[random_indices].values\n",
+ "\n",
+ "predictions = saved_model.predict(X_sample)\n",
+ "\n",
+ "print(predictions)\n",
+ "\n",
+ "loss, accuracy = saved_model.evaluate(X_sample, y_actual)\n",
+ "print('loss', loss)\n",
+ "print('accuracy', accuracy)"
]
},
{
@@ -104,11 +646,311 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 97,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(5, 9)"
+ ]
+ },
+ "execution_count": 97,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "X_sample.shape"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 104,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/opt/anaconda3/lib/python3.12/site-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
+ " super().__init__(activity_regularizer=activity_regularizer, **kwargs)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# your code here\n",
+ "improved_model = Sequential()\n",
+ "\n",
+ "improved_model.add(Dense(128, activation='relu', input_shape=(X_train.shape[1],))) # More neurons\n",
+ "improved_model.add(Dense(64, activation='relu')) # Hidden layer 1\n",
+ "improved_model.add(Dense(32, activation='relu')) # Hidden layer 2\n",
+ "improved_model.add(Dense(16, activation='relu')) # Hidden layer 3\n",
+ "improved_model.add(Dense(2, activation='softmax')) \n",
+ "\n",
+ "# improved_model.add(Dense(16, activation='relu', input_shape=(X_sample.shape[1], )))\n",
+ "# improved_model.add(Dense(8, activation='relu'))\n",
+ "# improved_model.add(Dense(2, activation='softmax'))\n",
+ "\n",
+ "# improved_model.compile(optimizer='adam', loss='mean_squared_error')\n",
+ "\n",
+ "# improved_model.fit(X_train, y_train, epochs=100, verbose=2)\n",
+ "\n",
+ "# y_pred = model.predict(X_sample)\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 121,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Epoch 1/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - accuracy: 1.0000 - loss: 1.2534e-08 - val_accuracy: 0.9236 - val_loss: 0.6381\n",
+ "Epoch 2/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 949us/step - accuracy: 1.0000 - loss: 7.8420e-09 - val_accuracy: 0.9271 - val_loss: 0.6386\n",
+ "Epoch 3/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 909us/step - accuracy: 1.0000 - loss: 1.1095e-08 - val_accuracy: 0.9271 - val_loss: 0.6401\n",
+ "Epoch 4/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 905us/step - accuracy: 1.0000 - loss: 5.0581e-09 - val_accuracy: 0.9271 - val_loss: 0.6440\n",
+ "Epoch 5/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 883us/step - accuracy: 1.0000 - loss: 4.6943e-09 - val_accuracy: 0.9271 - val_loss: 0.6461\n",
+ "Epoch 6/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 841us/step - accuracy: 1.0000 - loss: 4.0089e-09 - val_accuracy: 0.9271 - val_loss: 0.6484\n",
+ "Epoch 7/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 830us/step - accuracy: 1.0000 - loss: 3.9341e-09 - val_accuracy: 0.9306 - val_loss: 0.6504\n",
+ "Epoch 8/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 835us/step - accuracy: 1.0000 - loss: 2.4971e-09 - val_accuracy: 0.9306 - val_loss: 0.6530\n",
+ "Epoch 9/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 832us/step - accuracy: 1.0000 - loss: 2.0598e-09 - val_accuracy: 0.9306 - val_loss: 0.6527\n",
+ "Epoch 10/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 848us/step - accuracy: 1.0000 - loss: 2.1864e-09 - val_accuracy: 0.9306 - val_loss: 0.6540\n",
+ "Epoch 11/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 820us/step - accuracy: 1.0000 - loss: 9.9536e-10 - val_accuracy: 0.9306 - val_loss: 0.6558\n",
+ "Epoch 12/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 1ms/step - accuracy: 1.0000 - loss: 1.1459e-09 - val_accuracy: 0.9306 - val_loss: 0.6575\n",
+ "Epoch 13/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 807us/step - accuracy: 1.0000 - loss: 1.8063e-10 - val_accuracy: 0.9306 - val_loss: 0.6576\n",
+ "Epoch 14/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 812us/step - accuracy: 1.0000 - loss: 1.6700e-09 - val_accuracy: 0.9306 - val_loss: 0.6583\n",
+ "Epoch 15/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 815us/step - accuracy: 1.0000 - loss: 4.8732e-10 - val_accuracy: 0.9306 - val_loss: 0.6607\n",
+ "Epoch 16/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 844us/step - accuracy: 1.0000 - loss: 3.0036e-10 - val_accuracy: 0.9306 - val_loss: 0.6611\n",
+ "Epoch 17/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 845us/step - accuracy: 1.0000 - loss: 4.5894e-10 - val_accuracy: 0.9306 - val_loss: 0.6626\n",
+ "Epoch 18/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 812us/step - accuracy: 1.0000 - loss: 1.9927e-10 - val_accuracy: 0.9306 - val_loss: 0.6631\n",
+ "Epoch 19/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 846us/step - accuracy: 1.0000 - loss: 3.1152e-10 - val_accuracy: 0.9306 - val_loss: 0.6638\n",
+ "Epoch 20/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 845us/step - accuracy: 1.0000 - loss: 6.2539e-10 - val_accuracy: 0.9340 - val_loss: 0.6640\n",
+ "Epoch 21/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 807us/step - accuracy: 1.0000 - loss: 9.9914e-11 - val_accuracy: 0.9340 - val_loss: 0.6637\n",
+ "Epoch 22/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 810us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6648\n",
+ "Epoch 23/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 816us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6653\n",
+ "Epoch 24/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 793us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6654\n",
+ "Epoch 25/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 797us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6657\n",
+ "Epoch 26/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 798us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6666\n",
+ "Epoch 27/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 799us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6661\n",
+ "Epoch 28/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 799us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6666\n",
+ "Epoch 29/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 783us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6664\n",
+ "Epoch 30/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 770us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6666\n",
+ "Epoch 31/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 784us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6669\n",
+ "Epoch 32/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 827us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6671\n",
+ "Epoch 33/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 1ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6679\n",
+ "Epoch 34/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 792us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6677\n",
+ "Epoch 35/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 800us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6682\n",
+ "Epoch 36/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 819us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6678\n",
+ "Epoch 37/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 819us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6684\n",
+ "Epoch 38/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6686\n",
+ "Epoch 39/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 836us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6692\n",
+ "Epoch 40/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 798us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6690\n",
+ "Epoch 41/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 792us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6691\n",
+ "Epoch 42/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 793us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6691\n",
+ "Epoch 43/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 785us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6697\n",
+ "Epoch 44/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 767us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6696\n",
+ "Epoch 45/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 771us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6697\n",
+ "Epoch 46/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 783us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6700\n",
+ "Epoch 47/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 773us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6706\n",
+ "Epoch 48/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 813us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9340 - val_loss: 0.6709\n",
+ "Epoch 49/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 787us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9375 - val_loss: 0.6709\n",
+ "Epoch 50/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 813us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9375 - val_loss: 0.6712\n",
+ "Epoch 51/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 786us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9375 - val_loss: 0.6713\n",
+ "Epoch 52/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 781us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9375 - val_loss: 0.6714\n",
+ "Epoch 53/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 787us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9375 - val_loss: 0.6720\n",
+ "Epoch 54/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 783us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9375 - val_loss: 0.6722\n",
+ "Epoch 55/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 771us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9375 - val_loss: 0.6726\n",
+ "Epoch 56/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 779us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9375 - val_loss: 0.6725\n",
+ "Epoch 57/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 763us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9375 - val_loss: 0.6730\n",
+ "Epoch 58/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 771us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6733\n",
+ "Epoch 59/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 780us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6737\n",
+ "Epoch 60/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 809us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6737\n",
+ "Epoch 61/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 954us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6740\n",
+ "Epoch 62/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 1ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6741\n",
+ "Epoch 63/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 787us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6745\n",
+ "Epoch 64/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 769us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6748\n",
+ "Epoch 65/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 769us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6748\n",
+ "Epoch 66/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 795us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6748\n",
+ "Epoch 67/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 796us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6750\n",
+ "Epoch 68/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 797us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6756\n",
+ "Epoch 69/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 788us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9410 - val_loss: 0.6758\n",
+ "Epoch 70/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 789us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6760\n",
+ "Epoch 71/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 798us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6760\n",
+ "Epoch 72/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 786us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6762\n",
+ "Epoch 73/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 806us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6766\n",
+ "Epoch 74/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 789us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6767\n",
+ "Epoch 75/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 780us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6771\n",
+ "Epoch 76/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 763us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6771\n",
+ "Epoch 77/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 766us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6774\n",
+ "Epoch 78/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 795us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6776\n",
+ "Epoch 79/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 783us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6777\n",
+ "Epoch 80/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 785us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6778\n",
+ "Epoch 81/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 1ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6781\n",
+ "Epoch 82/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 819us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6784\n",
+ "Epoch 83/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 801us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6787\n",
+ "Epoch 84/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 781us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6787\n",
+ "Epoch 85/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 767us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6792\n",
+ "Epoch 86/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 792us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6792\n",
+ "Epoch 87/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 786us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6795\n",
+ "Epoch 88/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 768us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6796\n",
+ "Epoch 89/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 825us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6798\n",
+ "Epoch 90/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 799us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6800\n",
+ "Epoch 91/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 778us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6802\n",
+ "Epoch 92/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 795us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6804\n",
+ "Epoch 93/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 778us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6807\n",
+ "Epoch 94/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 781us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6807\n",
+ "Epoch 95/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 787us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6807\n",
+ "Epoch 96/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 779us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6811\n",
+ "Epoch 97/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 779us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6813\n",
+ "Epoch 98/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 787us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6814\n",
+ "Epoch 99/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 777us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6815\n",
+ "Epoch 100/100\n",
+ "\u001b[1m21/21\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 969us/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 0.9444 - val_loss: 0.6817\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 121,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "\n",
+ " # Compile the model\n",
+ "improved_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n",
+ "\n",
+ "# Fit the model for a greater number of epochs\n",
+ "improved_model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=32)\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 124,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "loss 0.6817036271095276\n",
+ "accuracy 0.9444444179534912\n"
+ ]
+ }
+ ],
"source": [
- "# your code here"
+ " # Evaluate the improved model\n",
+ "loss, accuracy = improved_model.evaluate(X_test, y_test, verbose=0)\n",
+ "print('loss', loss)\n",
+ "print('accuracy', accuracy)"
]
},
{
@@ -120,11 +962,13 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 125,
"metadata": {},
"outputs": [],
"source": [
- "# your answer here"
+ "# your answer here\n",
+ "\n",
+ "## Neural network improves the performance"
]
}
],
@@ -144,7 +988,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.3"
+ "version": "3.12.4"
}
},
"nbformat": 4,
diff --git a/your-code/tic-tac-toe.keras b/your-code/tic-tac-toe.keras
new file mode 100644
index 0000000..b7299f9
Binary files /dev/null and b/your-code/tic-tac-toe.keras differ
diff --git a/your-code/tic-tac-toe.model/fingerprint.pb b/your-code/tic-tac-toe.model/fingerprint.pb
new file mode 100644
index 0000000..77af10f
--- /dev/null
+++ b/your-code/tic-tac-toe.model/fingerprint.pb
@@ -0,0 +1 @@
+ο䯯դő КѺ.(2
\ No newline at end of file
diff --git a/your-code/tic-tac-toe.model/saved_model.pb b/your-code/tic-tac-toe.model/saved_model.pb
new file mode 100644
index 0000000..aeccb7d
Binary files /dev/null and b/your-code/tic-tac-toe.model/saved_model.pb differ
diff --git a/your-code/tic-tac-toe.model/variables/variables.data-00000-of-00001 b/your-code/tic-tac-toe.model/variables/variables.data-00000-of-00001
new file mode 100644
index 0000000..27c5f83
Binary files /dev/null and b/your-code/tic-tac-toe.model/variables/variables.data-00000-of-00001 differ
diff --git a/your-code/tic-tac-toe.model/variables/variables.index b/your-code/tic-tac-toe.model/variables/variables.index
new file mode 100644
index 0000000..af52e65
Binary files /dev/null and b/your-code/tic-tac-toe.model/variables/variables.index differ