{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "tOoyQ70H00_s"
   },
   "source": [
    "## Exercise 2\n",
    "In the course you learned how to do classification using Fashion MNIST, a dataset containing items of clothing. There's another, similar dataset called MNIST, which has items of handwriting -- the digits 0 through 9.\n",
    "\n",
    "Write an MNIST classifier that trains to 99% accuracy or above, and does it without a fixed number of epochs -- i.e. you should stop training once you reach that level of accuracy.\n",
    "\n",
    "Some notes:\n",
    "1. It should succeed in less than 10 epochs, so it is okay to change epochs= to 10, but nothing larger.\n",
    "2. When it reaches 99% or greater it should print out the string \"Reached 99% accuracy so cancelling training!\"\n",
    "3. If you add any additional variables, make sure you use the same names as the ones used in the class.\n",
    "\n",
    "I've started the code for you below -- how would you finish it?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from os import path, getcwd, chdir\n",
    "\n",
    "# DO NOT CHANGE THE LINE BELOW. If you are developing in a local\n",
    "# environment, then grab mnist.npz from the Coursera Jupyter Notebook\n",
    "# and place it inside a local folder and edit the path to that location\n",
    "path = f\"{getcwd()}/../tmp2/mnist.npz\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "9rvXQGAA0ssC"
   },
   "outputs": [],
   "source": [
    "# GRADED FUNCTION: train_mnist\n",
    "def train_mnist():\n",
    "    # Please write your code only where you are indicated.\n",
    "    # Please do not remove the # model fitting inline comments.\n",
    "\n",
    "    # Set up a callback that stops training once 99% accuracy is reached\n",
    "    class myCallback(tf.keras.callbacks.Callback):\n",
    "        def on_epoch_end(self, epoch, logs={}):\n",
    "            if logs.get('acc', 0) > 0.99:\n",
    "                print(\"Reached 99% accuracy so cancelling training!\")\n",
    "                self.model.stop_training = True\n",
    "\n",
    "    # Instantiate the callback\n",
    "    callbacks = myCallback()\n",
    "\n",
    "    mnist = tf.keras.datasets.mnist\n",
    "\n",
    "    (x_train, y_train), (x_test, y_test) = mnist.load_data(path=path)\n",
    "\n",
    "    # Normalize pixel values into the range [0, 1]\n",
    "    x_train = x_train / 255.0\n",
    "    x_test = x_test / 255.0\n",
    "\n",
    "    model = tf.keras.models.Sequential([\n",
    "        # Flatten each 28 x 28 image into a 784-element vector\n",
    "        tf.keras.layers.Flatten(),\n",
    "\n",
    "        # Single hidden layer with ReLU activation, which passes positive values\n",
    "        # through and maps negative values to zero\n",
    "        tf.keras.layers.Dense(1024, activation=tf.nn.relu),\n",
    "\n",
    "        # Output layer sized to our 10 classes, with softmax producing a\n",
    "        # probability distribution over the digits\n",
    "        tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n",
    "    ])\n",
    "\n",
    "    model.compile(optimizer='adam',\n",
    "                  loss='sparse_categorical_crossentropy',\n",
    "                  metrics=['accuracy'])\n",
    "\n",
    "    # model fitting\n",
    "    history = model.fit(\n",
    "        # Fit the model on the training set (not the test set) for at most 6 epochs,\n",
    "        # within the exercise's 10-epoch limit; the callback stops training early\n",
    "        # once the accuracy threshold is reached\n",
    "        x_train, y_train, epochs=6, callbacks=[callbacks]\n",
    "    )\n",
    "    # model fitting\n",
    "    return history.epoch, history.history['acc'][-1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "9rvXQGAA0ssC"
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: Logging before flag parsing goes to stderr.\n",
      "W0930 19:29:01.265596 139898245306176 deprecation.py:506] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Call initializer instance with the dtype argument instead of passing it to the constructor\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/6\n",
      "34016/60000 [================>.............] - ETA: 7s - loss: 0.2347 - acc: 0.9305"
     ]
    }
   ],
   "source": [
    "train_mnist()"
   ]
  },
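  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The early stopping above relies on two things: Keras calls `on_epoch_end` with a `logs` dictionary of that epoch's metrics, and setting `self.model.stop_training = True` tells `fit` to halt after the current epoch. The cell below is a minimal, self-contained sketch of that mechanism on synthetic data; the `StopAtTarget` name and the toy dataset are illustrative only and are not part of the graded exercise. Depending on the TensorFlow version, the training-accuracy key in `logs` is `acc` or `accuracy`, so the sketch checks both."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "class StopAtTarget(tf.keras.callbacks.Callback):\n",
    "    \"\"\"Illustrative callback: stop once training accuracy exceeds a target.\"\"\"\n",
    "    def __init__(self, target=0.99):\n",
    "        super(StopAtTarget, self).__init__()\n",
    "        self.target = target\n",
    "\n",
    "    def on_epoch_end(self, epoch, logs=None):\n",
    "        logs = logs or {}\n",
    "        # The metric key differs across TF versions: 'acc' (1.x) vs 'accuracy' (2.x)\n",
    "        acc = logs.get('acc', logs.get('accuracy'))\n",
    "        if acc is not None and acc > self.target:\n",
    "            print(\"Reached {:.0%} accuracy so cancelling training!\".format(self.target))\n",
    "            self.model.stop_training = True\n",
    "\n",
    "# Tiny synthetic problem, just to exercise the callback\n",
    "x = np.random.rand(512, 4).astype('float32')\n",
    "y = (x.sum(axis=1) > 2.0).astype('int32')\n",
    "\n",
    "toy_model = tf.keras.models.Sequential([\n",
    "    tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),\n",
    "    tf.keras.layers.Dense(2, activation='softmax')\n",
    "])\n",
    "toy_model.compile(optimizer='adam',\n",
    "                  loss='sparse_categorical_crossentropy',\n",
    "                  metrics=['accuracy'])\n",
    "toy_model.fit(x, y, epochs=50, callbacks=[StopAtTarget(0.99)], verbose=0)"
   ]
  },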
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "coursera": {
   "course_slug": "introduction-tensorflow",
   "graded_item_id": "d6dew",
   "launcher_item_id": "FExZ4"
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}