{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Saving and Loading Models\n",
    "\n",
    "In this notebook, I'll show you how to save and load models with PyTorch. This is important because you'll often want to load previously trained models to use in making predictions or to continue training on new data."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "%config InlineBackend.figure_format = 'retina'\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "import torch\n",
    "from torch import nn\n",
    "from torch import optim\n",
    "import torch.nn.functional as F\n",
    "from torchvision import datasets, transforms\n",
    "\n",
    "import helper\n",
    "import fc_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz\n",
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz\n",
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz\n",
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz\n",
      "Processing...\n",
      "Done!\n"
     ]
    }
   ],
   "source": [
    "# Define a transform to normalize the data\n",
    "transform = transforms.Compose([transforms.ToTensor(),\n",
    "                                transforms.Normalize((0.5,), (0.5,))])\n",
    "# Download and load the training data\n",
    "trainset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=True, transform=transform)\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n",
    "\n",
    "# Download and load the test data\n",
    "testset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=False, transform=transform)\n",
    "testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here we can see one of the images."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "<base64 PNG data elided: one sample Fashion-MNIST image>",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x7fa4f2708b00>"
      ]
     },
     "metadata": {
      "image/png": {
       "height": 233,
       "width": 233
      },
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "image, label = next(iter(trainloader))\n",
    "helper.imshow(image[0,:]);"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Train a network\n",
    "\n",
    "To make things more concise here, I moved the model architecture and training code from the last part to a file called `fc_model`. Importing this, we can easily create a fully-connected network with `fc_model.Network`, and train the network using `fc_model.train`. I'll use this model (once it's trained) to demonstrate how we can save and load models."
   ]
  },
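  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The `fc_model` file itself isn't shown in this notebook. Purely as a rough sketch (not the actual contents of `fc_model`), a `Network` class consistent with how it's used here, and with the model printout further down, might look something like this:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical sketch of fc_model.Network, inferred from its usage below;\n",
    "# the real fc_model may differ in its details.\n",
    "class Network(nn.Module):\n",
    "    def __init__(self, input_size, output_size, hidden_layers, drop_p=0.5):\n",
    "        super().__init__()\n",
    "        # Input to the first hidden layer, then hidden layer to hidden layer\n",
    "        self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])\n",
    "        layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])\n",
    "        self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])\n",
    "        self.output = nn.Linear(hidden_layers[-1], output_size)\n",
    "        self.dropout = nn.Dropout(p=drop_p)\n",
    "\n",
    "    def forward(self, x):\n",
    "        for layer in self.hidden_layers:\n",
    "            x = self.dropout(F.relu(layer(x)))\n",
    "        # Log-probabilities on the way out, to pair with the NLLLoss criterion below\n",
    "        return F.log_softmax(self.output(x), dim=1)"
   ]
  },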
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the network, define the criterion and optimizer\n",
    "\n",
    "model = fc_model.Network(784, 10, [512, 256, 128])\n",
    "criterion = nn.NLLLoss()\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.001)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 1/2.. Training Loss: 1.710.. Test Loss: 0.976.. Test Accuracy: 0.679\n",
      "Epoch: 1/2.. Training Loss: 1.043.. Test Loss: 0.760.. Test Accuracy: 0.713\n",
      "Epoch: 1/2.. Training Loss: 0.864.. Test Loss: 0.675.. Test Accuracy: 0.737\n",
      "Epoch: 1/2.. Training Loss: 0.780.. Test Loss: 0.635.. Test Accuracy: 0.749\n",
      "Epoch: 1/2.. Training Loss: 0.787.. Test Loss: 0.617.. Test Accuracy: 0.770\n",
      "Epoch: 1/2.. Training Loss: 0.755.. Test Loss: 0.590.. Test Accuracy: 0.774\n",
      "Epoch: 1/2.. Training Loss: 0.717.. Test Loss: 0.583.. Test Accuracy: 0.773\n",
      "Epoch: 1/2.. Training Loss: 0.650.. Test Loss: 0.565.. Test Accuracy: 0.778\n",
      "Epoch: 1/2.. Training Loss: 0.661.. Test Loss: 0.561.. Test Accuracy: 0.790\n",
      "Epoch: 1/2.. Training Loss: 0.637.. Test Loss: 0.560.. Test Accuracy: 0.796\n",
      "Epoch: 1/2.. Training Loss: 0.586.. Test Loss: 0.529.. Test Accuracy: 0.799\n",
      "Epoch: 1/2.. Training Loss: 0.627.. Test Loss: 0.525.. Test Accuracy: 0.805\n",
      "Epoch: 1/2.. Training Loss: 0.602.. Test Loss: 0.521.. Test Accuracy: 0.809\n",
      "Epoch: 1/2.. Training Loss: 0.635.. Test Loss: 0.522.. Test Accuracy: 0.810\n",
      "Epoch: 1/2.. Training Loss: 0.606.. Test Loss: 0.502.. Test Accuracy: 0.810\n",
      "Epoch: 1/2.. Training Loss: 0.582.. Test Loss: 0.522.. Test Accuracy: 0.805\n",
      "Epoch: 1/2.. Training Loss: 0.584.. Test Loss: 0.507.. Test Accuracy: 0.822\n",
      "Epoch: 1/2.. Training Loss: 0.545.. Test Loss: 0.500.. Test Accuracy: 0.818\n",
      "Epoch: 1/2.. Training Loss: 0.599.. Test Loss: 0.489.. Test Accuracy: 0.826\n",
      "Epoch: 1/2.. Training Loss: 0.585.. Test Loss: 0.498.. Test Accuracy: 0.816\n",
      "Epoch: 1/2.. Training Loss: 0.546.. Test Loss: 0.482.. Test Accuracy: 0.821\n",
      "Epoch: 1/2.. Training Loss: 0.527.. Test Loss: 0.477.. Test Accuracy: 0.828\n",
      "Epoch: 1/2.. Training Loss: 0.563.. Test Loss: 0.482.. Test Accuracy: 0.820\n",
      "Epoch: 2/2.. Training Loss: 0.558.. Test Loss: 0.473.. Test Accuracy: 0.830\n",
      "Epoch: 2/2.. Training Loss: 0.525.. Test Loss: 0.492.. Test Accuracy: 0.821\n",
      "Epoch: 2/2.. Training Loss: 0.534.. Test Loss: 0.469.. Test Accuracy: 0.830\n",
      "Epoch: 2/2.. Training Loss: 0.548.. Test Loss: 0.483.. Test Accuracy: 0.827\n",
      "Epoch: 2/2.. Training Loss: 0.559.. Test Loss: 0.471.. Test Accuracy: 0.825\n",
      "Epoch: 2/2.. Training Loss: 0.534.. Test Loss: 0.469.. Test Accuracy: 0.831\n",
      "Epoch: 2/2.. Training Loss: 0.568.. Test Loss: 0.473.. Test Accuracy: 0.823\n",
      "Epoch: 2/2.. Training Loss: 0.551.. Test Loss: 0.463.. Test Accuracy: 0.829\n",
      "Epoch: 2/2.. Training Loss: 0.566.. Test Loss: 0.453.. Test Accuracy: 0.832\n",
      "Epoch: 2/2.. Training Loss: 0.513.. Test Loss: 0.446.. Test Accuracy: 0.835\n",
      "Epoch: 2/2.. Training Loss: 0.572.. Test Loss: 0.476.. Test Accuracy: 0.821\n",
      "Epoch: 2/2.. Training Loss: 0.511.. Test Loss: 0.447.. Test Accuracy: 0.833\n",
      "Epoch: 2/2.. Training Loss: 0.554.. Test Loss: 0.449.. Test Accuracy: 0.833\n",
      "Epoch: 2/2.. Training Loss: 0.502.. Test Loss: 0.468.. Test Accuracy: 0.832\n",
      "Epoch: 2/2.. Training Loss: 0.507.. Test Loss: 0.451.. Test Accuracy: 0.834\n",
      "Epoch: 2/2.. Training Loss: 0.502.. Test Loss: 0.454.. Test Accuracy: 0.840\n",
      "Epoch: 2/2.. Training Loss: 0.536.. Test Loss: 0.444.. Test Accuracy: 0.839\n",
      "Epoch: 2/2.. Training Loss: 0.503.. Test Loss: 0.447.. Test Accuracy: 0.837\n",
      "Epoch: 2/2.. Training Loss: 0.542.. Test Loss: 0.446.. Test Accuracy: 0.840\n",
      "Epoch: 2/2.. Training Loss: 0.549.. Test Loss: 0.438.. Test Accuracy: 0.838\n",
      "Epoch: 2/2.. Training Loss: 0.510.. Test Loss: 0.461.. Test Accuracy: 0.830\n",
      "Epoch: 2/2.. Training Loss: 0.544.. Test Loss: 0.448.. Test Accuracy: 0.837\n",
      "Epoch: 2/2.. Training Loss: 0.496.. Test Loss: 0.448.. Test Accuracy: 0.843\n"
     ]
    }
   ],
   "source": [
    "fc_model.train(model, trainloader, testloader, criterion, optimizer, epochs=2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Saving and loading networks\n",
    "\n",
    "As you can imagine, it's impractical to train a network every time you need to use it. Instead, we can save trained networks, then load them later to train more or use them for predictions.\n",
    "\n",
    "The parameters for PyTorch networks are stored in a model's `state_dict`. We can see the state dict contains the weight and bias matrices for each of our layers."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Our model: \n",
      "\n",
      " Network(\n",
      "  (hidden_layers): ModuleList(\n",
      "    (0): Linear(in_features=784, out_features=512, bias=True)\n",
      "    (1): Linear(in_features=512, out_features=256, bias=True)\n",
      "    (2): Linear(in_features=256, out_features=128, bias=True)\n",
      "  )\n",
      "  (output): Linear(in_features=128, out_features=10, bias=True)\n",
      "  (dropout): Dropout(p=0.5)\n",
      ") \n",
      "\n",
      "The state dict keys: \n",
      "\n",
      " odict_keys(['hidden_layers.0.weight', 'hidden_layers.0.bias', 'hidden_layers.1.weight', 'hidden_layers.1.bias', 'hidden_layers.2.weight', 'hidden_layers.2.bias', 'output.weight', 'output.bias'])\n"
     ]
    }
   ],
   "source": [
    "print(\"Our model: \\n\\n\", model, '\\n')\n",
    "print(\"The state dict keys: \\n\\n\", model.state_dict().keys())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The simplest thing to do is save the state dict with `torch.save`. For example, we can save it to a file `'checkpoint.pth'`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.save(model.state_dict(), 'checkpoint.pth')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Then we can load the state dict with `torch.load`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "odict_keys(['hidden_layers.0.weight', 'hidden_layers.0.bias', 'hidden_layers.1.weight', 'hidden_layers.1.bias', 'hidden_layers.2.weight', 'hidden_layers.2.bias', 'output.weight', 'output.bias'])\n"
     ]
    }
   ],
   "source": [
    "state_dict = torch.load('checkpoint.pth')\n",
    "print(state_dict.keys())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "And to load the state dict into the network, you do `model.load_state_dict(state_dict)`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model.load_state_dict(state_dict)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Seems pretty straightforward, but as usual it's a bit more complicated. Loading the state dict works only if the model architecture is exactly the same as the checkpoint architecture. If I create a model with a different architecture, this fails."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "ename": "RuntimeError",
     "evalue": "Error(s) in loading state_dict for Network:\n\tWhile copying the parameter named \"hidden_layers.0.weight\", whose dimensions in the model are torch.Size([400, 784]) and whose dimensions in the checkpoint are torch.Size([512, 784]).\n\tWhile copying the parameter named \"hidden_layers.0.bias\", whose dimensions in the model are torch.Size([400]) and whose dimensions in the checkpoint are torch.Size([512]).\n\tWhile copying the parameter named \"hidden_layers.1.weight\", whose dimensions in the model are torch.Size([200, 400]) and whose dimensions in the checkpoint are torch.Size([256, 512]).\n\tWhile copying the parameter named \"hidden_layers.1.bias\", whose dimensions in the model are torch.Size([200]) and whose dimensions in the checkpoint are torch.Size([256]).\n\tWhile copying the parameter named \"hidden_layers.2.weight\", whose dimensions in the model are torch.Size([100, 200]) and whose dimensions in the checkpoint are torch.Size([128, 256]).\n\tWhile copying the parameter named \"hidden_layers.2.bias\", whose dimensions in the model are torch.Size([100]) and whose dimensions in the checkpoint are torch.Size([128]).\n\tWhile copying the parameter named \"output.weight\", whose dimensions in the model are torch.Size([10, 100]) and whose dimensions in the checkpoint are torch.Size([10, 128]).",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-10-d859c59ebec0>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      2\u001b[0m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfc_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNetwork\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m784\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m10\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;36m400\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m200\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m100\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      3\u001b[0m \u001b[0;31m# This will throw an error because the tensor sizes are wrong!\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_state_dict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstate_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mload_state_dict\u001b[0;34m(self, state_dict, strict)\u001b[0m\n\u001b[1;32m    719\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0merror_msgs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    720\u001b[0m             raise RuntimeError('Error(s) in loading state_dict for {}:\\n\\t{}'.format(\n\u001b[0;32m--> 721\u001b[0;31m                 self.__class__.__name__, \"\\n\\t\".join(error_msgs)))\n\u001b[0m\u001b[1;32m    722\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    723\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mRuntimeError\u001b[0m: Error(s) in loading state_dict for Network:\n\tWhile copying the parameter named \"hidden_layers.0.weight\", whose dimensions in the model are torch.Size([400, 784]) and whose dimensions in the checkpoint are torch.Size([512, 784]).\n\tWhile copying the parameter named \"hidden_layers.0.bias\", whose dimensions in the model are torch.Size([400]) and whose dimensions in the checkpoint are torch.Size([512]).\n\tWhile copying the parameter named \"hidden_layers.1.weight\", whose dimensions in the model are torch.Size([200, 400]) and whose dimensions in the checkpoint are torch.Size([256, 512]).\n\tWhile copying the parameter named \"hidden_layers.1.bias\", whose dimensions in the model are torch.Size([200]) and whose dimensions in the checkpoint are torch.Size([256]).\n\tWhile copying the parameter named \"hidden_layers.2.weight\", whose dimensions in the model are torch.Size([100, 200]) and whose dimensions in the checkpoint are torch.Size([128, 256]).\n\tWhile copying the parameter named \"hidden_layers.2.bias\", whose dimensions in the model are torch.Size([100]) and whose dimensions in the checkpoint are torch.Size([128]).\n\tWhile copying the parameter named \"output.weight\", whose dimensions in the model are torch.Size([10, 100]) and whose dimensions in the checkpoint are torch.Size([10, 128])."
     ]
    }
   ],
   "source": [
    "# Try this\n",
    "model = fc_model.Network(784, 10, [400, 200, 100])\n",
    "# This will throw an error because the tensor sizes are wrong!\n",
    "model.load_state_dict(state_dict)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This means we need to rebuild the model exactly as it was when trained. Information about the model architecture needs to be saved in the checkpoint, along with the state dict. To do this, you build a dictionary with all the information you need to completely rebuild the model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "checkpoint = {'input_size': 784,\n",
    "              'output_size': 10,\n",
    "              'hidden_layers': [each.out_features for each in model.hidden_layers],\n",
    "              'state_dict': model.state_dict()}\n",
    "\n",
    "torch.save(checkpoint, 'checkpoint.pth')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now the checkpoint has all the necessary information to rebuild the trained model. You can easily make that a function if you want; a sketch of a `save_checkpoint` helper follows. Similarly, we can write a function to load checkpoints."
   ]
  },
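  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For instance, the saving side could be wrapped up like this. This `save_checkpoint` helper is just an illustration (it isn't part of `fc_model`), and it assumes the model was built with `fc_model.Network`, so it has a `hidden_layers` attribute."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative helper, not part of fc_model; assumes a fc_model.Network model\n",
    "def save_checkpoint(model, filepath, input_size=784, output_size=10):\n",
    "    # Store the architecture alongside the weights so the network\n",
    "    # can be rebuilt before load_state_dict is called\n",
    "    checkpoint = {'input_size': input_size,\n",
    "                  'output_size': output_size,\n",
    "                  'hidden_layers': [each.out_features for each in model.hidden_layers],\n",
    "                  'state_dict': model.state_dict()}\n",
    "    torch.save(checkpoint, filepath)\n",
    "\n",
    "save_checkpoint(model, 'checkpoint.pth')"
   ]
  },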
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_checkpoint(filepath):\n",
    "    checkpoint = torch.load(filepath)\n",
    "    model = fc_model.Network(checkpoint['input_size'],\n",
    "                             checkpoint['output_size'],\n",
    "                             checkpoint['hidden_layers'])\n",
    "    model.load_state_dict(checkpoint['state_dict'])\n",
    "    \n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Network(\n",
      "  (hidden_layers): ModuleList(\n",
      "    (0): Linear(in_features=784, out_features=400, bias=True)\n",
      "    (1): Linear(in_features=400, out_features=200, bias=True)\n",
      "    (2): Linear(in_features=200, out_features=100, bias=True)\n",
      "  )\n",
      "  (output): Linear(in_features=100, out_features=10, bias=True)\n",
      "  (dropout): Dropout(p=0.5)\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "model = load_checkpoint('checkpoint.pth')\n",
    "print(model)"
   ]
  },
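  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The loaded network can now be used just like the original, for example to make predictions on test images. This is a quick sanity-check sketch; it assumes the network expects flattened 784-long inputs and returns log-probabilities, as the `NLLLoss` criterion above implies."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: run a batch of test images through the loaded model\n",
    "model.eval()  # turn off dropout for evaluation\n",
    "images, labels = next(iter(testloader))\n",
    "images = images.view(images.shape[0], -1)  # flatten 28x28 images to 784-long vectors\n",
    "\n",
    "with torch.no_grad():\n",
    "    ps = torch.exp(model(images))  # log-probabilities -> probabilities\n",
    "print(ps.shape)  # one row of 10 class probabilities per image"
   ]
  },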
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.6.3"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 2
|
|
}
|