diff --git a/python/Deep Learning/Deep Learning with PyTorch/Pytorch - testing cuda performance.ipynb b/python/Deep Learning/Deep Learning with PyTorch/Pytorch - testing cuda performance.ipynb new file mode 100644 index 0000000..913ec4a --- /dev/null +++ b/python/Deep Learning/Deep Learning with PyTorch/Pytorch - testing cuda performance.ipynb @@ -0,0 +1,281 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mode=test: NumWorkers= 1 BatchSize= 1 Time=33.559s Imgs/s= 38.14\n", + "Mode=test: NumWorkers= 1 BatchSize= 2 Time=16.639s Imgs/s= 76.93\n", + "Mode=test: NumWorkers= 1 BatchSize= 4 Time= 8.817s Imgs/s=145.17\n", + "Mode=test: NumWorkers= 1 BatchSize= 8 Time= 8.802s Imgs/s=145.41\n", + "Mode=test: NumWorkers= 1 BatchSize=16 Time= 9.094s Imgs/s=140.76\n", + "Mode=test: NumWorkers= 1 BatchSize=32 Time= 8.247s Imgs/s=155.21\n", + "Mode=test: NumWorkers= 2 BatchSize= 1 Time=34.151s Imgs/s= 37.48\n", + "Mode=test: NumWorkers= 2 BatchSize= 2 Time=16.366s Imgs/s= 78.21\n", + "Mode=test: NumWorkers= 2 BatchSize= 4 Time= 7.701s Imgs/s=166.20\n", + "Mode=test: NumWorkers= 2 BatchSize= 8 Time= 3.888s Imgs/s=329.25\n", + "Mode=test: NumWorkers= 2 BatchSize=16 Time= 3.824s Imgs/s=334.75\n", + "Mode=test: NumWorkers= 2 BatchSize=32 Time= 3.706s Imgs/s=345.38\n", + "Mode=test: NumWorkers= 4 BatchSize= 1 Time=34.202s Imgs/s= 37.43\n", + "Mode=test: NumWorkers= 4 BatchSize= 2 Time=16.350s Imgs/s= 78.29\n", + "Mode=test: NumWorkers= 4 BatchSize= 4 Time= 7.816s Imgs/s=163.76\n", + "Mode=test: NumWorkers= 4 BatchSize= 8 Time= 3.884s Imgs/s=329.59\n", + "Mode=test: NumWorkers= 4 BatchSize=16 Time= 2.029s Imgs/s=630.98\n", + "Mode=test: NumWorkers= 4 BatchSize=32 Time= 1.819s Imgs/s=703.63\n", + "Mode=test: NumWorkers= 8 BatchSize= 1 Time=33.488s Imgs/s= 38.22\n", + "Mode=test: NumWorkers= 8 BatchSize= 2 Time=16.172s Imgs/s= 79.15\n", + "Mode=test: NumWorkers= 8 BatchSize= 4 Time= 7.842s Imgs/s=163.22\n", + "Mode=test: NumWorkers= 8 BatchSize= 8 Time= 3.866s Imgs/s=331.12\n", + "Mode=test: NumWorkers= 8 BatchSize=16 Time= 2.034s Imgs/s=629.43\n", + "Mode=test: NumWorkers= 8 BatchSize=32 Time= 1.469s Imgs/s=871.30\n" + ] + } + ], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "from torch.utils.data import DataLoader\n", + "\n", + "import torchvision.models as models\n", + "import torchvision.datasets as datasets\n", + "import torchvision.transforms as transforms\n", + "import time\n", + "\n", + "def main():\n", + " mode = 'test'\n", + " model = models.resnet50()\n", + " criterion = nn.CrossEntropyLoss()\n", + " optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n", + " N = 1280\n", + " dataset = datasets.FakeData(size=N, transform=transforms.ToTensor())\n", + " if mode=='test': # switch to evaluate mode\n", + " model.eval()\n", + " model.to('cuda')\n", + " for num_workers in [1, 2, 4, 8]: # 4 < 2 for test\n", + " for batch_size in [1, 2, 4, 8, 16, 32]:\n", + " loader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, pin_memory=True)\n", + " if mode=='test':\n", + " for i, (data, target) in enumerate(loader):\n", + " if i==1:\n", + " tm = time.time()\n", + 
" data = data.to('cuda', non_blocking=True)\n", + " output = model(data)\n", + " else: # mode=='train':\n", + " for i, (data, target) in enumerate(loader):\n", + " if i==1:\n", + " tm = time.time()\n", + " data = data.to('cuda', non_blocking=True)\n", + " target = target.to('cuda', non_blocking=True).long()\n", + " optimizer.zero_grad()\n", + " output = model(data)\n", + " loss = criterion(output, target)\n", + " loss.backward()\n", + " optimizer.step()\n", + " tm = time.time() - tm\n", + " print('Mode=%s: NumWorkers=%2d BatchSize=%2d Time=%6.3fs Imgs/s=%6.2f' % (mode, num_workers, batch_size, tm, N/tm))\n", + " torch.cuda.empty_cache() # doesn't seem to be working...\n", + "\n", + "if __name__ == '__main__':\n", + " main()" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mode=test: NumWorkers= 1 BatchSize=40 Time= 7.026s Imgs/s=182.17\n", + "Mode=test: NumWorkers= 2 BatchSize=40 Time= 3.407s Imgs/s=375.71\n", + "Mode=test: NumWorkers= 4 BatchSize=40 Time= 1.752s Imgs/s=730.46\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.323s Imgs/s=967.16\n", + "Mode=test: NumWorkers=16 BatchSize=40 Time= 1.419s Imgs/s=901.91\n" + ] + } + ], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "from torch.utils.data import DataLoader\n", + "\n", + "import torchvision.models as models\n", + "import torchvision.datasets as datasets\n", + "import torchvision.transforms as transforms\n", + "import time\n", + "\n", + "def main():\n", + " mode = 'test'\n", + " model = models.resnet50()\n", + " criterion = nn.CrossEntropyLoss()\n", + " optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n", + " N = 1280\n", + " dataset = datasets.FakeData(size=N, transform=transforms.ToTensor())\n", + " if mode=='test': # switch to evaluate mode\n", + " model.eval()\n", + " model.to('cuda')\n", + " for num_workers in [1, 2, 4, 8, 16]: # 4 < 2 for test\n", + " for batch_size in [40]:\n", + " loader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, pin_memory=True)\n", + " if mode=='test':\n", + " for i, (data, target) in enumerate(loader):\n", + " if i==1:\n", + " tm = time.time()\n", + " data = data.to('cuda', non_blocking=True)\n", + " output = model(data)\n", + " else: # mode=='train':\n", + " for i, (data, target) in enumerate(loader):\n", + " if i==1:\n", + " tm = time.time()\n", + " data = data.to('cuda', non_blocking=True)\n", + " target = target.to('cuda', non_blocking=True).long()\n", + " optimizer.zero_grad()\n", + " output = model(data)\n", + " loss = criterion(output, target)\n", + " loss.backward()\n", + " optimizer.step()\n", + " tm = time.time() - tm\n", + " print('Mode=%s: NumWorkers=%2d BatchSize=%2d Time=%6.3fs Imgs/s=%6.2f' % (mode, num_workers, batch_size, tm, N/tm))\n", + " torch.cuda.empty_cache() # doesn't seem to be working...\n", + "\n", + "if __name__ == '__main__':\n", + " main()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "torch.cuda.empty_cache()" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.509s Imgs/s=848.26\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.310s Imgs/s=976.73\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.348s Imgs/s=949.28\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.324s 
Imgs/s=966.43\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.348s Imgs/s=949.28\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.362s Imgs/s=939.55\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.415s Imgs/s=904.46\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.314s Imgs/s=973.77\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.445s Imgs/s=885.73\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.417s Imgs/s=903.18\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.415s Imgs/s=904.46\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.432s Imgs/s=893.75\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.553s Imgs/s=824.29\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.328s Imgs/s=963.53\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.498s Imgs/s=854.48\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.394s Imgs/s=918.04\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.531s Imgs/s=836.11\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.375s Imgs/s=930.69\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.401s Imgs/s=913.47\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.391s Imgs/s=920.02\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.328s Imgs/s=963.53\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.328s Imgs/s=963.53\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.431s Imgs/s=894.37\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.326s Imgs/s=964.98\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.386s Imgs/s=923.33\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.329s Imgs/s=962.81\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.459s Imgs/s=877.25\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.427s Imgs/s=896.87\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.441s Imgs/s=888.18\n", + "Mode=test: NumWorkers= 8 BatchSize=40 Time= 1.448s Imgs/s=883.90\n" + ] + } + ], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "from torch.utils.data import DataLoader\n", + "\n", + "import torchvision.models as models\n", + "import torchvision.datasets as datasets\n", + "import torchvision.transforms as transforms\n", + "import time\n", + "\n", + "def main():\n", + " mode = 'test'\n", + " model = models.resnet50()\n", + " criterion = nn.CrossEntropyLoss()\n", + " optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n", + " N = 1280\n", + " dataset = datasets.FakeData(size=N, transform=transforms.ToTensor())\n", + " if mode=='test': # switch to evaluate mode\n", + " model.eval()\n", + " model.to('cuda')\n", + " for _ in range (30):\n", + " num_workers = 8\n", + " batch_size = 40\n", + " loader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, pin_memory=True)\n", + " if mode=='test':\n", + " for i, (data, target) in enumerate(loader):\n", + " if i==1:\n", + " tm = time.time()\n", + " data = data.to('cuda', non_blocking=True)\n", + " output = model(data)\n", + " else: # mode=='train':\n", + " for i, (data, target) in enumerate(loader):\n", + " if i==1:\n", + " tm = time.time()\n", + " data = data.to('cuda', non_blocking=True)\n", + " target = target.to('cuda', non_blocking=True).long()\n", + " optimizer.zero_grad()\n", + " output = model(data)\n", + " loss = criterion(output, target)\n", + " loss.backward()\n", + " optimizer.step()\n", + " tm = time.time() - tm\n", + " print('Mode=%s: NumWorkers=%2d BatchSize=%2d Time=%6.3fs Imgs/s=%6.2f' % (mode, num_workers, batch_size, tm, N/tm))\n", + " torch.cuda.empty_cache() # doesn't seem to be working...\n", + "\n", + 
"if __name__ == '__main__':\n", + " main()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/Deep Learning/Deep Learning with PyTorch/test.ipynb b/python/Deep Learning/Deep Learning with PyTorch/test.ipynb new file mode 100644 index 0000000..08ec6ba --- /dev/null +++ b/python/Deep Learning/Deep Learning with PyTorch/test.ipynb @@ -0,0 +1,332 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([64, 10])\n", + "tensor([[9],\n", + " [9],\n", + " [9],\n", + " [9],\n", + " [9],\n", + " [5],\n", + " [9],\n", + " [5],\n", + " [2],\n", + " [9]])\n", + "Accuracy: 1.5625%\n", + "Epoch: 1 out of 30\n", + "Training Loss: 0.510\n", + "Test Loss: 0.454\n", + "Test Accuracy: 0.836\n", + "\n", + "Epoch: 2 out of 30\n", + "Training Loss: 0.389\n", + "Test Loss: 0.412\n", + "Test Accuracy: 0.852\n", + "\n", + "Epoch: 3 out of 30\n", + "Training Loss: 0.353\n", + "Test Loss: 0.388\n", + "Test Accuracy: 0.861\n", + "\n", + "Epoch: 4 out of 30\n", + "Training Loss: 0.330\n", + "Test Loss: 0.428\n", + "Test Accuracy: 0.847\n", + "\n", + "Epoch: 5 out of 30\n", + "Training Loss: 0.315\n", + "Test Loss: 0.381\n", + "Test Accuracy: 0.865\n", + "\n", + "Epoch: 6 out of 30\n", + "Training Loss: 0.303\n", + "Test Loss: 0.388\n", + "Test Accuracy: 0.864\n", + "\n", + "Epoch: 7 out of 30\n", + "Training Loss: 0.292\n", + "Test Loss: 0.364\n", + "Test Accuracy: 0.872\n", + "\n", + "Epoch: 8 out of 30\n", + "Training Loss: 0.281\n", + "Test Loss: 0.370\n", + "Test Accuracy: 0.869\n", + "\n", + "Epoch: 9 out of 30\n", + "Training Loss: 0.270\n", + "Test Loss: 0.365\n", + "Test Accuracy: 0.877\n", + "\n", + "Epoch: 10 out of 30\n", + "Training Loss: 0.267\n", + "Test Loss: 0.366\n", + "Test Accuracy: 0.877\n", + "\n", + "Epoch: 11 out of 30\n", + "Training Loss: 0.260\n", + "Test Loss: 0.369\n", + "Test Accuracy: 0.873\n", + "\n", + "Epoch: 12 out of 30\n", + "Training Loss: 0.254\n", + "Test Loss: 0.377\n", + "Test Accuracy: 0.876\n", + "\n", + "Epoch: 13 out of 30\n", + "Training Loss: 0.244\n", + "Test Loss: 0.369\n", + "Test Accuracy: 0.879\n", + "\n", + "Epoch: 14 out of 30\n", + "Training Loss: 0.243\n", + "Test Loss: 0.371\n", + "Test Accuracy: 0.879\n", + "\n", + "Epoch: 15 out of 30\n", + "Training Loss: 0.237\n", + "Test Loss: 0.377\n", + "Test Accuracy: 0.883\n", + "\n", + "Epoch: 16 out of 30\n", + "Training Loss: 0.230\n", + "Test Loss: 0.407\n", + "Test Accuracy: 0.874\n", + "\n", + "Epoch: 17 out of 30\n", + "Training Loss: 0.228\n", + "Test Loss: 0.370\n", + "Test Accuracy: 0.879\n", + "\n", + "Epoch: 18 out of 30\n", + "Training Loss: 0.221\n", + "Test Loss: 0.376\n", + "Test Accuracy: 0.878\n", + "\n", + "Epoch: 19 out of 30\n", + "Training Loss: 0.222\n", + "Test Loss: 0.376\n", + "Test Accuracy: 0.881\n", + "\n", + "Epoch: 20 out of 30\n", + "Training Loss: 0.217\n", + "Test Loss: 0.387\n", + "Test Accuracy: 0.880\n", + "\n", + "Epoch: 21 out of 30\n", + "Training Loss: 0.209\n", 
+ "Test Loss: 0.401\n", + "Test Accuracy: 0.877\n", + "\n", + "Epoch: 22 out of 30\n", + "Training Loss: 0.210\n", + "Test Loss: 0.392\n", + "Test Accuracy: 0.883\n", + "\n", + "Epoch: 23 out of 30\n", + "Training Loss: 0.204\n", + "Test Loss: 0.411\n", + "Test Accuracy: 0.878\n", + "\n", + "Epoch: 24 out of 30\n", + "Training Loss: 0.202\n", + "Test Loss: 0.391\n", + "Test Accuracy: 0.882\n", + "\n", + "Epoch: 25 out of 30\n", + "Training Loss: 0.195\n", + "Test Loss: 0.392\n", + "Test Accuracy: 0.883\n", + "\n", + "Epoch: 26 out of 30\n", + "Training Loss: 0.195\n", + "Test Loss: 0.471\n", + "Test Accuracy: 0.878\n", + "\n", + "Epoch: 27 out of 30\n", + "Training Loss: 0.191\n", + "Test Loss: 0.431\n", + "Test Accuracy: 0.881\n", + "\n", + "Epoch: 28 out of 30\n", + "Training Loss: 0.195\n", + "Test Loss: 0.418\n", + "Test Accuracy: 0.882\n", + "\n", + "Epoch: 29 out of 30\n", + "Training Loss: 0.192\n", + "Test Loss: 0.390\n", + "Test Accuracy: 0.887\n", + "\n", + "Epoch: 30 out of 30\n", + "Training Loss: 0.185\n", + "Test Loss: 0.428\n", + "Test Accuracy: 0.875\n", + "\n" + ] + } + ], + "source": [ + "import torch\n", + "from torchvision import datasets, transforms\n", + "from torch import nn, optim\n", + "import torch.nn.functional as F\n", + "\n", + "# Define a transform to normalize the data\n", + "transform = transforms.Compose([transforms.ToTensor(),\n", + " transforms.Normalize((0.5, 0.5, 0.5),\n", + " (0.5, 0.5, 0.5))])\n", + "\n", + "# Download and load the training data\n", + "trainset = datasets.FashionMNIST(\n", + " '.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)\n", + "\n", + "trainloader = torch.utils.data.DataLoader(\n", + " trainset, batch_size=64, shuffle=True)\n", + "\n", + "# Download and load the test data\n", + "testset = datasets.FashionMNIST(\n", + " '.pytorch/F_MNIST_data/', download=True, train=False,\n", + " transform=transform)\n", + "\n", + "testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)\n", + "\n", + "\n", + "class Classifier(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " self.fc1 = nn.Linear(784, 256)\n", + " self.fc2 = nn.Linear(256, 128)\n", + " self.fc3 = nn.Linear(128, 64)\n", + " self.fc4 = nn.Linear(64, 10)\n", + "\n", + " def forward(self, x):\n", + " # make sure input tensor is flattened\n", + " x = x.view(x.shape[0], -1)\n", + "\n", + " x = F.relu(self.fc1(x))\n", + " x = F.relu(self.fc2(x))\n", + " x = F.relu(self.fc3(x))\n", + " x = F.log_softmax(self.fc4(x), dim=1)\n", + "\n", + " return x\n", + "\n", + "\n", + "model = Classifier()\n", + "\n", + "images, labels = next(iter(testloader))\n", + "\n", + "# Get the class probabilities\n", + "ps = torch.exp(model(images))\n", + "\n", + "# Make sure the shape is appropriate, we should get 10 class probabilities for\n", + "# 64 examples\n", + "print(ps.shape)\n", + "\n", + "top_p, top_class = ps.topk(1, dim=1)\n", + "# Look at the most likely classes for the first 10 examples\n", + "print(top_class[:10, :])\n", + "\n", + "\n", + "equals = top_class == labels.view(*top_class.shape)\n", + "\n", + "\n", + "accuracy = torch.mean(equals.type(torch.FloatTensor))\n", + "print(f'Accuracy: {accuracy.item()*100}%')\n", + "\n", + "\n", + "# Model begins\n", + "\n", + "model = Classifier()\n", + "criterion = nn.NLLLoss()\n", + "optimizer = optim.Adam(model.parameters(), lr=0.003)\n", + "\n", + "epochs = 30\n", + "steps = 0\n", + "\n", + "trainLosses, testLosses = [], []\n", + "for e in range(epochs):\n", + " runningLoss = 0\n", 
+ " for images, labels in trainloader:\n", + "\n", + " optimizer.zero_grad()\n", + "\n", + " log_ps = model(images)\n", + " loss = criterion(log_ps, labels)\n", + " loss.backward()\n", + " optimizer.step()\n", + "\n", + " runningLoss += loss.item()\n", + "\n", + " else:\n", + " testLoss = 0\n", + " accuracy = 0\n", + "\n", + " # Turn off gradients for validation step\n", + " with torch.no_grad():\n", + " for images, labels in testloader:\n", + " # Get the output\n", + " log_ps = model(images)\n", + " # Get the loss\n", + " testLoss += criterion(log_ps, labels)\n", + "\n", + " # Get the probabilities\n", + " ps = torch.exp(log_ps)\n", + " # Get the most likely class for each prediction\n", + " top_p, top_class = ps.topk(1, dim=1)\n", + " # Check if the predictions match the actual label\n", + " equals = top_class == labels.view(*top_class.shape)\n", + " # Update accuracy\n", + " accuracy += torch.mean(equals.type(torch.FloatTensor))\n", + "\n", + " # Update train loss\n", + " trainLosses.append(runningLoss / len(trainloader))\n", + " # Update test loss\n", + " testLosses.append(testLoss / len(testloader))\n", + "\n", + " # Print output\n", + " print(f'Epoch: {e+1} out of {epochs}')\n", + " print(f'Training Loss: {runningLoss/len(trainloader):.3f}')\n", + " print(f'Test Loss: {testLoss/len(testloader):.3f}')\n", + " print(f'Test Accuracy: {accuracy/len(testloader):.3f}')\n", + " print()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}