import os
import sys

import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms

# Directory containing this script; the training log is written here.
path = os.path.split(os.path.abspath(os.path.realpath(sys.argv[0])))[0] + os.path.sep
# Hyperparameters
batch_size = 100
learning_rate = 0.001
num_epochs = 1
# MNIST training set; set download=True on the first run if ./data/ is empty.
train_dataset = dsets.MNIST(root='./data/',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=False)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
class CNN(nn.Module):
    """Two conv blocks (conv -> batchnorm -> ReLU -> maxpool) and a linear classifier."""
    def __init__(self):
        super(CNN, self).__init__()
        # 1x28x28 -> 16x14x14 (padding=2 keeps the 5x5 conv size-preserving; the pool halves it)
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # 16x14x14 -> 32x7x7
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.fc = nn.Linear(7*7*32, 10)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)  # flatten to (batch, 7*7*32)
        out = self.fc(out)
        return out

cnnmodel = CNN()
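# Shape sanity check (illustrative addition, not part of the original script):
# a dummy 1x28x28 grayscale image should map to 10 class logits.
with torch.no_grad():
    dummy = torch.zeros(1, 1, 28, 28)
    assert cnnmodel(dummy).shape == (1, 10)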
# Open the training log; mode 'w' already truncates any existing file.
f = open(path + 'output.txt', 'w')
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnnmodel.parameters(), lr=learning_rate)
cnnmodel.train()
for i, (images, labels) in enumerate(train_loader):
    optimizer.zero_grad()
    outputs = cnnmodel(images)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()
    if (i + 1) % 10 == 0:
        # len(train_loader) is the number of batches per epoch
        msg = 'Epoch [%d/%d], Iter [%d/%d] Loss: %.4f' % (
            1, num_epochs, i + 1, len(train_loader), loss.item())
        f.write(msg + '\n')
        print(msg)
    if i > 60:  # stop early after ~60 batches to keep the demo short
        break
f.close()
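# Evaluation sketch (an assumed addition, not in the original script): measure
# accuracy on the MNIST test split, assuming the data already exists under ./data/.
test_dataset = dsets.MNIST(root='./data/',
                           train=False,
                           transform=transforms.ToTensor(),
                           download=False)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
cnnmodel.eval()  # use running batch-norm statistics during evaluation
correct, total = 0, 0
with torch.no_grad():
    for images, labels in test_loader:
        outputs = cnnmodel(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Test accuracy: %.2f%%' % (100.0 * correct / total))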