Imports:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

About torchvision:

torchvision mainly consists of the following packages:

  • torchvision.datasets: several commonly used vision datasets, with support for downloading and loading;
  • torchvision.models: popular model architectures such as AlexNet, VGG, and ResNet, along with pretrained weights;
  • torchvision.transforms: common image operations, e.g. random cropping, rotation, etc.;
  • torchvision.utils: utilities for saving tensors of shape (3 x H x W) to disk, and for laying a mini-batch of images out as a single image grid (a short sketch follows this list);
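As a quick, hedged illustration of torchvision.utils (the random batch is a stand-in for real MNIST images, and grid.png is an arbitrary file name):

import torch
from torchvision import utils

batch = torch.rand(32, 1, 28, 28)      # stand-in for one mini-batch of MNIST images
grid = utils.make_grid(batch, nrow=8)  # lay the batch out as one (3, H, W) image grid
utils.save_image(grid, "grid.png")     # write the grid to disk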

Setting parameters:

# hyperparameters
torch.manual_seed(53113)  # CPU random seed
batch_size = test_batch_size = 32

# GPU settings
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}

1. Data preprocessing

torch.utils.data.DataLoader is used during training to split the training data into batches; each step it yields one batch, until all the data has been served. In other words, it is a data iterator.

The transform argument (strictly speaking it is a parameter of the dataset, e.g. datasets.MNIST below, not of the DataLoader itself) takes a function that receives an image and returns the transformed image; in effect, the image is preprocessed first. Common operations such as ToTensor, RandomCrop, and Normalize can be chained together with transforms.Compose.

  • .ToTensor() converts a numpy.ndarray or PIL image of shape (H, W, C) into a tensor of shape (C, H, W) and scales every value into [0, 1]; the scaling is simple, just a division by 255.

  • .Normalize then applies the formula (x - mean) / std to each element after .ToTensor has mapped the input into [0, 1], standardizing the data around the given mean. Note that with MNIST's mean 0.1307 and std 0.3081 the result is not confined to (-1, 1), as the check below shows.
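A minimal numeric sanity check of these two steps, done by hand on a single pixel with the same mean and std used by the loaders below:

raw = 255                   # a white pixel in the original image
x = raw / 255.0             # what ToTensor does: scale into [0, 1] -> 1.0
y = (x - 0.1307) / 0.3081   # what Normalize does: (x - mean) / std
print(y)                    # about 2.82, clearly outside (-1, 1)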
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./mnist_data',    # dataset root
                   train=True,        # if True, build the dataset from training.pt
                   download=True,     # if True, download the data automatically

                   transform=transforms.Compose([
                              transforms.ToTensor(),
                              transforms.Normalize((0.1307,), (0.3081,)) # mean and std over all image pixels
                   ])),
    batch_size=batch_size,
    shuffle=True,
    **kwargs)            # kwargs is the GPU setting from above
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./mnist_data',
                   train=False,               # if False, build the dataset from test.pt
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=test_batch_size,
    shuffle=True,
    **kwargs)

A quick check:

print(train_loader.dataset[0][0].shape)            # torch.Size([1, 28, 28])
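Since the loader is an iterator, one batch can also be pulled out directly; a minimal sketch using the train_loader defined above:

images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([32, 1, 28, 28]), one batch of 32 images
print(labels.shape)   # torch.Size([32]), one label per image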

2. Building the model

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)       # (in_channels, out_channels, kernel_size, stride=1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)      # the previous layer's out_channels is this layer's in_channels, hence 20

        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)             # 10-way classification

    def forward(self, x):              # input is a batch of handwritten digits, (N, 1, 28, 28), N = batch_size
        x = F.relu(self.conv1(x))      # x = (N, 20, 24, 24)
        x = F.max_pool2d(x, 2, 2)      # x = (N, 20, 12, 12)
        x = F.relu(self.conv2(x))      # x = (N, 50, 8, 8)
        x = F.max_pool2d(x, 2, 2)      # x = (N, 50, 4, 4)
        x = x.view(-1, 4*4*50)         # x = (N, 4*4*50)
        x = F.relu(self.fc1(x))        # x = (N, 4*4*50) * (4*4*50, 500) = (N, 500)
        x = self.fc2(x)                # x = (N, 500) * (500, 10) = (N, 10)
        return F.log_softmax(x, dim=1) # softmax with log; returns 10 log-probabilities per image
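To verify the shape arithmetic in the comments above, a small sketch (with a random stand-in batch) pushes a dummy input through the network:

net = Net()
dummy = torch.randn(32, 1, 28, 28)   # fake batch of 32 single-channel 28x28 images
out = net(dummy)
print(out.shape)                     # torch.Size([32, 10])
print(out.exp().sum(dim=1))          # each row of exp(log_softmax) sums to 1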

Model initialization:

lr = 0.01
momentum = 0.5
model = Net().to(device) # instantiate the model and move it to the device
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum) # define the optimizer

3. Training function

def train(model, device, train_loader, optimizer, epoch, log_interval=100):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)              # output has shape [N, 10]; data here becomes the forward() argument x
        loss = F.nll_loss(output, target) # nll_loss averages over the batch by default
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print("Train Epoch: {} [{}/{} ({:0f}%)]\tLoss: {:.6f}".format(
                epoch,
                batch_idx * len(data),                # e.g. 100 * 32
                len(train_loader.dataset),            # 60000
                100. * batch_idx / len(train_loader), # len(train_loader) = 60000 / 32 = 1875
                loss.item()
            ))
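Since the model ends in F.log_softmax, F.nll_loss is the matching loss here. As a hedged side note, the combination is equivalent to F.cross_entropy applied to raw logits, which the following sketch (with made-up tensors, reusing the imports from the top of the post) confirms:

logits = torch.randn(4, 10)          # fake raw scores for 4 samples
target = torch.tensor([3, 1, 0, 7])  # fake class labels

loss_a = F.nll_loss(F.log_softmax(logits, dim=1), target)  # what this post does
loss_b = F.cross_entropy(logits, target)                   # single fused call
print(torch.allclose(loss_a, loss_b))                      # True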

4. Test function

def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # reduction='sum' sums the loss over the batch; the default is 'mean'

            pred = output.argmax(dim=1, keepdim=True)                        # pred.shape = torch.Size([32, 1])

            correct += pred.eq(target.view_as(pred)).sum().item()            # target.shape = torch.Size([32])

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
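The argmax/eq accounting is easy to trace on a toy tensor; a minimal sketch with made-up values:

output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # fake scores for 2 samples, 2 classes
target = torch.tensor([1, 1])                    # true labels

pred = output.argmax(dim=1, keepdim=True)        # tensor([[1], [0]])
hits = pred.eq(target.view_as(pred))             # tensor([[True], [False]])
print(hits.sum().item())                         # 1 correct prediction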

Run:

epochs = 2
for epoch in range(1, epochs + 1):
    train(model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader)

save_model = True
if (save_model):
    torch.save(model.state_dict(), "mnist_cnn.pt")    # a dict; model.state_dict() saves only the model parameters
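To use the saved parameters later, the usual counterpart is load_state_dict; a minimal sketch, assuming the same Net class is available:

model2 = Net()                                      # fresh, untrained instance of the same architecture
model2.load_state_dict(torch.load("mnist_cnn.pt"))  # load the saved parameter dict
model2.to(device)
model2.eval()                                       # switch to evaluation mode before inference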

Training output:

Train Epoch: 1 [0/60000 (0.000000%)]    Loss: 2.297938
Train Epoch: 1 [3200/60000 (5.333333%)] Loss: 0.570356
Train Epoch: 1 [6400/60000 (10.666667%)]        Loss: 0.207343
Train Epoch: 1 [9600/60000 (16.000000%)]        Loss: 0.094465
Train Epoch: 1 [12800/60000 (21.333333%)]       Loss: 0.178536
Train Epoch: 1 [16000/60000 (26.666667%)]       Loss: 0.041227
Train Epoch: 1 [19200/60000 (32.000000%)]       Loss: 0.136767
Train Epoch: 1 [22400/60000 (37.333333%)]       Loss: 0.051781
Train Epoch: 1 [25600/60000 (42.666667%)]       Loss: 0.112557
Train Epoch: 1 [28800/60000 (48.000000%)]       Loss: 0.058771
Train Epoch: 1 [32000/60000 (53.333333%)]       Loss: 0.085873
Train Epoch: 1 [35200/60000 (58.666667%)]       Loss: 0.188629
Train Epoch: 1 [38400/60000 (64.000000%)]       Loss: 0.092433
Train Epoch: 1 [41600/60000 (69.333333%)]       Loss: 0.075023
Train Epoch: 1 [44800/60000 (74.666667%)]       Loss: 0.038028
Train Epoch: 1 [48000/60000 (80.000000%)]       Loss: 0.038069
Train Epoch: 1 [51200/60000 (85.333333%)]       Loss: 0.052910
Train Epoch: 1 [54400/60000 (90.666667%)]       Loss: 0.012891
Train Epoch: 1 [57600/60000 (96.000000%)]       Loss: 0.033460

Test set: Average loss: 0.0653, Accuracy: 9799/10000 (98%)

Train Epoch: 2 [0/60000 (0.000000%)]    Loss: 0.057514
Train Epoch: 2 [3200/60000 (5.333333%)] Loss: 0.030869
Train Epoch: 2 [6400/60000 (10.666667%)]        Loss: 0.091362
Train Epoch: 2 [9600/60000 (16.000000%)]        Loss: 0.059315
Train Epoch: 2 [12800/60000 (21.333333%)]       Loss: 0.031055
Train Epoch: 2 [16000/60000 (26.666667%)]       Loss: 0.012735
Train Epoch: 2 [19200/60000 (32.000000%)]       Loss: 0.104735
Train Epoch: 2 [22400/60000 (37.333333%)]       Loss: 0.132139
Train Epoch: 2 [25600/60000 (42.666667%)]       Loss: 0.010015
Train Epoch: 2 [28800/60000 (48.000000%)]       Loss: 0.012915
Train Epoch: 2 [32000/60000 (53.333333%)]       Loss: 0.038762
Train Epoch: 2 [35200/60000 (58.666667%)]       Loss: 0.015236
Train Epoch: 2 [38400/60000 (64.000000%)]       Loss: 0.163834
Train Epoch: 2 [41600/60000 (69.333333%)]       Loss: 0.064514
Train Epoch: 2 [44800/60000 (74.666667%)]       Loss: 0.007881
Train Epoch: 2 [48000/60000 (80.000000%)]       Loss: 0.074057
Train Epoch: 2 [51200/60000 (85.333333%)]       Loss: 0.209342
Train Epoch: 2 [54400/60000 (90.666667%)]       Loss: 0.018052
Train Epoch: 2 [57600/60000 (96.000000%)]       Loss: 0.012391

Test set: Average loss: 0.0460, Accuracy: 9851/10000 (99%)
