100字范文,内容丰富有趣,生活中的好帮手!
100字范文 > PyTorch学习(九)--用CNN模型识别手写数字数据集MNIST

PyTorch学习(九)--用CNN模型识别手写数字数据集MNIST

时间:2019-11-24 19:45:45

相关推荐

PyTorch学习(九)--用CNN模型识别手写数字数据集MNIST

什么是MNIST数据集?

直接看图!!它就是一套手写数字的图片,规格是(1x28x28):单通道,横竖都是28个像素,下面是前20个图片。我们今天的目的就是用卷积神经网络去识别这些数字到底是多少。

话不多说,咱们边看代码边聊,相信你一定能和我一起看懂它。

import torch
from torch.utils.data import DataLoader      # batched iteration over the dataset
from torchvision import transforms           # raw-data preprocessing
from torchvision import datasets             # PyTorch ships MNIST ready to use
import torch.nn.functional as F              # activation functions
import torch.optim as optim

batch_size = 64

# The images arrive as PIL objects; convert them to tensors the model can train on.
# BUG FIX: the original read "pose([...])" (garbled) — the correct call is
# transforms.Compose.
transform = transforms.Compose([transforms.ToTensor()])

# Load the training set. With download=True the data is fetched automatically
# when it is not already present under `root`.
train_dataset = datasets.MNIST(root='../data', train=True, download=True,
                               transform=transform)
train_loader = DataLoader(dataset=train_dataset, shuffle=True,
                          batch_size=batch_size)

# Load the test set the same way (no shuffling needed for evaluation).
test_dataset = datasets.MNIST(root='../data', train=False, download=True,
                              transform=transform)
test_loader = DataLoader(dataset=test_dataset, shuffle=False,
                         batch_size=batch_size)


class Net(torch.nn.Module):
    """Small CNN for MNIST: two conv+pool stages followed by a linear classifier."""

    def __init__(self):
        super(Net, self).__init__()
        # Input images are single-channel (1x28x28); the first conv maps
        # 1 -> 10 channels with a 5x5 kernel.
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        # Second conv maps 10 -> 20 channels, again with a 5x5 kernel.
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        # 2x2 max pooling, shared by both stages.
        self.pooling = torch.nn.MaxPool2d(2)
        # After two conv+pool stages the feature map is 20x4x4 = 320 values;
        # the linear layer maps them to 10 class scores (digits 0-9).
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        """Map a batch of images (n, 1, 28, 28) to class scores (n, 10)."""
        batch_size = x.size(0)  # keep the batch dimension for the flatten below
        # conv -> pool -> relu, twice.
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        # Flatten the (already downsampled) 20x4x4 feature map to feed the
        # final fully-connected layer.
        x = x.view(batch_size, -1)
        # Raw class scores; CrossEntropyLoss applies log-softmax internally.
        x = self.fc(x)
        return x


model = Net()
# Move computation to the GPU when one is available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

# Loss: distance between the model's outputs and the ground-truth labels.
criterion = torch.nn.CrossEntropyLoss()
# Optimizer: updates the layer weights from the back-propagated gradients.
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.5)


def train(epoch):
    """Run one training epoch, printing the mean batch loss every 300 batches."""
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()              # clear gradients from the last step
        outputs = model(inputs)            # forward pass
        loss = criterion(outputs, target)  # compare with the labels
        loss.backward()                    # back-propagate gradients
        optimizer.step()                   # update weights
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            # BUG FIX: the original divided by 2000 although the loss is
            # accumulated over a 300-batch window, so the printed value was
            # not the mean batch loss. Divide by the actual window size.
            print('[%d, %5d] loss: %.3f'
                  % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


def test():
    """Evaluate on the test set and print the overall accuracy."""
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data in test_loader:
            inputs, target = data
            inputs, target = inputs.to(device), target.to(device)
            outputs = model(inputs)
            # The predicted digit is the class with the highest score.
            _, predicted = torch.max(outputs.data, dim=1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    print('Accuracy on test set: %d %% [%d/%d]'
          % (100 * correct / total, correct, total))


if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        # Only evaluate after the final (10th) epoch.
        if epoch % 10 == 9:
            test()

结果:

[1, 300] loss: 0.054

[1, 600] loss: 0.018

[1, 900] loss: 0.014

[2, 300] loss: 0.011

[2, 600] loss: 0.009

[2, 900] loss: 0.009

[3, 300] loss: 0.008

[3, 600] loss: 0.007

[3, 900] loss: 0.007

[4, 300] loss: 0.006

[4, 600] loss: 0.006

[4, 900] loss: 0.006

[5, 300] loss: 0.005

[5, 600] loss: 0.005

[5, 900] loss: 0.005

[6, 300] loss: 0.005

[6, 600] loss: 0.004

[6, 900] loss: 0.005

[7, 300] loss: 0.004

[7, 600] loss: 0.004

[7, 900] loss: 0.004

[8, 300] loss: 0.003

[8, 600] loss: 0.003

[8, 900] loss: 0.004

[9, 300] loss: 0.003

[9, 600] loss: 0.003

[9, 900] loss: 0.003

[10, 300] loss: 0.003

[10, 600] loss: 0.003

[10, 900] loss: 0.003

Accuracy on test set: 99 % [9903/10000]

如果你觉得我说的那些你还是听不懂,去看看我前面发布的文章呀!!!!

如果你觉得我说的太罗嗦了,可以复制下面的代码,和上面的一样,只不过少了注释

# Same MNIST CNN training script as above, kept deliberately terse.
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
import torch.nn.functional as F
import torch.optim as optim

batch_size = 64
# BUG FIX: the original read "pose([...])"; the correct call is
# transforms.Compose.
transform = transforms.Compose([transforms.ToTensor()])

train_dataset = datasets.MNIST(root='../data', train=True, download=True,
                               transform=transform)
train_loader = DataLoader(dataset=train_dataset, shuffle=True,
                          batch_size=batch_size)
test_dataset = datasets.MNIST(root='../data', train=False, download=True,
                              transform=transform)
test_loader = DataLoader(dataset=test_dataset, shuffle=False,
                         batch_size=batch_size)


class Net(torch.nn.Module):
    """Two conv+pool stages and a linear classifier for 28x28 digit images."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        # 20 channels x 4 x 4 spatial positions = 320 inputs to the classifier.
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        # Flatten data from (n, 1, 28, 28) to (n, 320) after the conv stack.
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        x = x.view(batch_size, -1)  # flatten
        x = self.fc(x)
        return x


model = Net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.5)


def train(epoch):
    """One training epoch; reports the mean batch loss every 300 batches."""
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            # BUG FIX: divide by the 300-batch window, not 2000, so the
            # printed value is the actual mean batch loss.
            print('[%d, %5d] loss: %.3f'
                  % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


def test():
    """Print classification accuracy over the test set."""
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            inputs, target = data
            inputs, target = inputs.to(device), target.to(device)
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, dim=1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    print('Accuracy on test set: %d %% [%d/%d]'
          % (100 * correct / total, correct, total))


if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        if epoch % 10 == 9:
            test()

本内容不代表本网观点和政治立场,如有侵犯你的权益请联系我们处理。
网友评论
网友评论仅供其表达个人看法,并不表明网站立场。