PyTorch 1: A Simple Convolutional Neural Network for MNIST Classification
2023-09-14 09:15:04
1. Importing the basic libraries
import torch
import torchvision
from torch.utils.data import DataLoader
2. Loading the MNIST data
train_data = torchvision.datasets.MNIST(
    './mnist', train=True, transform=torchvision.transforms.ToTensor(), download=True
)
test_data = torchvision.datasets.MNIST(
    './mnist', train=False, transform=torchvision.transforms.ToTensor()
)
print("train_data:", train_data.data.size())
print("train_labels:", train_data.targets.size())
print("test_data:", test_data.data.size())
train_loader = DataLoader(dataset=train_data, batch_size=256, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=256)
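To confirm that the loaders produce what the network expects, it can help to pull a single batch and inspect its shape. The check below is only illustrative and is not part of the training script:

batch_x, batch_y = next(iter(train_loader))
print(batch_x.shape)  # torch.Size([256, 1, 28, 28]): batch of 256 grayscale 28x28 images
print(batch_y.shape)  # torch.Size([256]): one integer label per image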
3. Building the classification network
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, 3, 1, 1),   # 1 -> 32 channels, 28x28 preserved
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2)              # 28x28 -> 14x14
        )
        self.conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, 3, 1, 1),  # 32 -> 64 channels
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2)              # 14x14 -> 7x7
        )
        self.conv3 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 64, 3, 1, 1),  # 64 -> 64 channels
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2)              # 7x7 -> 3x3
        )
        self.dense = torch.nn.Sequential(
            torch.nn.Linear(64, 32),
            torch.nn.ReLU(),
            torch.nn.Linear(32, 10)
        )

    def forward(self, x):
        conv1_out = self.conv1(x)
        conv2_out = self.conv2(conv1_out)
        conv3_out = self.conv3(conv2_out)
        # Adaptive pooling to output size (1, 1), i.e. global average pooling
        res = torch.nn.functional.adaptive_avg_pool2d(conv3_out, (1, 1))
        # Flatten to (batch_size, 64)
        res = res.view(res.size(0), -1)
        out = self.dense(res)
        return out
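Before wiring up the training loop, a quick sanity check of the architecture is to push a dummy MNIST-sized input through the network and confirm that it produces one score per class. This is a minimal sketch for checking shapes only:

net = Net()
dummy = torch.zeros(1, 1, 28, 28)  # a single fake 28x28 grayscale image
print(net(dummy).shape)            # expected: torch.Size([1, 10])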
4. Initializing the device, model, loss, and optimizer
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = Net().to(device)
print(model)
optimizer = torch.optim.Adam(model.parameters())
loss_func = torch.nn.CrossEntropyLoss()
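Note that torch.nn.CrossEntropyLoss combines log-softmax and negative log-likelihood internally, which is why the network above returns raw logits (no final softmax) and the labels are plain integer class indices. The toy tensors below are invented purely to illustrate the expected inputs:

logits = torch.randn(4, 10)           # raw, unnormalized scores for a batch of 4
targets = torch.tensor([3, 0, 7, 1])  # ground-truth class indices
print(loss_func(logits, targets))     # softmax and NLL are applied inside the loss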
5. Training and testing
def train():
    model.train()
    train_loss = 0.
    train_acc = 0.
    for batch_x, batch_y in train_loader:
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        # print(batch_x.shape, batch_y.shape)
        optimizer.zero_grad()            # reset gradients
        out = model(batch_x)             # forward pass
        loss = loss_func(out, batch_y)   # compute the loss
        loss.backward()                  # backward pass
        optimizer.step()                 # update the parameters
        # ------ accumulate loss and accuracy
        train_loss += loss.item() * batch_x.size(0)
        # torch.max(out, 1) returns (max values, indices of the max) along dim 1
        pred = torch.max(out, 1)[1]
        train_correct = (pred == batch_y).sum()
        train_acc += train_correct.item()
    print('Train Loss: {:.6f}, Acc: {:.6f}'.format(
        train_loss / len(train_data), train_acc / len(train_data)))
def evaluate():
    model.eval()
    eval_loss = 0.
    eval_acc = 0.
    # gradients are not needed during evaluation
    with torch.no_grad():
        for batch_x, batch_y in test_loader:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            out = model(batch_x)
            loss = loss_func(out, batch_y)
            eval_loss += loss.item() * batch_x.size(0)
            pred = torch.max(out, 1)[1]
            num_correct = (pred == batch_y).sum()
            eval_acc += num_correct.item()
    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
        eval_loss / len(test_data), eval_acc / len(test_data)))
for epoch in range(10):
    print('epoch {}'.format(epoch + 1))
    # training -----------------------------
    train()
    # evaluation ---------------------------
    evaluate()
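Once the ten epochs finish, the trained model can be used directly for prediction. A minimal sketch (the index 0 is arbitrary, chosen just for illustration):

model.eval()
img, label = test_data[0]  # one test image (1x28x28 tensor) and its integer label
with torch.no_grad():
    pred = model(img.unsqueeze(0).to(device)).argmax(dim=1).item()
print('predicted: {}, ground truth: {}'.format(pred, label))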
6. Full code
import torch
import torchvision
from torch.utils.data import DataLoader

train_data = torchvision.datasets.MNIST(
    './mnist', train=True, transform=torchvision.transforms.ToTensor(), download=True
)
test_data = torchvision.datasets.MNIST(
    './mnist', train=False, transform=torchvision.transforms.ToTensor()
)
print("train_data:", train_data.data.size())
print("train_labels:", train_data.targets.size())
print("test_data:", test_data.data.size())
train_loader = DataLoader(dataset=train_data, batch_size=256, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=256)


class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, 3, 1, 1),   # 1 -> 32 channels, 28x28 preserved
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2)              # 28x28 -> 14x14
        )
        self.conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, 3, 1, 1),  # 32 -> 64 channels
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2)              # 14x14 -> 7x7
        )
        self.conv3 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 64, 3, 1, 1),  # 64 -> 64 channels
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2)              # 7x7 -> 3x3
        )
        self.dense = torch.nn.Sequential(
            torch.nn.Linear(64, 32),
            torch.nn.ReLU(),
            torch.nn.Linear(32, 10)
        )

    def forward(self, x):
        conv1_out = self.conv1(x)
        conv2_out = self.conv2(conv1_out)
        conv3_out = self.conv3(conv2_out)
        # Adaptive pooling to output size (1, 1), i.e. global average pooling
        res = torch.nn.functional.adaptive_avg_pool2d(conv3_out, (1, 1))
        # Flatten to (batch_size, 64)
        res = res.view(res.size(0), -1)
        out = self.dense(res)
        return out


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = Net().to(device)
print(model)
optimizer = torch.optim.Adam(model.parameters())
loss_func = torch.nn.CrossEntropyLoss()


def train():
    model.train()
    train_loss = 0.
    train_acc = 0.
    for batch_x, batch_y in train_loader:
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        # print(batch_x.shape, batch_y.shape)
        optimizer.zero_grad()            # reset gradients
        out = model(batch_x)             # forward pass
        loss = loss_func(out, batch_y)   # compute the loss
        loss.backward()                  # backward pass
        optimizer.step()                 # update the parameters
        # ------ accumulate loss and accuracy
        train_loss += loss.item() * batch_x.size(0)
        # torch.max(out, 1) returns (max values, indices of the max) along dim 1
        pred = torch.max(out, 1)[1]
        train_correct = (pred == batch_y).sum()
        train_acc += train_correct.item()
    print('Train Loss: {:.6f}, Acc: {:.6f}'.format(
        train_loss / len(train_data), train_acc / len(train_data)))


def evaluate():
    model.eval()
    eval_loss = 0.
    eval_acc = 0.
    # gradients are not needed during evaluation
    with torch.no_grad():
        for batch_x, batch_y in test_loader:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            out = model(batch_x)
            loss = loss_func(out, batch_y)
            eval_loss += loss.item() * batch_x.size(0)
            pred = torch.max(out, 1)[1]
            num_correct = (pred == batch_y).sum()
            eval_acc += num_correct.item()
    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
        eval_loss / len(test_data), eval_acc / len(test_data)))


for epoch in range(10):
    print('epoch {}'.format(epoch + 1))
    # training -----------------------------
    train()
    # evaluation ---------------------------
    evaluate()