Advanced PyTorch Tutorial: Custom Models, Data Loading, and Moving Data Between Devices

Having covered PyTorch's core components in depth, we now move on to a few more advanced topics: how to define custom models, how to load custom datasets, and how to move data between devices (for example, between the CPU and a GPU).

1. Custom Models

Although PyTorch ships with many prebuilt layers, in some cases you will need to define your own model. You can do this by subclassing torch.nn.Module and implementing the forward method:

import torch.nn as nn
import torch.nn.functional as F

class CustomModel(nn.Module):
    def __init__(self):
        super(CustomModel, self).__init__()
        # Two convolutional layers with max pooling in between
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Three fully connected layers; 16 * 5 * 5 is the flattened feature map size
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)  # flatten before the fully connected layers
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = CustomModel()
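
As a quick check that the shapes line up, you can push a dummy batch through the network. The 16 * 5 * 5 input size of fc1 implies 3-channel 32x32 images (CIFAR-10-sized), so the example below assumes that input shape:

import torch

# A dummy batch of four 3x32x32 images, just to verify the output shape
dummy = torch.randn(4, 3, 32, 32)
out = net(dummy)
print(out.shape)  # torch.Size([4, 10]) - one 10-class score vector per image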

2. Custom Data Loading

PyTorch's DataLoader class makes loading data straightforward, but sometimes you need to load your own data. You can do this by subclassing torch.utils.data.Dataset and implementing the __getitem__ and __len__ methods:

from torch.utils.data import Dataset


class CustomDataset(Dataset):
    def __init__(self, data, labels):
        self.data = data
        self.labels = labels

    def __getitem__(self, index):
        # Return one (sample, label) pair
        return self.data[index], self.labels[index]

    def __len__(self):
        # Total number of samples in the dataset
        return len(self.data)
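
With the dataset defined, you can hand it to a standard DataLoader for batching and shuffling. The sketch below fills it with random tensors as placeholder data and labels, purely for illustration:

import torch
from torch.utils.data import DataLoader

# Placeholder data: 100 samples with 10 features each, plus integer class labels
data = torch.randn(100, 10)
labels = torch.randint(0, 2, (100,))

dataset = CustomDataset(data, labels)
loader = DataLoader(dataset, batch_size=16, shuffle=True)

for batch_data, batch_labels in loader:
    print(batch_data.shape, batch_labels.shape)  # torch.Size([16, 10]) torch.Size([16])
    break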

3. Moving Data Between Devices

In PyTorch you can speed up training by moving the model and data onto a GPU. This is done by calling the .to method:

import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Move the model's parameters and buffers to the selected device
net.to(device)

# Inside the training loop, move each batch's inputs and targets to the same device
inputs, labels = data[0].to(device), data[1].to(device)
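
Putting the pieces together, the per-batch transfers normally sit inside the training loop. The following is a minimal sketch, assuming trainloader is a DataLoader yielding (inputs, labels) pairs and using CrossEntropyLoss with SGD as placeholder choices:

import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

net.to(device)  # move the model parameters once, before training starts

for data in trainloader:  # trainloader is assumed to yield (inputs, labels) batches
    # move each batch to the same device as the model
    inputs, labels = data[0].to(device), data[1].to(device)

    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()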

These are simple examples of defining custom models, loading custom data, and moving data between devices in PyTorch. These advanced techniques give you more flexibility to adapt PyTorch to the needs of your specific project.
