1. Layers and Blocks
import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
    def __init__(self):
        # Call the constructor of MLP's parent class Module to perform the
        # necessary initialization; this way, other function arguments can
        # also be specified when the class is instantiated
        super(MLP, self).__init__()
        self.hidden = nn.Linear(20, 256)
        self.out = nn.Linear(256, 10)

    # Define the forward propagation of the model, i.e., how to produce the
    # required model output from the input x
    def forward(self, x):
        # Note that we use the version of the ReLU function defined in the
        # nn.functional module here
        return self.out(F.relu(self.hidden(x)))
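# A minimal usage sketch: the input width 20 and batch size 2 are
# illustrative choices matching the layer sizes above
net = MLP()
X = torch.rand(2, 20)
print(net(X).shape)  # expected: torch.Size([2, 10])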
# The sequential block
class MySequential(nn.Module):
    def __init__(self, *args):
        # *args collects the positional arguments: any number of arguments
        # (each an instance of a Module subclass) are packed into a tuple
        super().__init__()
        for idx, module in enumerate(args):
            # module is an instance of a Module subclass; we save it in the
            # Module class's member variable _modules, whose type is OrderedDict
            self._modules[str(idx)] = module

    def forward(self, x):
        # OrderedDict guarantees that members are traversed in the order
        # they were added, so each block's output feeds the next block
        for block in self._modules.values():
            x = block(x)
        return x
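# A minimal check that MySequential behaves like nn.Sequential
# (the layer sizes here are illustrative assumptions)
net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
X = torch.rand(2, 20)
print(net(X).shape)  # expected: torch.Size([2, 10])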
2. Parameter Management
import torch
from torch import nn
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = torch.rand(size=(2, 4))
net(X)
# Parameter access
# The model behaves like a list: each layer's parameters live in its
# attributes, and we can index into the model to access any layer
print(net[2].state_dict())  # inspect the parameters of the second fully-connected layer
# 1. Target parameters
# Extract the bias from the second fully-connected layer
print(type(net[2].bias))
print(net[2].bias)
print(net[2].bias.data)
print(net[2].weight.grad is None)  # True: no backward pass has been run yet
# 2. Access all parameters at once
print(*[(name, param.shape) for name, param in net[0].named_parameters()])  # the * unpacks the list
print(*[(name, param.shape) for name, param in net.named_parameters()])
print(net.state_dict()['2.bias'].data)
# 3. Collect parameters from nested blocks
def block1():
    return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())

def block2():
    net = nn.Sequential()
    for i in range(4):
        net.add_module(f'block {i}', block1())
    return net
rgnet = nn.Sequential(block2(), nn.Linear(4, 1))
rgnet(X)
print(rgnet)
# The layers are hierarchically nested, so we can access them the same way
# we would index into nested lists
print(rgnet[0][1][0].bias.data)
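# A sketch of another way to see the nesting: the qualified parameter names
# produced by named_parameters() encode the block structure
for name, param in rgnet.named_parameters():
    print(name, param.shape)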
# Parameter initialization
# 1. Built-in initialization
# Normal distribution
def init_normal(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, mean=0, std=0.01)
        nn.init.zeros_(m.bias)

net.apply(init_normal)  # traverses the whole network, initializing every layer
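# A quick check (sketch) that the initialization took effect;
# the exact weight values vary from run to run
print(net[0].weight.data[0], net[0].bias.data[0])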
# Initialize to a constant
def init_constant(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 1)
        nn.init.zeros_(m.bias)

net.apply(init_constant)
# Different initialization methods can also be applied to particular blocks
def init_xavier(m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)  # Xavier initialization with a uniform distribution

def init_42(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 42)

net[0].apply(init_xavier)
net[2].apply(init_42)
print(net[0].weight.data[0])
print(net[2].weight.data)
# 2. Custom initialization
def my_init(m):
    if type(m) == nn.Linear:
        print('Init', *[(name, param.shape)
                        for name, param in m.named_parameters()][0])
        nn.init.uniform_(m.weight, -10, 10)
        # keep only weights whose absolute value exceeds 5; zero out the rest
        m.weight.data *= m.weight.data.abs() > 5

net.apply(my_init)
print(net[0].weight[:2])
# We can also set parameters directly
net[0].weight.data[0, 0] = 42
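# Verify the direct assignment (sketch): the first row now starts with 42
print(net[0].weight.data[0])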
# 3. Tied parameters
# Share parameters across multiple layers
shared = nn.Linear(8, 8)
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), shared, nn.ReLU(),
                    shared, nn.ReLU(), nn.Linear(8, 1))
net(X)
print(net[2].weight.data[0] == net[4].weight.data[0])
net[2].weight.data[0, 0] = 100
print(net[2].weight.data[0] == net[4].weight.data[0])
# Both outputs are all True, which shows the parameters are indeed tied:
# modifying one layer's weights changes the other's as well
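# A sketch of one consequence of tying: net[2] and net[4] are the very same
# module object, so after a backward pass they share a single .grad tensor
# into which the gradients from both uses are accumulated
net.zero_grad()
net(X).sum().backward()  # an arbitrary scalar loss, just for illustration
print(net[2].weight.grad is net[4].weight.grad)  # True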
3. Custom Layers
import torch
import torch.nn.functional as F
from torch import nn
# A layer without parameters
class CenteredLayer(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, X):
        return X - X.mean()
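# A quick sanity check: the output of CenteredLayer has mean zero
layer = CenteredLayer()
print(layer(torch.FloatTensor([1.0, 2.0, 3.0, 4.0, 5.0])))  # tensor([-2., -1., 0., 1., 2.])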
# A layer with parameters
class MyLinear(nn.Module):
    def __init__(self, in_units, out_units):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(in_units, out_units))
        self.bias = nn.Parameter(torch.randn(out_units,))  # the trailing comma is purely cosmetic

    def forward(self, X):
        # use the Parameters themselves (not .data) so autograd tracks them
        linear = torch.matmul(X, self.weight) + self.bias
        return F.relu(linear)
dense = MyLinear(5, 3)
print(dense.weight)
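# A forward-pass sketch with a matching input width (2 samples, 5 features)
print(dense(torch.rand(2, 5)))  # shape (2, 3), non-negative after ReLU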
4. Reading and Writing Files
import torch
from torch import nn
from torch.nn import functional as F
# Loading and saving tensors
x = torch.arange(4)
torch.save(x, 'x-file')
x2 = torch.load('x-file')
print(x2)
# Store a list of tensors
y = torch.zeros(4)
torch.save([x, y], 'x-files')
x2, y2 = torch.load('x-files')
print((x2, y2))
# Read or write a dictionary that maps strings to tensors
mydict = {'x': x, 'y': y}
torch.save(mydict, 'mydict')
mydict2 = torch.load('mydict')
print(mydict2)
# Loading and saving model parameters
class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        return self.output(F.relu(self.hidden(x)))
net = MLP()
X = torch.randn(size=(2, 20))
Y = net(X)
# Save the model parameters
torch.save(net.state_dict(), 'mlp.params')
# Recover the model: instantiate a fresh MLP and load the stored parameters
clone = MLP()
clone.load_state_dict(torch.load('mlp.params'))
clone.eval()
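# Since both instances now hold identical parameters, the same input should
# produce identical outputs (a quick verification sketch)
Y_clone = clone(X)
print(Y_clone == Y)  # a tensor of all True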