vlambda博客
学习文章列表

打印网络模型的conv.weight(一)


今天现讲常见的几种简单的网络模型搭建,利用

for name, module in model.named_modules(): print('modules:', module)

打印conv.weight遇到的问题。



一、首先、我们要知道打印出来的是什么结构

print('modules:', module)

给一个简单网络, 来看一下

import torchimport torch.nn as nn class MyNet(nn.Module): def __init__(self, inplanes, planes): super(MyNet, self,).__init__() self.conv_block=torch.nn.Sequential() self.conv_block.add_module("conv1",torch.nn.Conv2d(inplanes, planes, 3, 1, 1)) self.conv_block.add_module("relu1",torch.nn.ReLU()) self.conv_block.add_module("pool1",torch.nn.MaxPool2d(2))  self.dense_block = torch.nn.Sequential() self.dense_block.add_module("dense1",torch.nn.Linear(32 * 3 * 3, 128)) self.dense_block.add_module("relu2",torch.nn.ReLU()) self.dense_block.add_module("dense2",torch.nn.Linear(128, 10))  def forward(self, x): conv_out = self.conv_block(x) res = conv_out.view(conv_out.size(0), -1) out = self.dense_block(res) return out model = MyNet(3,32)
# print(model.conv_block.conv1.weight)
for name, module in model.named_modules():
print('modules:', module) print("******************")
modules: MyNet( (conv_block): Sequential( (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (relu1): ReLU() (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (dense_block): Sequential( (dense1): Linear(in_features=288, out_features=128, bias=True) (relu2): ReLU() (dense2): Linear(in_features=128, out_features=10, bias=True) ))******************modules: Sequential( (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (relu1): ReLU() (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))******************modules: Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))******************modules: ReLU()******************modules: MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)******************modules: Sequential( (dense1): Linear(in_features=288, out_features=128, bias=True) (relu2): ReLU() (dense2): Linear(in_features=128, out_features=10, bias=True))******************modules: Linear(in_features=288, out_features=128, bias=True)******************modules: ReLU()******************modules: Linear(in_features=128, out_features=10, bias=True)******************

我们可以看到,打印出来的结构是“总-分”形式的。即先显示总体结构, 再细分到每一个层。 

这个概念很重要, 后面会用到。



二、打印conv.weight


1)直接定义一个Conv2d,也能打印weight

x = nn.Conv2d(in_channels=32, out_channels=6, kernel_size=3, stride=1 ,padding=0)print(x.weight)



2)全部自己定义网络层,不要Sequential构建。利用

for name, module in model.named_modules():


import torchimport torch.nn as nn 
class Bottleneck(nn.Module): expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride
def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out)
out = self.conv2(out) out = self.bn2(out) out = self.relu(out)
out = self.conv3(out) out = self.bn3(out)
if self.downsample is not None: residual = self.downsample(x)
out += residual out = self.relu(out) return out
x = Bottleneck(32,32)# print(x.conv1.weight)

for name, module in x.named_modules(): print('modules:', module)
print(module.conv1.weight) # 只能打印一个
modules: Bottleneck( (conv1): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True))Parameter containing:tensor([[[[ 0.0640]],
[[ 0.0129]],
[[ 0.0190]],
...,
[[ 0.1447]],
[[-0.0357]],
[[-0.1319]]],

[[[ 0.1504]],
[[-0.0666]],
[[ 0.0321]],
...,
[[-0.0997]],
[[ 0.0841]],
[[-0.1449]]],

[[[-0.0722]],
[[ 0.0735]],
[[-0.0563]],
...,
[[-0.1673]],
[[-0.0226]],
[[-0.1339]]],

...,

[[[ 0.0780]],
[[-0.0488]],
[[ 0.0030]],
...,
[[-0.1003]],
[[-0.0494]],
[[ 0.0911]]],

[[[ 0.1694]],
[[-0.0948]],
[[-0.0958]],
...,
[[ 0.0994]],
[[ 0.1627]],
[[-0.1114]]],

[[[-0.0183]],
[[-0.1526]],
[[-0.0757]],
...,
[[ 0.0558]],
[[-0.1081]],
[[ 0.0907]]]], requires_grad=True)modules: Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)


说明:  总-分的 总体打印出来了,module.conv1.weight也打印出来了。


但到了分的时候就报错了 “Sequential中没有conv1”之类的错误


我推测一个是module.conv1.weight的定义原因。第二个是因为module针对的是“总”而不是“分”


所以总体的都能打印出权值, 一旦带“分”就会报错。



2)、利用sequential搭建



import torchimport torch.nn as nn 
# x = nn.Conv2d(in_channels=32, out_channels=6, kernel_size=3, stride=1 ,padding=0)# print(x.weight)

class MyNet(nn.Module): def __init__(self, inplanes, planes): super(MyNet, self,).__init__() self.conv_block=torch.nn.Sequential() self.conv_block.add_module("conv1",torch.nn.Conv2d(inplanes, planes, 3, 1, 1)) self.conv_block.add_module("relu1",torch.nn.ReLU()) self.conv_block.add_module("pool1",torch.nn.MaxPool2d(2)) self.dense_block = torch.nn.Sequential() self.dense_block.add_module("dense1",torch.nn.Linear(32 * 3 * 3, 128)) self.dense_block.add_module("relu2",torch.nn.ReLU()) self.dense_block.add_module("dense2",torch.nn.Linear(128, 10)) def forward(self, x): conv_out = self.conv_block(x) res = conv_out.view(conv_out.size(0), -1) out = self.dense_block(res) return out model = MyNet(3,32)
# print(model.conv_block.conv1.weight)
for name, module in model.named_modules():
print('modules:', module)
print(module.conv_block.conv1.weight)



modules: MyNet( (conv_block): Sequential( (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (relu1): ReLU() (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (dense_block): Sequential( (dense1): Linear(in_features=288, out_features=128, bias=True) (relu2): ReLU() (dense2): Linear(in_features=128, out_features=10, bias=True) ))Parameter containing:tensor([[[[ 1.7723e-01, -1.5411e-01, -1.1020e-01], [ 1.4368e-01, 1.3152e-01, -8.4085e-02], [ 1.4314e-01, 1.6141e-01, -1.7310e-01]],
[[ 1.5719e-01, -6.7944e-02, 4.7269e-02], [ 1.2120e-01, -1.6804e-01, 7.2537e-02], [-1.4153e-01, -7.3249e-02, 1.8465e-01]],
[[-1.3284e-01, 4.5895e-02, -4.2830e-02], [ 1.6394e-02, -8.8726e-02, -1.2167e-01], [ 1.6000e-01, -1.6773e-01, 1.8218e-01]]],

[[[ 1.5271e-03, -1.4902e-01, 5.6462e-02], [ 2.9342e-02, 9.9406e-02, -9.6803e-02], [ 6.5731e-02, 1.0520e-03, 1.1914e-01]],
[[ 2.2494e-02, 1.2929e-01, 1.2066e-01], [-7.6994e-02, 2.3993e-02, 4.3274e-03], [-1.2490e-01, 1.6980e-01, -1.7477e-01]],
[[-1.8813e-01, 1.7437e-01, 1.9045e-01], [-4.5048e-02, 8.0535e-02, 1.4025e-01], [ 1.0111e-01, 8.4647e-02, -4.2017e-02]]],

[[[-1.8284e-01, 1.0795e-01, -7.9599e-02], [-5.9434e-02, 1.1489e-01, 1.5046e-01], [-1.2537e-01, -4.5372e-02, -1.2263e-01]],
[[-1.8446e-01, 6.2345e-02, -3.0811e-02], [ 2.2047e-02, -5.4291e-02, -1.4901e-01], [-6.4447e-02, 6.1447e-03, -1.8775e-01]],
[[ 1.6665e-04, -1.8464e-01, 1.1839e-01], [ 1.8734e-01, 1.0032e-02, -1.3797e-01], [ 5.4280e-03, -6.0442e-02, -1.0769e-01]]],

[[[ 1.5681e-01, 5.3734e-03, -1.8059e-01], [ 1.5029e-01, 6.8155e-02, 6.8749e-02], [ 1.8830e-01, -1.8841e-02, 3.1601e-02]],
[[-1.3593e-01, 8.3327e-03, 6.2958e-02], [ 1.8929e-04, -1.3514e-01, -7.9718e-02], [ 1.3897e-01, 1.1915e-01, 1.8830e-01]],
[[ 7.3816e-02, -1.3053e-01, 2.5840e-02], [-5.4686e-02, -2.6862e-02, -5.7559e-02], [-3.7206e-02, -1.9204e-01, -1.7588e-01]]],

[[[ 1.0703e-01, -8.2050e-02, -6.6084e-02], [ 2.1707e-02, 7.7292e-02, -9.4982e-02], [ 1.1991e-01, -6.9100e-02, -5.4638e-02]],
[[ 1.8477e-01, -4.3335e-02, 2.8409e-03], [ 1.0179e-01, -1.6307e-01, -6.4580e-03], [ 1.5121e-01, 1.1347e-01, -8.9041e-02]],
[[-8.0634e-02, -3.9794e-02, 1.4291e-01], [ 6.2002e-02, -1.0391e-01, -1.1690e-01], [ 9.6543e-02, 1.1869e-01, -6.6659e-02]]],

[[[ 7.0351e-02, -6.0988e-02, 1.5027e-01], [-8.9234e-02, -1.2537e-01, 1.1996e-01], [-1.1487e-01, 1.3092e-01, 6.6861e-02]],
[[ 1.6705e-01, 1.9406e-02, 2.9617e-03], [-1.7910e-02, 1.6199e-01, -2.8894e-02], [-1.5269e-01, -1.0791e-01, -3.0392e-02]],
[[-1.9767e-02, 7.7829e-02, -1.2146e-01], [-1.5302e-01, 1.3360e-01, -9.4132e-02], [ 3.1347e-02, -1.2343e-01, 1.0983e-01]]],

[[[-1.8602e-01, -6.3946e-02, 1.9647e-02], [ 6.0867e-02, 3.2062e-02, -1.4664e-03], [ 1.6645e-01, 3.4494e-02, 1.8877e-01]],
[[-9.9960e-02, -9.1339e-02, -8.3551e-02], [ 1.2969e-01, -1.8443e-02, 1.8456e-01], [ 1.5723e-02, 1.7418e-01, -1.6666e-01]],
[[-2.7917e-02, -1.7274e-02, 1.4394e-01], [-1.0080e-01, -4.7853e-02, -1.6490e-02], [-1.7133e-01, -3.0923e-02, -1.2278e-01]]],

[[[ 6.0785e-02, -3.0475e-02, 2.2560e-02], [ 6.9155e-02, -1.0349e-02, 5.4980e-02], [ 1.2542e-01, -8.9835e-02, -2.0642e-02]],
[[ 5.2013e-02, -1.0567e-01, 1.7364e-01], [-4.5699e-02, 1.5325e-01, -7.9986e-02], [ 4.2833e-02, 1.7172e-01, -4.2968e-02]],
[[ 1.2410e-01, -6.5363e-02, 2.5704e-02], [ 1.4170e-01, 1.3804e-01, 5.7095e-02], [ 2.9704e-02, 1.9089e-01, -9.8003e-02]]],

[[[ 1.6578e-01, 1.5088e-01, -9.1278e-02], [ 1.2357e-01, -1.6874e-01, 1.0370e-02], [-5.1830e-02, -1.4517e-01, -3.1605e-02]],
[[-9.1860e-02, -1.8855e-01, -1.1113e-01], [ 1.7286e-01, -2.4035e-02, -3.2304e-02], [ 1.1924e-01, -3.9774e-02, 1.4253e-01]],
[[ 1.1778e-01, -1.8438e-01, -3.7995e-02], [ 9.3429e-04, 7.5663e-02, -1.3344e-01], [-1.8852e-01, -7.2000e-02, 7.8469e-03]]],

[[[ 1.0804e-01, 1.1058e-01, -1.7716e-01], [ 1.4189e-01, -1.1833e-02, -1.2759e-01], [-1.0900e-01, 1.0096e-01, 4.5691e-02]],
[[-3.8315e-02, -1.7748e-01, -5.1315e-02], [-2.9803e-02, 2.4729e-02, -1.7748e-01], [ 1.0698e-01, 1.7462e-01, -4.0771e-02]],
[[ 7.0112e-02, -1.1790e-01, -1.8663e-02], [-1.0874e-01, 2.6357e-02, -3.6516e-02], [-1.0357e-01, -1.5886e-01, 1.8957e-01]]],

[[[ 1.6268e-01, 6.3585e-02, 1.8393e-01], [-1.6609e-01, 1.3312e-01, 2.7525e-02], [-6.3190e-02, 8.6723e-02, 9.4740e-02]],
[[ 5.2894e-02, -8.3278e-02, 1.2604e-01], [ 4.6612e-03, -8.4200e-02, 6.0069e-03], [ 1.5177e-01, 1.1195e-01, -2.3808e-02]],
[[ 9.6096e-02, -1.0349e-01, 1.3590e-01], [ 1.3989e-01, -4.0161e-02, 1.4739e-01], [-1.6409e-02, -7.6253e-02, -8.5379e-02]]],

[[[-4.6940e-02, 1.7416e-01, 6.3826e-02], [ 1.7619e-01, -4.1503e-02, 9.7316e-02], [-1.6217e-02, -1.1806e-01, -6.3072e-02]],
[[-5.1763e-02, -1.4303e-02, 1.0484e-01], [ 1.7937e-02, 7.4330e-02, -1.7916e-01], [-5.4877e-02, 4.5352e-02, -1.4611e-01]],
[[ 1.5203e-01, 6.5219e-03, -1.1079e-02], [ 1.8673e-01, -2.6226e-02, -1.1513e-02], [ 1.7050e-01, 7.9691e-02, 1.5928e-01]]],

[[[ 2.3691e-02, 1.7915e-01, -1.3350e-01], [-1.3598e-01, 1.7657e-01, -1.6766e-01], [-1.3705e-01, -1.9024e-01, -1.0670e-02]],
[[-3.1349e-02, 3.5886e-02, -1.6549e-01], [ 3.2467e-02, -1.0675e-01, 1.3922e-02], [ 1.8321e-02, -1.0467e-01, 1.7268e-01]],
[[ 5.2302e-03, -1.7241e-01, -1.0033e-01], [ 1.3820e-01, -2.8736e-02, 6.3338e-02], [-2.1389e-02, 6.8743e-02, -8.9915e-03]]],

[[[ 1.0615e-02, -1.0510e-01, -1.0044e-01], [-2.6099e-02, 1.8519e-02, -1.7633e-01], [-5.5757e-03, -7.5406e-02, -1.8654e-01]],
[[ 3.6466e-02, 3.1995e-02, -9.0401e-02], [-1.7978e-01, 1.8862e-01, 1.8258e-02], [-1.0419e-01, 6.6768e-03, 3.3108e-02]],
[[-7.5137e-02, -1.1768e-01, 6.3381e-02], [-8.5325e-02, 1.3742e-01, 1.7600e-01], [ 9.6007e-02, -6.3622e-03, 1.3692e-01]]],

[[[ 4.1438e-02, -1.0885e-02, -1.1929e-01], [-1.0280e-01, 1.2100e-01, -4.0669e-02], [-7.2578e-02, -1.5792e-01, -4.8435e-02]],
[[ 9.6727e-02, 1.1247e-01, 1.1729e-01], [-1.2477e-01, -1.0848e-01, -1.6390e-01], [-9.8475e-03, -8.8274e-02, 1.3487e-03]],
[[-1.0446e-01, 1.0695e-01, -1.1334e-01], [ 1.9452e-02, 4.5545e-02, -4.0839e-03], [-1.0553e-01, 7.2998e-03, -1.3185e-01]]],

[[[-1.1457e-01, 1.7437e-01, 7.0814e-02], [ 1.8696e-01, 8.6422e-02, 1.7698e-01], [ 9.5457e-02, 5.2190e-02, 4.2058e-02]],
[[-5.7994e-02, 2.8807e-03, -5.0472e-02], [-1.4685e-01, 1.6405e-01, -1.4611e-01], [ 1.8430e-01, -3.1619e-02, 7.8124e-04]],
[[ 6.0709e-02, 7.8404e-02, -1.5970e-01], [ 3.1988e-02, -5.8158e-02, 1.6364e-01], [ 1.3861e-01, 5.3622e-02, -1.0823e-01]]],

[[[ 1.1828e-01, 7.9877e-02, 8.7735e-02], [ 1.6349e-01, 1.7245e-02, -1.3348e-01], [ 9.9614e-02, -8.3207e-02, 1.2518e-01]],
[[ 1.0126e-01, -1.9649e-02, -1.6797e-01], [-8.3892e-02, 8.3674e-02, -7.9036e-02], [ 7.7645e-02, -1.0639e-01, 8.7243e-02]],
[[-1.2788e-01, -2.5315e-02, 1.0005e-01], [-1.7569e-01, -2.0980e-02, -3.2187e-02], [-1.0922e-01, -1.6801e-01, -8.8553e-02]]],

[[[-1.5681e-01, 4.9736e-02, 1.1805e-01], [ 4.0061e-02, 4.6174e-03, -1.4409e-01], [ 1.6202e-01, -1.5059e-01, 1.5988e-02]],
[[ 1.0499e-01, -6.8925e-03, -3.9465e-02], [-1.6021e-01, -9.3324e-02, 1.8493e-01], [-1.5928e-01, -1.3576e-01, -1.4984e-01]],
[[ 1.0726e-01, -9.3953e-03, 1.3118e-01], [-5.3700e-02, 1.5741e-01, 1.1675e-01], [ 1.1964e-01, 1.0868e-01, 1.7697e-01]]],

[[[ 1.4646e-01, 9.4915e-02, -8.2535e-02], [-7.0230e-02, 1.5785e-02, -6.7408e-02], [ 1.4377e-01, -1.3898e-01, -1.1059e-01]],
[[ 1.2637e-02, -1.4127e-01, -1.4138e-01], [ 1.3513e-01, -1.8836e-01, -8.5215e-02], [ 3.1971e-02, 1.8971e-02, -8.7198e-03]],
[[-1.5203e-01, -1.3347e-01, 3.9215e-02], [-5.8726e-02, -7.2954e-02, -4.9106e-02], [-1.0908e-01, -1.0276e-01, 6.8820e-02]]],

[[[-6.1032e-02, -1.8205e-01, -9.4852e-02], [-6.9741e-02, -9.6108e-02, 1.3828e-01], [-3.2270e-03, -8.4920e-02, -2.9239e-02]],
[[ 7.4433e-02, -3.4101e-02, -1.3491e-01], [-7.8748e-02, -1.5410e-03, 5.1533e-02], [ 1.4144e-01, -1.8581e-01, 1.6009e-01]],
[[ 1.7287e-01, -8.2069e-02, 1.2171e-01], [-3.4457e-03, 2.8110e-02, -1.1366e-01], [-1.6392e-01, 1.2084e-01, -1.1515e-01]]],

[[[-9.7665e-02, 9.1293e-03, 1.3879e-02], [-1.8690e-01, -1.0930e-01, 1.9207e-01], [ 6.8454e-02, 2.4949e-02, -5.9252e-02]],
[[ 1.2008e-01, 7.9585e-02, -7.8902e-03], [-1.2335e-01, 1.3811e-01, -8.8969e-02], [ 1.9188e-01, 1.0169e-01, 1.9050e-01]],
[[-4.7098e-02, -4.9603e-02, -4.5357e-02], [ 1.1388e-01, -9.3586e-02, -5.9370e-02], [ 2.7578e-03, 3.6049e-02, -2.5328e-02]]],

[[[ 2.5772e-02, -8.8989e-02, 2.6716e-02], [ 6.7259e-02, 9.0463e-02, 4.2918e-02], [ 8.3878e-02, 1.8272e-02, -1.7770e-01]],
[[-6.0743e-02, -4.8805e-02, 4.2698e-02], [-1.0580e-01, 6.8187e-02, -1.8864e-01], [-1.7599e-01, -2.8032e-02, -6.6849e-02]],
[[-8.5626e-02, -1.8343e-01, 1.5436e-01], [ 7.0824e-02, -2.5575e-02, 3.1471e-02], [-1.2619e-01, -1.7087e-01, -8.7480e-03]]],

[[[ 2.3842e-02, -1.4813e-01, -7.5496e-02], [ 1.2815e-01, 3.3805e-02, -1.5007e-02], [-1.2560e-01, 8.0575e-02, 4.7276e-02]],
[[-3.3064e-02, 5.9836e-02, -6.5416e-02], [ 5.4178e-02, -4.4775e-02, -5.3386e-02], [ 1.8312e-01, -1.1280e-03, 1.1499e-01]],
[[-3.7864e-02, -7.3045e-02, -8.7048e-02], [ 1.4920e-01, 1.1364e-01, -3.3509e-02], [ 1.7871e-01, -1.1932e-01, -1.8256e-01]]],

[[[-5.3785e-03, 5.2897e-02, 5.3514e-02], [-1.7856e-02, 1.2338e-01, 7.9170e-02], [-8.2314e-02, 6.6854e-02, -1.4107e-01]],
[[ 1.6003e-01, -1.0909e-01, 1.3856e-01], [ 1.5984e-01, 3.4632e-02, 1.4117e-01], [ 6.2603e-02, -1.4309e-01, 1.2196e-01]],
[[-5.6485e-02, 2.6381e-02, 1.0325e-02], [ 1.8338e-01, 1.1339e-01, -1.8391e-01], [-1.7683e-01, 1.4050e-02, 1.0608e-01]]],

[[[ 8.3929e-02, 3.3022e-02, -1.0205e-01], [-1.8357e-01, 1.0118e-01, 1.9007e-01], [-1.4385e-01, 9.7732e-02, -1.6525e-02]],
[[-9.0723e-03, -1.6846e-02, 5.4031e-02], [ 1.5147e-01, -9.7195e-02, 1.4112e-02], [ 4.0804e-02, 7.5529e-02, 3.9312e-03]],
[[-1.4392e-01, -6.7364e-02, 1.6855e-04], [ 1.8638e-01, -6.1376e-02, -1.5542e-01], [ 1.1440e-01, 6.7569e-02, 2.3479e-02]]],

[[[ 6.2760e-02, 1.1656e-01, 1.2973e-01], [ 3.3403e-03, -1.4738e-01, -1.8418e-01], [-3.2476e-02, 1.4896e-01, -1.7260e-01]],
[[ 1.5503e-01, 1.6119e-01, 1.0386e-01], [ 1.4220e-01, 7.4382e-02, -9.0094e-02], [-1.7508e-01, -1.5298e-01, -1.4623e-01]],
[[ 4.1318e-02, -1.6284e-01, -1.0854e-01], [-1.7059e-01, -1.7136e-01, -1.4920e-03], [ 1.1854e-02, -7.5491e-02, -1.8135e-01]]],

[[[-7.5641e-02, -1.9837e-02, -3.6080e-02], [ 1.6522e-01, -2.3479e-02, 1.9074e-01], [-1.1835e-01, 1.3500e-01, 6.3936e-02]],
[[ 5.9551e-02, -9.6127e-02, 9.9682e-02], [ 8.7222e-02, -4.4386e-03, -1.6344e-02], [ 4.4294e-02, -5.0437e-02, 3.4812e-02]],
[[ 1.6582e-01, -1.0691e-01, 3.3737e-02], [ 9.5978e-02, 1.4098e-01, -1.3150e-02], [-8.1499e-02, 1.0743e-01, 1.7527e-01]]],

[[[ 1.3367e-01, -7.0072e-02, 1.0393e-01], [ 8.5582e-02, -1.0689e-01, -8.9711e-02], [-7.2446e-02, -1.1076e-02, 1.2153e-01]],
[[-1.1069e-01, 8.6288e-02, 1.3902e-01], [-1.8451e-02, -1.9133e-01, 5.8069e-02], [ 7.1024e-02, 7.0418e-02, 1.1333e-01]],
[[ 5.7377e-02, 1.1837e-01, 5.5851e-02], [ 5.2166e-02, -7.6641e-02, 1.2338e-02], [-6.0599e-02, 1.0994e-01, 4.2019e-02]]],

[[[-3.5270e-02, 7.3904e-03, 8.7119e-02], [ 1.5788e-01, -2.2565e-02, 1.5929e-02], [-9.3321e-02, 2.1127e-02, 1.3271e-01]],
[[ 3.9329e-02, -1.7214e-01, 7.5898e-02], [ 1.3446e-01, 6.8285e-02, 1.4008e-01], [-1.5643e-01, 1.7654e-01, 1.7355e-01]],
[[ 9.9671e-02, 1.1026e-01, 4.8084e-02], [-7.9218e-02, 5.3639e-02, 2.2868e-02], [-1.0894e-01, -1.1125e-01, -1.4505e-01]]],

[[[ 1.2367e-01, 4.1587e-02, -1.4986e-01], [-1.1677e-01, 2.5681e-02, -1.8656e-01], [-9.8910e-02, 6.6706e-02, -1.0912e-01]],
[[ 9.6770e-03, 9.7456e-03, 1.5028e-01], [ 1.3782e-02, 1.7728e-01, 1.8127e-01], [ 1.3938e-01, 3.9131e-03, 5.8189e-02]],
[[ 1.2395e-01, 1.8756e-01, -1.5142e-01], [-2.0989e-02, -1.4696e-02, -7.9472e-02], [-1.6609e-01, 1.0467e-02, 3.1970e-02]]],

[[[-1.6156e-01, 4.1195e-02, -8.1984e-03], [ 1.2722e-01, 1.0805e-01, -3.7216e-02], [ 9.0060e-03, -1.2207e-01, 1.1916e-01]],
[[ 1.7258e-01, -3.2820e-02, -1.2777e-01], [-3.8691e-02, 5.7823e-02, 1.1832e-01], [-4.0635e-02, 1.7110e-01, 1.3502e-01]],
[[-1.5331e-02, 1.3501e-01, 2.2501e-02], [-1.3679e-01, 1.7157e-01, 8.5327e-02], [-1.7002e-01, -1.5186e-01, -1.3200e-01]]],

[[[ 1.5700e-01, -1.8891e-01, -1.2683e-01], [ 9.9102e-02, -7.7912e-02, -1.8438e-01], [ 2.5948e-02, 8.5860e-02, 3.3016e-02]],
[[-1.2053e-02, -8.0931e-02, -1.0268e-02], [ 1.7001e-01, -1.1713e-01, 3.1018e-02], [ 9.3637e-02, -9.5935e-03, 1.0162e-01]],
[[-9.4330e-02, 1.0214e-01, -4.9583e-02], [-7.9040e-03, 1.1237e-01, 7.9723e-02], [ 3.2347e-02, -8.8779e-02, -1.0160e-02]]]], requires_grad=True)modules: Sequential( (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (relu1): ReLU() (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))


必须是module.conv_block.conv1.weight才能打印, 因为网络定义里是先定义的conv_block再定义的con1


也是“总”打印完毕后, 可以得到权值, 轮到“分”的时候便报错



三、 Sequential定义模型, 打印两个权值, 遇到“分”也会报错


import torchimport torch.nn as nn import torchimport torch.nn as nnimport torch.nn.functional as Ffrom collections import OrderedDict
class MyNet(torch.nn.Module): def __init__(self,inplanes, planes): super(MyNet, self).__init__() self.conv1 = torch.nn.Sequential( OrderedDict( [ ("conv", torch.nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)), ("relu1", torch.nn.ReLU()), ("pool", torch.nn.MaxPool2d(2)) ] ))
self.dense = torch.nn.Sequential( OrderedDict([ ("dense1", torch.nn.Linear(32 * 3 * 3, 128)), ("relu2", torch.nn.ReLU()), ("dense2", torch.nn.Linear(128, 10)) ]) )
def forward(self, x): conv_out = self.conv1(x) res = conv_out.view(conv_out.size(0), -1) out = self.dense(res) return out
model = MyNet(3, 32)
# print(model.conv1.conv.weight) # for name, module in model.named_children():# print('children module:', name) for name, module in model.named_modules():
print("***************")
print('names:', name)
print('modules:', module)
# converted_weights = {} print(module.conv1.conv.weight) # 只能打印conv1中的conv.weight print(module.dense.dense1.weight)


modules: MyNet( (conv_block): Sequential( (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (relu1): ReLU() (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (dense_block): Sequential( (dense1): Linear(in_features=288, out_features=128, bias=True) (relu2): ReLU() (dense2): Linear(in_features=128, out_features=10, bias=True) ))******************modules: Sequential( (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (relu1): ReLU() (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))******************modules: Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))******************modules: ReLU()******************modules: MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)******************modules: Sequential( (dense1): Linear(in_features=288, out_features=128, bias=True) (relu2): ReLU() (dense2): Linear(in_features=128, out_features=10, bias=True))******************modules: Linear(in_features=288, out_features=128, bias=True)******************modules: ReLU()******************modules: Linear(in_features=128, out_features=10, bias=True)******************(pytorch) xzq@xzq-GL552JX:~/1$ ^C(pytorch) xzq@xzq-GL552JX:~/1$ cd /home/xzq/1 ; env /home/xzq/anaconda3/envs/pytorch/bin/python /home/xzq/.vscode/extensions/ms-python.python-2020.9.114305/pythonFiles/lib/python/debugpy/launcher 33663 -- /home/xzq/1/2.py ***************names: modules: MyNet( (conv1): Sequential( (conv): Conv2d(3, 32, kernel_size=(1, 1), stride=(1, 1), bias=False) (relu1): ReLU() (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (dense): Sequential( (dense1): Linear(in_features=288, out_features=128, bias=True) (relu2): ReLU() (dense2): Linear(in_features=128, out_features=10, bias=True) ))Parameter containing:tensor([[[[ 0.1538]],
[[ 0.3245]],
[[ 0.1077]]],

[[[-0.0101]],
[[-0.0590]],
[[-0.2294]]],

[[[-0.0574]],
[[ 0.2458]],
[[-0.4793]]],

[[[ 0.1205]],
[[-0.5152]],
[[-0.3439]]],

[[[-0.5019]],
[[ 0.0285]],
[[-0.1112]]],

[[[ 0.0754]],
[[ 0.0933]],
[[ 0.0517]]],

[[[ 0.0500]],
[[ 0.3944]],
[[-0.0366]]],

[[[-0.4968]],
[[-0.1363]],
[[ 0.2975]]],

[[[ 0.0057]],
[[ 0.5693]],
[[-0.3720]]],

[[[ 0.3055]],
[[ 0.3639]],
[[-0.0431]]],

[[[ 0.4133]],
[[ 0.3786]],
[[ 0.4531]]],

[[[ 0.4411]],
[[-0.5318]],
[[ 0.4646]]],

[[[ 0.4949]],
[[-0.1644]],
[[-0.5023]]],

[[[ 0.5589]],
[[ 0.1440]],
[[-0.2204]]],

[[[ 0.0998]],
[[-0.3182]],
[[-0.4858]]],

[[[-0.3131]],
[[ 0.4901]],
[[-0.2796]]],

[[[ 0.1283]],
[[-0.5417]],
[[-0.4835]]],

[[[ 0.3485]],
[[ 0.2474]],
[[-0.5296]]],

[[[ 0.4832]],
[[-0.1623]],
[[ 0.0966]]],

[[[-0.3036]],
[[-0.4915]],
[[-0.1437]]],

[[[-0.2073]],
[[-0.1144]],
[[-0.2780]]],

[[[ 0.4338]],
[[-0.2723]],
[[-0.0856]]],

[[[ 0.1173]],
[[-0.0981]],
[[ 0.2393]]],

[[[-0.2529]],
[[-0.2813]],
[[-0.1992]]],

[[[-0.3658]],
[[-0.0833]],
[[ 0.2231]]],

[[[ 0.5611]],
[[-0.3887]],
[[-0.3776]]],

[[[-0.4609]],
[[ 0.1353]],
[[ 0.0734]]],

[[[ 0.4316]],
[[-0.4922]],
[[ 0.1871]]],

[[[ 0.0806]],
[[ 0.3724]],
[[ 0.2636]]],

[[[ 0.3638]],
[[-0.5223]],
[[ 0.1349]]],

[[[ 0.5642]],
[[ 0.4122]],
[[-0.4067]]],

[[[-0.1574]],
[[-0.0336]],
[[-0.1971]]]], requires_grad=True)Parameter containing:tensor([[ 0.0487, -0.0190, -0.0245, ..., 0.0512, -0.0006, -0.0239], [ 0.0179, 0.0430, -0.0100, ..., -0.0310, 0.0270, 0.0135], [ 0.0280, 0.0334, -0.0138, ..., -0.0157, -0.0018, 0.0087], ..., [ 0.0387, -0.0316, -0.0327, ..., -0.0094, 0.0412, -0.0092], [ 0.0585, -0.0407, 0.0431, ..., -0.0400, -0.0436, 0.0119], [-0.0207, -0.0509, 0.0349, ..., -0.0262, 0.0142, 0.0008]], requires_grad=True)***************names: conv1modules: Sequential( (conv): Conv2d(3, 32, kernel_size=(1, 1), stride=(1, 1), bias=False) (relu1): ReLU() (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))





结论:module.conv.weight的这个module是针对的“总”


所以若不想报错, 需要添加判断句,只要“总”



解决方法:参考repvgg


将module中包含repvgg_convert函数的,取出来做下一步

for name, module in model.named_modules(): # module中有所有的分支 if hasattr(module, 'repvgg_convert'): #判断 kernel, bias = module.repvgg_convert()