self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                               dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                               dilate=replace_stride_with_dilation[1])
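This is the stage construction used by torchvision's ResNet, where replace_stride_with_dilation swaps a stage's stride-2 downsampling for dilation. A minimal usage sketch through the public factory function (assuming torchvision 0.13+ for the weights argument):

import torch
import torchvision

# ResNet-50 whose last two stages use dilation instead of stride,
# a common setup for dense prediction tasks such as segmentation.
model = torchvision.models.resnet50(
    weights=None,
    replace_stride_with_dilation=[False, True, True],
)

x = torch.randn(1, 3, 224, 224)
print(model(x).shape)  # torch.Size([1, 1000])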
class ResNet34(nn.Module):
    '''
    Main module: ResNet34.
    ResNet34 consists of several layers, and each layer contains multiple residual
    blocks; the residual block is implemented as a sub-module, and each layer is
    built by the _make_layer function.
    '''
    def __init__(self, num_classes=2):
        super(ResNet34, self).__init__()
        self.model_name = 'resnet34'
        # first few layers: image transformation
        self.pre = nn.Sequential(
            nn.Conv2d(3, 64, 7, 2, 3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2, 1))
        # repeated layers with 3, 4, 6 and 3 residual blocks respectively
        self.layer1 = self._make_layer(64, 128, 3)
        self.layer2 = self._make_layer(128, 256, 4, stride=2)
        self.layer3 = self._make_layer(256, 512, 6, stride=2)
        self.layer4 = self._make_layer(512, 512, 3, stride=2)
        # fully connected layer for classification
        self.fc = nn.Linear(512, num_classes)

    def _make_layer(self, inchannel, outchannel, block_num, stride=1):
        '''Build one layer containing multiple residual blocks.'''
        shortcut = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 1, stride, bias=False),
            nn.BatchNorm2d(outchannel))
self.inchannel = 3  # initial number of input channels
self.futures = self.make_layer()
# fully connected layers and classifier after the convolutional layers:
self.classifier = nn.Sequential(
    nn.Dropout(0.4),         # two fc layers actually performed a bit worse here
    nn.Linear(4 * 512, 10),
)  # fc; the final CIFAR-10 output has 10 classes

def make_layer(self):
    layers = []
    for v in self.cfg:
        if v == 'M':
            ...
In the _make_layers part, the current stride has to be determined: there are three strided stages, each halving the spatial resolution, while the default stride is 1. Alternatively, every stride could be listed explicitly in the config, and then no check would be needed.

def _make_layers(self, in_planes, cfg, layer):
    layers = []
    for x in cfg:
        out_planes = x if isinstance(x, int) else x[0]
        ...
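A minimal, self-contained sketch of this cfg-driven pattern: plain int entries use the default stride of 1, while tuple entries carry an explicit stride. Block here is a hypothetical stand-in (a single 3x3 conv) for whatever block the real network uses, so names and the cfg values are illustrative only.

import torch
import torch.nn as nn
import torch.nn.functional as F

class Block(nn.Module):
    """Hypothetical stand-in block: one 3x3 conv + BN + ReLU."""
    def __init__(self, in_planes, out_planes, stride=1):
        super().__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, 3, stride=stride, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)))

def make_layers(in_planes, cfg):
    # int entry   -> out_planes, default stride 1
    # tuple entry -> (out_planes, stride), e.g. (128, 2) downsamples by 2
    layers = []
    for x in cfg:
        out_planes = x if isinstance(x, int) else x[0]
        stride = 1 if isinstance(x, int) else x[1]
        layers.append(Block(in_planes, out_planes, stride))
        in_planes = out_planes
    return nn.Sequential(*layers)

# Three strided entries -> the spatial size shrinks by 2 three times (32x32 -> 4x4).
cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2)]
net = make_layers(3, cfg)
print(net(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 512, 4, 4])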
Defining the _make_layer function: it builds the stages of the architecture (conv2_x, conv3_x, conv4_x, conv5_x).
block: the residual block type (which class is passed in depends on the network depth; ResNet-18/34 use BasicBlock, ResNet-50/101/152 use Bottleneck).
channel: the number of convolution kernels used by the first convolutional layer inside the residual block ...
The two most important lines in the _make_layer() method are:
1. layers.append(block(self.inplanes, planes, stride, downsample)): this appends the first residual block of each stage to the layers list; it is the only block that can carry a stride and a downsample shortcut.
2. for i in range(1, blocks): layers.append(block(self.inplanes, planes)): this appends the remaining residual blocks of the stage, all with the default stride of 1 and an identity shortcut.
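To make the pattern concrete, here is a small runnable sketch in the torchvision style. BasicBlock is simplified, and make_layer is written as a free function that returns the stage together with the updated channel count; the names and exact signature are illustrative, not the library's own code.

import torch
import torch.nn as nn

class BasicBlock(nn.Module):
    expansion = 1  # Bottleneck blocks would use expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)

def make_layer(block, inplanes, planes, blocks, stride=1):
    """Build one stage; returns the stage and the new input channel count."""
    downsample = None
    # A projection shortcut is needed whenever the resolution or channel count changes.
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(inplanes, planes * block.expansion, 1, stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion))
    layers = [block(inplanes, planes, stride, downsample)]  # first block of the stage
    inplanes = planes * block.expansion
    for _ in range(1, blocks):                              # remaining blocks: stride 1, identity shortcut
        layers.append(block(inplanes, planes))
    return nn.Sequential(*layers), inplanes

stage, inplanes = make_layer(BasicBlock, 64, 128, blocks=4, stride=2)
print(stage(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 128, 28, 28])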
class ResNet(nn.Module):
    '''
    Main module: ResNet34.
    ResNet34 consists of several layers, and each layer contains multiple residual
    blocks; residual blocks are implemented as sub-modules, and each layer is built
    by the _make_layer function.
    '''
    def __init__(self, num_classes=1000):
        super(ResNet, self).__init__()
        # first few layers: image transformation
        self.pre = nn.Sequential(
            nn.Conv2d(3, 64, 7, 2, 3, bias=False),
            ...
self.conv2 = self._make_layer(Bottleneck, [[1, 1, 1]] * 3, [[0, 1, 0]] * 3, 64)
self.conv3 = self._make_layer(Bottleneck, [[1, 2, 1]] + [[1, 1, 1]] * 3, [[0, 1, 0]] * 4, 128)
self.conv4 = self._make_layer(Bottleneck, [[1, 2, 1]] + [[1, ...
    conv_layer = nn.Sequential(
        nn.Conv3d(in_c, out_c, kernel_size=(3, 3, 3), padding=0),
        nn.LeakyReLU(),
        nn.MaxPool3d((2, 2, 2)),
    )
    return conv_layer

def forward(self, x):
    # Set 1
    out = self.conv_layer1(x)
    out = self.conv_layer2(out)
    out = out.view(out.size(0), -1)
    out = ...
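The view call at the end flattens the 3D feature maps into one vector per sample before the fully connected part. A standalone illustration of just that step (the shapes are arbitrary placeholders, not the ones this particular network produces):

import torch
import torch.nn as nn

feat = torch.randn(8, 32, 4, 4, 4)     # (batch, channels, D, H, W) after the conv/pool stack
flat = feat.view(feat.size(0), -1)     # -> (8, 32 * 4 * 4 * 4) = (8, 2048)
fc = nn.Linear(32 * 4 * 4 * 4, 10)
print(fc(flat).shape)                  # torch.Size([8, 10])

nn.Flatten(start_dim=1) does the same thing as a module, which is convenient when the whole head is expressed as an nn.Sequential.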