_make_layer( 64, 128, 3) self.layer2 = self._make_layer( 128, 256, 4, stride=2) self.layer3 = self._make_layer( 256, 512, 6, stride=2) self.layer4 = self._make_layer( 512, 512, 3, stride=2) #分类用的全连接 self.fc = nn.Linear(512, num_classes) def _make_layer(...
raise NotImplementedError 而在 make_layers 部分,需要判断当前 stride, 有三次 stride,每次缩放一倍,默认 stride 都是 1,当然也可以把 stride 全列举出来,就不用判断了。 def _make_layers(self, in_planes, cfg, layer): layers = [] for x in cfg: out_planes = x if isinstance(x, int) else x[0] stride = 1 if isinstance(x,...
self.inchannel = 3  # 初始输入通道数 self.futures = self.make_layer() # 构建卷积层之后的全连接层以及分类器: self.classifier = nn.Sequential(nn.Dropout(0.4),  # 两层fc效果还差一些 nn.Linear(4*512, 10), )  # fc,最终Cifar10输出是10类 def make_layer(self): layers = [] for v in self.cfg: if v == 'M': l...
bn1 = norm_layer(self.inplanes) self.relu = nn.ReLU(inplace=True) # 网络的第一层加入注意力机制 self.ca = ChannelAttention(self.inplanes) self.sa = SpatialAttention() self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, ...
_make_layer方法中比较重要的两行代码是:1、layers.append(block(self.inplanes, planes, stride, downsample)),该部分是将每个blocks的第一个residual结构保存在layers列表中。2、 for i in range(1, blocks): layers.append(block(self.inplanes, planes)),该部分是将每个blocks的剩下residual 结构保存在layers...
1、主干网络修改为darknet53,其重要特点是使用了残差网络Residual,darknet53中的残差卷积块就是进行一次3X3、步长为2的卷积,然后保存该卷积layer,再进行一次1X1的卷积(用于减少通道数)和一次3X3的卷积(增加通道数),并把这个结果加上layer作为最后的结果, 残差网络的特点是容易优化,并且能够通过增加相当的深度来提高准...
def _make_layer(self, block, planes, num_blocks, stride):
    """Build one ResNet stage as an nn.Sequential of `num_blocks` residual blocks.

    The first block maps ``self.in_planes`` -> ``planes`` channels and applies
    the requested ``stride`` (stride 2 halves the spatial resolution); every
    following block keeps ``planes`` channels with stride 1.  After the stage
    is built, ``self.in_planes`` is updated so the next call starts from this
    stage's output width.

    Args:
        block: residual block class, invoked as ``block(in_ch, out_ch, stride)``.
        planes: output channel count for every block in this stage.
        num_blocks: number of residual blocks to stack.
        stride: stride applied by the first block only.

    Returns:
        nn.Sequential containing the stacked blocks, in order.
    """
    layers = []
    for i in range(num_blocks):
        if i == 0:
            # Transition block: channel change plus optional downsampling.
            layers.append(block(self.in_planes, planes, stride))
        else:
            layers.append(block(planes, planes, 1))
    # Record the running channel count for the next stage.
    self.in_planes = planes
    return nn.Sequential(*layers)

# NOTE(review): this source line also began a forward() that is truncated in
# the extraction; its visible prefix was:
#   def forward(self, x): x = self.maxpool(self.relu(self.bn1(
# The missing tail cannot be reconstructed from here.
(kernel_size=3,stride=2,padding=1)self.layer1=self._make_layer(block,64,layers[0])self.layer2=self._make_layer(block,128,layers[1],stride=2)self.layer3=self._make_layer(block,256,layers[2],stride=2)self.layer4=self._make_layer(block,512,layers[3],stride=2)self.avgpool=nn.Avg...
6 self.conv2 = self._make_layer(Bottleneck, [[1, 1, 1]] * 3, [[0, 1, 0]] * 3, 64) self.conv3 = self._make_layer(Bottleneck, [[1, 2, 1]] + [[1, 1, 1]] * 3, [[0, 1, 0]] * 4, 128) self.conv4 = self._make_layer(Bottleneck, [[1, 2, 1]] + [[1...
out_c, kernel_size=(3, 3, 3), padding=0), nn.LeakyReLU(), nn.MaxPool3d((2, 2, 2)), ) return conv_layer def forward(self, x): # Set 1 out = self.conv_layer1(x) out = self.conv_layer2(out) out = out.view(out.size(0), -1) out = ...