Pre-trained models in PyTorch

Training a convolutional neural network is time-consuming, and in many settings it is impractical to train from randomly initialized parameters every time.

PyTorch ships with pretrained models for several common deep networks, such as VGG and ResNet. To speed up learning, we often load the parameters of a pre-trained model directly at the start of training. A model is loaded as follows:

import torchvision.models as models

# resnet: use the lowercase factory functions; the ResNet class itself
# takes no `pretrained` argument
model = models.resnet18(pretrained=True)
model = models.resnet34(pretrained=True)
model = models.resnet50(pretrained=True)

# vgg: likewise, use the factory functions rather than the VGG class
model = models.vgg11(pretrained=True)
model = models.vgg16(pretrained=True)
model = models.vgg16_bn(pretrained=True)
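
Note that on torchvision 0.13 and later, the `pretrained` flag is deprecated in favor of explicit weight enums; the equivalent call on a recent version looks like this:

from torchvision.models import resnet50, ResNet50_Weights

# equivalent to models.resnet50(pretrained=True) on recent torchvision
model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)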

Modifying these models

Modifying the parameters

For a simple parameter change, take the resnet pretrained model as an example (the resnet source code is available in the torchvision repository on GitHub).

The final classification layer of resnet, the fully connected layer fc, separates 1000 classes. If your own dataset has only 9 classes, modify the model as follows:

# coding=UTF-8
import torch.nn as nn
import torchvision.models as models

# load the pretrained model
model = models.resnet50(pretrained=True)
# read the input dimension of the existing fc layer
fc_features = model.fc.in_features
# replace the head with a 9-class classifier
model.fc = nn.Linear(fc_features, 9)
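
When the target dataset is small, a common companion step is to freeze the pretrained backbone and train only the new head. A minimal sketch; the optimizer and learning rate here are illustrative assumptions, not part of the original recipe:

import torch

# freeze every pretrained parameter
for param in model.parameters():
    param.requires_grad = False

# a layer created after freezing has requires_grad=True by default
model.fc = nn.Linear(fc_features, 9)

# optimize only the parameters of the new head
optimizer = torch.optim.SGD(model.fc.parameters(), lr=1e-3, momentum=0.9)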

Adding or removing convolutional layers

The previous method only works for simple parameter changes. Sometimes we need to change the layer structure of the network itself, and then the only option is parameter overriding: define a similar network of your own, then copy the pretrained parameters into it. Again, the resnet pretrained model serves as the example.

# coding=UTF-8
import math
import torch
import torch.nn as nn
import torchvision.models as models
from torchvision.models.resnet import Bottleneck

class CNN(nn.Module):
    def __init__(self, block, layers, num_classes=9):
        super(CNN, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # new transposed-convolution layer
        self.convtranspose1 = nn.ConvTranspose2d(2048, 2048, kernel_size=3,
                                                 stride=1, padding=1,
                                                 output_padding=0, groups=1,
                                                 bias=False, dilation=1)
        # new max-pooling layer
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        # drop the original fc layer and add a new fclass layer
        self.fclass = nn.Linear(2048, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # forward pass of the new layers; they operate on the 4-D feature
        # map, so flatten only after the pooling step
        x = self.convtranspose1(x)
        x = self.maxpool2(x)
        x = x.view(x.size(0), -1)
        x = self.fclass(x)
        return x

# load the pretrained model
resnet50 = models.resnet50(pretrained=True)
cnn = CNN(Bottleneck, [3, 4, 6, 3])
# read the parameters
pretrained_dict = resnet50.state_dict()
model_dict = cnn.state_dict()
# drop the keys in pretrained_dict that do not exist in model_dict
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# update the current model_dict
model_dict.update(pretrained_dict)
# load the state_dict we actually need
cnn.load_state_dict(model_dict)
# print(resnet50)
print(cnn)
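
Before training, it is worth confirming that the copied weights and the new layers fit together. A minimal sanity check, assuming the standard 224x224 ImageNet input size:

# dummy batch: one RGB image at 224x224, the size the avgpool above expects
x = torch.randn(1, 3, 224, 224)
out = cnn(x)
print(out.shape)  # torch.Size([1, 9]) for num_classes=9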

Unofficial pre-trained models

Removing the last layer (or any specific layer) from a pre-trained model

import torch
from collections import OrderedDict
import os
import torch.nn as nn
import torch.nn.init as init
from xxx import new_VGG 

def init_weight(modules):
    for m in modules:
        if isinstance(m, nn.Conv2d):
            init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            # use the in-place normal_(); normal() is not an initializer
            m.weight.data.normal_(0, 0.01)
            m.bias.data.zero_()
            
def copyStateDict(state_dict):
    # checkpoints saved from nn.DataParallel prefix every key with 'module.'
    if list(state_dict.keys())[0].startswith('module'):
        start_idx = 1
    else:
        start_idx = 0
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        # rejoin with '.' (not ','), dropping the 'module.' prefix if present
        name = '.'.join(k.split('.')[start_idx:])
        new_state_dict[name] = v
    return new_state_dict

# load the pretrained checkpoint
state_dict = torch.load('/users/xxx/xxx.pth')

new_dict = copyStateDict(state_dict)
keys = []
for k, v in new_dict.items():
    # filter out the keys starting with 'conv_cls' -- the layer(s) to remove
    if k.startswith('conv_cls'):
        continue
    keys.append(k)

# the parameters with the specified layer(s) removed
new_dict = {k: new_dict[k] for k in keys}

# your own model definition; its remaining layers must match the saved ones
net = new_VGG()

# load the pretrained parameters into the new model; note that updating the
# dict returned by state_dict() does NOT change the model, so merge and reload
model_dict = net.state_dict()
model_dict.update(new_dict)
net.load_state_dict(model_dict)
# the custom layers received no pretrained weights, so run init_weight on
# them before training

# save the model with the specified layer(s) removed
torch.save(net.state_dict(), '/users/xxx/xxx.pth')
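
As a sanity check, `load_state_dict` with `strict=False` reports exactly which keys received no weights, which is a convenient way to confirm that only the intentionally removed layers still need `init_weight`:

# alternative load: strict=False tolerates missing/extra keys and reports them
missing, unexpected = net.load_state_dict(new_dict, strict=False)
print('missing keys (run init_weight on these layers):', missing)
print('unexpected keys:', unexpected)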

