数据预处理部分:
数据增强:torchvision中transforms模块自带功能,比较实用。数据预处理:torchvision中transforms也帮我们实现好了,直接调用即可。DataLoader模块可直接读取batch数据。
网络模块设置:
加载预训练模型:torchvision中有很多经典网络架构,调用起来十分方便,并且可以用别人训练好的权重参数继续训练,也就是所谓的迁移学习。需要注意的是,别人训练好的任务跟咱们的不完全一样,需要把最后的head层改一改,一般也就是最后的全连接层,改成咱们自己的任务。训练时可以全部重头训练,也可以只训练最后咱们任务的层,因为前几层都是做特征提取的,本质任务目标是一致的。
网络模型保存与测试
模型保存的时候可以带有选择性,例如在验证集中如果当前效果好则保存 读取模型进行实际测试
# Per-phase preprocessing pipelines (ImageNet mean/std normalization).
data_transforms = {
    # Training: resize larger, then augment (rotation, crop, flips, jitter,
    # occasional grayscale) before tensor conversion + normalization.
    'train': transforms.Compose([
        transforms.Resize([96, 96]),
        transforms.RandomRotation(45),
        transforms.CenterCrop(64),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.ColorJitter(brightness=0.2, contrast=0.1,
                               saturation=0.1, hue=0.1),
        transforms.RandomGrayscale(p=0.025),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ]),
    # Validation: deterministic resize to the network input size only.
    'valid': transforms.Compose([
        transforms.Resize([64, 64]),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ]),
}
选择性的权重更新
def set_parameter_requires_grad(model, feature_extracting):
    """Freeze every parameter of ``model`` when ``feature_extracting`` is True.

    Used for transfer learning: frozen parameters receive no gradients, so
    only layers added afterwards (e.g. a new head) are trained. When
    ``feature_extracting`` is False this is a no-op.
    """
    if not feature_extracting:
        return
    for weight in model.parameters():
        weight.requires_grad = False
自定义修改模型输出层,以resnet18为例
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    """Build a resnet18 with its classifier head replaced for our task.

    Args:
        model_name: currently unused — only resnet18 is supported.
        num_classes: size of the new fully-connected output layer.
        feature_extract: if True, freeze the pretrained backbone so only
            the new head is trained.
        use_pretrained: load ImageNet weights when True.

    Returns:
        (model, input_size) — the modified model and its expected input
        resolution (64, matching the CenterCrop/Resize above).
    """
    model_ft = models.resnet18(pretrained=use_pretrained)
    set_parameter_requires_grad(model_ft, feature_extract)
    num_ftrs = model_ft.fc.in_features
    # Bug fix: the head size was hard-coded to 102, silently ignoring the
    # num_classes argument; use the parameter instead.
    model_ft.fc = nn.Linear(num_ftrs, num_classes)
    input_size = 64
    return model_ft, input_size
训练权重 选择
# Build the model (102 flower classes) and move it to the compute device.
model_ft, input_size = initialize_model(model_name, 102, feature_extract,
                                        use_pretrained=True)
model_ft = model_ft.to(device)

# Checkpoint file for the best weights found during training.
filename = 'checkpoint.pth'

# Decide which parameters the optimizer should update: in feature-extract
# mode only the unfrozen (new head) parameters; otherwise everything.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
    params_to_update = []
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            params_to_update.append(param)
            print("\t", name)
else:
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            print("\t", name)
基本训练代码
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25,
                filename='best.pt'):
    """Train and validate ``model``, checkpointing the best validation accuracy.

    Args:
        model: the network to train (moved to the global ``device``).
        dataloaders: dict with 'train' and 'valid' DataLoaders.
        criterion: loss function taking (outputs, labels).
        optimizer: optimizer over the parameters to update.
        num_epochs: number of full train+valid passes.
        filename: path where the best checkpoint (state_dict, best_acc,
            optimizer state) is saved.

    Returns:
        (model, val_acc_history, train_acc_history, valid_losses,
         train_losses, LRs) — model is reloaded with the best weights.

    Fixes vs. original: several statements had been fused together
    (``sinceprint``, ``epoch_accbest_model_wts``, a ``time.time()`` call
    split mid-token); they are reconstructed as separate statements here.
    """
    since = time.time()
    best_acc = 0
    model.to(device)
    val_acc_history = []
    train_acc_history = []
    train_losses = []
    valid_losses = []
    LRs = [optimizer.param_groups[0]['lr']]
    # Snapshot of the best-so-far weights, restored before returning.
    best_model_wts = copy.deepcopy(model.state_dict())

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch runs a training pass followed by a validation pass.
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                _, preds = torch.max(outputs, 1)

                # Backprop and update only during the training phase.
                if phase == 'train':
                    loss.backward()
                    optimizer.step()

                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            time_elapsed = time.time() - since
            print('Time elapsed {:.0f}m {:.0f}s'.format(
                time_elapsed // 60, time_elapsed % 60))
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            # Checkpoint whenever validation accuracy improves.
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                state = {
                    'state_dict': model.state_dict(),
                    'best_acc': best_acc,
                    'optimizer': optimizer.state_dict(),
                }
                torch.save(state, filename)
            if phase == 'valid':
                val_acc_history.append(epoch_acc)
                valid_losses.append(epoch_loss)
            if phase == 'train':
                train_acc_history.append(epoch_acc)
                train_losses.append(epoch_loss)

        print('Optimizer learning rate : {:.7f}'.format(
            optimizer.param_groups[0]['lr']))
        LRs.append(optimizer.param_groups[0]['lr'])
        print()
        # NOTE(review): ``scheduler`` is a module-level global, not a
        # parameter of this function — confirm it is defined before calling.
        scheduler.step()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # Hand back the model with the best validation weights loaded.
    model.load_state_dict(best_model_wts)
    return model, val_acc_history, train_acc_history, valid_losses, train_losses, LRs
调用训练
# Run training for 20 epochs and unpack the recorded histories.
(model_ft, val_acc_history, train_acc_history,
 valid_losses, train_losses, LRs) = train_model(
    model_ft, dataloaders, criterion, optimizer_ft, num_epochs=20)
def im_convert(tensor):
    """Convert a normalized (optionally batched) CHW image tensor into a
    displayable HWC numpy array in [0, 1] by undoing the ImageNet
    mean/std normalization."""
    mean = np.array((0.485, 0.456, 0.406))
    std = np.array((0.229, 0.224, 0.225))
    image = tensor.to("cpu").clone().detach()
    image = image.numpy().squeeze()        # drop a leading batch dim if present
    image = image.transpose(1, 2, 0)       # CHW -> HWC for plotting
    image = image * std + mean             # de-normalize per channel
    return image.clip(0, 1)