```python
# same as above
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)  # optimizer settings: lr is the learning rate, momentum the momentum factor, weight_decay the weight-decay factor
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))  # gradient clipping settings
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
```
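As intuition for what these two dicts do, here is a minimal plain-PyTorch sketch (the `Linear` model is a hypothetical stand-in; in MMDetection, `build_optimizer` constructs the optimizer from the dict and `OptimizerHook` performs the clipping):

```python
import torch

# hypothetical stand-in model; in MMDetection the detector fills this role
model = torch.nn.Linear(10, 2)

# what type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001 resolves to
optimizer = torch.optim.SGD(model.parameters(), lr=0.02,
                            momentum=0.9, weight_decay=0.0001)

# what grad_clip=dict(max_norm=35, norm_type=2) applies on every iteration
loss = model(torch.randn(4, 10)).sum()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=35, norm_type=2)
optimizer.step()
```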
```python
checkpoint_config = dict(interval=20)  # save a checkpoint every 20 epochs
```

For the parameter descriptions, see https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.CheckpointHook:

```python
class mmcv.runner.CheckpointHook(interval: int = -1, by_epoch: bool = True,
                                 save_optimizer: bool = True,
                                 out_dir: Optional[str] = None,
                                 max_keep_ckpts: int = -1, ...)
```
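What `interval=20` buys you, as a hedged sketch in plain PyTorch (`save_checkpoint_if_due` is a made-up helper; the real hook also handles `out_dir`, `max_keep_ckpts`, and so on):

```python
import torch

# hypothetical helper mimicking CheckpointHook with interval=20, by_epoch=True:
# a checkpoint is written after every 20th epoch
def save_checkpoint_if_due(model, optimizer, epoch, interval=20, out_dir='work_dir'):
    if (epoch + 1) % interval == 0:
        torch.save({
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),  # kept because save_optimizer=True
            'epoch': epoch + 1,
        }, f'{out_dir}/epoch_{epoch + 1}.pth')
```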
```python
from mmdet.apis import init_detector, inference_detector

config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
device = 'cuda:0'
# initialize the detector
model = init_detector(config_file, checkpoint_file, device=device)
# run inference on a demo image
img = 'img/demo.jpg'
result = inference_detector(model, img)
```
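For a bbox-only detector like this Faster R-CNN, `result` is a list with one `(n, 5)` NumPy array per class, each row being `[x1, y1, x2, y2, score]`. A small sketch for keeping only confident detections (the 0.3 threshold is an arbitrary illustrative choice):

```python
score_thr = 0.3
for class_id, dets in enumerate(result):
    for x1, y1, x2, y2, score in dets[dets[:, 4] >= score_thr]:
        print(f'class {class_id}: ({x1:.0f}, {y1:.0f}, {x2:.0f}, {y2:.0f}) '
              f'score={score:.2f}')
```

To visualize instead, `model.show_result(img, result, out_file='result.jpg')` writes the drawn image to disk.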
```python
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)  # grad_clip is a safeguard against exploding gradients; None disables it
# lr settings
lr_config = dict(
    policy='step',        # lr decay scheme; alternatives include cosine annealing ('CosineAnnealing') and 'cyclic'
    warmup='linear',      # scheme used to ramp the learning rate up at the start of training
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
```
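On the naming question the comment raises: mmcv resolves the policy string by title-casing an all-lowercase value and appending `'LrUpdaterHook'`, so `'step'` and `'cyclic'` work as written, while cosine annealing must be spelled `'CosineAnnealing'`. A sketch of that resolution logic (simplified from `mmcv.runner`):

```python
def resolve_lr_hook_name(policy: str) -> str:
    # mmcv title-cases an all-lowercase policy before appending the suffix
    if policy == policy.lower():
        policy = policy.title()
    return policy + 'LrUpdaterHook'

assert resolve_lr_hook_name('step') == 'StepLrUpdaterHook'
assert resolve_lr_hook_name('cyclic') == 'CyclicLrUpdaterHook'
assert resolve_lr_hook_name('CosineAnnealing') == 'CosineAnnealingLrUpdaterHook'
```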
```python
runner.register_training_hooks(cfg.lr_config, optimizer_config,
                               cfg.checkpoint_config, cfg.log_config,
                               cfg.get('momentum_config', None))
# 5. if validation is needed, also register an EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
# 6. register user-defined hooks
```
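For step 6, a user-defined hook subclasses `mmcv.runner.Hook` and is registered in the `HOOKS` registry; `MyHook` below is a made-up example that only logs at the end of each training epoch:

```python
from mmcv.runner import HOOKS, Hook


@HOOKS.register_module()
class MyHook(Hook):
    """Hypothetical hook: log a message when each training epoch ends."""

    def after_train_epoch(self, runner):
        runner.logger.info(f'finished epoch {runner.epoch + 1}')
```

It can then be attached either with `runner.register_hook(MyHook())` or declaratively via `custom_hooks = [dict(type='MyHook')]` in the config.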
```python
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
```

Momentum schedule

MMDetection supports adjusting momentum dynamically according to the learning rate, which can speed up convergence. An example:

```python
lr_config = dict(
    policy='cyclic',
    target_ratio=(10, 1e-4),
    cyclic_times=1,
    step_ratio_up=0.4,
)
```
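The snippet above only shows the learning-rate half; in the MMDetection docs the matching momentum schedule is configured with a separate `momentum_config` dict (the values below follow the docs' example, so treat them as a sketch rather than tuned settings):

```python
momentum_config = dict(
    policy='cyclic',
    target_ratio=(0.85 / 0.95, 1),
    cyclic_times=1,
    step_ratio_up=0.4,
)
```

Momentum is scheduled inversely to the learning rate: it dips while the learning rate climbs and recovers as the learning rate falls, which is the usual one-cycle pairing.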
```python
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
```

In the code above, lr=0.02 sets the initial learning rate to 0.02, while the lr_config dict defines the learning-rate schedule: the adjustment policy (policy='step'), the warmup scheme and its length (warmup='linear', warmup_iters=500), the warmup starting ratio (warmup_ratio=1.0 / 3), and the epochs at which the rate is decayed (step=[8, 11]).
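Putting the warmup and decay together, a sketch of the learning rate this schedule yields (the linear-warmup formula mirrors mmcv's LrUpdaterHook, and gamma=0.1 is the step policy's default decay factor):

```python
def lr_at(epoch, it, base_lr=0.02, warmup_iters=500, warmup_ratio=1.0 / 3,
          steps=(8, 11), gamma=0.1):
    """Learning rate at a given epoch and global iteration."""
    # step decay: multiply by gamma once per milestone already passed
    lr = base_lr * gamma ** sum(epoch >= s for s in steps)
    if it < warmup_iters:  # linear warmup over the first 500 iterations
        k = (1 - it / warmup_iters) * (1 - warmup_ratio)
        lr = lr * (1 - k)
    return lr

print(lr_at(0, 0))       # 0.02 * 1/3 ~= 0.00667 at the very first iteration
print(lr_at(0, 500))     # 0.02 once warmup is over
print(lr_at(8, 10000))   # 0.002 after the first decay at epoch 8
print(lr_at(11, 20000))  # 0.0002 after the second decay at epoch 11
```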
```python
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
```

Here, _delete_=True replaces all of the old keys inherited for this field (optimizer_config in this case) with the new keys, instead of merging the two dicts.

2. Learning rate configuration

```python
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
```
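A simplified sketch of the merge semantics mmcv's Config applies when a config overrides its `_base_` (`merge` here is an illustrative reimplementation, not the library function):

```python
def merge(base: dict, child: dict) -> dict:
    """Illustrative version of mmcv's config-dict merging."""
    child = dict(child)
    if child.pop('_delete_', False):
        return child                   # discard every key inherited from base
    out = dict(base)
    for k, v in child.items():
        if isinstance(v, dict) and isinstance(out.get(k), dict):
            out[k] = merge(out[k], v)  # nested dicts merge recursively
        else:
            out[k] = v
    return out

base = dict(grad_clip=None)
child = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
print(merge(base, child))  # {'grad_clip': {'max_norm': 35, 'norm_type': 2}}
```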
```python
evaluation = dict(interval=1, metric='bbox')
# optimizer settings
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# optimizer hook settings
optimizer_config = dict(grad_clip=None)
# learning-rate settings
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
```
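Note the arithmetic here: with lr=0.01 and warmup_ratio=0.001, training starts at 0.01 × 0.001 = 1e-5 and ramps linearly up to 0.01 over the first 500 iterations, after which the step schedule takes over.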
https://github.com/open-mmlab/mmdetection/blob/master/tools/convert_datasets/pascal_voc.py). Then you can simply use CustomDataset.

Custom optimizer

An example of a customized optimizer, CopyOfSGD, is defined in mmdet/core/optimizer/copy_of_sgd.py. More generally, a customized optimizer can be defined as follows, in mmdet/core/optimizer/my_optimizer.py:
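The original snippet is cut off at the import; below is a minimal sketch of the registration pattern it introduces, assuming the older mmdet layout with a package-local OPTIMIZERS registry (MyOptimizer and its a, b, c arguments are the docs' placeholders):

```python
from torch.optim import Optimizer

from .registry import OPTIMIZERS  # mmdet/core/optimizer/registry.py in this layout


@OPTIMIZERS.register_module
class MyOptimizer(Optimizer):
    """Placeholder optimizer; a, b and c stand in for real hyperparameters."""

    def __init__(self, params, a, b, c):
        super().__init__(params, defaults=dict(a=a, b=b, c=c))

    def step(self, closure=None):
        ...  # the actual parameter-update rule goes here
```

Once registered, it is selected from the config with `optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value)`.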