```python
per_gpu=8,
workers_per_gpu=4,
train=dict(
    type=dataset_type,
    data_root=data_root,
    img_dir='train',
    ann_dir='train_labels',
    pipeline=train_pipeline),
# val=,
# test set
val=dict(
    type=dataset_type,
    data_root=data_root,
    img_dir='test',
    ann_dir='test_labels',
    pipeline=test_...
```
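For context, the `dataset_type` referenced in this config has to point at a dataset class that is already registered with MMSegmentation. Below is a minimal sketch of such a registration in the mmseg 0.x style; the class name, `CLASSES`, `PALETTE`, and file suffixes are placeholders I chose for illustration, not values taken from this config:

```python
# Minimal sketch of a custom dataset registration (mmseg 0.x style).
# Class name, CLASSES, PALETTE and suffixes below are placeholders.
from mmseg.datasets.builder import DATASETS
from mmseg.datasets.custom import CustomDataset


@DATASETS.register_module()
class MyCustomDataset(CustomDataset):
    CLASSES = ('background', 'target')
    PALETTE = [[0, 0, 0], [255, 255, 255]]

    def __init__(self, **kwargs):
        # img_suffix / seg_map_suffix must match the files in img_dir / ann_dir
        super().__init__(img_suffix='.png', seg_map_suffix='.png', **kwargs)
```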
```python
if args.launcher == 'none':
    distributed = False
else:
    distributed = True
    init_dist(args.launcher, **cfg.dist_params)
    # gpu_ids is used to calculate iter when resuming checkpoint
    _, world_size = get_dist_info()
    cfg.gpu_ids = range(world_size)

# create the log directory
mmcv.mkdir_or_exist(...
```
```shell
# single-gpu train
python tools/train.py configs/_gmmseg/segformer_mit-b5_gmmseg_512x512_80k_cocostuff10k.py

# multi-gpu train
bash ./tools/dist_train.sh configs/_gmmseg/segformer_mit-b5_gmmseg_512x512_80k_cocostuff10k.py ${GPU_NUM}

# single-gpu test
python tools/test.py configs/_gmmseg/segfor...
```
**Describe the bug**
Recently, I followed the instructions in Add New Datasets, Tutorial 4: Train and test with existing models, and MMSegmentation_Tutorials to train on my custom data. I checked every step and every piece of code carefully, but some errors still occurred. How can I solve this?

**Reproduction**
What...