```python
# source: mmcv/runner/hooks/iter_timer.py
import time

from mmcv.runner import HOOKS, Hook


@HOOKS.register_module()
class IterTimerHook(Hook):

    def before_epoch(self, runner):
        self.t = time.time()

    def before_iter(self, runner):
        # time spent waiting for the DataLoader to produce the batch
        runner.log_buffer.update({'data_time': time.time() - self.t})

    def after_iter(self, runner):
        # total time of one training iteration
        runner.log_buffer.update({'time': time.time() - self.t})
        self.t = time.time()

# ps...
```
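The values pushed into `runner.log_buffer` are later averaged and printed by the logger hooks as `data_time` and `time`. Below is a minimal sketch of how such a hook gets attached to a runner; the `model` and `optimizer` objects are assumed to be built already.

```python
import logging

from mmcv.runner import EpochBasedRunner, IterTimerHook

# a sketch, assuming `model` and `optimizer` already exist
runner = EpochBasedRunner(
    model,
    optimizer=optimizer,
    work_dir='./work_dirs/demo',
    logger=logging.getLogger(),
    max_epochs=12)

# after this, every iteration updates 'data_time' and 'time' in runner.log_buffer
runner.register_hook(IterTimerHook())
```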
```python
def train(self, data_loader, **kwargs):
    self.model.train()
    self.mode = 'train'
    self.data_loader = data_loader
    self._max_iters = self._max_epochs * len(self.data_loader)
    # hook functions are executed at fixed points during training
    self.call_hook('before_train_epoch')
    time.sleep(2)  # Prevent possible deadlock during epoch transition
    for i, data_batch in enumerate(self.data_loader):
        self._inner_iter = i
        ...
```
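`train()` is not called directly; `EpochBasedRunner.run()` walks the configured workflow and dispatches to it. A minimal sketch, assuming `runner` and a training DataLoader already exist:

```python
# a sketch, assuming `runner` and `train_loader` already exist;
# run() repeatedly calls train() according to the workflow until max_epochs
runner.run([train_loader], workflow=[('train', 1)])
```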
runtime: you can also configure how often a checkpoint is saved. The default is one checkpoint per epoch, which quickly eats disk space 😶. Any entry in your config can override the corresponding content inherited from `_base_`; see the official documentation for details (a small override sketch follows the training commands below).

Training the model:

```shell
# single-gpu training
python tools/train.py \
    configs/voc_cat/faster_rcnn_r50_fpn_1x_voc_cat.py \
    --work-dir _train_voc_cat

# multi-gpu tr...
```
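One way to keep disk usage down, sketched against the mmcv CheckpointHook options (`interval` in epochs, `max_keep_ckpts` limiting how many checkpoints are retained); add it to your own config to override `default_runtime.py`:

```python
# save a checkpoint every 4 epochs and keep only the 2 most recent ones
checkpoint_config = dict(interval=4, max_keep_ckpts=2)
```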
Config file:

```python
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/voc0712_val.py',  # only the VOC dataset config was changed
    '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# optimizer
optimizer = dict(type='SGD', lr...
```
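For reference, a typical full set of schedule overrides in this style of config looks roughly like the sketch below (the values are illustrative, not necessarily the ones used here; the learning rate is normally scaled linearly with the total batch size):

```python
# illustrative values, not the exact ones from this config
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
```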
```python
time.sleep(2)  # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.data_loader):
    self._inner_iter = i
    self.call_hook('before_train_iter')
    self.run_iter(data_batch, train_mode=True, **kwargs)
    self.call_hook('after_train_iter')
    ...
```
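These call points are where the real work gets attached: the backward pass and the optimizer step live in a hook as well. A simplified sketch of mmcv's OptimizerHook (gradient clipping omitted):

```python
from mmcv.runner import Hook


class OptimizerHook(Hook):
    # simplified: the real hook also supports gradient clipping

    def after_train_iter(self, runner):
        runner.optimizer.zero_grad()
        runner.outputs['loss'].backward()
        runner.optimizer.step()
```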
Users can set the dataset configuration through `train_dataloader.dataset`, `val_dataloader.dataset` and `test_dataloader.dataset`, which correspond to `data.train`, `data.val` and `data.test` in the 2.x version.

Original config:

```python
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations...
        img_prefix=data_root + 'val2017/',  # same as above
        img_scale=(1333, 800),              # same as above
        img_norm_cfg=img_norm_cfg,          # same as above
        size_divisor=32,                    # same as above
        flip_ratio=0,                       # same as above
        with_mask=False,                    # same as above
        with_crowd=True,                    # same as above
        with_label=True),                   # same as above
    ...
```
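For comparison, a rough sketch of the same settings written in the 3.x dataloader style (field names follow the MMDetection 3.x migration guide; `dataset_type`, `data_root` and the pipelines are assumed to be defined elsewhere, and the annotation paths are only illustrative):

```python
train_dataloader = dict(
    batch_size=2,      # replaces samples_per_gpu
    num_workers=2,     # replaces workers_per_gpu
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/'),
        pipeline=train_pipeline))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        pipeline=test_pipeline))

test_dataloader = val_dataloader
```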
```python
import argparse
import glob
import multiprocessing as mp
import os
import time

import cv2
import tqdm

from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger

from predictor import VisualizationDemo
...
```
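A minimal sketch of how these pieces are typically wired together; the config path, weights file and image paths below are placeholders:

```python
# a sketch: build a config, run the demo predictor on one image, save the visualization
setup_logger()

cfg = get_cfg()
cfg.merge_from_file('configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml')  # placeholder
cfg.MODEL.WEIGHTS = 'model_final.pth'                                       # placeholder
cfg.freeze()

demo = VisualizationDemo(cfg)
img = read_image('input.jpg', format='BGR')
predictions, vis_output = demo.run_on_image(img)
vis_output.save('output.jpg')
```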
```python
data = dict(
    samples_per_gpu=16,   # 16 images per GPU
    workers_per_gpu=2,    # 2 dataloader worker processes per GPU
    train=dict(
        type='RepeatDataset',
        times=3,          # repeat the dataset 3 times per epoch
        ...))
```

Once the configuration is done, you can start training directly with `python tools/train.py`. Related options such as `--gpu-id` can be set as needed.
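A sketch of such a launch; the config path and work dir are placeholders, and the flag names follow MMDetection's `tools/train.py`:

```shell
# train on GPU 1 with a custom work dir (paths are placeholders)
python tools/train.py configs/my_dataset/faster_rcnn_r50_fpn_1x.py \
    --work-dir work_dirs/faster_rcnn_repeat3 \
    --gpu-id 1
```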