From 1f0e6315f088559d1c9f48cfd25b5c072c65339a Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Sat, 11 Mar 2023 22:10:34 +0800 Subject: [PATCH 01/20] base code for autoanchor(with bug) --- .../yolov5_s-v61_syncbn_8xb16-300e_coco_aa.py | 13 + mmyolo/core/__init__.py | 2 + mmyolo/core/anchor/__init__.py | 12 + mmyolo/core/anchor/anchor_generator.py | 40 ++ mmyolo/core/anchor/anchor_optimizer.py | 495 ++++++++++++++++++ mmyolo/engine/hooks/__init__.py | 3 +- mmyolo/engine/hooks/yolo_auto_anchor_hook.py | 77 +++ mmyolo/registry.py | 4 +- mmyolo/utils/setup_env.py | 1 + 9 files changed, 645 insertions(+), 2 deletions(-) create mode 100644 configs/yolov5/autoanchor/yolov5_s-v61_syncbn_8xb16-300e_coco_aa.py create mode 100644 mmyolo/core/__init__.py create mode 100644 mmyolo/core/anchor/__init__.py create mode 100644 mmyolo/core/anchor/anchor_generator.py create mode 100644 mmyolo/core/anchor/anchor_optimizer.py create mode 100644 mmyolo/engine/hooks/yolo_auto_anchor_hook.py diff --git a/configs/yolov5/autoanchor/yolov5_s-v61_syncbn_8xb16-300e_coco_aa.py b/configs/yolov5/autoanchor/yolov5_s-v61_syncbn_8xb16-300e_coco_aa.py new file mode 100644 index 000000000..7d75a7b0e --- /dev/null +++ b/configs/yolov5/autoanchor/yolov5_s-v61_syncbn_8xb16-300e_coco_aa.py @@ -0,0 +1,13 @@ +_base_ = '../yolov5_s-v61_syncbn_8xb16-300e_coco.py' + +model = dict( + bbox_head=dict(prior_generator=dict(type='YOLOAutoAnchorGenerator'))) + +custom_hooks = [ + dict( + type='YOLOAutoAnchorHook', + optimizer=dict( + type='YOLOKMeansAnchorOptimizer', + iters=1000, + num_anchor_per_level=[3, 3, 3])) +] diff --git a/mmyolo/core/__init__.py b/mmyolo/core/__init__.py new file mode 100644 index 000000000..0df1ce61e --- /dev/null +++ b/mmyolo/core/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .anchor import * # noqa: F401, F403 diff --git a/mmyolo/core/anchor/__init__.py b/mmyolo/core/anchor/__init__.py new file mode 100644 index 000000000..9b1fafc3b --- /dev/null +++ b/mmyolo/core/anchor/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .anchor_generator import YOLOAutoAnchorGenerator +from .anchor_optimizer import (YOLODEAnchorOptimizer, + YOLOKMeansAnchorOptimizer, + YOLOV5KMeansAnchorOptimizer) + +__all__ = [ + 'YOLOAutoAnchorGenerator', + 'YOLOKMeansAnchorOptimizer', + 'YOLOV5KMeansAnchorOptimizer', + 'YOLODEAnchorOptimizer', +] diff --git a/mmyolo/core/anchor/anchor_generator.py b/mmyolo/core/anchor/anchor_generator.py new file mode 100644 index 000000000..15213c709 --- /dev/null +++ b/mmyolo/core/anchor/anchor_generator.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmdet.models.task_modules import YOLOAnchorGenerator +from torch.nn.modules.utils import _pair + +from mmyolo.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class YOLOAutoAnchorGenerator(nn.Module, YOLOAnchorGenerator): + """AutoAnchor generator for YOLO. + + Args: + strides (list[int] | list[tuple[int, int]]): Strides of anchors + in multiple feature levels. + base_sizes (list[list[tuple[int, int]]]): The basic sizes + of anchors in multiple levels. + """ + + def __init__(self, strides, base_sizes, use_box_type: bool = False): + super().__init__() + self.strides = [_pair(stride) for stride in strides] + self.centers = [(stride[0] / 2., stride[1] / 2.) 
+ for stride in self.strides] + self.use_box_type = use_box_type + self.register_buffer('anchors', torch.tensor(base_sizes)) + + @property + def base_sizes(self): + T = [] + num_anchor_per_level = len(self.anchors[0]) + for base_sizes_per_level in self.anchors: + assert num_anchor_per_level == len(base_sizes_per_level) + T.append([_pair(base_size) for base_size in base_sizes_per_level]) + return T + + @property + def base_anchors(self): + return self.gen_base_anchors() diff --git a/mmyolo/core/anchor/anchor_optimizer.py b/mmyolo/core/anchor/anchor_optimizer.py new file mode 100644 index 000000000..ef0a6cd68 --- /dev/null +++ b/mmyolo/core/anchor/anchor_optimizer.py @@ -0,0 +1,495 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import random +from typing import Tuple + +import numpy as np +import torch +from mmdet.structures.bbox import (bbox_cxcywh_to_xyxy, bbox_overlaps, + bbox_xyxy_to_cxcywh) +from mmengine.fileio import dump +from mmengine.utils import ProgressBar +from scipy.optimize import differential_evolution +from torch import Tensor + +from mmyolo.registry import TASK_UTILS + +try: + from scipy.cluster.vq import kmeans +except ImportError: + kmeans = None + + +@TASK_UTILS.register_module() +class BaseAnchorOptimizer: + """Base class for anchor optimizer. + + Args: + dataset (obj:`Dataset`): Dataset object. + input_shape (list[int]): Input image shape of the model. + Format in [width, height]. + num_anchor_per_level (list[int]) : Number of anchors for each level. + logger (obj:`logging.Logger`): The logger for logging. + device (str, optional): Device used for calculating. + Default: 'cuda:0' + out_dir (str, optional): Path to save anchor optimize result. + Default: None + """ + + def __init__(self, + dataset, + input_shape, + num_anchor_per_level, + logger, + device='cuda:0', + out_dir=None): + self.dataset = dataset + self.input_shape = input_shape + self.num_anchor_per_level = num_anchor_per_level + self.num_anchors = sum(num_anchor_per_level) + self.logger = logger + self.device = device + self.out_dir = out_dir + bbox_whs, img_shapes = self.get_whs_and_shapes() + ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape]) + + # resize to input shape + self.bbox_whs = bbox_whs / ratios + + def get_whs_and_shapes(self): + """Get widths and heights of bboxes and shapes of images. + + Returns: + tuple[np.ndarray]: Array of bbox shapes and array of image + shapes with shape (num_bboxes, 2) in [width, height] format. + """ + self.logger.info('Collecting bboxes from annotation...') + bbox_whs = [] + img_shapes = [] + prog_bar = ProgressBar(len(self.dataset)) + for idx in range(len(self.dataset)): + data_info = self.dataset.get_data_info(idx) + img_shape = np.array([data_info['width'], data_info['height']]) + gt_instances = data_info['instances'] + for instance in gt_instances: + bbox = np.array(instance['bbox']) + gt_filter_sizes = bbox[2:4] - bbox[0:2] + img_shapes.append(img_shape) + bbox_whs.append(gt_filter_sizes) + + prog_bar.update() + print('\n') + bbox_whs = np.array(bbox_whs) + img_shapes = np.array(img_shapes) + self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.') + return bbox_whs, img_shapes + + def get_zero_center_bbox_tensor(self): + """Get a tensor of bboxes centered at (0, 0). + + Returns: + Tensor: Tensor of bboxes with shape (num_bboxes, 4) + in [xmin, ymin, xmax, ymax] format. 
+ """ + whs = torch.from_numpy(self.bbox_whs).to( + self.device, dtype=torch.float32) + bboxes = bbox_cxcywh_to_xyxy( + torch.cat([torch.zeros_like(whs), whs], dim=1)) + return bboxes + + def optimize(self): + raise NotImplementedError + + def save_result(self, anchors, path=None): + + anchor_results = [] + start = 0 + for num in self.num_anchor_per_level: + end = num + start + anchor_results.append([(round(w), round(h)) + for w, h in anchors[start:end]]) + start = end + + self.logger.info(f'Anchor optimize result:{anchor_results}') + if path: + json_path = osp.join(path, 'anchor_optimize_result.json') + dump(anchor_results, json_path) + self.logger.info(f'Result saved in {json_path}') + return anchor_results + + +@TASK_UTILS.register_module() +class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer): + r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet. + `_. + + Args: + iters (int): Maximum iterations for k-means. + """ + + def __init__(self, iters, **kwargs): + + super().__init__(**kwargs) + self.iters = iters + + def optimize(self): + anchors = self.kmeans_anchors() + anchor_results = self.save_result(anchors, self.out_dir) + return anchor_results + + def kmeans_anchors(self): + self.logger.info( + f'Start cluster {self.num_anchors} YOLO anchors with K-means...') + bboxes = self.get_zero_center_bbox_tensor() + cluster_center_idx = torch.randint( + 0, bboxes.shape[0], (self.num_anchors, )).to(self.device) + + assignments = torch.zeros((bboxes.shape[0], )).to(self.device) + cluster_centers = bboxes[cluster_center_idx] + if self.num_anchors == 1: + cluster_centers = self.kmeans_maximization(bboxes, assignments, + cluster_centers) + anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() + anchors = sorted(anchors, key=lambda x: x[0] * x[1]) + return anchors + + prog_bar = ProgressBar(self.iters) + for i in range(self.iters): + converged, assignments = self.kmeans_expectation( + bboxes, assignments, cluster_centers) + if converged: + self.logger.info(f'K-means process has converged at iter {i}.') + break + cluster_centers = self.kmeans_maximization(bboxes, assignments, + cluster_centers) + prog_bar.update() + print('\n') + avg_iou = bbox_overlaps(bboxes, + cluster_centers).max(1)[0].mean().item() + + anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() + anchors = sorted(anchors, key=lambda x: x[0] * x[1]) + self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}') + + return anchors + + def kmeans_maximization(self, bboxes, assignments, centers): + """Maximization part of EM algorithm(Expectation-Maximization)""" + new_centers = torch.zeros_like(centers) + for i in range(centers.shape[0]): + mask = (assignments == i) + if mask.sum(): + new_centers[i, :] = bboxes[mask].mean(0) + return new_centers + + def kmeans_expectation(self, bboxes, assignments, centers): + """Expectation part of EM algorithm(Expectation-Maximization)""" + ious = bbox_overlaps(bboxes, centers) + closest = ious.argmax(1) + converged = (closest == assignments).all() + return converged, closest + + +@TASK_UTILS.register_module() +class YOLOV5KMeansAnchorOptimizer(BaseAnchorOptimizer): + r"""YOLOv5 anchor optimizer using shape k-means. + Code refer to `ultralytics/yolov5. + `_. + + Args: + iters (int): Maximum iterations for k-means. + prior_match_thr (float): anchor-label width height + ratio threshold hyperparameter. 
+ """ + + def __init__(self, + iters, + prior_match_thr=4.0, + mutation_args=[0.9, 0.1], + augment_args=[0.9, 1.1], + **kwargs): + + super().__init__(**kwargs) + self.iters = iters + self.prior_match_thr = prior_match_thr + [self.mutation_prob, self.mutation_sigma] = mutation_args + [self.augment_min, self.augment_max] = augment_args + + def optimize(self): + self.logger.info( + f'Start cluster {self.num_anchors} YOLOv5 anchors with K-means...') + + bbox_whs = torch.from_numpy(self.bbox_whs).to( + self.device, dtype=torch.float32) + anchors = self.anchor_generate( + bbox_whs, + num=self.num_anchors, + img_size=self.input_shape[0], + prior_match_thr=self.prior_match_thr, + iters=self.iters) + best_ratio, mean_matched = self.anchor_metric(bbox_whs, anchors) + self.logger.info(f'{mean_matched:.2f} anchors/target {best_ratio:.3f} ' + 'Best Possible Recall (BPR). ') + self.save_result(anchors.tolist(), self.out_dir) + + def anchor_generate(self, + box_size: Tensor, + num: int = 9, + img_size: int = 640, + prior_match_thr: float = 4.0, + iters: int = 1000) -> Tensor: + """cluster boxes metric with anchors. + + Args: + box_size (Tensor): The size of the bxes, which shape is + (box_num, 2),the number 2 means width and height. + num (int): number of anchors. + img_size (int): image size used for training + prior_match_thr (float): width/height ratio threshold + used for training + iters (int): iterations to evolve anchors using genetic algorithm + + Returns: + anchors (Tensor): kmeans evolved anchors + """ + + thr = 1 / prior_match_thr + + # step1: filter small bbox + box_size = self._filter_box(box_size) + assert num <= len(box_size) + + # step2: init anchors + if kmeans: + try: + self.logger.info( + 'beginning init anchors with scipy kmeans method') + # sigmas for whitening + sigmas = box_size.std(0).cpu().numpy() + anchors = kmeans( + box_size.cpu().numpy() / sigmas, num, iter=30)[0] * sigmas + # kmeans may return fewer points than requested + # if width/height is insufficient or too similar + assert num == len(anchors) + except Exception: + self.logger.warning( + 'scipy kmeans method cannot get enough points ' + 'because of width/height is insufficient or too similar, ' + 'now switching strategies from kmeans to random init.') + anchors = np.sort(np.random.rand(num * 2)).reshape( + num, 2) * img_size + else: + self.logger.info( + 'cannot found scipy package, switching strategies from kmeans ' + 'to random init, you can install scipy package to ' + 'get better anchor init') + anchors = np.sort(np.random.rand(num * 2)).reshape(num, + 2) * img_size + + self.logger.info('init done, beginning evolve anchors...') + # sort small to large + anchors = torch.tensor(anchors[np.argsort(anchors.prod(1))]).to( + box_size.device, dtype=torch.float32) + + # step3: evolve anchors use Genetic Algorithm + prog_bar = ProgressBar(iters) + fitness = self._anchor_fitness(box_size, anchors, thr) + cluster_shape = anchors.shape + + for _ in range(iters): + mutate_result = np.ones(cluster_shape) + # mutate until a change occurs (prevent duplicates) + while (mutate_result == 1).all(): + # mutate_result is scale factor of anchors, between 0.3 and 3 + mutate_result = ( + (np.random.random(cluster_shape) < self.mutation_prob) * + random.random() * np.random.randn(*cluster_shape) * + self.mutation_sigma + 1).clip(0.3, 3.0) + mutate_result = torch.from_numpy(mutate_result).to(box_size.device) + new_anchors = (anchors.clone() * mutate_result).clip(min=2.0) + new_fitness = self._anchor_fitness(box_size, new_anchors, thr) + if 
new_fitness > fitness: + fitness = new_fitness + anchors = new_anchors.clone() + + prog_bar.update() + print('\n') + # sort small to large + anchors = anchors[torch.argsort(anchors.prod(1))] + self.logger.info(f'Anchor cluster finish. fitness = {fitness:.4f}') + + return anchors + + def anchor_metric(self, + box_size: Tensor, + anchors: Tensor, + threshold: float = 4.0) -> Tuple: + """compute boxes metric with anchors. + + Args: + box_size (Tensor): The size of the bxes, which shape + is (box_num, 2), the number 2 means width and height. + anchors (Tensor): The size of the bxes, which shape + is (anchor_num, 2), the number 2 means width and height. + threshold (float): the compare threshold of ratio + + Returns: + Tuple: a tuple of metric result, best_ratio_mean and mean_matched + """ + # step1: augment scale + # According to the uniform distribution,the scaling scale between + # augment_min and augment_max is randomly generated + scale = np.random.uniform( + self.augment_min, self.augment_max, size=(box_size.shape[0], 1)) + box_size = torch.tensor( + np.array( + [l[:, ] * s for s, l in zip(scale, + box_size.cpu().numpy())])).to( + box_size.device, + dtype=torch.float32) + # step2: calculate ratio + min_ratio, best_ratio = self._metric(box_size, anchors) + mean_matched = (min_ratio > 1 / threshold).float().sum(1).mean() + best_ratio_mean = (best_ratio > 1 / threshold).float().mean() + return best_ratio_mean, mean_matched + + def _filter_box(self, box_size: Tensor) -> Tensor: + small_cnt = (box_size < 3.0).any(1).sum() + if small_cnt: + self.logger.warning( + f'Extremely small objects found: {small_cnt} ' + f'of {len(box_size)} labels are <3 pixels in size') + # filter > 2 pixels + filter_sizes = box_size[(box_size >= 2.0).any(1)] + return filter_sizes + + def _anchor_fitness(self, box_size: Tensor, anchors: Tensor, thr: float): + """mutation fitness.""" + _, best = self._metric(box_size, anchors) + return (best * (best > thr).float()).mean() + + def _metric(self, box_size: Tensor, anchors: Tensor) -> Tuple: + """compute boxes metric with anchors. + + Args: + box_size (Tensor): The size of the bxes, which shape is + (box_num, 2), the number 2 means width and height. + anchors (Tensor): The size of the bxes, which shape is + (anchor_num, 2), the number 2 means width and height. + + Returns: + Tuple: a tuple of metric result, min_ratio and best_ratio + """ + + # ratio means the (width_1/width_2 and height_1/height_2) ratio of each + # box and anchor, the ratio shape is torch.Size([box_num,anchor_num,2]) + ratio = box_size[:, None] / anchors[None] + + # min_ratio records the min ratio of each box with all anchor, + # min_ratio.shape is torch.Size([box_num,anchor_num]) + # notice: + # smaller ratio means worse shape-match between boxes and anchors + min_ratio = torch.min(ratio, 1 / ratio).min(2)[0] + + # find the best shape-match ratio for each box + # box_best_ratio.shape is torch.Size([box_num]) + best_ratio = min_ratio.max(1)[0] + + return min_ratio, best_ratio + + +@TASK_UTILS.register_module() +class YOLODEAnchorOptimizer(BaseAnchorOptimizer): + """YOLO anchor optimizer using differential evolution algorithm. + + Args: + iters (int): Maximum iterations for k-means. + strategy (str): The differential evolution strategy to use. + Should be one of: + + - 'best1bin' + - 'best1exp' + - 'rand1exp' + - 'randtobest1exp' + - 'currenttobest1exp' + - 'best2exp' + - 'rand2exp' + - 'randtobest1bin' + - 'currenttobest1bin' + - 'best2bin' + - 'rand2bin' + - 'rand1bin' + + Default: 'best1bin'. 
+ population_size (int): Total population size of evolution algorithm. + Default: 15. + convergence_thr (float): Tolerance for convergence, the + optimizing stops when ``np.std(pop) <= abs(convergence_thr) + + convergence_thr * np.abs(np.mean(population_energies))``, + respectively. Default: 0.0001. + mutation (tuple[float]): Range of dithering randomly changes the + mutation constant. Default: (0.5, 1). + recombination (float): Recombination constant of crossover probability. + Default: 0.7. + """ + + def __init__(self, + iters, + strategy='best1bin', + population_size=15, + convergence_thr=0.0001, + mutation=(0.5, 1), + recombination=0.7, + **kwargs): + + super().__init__(**kwargs) + + self.iters = iters + self.strategy = strategy + self.population_size = population_size + self.convergence_thr = convergence_thr + self.mutation = mutation + self.recombination = recombination + + def optimize(self): + anchors = self.differential_evolution() + self.save_result(anchors, self.out_dir) + + def differential_evolution(self): + bboxes = self.get_zero_center_bbox_tensor() + + bounds = [] + for i in range(self.num_anchors): + bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])]) + + result = differential_evolution( + func=self.avg_iou_cost, + bounds=bounds, + args=(bboxes, ), + strategy=self.strategy, + maxiter=self.iters, + popsize=self.population_size, + tol=self.convergence_thr, + mutation=self.mutation, + recombination=self.recombination, + updating='immediate', + disp=True) + self.logger.info( + f'Anchor evolution finish. Average IOU: {1 - result.fun}') + anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])] + anchors = sorted(anchors, key=lambda x: x[0] * x[1]) + return anchors + + @staticmethod + def avg_iou_cost(anchor_params, bboxes): + assert len(anchor_params) % 2 == 0 + anchor_whs = torch.tensor( + [[w, h] + for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to( + bboxes.device, dtype=bboxes.dtype) + anchor_boxes = bbox_cxcywh_to_xyxy( + torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1)) + ious = bbox_overlaps(bboxes, anchor_boxes) + max_ious, _ = ious.max(1) + cost = 1 - max_ious.mean().item() + return cost diff --git a/mmyolo/engine/hooks/__init__.py b/mmyolo/engine/hooks/__init__.py index 0b8deebc8..a16a2b9ed 100644 --- a/mmyolo/engine/hooks/__init__.py +++ b/mmyolo/engine/hooks/__init__.py @@ -1,10 +1,11 @@ # Copyright (c) OpenMMLab. All rights reserved. from .ppyoloe_param_scheduler_hook import PPYOLOEParamSchedulerHook from .switch_to_deploy_hook import SwitchToDeployHook +from .yolo_auto_anchor_hook import YOLOAutoAnchorHook from .yolov5_param_scheduler_hook import YOLOv5ParamSchedulerHook from .yolox_mode_switch_hook import YOLOXModeSwitchHook __all__ = [ 'YOLOv5ParamSchedulerHook', 'YOLOXModeSwitchHook', 'SwitchToDeployHook', - 'PPYOLOEParamSchedulerHook' + 'PPYOLOEParamSchedulerHook', 'YOLOAutoAnchorHook' ] diff --git a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py new file mode 100644 index 000000000..c0ac41f97 --- /dev/null +++ b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +from mmengine.dist import broadcast, get_dist_info +from mmengine.hooks import Hook +from mmengine.logging import MMLogger +from mmengine.model import is_model_wrapper +from mmengine.runner import Runner + +from mmyolo.registry import HOOKS, TASK_UTILS + + +@HOOKS.register_module() +class YOLOAutoAnchorHook(Hook): + + def __init__(self, optimizer): + self.optimizer = optimizer + + def before_train(self, runner: Runner) -> None: + + if runner.iter > 0: + return + + model = runner.model + if is_model_wrapper(model): + model = model.module + + rank, _ = get_dist_info() + + weights = model.state_dict() + key = 'bbox_head.prior_generator.anchors' + anchors_tensor = weights[key] + device_m = weights[key].device + if rank == 0 and not runner._has_loaded: + runner_dataset = runner.train_dataloader.dataset + self.optimizer.update( + dataset=runner_dataset, + device=runner_dataset[0]['inputs'].device, + input_shape=runner.cfg['img_scale'], + logger=MMLogger.get_current_instance()) + + optimizer = TASK_UTILS.build(self.optimizer) + anchors = optimizer.optimize() + anchors_tensor = torch.tensor(anchors, device=device_m) + + broadcast(anchors_tensor) + weights[key] = anchors_tensor + model.load_state_dict(weights) + self.reinitialize_bbox_head(runner, model, device_m) + + def before_val(self, runner: Runner) -> None: + model = runner.model + if is_model_wrapper(model): + model = model.module + + prior_generator = model.bbox_head.prior_generator + device = prior_generator.anchors.device + self.reinitialize_bbox_head(runner, model, device) + + def before_test(self, runner: Runner) -> None: + + model = runner.model + if is_model_wrapper(model): + model = model.module + + prior_generator = model.bbox_head.prior_generator + device = prior_generator.anchors.device + self.reinitialize_bbox_head(runner, model, device) + + def reinitialize_bbox_head(self, runner: Runner, model, device) -> None: + priors_base_sizes = torch.tensor( + model.bbox_head.prior_generator.base_sizes, + dtype=torch.float, + device=device) + featmap_strides = torch.tensor( + model.bbox_head.featmap_strides, dtype=torch.float, + device=device)[:, None, None] + model.bbox_head.priors_base_sizes = priors_base_sizes / featmap_strides diff --git a/mmyolo/registry.py b/mmyolo/registry.py index 71f43e6cf..3d9752008 100644 --- a/mmyolo/registry.py +++ b/mmyolo/registry.py @@ -93,7 +93,9 @@ # manage task-specific modules like anchor generators and box coders TASK_UTILS = Registry( - 'task util', parent=MMENGINE_TASK_UTILS, locations=['mmyolo.models']) + 'task util', + parent=MMENGINE_TASK_UTILS, + locations=['mmyolo.core', 'mmyolo.models']) # manage visualizer VISUALIZERS = Registry( diff --git a/mmyolo/utils/setup_env.py b/mmyolo/utils/setup_env.py index f51ed928c..727234dd5 100644 --- a/mmyolo/utils/setup_env.py +++ b/mmyolo/utils/setup_env.py @@ -19,6 +19,7 @@ def register_all_modules(init_default_scope: bool = True): import mmdet.engine # noqa: F401,F403 import mmdet.visualization # noqa: F401,F403 + import mmyolo.core # noqa: F401,F403 import mmyolo.datasets # noqa: F401,F403 import mmyolo.engine # noqa: F401,F403 import mmyolo.models # noqa: F401,F403 From b2e383862f252d54ec51672133a9cd52a90a6f35 Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Thu, 16 Mar 2023 22:04:57 +0800 Subject: [PATCH 02/20] del anchor generator --- mmyolo/core/anchor/__init__.py | 2 - mmyolo/core/anchor/anchor_generator.py | 40 -------------------- mmyolo/engine/hooks/yolo_auto_anchor_hook.py | 36 ++++++++++-------- 
mmyolo/models/dense_heads/yolov5_head.py | 1 - tools/test.py | 8 ++++ tools/train.py | 5 +++ 6 files changed, 34 insertions(+), 58 deletions(-) delete mode 100644 mmyolo/core/anchor/anchor_generator.py diff --git a/mmyolo/core/anchor/__init__.py b/mmyolo/core/anchor/__init__.py index 9b1fafc3b..2ac4f9533 100644 --- a/mmyolo/core/anchor/__init__.py +++ b/mmyolo/core/anchor/__init__.py @@ -1,11 +1,9 @@ # Copyright (c) OpenMMLab. All rights reserved. -from .anchor_generator import YOLOAutoAnchorGenerator from .anchor_optimizer import (YOLODEAnchorOptimizer, YOLOKMeansAnchorOptimizer, YOLOV5KMeansAnchorOptimizer) __all__ = [ - 'YOLOAutoAnchorGenerator', 'YOLOKMeansAnchorOptimizer', 'YOLOV5KMeansAnchorOptimizer', 'YOLODEAnchorOptimizer', diff --git a/mmyolo/core/anchor/anchor_generator.py b/mmyolo/core/anchor/anchor_generator.py deleted file mode 100644 index 15213c709..000000000 --- a/mmyolo/core/anchor/anchor_generator.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from mmdet.models.task_modules import YOLOAnchorGenerator -from torch.nn.modules.utils import _pair - -from mmyolo.registry import TASK_UTILS - - -@TASK_UTILS.register_module() -class YOLOAutoAnchorGenerator(nn.Module, YOLOAnchorGenerator): - """AutoAnchor generator for YOLO. - - Args: - strides (list[int] | list[tuple[int, int]]): Strides of anchors - in multiple feature levels. - base_sizes (list[list[tuple[int, int]]]): The basic sizes - of anchors in multiple levels. - """ - - def __init__(self, strides, base_sizes, use_box_type: bool = False): - super().__init__() - self.strides = [_pair(stride) for stride in strides] - self.centers = [(stride[0] / 2., stride[1] / 2.) - for stride in self.strides] - self.use_box_type = use_box_type - self.register_buffer('anchors', torch.tensor(base_sizes)) - - @property - def base_sizes(self): - T = [] - num_anchor_per_level = len(self.anchors[0]) - for base_sizes_per_level in self.anchors: - assert num_anchor_per_level == len(base_sizes_per_level) - T.append([_pair(base_size) for base_size in base_sizes_per_level]) - return T - - @property - def base_anchors(self): - return self.gen_base_anchors() diff --git a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py index c0ac41f97..7773bb400 100644 --- a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py +++ b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py @@ -19,17 +19,20 @@ def before_train(self, runner: Runner) -> None: if runner.iter > 0: return - model = runner.model if is_model_wrapper(model): model = model.module rank, _ = get_dist_info() + device_w = next(model.parameters()).device + anchors = torch.tensor( + runner.cfg.model.bbox_head.prior_generator.base_sizes, + device=device_w) + model.register_buffer('anchors', anchors) weights = model.state_dict() - key = 'bbox_head.prior_generator.anchors' + key = 'anchors' anchors_tensor = weights[key] - device_m = weights[key].device if rank == 0 and not runner._has_loaded: runner_dataset = runner.train_dataloader.dataset self.optimizer.update( @@ -40,21 +43,20 @@ def before_train(self, runner: Runner) -> None: optimizer = TASK_UTILS.build(self.optimizer) anchors = optimizer.optimize() - anchors_tensor = torch.tensor(anchors, device=device_m) + anchors_tensor = torch.tensor(anchors, device=device_w) broadcast(anchors_tensor) weights[key] = anchors_tensor model.load_state_dict(weights) - self.reinitialize_bbox_head(runner, model, device_m) + + self.reinitialize_bbox_head(runner, model) def 
before_val(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module - prior_generator = model.bbox_head.prior_generator - device = prior_generator.anchors.device - self.reinitialize_bbox_head(runner, model, device) + self.reinitialize_bbox_head(runner, model) def before_test(self, runner: Runner) -> None: @@ -62,15 +64,19 @@ def before_test(self, runner: Runner) -> None: if is_model_wrapper(model): model = model.module - prior_generator = model.bbox_head.prior_generator - device = prior_generator.anchors.device - self.reinitialize_bbox_head(runner, model, device) + self.reinitialize_bbox_head(runner, model) + + def reinitialize_bbox_head(self, runner: Runner, model) -> None: + anchors_tensor = model.state_dict()['anchors'] + base_sizes = anchors_tensor.tolist() + device = anchors_tensor.device + prior_generator = runner.cfg.model.bbox_head.prior_generator + prior_generator.update(base_sizes=base_sizes) + + model.bbox_head.prior_generator = TASK_UTILS.build(prior_generator) - def reinitialize_bbox_head(self, runner: Runner, model, device) -> None: priors_base_sizes = torch.tensor( - model.bbox_head.prior_generator.base_sizes, - dtype=torch.float, - device=device) + base_sizes, dtype=torch.float, device=device) featmap_strides = torch.tensor( model.bbox_head.featmap_strides, dtype=torch.float, device=device)[:, None, None] diff --git a/mmyolo/models/dense_heads/yolov5_head.py b/mmyolo/models/dense_heads/yolov5_head.py index c49d08518..5d82e3579 100644 --- a/mmyolo/models/dense_heads/yolov5_head.py +++ b/mmyolo/models/dense_heads/yolov5_head.py @@ -115,7 +115,6 @@ def forward(self, x: Tuple[Tensor]) -> Tuple[List]: def forward_single(self, x: Tensor, convs: nn.Module) -> Tuple[Tensor, Tensor, Tensor]: """Forward feature of a single scale level.""" - pred_map = convs(x) bs, _, ny, nx = pred_map.shape pred_map = pred_map.view(bs, self.num_base_priors, self.num_out_attrib, diff --git a/tools/test.py b/tools/test.py index c05defe3c..687ddbbbd 100644 --- a/tools/test.py +++ b/tools/test.py @@ -35,6 +35,8 @@ def parse_args(): '--tta', action='store_true', help='Whether to use test time augmentation') + parser.add_argument( + '--autoanchor', action='store_true', help='Whether to use autoanchor') parser.add_argument( '--show', action='store_true', help='show prediction results') parser.add_argument( @@ -98,6 +100,9 @@ def main(): if args.deploy: cfg.custom_hooks.append(dict(type='SwitchToDeployHook')) + if args.autoanchor: + cfg.custom_hooks.append(cfg.autoanchor_hook) + # add `format_only` and `outfile_prefix` into cfg if args.json_prefix is not None: cfg_json = { @@ -126,6 +131,9 @@ def main(): test_data_cfg.batch_shapes_cfg = None test_data_cfg.pipeline = cfg.tta_pipeline + if args.autoanchor is not None: + cfg.custom_hooks.append(cfg.autoanchor_hook) + # build the runner from config if 'runner_type' not in cfg: # build the default runner diff --git a/tools/train.py b/tools/train.py index 1060b631a..60c692f70 100644 --- a/tools/train.py +++ b/tools/train.py @@ -21,6 +21,8 @@ def parse_args(): action='store_true', default=False, help='enable automatic-mixed-precision training') + parser.add_argument( + '--autoanchor', action='store_true', help='Whether to use autoanchor') parser.add_argument( '--resume', nargs='?', @@ -87,6 +89,9 @@ def main(): cfg.optim_wrapper.type = 'AmpOptimWrapper' cfg.optim_wrapper.loss_scale = 'dynamic' + if args.autoanchor: + cfg.custom_hooks.append(cfg.autoanchor_hook) + # resume is determined in this priority: resume from > 
auto_resume if args.resume == 'auto': cfg.resume = True From 43dcb9bab8a0d802bf8c65986f77fcc68e620d0e Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Thu, 16 Mar 2023 22:08:24 +0800 Subject: [PATCH 03/20] del configs --- configs/_base_/autoanchor.py | 6 ++++++ .../yolov5_s-v61_syncbn_8xb16-300e_coco_aa.py | 13 ------------- .../yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py | 11 +++++++---- 3 files changed, 13 insertions(+), 17 deletions(-) create mode 100644 configs/_base_/autoanchor.py delete mode 100644 configs/yolov5/autoanchor/yolov5_s-v61_syncbn_8xb16-300e_coco_aa.py diff --git a/configs/_base_/autoanchor.py b/configs/_base_/autoanchor.py new file mode 100644 index 000000000..d7bae3e42 --- /dev/null +++ b/configs/_base_/autoanchor.py @@ -0,0 +1,6 @@ +autoanchor_hook = dict( + type='YOLOAutoAnchorHook', + optimizer=dict( + type='YOLOKMeansAnchorOptimizer', + iters=1000, + num_anchor_per_level=[3, 3, 3])) diff --git a/configs/yolov5/autoanchor/yolov5_s-v61_syncbn_8xb16-300e_coco_aa.py b/configs/yolov5/autoanchor/yolov5_s-v61_syncbn_8xb16-300e_coco_aa.py deleted file mode 100644 index 7d75a7b0e..000000000 --- a/configs/yolov5/autoanchor/yolov5_s-v61_syncbn_8xb16-300e_coco_aa.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = '../yolov5_s-v61_syncbn_8xb16-300e_coco.py' - -model = dict( - bbox_head=dict(prior_generator=dict(type='YOLOAutoAnchorGenerator'))) - -custom_hooks = [ - dict( - type='YOLOAutoAnchorHook', - optimizer=dict( - type='YOLOKMeansAnchorOptimizer', - iters=1000, - num_anchor_per_level=[3, 3, 3])) -] diff --git a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py index 305034132..c33963e5c 100644 --- a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py +++ b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py @@ -1,11 +1,14 @@ -_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] +_base_ = [ + '../_base_/default_runtime.py', '../_base_/det_p5_tta.py', + '../_base_/autoanchor.py' +] # ========================Frequently modified parameters====================== # -----data related----- -data_root = 'data/coco/' # Root path of data +data_root = '/Users/yechenzhi/data/coco/' # Root path of data # Path of train annotation file -train_ann_file = 'annotations/instances_train2017.json' -train_data_prefix = 'train2017/' # Prefix of train image path +train_ann_file = 'annotations/instances_val2017.json' +train_data_prefix = 'val2017/' # Prefix of train image path # Path of val annotation file val_ann_file = 'annotations/instances_val2017.json' val_data_prefix = 'val2017/' # Prefix of val image path From 7348c6c8ee11e4d62bb364fb22ee3dc73582a419 Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Thu, 16 Mar 2023 22:14:46 +0800 Subject: [PATCH 04/20] del local configs --- configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py index c33963e5c..3e9ab88a8 100644 --- a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py +++ b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py @@ -5,10 +5,10 @@ # ========================Frequently modified parameters====================== # -----data related----- -data_root = '/Users/yechenzhi/data/coco/' # Root path of data +data_root = 'data/coco/' # Root path of data # Path of train annotation file -train_ann_file = 
'annotations/instances_val2017.json' -train_data_prefix = 'val2017/' # Prefix of train image path +train_ann_file = 'annotations/instances_train2017.json' +train_data_prefix = 'train2017/' # Prefix of train image path # Path of val annotation file val_ann_file = 'annotations/instances_val2017.json' val_data_prefix = 'val2017/' # Prefix of val image path From d464820db01f15cac89618c0f786c5071e8fe898 Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Thu, 16 Mar 2023 22:17:22 +0800 Subject: [PATCH 05/20] del change --- mmyolo/models/dense_heads/yolov5_head.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mmyolo/models/dense_heads/yolov5_head.py b/mmyolo/models/dense_heads/yolov5_head.py index 5d82e3579..c49d08518 100644 --- a/mmyolo/models/dense_heads/yolov5_head.py +++ b/mmyolo/models/dense_heads/yolov5_head.py @@ -115,6 +115,7 @@ def forward(self, x: Tuple[Tensor]) -> Tuple[List]: def forward_single(self, x: Tensor, convs: nn.Module) -> Tuple[Tensor, Tensor, Tensor]: """Forward feature of a single scale level.""" + pred_map = convs(x) bs, _, ny, nx = pred_map.shape pred_map = pred_map.view(bs, self.num_base_priors, self.num_out_attrib, From ecf7a42b2d3c4c436d5b5d6580fa039bc8150886 Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Thu, 16 Mar 2023 22:39:47 +0800 Subject: [PATCH 06/20] add logs --- mmyolo/engine/hooks/yolo_auto_anchor_hook.py | 6 ++++-- tools/test.py | 2 ++ tools/train.py | 2 ++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py index 7773bb400..37eb25e55 100644 --- a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py +++ b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py @@ -22,6 +22,7 @@ def before_train(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module + print('begin reload optimized anchors') rank, _ = get_dist_info() device_w = next(model.parameters()).device @@ -52,10 +53,11 @@ def before_train(self, runner: Runner) -> None: self.reinitialize_bbox_head(runner, model) def before_val(self, runner: Runner) -> None: + model = runner.model if is_model_wrapper(model): model = model.module - + print('begin reload optimized anchors') self.reinitialize_bbox_head(runner, model) def before_test(self, runner: Runner) -> None: @@ -63,7 +65,7 @@ def before_test(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module - + print('begin reload optimized anchors') self.reinitialize_bbox_head(runner, model) def reinitialize_bbox_head(self, runner: Runner, model) -> None: diff --git a/tools/test.py b/tools/test.py index 687ddbbbd..5d74cbe47 100644 --- a/tools/test.py +++ b/tools/test.py @@ -101,6 +101,8 @@ def main(): cfg.custom_hooks.append(dict(type='SwitchToDeployHook')) if args.autoanchor: + assert cfg.model.bbox_head.prior_generator.type \ + == 'mmdet.YOLOAnchorGenerator' cfg.custom_hooks.append(cfg.autoanchor_hook) # add `format_only` and `outfile_prefix` into cfg diff --git a/tools/train.py b/tools/train.py index 60c692f70..58f0e22b5 100644 --- a/tools/train.py +++ b/tools/train.py @@ -90,6 +90,8 @@ def main(): cfg.optim_wrapper.loss_scale = 'dynamic' if args.autoanchor: + assert cfg.model.bbox_head.prior_generator.type \ + == 'mmdet.YOLOAnchorGenerator' cfg.custom_hooks.append(cfg.autoanchor_hook) # resume is determined in this priority: resume from > auto_resume From a3862382cbe19016ceb0beb1be10735c48bf7728 Mon Sep 17 00:00:00 2001 
From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Sat, 18 Mar 2023 10:34:10 +0800 Subject: [PATCH 07/20] adapt autoanchor to ema --- mmyolo/engine/hooks/yolo_auto_anchor_hook.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py index 37eb25e55..720562325 100644 --- a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py +++ b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py @@ -4,6 +4,7 @@ from mmengine.hooks import Hook from mmengine.logging import MMLogger from mmengine.model import is_model_wrapper +from mmengine.registry import MODELS from mmengine.runner import Runner from mmyolo.registry import HOOKS, TASK_UTILS @@ -22,7 +23,7 @@ def before_train(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module - print('begin reload optimized anchors') + print('begin reloading optimized anchors') rank, _ = get_dist_info() device_w = next(model.parameters()).device @@ -51,22 +52,28 @@ def before_train(self, runner: Runner) -> None: model.load_state_dict(weights) self.reinitialize_bbox_head(runner, model) + runner.hooks[2].ema_model = MODELS.build( + runner.hooks[2].ema_cfg, default_args=dict(model=model)) def before_val(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module - print('begin reload optimized anchors') + print('begin reloading optimized anchors') self.reinitialize_bbox_head(runner, model) + runner.hooks[2].ema_model = MODELS.build( + runner.hooks[2].ema_cfg, default_args=dict(model=model)) def before_test(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module - print('begin reload optimized anchors') + print('begin reloading optimized anchors') self.reinitialize_bbox_head(runner, model) + runner.hooks[2].ema_model = MODELS.build( + runner.hooks[2].ema_cfg, default_args=dict(model=model)) def reinitialize_bbox_head(self, runner: Runner, model) -> None: anchors_tensor = model.state_dict()['anchors'] From 450dbe4d64908f01e6ef90ebe233ae49588ddccc Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Sat, 18 Mar 2023 11:32:15 +0800 Subject: [PATCH 08/20] fix format --- mmyolo/engine/hooks/yolo_auto_anchor_hook.py | 31 ++++++++++---------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py index 720562325..c4c37179a 100644 --- a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py +++ b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py @@ -1,13 +1,13 @@ # Copyright (c) OpenMMLab. All rights reserved. 
import torch from mmengine.dist import broadcast, get_dist_info -from mmengine.hooks import Hook +from mmengine.hooks import EMAHook, Hook from mmengine.logging import MMLogger from mmengine.model import is_model_wrapper -from mmengine.registry import MODELS +from mmengine.registry import HOOKS, MODELS from mmengine.runner import Runner -from mmyolo.registry import HOOKS, TASK_UTILS +from mmyolo.registry import TASK_UTILS @HOOKS.register_module() @@ -23,7 +23,7 @@ def before_train(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module - print('begin reloading optimized anchors') + print('begin reload optimized anchors') rank, _ = get_dist_info() device_w = next(model.parameters()).device @@ -51,31 +51,25 @@ def before_train(self, runner: Runner) -> None: weights[key] = anchors_tensor model.load_state_dict(weights) - self.reinitialize_bbox_head(runner, model) - runner.hooks[2].ema_model = MODELS.build( - runner.hooks[2].ema_cfg, default_args=dict(model=model)) + self.reinitialize(runner, model) def before_val(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module - print('begin reloading optimized anchors') - self.reinitialize_bbox_head(runner, model) - runner.hooks[2].ema_model = MODELS.build( - runner.hooks[2].ema_cfg, default_args=dict(model=model)) + print('begin reload optimized anchors') + self.reinitialize(runner, model) def before_test(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module - print('begin reloading optimized anchors') - self.reinitialize_bbox_head(runner, model) - runner.hooks[2].ema_model = MODELS.build( - runner.hooks[2].ema_cfg, default_args=dict(model=model)) + print('begin reload optimized anchors') + self.reinitialize(runner, model) - def reinitialize_bbox_head(self, runner: Runner, model) -> None: + def reinitialize(self, runner: Runner, model) -> None: anchors_tensor = model.state_dict()['anchors'] base_sizes = anchors_tensor.tolist() device = anchors_tensor.device @@ -90,3 +84,8 @@ def reinitialize_bbox_head(self, runner: Runner, model) -> None: model.bbox_head.featmap_strides, dtype=torch.float, device=device)[:, None, None] model.bbox_head.priors_base_sizes = priors_base_sizes / featmap_strides + + for hook in runner.hooks: + if isinstance(hook, EMAHook): + hook.ema_model = MODELS.build( + hook.ema_cfg, default_args=dict(model=model)) From d0f5cba411f82d196023b17e548227cc7057f57a Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Sat, 18 Mar 2023 11:35:03 +0800 Subject: [PATCH 09/20] fix format --- mmyolo/engine/hooks/yolo_auto_anchor_hook.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py index c4c37179a..093ed1dc5 100644 --- a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py +++ b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py @@ -4,10 +4,10 @@ from mmengine.hooks import EMAHook, Hook from mmengine.logging import MMLogger from mmengine.model import is_model_wrapper -from mmengine.registry import HOOKS, MODELS +from mmengine.registry import MODELS from mmengine.runner import Runner -from mmyolo.registry import TASK_UTILS +from mmyolo.registry import HOOKS, TASK_UTILS @HOOKS.register_module() @@ -23,7 +23,7 @@ def before_train(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module - print('begin reload optimized anchors') + 
print('begin reloading optimized anchors') rank, _ = get_dist_info() device_w = next(model.parameters()).device @@ -58,7 +58,7 @@ def before_val(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module - print('begin reload optimized anchors') + print('begin reloading optimized anchors') self.reinitialize(runner, model) def before_test(self, runner: Runner) -> None: @@ -66,7 +66,7 @@ def before_test(self, runner: Runner) -> None: model = runner.model if is_model_wrapper(model): model = model.module - print('begin reload optimized anchors') + print('begin reloading optimized anchors') self.reinitialize(runner, model) def reinitialize(self, runner: Runner, model) -> None: From 90693400d6f848cc4ae103ad138dc84fdc97a314 Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Sat, 18 Mar 2023 18:27:42 +0800 Subject: [PATCH 10/20] fix ema logic --- mmyolo/engine/hooks/yolo_auto_anchor_hook.py | 39 ++++++++++++-------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py index 093ed1dc5..0021dd4ca 100644 --- a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py +++ b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py @@ -1,10 +1,9 @@ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmengine.dist import broadcast, get_dist_info -from mmengine.hooks import EMAHook, Hook +from mmengine.hooks import Hook from mmengine.logging import MMLogger from mmengine.model import is_model_wrapper -from mmengine.registry import MODELS from mmengine.runner import Runner from mmyolo.registry import HOOKS, TASK_UTILS @@ -13,9 +12,27 @@ @HOOKS.register_module() class YOLOAutoAnchorHook(Hook): + priority = 48 + def __init__(self, optimizer): self.optimizer = optimizer + def before_run(self, runner) -> None: + """Create an ema copy of the model. + + Args: + runner (Runner): The runner of the training process. 
+ """ + model = runner.model + if is_model_wrapper(model): + model = model.module + + device = next(model.parameters()).device + anchors = torch.tensor( + runner.cfg.model.bbox_head.prior_generator.base_sizes, + device=device) + model.register_buffer('anchors', anchors) + def before_train(self, runner: Runner) -> None: if runner.iter > 0: @@ -26,15 +43,9 @@ def before_train(self, runner: Runner) -> None: print('begin reloading optimized anchors') rank, _ = get_dist_info() - device_w = next(model.parameters()).device - anchors = torch.tensor( - runner.cfg.model.bbox_head.prior_generator.base_sizes, - device=device_w) - model.register_buffer('anchors', anchors) weights = model.state_dict() - key = 'anchors' - anchors_tensor = weights[key] + anchors_tensor = weights['anchors'] if rank == 0 and not runner._has_loaded: runner_dataset = runner.train_dataloader.dataset self.optimizer.update( @@ -45,10 +56,11 @@ def before_train(self, runner: Runner) -> None: optimizer = TASK_UTILS.build(self.optimizer) anchors = optimizer.optimize() - anchors_tensor = torch.tensor(anchors, device=device_w) + device = next(model.parameters()).device + anchors_tensor = torch.tensor(anchors, device=device) broadcast(anchors_tensor) - weights[key] = anchors_tensor + weights['anchors'] = anchors_tensor model.load_state_dict(weights) self.reinitialize(runner, model) @@ -84,8 +96,3 @@ def reinitialize(self, runner: Runner, model) -> None: model.bbox_head.featmap_strides, dtype=torch.float, device=device)[:, None, None] model.bbox_head.priors_base_sizes = priors_base_sizes / featmap_strides - - for hook in runner.hooks: - if isinstance(hook, EMAHook): - hook.ema_model = MODELS.build( - hook.ema_cfg, default_args=dict(model=model)) From 2c41f7df243fafc56244e2f2c2260b1f2ac1124b Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Sun, 19 Mar 2023 10:11:35 +0800 Subject: [PATCH 11/20] fix format --- configs/_base_/autoanchor.py | 19 +++++++++- mmyolo/core/__init__.py | 2 -- mmyolo/core/anchor/__init__.py | 10 ------ mmyolo/engine/hooks/yolo_auto_anchor_hook.py | 4 --- mmyolo/registry.py | 4 +-- mmyolo/utils/__init__.py | 6 +++- .../anchor_optimizers.py} | 35 +++++++++++++++++-- mmyolo/utils/setup_env.py | 1 - tools/test.py | 13 ++++--- tools/train.py | 9 +++-- 10 files changed, 72 insertions(+), 31 deletions(-) delete mode 100644 mmyolo/core/__init__.py delete mode 100644 mmyolo/core/anchor/__init__.py rename mmyolo/{core/anchor/anchor_optimizer.py => utils/anchor_optimizers.py} (93%) diff --git a/configs/_base_/autoanchor.py b/configs/_base_/autoanchor.py index d7bae3e42..559c9844b 100644 --- a/configs/_base_/autoanchor.py +++ b/configs/_base_/autoanchor.py @@ -1,6 +1,23 @@ -autoanchor_hook = dict( +k_means_autoanchor_hook = dict( type='YOLOAutoAnchorHook', optimizer=dict( type='YOLOKMeansAnchorOptimizer', iters=1000, num_anchor_per_level=[3, 3, 3])) + +de_autoanchor_hook = dict( + type='YOLOAutoAnchorHook', + optimizer=dict( + type='YOLODEAnchorOptimizer', + iters=1000, + num_anchor_per_level=[3, 3, 3])) + +v5_k_means_autoanchor_hook = dict( + type='YOLOAutoAnchorHook', + optimizer=dict( + type='YOLOV5KMeansAnchorOptimizer', + iters=1000, + num_anchor_per_level=[3, 3, 3], + prior_match_thr=4.0, + mutation_args=[0.9, 0.1], + augment_args=[0.9, 0.1])) diff --git a/mmyolo/core/__init__.py b/mmyolo/core/__init__.py deleted file mode 100644 index 0df1ce61e..000000000 --- a/mmyolo/core/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .anchor import * # noqa: F401, F403 diff --git a/mmyolo/core/anchor/__init__.py b/mmyolo/core/anchor/__init__.py deleted file mode 100644 index 2ac4f9533..000000000 --- a/mmyolo/core/anchor/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .anchor_optimizer import (YOLODEAnchorOptimizer, - YOLOKMeansAnchorOptimizer, - YOLOV5KMeansAnchorOptimizer) - -__all__ = [ - 'YOLOKMeansAnchorOptimizer', - 'YOLOV5KMeansAnchorOptimizer', - 'YOLODEAnchorOptimizer', -] diff --git a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py index 0021dd4ca..f5de308e8 100644 --- a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py +++ b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py @@ -18,11 +18,7 @@ def __init__(self, optimizer): self.optimizer = optimizer def before_run(self, runner) -> None: - """Create an ema copy of the model. - Args: - runner (Runner): The runner of the training process. - """ model = runner.model if is_model_wrapper(model): model = model.module diff --git a/mmyolo/registry.py b/mmyolo/registry.py index 3d9752008..71f43e6cf 100644 --- a/mmyolo/registry.py +++ b/mmyolo/registry.py @@ -93,9 +93,7 @@ # manage task-specific modules like anchor generators and box coders TASK_UTILS = Registry( - 'task util', - parent=MMENGINE_TASK_UTILS, - locations=['mmyolo.core', 'mmyolo.models']) + 'task util', parent=MMENGINE_TASK_UTILS, locations=['mmyolo.models']) # manage visualizer VISUALIZERS = Registry( diff --git a/mmyolo/utils/__init__.py b/mmyolo/utils/__init__.py index f4e968494..d1547fb88 100644 --- a/mmyolo/utils/__init__.py +++ b/mmyolo/utils/__init__.py @@ -1,9 +1,13 @@ # Copyright (c) OpenMMLab. All rights reserved. +from .anchor_optimizers import (YOLODEAnchorOptimizer, + YOLOKMeansAnchorOptimizer, + YOLOV5KMeansAnchorOptimizer) from .collect_env import collect_env from .misc import is_metainfo_lower, switch_to_deploy from .setup_env import register_all_modules __all__ = [ 'register_all_modules', 'collect_env', 'switch_to_deploy', - 'is_metainfo_lower' + 'is_metainfo_lower', 'YOLOKMeansAnchorOptimizer', + 'YOLOV5KMeansAnchorOptimizer', 'YOLODEAnchorOptimizer' ] diff --git a/mmyolo/core/anchor/anchor_optimizer.py b/mmyolo/utils/anchor_optimizers.py similarity index 93% rename from mmyolo/core/anchor/anchor_optimizer.py rename to mmyolo/utils/anchor_optimizers.py index ef0a6cd68..910775707 100644 --- a/mmyolo/core/anchor/anchor_optimizer.py +++ b/mmyolo/utils/anchor_optimizers.py @@ -1,4 +1,33 @@ # Copyright (c) OpenMMLab. All rights reserved. +"""Optimize anchor settings on a specific dataset. + +This script provides three methods to optimize YOLO anchors including k-means +anchor cluster, differential evolution and v5-k-means. You can use +``--algorithm k-means``, ``--algorithm differential_evolution`` and +``--algorithm v5-k-means`` to switch those methods. 
+ +Example: + Use k-means anchor cluster:: + + python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ + --algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ + --out-dir ${OUT_DIR} + + Use differential evolution to optimize anchors:: + + python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ + --algorithm differential_evolution \ + --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ + --out-dir ${OUT_DIR} + + Use v5-k-means to optimize anchors:: + + python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ + --algorithm v5-k-means \ + --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ + --prior_match_thr ${PRIOR_MATCH_THR} \ + --out-dir ${OUT_DIR} +""" import os.path as osp import random from typing import Tuple @@ -230,7 +259,8 @@ def optimize(self): best_ratio, mean_matched = self.anchor_metric(bbox_whs, anchors) self.logger.info(f'{mean_matched:.2f} anchors/target {best_ratio:.3f} ' 'Best Possible Recall (BPR). ') - self.save_result(anchors.tolist(), self.out_dir) + anchor_results = self.save_result(anchors.tolist(), self.out_dir) + return anchor_results def anchor_generate(self, box_size: Tensor, @@ -453,7 +483,8 @@ def __init__(self, def optimize(self): anchors = self.differential_evolution() - self.save_result(anchors, self.out_dir) + anchor_results = self.save_result(anchors, self.out_dir) + return anchor_results def differential_evolution(self): bboxes = self.get_zero_center_bbox_tensor() diff --git a/mmyolo/utils/setup_env.py b/mmyolo/utils/setup_env.py index 727234dd5..f51ed928c 100644 --- a/mmyolo/utils/setup_env.py +++ b/mmyolo/utils/setup_env.py @@ -19,7 +19,6 @@ def register_all_modules(init_default_scope: bool = True): import mmdet.engine # noqa: F401,F403 import mmdet.visualization # noqa: F401,F403 - import mmyolo.core # noqa: F401,F403 import mmyolo.datasets # noqa: F401,F403 import mmyolo.engine # noqa: F401,F403 import mmyolo.models # noqa: F401,F403 diff --git a/tools/test.py b/tools/test.py index 5d74cbe47..65f34f3ed 100644 --- a/tools/test.py +++ b/tools/test.py @@ -35,8 +35,7 @@ def parse_args(): '--tta', action='store_true', help='Whether to use test time augmentation') - parser.add_argument( - '--autoanchor', action='store_true', help='Whether to use autoanchor') + parser.add_argument('--autoanchor', help='types of autoanchor') parser.add_argument( '--show', action='store_true', help='show prediction results') parser.add_argument( @@ -133,8 +132,14 @@ def main(): test_data_cfg.batch_shapes_cfg = None test_data_cfg.pipeline = cfg.tta_pipeline - if args.autoanchor is not None: - cfg.custom_hooks.append(cfg.autoanchor_hook) + if args.autoanchor: + assert cfg.model.bbox_head.prior_generator.type \ + == 'mmdet.YOLOAnchorGenerator' + assert args.autoanchor in [ + 'k_means_autoanchor_hook', 'de_autoanchor_hook', + 'v5_k_means_autoanchor_hook' + ] + cfg.custom_hooks.append(cfg.get(args.autoanchor)) # build the runner from config if 'runner_type' not in cfg: diff --git a/tools/train.py b/tools/train.py index 58f0e22b5..87ccb8875 100644 --- a/tools/train.py +++ b/tools/train.py @@ -21,8 +21,7 @@ def parse_args(): action='store_true', default=False, help='enable automatic-mixed-precision training') - parser.add_argument( - '--autoanchor', action='store_true', help='Whether to use autoanchor') + parser.add_argument('--autoanchor', help='types of autoanchor') parser.add_argument( '--resume', nargs='?', @@ -92,7 +91,11 @@ def main(): if args.autoanchor: assert cfg.model.bbox_head.prior_generator.type \ == 'mmdet.YOLOAnchorGenerator' - 
cfg.custom_hooks.append(cfg.autoanchor_hook) + assert args.autoanchor in [ + 'k_means_autoanchor_hook', 'de_autoanchor_hook', + 'v5_k_means_autoanchor_hook' + ] + cfg.custom_hooks.append(cfg.get(args.autoanchor)) # resume is determined in this priority: resume from > auto_resume if args.resume == 'auto': From f13c038a43509d3051cc67398abb93d458a681cf Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Sun, 19 Mar 2023 10:31:43 +0800 Subject: [PATCH 12/20] fix assert --- configs/_base_/autoanchor.py | 6 +++--- configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py | 6 +++--- tools/test.py | 7 ++++--- tools/train.py | 7 ++++--- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/configs/_base_/autoanchor.py b/configs/_base_/autoanchor.py index 559c9844b..541d537dd 100644 --- a/configs/_base_/autoanchor.py +++ b/configs/_base_/autoanchor.py @@ -1,18 +1,18 @@ -k_means_autoanchor_hook = dict( +k_means_autoanchor = dict( type='YOLOAutoAnchorHook', optimizer=dict( type='YOLOKMeansAnchorOptimizer', iters=1000, num_anchor_per_level=[3, 3, 3])) -de_autoanchor_hook = dict( +de_autoanchor = dict( type='YOLOAutoAnchorHook', optimizer=dict( type='YOLODEAnchorOptimizer', iters=1000, num_anchor_per_level=[3, 3, 3])) -v5_k_means_autoanchor_hook = dict( +v5_k_means_autoanchor = dict( type='YOLOAutoAnchorHook', optimizer=dict( type='YOLOV5KMeansAnchorOptimizer', diff --git a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py index 3e9ab88a8..c33963e5c 100644 --- a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py +++ b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py @@ -5,10 +5,10 @@ # ========================Frequently modified parameters====================== # -----data related----- -data_root = 'data/coco/' # Root path of data +data_root = '/Users/yechenzhi/data/coco/' # Root path of data # Path of train annotation file -train_ann_file = 'annotations/instances_train2017.json' -train_data_prefix = 'train2017/' # Prefix of train image path +train_ann_file = 'annotations/instances_val2017.json' +train_data_prefix = 'val2017/' # Prefix of train image path # Path of val annotation file val_ann_file = 'annotations/instances_val2017.json' val_data_prefix = 'val2017/' # Prefix of val image path diff --git a/tools/test.py b/tools/test.py index 65f34f3ed..70643db61 100644 --- a/tools/test.py +++ b/tools/test.py @@ -136,9 +136,10 @@ def main(): assert cfg.model.bbox_head.prior_generator.type \ == 'mmdet.YOLOAnchorGenerator' assert args.autoanchor in [ - 'k_means_autoanchor_hook', 'de_autoanchor_hook', - 'v5_k_means_autoanchor_hook' - ] + 'k_means_autoanchor', 'de_autoanchor', + 'v5_k_means_autoanchor'], \ + 'only k_means_autoanchor, de_autoanchor, v5_k_means_autoanchor ' \ + 'are supported !' cfg.custom_hooks.append(cfg.get(args.autoanchor)) # build the runner from config diff --git a/tools/train.py b/tools/train.py index 87ccb8875..5cc101375 100644 --- a/tools/train.py +++ b/tools/train.py @@ -92,9 +92,10 @@ def main(): assert cfg.model.bbox_head.prior_generator.type \ == 'mmdet.YOLOAnchorGenerator' assert args.autoanchor in [ - 'k_means_autoanchor_hook', 'de_autoanchor_hook', - 'v5_k_means_autoanchor_hook' - ] + 'k_means_autoanchor', 'de_autoanchor', + 'v5_k_means_autoanchor'], \ + 'only k_means_autoanchor, de_autoanchor, v5_k_means_autoanchor ' \ + 'are supported !' 
cfg.custom_hooks.append(cfg.get(args.autoanchor)) # resume is determined in this priority: resume from > auto_resume From b082292ebbd25ded39540a4f12b2d90c048e1a48 Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Sun, 19 Mar 2023 10:34:20 +0800 Subject: [PATCH 13/20] del local change --- configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py index c33963e5c..3e9ab88a8 100644 --- a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py +++ b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py @@ -5,10 +5,10 @@ # ========================Frequently modified parameters====================== # -----data related----- -data_root = '/Users/yechenzhi/data/coco/' # Root path of data +data_root = 'data/coco/' # Root path of data # Path of train annotation file -train_ann_file = 'annotations/instances_val2017.json' -train_data_prefix = 'val2017/' # Prefix of train image path +train_ann_file = 'annotations/instances_train2017.json' +train_data_prefix = 'train2017/' # Prefix of train image path # Path of val annotation file val_ann_file = 'annotations/instances_val2017.json' val_data_prefix = 'val2017/' # Prefix of val image path From cb11b908524436867bae3708e819050dde28f25c Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Sun, 19 Mar 2023 10:45:53 +0800 Subject: [PATCH 14/20] add note --- mmyolo/engine/hooks/yolo_auto_anchor_hook.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py index f5de308e8..4bf8b0526 100644 --- a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py +++ b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py @@ -14,6 +14,8 @@ class YOLOAutoAnchorHook(Hook): priority = 48 + # YOLOAutoAnchorHook takes priority over EMAHook. 
+ def __init__(self, optimizer): self.optimizer = optimizer From 69c100279c4e5c2d8d4d281db76576919d1f7a76 Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Sun, 19 Mar 2023 11:00:56 +0800 Subject: [PATCH 15/20] add default autoanchor --- tools/test.py | 8 +++++++- tools/train.py | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/tools/test.py b/tools/test.py index 70643db61..5ebbcedb8 100644 --- a/tools/test.py +++ b/tools/test.py @@ -35,7 +35,13 @@ def parse_args(): '--tta', action='store_true', help='Whether to use test time augmentation') - parser.add_argument('--autoanchor', help='types of autoanchor') + parser.add_argument( + '--autoanchor', + choices=[ + 'k_means_autoanchor', 'de_autoanchor', 'v5_k_means_autoanchor' + ], + default='v5_k_means_autoanchor', + help='types of autoanchor') parser.add_argument( '--show', action='store_true', help='show prediction results') parser.add_argument( diff --git a/tools/train.py b/tools/train.py index 5cc101375..ff70dfa80 100644 --- a/tools/train.py +++ b/tools/train.py @@ -21,7 +21,6 @@ def parse_args(): action='store_true', default=False, help='enable automatic-mixed-precision training') - parser.add_argument('--autoanchor', help='types of autoanchor') parser.add_argument( '--resume', nargs='?', @@ -30,6 +29,13 @@ def parse_args(): help='If specify checkpoint path, resume from it, while if not ' 'specify, try to auto resume from the latest checkpoint ' 'in the work directory.') + parser.add_argument( + '--autoanchor', + choices=[ + 'k_means_autoanchor', 'de_autoanchor', 'v5_k_means_autoanchor' + ], + default='v5_k_means_autoanchor', + help='types of autoanchor') parser.add_argument( '--cfg-options', nargs='+', From f4d65b96559c810807e34fe7666dc5f2b9c99d9e Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Sun, 19 Mar 2023 11:09:09 +0800 Subject: [PATCH 16/20] fix default --- tools/test.py | 1 - tools/train.py | 1 - 2 files changed, 2 deletions(-) diff --git a/tools/test.py b/tools/test.py index 5ebbcedb8..f8076d7c0 100644 --- a/tools/test.py +++ b/tools/test.py @@ -40,7 +40,6 @@ def parse_args(): choices=[ 'k_means_autoanchor', 'de_autoanchor', 'v5_k_means_autoanchor' ], - default='v5_k_means_autoanchor', help='types of autoanchor') parser.add_argument( '--show', action='store_true', help='show prediction results') diff --git a/tools/train.py b/tools/train.py index ff70dfa80..090642d68 100644 --- a/tools/train.py +++ b/tools/train.py @@ -34,7 +34,6 @@ def parse_args(): choices=[ 'k_means_autoanchor', 'de_autoanchor', 'v5_k_means_autoanchor' ], - default='v5_k_means_autoanchor', help='types of autoanchor') parser.add_argument( '--cfg-options', From 873f9573745cbf59c24992ea15cc6e84b5d51148 Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Tue, 21 Mar 2023 22:34:08 +0800 Subject: [PATCH 17/20] add note to assure priority --- mmyolo/engine/hooks/yolo_auto_anchor_hook.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py index 4bf8b0526..b30db8719 100644 --- a/mmyolo/engine/hooks/yolo_auto_anchor_hook.py +++ b/mmyolo/engine/hooks/yolo_auto_anchor_hook.py @@ -17,7 +17,11 @@ class YOLOAutoAnchorHook(Hook): # YOLOAutoAnchorHook takes priority over EMAHook. 
def __init__(self, optimizer): + self.optimizer = optimizer + print('YOLOAutoAnchorHook should take priority over EMAHook, ' + 'the default priority of EMAHook is 49, so the priority of ' + 'YOLOAutoAnchorHook is 48') def before_run(self, runner) -> None: @@ -27,8 +31,7 @@ def before_run(self, runner) -> None: device = next(model.parameters()).device anchors = torch.tensor( - runner.cfg.model.bbox_head.prior_generator.base_sizes, - device=device) + model.bbox_head.prior_generator.base_sizes, device=device) model.register_buffer('anchors', anchors) def before_train(self, runner: Runner) -> None: From 94d43c091ef4072a4534cc628338008568dcc2a9 Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Tue, 21 Mar 2023 22:48:19 +0800 Subject: [PATCH 18/20] del offline autoanchors --- tools/analysis_tools/optimize_anchors.py | 487 +---------------------- 1 file changed, 2 insertions(+), 485 deletions(-) diff --git a/tools/analysis_tools/optimize_anchors.py b/tools/analysis_tools/optimize_anchors.py index 34d4d067a..689c8f64a 100644 --- a/tools/analysis_tools/optimize_anchors.py +++ b/tools/analysis_tools/optimize_anchors.py @@ -29,29 +29,15 @@ --out-dir ${OUT_DIR} """ import argparse -import os.path as osp -import random -from typing import Tuple -import numpy as np -import torch -from mmdet.structures.bbox import (bbox_cxcywh_to_xyxy, bbox_overlaps, - bbox_xyxy_to_cxcywh) from mmdet.utils import replace_cfg_vals, update_data_root from mmengine.config import Config -from mmengine.fileio import dump from mmengine.logging import MMLogger from mmengine.registry import init_default_scope -from mmengine.utils import ProgressBar -from scipy.optimize import differential_evolution -from torch import Tensor from mmyolo.registry import DATASETS - -try: - from scipy.cluster.vq import kmeans -except ImportError: - kmeans = None +from mmyolo.utils import (YOLODEAnchorOptimizer, YOLOKMeansAnchorOptimizer, + YOLOV5KMeansAnchorOptimizer) def parse_args(): @@ -107,475 +93,6 @@ def parse_args(): return args -class BaseAnchorOptimizer: - """Base class for anchor optimizer. - - Args: - dataset (obj:`Dataset`): Dataset object. - input_shape (list[int]): Input image shape of the model. - Format in [width, height]. - num_anchor_per_level (list[int]) : Number of anchors for each level. - logger (obj:`logging.Logger`): The logger for logging. - device (str, optional): Device used for calculating. - Default: 'cuda:0' - out_dir (str, optional): Path to save anchor optimize result. - Default: None - """ - - def __init__(self, - dataset, - input_shape, - num_anchor_per_level, - logger, - device='cuda:0', - out_dir=None): - self.dataset = dataset - self.input_shape = input_shape - self.num_anchor_per_level = num_anchor_per_level - self.num_anchors = sum(num_anchor_per_level) - self.logger = logger - self.device = device - self.out_dir = out_dir - bbox_whs, img_shapes = self.get_whs_and_shapes() - ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape]) - - # resize to input shape - self.bbox_whs = bbox_whs / ratios - - def get_whs_and_shapes(self): - """Get widths and heights of bboxes and shapes of images. - - Returns: - tuple[np.ndarray]: Array of bbox shapes and array of image - shapes with shape (num_bboxes, 2) in [width, height] format. 
- """ - self.logger.info('Collecting bboxes from annotation...') - bbox_whs = [] - img_shapes = [] - prog_bar = ProgressBar(len(self.dataset)) - for idx in range(len(self.dataset)): - data_info = self.dataset.get_data_info(idx) - img_shape = np.array([data_info['width'], data_info['height']]) - gt_instances = data_info['instances'] - for instance in gt_instances: - bbox = np.array(instance['bbox']) - gt_filter_sizes = bbox[2:4] - bbox[0:2] - img_shapes.append(img_shape) - bbox_whs.append(gt_filter_sizes) - - prog_bar.update() - print('\n') - bbox_whs = np.array(bbox_whs) - img_shapes = np.array(img_shapes) - self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.') - return bbox_whs, img_shapes - - def get_zero_center_bbox_tensor(self): - """Get a tensor of bboxes centered at (0, 0). - - Returns: - Tensor: Tensor of bboxes with shape (num_bboxes, 4) - in [xmin, ymin, xmax, ymax] format. - """ - whs = torch.from_numpy(self.bbox_whs).to( - self.device, dtype=torch.float32) - bboxes = bbox_cxcywh_to_xyxy( - torch.cat([torch.zeros_like(whs), whs], dim=1)) - return bboxes - - def optimize(self): - raise NotImplementedError - - def save_result(self, anchors, path=None): - - anchor_results = [] - start = 0 - for num in self.num_anchor_per_level: - end = num + start - anchor_results.append([(round(w), round(h)) - for w, h in anchors[start:end]]) - start = end - - self.logger.info(f'Anchor optimize result:{anchor_results}') - if path: - json_path = osp.join(path, 'anchor_optimize_result.json') - dump(anchor_results, json_path) - self.logger.info(f'Result saved in {json_path}') - - -class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer): - r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet. - `_. - - Args: - iters (int): Maximum iterations for k-means. - """ - - def __init__(self, iters, **kwargs): - - super().__init__(**kwargs) - self.iters = iters - - def optimize(self): - anchors = self.kmeans_anchors() - self.save_result(anchors, self.out_dir) - - def kmeans_anchors(self): - self.logger.info( - f'Start cluster {self.num_anchors} YOLO anchors with K-means...') - bboxes = self.get_zero_center_bbox_tensor() - cluster_center_idx = torch.randint( - 0, bboxes.shape[0], (self.num_anchors, )).to(self.device) - - assignments = torch.zeros((bboxes.shape[0], )).to(self.device) - cluster_centers = bboxes[cluster_center_idx] - if self.num_anchors == 1: - cluster_centers = self.kmeans_maximization(bboxes, assignments, - cluster_centers) - anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() - anchors = sorted(anchors, key=lambda x: x[0] * x[1]) - return anchors - - prog_bar = ProgressBar(self.iters) - for i in range(self.iters): - converged, assignments = self.kmeans_expectation( - bboxes, assignments, cluster_centers) - if converged: - self.logger.info(f'K-means process has converged at iter {i}.') - break - cluster_centers = self.kmeans_maximization(bboxes, assignments, - cluster_centers) - prog_bar.update() - print('\n') - avg_iou = bbox_overlaps(bboxes, - cluster_centers).max(1)[0].mean().item() - - anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() - anchors = sorted(anchors, key=lambda x: x[0] * x[1]) - self.logger.info(f'Anchor cluster finish. 
Average IOU: {avg_iou}') - - return anchors - - def kmeans_maximization(self, bboxes, assignments, centers): - """Maximization part of EM algorithm(Expectation-Maximization)""" - new_centers = torch.zeros_like(centers) - for i in range(centers.shape[0]): - mask = (assignments == i) - if mask.sum(): - new_centers[i, :] = bboxes[mask].mean(0) - return new_centers - - def kmeans_expectation(self, bboxes, assignments, centers): - """Expectation part of EM algorithm(Expectation-Maximization)""" - ious = bbox_overlaps(bboxes, centers) - closest = ious.argmax(1) - converged = (closest == assignments).all() - return converged, closest - - -class YOLOV5KMeansAnchorOptimizer(BaseAnchorOptimizer): - r"""YOLOv5 anchor optimizer using shape k-means. - Code refer to `ultralytics/yolov5. - `_. - - Args: - iters (int): Maximum iterations for k-means. - prior_match_thr (float): anchor-label width height - ratio threshold hyperparameter. - """ - - def __init__(self, - iters, - prior_match_thr=4.0, - mutation_args=[0.9, 0.1], - augment_args=[0.9, 1.1], - **kwargs): - - super().__init__(**kwargs) - self.iters = iters - self.prior_match_thr = prior_match_thr - [self.mutation_prob, self.mutation_sigma] = mutation_args - [self.augment_min, self.augment_max] = augment_args - - def optimize(self): - self.logger.info( - f'Start cluster {self.num_anchors} YOLOv5 anchors with K-means...') - - bbox_whs = torch.from_numpy(self.bbox_whs).to( - self.device, dtype=torch.float32) - anchors = self.anchor_generate( - bbox_whs, - num=self.num_anchors, - img_size=self.input_shape[0], - prior_match_thr=self.prior_match_thr, - iters=self.iters) - best_ratio, mean_matched = self.anchor_metric(bbox_whs, anchors) - self.logger.info(f'{mean_matched:.2f} anchors/target {best_ratio:.3f} ' - 'Best Possible Recall (BPR). ') - self.save_result(anchors.tolist(), self.out_dir) - - def anchor_generate(self, - box_size: Tensor, - num: int = 9, - img_size: int = 640, - prior_match_thr: float = 4.0, - iters: int = 1000) -> Tensor: - """cluster boxes metric with anchors. - - Args: - box_size (Tensor): The size of the bxes, which shape is - (box_num, 2),the number 2 means width and height. - num (int): number of anchors. 
- img_size (int): image size used for training - prior_match_thr (float): width/height ratio threshold - used for training - iters (int): iterations to evolve anchors using genetic algorithm - - Returns: - anchors (Tensor): kmeans evolved anchors - """ - - thr = 1 / prior_match_thr - - # step1: filter small bbox - box_size = self._filter_box(box_size) - assert num <= len(box_size) - - # step2: init anchors - if kmeans: - try: - self.logger.info( - 'beginning init anchors with scipy kmeans method') - # sigmas for whitening - sigmas = box_size.std(0).cpu().numpy() - anchors = kmeans( - box_size.cpu().numpy() / sigmas, num, iter=30)[0] * sigmas - # kmeans may return fewer points than requested - # if width/height is insufficient or too similar - assert num == len(anchors) - except Exception: - self.logger.warning( - 'scipy kmeans method cannot get enough points ' - 'because of width/height is insufficient or too similar, ' - 'now switching strategies from kmeans to random init.') - anchors = np.sort(np.random.rand(num * 2)).reshape( - num, 2) * img_size - else: - self.logger.info( - 'cannot found scipy package, switching strategies from kmeans ' - 'to random init, you can install scipy package to ' - 'get better anchor init') - anchors = np.sort(np.random.rand(num * 2)).reshape(num, - 2) * img_size - - self.logger.info('init done, beginning evolve anchors...') - # sort small to large - anchors = torch.tensor(anchors[np.argsort(anchors.prod(1))]).to( - box_size.device, dtype=torch.float32) - - # step3: evolve anchors use Genetic Algorithm - prog_bar = ProgressBar(iters) - fitness = self._anchor_fitness(box_size, anchors, thr) - cluster_shape = anchors.shape - - for _ in range(iters): - mutate_result = np.ones(cluster_shape) - # mutate until a change occurs (prevent duplicates) - while (mutate_result == 1).all(): - # mutate_result is scale factor of anchors, between 0.3 and 3 - mutate_result = ( - (np.random.random(cluster_shape) < self.mutation_prob) * - random.random() * np.random.randn(*cluster_shape) * - self.mutation_sigma + 1).clip(0.3, 3.0) - mutate_result = torch.from_numpy(mutate_result).to(box_size.device) - new_anchors = (anchors.clone() * mutate_result).clip(min=2.0) - new_fitness = self._anchor_fitness(box_size, new_anchors, thr) - if new_fitness > fitness: - fitness = new_fitness - anchors = new_anchors.clone() - - prog_bar.update() - print('\n') - # sort small to large - anchors = anchors[torch.argsort(anchors.prod(1))] - self.logger.info(f'Anchor cluster finish. fitness = {fitness:.4f}') - - return anchors - - def anchor_metric(self, - box_size: Tensor, - anchors: Tensor, - threshold: float = 4.0) -> Tuple: - """compute boxes metric with anchors. - - Args: - box_size (Tensor): The size of the bxes, which shape - is (box_num, 2), the number 2 means width and height. - anchors (Tensor): The size of the bxes, which shape - is (anchor_num, 2), the number 2 means width and height. 
- threshold (float): the compare threshold of ratio - - Returns: - Tuple: a tuple of metric result, best_ratio_mean and mean_matched - """ - # step1: augment scale - # According to the uniform distribution,the scaling scale between - # augment_min and augment_max is randomly generated - scale = np.random.uniform( - self.augment_min, self.augment_max, size=(box_size.shape[0], 1)) - box_size = torch.tensor( - np.array( - [l[:, ] * s for s, l in zip(scale, - box_size.cpu().numpy())])).to( - box_size.device, - dtype=torch.float32) - # step2: calculate ratio - min_ratio, best_ratio = self._metric(box_size, anchors) - mean_matched = (min_ratio > 1 / threshold).float().sum(1).mean() - best_ratio_mean = (best_ratio > 1 / threshold).float().mean() - return best_ratio_mean, mean_matched - - def _filter_box(self, box_size: Tensor) -> Tensor: - small_cnt = (box_size < 3.0).any(1).sum() - if small_cnt: - self.logger.warning( - f'Extremely small objects found: {small_cnt} ' - f'of {len(box_size)} labels are <3 pixels in size') - # filter > 2 pixels - filter_sizes = box_size[(box_size >= 2.0).any(1)] - return filter_sizes - - def _anchor_fitness(self, box_size: Tensor, anchors: Tensor, thr: float): - """mutation fitness.""" - _, best = self._metric(box_size, anchors) - return (best * (best > thr).float()).mean() - - def _metric(self, box_size: Tensor, anchors: Tensor) -> Tuple: - """compute boxes metric with anchors. - - Args: - box_size (Tensor): The size of the bxes, which shape is - (box_num, 2), the number 2 means width and height. - anchors (Tensor): The size of the bxes, which shape is - (anchor_num, 2), the number 2 means width and height. - - Returns: - Tuple: a tuple of metric result, min_ratio and best_ratio - """ - - # ratio means the (width_1/width_2 and height_1/height_2) ratio of each - # box and anchor, the ratio shape is torch.Size([box_num,anchor_num,2]) - ratio = box_size[:, None] / anchors[None] - - # min_ratio records the min ratio of each box with all anchor, - # min_ratio.shape is torch.Size([box_num,anchor_num]) - # notice: - # smaller ratio means worse shape-match between boxes and anchors - min_ratio = torch.min(ratio, 1 / ratio).min(2)[0] - - # find the best shape-match ratio for each box - # box_best_ratio.shape is torch.Size([box_num]) - best_ratio = min_ratio.max(1)[0] - - return min_ratio, best_ratio - - -class YOLODEAnchorOptimizer(BaseAnchorOptimizer): - """YOLO anchor optimizer using differential evolution algorithm. - - Args: - iters (int): Maximum iterations for k-means. - strategy (str): The differential evolution strategy to use. - Should be one of: - - - 'best1bin' - - 'best1exp' - - 'rand1exp' - - 'randtobest1exp' - - 'currenttobest1exp' - - 'best2exp' - - 'rand2exp' - - 'randtobest1bin' - - 'currenttobest1bin' - - 'best2bin' - - 'rand2bin' - - 'rand1bin' - - Default: 'best1bin'. - population_size (int): Total population size of evolution algorithm. - Default: 15. - convergence_thr (float): Tolerance for convergence, the - optimizing stops when ``np.std(pop) <= abs(convergence_thr) - + convergence_thr * np.abs(np.mean(population_energies))``, - respectively. Default: 0.0001. - mutation (tuple[float]): Range of dithering randomly changes the - mutation constant. Default: (0.5, 1). - recombination (float): Recombination constant of crossover probability. - Default: 0.7. 
- """ - - def __init__(self, - iters, - strategy='best1bin', - population_size=15, - convergence_thr=0.0001, - mutation=(0.5, 1), - recombination=0.7, - **kwargs): - - super().__init__(**kwargs) - - self.iters = iters - self.strategy = strategy - self.population_size = population_size - self.convergence_thr = convergence_thr - self.mutation = mutation - self.recombination = recombination - - def optimize(self): - anchors = self.differential_evolution() - self.save_result(anchors, self.out_dir) - - def differential_evolution(self): - bboxes = self.get_zero_center_bbox_tensor() - - bounds = [] - for i in range(self.num_anchors): - bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])]) - - result = differential_evolution( - func=self.avg_iou_cost, - bounds=bounds, - args=(bboxes, ), - strategy=self.strategy, - maxiter=self.iters, - popsize=self.population_size, - tol=self.convergence_thr, - mutation=self.mutation, - recombination=self.recombination, - updating='immediate', - disp=True) - self.logger.info( - f'Anchor evolution finish. Average IOU: {1 - result.fun}') - anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])] - anchors = sorted(anchors, key=lambda x: x[0] * x[1]) - return anchors - - @staticmethod - def avg_iou_cost(anchor_params, bboxes): - assert len(anchor_params) % 2 == 0 - anchor_whs = torch.tensor( - [[w, h] - for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to( - bboxes.device, dtype=bboxes.dtype) - anchor_boxes = bbox_cxcywh_to_xyxy( - torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1)) - ious = bbox_overlaps(bboxes, anchor_boxes) - max_ious, _ = ious.max(1) - cost = 1 - max_ious.mean().item() - return cost - - def main(): logger = MMLogger.get_current_instance() args = parse_args() From fecf16663d5e8db52988b9b514465c832c23d195 Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Tue, 21 Mar 2023 23:11:10 +0800 Subject: [PATCH 19/20] add comment and modify train.py --- configs/_base_/autoanchor.py | 23 ----------- .../yolov5_s-v61_syncbn_8xb16-300e_coco.py | 38 +++++++++++++++---- tools/train.py | 13 +------ 3 files changed, 33 insertions(+), 41 deletions(-) delete mode 100644 configs/_base_/autoanchor.py diff --git a/configs/_base_/autoanchor.py b/configs/_base_/autoanchor.py deleted file mode 100644 index 541d537dd..000000000 --- a/configs/_base_/autoanchor.py +++ /dev/null @@ -1,23 +0,0 @@ -k_means_autoanchor = dict( - type='YOLOAutoAnchorHook', - optimizer=dict( - type='YOLOKMeansAnchorOptimizer', - iters=1000, - num_anchor_per_level=[3, 3, 3])) - -de_autoanchor = dict( - type='YOLOAutoAnchorHook', - optimizer=dict( - type='YOLODEAnchorOptimizer', - iters=1000, - num_anchor_per_level=[3, 3, 3])) - -v5_k_means_autoanchor = dict( - type='YOLOAutoAnchorHook', - optimizer=dict( - type='YOLOV5KMeansAnchorOptimizer', - iters=1000, - num_anchor_per_level=[3, 3, 3], - prior_match_thr=4.0, - mutation_args=[0.9, 0.1], - augment_args=[0.9, 0.1])) diff --git a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py index 3e9ab88a8..0b1155992 100644 --- a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py +++ b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py @@ -1,14 +1,11 @@ -_base_ = [ - '../_base_/default_runtime.py', '../_base_/det_p5_tta.py', - '../_base_/autoanchor.py' -] +_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] # ========================Frequently modified parameters====================== # -----data related----- 
-data_root = 'data/coco/' # Root path of data +data_root = '/Users/yechenzhi/data/coco/' # Root path of data # Path of train annotation file -train_ann_file = 'annotations/instances_train2017.json' -train_data_prefix = 'train2017/' # Prefix of train image path +train_ann_file = 'annotations/instances_val2017.json' +train_data_prefix = 'val2017/' # Prefix of train image path # Path of val annotation file val_ann_file = 'annotations/instances_val2017.json' val_data_prefix = 'val2017/' # Prefix of val image path @@ -280,6 +277,33 @@ priority=49) ] +autoanchor_hook = dict( + type='YOLOAutoAnchorHook', + optimizer=dict( + type='YOLOV5KMeansAnchorOptimizer', + iters=1000, + num_anchor_per_level=[3, 3, 3], + prior_match_thr=4.0, + mutation_args=[0.9, 0.1], + augment_args=[0.9, 0.1])) + +# You can comment out the existing autoanchor hook, +# and then select the autoanchor you want and uncomment it. + +# autoanchor_hook = dict( +# type='YOLOAutoAnchorHook', +# optimizer=dict( +# type='YOLOKMeansAnchorOptimizer', +# iters=1000, +# num_anchor_per_level=[3, 3, 3])) + +# autoanchor_hook = dict( +# type='YOLOAutoAnchorHook', +# optimizer=dict( +# type='YOLODEAnchorOptimizer', +# iters=1000, +# num_anchor_per_level=[3, 3, 3])) + val_evaluator = dict( type='mmdet.CocoMetric', proposal_nums=(100, 1, 10), diff --git a/tools/train.py b/tools/train.py index 090642d68..25680344e 100644 --- a/tools/train.py +++ b/tools/train.py @@ -30,11 +30,7 @@ def parse_args(): 'specify, try to auto resume from the latest checkpoint ' 'in the work directory.') parser.add_argument( - '--autoanchor', - choices=[ - 'k_means_autoanchor', 'de_autoanchor', 'v5_k_means_autoanchor' - ], - help='types of autoanchor') + '--autoanchor', action='store_true', help='types of autoanchor') parser.add_argument( '--cfg-options', nargs='+', @@ -96,12 +92,7 @@ def main(): if args.autoanchor: assert cfg.model.bbox_head.prior_generator.type \ == 'mmdet.YOLOAnchorGenerator' - assert args.autoanchor in [ - 'k_means_autoanchor', 'de_autoanchor', - 'v5_k_means_autoanchor'], \ - 'only k_means_autoanchor, de_autoanchor, v5_k_means_autoanchor ' \ - 'are supported !' 
- cfg.custom_hooks.append(cfg.get(args.autoanchor)) + cfg.custom_hooks.append(cfg.autoanchor_hook) # resume is determined in this priority: resume from > auto_resume if args.resume == 'auto': From e0cd9ae076b28a486524eb4f72d5c7f62744ae00 Mon Sep 17 00:00:00 2001 From: "yechenzhi@kuaishou.com" <136920488@qq.com> Date: Tue, 21 Mar 2023 23:14:03 +0800 Subject: [PATCH 20/20] del local changes --- configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py index 0b1155992..66007bd4e 100644 --- a/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py +++ b/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py @@ -2,10 +2,10 @@ # ========================Frequently modified parameters====================== # -----data related----- -data_root = '/Users/yechenzhi/data/coco/' # Root path of data +data_root = 'data/coco/' # Root path of data # Path of train annotation file -train_ann_file = 'annotations/instances_val2017.json' -train_data_prefix = 'val2017/' # Prefix of train image path +train_ann_file = 'annotations/instances_train2017.json' +train_data_prefix = 'train2017/' # Prefix of train image path # Path of val annotation file val_ann_file = 'annotations/instances_val2017.json' val_data_prefix = 'val2017/' # Prefix of val image path
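
---

Usage sketch: with this series applied, online anchor optimization is enabled from
tools/train.py via --autoanchor, which appends the autoanchor_hook defined in the
model config to custom_hooks. A minimal invocation, assuming the default
YOLOV5KMeansAnchorOptimizer entry shipped in the yolov5_s config (adjust the config
path for other models)::

    python tools/train.py \
        configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py \
        --autoanchor

To try a different clustering strategy, swap the optimizer entry of autoanchor_hook
in the config; the plain k-means variant provided (commented out) in the config is,
for illustration::

    # Illustrative only: replaces the default YOLOV5KMeansAnchorOptimizer entry.
    autoanchor_hook = dict(
        type='YOLOAutoAnchorHook',
        optimizer=dict(
            type='YOLOKMeansAnchorOptimizer',
            iters=1000,
            num_anchor_per_level=[3, 3, 3]))

YOLOAutoAnchorHook registers with priority 48 so that it runs before EMAHook
(default priority 49), ensuring the re-clustered anchors are in place before the
EMA copy of the model is created.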