diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 000000000000..3da386f7e724
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,5 @@
+# These are supported funding model platforms
+
+github: glenn-jocher
+patreon: ultralytics
+open_collective: ultralytics
diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml
index 020658372f3c..bb8b173cdb31 100644
--- a/.github/workflows/ci-testing.yml
+++ b/.github/workflows/ci-testing.yml
@@ -2,12 +2,10 @@ name: CI CPU testing
on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows
push:
- branches: [ master ]
+ branches: [ master, develop ]
pull_request:
# The branches below must be a subset of the branches above
- branches: [ master ]
- schedule:
- - cron: '0 0 * * *' # Runs at 00:00 UTC every day
+ branches: [ master, develop ]
jobs:
cpu-tests:
@@ -66,14 +64,15 @@ jobs:
di=cpu # inference devices
# train
- python train.py --img 256 --batch 8 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di
+ python train.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di
# detect
python detect.py --weights weights/${{ matrix.model }}.pt --device $di
python detect.py --weights runs/train/exp/weights/last.pt --device $di
# test
- python test.py --img 256 --batch 8 --weights weights/${{ matrix.model }}.pt --device $di
- python test.py --img 256 --batch 8 --weights runs/train/exp/weights/last.pt --device $di
+ python test.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --device $di
+ python test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di
+ python hubconf.py # hub
python models/yolo.py --cfg models/${{ matrix.model }}.yaml # inspect
- python models/export.py --img 256 --batch 1 --weights weights/${{ matrix.model }}.pt # export
+ python models/export.py --img 128 --batch 1 --weights weights/${{ matrix.model }}.pt # export
shell: bash
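The new `python hubconf.py # hub` step above runs hubconf's `__main__` block as a smoke test of the PyTorch Hub pathway. A minimal sketch of what it exercises, assuming network access to the release assets:

```python
# Hedged sketch of the hub pathway smoke-tested by `python hubconf.py` in CI.
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # pretrained yolov5s
results = model('https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg')
results.print()  # e.g. "image 1/1: 720x1280 2 persons, ..."
```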
diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml
index 2bfa53c14fcd..4e502fe9af7b 100644
--- a/.github/workflows/greetings.yml
+++ b/.github/workflows/greetings.yml
@@ -11,12 +11,12 @@ jobs:
repo-token: ${{ secrets.GITHUB_TOKEN }}
pr-message: |
👋 Hello @${{ github.actor }}, thank you for submitting a 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to:
- - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master update by running the following, replacing 'feature' with the name of your local branch:
+ - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch:
```bash
git remote add upstream https://github.com/ultralytics/yolov5.git
git fetch upstream
git checkout feature # <----- replace 'feature' with local branch name
- git rebase upstream/master
+ git rebase upstream/develop
git push -u origin -f
```
- ✅ Verify all Continuous Integration (CI) **checks are passing**.
@@ -42,10 +42,11 @@ jobs:
YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
- - **Google Colab Notebook** with free GPU:
- - **Kaggle Notebook** with free GPU: [https://www.kaggle.com/ultralytics/yolov5](https://www.kaggle.com/ultralytics/yolov5)
- - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
- - **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker)
+ - **Google Colab and Kaggle** notebooks with free GPU:
+ - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
+ - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
+ - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart)
+
## Status
diff --git a/Dockerfile b/Dockerfile
index 24529d2b9415..b47e5bbff194 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,14 +1,14 @@
# Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
-FROM nvcr.io/nvidia/pytorch:20.12-py3
+FROM nvcr.io/nvidia/pytorch:21.03-py3
# Install linux packages
-RUN apt update && apt install -y screen libgl1-mesa-glx
+RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
# Install python dependencies
-RUN pip install --upgrade pip
COPY requirements.txt .
-RUN pip install -r requirements.txt
-RUN pip install gsutil
+RUN python -m pip install --upgrade pip
+RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof
+RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook
# Create working directory
RUN mkdir -p /usr/src/app
@@ -17,11 +17,8 @@ WORKDIR /usr/src/app
# Copy contents
COPY . /usr/src/app
-# Copy weights
-#RUN python3 -c "from models import *; \
-#attempt_download('weights/yolov5s.pt'); \
-#attempt_download('weights/yolov5m.pt'); \
-#attempt_download('weights/yolov5l.pt')"
+# Set environment variables
+ENV HOME=/usr/src/app
# --------------------------------------------------- Extras Below ---------------------------------------------------
@@ -40,13 +37,13 @@ COPY . /usr/src/app
# sudo docker kill $(sudo docker ps -q)
# Kill all image-based
-# sudo docker kill $(sudo docker ps -a -q --filter ancestor=ultralytics/yolov5:latest)
+# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)
# Bash into running container
-# sudo docker container exec -it ba65811811ab bash
+# sudo docker exec -it 5a9b5863d93d bash
# Bash into stopped container
-# sudo docker commit 092b16b25c5b usr/resume && sudo docker run -it --gpus all --ipc=host -v "$(pwd)"/coco:/usr/src/coco --entrypoint=sh usr/resume
+# id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash
# Send weights to GCP
# python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt
diff --git a/README.md b/README.md
index 0a8d3d6a3a1c..5552f3493323 100755
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@ https://github.com/ultralytics/yolov5.git
### 2. Install requirements
```
pip install -r requirements.txt
-pip install tensorflow==2.4.0
+pip install tensorflow==2.4.1
```
### 3. Convert and verify
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml
new file mode 100644
index 000000000000..f45182b43e25
--- /dev/null
+++ b/data/GlobalWheat2020.yaml
@@ -0,0 +1,55 @@
+# Global Wheat 2020 dataset http://www.global-wheat.com/
+# Train command: python train.py --data GlobalWheat2020.yaml
+# Default dataset location is next to YOLOv5:
+# /parent_folder
+# /datasets/GlobalWheat2020
+# /yolov5
+
+
+# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
+train: # 3422 images
+ - ../datasets/GlobalWheat2020/images/arvalis_1
+ - ../datasets/GlobalWheat2020/images/arvalis_2
+ - ../datasets/GlobalWheat2020/images/arvalis_3
+ - ../datasets/GlobalWheat2020/images/ethz_1
+ - ../datasets/GlobalWheat2020/images/rres_1
+ - ../datasets/GlobalWheat2020/images/inrae_1
+ - ../datasets/GlobalWheat2020/images/usask_1
+
+val: # 748 images (WARNING: train set contains ethz_1)
+ - ../datasets/GlobalWheat2020/images/ethz_1
+
+test: # 1276 images
+ - ../datasets/GlobalWheat2020/images/utokyo_1
+ - ../datasets/GlobalWheat2020/images/utokyo_2
+ - ../datasets/GlobalWheat2020/images/nau_1
+ - ../datasets/GlobalWheat2020/images/uq_1
+
+# number of classes
+nc: 1
+
+# class names
+names: [ 'wheat_head' ]
+
+
+# download command/URL (optional) --------------------------------------------------------------------------------------
+download: |
+ from utils.general import download, Path
+
+ # Download
+ dir = Path('../datasets/GlobalWheat2020') # dataset directory
+ urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
+ 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
+ download(urls, dir=dir)
+
+ # Make Directories
+ for p in 'annotations', 'images', 'labels':
+ (dir / p).mkdir(parents=True, exist_ok=True)
+
+ # Move
+ for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
+ 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
+ (dir / p).rename(dir / 'images' / p) # move to /images
+ f = (dir / p).with_suffix('.json') # json file
+ if f.exists():
+ f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
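The `download:` field in these dataset YAMLs is executable Python: when train.py finds the dataset paths missing, it runs this snippet (via `check_dataset` in utils/general.py). A hedged sketch of invoking it manually from the repo root:

```python
# Hedged sketch: run a dataset YAML's `download:` snippet by hand from the
# yolov5 repo root; train.py triggers the same snippet when paths are missing.
import yaml

with open('data/GlobalWheat2020.yaml') as f:
    data = yaml.safe_load(f)
exec(data['download'])  # downloads and restructures ../datasets/GlobalWheat2020
```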
diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml
new file mode 100644
index 000000000000..a8c1f25b385a
--- /dev/null
+++ b/data/SKU-110K.yaml
@@ -0,0 +1,52 @@
+# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19
+# Train command: python train.py --data SKU-110K.yaml
+# Default dataset location is next to YOLOv5:
+# /parent_folder
+# /datasets/SKU-110K
+# /yolov5
+
+
+# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
+train: ../datasets/SKU-110K/train.txt # 8219 images
+val: ../datasets/SKU-110K/val.txt # 588 images
+test: ../datasets/SKU-110K/test.txt # 2936 images
+
+# number of classes
+nc: 1
+
+# class names
+names: [ 'object' ]
+
+
+# download command/URL (optional) --------------------------------------------------------------------------------------
+download: |
+ import shutil
+ from tqdm import tqdm
+ from utils.general import np, pd, Path, download, xyxy2xywh
+
+ # Download
+ datasets = Path('../datasets') # download directory
+ urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
+ download(urls, dir=datasets, delete=False)
+
+ # Rename directories
+ dir = (datasets / 'SKU-110K')
+ if dir.exists():
+ shutil.rmtree(dir)
+ (datasets / 'SKU110K_fixed').rename(dir) # rename dir
+ (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
+
+ # Convert labels
+ names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
+ for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
+ x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
+ images, unique_images = x[:, 0], np.unique(x[:, 0])
+ with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
+ f.writelines(f'./images/{s}\n' for s in unique_images)
+ for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
+ cls = 0 # single-class dataset
+ with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
+ for r in x[images == im]:
+ w, h = r[6], r[7] # image width, height
+ xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
+ f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml
new file mode 100644
index 000000000000..c4603b200132
--- /dev/null
+++ b/data/VisDrone.yaml
@@ -0,0 +1,61 @@
+# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset
+# Train command: python train.py --data VisDrone.yaml
+# Default dataset location is next to YOLOv5:
+# /parent_folder
+# /VisDrone
+# /yolov5
+
+
+# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
+train: ../VisDrone/VisDrone2019-DET-train/images # 6471 images
+val: ../VisDrone/VisDrone2019-DET-val/images # 548 images
+test: ../VisDrone/VisDrone2019-DET-test-dev/images # 1610 images
+
+# number of classes
+nc: 10
+
+# class names
+names: [ 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor' ]
+
+
+# download command/URL (optional) --------------------------------------------------------------------------------------
+download: |
+ from utils.general import download, os, Path
+
+ def visdrone2yolo(dir):
+ from PIL import Image
+ from tqdm import tqdm
+
+ def convert_box(size, box):
+ # Convert VisDrone box to YOLO xywh box
+ dw = 1. / size[0]
+ dh = 1. / size[1]
+ return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
+
+ (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
+ pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
+ for f in pbar:
+ img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
+ lines = []
+ with open(f, 'r') as file: # read annotation.txt
+ for row in [x.split(',') for x in file.read().strip().splitlines()]:
+ if row[4] == '0': # VisDrone 'ignored regions' class 0
+ continue
+ cls = int(row[5]) - 1
+ box = convert_box(img_size, tuple(map(int, row[:4])))
+ lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
+ with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
+ fl.writelines(lines) # write label.txt
+
+
+ # Download
+ dir = Path('../VisDrone') # dataset directory
+ urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
+ 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
+ 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
+ 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
+ download(urls, dir=dir)
+
+ # Convert
+ for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
+ visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
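`convert_box` maps VisDrone's `(x_top_left, y_top_left, width, height)` pixel boxes to YOLO's normalized `(x_center, y_center, width, height)`. A quick numeric check:

```python
# Quick numeric check of convert_box from the snippet above.
def convert_box(size, box):
    dw, dh = 1. / size[0], 1. / size[1]
    return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh

# 50x40 box at (100, 200) in a 1920x1080 image:
print(convert_box((1920, 1080), (100, 200, 50, 40)))
# -> (0.0651..., 0.2037..., 0.0260..., 0.0370...)
```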
diff --git a/data/argoverse_hd.yaml b/data/argoverse_hd.yaml
new file mode 100644
index 000000000000..0ba314d82ce1
--- /dev/null
+++ b/data/argoverse_hd.yaml
@@ -0,0 +1,21 @@
+# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
+# Train command: python train.py --data argoverse_hd.yaml
+# Default dataset location is next to YOLOv5:
+# /parent_folder
+# /argoverse
+# /yolov5
+
+
+# download command/URL (optional)
+download: bash data/scripts/get_argoverse_hd.sh
+
+# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
+train: ../argoverse/Argoverse-1.1/images/train/ # 39384 images
+val: ../argoverse/Argoverse-1.1/images/val/ # 15062 images
+test: ../argoverse/Argoverse-1.1/images/test/ # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview
+
+# number of classes
+nc: 8
+
+# class names
+names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ]
diff --git a/data/coco.yaml b/data/coco.yaml
index b9da2bf5919b..f818a49ff0fa 100644
--- a/data/coco.yaml
+++ b/data/coco.yaml
@@ -1,6 +1,6 @@
# COCO 2017 dataset http://cocodataset.org
# Train command: python train.py --data coco.yaml
-# Default dataset location is next to /yolov5:
+# Default dataset location is next to YOLOv5:
# /parent_folder
# /coco
# /yolov5
@@ -30,6 +30,6 @@ names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', '
# Print classes
# with open('data/coco.yaml') as f:
-# d = yaml.load(f, Loader=yaml.FullLoader) # dict
+# d = yaml.safe_load(f) # dict
# for i, x in enumerate(d['names']):
# print(i, x)
diff --git a/data/coco128.yaml b/data/coco128.yaml
index c41bccf2b8d5..83fbc29d3404 100644
--- a/data/coco128.yaml
+++ b/data/coco128.yaml
@@ -1,6 +1,6 @@
# COCO 2017 dataset http://cocodataset.org - first 128 training images
# Train command: python train.py --data coco128.yaml
-# Default dataset location is next to /yolov5:
+# Default dataset location is next to YOLOv5:
# /parent_folder
# /coco128
# /yolov5
diff --git a/data/hyp.finetune_objects365.yaml b/data/hyp.finetune_objects365.yaml
new file mode 100644
index 000000000000..2b104ef2d9bf
--- /dev/null
+++ b/data/hyp.finetune_objects365.yaml
@@ -0,0 +1,28 @@
+lr0: 0.00258
+lrf: 0.17
+momentum: 0.779
+weight_decay: 0.00058
+warmup_epochs: 1.33
+warmup_momentum: 0.86
+warmup_bias_lr: 0.0711
+box: 0.0539
+cls: 0.299
+cls_pw: 0.825
+obj: 0.632
+obj_pw: 1.0
+iou_t: 0.2
+anchor_t: 3.44
+anchors: 3.2
+fl_gamma: 0.0
+hsv_h: 0.0188
+hsv_s: 0.704
+hsv_v: 0.36
+degrees: 0.0
+translate: 0.0902
+scale: 0.491
+shear: 0.0
+perspective: 0.0
+flipud: 0.0
+fliplr: 0.5
+mosaic: 1.0
+mixup: 0.0
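Hyperparameter files like this one are flat YAML dicts; train.py reads them with `yaml.safe_load` (consistent with the safe_load change elsewhere in this PR). A minimal loading sketch:

```python
# Minimal sketch: inspect the fine-tuning hyperparameters as a flat dict.
import yaml

with open('data/hyp.finetune_objects365.yaml') as f:
    hyp = yaml.safe_load(f)
print(hyp['lr0'], hyp['anchors'])  # 0.00258 3.2
```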
diff --git a/data/objects365.yaml b/data/objects365.yaml
new file mode 100644
index 000000000000..eb99995903cf
--- /dev/null
+++ b/data/objects365.yaml
@@ -0,0 +1,102 @@
+# Objects365 dataset https://www.objects365.org/
+# Train command: python train.py --data objects365.yaml
+# Default dataset location is next to YOLOv5:
+# /parent_folder
+# /datasets/objects365
+# /yolov5
+
+# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
+train: ../datasets/objects365/images/train # 1742289 images
+val: ../datasets/objects365/images/val # 5570 images
+
+# number of classes
+nc: 365
+
+# class names
+names: [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup',
+ 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book',
+ 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag',
+ 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV',
+ 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle',
+ 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird',
+ 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck',
+ 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning',
+ 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife',
+ 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock',
+ 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish',
+ 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan',
+ 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard',
+ 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign',
+ 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat',
+ 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard',
+ 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry',
+ 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks',
+ 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors',
+ 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape',
+ 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck',
+ 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette',
+ 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket',
+ 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine',
+ 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine',
+ 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon',
+ 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse',
+ 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball',
+ 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin',
+ 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts',
+ 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit',
+ 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD',
+ 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder',
+ 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips',
+ 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab',
+ 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal',
+ 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart',
+ 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French',
+ 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell',
+ 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil',
+ 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis' ]
+
+
+# download command/URL (optional) --------------------------------------------------------------------------------------
+download: |
+ from pycocotools.coco import COCO
+ from tqdm import tqdm
+
+ from utils.general import download, Path
+
+ # Make Directories
+ dir = Path('../datasets/objects365') # dataset directory
+ for p in 'images', 'labels':
+ (dir / p).mkdir(parents=True, exist_ok=True)
+ for q in 'train', 'val':
+ (dir / p / q).mkdir(parents=True, exist_ok=True)
+
+ # Download
+ url = "https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/"
+ download([url + 'zhiyuan_objv2_train.tar.gz'], dir=dir, delete=False) # annotations json
+ download([url + f for f in [f'patch{i}.tar.gz' for i in range(51)]], dir=dir / 'images' / 'train',
+ curl=True, delete=False, threads=8)
+
+ # Move
+ train = dir / 'images' / 'train'
+ for f in tqdm(train.rglob('*.jpg'), desc=f'Moving images'):
+ f.rename(train / f.name) # move to /images/train
+
+ # Labels
+ coco = COCO(dir / 'zhiyuan_objv2_train.json')
+ names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
+ for cid, cat in enumerate(names):
+ catIds = coco.getCatIds(catNms=[cat])
+ imgIds = coco.getImgIds(catIds=catIds)
+ for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
+ width, height = im["width"], im["height"]
+ path = Path(im["file_name"]) # image filename
+ try:
+ with open(dir / 'labels' / 'train' / path.with_suffix('.txt').name, 'a') as file:
+ annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
+ for a in coco.loadAnns(annIds):
+ x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner)
+ x, y = x + w / 2, y + h / 2 # xy to center
+ file.write(f"{cid} {x / width:.5f} {y / height:.5f} {w / width:.5f} {h / height:.5f}\n")
+
+ except Exception as e:
+ print(e)
diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh
new file mode 100644
index 000000000000..331509914568
--- /dev/null
+++ b/data/scripts/get_argoverse_hd.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
+# Download command: bash data/scripts/get_argoverse_hd.sh
+# Train command: python train.py --data argoverse_hd.yaml
+# Default dataset location is next to YOLOv5:
+# /parent_folder
+# /argoverse
+# /yolov5
+
+# Download/unzip images
+d='../argoverse/' # unzip directory
+mkdir $d
+url=https://argoverse-hd.s3.us-east-2.amazonaws.com/
+f=Argoverse-HD-Full.zip
+curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
+wait # finish background tasks
+
+cd ../argoverse/Argoverse-1.1/
+ln -s tracking images
+
+cd ../Argoverse-HD/annotations/
+
+python3 - "$@" <train.txt
cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt
-python3 - "$@" <= 1
- p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
+ p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count
else:
- p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
+ p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
+ imc = im0.copy() if opt.save_crop else im0 # for opt.save_crop
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
@@ -165,7 +170,7 @@ def _imports_graph_def():
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
- s += f'{n} {names[int(c)]}s, ' # add to string
+ s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
@@ -175,9 +180,12 @@ def _imports_graph_def():
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
- if save_img or view_img: # Add bbox to image
- label = f'{names[int(cls)]} {conf:.2f}'
- plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
+ if save_img or opt.save_crop or view_img: # Add bbox to image
+ c = int(cls) # integer class
+ label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}')
+ plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=opt.line_thickness)
+ if opt.save_crop:
+ save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
# Print time (inference + NMS)
print(f'{s}Done. ({t2 - t1:.3f}s)')
@@ -185,22 +193,25 @@ def _imports_graph_def():
# Stream results
if view_img:
cv2.imshow(str(p), im0)
+ cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
- else: # 'video'
+ else: # 'video' or 'stream'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
-
- fourcc = 'mp4v' # output video codec
- fps = vid_cap.get(cv2.CAP_PROP_FPS)
- w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
- h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
- vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
+ if vid_cap: # video
+ fps = vid_cap.get(cv2.CAP_PROP_FPS)
+ w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ else: # stream
+ fps, w, h = 30, im0.shape[1], im0.shape[0]
+ save_path += '.mp4'
+ vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
@@ -214,13 +225,16 @@ def _imports_graph_def():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam
- parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
+ parser.add_argument('--img-size', nargs='+', type=int, default=[320, 320], help='image size') # height, width
parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
+ parser.add_argument('--max-det', type=int, default=1000, help='maximum number of detections per image')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+ parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
+ parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
@@ -229,14 +243,17 @@ def _imports_graph_def():
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--tfl-int8', action='store_true', help='use int8 quantized TFLite model')
+ parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
+ parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
+ parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
opt = parser.parse_args()
+ opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand
print(opt)
- check_requirements()
+ check_requirements(exclude=('tensorboard', 'pycocotools', 'thop'))
- with torch.no_grad():
- if opt.update: # update all models (to fix SourceChangeWarning)
- for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
- detect()
- strip_optimizer(opt.weights)
- else:
- detect()
+ if opt.update: # update all models (to fix SourceChangeWarning)
+ for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
+ detect(opt=opt)
+ strip_optimizer(opt.weights)
+ else:
+ detect(opt=opt)
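Two small idioms appear in the detect.py changes: list-multiplication to expand a single `--img-size` value into a (height, width) pair, and bool-multiplication to pluralize class names in the log string. Illustrated:

```python
# `opt.img_size *= 2 if len(opt.img_size) == 1 else 1`: multiplying a list by 2
# duplicates it, so one value becomes a square (height, width) pair.
img_size = [320]
img_size *= 2 if len(img_size) == 1 else 1
print(img_size)  # [320, 320]

# `'s' * (n > 1)`: a bool multiplies a string as 0 or 1 copies.
for n in (1, 3):
    print(f"{n} person{'s' * (n > 1)}")  # "1 person" then "3 persons"
```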
diff --git a/hubconf.py b/hubconf.py
index c4485a42e335..40bbb1ed0826 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -1,23 +1,14 @@
-"""File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/
+"""YOLOv5 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/
Usage:
import torch
- model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80)
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
"""
-from pathlib import Path
-
import torch
-from models.yolo import Model
-from utils.general import set_logging
-from utils.google_utils import attempt_download
-
-dependencies = ['torch', 'yaml']
-set_logging()
-
-def create(name, pretrained, channels, classes, autoshape):
+def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
"""Creates a specified YOLOv5 model
Arguments:
@@ -25,117 +16,109 @@ def create(name, pretrained, channels, classes, autoshape):
pretrained (bool): load pretrained weights into the model
channels (int): number of input channels
classes (int): number of model classes
+ autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
+ verbose (bool): print all information to screen
+ device (str, torch.device, None): device to use for model parameters
Returns:
- pytorch model
+ YOLOv5 pytorch model
"""
- config = Path(__file__).parent / 'models' / f'{name}.yaml' # model.yaml path
+ from pathlib import Path
+
+ from models.yolo import Model, attempt_load
+ from utils.general import check_requirements, set_logging
+ from utils.google_utils import attempt_download
+ from utils.torch_utils import select_device
+
+ check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop'))
+ set_logging(verbose=verbose)
+
+ fname = Path(name).with_suffix('.pt') # checkpoint filename
try:
- model = Model(config, channels, classes)
- if pretrained:
- fname = f'{name}.pt' # checkpoint filename
- attempt_download(fname) # download if not found locally
- ckpt = torch.load(fname, map_location=torch.device('cpu')) # load
- state_dict = ckpt['model'].float().state_dict() # to FP32
- state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape} # filter
- model.load_state_dict(state_dict, strict=False) # load
- if len(ckpt['model'].names) == classes:
- model.names = ckpt['model'].names # set class names attribute
- if autoshape:
- model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
- return model
+ if pretrained and channels == 3 and classes == 80:
+ model = attempt_load(fname, map_location=torch.device('cpu')) # download/load FP32 model
+ else:
+ cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path
+ model = Model(cfg, channels, classes) # create model
+ if pretrained:
+ ckpt = torch.load(attempt_download(fname), map_location=torch.device('cpu')) # load
+ msd = model.state_dict() # model state_dict
+ csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
+ csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter
+ model.load_state_dict(csd, strict=False) # load
+ if len(ckpt['model'].names) == classes:
+ model.names = ckpt['model'].names # set class names attribute
+ if autoshape:
+ model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
+ device = select_device('0' if torch.cuda.is_available() else 'cpu') if device is None else torch.device(device)
+ return model.to(device)
except Exception as e:
help_url = 'https://github.com/ultralytics/yolov5/issues/36'
- s = 'Cache maybe be out of date, try force_reload=True. See %s for help.' % help_url
+ s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' % help_url
raise Exception(s) from e
-def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True):
- """YOLOv5-small model from https://github.com/ultralytics/yolov5
-
- Arguments:
- pretrained (bool): load pretrained weights into the model, default=False
- channels (int): number of input channels, default=3
- classes (int): number of model classes, default=80
-
- Returns:
- pytorch model
- """
- return create('yolov5s', pretrained, channels, classes, autoshape)
-
+def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):
+ # YOLOv5 custom or local model
+ return _create(path, autoshape=autoshape, verbose=verbose, device=device)
-def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True):
- """YOLOv5-medium model from https://github.com/ultralytics/yolov5
- Arguments:
- pretrained (bool): load pretrained weights into the model, default=False
- channels (int): number of input channels, default=3
- classes (int): number of model classes, default=80
+def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-small model https://github.com/ultralytics/yolov5
+ return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)
- Returns:
- pytorch model
- """
- return create('yolov5m', pretrained, channels, classes, autoshape)
+def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-medium model https://github.com/ultralytics/yolov5
+ return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)
-def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True):
- """YOLOv5-large model from https://github.com/ultralytics/yolov5
- Arguments:
- pretrained (bool): load pretrained weights into the model, default=False
- channels (int): number of input channels, default=3
- classes (int): number of model classes, default=80
+def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-large model https://github.com/ultralytics/yolov5
+ return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)
- Returns:
- pytorch model
- """
- return create('yolov5l', pretrained, channels, classes, autoshape)
+def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
+ return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)
-def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True):
- """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5
- Arguments:
- pretrained (bool): load pretrained weights into the model, default=False
- channels (int): number of input channels, default=3
- classes (int): number of model classes, default=80
+def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
+ return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)
- Returns:
- pytorch model
- """
- return create('yolov5x', pretrained, channels, classes, autoshape)
+def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
+ return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)
-def custom(path_or_model='path/to/model.pt', autoshape=True):
- """YOLOv5-custom model from https://github.com/ultralytics/yolov5
- Arguments (3 options):
- path_or_model (str): 'path/to/model.pt'
- path_or_model (dict): torch.load('path/to/model.pt')
- path_or_model (nn.Module): torch.load('path/to/model.pt')['model']
+def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
+ return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)
- Returns:
- pytorch model
- """
- model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint
- if isinstance(model, dict):
- model = model['model'] # load model
- hub_model = Model(model.yaml).to(next(model.parameters()).device) # create
- hub_model.load_state_dict(model.float().state_dict()) # load state_dict
- hub_model.names = model.names # class names
- return hub_model.autoshape() if autoshape else hub_model
+def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
+ return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)
if __name__ == '__main__':
- model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example
- # model = custom(path_or_model='path/to/model.pt') # custom example
+ model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained
+ # model = custom(path='path/to/model.pt') # custom
# Verify inference
+ import cv2
+ import numpy as np
from PIL import Image
- imgs = [Image.open(x) for x in Path('data/images').glob('*.jpg')]
- results = model(imgs)
- results.show()
+ imgs = ['data/images/zidane.jpg', # filename
+ 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg', # URI
+ cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV
+ Image.open('data/images/bus.jpg'), # PIL
+ np.zeros((320, 640, 3))] # numpy
+
+ results = model(imgs) # batched inference
results.print()
+ results.save()
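After the refactor every entrypoint shares one signature and routes through `_create()`, and `torch.hub.load` forwards extra kwargs straight to it. A hedged usage sketch:

```python
# Hedged sketch of the refactored hub entrypoints; torch.hub.load forwards
# kwargs (device, verbose, ...) to _create().
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s', device='cpu', verbose=False)
# local/custom weights now use the same _create() path:
# model = torch.hub.load('ultralytics/yolov5', 'custom', path='path/to/model.pt')
```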
diff --git a/models/common.py b/models/common.py
index a25707fdd387..4211db406c3d 100644
--- a/models/common.py
+++ b/models/common.py
@@ -1,15 +1,21 @@
-# This file contains modules common to various models
+# YOLOv5 common modules
import math
+from copy import copy
+from pathlib import Path
+
import numpy as np
+import pandas as pd
import requests
import torch
import torch.nn as nn
-from PIL import Image, ImageDraw
+from PIL import Image
+from torch.cuda import amp
from utils.datasets import letterbox
-from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
-from utils.plots import color_list
+from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box
+from utils.plots import colors, plot_one_box
+from utils.torch_utils import time_synchronized
def autopad(k, p=None): # kernel, padding
@@ -39,6 +45,52 @@ def fuseforward(self, x):
return self.act(self.conv(x))
+class TransformerLayer(nn.Module):
+ # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
+ def __init__(self, c, num_heads):
+ super().__init__()
+ self.q = nn.Linear(c, c, bias=False)
+ self.k = nn.Linear(c, c, bias=False)
+ self.v = nn.Linear(c, c, bias=False)
+ self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
+ self.fc1 = nn.Linear(c, c, bias=False)
+ self.fc2 = nn.Linear(c, c, bias=False)
+
+ def forward(self, x):
+ x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
+ x = self.fc2(self.fc1(x)) + x
+ return x
+
+
+class TransformerBlock(nn.Module):
+ # Vision Transformer https://arxiv.org/abs/2010.11929
+ def __init__(self, c1, c2, num_heads, num_layers):
+ super().__init__()
+ self.conv = None
+ if c1 != c2:
+ self.conv = Conv(c1, c2)
+ self.linear = nn.Linear(c2, c2) # learnable position embedding
+ self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
+ self.c2 = c2
+
+ def forward(self, x):
+ if self.conv is not None:
+ x = self.conv(x)
+ b, _, w, h = x.shape
+ p = x.flatten(2) # flatten spatial dims: (b, c2, w*h)
+ p = p.unsqueeze(0) # (1, b, c2, w*h)
+ p = p.transpose(0, 3) # (w*h, b, c2, 1)
+ p = p.squeeze(3) # sequence-first layout (w*h, b, c2) for nn.MultiheadAttention
+ e = self.linear(p) # learnable position embedding
+ x = p + e
+
+ x = self.tr(x)
+ x = x.unsqueeze(3)
+ x = x.transpose(0, 3)
+ x = x.reshape(b, self.c2, w, h) # restore (b, c2, w, h) feature map
+ return x
+
+
class Bottleneck(nn.Module):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
@@ -86,6 +138,14 @@ def forward(self, x):
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
+class C3TR(C3):
+ # C3 module with TransformerBlock()
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
+ super().__init__(c1, c2, n, shortcut, g, e)
+ c_ = int(c2 * e)
+ self.m = TransformerBlock(c_, c_, 4, n)
+
+
class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, c1, c2, k=(5, 9, 13)):
@@ -155,50 +215,57 @@ class NMS(nn.Module):
conf = 0.25 # confidence threshold
iou = 0.45 # IoU threshold
classes = None # (optional list) filter by class
+ max_det = 1000 # maximum number of detections per image
def __init__(self):
super(NMS, self).__init__()
def forward(self, x):
- return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)
+ return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)
-class autoShape(nn.Module):
+class AutoShape(nn.Module):
# input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
- img_size = 640 # inference size (pixels)
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
classes = None # (optional list) filter by class
+ max_det = 1000 # maximum number of detections per image
def __init__(self, model):
- super(autoShape, self).__init__()
+ super(AutoShape, self).__init__()
self.model = model.eval()
def autoshape(self):
- print('autoShape already enabled, skipping... ') # model already converted to model.autoshape()
+ print('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()
return self
+ @torch.no_grad()
def forward(self, imgs, size=640, augment=False, profile=False):
- # Inference from various sources. For height=720, width=1280, RGB images example inputs are:
- # filename: imgs = 'data/samples/zidane.jpg'
+ # Inference from various sources. For height=640, width=1280, RGB images example inputs are:
+ # filename: imgs = 'data/images/zidane.jpg'
# URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
- # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(720,1280,3)
- # PIL: = Image.open('image.jpg') # HWC x(720,1280,3)
- # numpy: = np.zeros((720,1280,3)) # HWC
- # torch: = torch.zeros(16,3,720,1280) # BCHW
+ # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
+ # PIL: = Image.open('image.jpg') # HWC x(640,1280,3)
+ # numpy: = np.zeros((640,1280,3)) # HWC
+ # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
+ t = [time_synchronized()]
p = next(self.model.parameters()) # for device and type
if isinstance(imgs, torch.Tensor): # torch
- return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
+ with amp.autocast(enabled=p.device.type != 'cpu'):
+ return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
# Pre-process
n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
- shape0, shape1 = [], [] # image and inference shapes
+ shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
+ f = f'image{i}' # filename
if isinstance(im, str): # filename or uri
- im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im) # open
- im = np.array(im) # to numpy
+ im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im
+ elif isinstance(im, Image.Image): # PIL Image
+ im, f = np.asarray(im), getattr(im, 'filename', f) or f
+ files.append(Path(f).with_suffix('.jpg').name)
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input
@@ -206,82 +273,114 @@ def forward(self, imgs, size=640, augment=False, profile=False):
shape0.append(s) # image shape
g = (size / max(s)) # gain
shape1.append([y * g for y in s])
- imgs[i] = im # update
+ imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32
+ t.append(time_synchronized())
- # Inference
- with torch.no_grad():
+ with amp.autocast(enabled=p.device.type != 'cpu'):
+ # Inference
y = self.model(x, augment, profile)[0] # forward
- y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS
+ t.append(time_synchronized())
- # Post-process
- for i in range(n):
- scale_coords(shape1, y[i][:, :4], shape0[i])
+ # Post-process
+ y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) # NMS
+ for i in range(n):
+ scale_coords(shape1, y[i][:, :4], shape0[i])
- return Detections(imgs, y, self.names)
+ t.append(time_synchronized())
+ return Detections(imgs, y, files, t, self.names, x.shape)
class Detections:
# detections class for YOLOv5 inference results
- def __init__(self, imgs, pred, names=None):
+ def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super(Detections, self).__init__()
d = pred[0].device # device
gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
+ self.files = files # image filenames
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
- self.n = len(self.pred)
+ self.n = len(self.pred) # number of images (batch size)
+ self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) if times else (0., 0., 0.) # per-image pre-process, inference, NMS times (ms)
+ self.s = shape # inference BCHW shape
- def display(self, pprint=False, show=False, save=False):
- colors = color_list()
- for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
- str = f'Image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '
+ def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
+ for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
+ str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '
if pred is not None:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
- str += f'{n} {self.names[int(c)]}s, ' # add to string
- if show or save:
- img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np
+ str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
+ if show or save or render or crop:
for *box, conf, cls in pred: # xyxy, confidence, class
- # str += '%s %.2f, ' % (names[int(cls)], conf) # label
- ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10]) # plot
- if save:
- f = f'results{i}.jpg'
- str += f"saved to '{f}'"
- img.save(f) # save
- if show:
- img.show(f'Image {i}') # show
+ label = f'{self.names[int(cls)]} {conf:.2f}'
+ if crop:
+ save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i])
+ else: # all others
+ plot_one_box(box, im, label=label, color=colors(cls))
+
+ im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
if pprint:
- print(str)
+ print(str.rstrip(', '))
+ if show:
+ im.show(self.files[i]) # show
+ if save:
+ f = self.files[i]
+ im.save(save_dir / f) # save
+ print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
+ if render:
+ self.imgs[i] = np.asarray(im)
def print(self):
self.display(pprint=True) # print results
+ print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)
def show(self):
self.display(show=True) # show results
- def save(self):
- self.display(save=True) # save results
-
- def __len__(self):
- return self.n
+ def save(self, save_dir='runs/hub/exp'):
+ save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
+ self.display(save=True, save_dir=save_dir) # save results
+
+ def crop(self, save_dir='runs/hub/exp'):
+ save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
+ self.display(crop=True, save_dir=save_dir) # crop results
+ print(f'Saved results to {save_dir}\n')
+
+ def render(self):
+ self.display(render=True) # render results
+ return self.imgs
+
+ def pandas(self):
+ # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
+ new = copy(self) # return copy
+ ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
+ cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
+ for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
+ a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
+ setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
+ return new
def tolist(self):
# return a list of Detections objects, i.e. 'for result in results.tolist():'
- x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)]
+ x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], names=self.names, shape=self.s) for i in range(self.n)]
for d in x:
for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
setattr(d, k, getattr(d, k)[0]) # pop out of list
return x
+ def __len__(self):
+ return self.n
+
class Classify(nn.Module):
# Classification head, i.e. x(b,c1,20,20) to x(b,c2)
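The Detections class now tracks filenames, per-stage timing, and inference shape, and gains `pandas()`, `crop()`, and `render()` helpers. A hedged usage sketch:

```python
# Hedged sketch of the new Detections conveniences added above.
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
results = model(['data/images/zidane.jpg', 'data/images/bus.jpg'])  # batched inference
print(results.pandas().xyxy[0])  # DataFrame: xmin, ymin, xmax, ymax, confidence, class, name
results.crop()                   # saves crops under runs/hub/exp*/crops/<class name>/
rendered = results.render()      # list of numpy images with boxes drawn in place
```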
diff --git a/models/experimental.py b/models/experimental.py
index 2dbbf7fa32f2..490e995efad5 100644
--- a/models/experimental.py
+++ b/models/experimental.py
@@ -1,4 +1,4 @@
-# This file contains experimental modules
+# YOLOv5 experimental modules
import numpy as np
import torch
@@ -58,7 +58,7 @@ def forward(self, x):
class GhostBottleneck(nn.Module):
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
- def __init__(self, c1, c2, k, s):
+ def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
super(GhostBottleneck, self).__init__()
c_ = c2 // 2
self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
@@ -110,24 +110,31 @@ def forward(self, x, augment=False):
return y, None # inference, train output
-def attempt_load(weights, map_location=None):
+def attempt_load(weights, map_location=None, inplace=True, fuse=True):
+ from models.yolo import Detect, Model
+
# Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
model = Ensemble()
for w in weights if isinstance(weights, list) else [weights]:
- attempt_download(w)
- model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model
+ ckpt = torch.load(attempt_download(w), map_location=map_location) # load
+ if fuse:
+ model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model
+ else:
+ model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse
+
# Compatibility updates
for m in model.modules():
- if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
- m.inplace = True # pytorch 1.7.0 compatibility
+ if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:
+ m.inplace = inplace # pytorch 1.7.0 compatibility
elif type(m) is Conv:
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
if len(model) == 1:
return model[-1] # return model
else:
- print('Ensemble created with %s\n' % weights)
- for k in ['names', 'stride']:
+ print(f'Ensemble created with {weights}\n')
+ for k in ['names']:
setattr(model, k, getattr(model[-1], k))
+ model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride
return model # return ensemble
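`attempt_load` now prefers EMA weights when a checkpoint carries them, can optionally skip layer fusion, and sets an ensemble's stride from the member with the largest max stride. A hedged sketch:

```python
# Hedged sketch of the updated attempt_load behavior.
from models.experimental import attempt_load

model = attempt_load('yolov5s.pt', map_location='cpu')                     # single model, fused
ensemble = attempt_load(['yolov5s.pt', 'yolov5m.pt'], map_location='cpu')  # Ensemble
print(ensemble.stride)  # stride taken from the largest-max-stride member
```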
diff --git a/models/export.py b/models/export.py
index 057658af53dc..0d1147938e37 100644
--- a/models/export.py
+++ b/models/export.py
@@ -1,46 +1,65 @@
-"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats
+"""Exports a YOLOv5 *.pt model to TorchScript, ONNX, CoreML formats
Usage:
- $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
+ $ python path/to/models/export.py --weights yolov5s.pt --img 640 --batch 1
"""
import argparse
import sys
import time
+from pathlib import Path
-sys.path.append('./') # to run '$ python *.py' files in subdirectories
+sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories
import torch
import torch.nn as nn
+from torch.utils.mobile_optimizer import optimize_for_mobile
import models
from models.experimental import attempt_load
from utils.activations import Hardswish, SiLU
-from utils.general import set_logging, check_img_size
+from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging
+from utils.torch_utils import select_device
if __name__ == '__main__':
parser = argparse.ArgumentParser()
- parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') # from yolov5/models/
+ parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+ parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx', 'coreml'], help='include formats')
+ parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
+ parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
+ parser.add_argument('--train', action='store_true', help='model.train() mode')
+ parser.add_argument('--optimize', action='store_true', help='optimize TorchScript for mobile') # TorchScript-only
+ parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only
+ parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only
+ parser.add_argument('--opset-version', type=int, default=12, help='ONNX opset version') # ONNX-only
opt = parser.parse_args()
opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand
+ opt.include = [x.lower() for x in opt.include]
print(opt)
set_logging()
t = time.time()
# Load PyTorch model
- model = attempt_load(opt.weights, map_location=torch.device('cpu')) # load FP32 model
+ device = select_device(opt.device)
+ model = attempt_load(opt.weights, map_location=device) # load FP32 model
labels = model.names
# Checks
gs = int(max(model.stride)) # grid size (max stride)
opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples
+ assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0'
# Input
- img = torch.zeros(opt.batch_size, 3, *opt.img_size) # image size(1,3,320,192) iDetection
+ img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection
# Update model
+ if opt.half:
+ img, model = img.half(), model.half() # to FP16
+ if opt.train:
+ model.train() # training mode (no grid construction in Detect layer)
for k, m in model.named_modules():
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
if isinstance(m, models.common.Conv): # assign export-friendly activations
@@ -48,50 +67,79 @@
m.act = Hardswish()
elif isinstance(m.act, nn.SiLU):
m.act = SiLU()
- # elif isinstance(m, models.yolo.Detect):
- # m.forward = m.forward_export # assign forward (optional)
- model.model[-1].export = True # set Detect() layer export=True
- y = model(img) # dry run
-
- # TorchScript export
- try:
- print('\nStarting TorchScript export with torch %s...' % torch.__version__)
- f = opt.weights.replace('.pt', '.torchscript.pt') # filename
- ts = torch.jit.trace(model, img)
- ts.save(f)
- print('TorchScript export success, saved as %s' % f)
- except Exception as e:
- print('TorchScript export failure: %s' % e)
-
- # ONNX export
- try:
- import onnx
-
- print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
- f = opt.weights.replace('.pt', '.onnx') # filename
- torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
- output_names=['classes', 'boxes'] if y is None else ['output'])
-
- # Checks
- onnx_model = onnx.load(f) # load onnx model
- onnx.checker.check_model(onnx_model) # check onnx model
- # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model
- print('ONNX export success, saved as %s' % f)
- except Exception as e:
- print('ONNX export failure: %s' % e)
-
- # CoreML export
- try:
- import coremltools as ct
-
- print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
- # convert model from torchscript and apply pixel scaling as per detect.py
- model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
- f = opt.weights.replace('.pt', '.mlmodel') # filename
- model.save(f)
- print('CoreML export success, saved as %s' % f)
- except Exception as e:
- print('CoreML export failure: %s' % e)
+ elif isinstance(m, models.yolo.Detect):
+ m.inplace = opt.inplace
+ m.onnx_dynamic = opt.dynamic
+ # m.forward = m.forward_export # assign forward (optional)
+
+ for _ in range(2):
+ y = model(img) # dry runs
+ print(f"\n{colorstr('PyTorch:')} starting from {opt.weights} ({file_size(opt.weights):.1f} MB)")
+
+ # TorchScript export -----------------------------------------------------------------------------------------------
+ if 'torchscript' in opt.include or 'coreml' in opt.include:
+ prefix = colorstr('TorchScript:')
+ try:
+ print(f'\n{prefix} starting export with torch {torch.__version__}...')
+ f = opt.weights.replace('.pt', '.torchscript.pt') # filename
+ ts = torch.jit.trace(model, img, strict=False)
+ (optimize_for_mobile(ts) if opt.optimize else ts).save(f)
+ print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+ except Exception as e:
+ print(f'{prefix} export failure: {e}')
+
+ # ONNX export ------------------------------------------------------------------------------------------------------
+ if 'onnx' in opt.include:
+ prefix = colorstr('ONNX:')
+ try:
+ import onnx
+
+ print(f'{prefix} starting export with onnx {onnx.__version__}...')
+ f = opt.weights.replace('.pt', '.onnx') # filename
+ torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, input_names=['images'],
+ training=torch.onnx.TrainingMode.TRAINING if opt.train else torch.onnx.TrainingMode.EVAL,
+ do_constant_folding=not opt.train,
+ dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640)
+ 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None)
+
+ # Checks
+ model_onnx = onnx.load(f) # load onnx model
+ onnx.checker.check_model(model_onnx) # check onnx model
+ # print(onnx.helper.printable_graph(model_onnx.graph)) # print
+
+ # Simplify
+ if opt.simplify:
+ try:
+ check_requirements(['onnx-simplifier'])
+ import onnxsim
+
+ print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
+ model_onnx, check = onnxsim.simplify(
+ model_onnx,
+ dynamic_input_shape=opt.dynamic,
+ input_shapes={'images': list(img.shape)} if opt.dynamic else None)
+ assert check, 'simplified ONNX model could not be validated'
+ onnx.save(model_onnx, f)
+ except Exception as e:
+ print(f'{prefix} simplifier failure: {e}')
+ print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+ except Exception as e:
+ print(f'{prefix} export failure: {e}')
+
+ # CoreML export ----------------------------------------------------------------------------------------------------
+ if 'coreml' in opt.include:
+ prefix = colorstr('CoreML:')
+ try:
+ import coremltools as ct
+
+ print(f'{prefix} starting export with coremltools {ct.__version__}...')
+ assert opt.train, 'CoreML export requires model.train() mode, i.e. run with `python export.py --train`'
+ model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
+ f = opt.weights.replace('.pt', '.mlmodel') # filename
+ model.save(f)
+ print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+ except Exception as e:
+ print(f'{prefix} export failure: {e}')
# Finish
- print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
+ print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.')
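
For reference, representative invocations of the reworked exporter using only the flags added above (paths illustrative):

```bash
python models/export.py --weights yolov5s.pt --img 640 --batch 1                   # TorchScript + ONNX + CoreML (defaults)
python models/export.py --weights yolov5s.pt --include onnx --dynamic --simplify   # ONNX with dynamic axes, simplified
python models/export.py --weights yolov5s.pt --include onnx --half --device 0      # FP16 export (GPU required)
python models/export.py --weights yolov5s.pt --include coreml --train              # CoreML (requires --train)
```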
diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml
new file mode 100644
index 000000000000..11298b01f479
--- /dev/null
+++ b/models/hub/yolov5l6.yaml
@@ -0,0 +1,60 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+
+# anchors
+anchors:
+ - [ 19,27, 44,40, 38,94 ] # P3/8
+ - [ 96,68, 86,152, 180,137 ] # P4/16
+ - [ 140,301, 303,264, 238,542 ] # P5/32
+ - [ 436,615, 739,380, 925,792 ] # P6/64
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 11
+ ]
+
+# YOLOv5 head
+head:
+ [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 15
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 19
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
+ [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)
+
+ [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
+ ]
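
A quick sanity check for the new P6 configs, run from the repo root: the four-scale Detect head should yield strides 8/16/32/64 (`yolov5s6/m6/x6` below differ only in their depth/width multiples).

```python
import torch
from models.yolo import Model

model = Model('models/hub/yolov5l6.yaml', ch=3, nc=80)  # builds backbone + 4-scale head
print(model.stride)                                     # expected: tensor([ 8., 16., 32., 64.])
y = model(torch.zeros(1, 3, 640, 640))                  # dry run at a stride-64-safe size
```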
diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml
new file mode 100644
index 000000000000..48afc865593a
--- /dev/null
+++ b/models/hub/yolov5m6.yaml
@@ -0,0 +1,60 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 0.67 # model depth multiple
+width_multiple: 0.75 # layer channel multiple
+
+# anchors
+anchors:
+ - [ 19,27, 44,40, 38,94 ] # P3/8
+ - [ 96,68, 86,152, 180,137 ] # P4/16
+ - [ 140,301, 303,264, 238,542 ] # P5/32
+ - [ 436,615, 739,380, 925,792 ] # P6/64
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 11
+ ]
+
+# YOLOv5 head
+head:
+ [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 15
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 19
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
+ [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)
+
+ [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
+ ]
diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml
new file mode 100644
index 000000000000..f2d666722b30
--- /dev/null
+++ b/models/hub/yolov5s-transformer.yaml
@@ -0,0 +1,48 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.50 # layer channel multiple
+
+# anchors
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Focus, [64, 3]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 9, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 1, SPP, [1024, [5, 9, 13]]],
+ [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module
+ ]
+
+# YOLOv5 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
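
The only change versus the stock `yolov5s.yaml` is the final backbone stage, which swaps `C3` for the new `C3TR` transformer block. A hedged training example (dataset and epochs illustrative); note that the DDP `find_unused_parameters` change in `train.py` below exists because `nn.MultiheadAttention` inside `C3TR` is incompatible with default DDP.

```bash
python train.py --cfg models/hub/yolov5s-transformer.yaml --data coco128.yaml --img 640 --epochs 3
```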
diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml
new file mode 100644
index 000000000000..1df577a2cc97
--- /dev/null
+++ b/models/hub/yolov5s6.yaml
@@ -0,0 +1,60 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.50 # layer channel multiple
+
+# anchors
+anchors:
+ - [ 19,27, 44,40, 38,94 ] # P3/8
+ - [ 96,68, 86,152, 180,137 ] # P4/16
+ - [ 140,301, 303,264, 238,542 ] # P5/32
+ - [ 436,615, 739,380, 925,792 ] # P6/64
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 11
+ ]
+
+# YOLOv5 head
+head:
+ [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 15
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 19
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
+ [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)
+
+ [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
+ ]
diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml
new file mode 100644
index 000000000000..5ebc02124fe7
--- /dev/null
+++ b/models/hub/yolov5x6.yaml
@@ -0,0 +1,60 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.33 # model depth multiple
+width_multiple: 1.25 # layer channel multiple
+
+# anchors
+anchors:
+ - [ 19,27, 44,40, 38,94 ] # P3/8
+ - [ 96,68, 86,152, 180,137 ] # P4/16
+ - [ 140,301, 303,264, 238,542 ] # P5/32
+ - [ 436,615, 739,380, 925,792 ] # P6/64
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 11
+ ]
+
+# YOLOv5 head
+head:
+ [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 15
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 19
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
+ [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)
+
+ [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
+ ]
diff --git a/models/tf.py b/models/tf.py
index ab5a8d9d9a70..e96926cc2fb5 100644
--- a/models/tf.py
+++ b/models/tf.py
@@ -6,6 +6,8 @@
from copy import deepcopy
from pathlib import Path
+sys.path.append('./') # to run '$ python *.py' files in subdirectories
+
import numpy as np
import tensorflow as tf
import torch
@@ -15,7 +17,7 @@
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, autopad, C3
-from models.experimental import MixConv2d, CrossConv
+from models.experimental import MixConv2d, CrossConv, attempt_load
from models.yolo import Detect
from utils.datasets import LoadImages
from utils.general import make_divisible, check_file, check_dataset
@@ -32,7 +34,8 @@ def __init__(self, w=None):
beta_initializer=keras.initializers.Constant(w.bias.numpy()),
gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),
- moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()))
+ moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
+ epsilon=w.eps)
def call(self, inputs):
return self.bn(inputs)
@@ -221,7 +224,12 @@ def __init__(self, size, scale_factor, mode, w=None):
super(tf_Upsample, self).__init__()
assert scale_factor == 2, "scale_factor must be 2"
# self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
- self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode)
+ if opt.tf_raw_resize:
+ # with default arguments: align_corners=False, half_pixel_centers=False
+ self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
+ size=(x.shape[1] * 2, x.shape[2] * 2))
+ else:
+ self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode)
def call(self, inputs):
return self.upsample(inputs)
@@ -360,11 +368,12 @@ def representative_dataset_gen():
parser.add_argument('--img-size', nargs='+', type=int, default=[320, 320], help='image size') # height, width
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
parser.add_argument('--dynamic-batch-size', action='store_true', help='dynamic batch size')
- parser.add_argument('--no-tfl-detect', action='store_true', help='remove Detect() from TFLite model')
parser.add_argument('--source', type=str, default='../data/coco128.yaml', help='dir of images or data.yaml file')
parser.add_argument('--ncalib', type=int, default=100, help='number of calibration images')
parser.add_argument('--tfl-int8', action='store_true', dest='tfl_int8', help='export TFLite int8 model')
parser.add_argument('--tf-nms', action='store_true', dest='tf_nms', help='TF NMS (without TFLite export)')
+ parser.add_argument('--tf-raw-resize', action='store_true', dest='tf_raw_resize',
+ help='use tf.raw_ops.ResizeNearestNeighbor for resize')
parser.add_argument('--topk-per-class', type=int, default=100, help='topk per class to keep in NMS')
parser.add_argument('--topk-all', type=int, default=100, help='topk for all classes to keep in NMS')
parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
@@ -378,9 +387,7 @@ def representative_dataset_gen():
img = torch.zeros((opt.batch_size, 3, *opt.img_size)) # image size(1,3,320,192) iDetection
# Load PyTorch model
- attempt_download(opt.weights)
- model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float() # .fuse()
- model.eval()
+ model = attempt_load(opt.weights, map_location=torch.device('cpu'), inplace=True, fuse=False)
 model.model[-1].export = False # set Detect() layer export=False
y = model(img) # dry run
nc = y[0].shape[-1] - 5
@@ -432,10 +439,6 @@ def representative_dataset_gen():
if not opt.tf_nms:
try:
print('\nStarting TFLite export with TensorFlow %s...' % tf.__version__)
- if opt.no_tfl_detect:
- print("Don't export Detect module")
- m.training = True
- keras_model = keras.Model(inputs=inputs, outputs=tf_model.predict(inputs))
# fp32 TFLite model export ---------------------------------------------------------------------------------
# converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
@@ -476,6 +479,7 @@ def representative_dataset_gen():
converter.inference_output_type = tf.uint8 # or tf.int8
converter.allow_custom_ops = False
converter.experimental_new_converter = True
+ converter.experimental_new_quantizer = False
tflite_model = converter.convert()
f = opt.weights.replace('.pt', '-int8.tflite') # filename
open(f, "wb").write(tflite_model)
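
An illustrative int8 TFLite export exercising the new `--tf-raw-resize` flag (weights and source paths assumed): `tf.raw_ops.ResizeNearestNeighbor` with its defaults (`align_corners=False`, `half_pixel_centers=False`) can be required by edge delegates that reject `tf.image.resize`.

```bash
python models/tf.py --weights yolov5s.pt --img 320 --tfl-int8 --tf-raw-resize \
    --source ../data/coco128.yaml --ncalib 100
```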
diff --git a/models/yolo.py b/models/yolo.py
index 5dc8b57f4d98..2844cd0410e0 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -1,14 +1,20 @@
+"""YOLOv5-specific modules
+
+Usage:
+ $ python path/to/models/yolo.py --cfg yolov5s.yaml
+"""
+
import argparse
import logging
import sys
from copy import deepcopy
from pathlib import Path
-sys.path.append('./') # to run '$ python *.py' files in subdirectories
+sys.path.append(str(Path(__file__).parent.parent.absolute()))  # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)
from models.common import *
-from models.experimental import MixConv2d, CrossConv
+from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
@@ -22,9 +28,9 @@
class Detect(nn.Module):
stride = None # strides computed during build
- export = False # onnx export
+ onnx_dynamic = False # ONNX export parameter
- def __init__(self, nc=80, anchors=(), ch=()): # detection layer
+ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer
super(Detect, self).__init__()
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
@@ -35,23 +41,28 @@ def __init__(self, nc=80, anchors=(), ch=()): # detection layer
self.register_buffer('anchors', a) # shape(nl,na,2)
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
+ self.inplace = inplace # use in-place ops (e.g. slice assignment)
def forward(self, x):
# x = x.copy() # for profiling
z = [] # inference output
- self.training |= self.export
for i in range(self.nl):
x[i] = self.m[i](x[i]) # conv
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
if not self.training: # inference
- if self.grid[i].shape[2:4] != x[i].shape[2:4]:
+ if self.grid[i].shape[2:4] != x[i].shape[2:4] or self.onnx_dynamic:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
y = x[i].sigmoid()
- y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
- y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
+ if self.inplace:
+ y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
+ y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
+ else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
+ xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
+ wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh
+ y = torch.cat((xy, wh, y[..., 4:]), -1)
z.append(y.view(bs, -1, self.no))
return x if self.training else (torch.cat(z, 1), x)
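
The two decode branches are numerically identical; a self-contained sketch with toy shapes, mirroring `_make_grid()`:

```python
import torch

na, ny, nx, no = 3, 4, 4, 85                      # toy: anchors, grid h/w, outputs per anchor
stride = 8.
anchor_grid = torch.rand(1, na, 1, 1, 2) * 32     # random anchor w/h, broadcastable shape
yv, xv = torch.meshgrid(torch.arange(ny), torch.arange(nx))
grid = torch.stack((xv, yv), 2).view(1, 1, ny, nx, 2).float()
y = torch.rand(1, na, ny, nx, no)                 # stand-in for x[i].sigmoid()

y1 = y.clone()                                    # inplace=True path (slice assignment)
y1[..., 0:2] = (y1[..., 0:2] * 2. - 0.5 + grid) * stride
y1[..., 2:4] = (y1[..., 2:4] * 2) ** 2 * anchor_grid

xy = (y[..., 0:2] * 2. - 0.5 + grid) * stride     # inplace=False path (AWS Inferentia)
wh = (y[..., 2:4] * 2) ** 2 * anchor_grid
y2 = torch.cat((xy, wh, y[..., 4:]), -1)

assert torch.allclose(y1, y2)
```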
@@ -63,7 +74,7 @@ def _make_grid(nx=20, ny=20):
class Model(nn.Module):
- def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, number of classes
+ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
super(Model, self).__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
@@ -71,27 +82,32 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels,
import yaml # for torch hub
self.yaml_file = Path(cfg).name
with open(cfg) as f:
- self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ self.yaml = yaml.safe_load(f) # model dict
# Define model
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
if nc and nc != self.yaml['nc']:
- logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
+ logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
+ if anchors:
+ logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
+ self.yaml['anchors'] = round(anchors) # override yaml value
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
self.names = [str(i) for i in range(self.yaml['nc'])] # default names
- # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
+ self.inplace = self.yaml.get('inplace', True)
+ # logger.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
# Build strides, anchors
m = self.model[-1] # Detect()
if isinstance(m, Detect):
s = 256 # 2x min stride
+ m.inplace = self.inplace
m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
m.anchors /= m.stride.view(-1, 1, 1)
check_anchor_order(m)
self.stride = m.stride
self._initialize_biases() # only run once
- # print('Strides: %s' % m.stride.tolist())
+ # logger.info('Strides: %s' % m.stride.tolist())
# Init weights, biases
initialize_weights(self)
@@ -100,24 +116,23 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels,
def forward(self, x, augment=False, profile=False):
if augment:
- img_size = x.shape[-2:] # height, width
- s = [1, 0.83, 0.67] # scales
- f = [None, 3, None] # flips (2-ud, 3-lr)
- y = [] # outputs
- for si, fi in zip(s, f):
- xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
- yi = self.forward_once(xi)[0] # forward
- # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
- yi[..., :4] /= si # de-scale
- if fi == 2:
- yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
- elif fi == 3:
- yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
- y.append(yi)
- return torch.cat(y, 1), None # augmented inference, train
+ return self.forward_augment(x) # augmented inference, None
else:
return self.forward_once(x, profile) # single-scale inference, train
+ def forward_augment(self, x):
+ img_size = x.shape[-2:] # height, width
+ s = [1, 0.83, 0.67] # scales
+ f = [None, 3, None] # flips (2-ud, 3-lr)
+ y = [] # outputs
+ for si, fi in zip(s, f):
+ xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
+ yi = self.forward_once(xi)[0] # forward
+ # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
+ yi = self._descale_pred(yi, fi, si, img_size)
+ y.append(yi)
+ return torch.cat(y, 1), None # augmented inference, train
+
def forward_once(self, x, profile=False):
y, dt = [], [] # outputs
for m in self.model:
@@ -130,15 +145,34 @@ def forward_once(self, x, profile=False):
for _ in range(10):
_ = m(x)
dt.append((time_synchronized() - t) * 100)
- print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
+ if m == self.model[0]:
+ logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}")
+ logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if profile:
- print('%.1fms total' % sum(dt))
+ logger.info('%.1fms total' % sum(dt))
return x
+ def _descale_pred(self, p, flips, scale, img_size):
+ # de-scale predictions following augmented inference (inverse operation)
+ if self.inplace:
+ p[..., :4] /= scale # de-scale
+ if flips == 2:
+ p[..., 1] = img_size[0] - p[..., 1] # de-flip ud
+ elif flips == 3:
+ p[..., 0] = img_size[1] - p[..., 0] # de-flip lr
+ else:
+ x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale
+ if flips == 2:
+ y = img_size[0] - y # de-flip ud
+ elif flips == 3:
+ x = img_size[1] - x # de-flip lr
+ p = torch.cat((x, y, wh, p[..., 4:]), -1)
+ return p
+
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
# https://arxiv.org/abs/1708.02002 section 3.3
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
@@ -153,15 +187,16 @@ def _print_biases(self):
m = self.model[-1] # Detect() module
for mi in m.m: # from
b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
- print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
+ logger.info(
+ ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
# def _print_weights(self):
# for m in self.model.modules():
# if type(m) is Bottleneck:
- # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
+ # logger.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
- print('Fusing layers... ')
+ logger.info('Fusing layers... ')
for m in self.model.modules():
if type(m) is Conv and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
@@ -173,20 +208,20 @@ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
def nms(self, mode=True): # add or remove NMS module
present = type(self.model[-1]) is NMS # last layer is NMS
if mode and not present:
- print('Adding NMS... ')
+ logger.info('Adding NMS... ')
m = NMS() # module
m.f = -1 # from
m.i = self.model[-1].i + 1 # index
self.model.add_module(name='%s' % m.i, module=m) # add
self.eval()
elif not mode and present:
- print('Removing NMS... ')
+ logger.info('Removing NMS... ')
self.model = self.model[:-1] # remove
return self
- def autoshape(self): # add autoShape module
- print('Adding autoShape... ')
- m = autoShape(self) # wrap model
+ def autoshape(self): # add AutoShape module
+ logger.info('Adding AutoShape... ')
+ m = AutoShape(self) # wrap model
copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes
return m
@@ -210,45 +245,30 @@ def parse_model(d, ch): # model_dict, input_channels(3)
pass
n = max(round(n * gd), 1) if n > 1 else n # depth gain
- if m in [Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]:
+ if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
+ C3, C3TR]:
c1, c2 = ch[f], args[0]
-
- # Normal
- # if i > 0 and args[0] != no: # channel expansion factor
- # ex = 1.75 # exponential (default 2.0)
- # e = math.log(c2 / ch[1]) / math.log(2)
- # c2 = int(ch[1] * ex ** e)
- # if m != Focus:
-
- c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
-
- # Experimental
- # if i > 0 and args[0] != no: # channel expansion factor
- # ex = 1 + gw # exponential (default 2.0)
- # ch1 = 32 # ch[1]
- # e = math.log(c2 / ch1) / math.log(2) # level 1-n
- # c2 = int(ch1 * ex ** e)
- # if m != Focus:
- # c2 = make_divisible(c2, 8) if c2 != no else c2
+ if c2 != no: # if not output
+ c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
- if m in [BottleneckCSP, C3]:
- args.insert(2, n)
+ if m in [BottleneckCSP, C3, C3TR]:
+ args.insert(2, n) # number of repeats
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
- c2 = sum([ch[x if x < 0 else x + 1] for x in f])
+ c2 = sum([ch[x] for x in f])
elif m is Detect:
- args.append([ch[x + 1] for x in f])
+ args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
elif m is Contract:
- c2 = ch[f if f < 0 else f + 1] * args[0] ** 2
+ c2 = ch[f] * args[0] ** 2
elif m is Expand:
- c2 = ch[f if f < 0 else f + 1] // args[0] ** 2
+ c2 = ch[f] // args[0] ** 2
else:
- c2 = ch[f if f < 0 else f + 1]
+ c2 = ch[f]
m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
@@ -257,6 +277,8 @@ def parse_model(d, ch): # model_dict, input_channels(3)
logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
+ if i == 0:
+ ch = []
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
@@ -275,12 +297,12 @@ def parse_model(d, ch): # model_dict, input_channels(3)
model.train()
# Profile
- # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
+ # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device)
# y = model(img, profile=True)
- # Tensorboard
+ # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898)
# from torch.utils.tensorboard import SummaryWriter
- # tb_writer = SummaryWriter()
- # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
- # tb_writer.add_graph(model.model, img) # add model to tensorboard
+ # tb_writer = SummaryWriter('.')
+ # logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/")
+ # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph
# tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
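
With the new `anchors` argument, `Model()` can override the yaml anchor definition with a per-layer anchor count (as `train.py` now does via `hyp.get('anchors')`); a minimal sketch, run from the repo root:

```python
from models.yolo import Model

model = Model('models/yolov5s.yaml', ch=3, nc=10, anchors=3)  # override nc, force 3 anchors per layer
print(model.model[-1].na)  # 3 anchors per detection layer (placeholders, for AutoAnchor to evolve)
print(model.inplace)       # True unless the yaml sets 'inplace: false'
```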
diff --git a/requirements.txt b/requirements.txt
index 812d5442bb60..e3fc25bd7f4d 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,19 +1,18 @@
# pip install -r requirements.txt
# base ----------------------------------------
-Cython
matplotlib>=3.2.2
numpy>=1.18.5
opencv-python>=4.1.2
Pillow
-PyYAML>=5.3
+PyYAML>=5.3.1
scipy>=1.4.1
-tensorboard>=2.2
torch>=1.7.0
torchvision>=0.8.1
tqdm>=4.41.0
# logging -------------------------------------
+tensorboard>=2.4.1
# wandb
# plotting ------------------------------------
@@ -21,11 +20,12 @@ seaborn>=0.11.0
pandas
# export --------------------------------------
-# coremltools==4.0
-# onnx>=1.8.0
+# coremltools>=4.1
+# onnx>=1.9.0
# scikit-learn==0.19.2 # for coreml quantization
-# tensorflow==2.4.0
+# tensorflow==2.4.1 # for TFLite export
# extras --------------------------------------
-thop # FLOPS computation
+# Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
pycocotools>=2.0 # COCO mAP
+thop # FLOPS computation
diff --git a/test.py b/test.py
index 9c8d3b28bb03..0716c5d8b93c 100644
--- a/test.py
+++ b/test.py
@@ -12,13 +12,13 @@
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
- box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path
-from utils.loss import compute_loss
+ box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
+@torch.no_grad()
def test(data,
weights=None,
batch_size=32,
@@ -36,8 +36,11 @@ def test(data,
save_hybrid=False, # for hybrid auto-labelling
save_conf=False, # save auto-label confidences
plots=True,
- log_imgs=0): # number of logged images
-
+ wandb_logger=None,
+ compute_loss=None,
+ half_precision=True,
+ is_coco=False,
+ opt=None):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
@@ -48,51 +51,51 @@ def test(data,
device = select_device(opt.device, batch_size=batch_size)
# Directories
- save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
+ save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
- imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
+ gs = max(int(model.stride.max()), 32) # grid size (max stride)
+ imgsz = check_img_size(imgsz, s=gs) # check img_size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Half
- half = device.type != 'cpu' # half precision only supported on CUDA
+ half = device.type != 'cpu' and half_precision # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
- is_coco = data.endswith('coco.yaml') # is COCO dataset
- with open(data) as f:
- data = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ if isinstance(data, str):
+ is_coco = data.endswith('coco.yaml')
+ with open(data) as f:
+ data = yaml.safe_load(f)
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Logging
- log_imgs, wandb = min(log_imgs, 100), None # ceil
- try:
- import wandb # Weights & Biases
- except ImportError:
- log_imgs = 0
-
+ log_imgs = 0
+ if wandb_logger and wandb_logger.wandb:
+ log_imgs = min(wandb_logger.log_imgs, 100)
# Dataloader
if not training:
- img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
- _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
- path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
- dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True)[0]
+ if device.type != 'cpu':
+ model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
+ task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images
+ dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True,
+ prefix=colorstr(f'{task}: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
coco91class = coco80_to_coco91_class()
- s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
+ s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
@@ -103,25 +106,24 @@ def test(data,
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
- with torch.no_grad():
- # Run model
- t = time_synchronized()
- inf_out, train_out = model(img, augment=augment) # inference and training outputs
- t0 += time_synchronized() - t
+ # Run model
+ t = time_synchronized()
+ out, train_out = model(img, augment=augment) # inference and training outputs
+ t0 += time_synchronized() - t
- # Compute loss
- if training:
- loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls
+ # Compute loss
+ if compute_loss:
+ loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls
- # Run NMS
- targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
- lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
- t = time_synchronized()
- output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb)
- t1 += time_synchronized() - t
+ # Run NMS
+ targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
+ lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
+ t = time_synchronized()
+ out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
+ t1 += time_synchronized() - t
# Statistics per image
- for si, pred in enumerate(output):
+ for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
@@ -134,6 +136,8 @@ def test(data,
continue
# Predictions
+ if single_cls:
+ pred[:, 5] = 0
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
@@ -146,15 +150,17 @@ def test(data,
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
- # W&B logging
- if plots and len(wandb_images) < log_imgs:
- box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
- "class_id": int(cls),
- "box_caption": "%s %.3f" % (names[cls], conf),
- "scores": {"class_score": conf},
- "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
- boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
- wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
+ # W&B logging - Media Panel Plots
+ if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation
+ if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0:
+ box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+ "class_id": int(cls),
+ "box_caption": "%s %.3f" % (names[cls], conf),
+ "scores": {"class_score": conf},
+ "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
+ boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
+ wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name))
+ wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None
# Append to pycocotools JSON dictionary
if save_json:
@@ -178,12 +184,12 @@ def test(data,
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
if plots:
- confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1))
+ confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
- ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
- pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
+ ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # target indices
+ pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # prediction indices
# Search for detections
if pi.shape[0]:
@@ -209,24 +215,24 @@ def test(data,
f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
- Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start()
+ Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
- p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
+ ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
- pf = '%20s' + '%12.3g' * 6 # print format
+ pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
- if (verbose or (nc <= 20 and not training)) and nc > 1 and len(stats):
+ if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
@@ -238,9 +244,11 @@ def test(data,
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
- if wandb and wandb.run:
- wandb.log({"Images": wandb_images})
- wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]})
+ if wandb_logger and wandb_logger.wandb:
+ val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
+ wandb_logger.log({"Validation": val_batches})
+ if wandb_images:
+ wandb_logger.log({"Bounding Box Debugger/Images": wandb_images})
# Save JSON
if save_json and len(jdict):
@@ -268,10 +276,10 @@ def test(data,
print(f'pycocotools unable to run: {e}')
# Return results
+ model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
- model.float() # for training
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
@@ -286,7 +294,7 @@ def test(data,
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
- parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
+ parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
@@ -302,9 +310,9 @@ def test(data,
opt.save_json |= opt.data.endswith('coco.yaml')
opt.data = check_file(opt.data) # check file
print(opt)
- check_requirements()
+ check_requirements(exclude=('tensorboard', 'pycocotools', 'thop'))
- if opt.task in ['val', 'test']: # run normally
+ if opt.task in ('train', 'val', 'test'): # run normally
test(opt.data,
opt.weights,
opt.batch_size,
@@ -318,18 +326,24 @@ def test(data,
save_txt=opt.save_txt | opt.save_hybrid,
save_hybrid=opt.save_hybrid,
save_conf=opt.save_conf,
+ opt=opt
)
+ elif opt.task == 'speed': # speed benchmarks
+ for w in opt.weights:
+ test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt)
+
elif opt.task == 'study': # run over a range of settings and save/plot
- for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
- f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
- x = list(range(320, 800, 64)) # x axis
+ # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
+ x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
+ for w in opt.weights:
+ f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
- print('\nRunning %s point %s...' % (f, i))
- r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
- plots=False)
+ print(f'\nRunning {f} point {i}...')
+ r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
+ plots=False, opt=opt)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
- plot_study_txt(f, x) # plot
+ plot_study_txt(x=x) # plot
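
The expanded `--task` choices, as exercised above (weights and data paths illustrative):

```bash
python test.py --task train --data coco128.yaml --weights yolov5s.pt            # evaluate on the train split
python test.py --task speed --data coco.yaml --weights yolov5s.pt yolov5m.pt    # speed benchmark (conf 0.25, IoU 0.45)
python test.py --task study --data coco.yaml --iou 0.7 \
    --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt                       # mAP vs img-size, 256..1536 step 128
```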
diff --git a/train.py b/train.py
index 3a42db7f767d..1041ec30c257 100644
--- a/train.py
+++ b/train.py
@@ -4,9 +4,9 @@
import os
import random
import time
+from copy import deepcopy
from pathlib import Path
from threading import Thread
-from warnings import warn
import numpy as np
import torch.distributed as dist
@@ -28,23 +28,18 @@
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
- check_requirements, print_mutation, set_logging, one_cycle
+ check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
-from utils.loss import compute_loss
+from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
-from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first
+from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel
+from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
logger = logging.getLogger(__name__)
-try:
- import wandb
-except ImportError:
- wandb = None
- logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")
-
-def train(hyp, opt, device, tb_writer=None, wandb=None):
- logger.info(f'Hyperparameters {hyp}')
+def train(hyp, opt, device, tb_writer=None):
+ logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
save_dir, epochs, batch_size, total_batch_size, weights, rank = \
Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
@@ -57,40 +52,51 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
# Save run settings
with open(save_dir / 'hyp.yaml', 'w') as f:
- yaml.dump(hyp, f, sort_keys=False)
+ yaml.safe_dump(hyp, f, sort_keys=False)
with open(save_dir / 'opt.yaml', 'w') as f:
- yaml.dump(vars(opt), f, sort_keys=False)
+ yaml.safe_dump(vars(opt), f, sort_keys=False)
# Configure
plots = not opt.evolve # create plots
cuda = device.type != 'cpu'
init_seeds(2 + rank)
with open(opt.data) as f:
- data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict
- with torch_distributed_zero_first(rank):
- check_dataset(data_dict) # check
- train_path = data_dict['train']
- test_path = data_dict['val']
+ data_dict = yaml.safe_load(f) # data dict
+
+ # Logging: do this before the dataset check, since WandbLogger may update data_dict
+ loggers = {'wandb': None} # loggers dict
+ if rank in [-1, 0]:
+ opt.hyp = hyp # add hyperparameters
+ run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
+ wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
+ loggers['wandb'] = wandb_logger.wandb
+ data_dict = wandb_logger.data_dict
+ if wandb_logger.wandb:
+ weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming
+
nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
+ is_coco = opt.data.endswith('coco.yaml') and nc == 80 # COCO dataset
# Model
pretrained = weights.endswith('.pt')
if pretrained:
with torch_distributed_zero_first(rank):
- attempt_download(weights) # download if not found locally
+ weights = attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
- if hyp.get('anchors'):
- ckpt['model'].yaml['anchors'] = round(hyp['anchors']) # force autoanchor
- model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create
- exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else [] # exclude keys
+ model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
+ exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
state_dict = ckpt['model'].float().state_dict() # to FP32
state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
else:
- model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
+ model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
+ with torch_distributed_zero_first(rank):
+ check_dataset(data_dict) # check
+ train_path = data_dict['train']
+ test_path = data_dict['val']
# Freeze
freeze = [] # parameter names to freeze (full or partial)
@@ -127,18 +133,15 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
- lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
+ if opt.linear_lr:
+ lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
+ else:
+ lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
- # Logging
- if rank in [-1, 0] and wandb and wandb.run is None:
- opt.hyp = hyp # add hyperparameters
- wandb_run = wandb.init(config=opt, resume="allow",
- project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
- name=save_dir.stem,
- id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
- loggers = {'wandb': wandb} # loggers dict
+ # EMA
+ ema = ModelEMA(model) if rank in [-1, 0] else None
# Resume
start_epoch, best_fitness = 0, 0.0
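
For intuition on the new `--linear-lr` option above, the two schedules side by side; a standalone sketch assuming `epochs=300` and `hyp['lrf']=0.2`, with the cosine form matching `one_cycle()` in `utils/general.py` at this revision:

```python
import math

epochs, lrf = 300, 0.2
linear = lambda x: (1 - x / (epochs - 1)) * (1.0 - lrf) + lrf                   # new --linear-lr option
cosine = lambda x: ((1 - math.cos(x * math.pi / epochs)) / 2) * (lrf - 1) + 1   # default one_cycle(1, lrf, epochs)

for e in (0, epochs // 2, epochs - 1):
    print(e, round(linear(e), 3), round(cosine(e), 3))  # both ramp 1.0 -> lrf; cosine flattens at the ends
```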
@@ -148,10 +151,14 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
+ # EMA
+ if ema and ckpt.get('ema'):
+ ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
+ ema.updates = ckpt['updates']
+
# Results
if ckpt.get('training_results') is not None:
- with open(results_file, 'w') as file:
- file.write(ckpt['training_results']) # write results.txt
+ results_file.write_text(ckpt['training_results']) # write results.txt
# Epochs
start_epoch = ckpt['epoch'] + 1
@@ -165,7 +172,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
del ckpt, state_dict
# Image sizes
- gs = int(model.stride.max()) # grid size (max stride)
+ gs = max(int(model.stride.max()), 32) # grid size (max stride)
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
@@ -178,28 +185,21 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info('Using SyncBatchNorm()')
- # EMA
- ema = ModelEMA(model) if rank in [-1, 0] else None
-
- # DDP mode
- if cuda and rank != -1:
- model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
-
# Trainloader
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
world_size=opt.world_size, workers=opt.workers,
- image_weights=opt.image_weights, quad=opt.quad)
+ image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
# Process 0
if rank in [-1, 0]:
- ema.updates = start_epoch * nb // accumulate # set EMA updates
- testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt, # testloader
- hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True,
- rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5)[0]
+ testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
+ hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
+ world_size=opt.world_size, workers=opt.workers,
+ pad=0.5, prefix=colorstr('val: '))[0]
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
@@ -207,17 +207,26 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
if plots:
- plot_labels(labels, save_dir, loggers)
+ plot_labels(labels, names, save_dir, loggers)
if tb_writer:
tb_writer.add_histogram('classes', c, 0)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
+ model.half().float() # pre-reduce anchor precision
+
+ # DDP mode
+ if cuda and rank != -1:
+ model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
+ # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
+ find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))
# Model parameters
- hyp['cls'] *= nc / 80. # scale hyp['cls'] to class count
- hyp['obj'] *= imgsz ** 2 / 640. ** 2 * 3. / nl # scale hyp['obj'] to image size and output layers
+ hyp['box'] *= 3. / nl # scale to layers
+ hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
+ hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
+ hyp['label_smoothing'] = opt.label_smoothing
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
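The loss-weight scaling above keeps the box/cls/obj balance stable as the layer count, class count and image size change. A worked example with illustrative values:

nl, nc, imgsz = 3, 20, 1280  # 3 detection layers, 20 classes, 1280px training
hyp = {'box': 0.05, 'cls': 0.5, 'obj': 1.0}

hyp['box'] *= 3. / nl                       # 0.05, unchanged at nl=3
hyp['cls'] *= nc / 80. * 3. / nl            # 0.125, fewer classes -> smaller cls weight
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # 4.0, 4x the pixels -> 4x obj weight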
@@ -232,9 +241,11 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
- logger.info('Image sizes %g train, %g test\n'
- 'Using %g dataloader workers\nLogging results to %s\n'
- 'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs))
+ compute_loss = ComputeLoss(model) # init loss class
+ logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
+ f'Using {dataloader.num_workers} dataloader workers\n'
+ f'Logging results to {save_dir}\n'
+ f'Starting training for {epochs} epochs...')
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
@@ -260,7 +271,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
- logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'))
+ logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
@@ -290,7 +301,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
- loss, loss_items = compute_loss(pred, targets.to(device), model) # loss scaled by batch_size
+ loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
if rank != -1:
loss *= opt.world_size # gradient averaged between devices in DDP mode
if opt.quad:
@@ -319,11 +330,12 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
if plots and ni < 3:
f = save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
- # if tb_writer:
- # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
- # tb_writer.add_graph(model, imgs) # add model to tensorboard
- elif plots and ni == 3 and wandb:
- wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]})
+ if tb_writer:
+ tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # model graph
+ # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
+ elif plots and ni == 10 and wandb_logger.wandb:
+ wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
+ save_dir.glob('train*.jpg') if x.exists()]})
# end batch ------------------------------------------------------------------------------------------------
# end epoch ----------------------------------------------------------------------------------------------------
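The TensorBoard graph is now logged by tracing the de-paralleled model once on a real batch (DDP wrappers cannot be traced directly, hence de_parallel). A minimal sketch with a stand-in module in place of the YOLOv5 model:

import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter

model = nn.Sequential(nn.Conv2d(3, 16, 3, 2), nn.SiLU())  # stand-in model
imgs = torch.zeros(1, 3, 64, 64)

writer = SummaryWriter('runs/demo')
writer.add_graph(torch.jit.trace(model, imgs, strict=False), [])  # log the traced graph
writer.close()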
@@ -335,25 +347,27 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
- if ema:
- ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
+ ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
- results, maps, times = test.test(opt.data,
- batch_size=total_batch_size,
+ wandb_logger.current_epoch = epoch + 1
+ results, maps, times = test.test(data_dict,
+ batch_size=batch_size * 2,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
+ save_json=is_coco and final_epoch,
+ verbose=nc < 50 and final_epoch,
plots=plots and final_epoch,
- log_imgs=opt.log_imgs if wandb else 0)
+ wandb_logger=wandb_logger,
+ compute_loss=compute_loss,
+ is_coco=is_coco)
# Write
with open(results_file, 'a') as f:
- f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
- if len(opt.name) and opt.bucket:
- os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
+ f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss
# Log
tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
@@ -363,72 +377,74 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
- if wandb:
- wandb.log({tag: x}) # W&B
+ if wandb_logger.wandb:
+ wandb_logger.log({tag: x}) # W&B
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
if fi > best_fitness:
best_fitness = fi
+ wandb_logger.end_epoch(best_result=best_fitness == fi)
# Save model
- save = (not opt.nosave) or (final_epoch and not opt.evolve)
- if save:
- with open(results_file, 'r') as f: # create checkpoint
- ckpt = {'epoch': epoch,
- 'best_fitness': best_fitness,
- 'training_results': f.read(),
- 'model': ema.ema,
- 'optimizer': None if final_epoch else optimizer.state_dict(),
- 'wandb_id': wandb_run.id if wandb else None}
+ if (not opt.nosave) or (final_epoch and not opt.evolve): # if save
+ ckpt = {'epoch': epoch,
+ 'best_fitness': best_fitness,
+ 'training_results': results_file.read_text(),
+ 'model': deepcopy(de_parallel(model)).half(),
+ 'ema': deepcopy(ema.ema).half(),
+ 'updates': ema.updates,
+ 'optimizer': optimizer.state_dict(),
+ 'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
+ if wandb_logger.wandb:
+ if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
+ wandb_logger.log_model(
+ last.parent, opt, epoch, fi, best_model=best_fitness == fi)
del ckpt
+
# end epoch ----------------------------------------------------------------------------------------------------
# end training
-
if rank in [-1, 0]:
- # Strip optimizers
- final = best if best.exists() else last # final model
- for f in [last, best]:
- if f.exists():
- strip_optimizer(f) # strip optimizers
- if opt.bucket:
- os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
-
- # Plots
+ logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n')
if plots:
plot_results(save_dir=save_dir) # save as results.png
- if wandb:
- files = ['results.png', 'precision_recall_curve.png', 'confusion_matrix.png']
- wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
- if (save_dir / f).exists()]})
- if opt.log_artifacts:
- wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem)
-
- # Test best.pt
- logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
- if opt.data.endswith('coco.yaml') and nc == 80: # if COCO
- for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]): # speed, mAP tests
- results, _, _ = test.test(opt.data,
- batch_size=total_batch_size,
- imgsz=imgsz_test,
- conf_thres=conf,
- iou_thres=iou,
- model=attempt_load(final, device).half(),
- single_cls=opt.single_cls,
- dataloader=testloader,
- save_dir=save_dir,
- save_json=save_json,
- plots=False)
-
+ if wandb_logger.wandb:
+ files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
+ wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
+ if (save_dir / f).exists()]})
+
+ if not opt.evolve:
+ if is_coco: # COCO dataset
+ for m in [last, best] if best.exists() else [last]: # speed, mAP tests
+ results, _, _ = test.test(opt.data,
+ batch_size=batch_size * 2,
+ imgsz=imgsz_test,
+ conf_thres=0.001,
+ iou_thres=0.7,
+ model=attempt_load(m, device).half(),
+ single_cls=opt.single_cls,
+ dataloader=testloader,
+ save_dir=save_dir,
+ save_json=True,
+ plots=False,
+ is_coco=is_coco)
+
+ # Strip optimizers
+ for f in last, best:
+ if f.exists():
+ strip_optimizer(f) # strip optimizers
+ if wandb_logger.wandb: # Log the stripped model
+ wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model',
+ name='run_' + wandb_logger.wandb_run.id + '_model',
+ aliases=['latest', 'best', 'stripped'])
+ wandb_logger.finish_run()
else:
dist.destroy_process_group()
-
- wandb.run.finish() if wandb and wandb.run else None
torch.cuda.empty_cache()
return results
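best_fitness above ranks checkpoints with a single scalar blending the four metrics. A sketch, assuming the usual utils/metrics.py weighting in which mAP@.5:.95 dominates:

import numpy as np

def fitness(x):  # x rows are [P, R, mAP@.5, mAP@.5:.95]
    w = [0.0, 0.0, 0.1, 0.9]  # assumed weights: P and R ignored, mAP@.5:.95 dominates
    return (x[:, :4] * w).sum(1)

print(fitness(np.array([[0.745, 0.627, 0.68, 0.49]])))  # [0.509]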
@@ -457,13 +473,18 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
- parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100')
- parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model')
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
parser.add_argument('--project', default='runs/train', help='save to project/name')
+ parser.add_argument('--entity', default=None, help='W&B entity')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--quad', action='store_true', help='quad dataloader')
+ parser.add_argument('--linear-lr', action='store_true', help='linear LR')
+ parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
+ parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
+ parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
+ parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
+ parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
opt = parser.parse_args()
# Set DDP variables
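The new --label-smoothing flag is copied into hyp['label_smoothing'] above and consumed by the loss. A sketch of the smooth_BCE-style target split it implies (https://arxiv.org/abs/1902.04103):

def smooth_BCE(eps=0.1):  # positive, negative BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps

cp, cn = smooth_BCE(eps=0.1)  # (0.95, 0.05) instead of hard (1, 0) class targets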
@@ -472,16 +493,18 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
set_logging(opt.global_rank)
if opt.global_rank in [-1, 0]:
check_git_status()
- check_requirements()
+ check_requirements(exclude=('pycocotools', 'thop'))
# Resume
- if opt.resume: # resume an interrupted run
+ wandb_run = check_wandb_resume(opt)
+ if opt.resume and not wandb_run: # resume an interrupted run
ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
apriori = opt.global_rank, opt.local_rank
with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
- opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace
- opt.cfg, opt.weights, opt.resume, opt.global_rank, opt.local_rank = '', ckpt, True, *apriori # reinstate
+ opt = argparse.Namespace(**yaml.safe_load(f)) # replace
+ opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \
+ '', ckpt, True, opt.total_batch_size, *apriori # reinstate
logger.info('Resuming training from %s' % ckpt)
else:
# opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
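yaml.safe_load replaces yaml.load(Loader=yaml.FullLoader) throughout this PR: it parses only plain YAML scalars, lists and mappings, so a tampered opt.yaml or hyp file cannot instantiate arbitrary Python objects. For example:

import yaml

doc = 'weights: yolov5s.pt\nepochs: 3\nimg_size: [640, 640]\n'
opt_dict = yaml.safe_load(doc)  # plain dict of builtins only
print(opt_dict['img_size'])     # [640, 640]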
@@ -489,7 +512,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
opt.name = 'evolve' if opt.evolve else opt.name
- opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
+ opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve))
# DDP mode
opt.total_batch_size = opt.batch_size
@@ -500,24 +523,22 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
device = torch.device('cuda', opt.local_rank)
dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
+ assert not opt.image_weights, '--image-weights argument is not compatible with DDP training'
opt.batch_size = opt.total_batch_size // opt.world_size
# Hyperparameters
with open(opt.hyp) as f:
- hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps
- if 'box' not in hyp:
- warn('Compatibility: %s missing "box" which was renamed from "giou" in %s' %
- (opt.hyp, 'https://github.com/ultralytics/yolov5/pull/1120'))
- hyp['box'] = hyp.pop('giou')
+ hyp = yaml.safe_load(f) # load hyps
# Train
logger.info(opt)
if not opt.evolve:
tb_writer = None # init loggers
if opt.global_rank in [-1, 0]:
- logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/')
+ prefix = colorstr('tensorboard: ')
+ logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
- train(hyp, opt, device, tb_writer, wandb)
+ train(hyp, opt, device, tb_writer)
# Evolve hyperparameters (optional)
else:
@@ -591,7 +612,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
hyp[k] = round(hyp[k], 5) # significant digits
# Train mutation
- results = train(hyp.copy(), opt, device, wandb=wandb)
+ results = train(hyp.copy(), opt, device)
# Write mutation results
print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
diff --git a/tutorial.ipynb b/tutorial.ipynb
index 853f42f196d8..1bc9a8cda032 100644
--- a/tutorial.ipynb
+++ b/tutorial.ipynb
@@ -16,7 +16,7 @@
"accelerator": "GPU",
"widgets": {
"application/vnd.jupyter.widget-state+json": {
- "02ac0588602847eea00a0205f87bcce2": {
+ "8815626359d84416a2f44a95500580a4": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
@@ -28,15 +28,15 @@
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
- "layout": "IPY_MODEL_c472ea49806447a68b5a9221a4ddae85",
+ "layout": "IPY_MODEL_3b85609c4ce94a74823f2cfe141ce68e",
"_model_module": "@jupyter-widgets/controls",
"children": [
- "IPY_MODEL_091fdf499bd44a80af7281d16da4aa93",
- "IPY_MODEL_c79f69c959de4427ba102a87a9f46d80"
+ "IPY_MODEL_876609753c2946248890344722963d44",
+ "IPY_MODEL_8abfdd8778e44b7ca0d29881cb1ada05"
]
}
},
- "c472ea49806447a68b5a9221a4ddae85": {
+ "3b85609c4ce94a74823f2cfe141ce68e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -87,12 +87,12 @@
"left": null
}
},
- "091fdf499bd44a80af7281d16da4aa93": {
+ "876609753c2946248890344722963d44": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
- "style": "IPY_MODEL_c42ae5af74a0491187827d0a1fc259bb",
+ "style": "IPY_MODEL_78c6c3d97c484916b8ee167c63556800",
"_dom_classes": [],
"description": "100%",
"_model_name": "FloatProgressModel",
@@ -107,30 +107,30 @@
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
- "layout": "IPY_MODEL_5a90f72d3a2d46cb9ad915daa3ead8b4"
+ "layout": "IPY_MODEL_9dd0f182db5d45378ceafb855e486eb8"
}
},
- "c79f69c959de4427ba102a87a9f46d80": {
+ "8abfdd8778e44b7ca0d29881cb1ada05": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
- "style": "IPY_MODEL_2a7ed6611da34662b10e37fd4f4e4438",
+ "style": "IPY_MODEL_a3dab28b45c247089a3d1b8b09f327de",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
- "value": " 781M/781M [00:23<00:00, 35.1MB/s]",
+ "value": " 781M/781M [08:43<00:00, 1.56MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
- "layout": "IPY_MODEL_fead0160658445bf9e966daa4481cad0"
+ "layout": "IPY_MODEL_32451332b7a94ba9aacddeaa6ac94d50"
}
},
- "c42ae5af74a0491187827d0a1fc259bb": {
+ "78c6c3d97c484916b8ee167c63556800": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
@@ -145,7 +145,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
- "5a90f72d3a2d46cb9ad915daa3ead8b4": {
+ "9dd0f182db5d45378ceafb855e486eb8": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -196,7 +196,7 @@
"left": null
}
},
- "2a7ed6611da34662b10e37fd4f4e4438": {
+ "a3dab28b45c247089a3d1b8b09f327de": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
@@ -210,7 +210,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
- "fead0160658445bf9e966daa4481cad0": {
+ "32451332b7a94ba9aacddeaa6ac94d50": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -261,7 +261,7 @@
"left": null
}
},
- "cf1ab9fde7444d3e874fcd407ba8f0f8": {
+ "0fffa335322b41658508e06aed0acbf0": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
@@ -273,15 +273,15 @@
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
- "layout": "IPY_MODEL_9ee03f9c85f34155b2645e89c9211547",
+ "layout": "IPY_MODEL_a354c6f80ce347e5a3ef64af87c0eccb",
"_model_module": "@jupyter-widgets/controls",
"children": [
- "IPY_MODEL_933ebc451c09490aadf71afbbb3dff2a",
- "IPY_MODEL_8e7c55cbca624432a84fa7ad8f3a4016"
+ "IPY_MODEL_85823e71fea54c39bd11e2e972348836",
+ "IPY_MODEL_fb11acd663fa4e71b041d67310d045fd"
]
}
},
- "9ee03f9c85f34155b2645e89c9211547": {
+ "a354c6f80ce347e5a3ef64af87c0eccb": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -332,50 +332,50 @@
"left": null
}
},
- "933ebc451c09490aadf71afbbb3dff2a": {
+ "85823e71fea54c39bd11e2e972348836": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
- "style": "IPY_MODEL_dd62d83b35d04a178840772e82bd2f2e",
+ "style": "IPY_MODEL_8a919053b780449aae5523658ad611fa",
"_dom_classes": [],
"description": "100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
- "max": 22090455,
+ "max": 22091032,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
- "value": 22090455,
+ "value": 22091032,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
- "layout": "IPY_MODEL_d5c4f3d1c8b046e3a163faaa6b3a51ab"
+ "layout": "IPY_MODEL_5bae9393a58b44f7b69fb04816f94f6f"
}
},
- "8e7c55cbca624432a84fa7ad8f3a4016": {
+ "fb11acd663fa4e71b041d67310d045fd": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
- "style": "IPY_MODEL_78d1da8efb504b03878ca9ce5b404006",
+ "style": "IPY_MODEL_d26c6d16c7f24030ab2da5285bf198ee",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
- "value": " 21.1M/21.1M [00:01<00:00, 16.9MB/s]",
+ "value": " 21.1M/21.1M [00:02<00:00, 9.36MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
- "layout": "IPY_MODEL_d28208ba1213436a93926a01d99d97ae"
+ "layout": "IPY_MODEL_f7767886b2364c8d9efdc79e175ad8eb"
}
},
- "dd62d83b35d04a178840772e82bd2f2e": {
+ "8a919053b780449aae5523658ad611fa": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
@@ -390,7 +390,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
- "d5c4f3d1c8b046e3a163faaa6b3a51ab": {
+ "5bae9393a58b44f7b69fb04816f94f6f": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -441,7 +441,7 @@
"left": null
}
},
- "78d1da8efb504b03878ca9ce5b404006": {
+ "d26c6d16c7f24030ab2da5285bf198ee": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
@@ -455,7 +455,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
- "d28208ba1213436a93926a01d99d97ae": {
+ "f7767886b2364c8d9efdc79e175ad8eb": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -517,7 +517,8 @@
"colab_type": "text"
},
"source": [
- ""
+ "",
+ ""
]
},
{
@@ -528,8 +529,8 @@
"source": [
"\n",
"\n",
- "This notebook was written by Ultralytics LLC, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n",
- "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com."
+ "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n",
+ "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com. Thank you!"
]
},
{
@@ -550,7 +551,7 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "888d5c41-00e9-47d8-d230-dded99325bea"
+ "outputId": "9b022435-4197-41fc-abea-81f86ce857d0"
},
"source": [
"!git clone https://github.com/ultralytics/yolov5 # clone repo\n",
@@ -561,14 +562,14 @@
"from IPython.display import Image, clear_output # to display images\n",
"\n",
"clear_output()\n",
- "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))"
+ "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
- "Setup complete. Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16130MB, multi_processor_count=80)\n"
+ "Setup complete. Using torch 1.8.1+cu101 (Tesla V100-SXM2-16GB)\n"
],
"name": "stdout"
}
@@ -582,7 +583,9 @@
"source": [
"# 1. Inference\n",
"\n",
- "`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)."
+ "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n",
+ "\n",
+ " "
]
},
{
@@ -604,15 +607,15 @@
{
"output_type": "stream",
"text": [
- "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, save_conf=False, save_dir='runs/detect', save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n",
- "Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
+ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n",
+ "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Fusing layers... \n",
- "Model Summary: 232 layers, 7459581 parameters, 0 gradients\n",
- "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.012s)\n",
- "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.012s)\n",
+ "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n",
+ "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n",
+ "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n",
"Results saved to runs/detect/exp\n",
- "Done. (0.113s)\n"
+ "Done. (0.087)\n"
],
"name": "stdout"
},
@@ -634,16 +637,6 @@
}
]
},
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "4qbaa3iEcrcE"
- },
- "source": [
- "Results are saved to `runs/detect`. A full list of available inference sources:\n",
- " "
- ]
- },
{
"cell_type": "markdown",
"metadata": {
@@ -651,7 +644,7 @@
},
"source": [
"# 2. Test\n",
- "Test a model on [COCO](https://cocodataset.org/#home) val or test-dev dataset to evaluate trained accuracy. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be 1-2% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation."
+ "Test a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation."
]
},
{
@@ -670,19 +663,19 @@
"id": "WQPtK1QYVaD_",
"colab": {
"base_uri": "https://localhost:8080/",
- "height": 66,
+ "height": 65,
"referenced_widgets": [
- "02ac0588602847eea00a0205f87bcce2",
- "c472ea49806447a68b5a9221a4ddae85",
- "091fdf499bd44a80af7281d16da4aa93",
- "c79f69c959de4427ba102a87a9f46d80",
- "c42ae5af74a0491187827d0a1fc259bb",
- "5a90f72d3a2d46cb9ad915daa3ead8b4",
- "2a7ed6611da34662b10e37fd4f4e4438",
- "fead0160658445bf9e966daa4481cad0"
+ "8815626359d84416a2f44a95500580a4",
+ "3b85609c4ce94a74823f2cfe141ce68e",
+ "876609753c2946248890344722963d44",
+ "8abfdd8778e44b7ca0d29881cb1ada05",
+ "78c6c3d97c484916b8ee167c63556800",
+ "9dd0f182db5d45378ceafb855e486eb8",
+ "a3dab28b45c247089a3d1b8b09f327de",
+ "32451332b7a94ba9aacddeaa6ac94d50"
]
},
- "outputId": "780d8f5f-766e-4b99-e370-11f9b884c27a"
+ "outputId": "81521192-cf67-4a47-a4cc-434cb0ebc363"
},
"source": [
"# Download COCO val2017\n",
@@ -695,7 +688,7 @@
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
- "model_id": "02ac0588602847eea00a0205f87bcce2",
+ "model_id": "8815626359d84416a2f44a95500580a4",
"version_minor": 0,
"version_major": 2
},
@@ -723,7 +716,7 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "013935a5-ba81-4810-b723-0cb01cf7bc79"
+ "outputId": "2340b131-9943-4cd6-fd3a-8272aeb0774f"
},
"source": [
"# Run YOLOv5x on COCO val2017\n",
@@ -734,45 +727,46 @@
{
"output_type": "stream",
"text": [
- "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
- "Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
+ "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
+ "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
- "Downloading https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5x.pt to yolov5x.pt...\n",
- "100% 170M/170M [00:05<00:00, 32.6MB/s]\n",
+ "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n",
+ "100% 168M/168M [00:05<00:00, 32.3MB/s]\n",
"\n",
"Fusing layers... \n",
- "Model Summary: 484 layers, 88922205 parameters, 0 gradients\n",
- "Scanning labels ../coco/labels/val2017.cache (4952 found, 0 missing, 48 empty, 0 duplicate, for 5000 images): 5000it [00:00, 14785.71it/s]\n",
- " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:30<00:00, 1.74it/s]\n",
- " all 5e+03 3.63e+04 0.409 0.754 0.672 0.484\n",
- "Speed: 5.9/2.1/7.9 ms inference/NMS/total per 640x640 image at batch-size 32\n",
+ "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n",
+ " all 5000 36335 0.745 0.627 0.68 0.49\n",
+ "Speed: 5.3/1.6/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n",
"\n",
"Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n",
"loading annotations into memory...\n",
- "Done (t=0.43s)\n",
+ "Done (t=0.48s)\n",
"creating index...\n",
"index created!\n",
"Loading and preparing results...\n",
- "DONE (t=4.67s)\n",
+ "DONE (t=5.08s)\n",
"creating index...\n",
"index created!\n",
"Running per image evaluation...\n",
"Evaluate annotation type *bbox*\n",
- "DONE (t=92.11s).\n",
+ "DONE (t=90.51s).\n",
"Accumulating evaluation results...\n",
- "DONE (t=13.24s).\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.492\n",
- " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.676\n",
- " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.534\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.318\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.541\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.633\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.376\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.617\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.670\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.493\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.723\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.812\n",
+ "DONE (t=15.16s).\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n",
+ " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n",
+ " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.629\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.827\n",
"Results saved to runs/test/exp\n"
],
"name": "stdout"
@@ -786,7 +780,7 @@
},
"source": [
"## COCO test-dev2017\n",
- "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (20,000 images). Results are saved to a `*.json` file which can be submitted to the evaluation server at https://competitions.codalab.org/competitions/20794."
+ "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (**20,000 images, no labels**). Results are saved to a `*.json` file which should be **zipped** and submitted to the evaluation server at https://competitions.codalab.org/competitions/20794."
]
},
{
@@ -797,9 +791,9 @@
"source": [
"# Download COCO test-dev2017\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n",
- "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n",
+ "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n",
"!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n",
- "%mv ./test2017 ./coco/images && mv ./coco ../ # move images to /coco and move /coco next to /yolov5"
+ "%mv ./test2017 ../coco/images # move to /coco"
],
"execution_count": null,
"outputs": []
@@ -833,19 +827,19 @@
"id": "Knxi2ncxWffW",
"colab": {
"base_uri": "https://localhost:8080/",
- "height": 66,
+ "height": 65,
"referenced_widgets": [
- "cf1ab9fde7444d3e874fcd407ba8f0f8",
- "9ee03f9c85f34155b2645e89c9211547",
- "933ebc451c09490aadf71afbbb3dff2a",
- "8e7c55cbca624432a84fa7ad8f3a4016",
- "dd62d83b35d04a178840772e82bd2f2e",
- "d5c4f3d1c8b046e3a163faaa6b3a51ab",
- "78d1da8efb504b03878ca9ce5b404006",
- "d28208ba1213436a93926a01d99d97ae"
+ "0fffa335322b41658508e06aed0acbf0",
+ "a354c6f80ce347e5a3ef64af87c0eccb",
+ "85823e71fea54c39bd11e2e972348836",
+ "fb11acd663fa4e71b041d67310d045fd",
+ "8a919053b780449aae5523658ad611fa",
+ "5bae9393a58b44f7b69fb04816f94f6f",
+ "d26c6d16c7f24030ab2da5285bf198ee",
+ "f7767886b2364c8d9efdc79e175ad8eb"
]
},
- "outputId": "59f9a94b-21e1-4626-f36a-a8e1b1e5c8f6"
+ "outputId": "b41ac253-9e1b-4c26-d78b-700ea0154f43"
},
"source": [
"# Download COCO128\n",
@@ -858,12 +852,12 @@
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
- "model_id": "cf1ab9fde7444d3e874fcd407ba8f0f8",
+ "model_id": "0fffa335322b41658508e06aed0acbf0",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
- "HBox(children=(FloatProgress(value=0.0, max=22090455.0), HTML(value='')))"
+ "HBox(children=(FloatProgress(value=0.0, max=22091032.0), HTML(value='')))"
]
},
"metadata": {
@@ -896,7 +890,7 @@
"id": "bOy5KI2ncnWd"
},
"source": [
- "# Tensorboard (optional)\n",
+ "# Tensorboard (optional)\n",
"%load_ext tensorboard\n",
"%tensorboard --logdir runs/train"
],
@@ -909,9 +903,10 @@
"id": "2fLAV42oNb7M"
},
"source": [
- "# Weights & Biases (optional)\n",
- "%pip install -q wandb \n",
- "!wandb login # use 'wandb disabled' or 'wandb enabled' to disable or enable"
+ "# Weights & Biases (optional)\n",
+ "%pip install -q wandb\n",
+ "import wandb\n",
+ "wandb.login()"
],
"execution_count": null,
"outputs": []
@@ -923,7 +918,7 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "138f2d1d-364c-405a-cf13-ea91a2aff915"
+ "outputId": "e715d09c-5d93-4912-a0df-9da0893f2014"
},
"source": [
"# Train YOLOv5s on COCO128 for 3 epochs\n",
@@ -934,76 +929,76 @@
{
"output_type": "stream",
"text": [
- "Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
- "\n",
- "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], local_rank=-1, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n",
- "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n",
- "2020-11-20 11:45:17.042357: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1\n",
- "Hyperparameters {'lr0': 0.01, 'lrf': 0.2, 'momentum': 0.937, 'weight_decay': 0.0005, 'warmup_epochs': 3.0, 'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1, 'box': 0.05, 'cls': 0.5, 'cls_pw': 1.0, 'obj': 1.0, 'obj_pw': 1.0, 'iou_t': 0.2, 'anchor_t': 4.0, 'fl_gamma': 0.0, 'hsv_h': 0.015, 'hsv_s': 0.7, 'hsv_v': 0.4, 'degrees': 0.0, 'translate': 0.1, 'scale': 0.5, 'shear': 0.0, 'perspective': 0.0, 'flipud': 0.0, 'fliplr': 0.5, 'mosaic': 1.0, 'mixup': 0.0}\n",
- "Downloading https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5s.pt to yolov5s.pt...\n",
- "100% 14.5M/14.5M [00:01<00:00, 14.8MB/s]\n",
+ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
+ "YOLOv5 🚀 v5.0-2-g54d6516 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
+ "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n",
+ "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
+ "2021-04-12 10:29:58.539457: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
+ "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n",
+ "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 3520 models.common.Focus [3, 32, 3] \n",
" 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n",
- " 2 -1 1 19904 models.common.BottleneckCSP [64, 64, 1] \n",
+ " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n",
" 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n",
- " 4 -1 1 161152 models.common.BottleneckCSP [128, 128, 3] \n",
+ " 4 -1 1 156928 models.common.C3 [128, 128, 3] \n",
" 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n",
- " 6 -1 1 641792 models.common.BottleneckCSP [256, 256, 3] \n",
+ " 6 -1 1 625152 models.common.C3 [256, 256, 3] \n",
" 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n",
" 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n",
- " 9 -1 1 1248768 models.common.BottleneckCSP [512, 512, 1, False] \n",
+ " 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
" 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n",
" 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 12 [-1, 6] 1 0 models.common.Concat [1] \n",
- " 13 -1 1 378624 models.common.BottleneckCSP [512, 256, 1, False] \n",
+ " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n",
" 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n",
" 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 16 [-1, 4] 1 0 models.common.Concat [1] \n",
- " 17 -1 1 95104 models.common.BottleneckCSP [256, 128, 1, False] \n",
+ " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n",
" 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n",
" 19 [-1, 14] 1 0 models.common.Concat [1] \n",
- " 20 -1 1 313088 models.common.BottleneckCSP [256, 256, 1, False] \n",
+ " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n",
" 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n",
" 22 [-1, 10] 1 0 models.common.Concat [1] \n",
- " 23 -1 1 1248768 models.common.BottleneckCSP [512, 512, 1, False] \n",
+ " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
" 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
- "Model Summary: 283 layers, 7468157 parameters, 7468157 gradients\n",
+ "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n",
"\n",
- "Transferred 370/370 items from yolov5s.pt\n",
- "Optimizer groups: 62 .bias, 70 conv.weight, 59 other\n",
- "Scanning images: 100% 128/128 [00:00<00:00, 5395.63it/s]\n",
- "Scanning labels ../coco128/labels/train2017.cache (126 found, 0 missing, 2 empty, 0 duplicate, for 128 images): 128it [00:00, 13972.28it/s]\n",
- "Caching images (0.1GB): 100% 128/128 [00:00<00:00, 173.55it/s]\n",
- "Scanning labels ../coco128/labels/train2017.cache (126 found, 0 missing, 2 empty, 0 duplicate, for 128 images): 128it [00:00, 8693.98it/s]\n",
- "Caching images (0.1GB): 100% 128/128 [00:00<00:00, 133.30it/s]\n",
- "NumExpr defaulting to 2 threads.\n",
+ "Transferred 362/362 items from yolov5s.pt\n",
+ "Scaled weight_decay = 0.0005\n",
+ "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n",
+ "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 796544.38it/s]\n",
+ "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.73it/s]\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 500812.42it/s]\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 134.10it/s]\n",
+ "Plotting labels... \n",
"\n",
- "Analyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
+ "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
"Image sizes 640 train, 640 test\n",
"Using 2 dataloader workers\n",
"Logging results to runs/train/exp\n",
"Starting training for 3 epochs...\n",
"\n",
- " Epoch gpu_mem box obj cls total targets img_size\n",
- " 0/2 5.24G 0.04202 0.06745 0.01503 0.1245 194 640: 100% 8/8 [00:03<00:00, 2.01it/s]\n",
- " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:03<00:00, 2.40it/s]\n",
- " all 128 929 0.404 0.758 0.701 0.45\n",
+ " Epoch gpu_mem box obj cls total labels img_size\n",
+ " 0/2 3.29G 0.04368 0.065 0.02127 0.1299 183 640: 100% 8/8 [00:03<00:00, 2.21it/s]\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.09s/it]\n",
+ " all 128 929 0.605 0.657 0.666 0.434\n",
"\n",
- " Epoch gpu_mem box obj cls total targets img_size\n",
- " 1/2 5.12G 0.04461 0.05874 0.0169 0.1202 142 640: 100% 8/8 [00:01<00:00, 4.14it/s]\n",
- " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:01<00:00, 5.75it/s]\n",
- " all 128 929 0.403 0.772 0.703 0.453\n",
+ " Epoch gpu_mem box obj cls total labels img_size\n",
+ " 1/2 6.65G 0.04556 0.0651 0.01987 0.1305 166 640: 100% 8/8 [00:01<00:00, 5.18it/s]\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.72it/s]\n",
+ " all 128 929 0.61 0.66 0.669 0.438\n",
"\n",
- " Epoch gpu_mem box obj cls total targets img_size\n",
- " 2/2 5.12G 0.04445 0.06545 0.01667 0.1266 149 640: 100% 8/8 [00:01<00:00, 4.15it/s]\n",
- " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:06<00:00, 1.18it/s]\n",
- " all 128 929 0.395 0.767 0.702 0.452\n",
- "Optimizer stripped from runs/train/exp/weights/last.pt, 15.2MB\n",
- "3 epochs completed in 0.006 hours.\n",
- "\n"
+ " Epoch gpu_mem box obj cls total labels img_size\n",
+ " 2/2 6.65G 0.04624 0.06923 0.0196 0.1351 182 640: 100% 8/8 [00:01<00:00, 5.19it/s]\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.27it/s]\n",
+ " all 128 929 0.618 0.659 0.671 0.438\n",
+ "3 epochs completed in 0.007 hours.\n",
+ "\n",
+ "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
+ "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n"
],
"name": "stdout"
}
@@ -1026,9 +1021,9 @@
"source": [
"## Weights & Biases Logging 🌟 NEW\n",
"\n",
- "[Weights & Biases](https://www.wandb.com/) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n",
+ "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n",
"\n",
- "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n",
+ "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n",
"\n",
""
]
@@ -1114,10 +1109,23 @@
"\n",
"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
"\n",
- "- **Google Colab Notebook** with free GPU: \n",
- "- **Kaggle Notebook** with free GPU: [https://www.kaggle.com/ultralytics/yolov5](https://www.kaggle.com/ultralytics/yolov5)\n",
- "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) \n",
- "- **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker)\n"
+ "- **Google Colab and Kaggle** notebooks with free GPU: \n",
+ "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n",
+ "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n",
+ "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "6Qu7Iesl0p54"
+ },
+ "source": [
+ "# Status\n",
+ "\n",
+ "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n",
+ "\n",
+ "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
]
},
{
@@ -1152,11 +1160,32 @@
},
"source": [
"# Reproduce\n",
- "%%shell\n",
- "for x in yolov5s yolov5m yolov5l yolov5x; do\n",
- " python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n",
- " python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP\n",
- "done"
+ "for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n",
+ " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n",
+ " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "GMusP4OAxFu6"
+ },
+ "source": [
+ "# PyTorch Hub\n",
+ "import torch\n",
+ "\n",
+ "# Model\n",
+ "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n",
+ "\n",
+ "# Images\n",
+ "dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/'\n",
+ "imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images\n",
+ "\n",
+ "# Inference\n",
+ "results = model(imgs)\n",
+ "results.print() # or .show(), .save()"
],
"execution_count": null,
"outputs": []
@@ -1205,6 +1234,19 @@
"execution_count": null,
"outputs": []
},
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "RVRSOhEvUdb5"
+ },
+ "source": [
+ "# Evolve\n",
+ "!python train.py --img 640 --batch 64 --epochs 100 --data coco128.yaml --weights yolov5s.pt --cache --noautoanchor --evolve\n",
+ "!d=runs/train/evolve && cp evolve.* $d && zip -r evolve.zip $d && gsutil mv evolve.zip gs://bucket # upload results (optional)"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
{
"cell_type": "code",
"metadata": {
@@ -1219,4 +1261,4 @@
"outputs": []
}
]
-}
\ No newline at end of file
+}
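The notebook's new PyTorch Hub cell ends with results.print(); the returned Detections object also exposes raw tensors, assuming the hub API of this release:

import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
results = model('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg')
print(results.xyxy[0])  # one row per detection: [xmin, ymin, xmax, ymax, confidence, class]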
diff --git a/utils/activations.py b/utils/activations.py
index aa3ddf071d28..92a3b5eaa54b 100644
--- a/utils/activations.py
+++ b/utils/activations.py
@@ -19,23 +19,6 @@ def forward(x):
return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX
-class MemoryEfficientSwish(nn.Module):
- class F(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x):
- ctx.save_for_backward(x)
- return x * torch.sigmoid(x)
-
- @staticmethod
- def backward(ctx, grad_output):
- x = ctx.saved_tensors[0]
- sx = torch.sigmoid(x)
- return grad_output * (sx * (1 + x * (1 - sx)))
-
- def forward(self, x):
- return self.F.apply(x)
-
-
# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
class Mish(nn.Module):
@staticmethod
@@ -70,3 +53,46 @@ def __init__(self, c1, k=3): # ch_in, kernel
def forward(self, x):
return torch.max(x, self.bn(self.conv(x)))
+
+
+# ACON https://arxiv.org/pdf/2009.04759.pdf ----------------------------------------------------------------------------
+class AconC(nn.Module):
+ r""" ACON activation (activate or not).
+ AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
+ according to "Activate or Not: Learning Customized Activation" .
+ """
+
+ def __init__(self, c1):
+ super().__init__()
+ self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
+
+ def forward(self, x):
+ dpx = (self.p1 - self.p2) * x
+ return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
+
+
+class MetaAconC(nn.Module):
+ r""" ACON activation (activate or not).
+ MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
+ according to "Activate or Not: Learning Customized Activation" .
+ """
+
+ def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
+ super().__init__()
+ c2 = max(r, c1 // r)
+ self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
+ self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
+ # self.bn1 = nn.BatchNorm2d(c2)
+ # self.bn2 = nn.BatchNorm2d(c1)
+
+ def forward(self, x):
+ y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
+ # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
+ # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
+ beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
+ dpx = (self.p1 - self.p2) * x
+ return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
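Both ACON variants are drop-in, channel-parameterized activations. A quick shape-level smoke test (random weights, illustrative sizes):

import torch
from utils.activations import AconC, MetaAconC

x = torch.randn(2, 64, 32, 32)  # NCHW with c1=64
print(AconC(64)(x).shape)       # torch.Size([2, 64, 32, 32])
print(MetaAconC(64)(x).shape)   # torch.Size([2, 64, 32, 32]), beta from the small fc1/fc2 net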
diff --git a/utils/autoanchor.py b/utils/autoanchor.py
index badefc154138..87dc394c832e 100644
--- a/utils/autoanchor.py
+++ b/utils/autoanchor.py
@@ -3,9 +3,10 @@
import numpy as np
import torch
import yaml
-from scipy.cluster.vq import kmeans
from tqdm import tqdm
+from utils.general import colorstr
+
def check_anchor_order(m):
# Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
@@ -20,7 +21,8 @@ def check_anchor_order(m):
def check_anchors(dataset, model, thr=4.0, imgsz=640):
# Check anchor fit to data, recompute if necessary
- print('\nAnalyzing anchors... ', end='')
+ prefix = colorstr('autoanchor: ')
+ print(f'\n{prefix}Analyzing anchors... ', end='')
m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
@@ -34,21 +36,25 @@ def metric(k): # compute metric
bpr = (best > 1. / thr).float().mean() # best possible recall
return bpr, aat
- bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
- print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='')
+ anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors
+ bpr, aat = metric(anchors)
+ print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')
if bpr < 0.98: # threshold to recompute
print('. Attempting to improve anchors, please wait...')
na = m.anchor_grid.numel() // 2 # number of anchors
- new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
- new_bpr = metric(new_anchors.reshape(-1, 2))[0]
+ try:
+ anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
+ except Exception as e:
+ print(f'{prefix}ERROR: {e}')
+ new_bpr = metric(anchors)[0]
if new_bpr > bpr: # replace anchors
- new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
- m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference
- m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
+ anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
+ m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference
+ m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
check_anchor_order(m)
- print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
+ print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')
else:
- print('Original anchors better than new anchors. Proceeding with original anchors.')
+ print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.')
print('') # newline
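The metric() used above measures anchor fit as best possible recall: a label is matchable if its worst width/height ratio against some anchor stays within thr. A toy version:

import torch

def metric(k, wh, thr=4.0):  # k: anchors (n,2), wh: label sizes (m,2)
    r = wh[:, None] / k[None]           # (m, n, 2) width/height ratios
    x = torch.min(r, 1. / r).min(2)[0]  # worst-case ratio per label/anchor pair
    best = x.max(1)[0]                  # best anchor per label
    return (best > 1. / thr).float().mean()  # best possible recall (BPR)

wh = torch.tensor([[10., 12.], [100., 30.], [400., 380.]])
k = torch.tensor([[12., 16.], [90., 40.], [350., 330.]])
print(metric(k, wh))  # tensor(1.) -> every label has an anchor within 4x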
@@ -69,7 +75,10 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10
Usage:
from utils.autoanchor import *; _ = kmean_anchors()
"""
+ from scipy.cluster.vq import kmeans
+
thr = 1. / thr
+ prefix = colorstr('autoanchor: ')
def metric(k, wh): # compute metrics
r = wh[:, None] / k[None]
@@ -85,16 +94,16 @@ def print_results(k):
k = k[np.argsort(k.prod(1))] # sort small to large
x, best = metric(k, wh0)
bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
- print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
- print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
- (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
+ print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr')
+ print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, '
+ f'past_thr={x[x > thr].mean():.3f}-mean: ', end='')
for i, x in enumerate(k):
print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
return k
if isinstance(path, str): # *.yaml file
with open(path) as f:
- data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ data_dict = yaml.safe_load(f) # model dict
from utils.datasets import LoadImagesAndLabels
dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
else:
@@ -107,15 +116,15 @@ def print_results(k):
# Filter
i = (wh0 < 3.0).any(1).sum()
if i:
- print('WARNING: Extremely small objects found. '
- '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
+ print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.')
wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels
# wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1
# Kmeans calculation
- print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
+ print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...')
s = wh.std(0) # sigmas for whitening
k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
+ assert len(k) == n, f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}'
k *= s
wh = torch.tensor(wh, dtype=torch.float32) # filtered
wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered
@@ -136,7 +145,7 @@ def print_results(k):
# Evolve
npr = np.random
f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma
- pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar
+ pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar
for _ in pbar:
v = np.ones(sh)
while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
@@ -145,7 +154,7 @@ def print_results(k):
fg = anchor_fitness(kg)
if fg > f:
f, k = fg, kg.copy()
- pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
+ pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
if verbose:
print_results(k)
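
The anchor metric driving `check_anchors()` and the genetic evolution above compares every label width/height against every anchor by the worse of the two per-axis ratios. This is a hedged sketch reconstructing the elided `metric()` body from the surrounding code (`best > 1/thr` coverage test), run on synthetic data:

```python
import torch

# Sketch of the anchor-fit metric, assuming thr=4.0 as in check_anchors()
def best_possible_recall(anchors, wh, thr=4.0):
    r = wh[:, None] / anchors[None]          # (labels, anchors, 2) wh ratios
    x = torch.min(r, 1. / r).min(2)[0]       # symmetric worst-axis ratio per pair
    best = x.max(1)[0]                       # best anchor match per label
    return (best > 1. / thr).float().mean()  # fraction of labels covered

anchors = torch.tensor([[10., 13.], [16., 30.], [33., 23.]])  # example small-scale anchors
wh = torch.rand(100, 2) * 55 + 5                              # synthetic label sizes, 5-60 px
print(f'BPR = {best_possible_recall(anchors, wh):.4f}')
```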
diff --git a/utils/aws/__init__.py b/utils/aws/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/utils/aws/mime.sh b/utils/aws/mime.sh
new file mode 100644
index 000000000000..c319a83cfbdf
--- /dev/null
+++ b/utils/aws/mime.sh
@@ -0,0 +1,26 @@
+# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
+# This script will run on every instance restart, not only on first start
+# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
+
+Content-Type: multipart/mixed; boundary="//"
+MIME-Version: 1.0
+
+--//
+Content-Type: text/cloud-config; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Content-Disposition: attachment; filename="cloud-config.txt"
+
+#cloud-config
+cloud_final_modules:
+- [scripts-user, always]
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Content-Disposition: attachment; filename="userdata.txt"
+
+#!/bin/bash
+# --- paste contents of userdata.sh here ---
+--//
diff --git a/utils/aws/resume.py b/utils/aws/resume.py
new file mode 100644
index 000000000000..4b0d4246b594
--- /dev/null
+++ b/utils/aws/resume.py
@@ -0,0 +1,37 @@
+# Resume all interrupted training runs in the yolov5/ dir, including DDP runs
+# Usage: $ python utils/aws/resume.py
+
+import os
+import sys
+from pathlib import Path
+
+import torch
+import yaml
+
+sys.path.append('./') # to run '$ python *.py' files in subdirectories
+
+port = 0 # --master_port
+path = Path('').resolve()
+for last in path.rglob('*/**/last.pt'):
+ ckpt = torch.load(last)
+ if ckpt['optimizer'] is None:
+ continue
+
+ # Load opt.yaml
+ with open(last.parent.parent / 'opt.yaml') as f:
+ opt = yaml.safe_load(f)
+
+ # Get device count
+ d = opt['device'].split(',') # devices
+ nd = len(d) # number of devices
+ ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
+
+ if ddp: # multi-GPU
+ port += 1
+ cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
+ else: # single-GPU
+ cmd = f'python train.py --resume {last}'
+
+ cmd += ' > /dev/null 2>&1 &' # redirect output to /dev/null and run in background
+ print(cmd)
+ os.system(cmd)
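
Each concurrent DDP job needs a distinct rendezvous port, which is why `resume.py` increments `--master_port` per multi-GPU run. A small sketch of the command construction, with hypothetical checkpoint paths and device counts:

```python
from pathlib import Path

# Hypothetical interrupted runs: (checkpoint path, number of devices)
runs = [(Path('runs/train/exp/weights/last.pt'), 2),
        (Path('runs/train/exp2/weights/last.pt'), 1)]

port = 0  # --master_port; incremented so concurrent DDP jobs don't collide
for last, nd in runs:
    if nd > 1:  # multi-GPU: launch one process per device
        port += 1
        cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
    else:  # single-GPU
        cmd = f'python train.py --resume {last}'
    print(cmd)
```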
diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh
new file mode 100644
index 000000000000..5846fedb16f9
--- /dev/null
+++ b/utils/aws/userdata.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
+# This script will run only once on first instance start (for a re-start script see mime.sh)
+# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
+# Use >300 GB SSD
+
+cd /home/ubuntu
+if [ ! -d yolov5 ]; then
+ echo "Running first-time script." # install dependencies, download COCO, pull Docker
+ git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
+ cd yolov5
+ bash data/scripts/get_coco.sh && echo "Data done." &
+ sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
+ python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
+ wait && echo "All tasks done." # finish background tasks
+else
+ echo "Running re-start script." # resume interrupted runs
+ i=0
+ list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
+ while IFS= read -r id; do
+ ((i++))
+ echo "restarting container $i: $id"
+ sudo docker start $id
+ # sudo docker exec -it $id python train.py --resume # single-GPU
+ sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
+ done <<<"$list"
+fi
diff --git a/utils/datasets.py b/utils/datasets.py
index 15d71933b77a..cc97107df063 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -1,6 +1,7 @@
# Dataset utils and dataloaders
import glob
+import hashlib
import logging
import math
import os
@@ -20,12 +21,13 @@
from torch.utils.data import Dataset
from tqdm import tqdm
-from utils.general import xyxy2xywh, xywh2xyxy, clean_str
+from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
+ resample_segments, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
-img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
+img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
@@ -35,9 +37,12 @@
break
-def get_hash(files):
- # Returns a single hash value of a list of files
- return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
+def get_hash(paths):
+ # Returns a single hash value of a list of paths (files or dirs)
+ size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
+ h = hashlib.md5(str(size).encode()) # hash sizes
+ h.update(''.join(paths).encode()) # hash paths
+ return h.hexdigest() # return hash
def exif_size(img):
@@ -56,7 +61,7 @@ def exif_size(img):
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
- rank=-1, world_size=1, workers=8, image_weights=False, quad=False):
+ rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
@@ -67,8 +72,8 @@ def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=Fa
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
- rank=rank,
- image_weights=image_weights)
+ image_weights=image_weights,
+ prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
@@ -119,9 +124,8 @@ def __iter__(self):
class LoadImages: # for inference
- def __init__(self, path, img_size=640, auto=True):
- p = str(Path(path)) # os-agnostic
- p = os.path.abspath(p) # absolute path
+ def __init__(self, path, img_size=640, stride=32, auto=True):
+ p = str(Path(path).absolute()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
@@ -129,13 +133,14 @@ def __init__(self, path, img_size=640, auto=True):
elif os.path.isfile(p):
files = [p] # files
else:
- raise Exception('ERROR: %s does not exist' % p)
+ raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
+ self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
@@ -145,8 +150,8 @@ def __init__(self, path, img_size=640, auto=True):
self.new_video(videos[0]) # new video
else:
self.cap = None
- assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
- (p, img_formats, vid_formats)
+ assert self.nf > 0, f'No images or videos found in {p}. ' \
+ f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
def __iter__(self):
self.count = 0
@@ -172,17 +177,17 @@ def __next__(self):
ret_val, img0 = self.cap.read()
self.frame += 1
- print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
+ print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
- print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
+ print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
- img = letterbox(img0, new_shape=self.img_size, auto=self.auto)[0]
+ img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
@@ -193,15 +198,16 @@ def __next__(self):
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
- self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
+ self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
- def __init__(self, pipe='0', img_size=640):
+ def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
+ self.stride = stride
if pipe.isnumeric():
pipe = eval(pipe) # local camera
@@ -239,12 +245,12 @@ def __next__(self):
break
# Print
- assert ret_val, 'Camera Error %s' % self.pipe
+ assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
- print('webcam %g: ' % self.count, end='')
+ print(f'webcam {self.count}: ', end='')
# Padded resize
- img = letterbox(img0, new_shape=self.img_size)[0]
+ img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
@@ -257,9 +263,10 @@ def __len__(self):
class LoadStreams: # multiple IP or RTSP cameras
- def __init__(self, sources='streams.txt', img_size=640, auto=True):
+ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
self.mode = 'stream'
self.img_size = img_size
+ self.stride = stride
if os.path.isfile(sources):
with open(sources, 'r') as f:
@@ -268,40 +275,47 @@ def __init__(self, sources='streams.txt', img_size=640, auto=True):
sources = [sources]
n = len(sources)
- self.imgs = [None] * n
+ self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.auto = auto
- for i, s in enumerate(sources):
- # Start the thread to read frames from the video stream
- print('%g/%g: %s... ' % (i + 1, n, s), end='')
- cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
- assert cap.isOpened(), 'Failed to open %s' % s
+ for i, s in enumerate(sources): # index, source
+ # Start thread to read frames from video stream
+ print(f'{i + 1}/{n}: {s}... ', end='')
+ if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
+ check_requirements(('pafy', 'youtube_dl'))
+ import pafy
+ s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
+ s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
+ cap = cv2.VideoCapture(s)
+ assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
- fps = cap.get(cv2.CAP_PROP_FPS) % 100
+ self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback
+ self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
+
_, self.imgs[i] = cap.read() # guarantee first frame
- thread = Thread(target=self.update, args=([i, cap]), daemon=True)
- print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
- thread.start()
+ self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True)
+ print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
+ self.threads[i].start()
print('') # newline
# check for common shapes
- s = np.stack([letterbox(x, new_shape=self.img_size, auto=self.auto)[0].shape for x in self.imgs], 0) # inference shapes
+ s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs], 0) # shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
- def update(self, index, cap):
- # Read next stream frame in a daemon thread
- n = 0
- while cap.isOpened():
+ def update(self, i, cap):
+ # Read stream `i` frames in daemon thread
+ n, f = 0, self.frames[i]
+ while cap.isOpened() and n < f:
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
- if n == 4: # read every 4th frame
- _, self.imgs[index] = cap.retrieve()
- n = 0
- time.sleep(0.01) # wait time
+ if n % 4 == 0: # read every 4th frame
+ success, im = cap.retrieve()
+ self.imgs[i] = im if success else self.imgs[i] * 0
+ time.sleep(1 / self.fps[i]) # wait time
def __iter__(self):
self.count = -1
@@ -309,13 +323,13 @@ def __iter__(self):
def __next__(self):
self.count += 1
- img0 = self.imgs.copy()
- if cv2.waitKey(1) == ord('q'): # q to quit
+ if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
- img = [letterbox(x, new_shape=self.img_size, auto=self.rect and self.auto)[0] for x in img0]
+ img0 = self.imgs.copy()
+ img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
# Stack
img = np.stack(img, 0)
@@ -333,12 +347,12 @@ def __len__(self):
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
- return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
+ return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths]
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
- cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
+ cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
@@ -347,6 +361,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
+ self.path = path
try:
f = [] # image files
@@ -354,37 +369,42 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
+ # f = list(p.rglob('**/*.*')) # pathlib
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
+ # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
- raise Exception('%s does not exist' % p)
+ raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
- assert self.img_files, 'No images found'
+ # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib
+ assert self.img_files, f'{prefix}No images found'
except Exception as e:
- raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
+ raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
- cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels
+ cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels
if cache_path.is_file():
- cache = torch.load(cache_path) # load
- if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed
- cache = self.cache_labels(cache_path) # re-cache
+ cache, exists = torch.load(cache_path), True # load
+ if cache['hash'] != get_hash(self.label_files + self.img_files): # changed
+ cache, exists = self.cache_labels(cache_path, prefix), False # re-cache
else:
- cache = self.cache_labels(cache_path) # cache
+ cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
- [nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total
- desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
- tqdm(None, desc=desc, total=n, initial=n)
- assert nf > 0 or not augment, f'No labels found in {cache_path}. Can not train without labels. See {help_url}'
+ nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
+ if exists:
+ d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
+ tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
+ assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
- labels, shapes = zip(*cache.values())
+ cache.pop('version') # remove version
+ labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
@@ -434,9 +454,10 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
- pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
+ pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
+ pbar.close()
- def cache_labels(self, path=Path('./labels.cache')):
+ def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate
@@ -447,13 +468,20 @@ def cache_labels(self, path=Path('./labels.cache')):
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
- assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
+ segments = [] # instance segments
+ assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
+ assert im.format.lower() in img_formats, f'invalid image format {im.format}'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
- l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
+ l = [x.split() for x in f.read().strip().splitlines() if len(x)]
+ if any([len(x) > 8 for x in l]): # is segment
+ classes = np.array([x[0] for x in l], dtype=np.float32)
+ segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
+ l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
+ l = np.array(l, dtype=np.float32)
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
@@ -465,21 +493,26 @@ def cache_labels(self, path=Path('./labels.cache')):
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
- x[im_file] = [l, shape]
+ x[im_file] = [l, shape, segments]
except Exception as e:
nc += 1
- print('WARNING: Ignoring corrupted image and/or label %s: %s' % (im_file, e))
+ logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
- pbar.desc = f"Scanning '{path.parent / path.stem}' for images and labels... " \
+ pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
+ pbar.close()
if nf == 0:
- print(f'WARNING: No labels found in {path}. See {help_url}')
+ logging.info(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
- x['results'] = [nf, nm, ne, nc, i + 1]
- torch.save(x, path) # save for next time
- logging.info(f"New cache created: {path}")
+ x['results'] = nf, nm, ne, nc, i + 1
+ x['version'] = 0.2 # cache version
+ try:
+ torch.save(x, path) # save cache for next time
+ logging.info(f'{prefix}New cache created: {path}')
+ except Exception as e:
+ logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable
return x
def __len__(self):
@@ -517,16 +550,9 @@ def __getitem__(self, index):
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
- # Load labels
- labels = []
- x = self.labels[index]
- if x.size > 0:
- # Normalized xywh to pixel xyxy format
- labels = x.copy()
- labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
- labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
- labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
- labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
+ labels = self.labels[index].copy()
+ if labels.size: # normalized xywh to pixel xyxy format
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
# Augment imagespace
@@ -617,10 +643,10 @@ def load_image(self, index):
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
- r = self.img_size / max(h0, w0) # resize image to img_size
- if r != 1: # always resize down, only resize up if training with augmentation
- interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
- img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
+ r = self.img_size / max(h0, w0) # ratio
+ if r != 1: # if sizes are not equal
+ img = cv2.resize(img, (int(w0 * r), int(h0 * r)),
+ interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
@@ -639,19 +665,25 @@ def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
- # Histogram equalization
- # if random.random() < 0.2:
- # for i in range(3):
- # img[:, :, i] = cv2.equalizeHist(img[:, :, i])
+
+def hist_equalize(img, clahe=True, bgr=False):
+ # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
+ yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
+ if clahe:
+ c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+ yuv[:, :, 0] = c.apply(yuv[:, :, 0])
+ else:
+ yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
+ return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
def load_mosaic(self, index):
# loads images in a 4-mosaic
- labels4 = []
+ labels4, segments4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
- indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices
+ indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
@@ -676,23 +708,21 @@ def load_mosaic(self, index):
padh = y1a - y1b
# Labels
- x = self.labels[index]
- labels = x.copy()
- if x.size > 0: # Normalized xywh to pixel xyxy format
- labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
- labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
- labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
- labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
+ labels, segments = self.labels[index].copy(), self.segments[index].copy()
+ if labels.size:
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
+ segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
+ segments4.extend(segments)
# Concat/clip labels
- if len(labels4):
- labels4 = np.concatenate(labels4, 0)
- np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
- # img4, labels4 = replicate(img4, labels4) # replicate
+ labels4 = np.concatenate(labels4, 0)
+ for x in (labels4[:, 1:], *segments4):
+ np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
+ # img4, labels4 = replicate(img4, labels4) # replicate
# Augment
- img4, labels4 = random_perspective(img4, labels4,
+ img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
@@ -706,9 +736,9 @@ def load_mosaic(self, index):
def load_mosaic9(self, index):
# loads images in a 9-mosaic
- labels9 = []
+ labels9, segments9 = [], []
s = self.img_size
- indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices
+ indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
@@ -739,34 +769,34 @@ def load_mosaic9(self, index):
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
- x = self.labels[index]
- labels = x.copy()
- if x.size > 0: # Normalized xywh to pixel xyxy format
- labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padx
- labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pady
- labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padx
- labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pady
+ labels, segments = self.labels[index].copy(), self.segments[index].copy()
+ if labels.size:
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
+ segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
+ segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
- yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y
+ yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
- if len(labels9):
- labels9 = np.concatenate(labels9, 0)
- labels9[:, [1, 3]] -= xc
- labels9[:, [2, 4]] -= yc
+ labels9 = np.concatenate(labels9, 0)
+ labels9[:, [1, 3]] -= xc
+ labels9[:, [2, 4]] -= yc
+ c = np.array([xc, yc]) # centers
+ segments9 = [x - c for x in segments9]
- np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective
- # img9, labels9 = replicate(img9, labels9) # replicate
+ for x in (labels9[:, 1:], *segments9):
+ np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
+ # img9, labels9 = replicate(img9, labels9) # replicate
# Augment
- img9, labels9 = random_perspective(img9, labels9,
+ img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
@@ -794,8 +824,8 @@ def replicate(img, labels):
return img, labels
-def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
- # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
+def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
+ # Resize and pad image while meeting stride-multiple constraints
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
@@ -810,7 +840,7 @@ def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
- dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
+ dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
@@ -827,7 +857,8 @@ def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale
return img, ratio, (dw, dh)
-def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
+def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
+ border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
@@ -879,37 +910,38 @@ def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shea
# Transform label coordinates
n = len(targets)
if n:
- # warp points
- xy = np.ones((n * 4, 3))
- xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
- xy = xy @ M.T # transform
- if perspective:
- xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
- else: # affine
- xy = xy[:, :2].reshape(n, 8)
-
- # create new boxes
- x = xy[:, [0, 2, 4, 6]]
- y = xy[:, [1, 3, 5, 7]]
- xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
-
- # # apply angle-based reduction of bounding boxes
- # radians = a * math.pi / 180
- # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
- # x = (xy[:, 2] + xy[:, 0]) / 2
- # y = (xy[:, 3] + xy[:, 1]) / 2
- # w = (xy[:, 2] - xy[:, 0]) * reduction
- # h = (xy[:, 3] - xy[:, 1]) * reduction
- # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
-
- # clip boxes
- xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
- xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
+ use_segments = any(x.any() for x in segments)
+ new = np.zeros((n, 4))
+ if use_segments: # warp segments
+ segments = resample_segments(segments) # upsample
+ for i, segment in enumerate(segments):
+ xy = np.ones((len(segment), 3))
+ xy[:, :2] = segment
+ xy = xy @ M.T # transform
+ xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
+
+ # clip
+ new[i] = segment2box(xy, width, height)
+
+ else: # warp boxes
+ xy = np.ones((n * 4, 3))
+ xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
+ xy = xy @ M.T # transform
+ xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
+
+ # create new boxes
+ x = xy[:, [0, 2, 4, 6]]
+ y = xy[:, [1, 3, 5, 7]]
+ new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
+
+ # clip
+ new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
+ new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
# filter candidates
- i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
+ i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
- targets[:, 1:5] = xy[i]
+ targets[:, 1:5] = new[i]
return img, targets
@@ -1018,19 +1050,24 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
-def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
+def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
- # Arguments
- path: Path to images directory
- weights: Train, val, test weights (list)
+ Usage: from utils.datasets import *; autosplit('../coco128')
+ Arguments
+ path: Path to images directory
+ weights: Train, val, test weights (list)
+ annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
- files = list(path.rglob('*.*'))
+ files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
+
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
+
+ print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
- if img.suffix[1:] in img_formats:
+ if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
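
The new `stride` parameter threaded through the loaders above feeds `letterbox()`, whose 'auto' mode pads each axis only up to the next stride multiple instead of to a full square. A minimal sketch of that shape math (the resize-ratio step is assumed from context):

```python
import numpy as np

# Sketch of letterbox() 'auto' minimum-rectangle padding
def letterbox_shape(h, w, new_shape=640, stride=32):
    r = min(new_shape / h, new_shape / w)             # resize ratio, long side -> new_shape
    new_unpad = int(round(w * r)), int(round(h * r))  # unpadded (w, h)
    dw = np.mod(new_shape - new_unpad[0], stride)     # width padding to next stride multiple
    dh = np.mod(new_shape - new_unpad[1], stride)     # height padding to next stride multiple
    return new_unpad[0] + dw, new_unpad[1] + dh       # final (w, h)

print(letterbox_shape(720, 1280))  # -> (640, 384), both multiples of 32
```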
diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md
new file mode 100644
index 000000000000..324c2416dcd9
--- /dev/null
+++ b/utils/flask_rest_api/README.md
@@ -0,0 +1,68 @@
+# Flask REST API
+[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
+
+## Requirements
+
+[Flask](https://palletsprojects.com/p/flask/) is required. Install with:
+```shell
+$ pip install Flask
+```
+
+## Run
+
+After installing Flask, run:
+
+```shell
+$ python3 restapi.py --port 5000
+```
+
+Then use [curl](https://curl.se/) to perform a request:
+
+```shell
+$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
+```
+
+The model inference results are returned as a JSON response:
+
+```json
+[
+ {
+ "class": 0,
+ "confidence": 0.8900438547,
+ "height": 0.9318675399,
+ "name": "person",
+ "width": 0.3264600933,
+ "xcenter": 0.7438579798,
+ "ycenter": 0.5207948685
+ },
+ {
+ "class": 0,
+ "confidence": 0.8440024257,
+ "height": 0.7155083418,
+ "name": "person",
+ "width": 0.6546785235,
+ "xcenter": 0.427829951,
+ "ycenter": 0.6334488392
+ },
+ {
+ "class": 27,
+ "confidence": 0.3771208823,
+ "height": 0.3902671337,
+ "name": "tie",
+ "width": 0.0696444362,
+ "xcenter": 0.3675483763,
+ "ycenter": 0.7991207838
+ },
+ {
+ "class": 27,
+ "confidence": 0.3527112305,
+ "height": 0.1540903747,
+ "name": "tie",
+ "width": 0.0336618312,
+ "xcenter": 0.7814827561,
+ "ycenter": 0.5065554976
+ }
+]
+```
+
+An example Python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py`.
diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py
new file mode 100644
index 000000000000..ff21f30f93ca
--- /dev/null
+++ b/utils/flask_rest_api/example_request.py
@@ -0,0 +1,13 @@
+"""Perform test request"""
+import pprint
+
+import requests
+
+DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
+TEST_IMAGE = "zidane.jpg"
+
+with open(TEST_IMAGE, "rb") as f:
+ image_data = f.read()
+
+response = requests.post(DETECTION_URL, files={"image": image_data}).json()
+
+pprint.pprint(response)
diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py
new file mode 100644
index 000000000000..a54e2309715c
--- /dev/null
+++ b/utils/flask_rest_api/restapi.py
@@ -0,0 +1,37 @@
+"""
+Run a rest API exposing the yolov5s object detection model
+"""
+import argparse
+import io
+
+import torch
+from PIL import Image
+from flask import Flask, request
+
+app = Flask(__name__)
+
+DETECTION_URL = "/v1/object-detection/yolov5s"
+
+
+@app.route(DETECTION_URL, methods=["POST"])
+def predict():
+ if request.method != "POST":
+ return
+
+ if request.files.get("image"):
+ image_file = request.files["image"]
+ image_bytes = image_file.read()
+
+ img = Image.open(io.BytesIO(image_bytes))
+
+ results = model(img, size=640) # reduce size=320 for faster inference
+ return results.pandas().xyxy[0].to_json(orient="records")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
+ parser.add_argument("--port", default=5000, type=int, help="port number")
+ args = parser.parse_args()
+
+ model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True) # force_reload to recache
+ app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat
diff --git a/utils/general.py b/utils/general.py
index f1fb7d2af539..b8377f105e30 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -1,4 +1,4 @@
-# General utils
+# YOLOv5 general utils
import glob
import logging
@@ -9,10 +9,14 @@
import re
import subprocess
import time
+from itertools import repeat
+from multiprocessing.pool import ThreadPool
from pathlib import Path
import cv2
import numpy as np
+import pandas as pd
+import pkg_resources as pkg
import torch
import torchvision
import yaml
@@ -24,16 +28,19 @@
# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
+pd.options.display.max_columns = 10
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
+os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads
-def set_logging(rank=-1):
+def set_logging(rank=-1, verbose=True):
logging.basicConfig(
format="%(message)s",
- level=logging.INFO if rank in [-1, 0] else logging.WARN)
+ level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN)
def init_seeds(seed=0):
+ # Initialize random number generator (RNG) seeds
random.seed(seed)
np.random.seed(seed)
init_torch_seeds(seed)
@@ -45,38 +52,151 @@ def get_latest_run(search_dir='.'):
return max(last_list, key=os.path.getctime) if last_list else ''
-def check_git_status():
- # Suggest 'git pull' if repo is out of date
- if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'):
- s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
- if 'Your branch is behind' in s:
- print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')
+def is_docker():
+ # Is environment a Docker container?
+ return Path('/workspace').exists() # or Path('/.dockerenv').exists()
+
+
+def is_colab():
+ # Is environment a Google Colab instance?
+ try:
+ import google.colab
+ return True
+ except Exception as e:
+ return False
+
+
+def is_pip():
+ # Is file in a pip package?
+ return 'site-packages' in Path(__file__).absolute().parts
+
+
+def emojis(s=''):
+ # Return platform-dependent emoji-safe version of string
+ return s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s
+
+
+def file_size(file):
+ # Return file size in MB
+ return Path(file).stat().st_size / 1e6
+
+def check_online():
+ # Check internet connectivity
+ import socket
+ try:
+ socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
+ return True
+ except OSError:
+ return False
-def check_requirements(file='requirements.txt'):
- # Check installed dependencies meet requirements
- import pkg_resources
- requirements = pkg_resources.parse_requirements(Path(file).open())
- requirements = [x.name + ''.join(*x.specs) if len(x.specs) else x.name for x in requirements]
- pkg_resources.require(requirements) # DistributionNotFound or VersionConflict exception if requirements not met
+
+def check_git_status():
+ # Recommend 'git pull' if code is out of date
+ print(colorstr('github: '), end='')
+ try:
+ assert Path('.git').exists(), 'skipping check (not a git repository)'
+ assert not is_docker(), 'skipping check (Docker image)'
+ assert check_online(), 'skipping check (offline)'
+
+ cmd = 'git fetch && git config --get remote.origin.url'
+ url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url
+ branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
+ n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind
+ if n > 0:
+ s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
+ f"Use 'git pull' to update or 'git clone {url}' to download latest."
+ else:
+ s = f'up to date with {url} ✅'
+ print(emojis(s)) # emoji-safe
+ except Exception as e:
+ print(e)
+
+
+def check_python(minimum='3.7.0', required=True):
+ # Check current python version vs. required python version
+ current = platform.python_version()
+ result = pkg.parse_version(current) >= pkg.parse_version(minimum)
+ if required:
+ assert result, f'Python {minimum} required by YOLOv5, but Python {current} is currently installed'
+ return result
+
+
+def check_requirements(requirements='requirements.txt', exclude=()):
+ # Check installed dependencies meet requirements (pass *.txt file or list of packages)
+ prefix = colorstr('red', 'bold', 'requirements:')
+ check_python() # check python version
+ if isinstance(requirements, (str, Path)): # requirements.txt file
+ file = Path(requirements)
+ if not file.exists():
+ print(f"{prefix} {file.resolve()} not found, check failed.")
+ return
+ requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
+ else: # list or tuple of packages
+ requirements = [x for x in requirements if x not in exclude]
+
+ n = 0 # number of packages updates
+ for r in requirements:
+ try:
+ pkg.require(r)
+ except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
+ n += 1
+ print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...")
+ try:
+ print(subprocess.check_output(f"pip install '{r}'", shell=True).decode())
+ except Exception as e:
+ print(f'{prefix} {e}')
+
+ if n: # if packages updated
+ source = file.resolve() if 'file' in locals() else requirements
+ s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
+ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
+ print(emojis(s)) # emoji-safe
def check_img_size(img_size, s=32):
# Verify img_size is a multiple of stride s
- new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
+ if isinstance(img_size, int):
+ new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
+ elif len(img_size) == 2:
+ new_size = [make_divisible(x, int(s)) for x in img_size] # ceil gs-multiple per dimension
if new_size != img_size:
print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
return new_size
+def check_imshow():
+ # Check if environment supports image displays
+ try:
+ assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'
+ assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'
+ cv2.imshow('test', np.zeros((1, 1, 3)))
+ cv2.waitKey(1)
+ cv2.destroyAllWindows()
+ cv2.waitKey(1)
+ return True
+ except Exception as e:
+ print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
+ return False
+
+
def check_file(file):
- # Search for file if not found
- if os.path.isfile(file) or file == '':
+ # Search/download file (if necessary) and return path
+ file = str(file) # convert to str()
+ if Path(file).is_file() or file == '': # exists
return file
- else:
+ elif file.startswith(('http://', 'https://')): # download
+ url, file = file, Path(file).name
+ print(f'Downloading {url} to {file}...')
+ torch.hub.download_url_to_file(url, file)
+ assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check
+ return file
+ else: # search
files = glob.glob('./**/' + file, recursive=True) # find file
- assert len(files), 'File Not Found: %s' % file # assert file was found
- assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique
+ assert len(files), f'File not found: {file}' # assert file was found
+ assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
return files[0] # return file
@@ -88,18 +208,54 @@ def check_dataset(dict):
if not all(x.exists() for x in val):
print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
if s and len(s): # download script
- print('Downloading %s ...' % s)
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
+ print(f'Downloading {s} ...')
torch.hub.download_url_to_file(s, f)
- r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip
- else: # bash script
+ r = os.system(f'unzip -q {f} -d ../ && rm {f}') # unzip
+ elif s.startswith('bash '): # bash script
+ print(f'Running {s} ...')
r = os.system(s)
- print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value
+ else: # python script
+ r = exec(s) # return None
+ print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure')) # print result
else:
raise Exception('Dataset not found.')
+def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):
+ # Multi-threaded file download and unzip function
+ def download_one(url, dir):
+ # Download 1 file
+ f = dir / Path(url).name # filename
+ if not f.exists():
+ print(f'Downloading {url} to {f}...')
+ if curl:
+ os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail
+ else:
+ torch.hub.download_url_to_file(url, f, progress=True) # torch download
+ if unzip and f.suffix in ('.zip', '.gz'):
+ print(f'Unzipping {f}...')
+ if f.suffix == '.zip':
+ s = f'unzip -qo {f} -d {dir}' # unzip -quiet -overwrite
+ elif f.suffix == '.gz':
+ s = f'tar xfz {f} --directory {f.parent}' # unzip
+ if delete: # delete zip file after unzip
+ s += f' && rm {f}'
+ os.system(s)
+
+ dir = Path(dir)
+ dir.mkdir(parents=True, exist_ok=True) # make directory
+ if threads > 1:
+ pool = ThreadPool(threads)
+ pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded
+ pool.close()
+ pool.join()
+ else:
+ for u in tuple(url) if isinstance(url, str) else url:
+ download_one(u, dir)
+
+
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
@@ -115,6 +271,31 @@ def one_cycle(y1=0.0, y2=1.0, steps=100):
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
+def colorstr(*input):
+ # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
+ *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
+ colors = {'black': '\033[30m', # basic colors
+ 'red': '\033[31m',
+ 'green': '\033[32m',
+ 'yellow': '\033[33m',
+ 'blue': '\033[34m',
+ 'magenta': '\033[35m',
+ 'cyan': '\033[36m',
+ 'white': '\033[37m',
+ 'bright_black': '\033[90m', # bright colors
+ 'bright_red': '\033[91m',
+ 'bright_green': '\033[92m',
+ 'bright_yellow': '\033[93m',
+ 'bright_blue': '\033[94m',
+ 'bright_magenta': '\033[95m',
+ 'bright_cyan': '\033[96m',
+ 'bright_white': '\033[97m',
+ 'end': '\033[0m', # misc
+ 'bold': '\033[1m',
+ 'underline': '\033[4m'}
+ return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
+
+
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
@@ -174,6 +355,50 @@ def xywh2xyxy(x):
return y
+def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
+ # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+ y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
+ y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
+ y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
+ y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
+ return y
+
+
+def xyn2xy(x, w=640, h=640, padw=0, padh=0):
+ # Convert normalized segments into pixel segments, shape (n,2)
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+ y[:, 0] = w * x[:, 0] + padw # top left x
+ y[:, 1] = h * x[:, 1] + padh # top left y
+ return y
+
+
+def segment2box(segment, width=640, height=640):
+ # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
+ x, y = segment.T # segment xy
+ inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
+ x, y, = x[inside], y[inside]
+ return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
+
+
+def segments2boxes(segments):
+ # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
+ boxes = []
+ for s in segments:
+ x, y = s.T # segment xy
+ boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
+ return xyxy2xywh(np.array(boxes)) # cls, xywh
+
+
+def resample_segments(segments, n=1000):
+ # Up-sample an (n,2) segment
+ for i, s in enumerate(segments):
+ x = np.linspace(0, len(s) - 1, n)
+ xp = np.arange(len(s))
+ segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
+ return segments
+
+
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
@@ -198,7 +423,7 @@ def clip_coords(boxes, img_shape):
boxes[:, 3].clamp_(0, img_shape[0]) # y2
-def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
+def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
@@ -234,7 +459,7 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
- alpha = v / ((1 + eps) - iou + v)
+ alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
@@ -276,23 +501,27 @@ def wh_iou(wh1, wh2):
return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
-def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
- """Performs Non-Maximum Suppression (NMS) on inference results
+def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
+ labels=(), max_det=300):
+ """Runs Non-Maximum Suppression (NMS) on inference results
Returns:
- detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
+ list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
nc = prediction.shape[2] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
+ # Checks
+ assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
+ assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
+
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
- max_det = 300 # maximum number of detections per image
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
- multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
+ multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
@@ -366,18 +595,20 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non
return output
-def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer()
+def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
- for key in 'optimizer', 'training_results', 'wandb_id':
- x[key] = None
+ if x.get('ema'):
+ x['model'] = x['ema'] # replace model with ema
+ for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys
+ x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
- print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))
+ print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
@@ -405,14 +636,14 @@ def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
results = tuple(x[0, :7])
c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
- yaml.dump(hyp, f, sort_keys=False)
+ yaml.safe_dump(hyp, f, sort_keys=False)
if bucket:
os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload
def apply_classifier(x, model, img, im0):
- # applies a second stage classifier to yolo outputs
+ # Apply a second stage classifier to yolo outputs
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
@@ -446,14 +677,33 @@ def apply_classifier(x, model, img, im0):
return x
-def increment_path(path, exist_ok=True, sep=''):
- # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
+def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):
+ # Save image crop as {file}, with crop size scaled by {gain} and padded by {pad} pixels. Save and/or return crop
+ xyxy = torch.tensor(xyxy).view(-1, 4)
+ b = xyxy2xywh(xyxy) # boxes
+ if square:
+ b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # convert rectangle to square using the longer side
+ b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
+ xyxy = xywh2xyxy(b).long()
+ clip_coords(xyxy, im.shape)
+ crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
+ if save:
+ cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop)
+ return crop
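# Usage sketch (hypothetical image path and box; BGR=True keeps OpenCV channel order):
import cv2
im = cv2.imread('data/images/zidane.jpg')  # HWC BGR image
crop = save_one_box([100, 100, 300, 300], im, file='runs/crops/person.jpg', BGR=True)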
+
+
+def increment_path(path, exist_ok=False, sep='', mkdir=False):
+ # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic
- if (path.exists() and exist_ok) or (not path.exists()):
- return str(path)
- else:
+ if path.exists() and not exist_ok:
+ suffix = path.suffix
+ path = path.with_suffix('')
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
- return f"{path}{sep}{n}" # update path
+ path = Path(f"{path}{sep}{n}{suffix}") # update path
+ dir = path if path.suffix == '' else path.parent # directory
+ if not dir.exists() and mkdir:
+ dir.mkdir(parents=True, exist_ok=True) # make directory
+ return path
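# Usage sketch (illustrative): the reworked function now returns a Path and can mkdir:
p = increment_path('runs/exp')              # Path('runs/exp') if it does not yet exist
p = increment_path('runs/exp')              # Path('runs/exp2') once runs/exp exists, then exp3...
p = increment_path('runs/exp', mkdir=True)  # also creates the directory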
diff --git a/utils/google_utils.py b/utils/google_utils.py
index 242270c1b033..ac5c54dba97f 100644
--- a/utils/google_utils.py
+++ b/utils/google_utils.py
@@ -12,71 +12,96 @@
def gsutil_getsize(url=''):
# gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
- s = subprocess.check_output('gsutil du %s' % url, shell=True).decode('utf-8')
+ s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
return eval(s.split(' ')[0]) if len(s) else 0 # bytes
-def attempt_download(weights):
- # Attempt to download pretrained weights if not found locally
- weights = str(weights).strip().replace("'", '')
- file = Path(weights).name.lower()
+def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
+ # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes
+ file = Path(file)
+ try: # GitHub
+ print(f'Downloading {url} to {file}...')
+ torch.hub.download_url_to_file(url, str(file))
+ assert file.exists() and file.stat().st_size > min_bytes # check
+ except Exception as e: # GCP
+ file.unlink(missing_ok=True) # remove partial downloads
+ print(f'Download error: {e}\nRe-attempting {url2 or url} to {file}...')
+ os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail
+ finally:
+ if not file.exists() or file.stat().st_size < min_bytes: # check
+ file.unlink(missing_ok=True) # remove partial downloads
+ print(f'ERROR: Download failure: {error_msg or url}')
+ print('')
+
+
+def attempt_download(file, repo='ultralytics/yolov5'):
+ # Attempt file download if it does not exist locally
+ file = Path(str(file).strip().replace("'", ''))
+
+ if not file.exists():
+ # URL specified
+ name = file.name
+ if str(file).startswith(('http:/', 'https:/')): # download
+ url = str(file).replace(':/', '://') # Pathlib turns :// -> :/
+ safe_download(file=name, url=url, min_bytes=1E5)
+ return name
+
+ # GitHub assets
+ file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required)
+ try:
+ response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api
+ assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
+ tag = response['tag_name'] # i.e. 'v1.0'
+ except Exception: # fallback plan
+ assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt',
+ 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
+ try:
+ tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
+ except Exception:
+ tag = 'v5.0' # current release
- msg = weights + ' missing, try downloading from https://github.com/ultralytics/yolov5/releases/'
- response = requests.get('https://api.github.com/repos/ultralytics/yolov5/releases/latest').json() # github api
- assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
- redundant = False # second download option
+ if name in assets:
+ safe_download(file,
+ url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
+ # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional)
+ min_bytes=1E5,
+ error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/')
- if file in assets and not os.path.isfile(weights):
- try: # GitHub
- tag = response['tag_name'] # i.e. 'v1.0'
- url = f'https://github.com/ultralytics/yolov5/releases/download/{tag}/{file}'
- print('Downloading %s to %s...' % (url, weights))
- torch.hub.download_url_to_file(url, weights)
- assert os.path.exists(weights) and os.path.getsize(weights) > 1E6 # check
- except Exception as e: # GCP
- print('Download error: %s' % e)
- assert redundant, 'No secondary mirror'
- url = 'https://storage.googleapis.com/ultralytics/yolov5/ckpt/' + file
- print('Downloading %s to %s...' % (url, weights))
- r = os.system('curl -L %s -o %s' % (url, weights)) # torch.hub.download_url_to_file(url, weights)
- finally:
- if not (os.path.exists(weights) and os.path.getsize(weights) > 1E6): # check
- os.remove(weights) if os.path.exists(weights) else None # remove partial downloads
- print('ERROR: Download failure: %s' % msg)
- print('')
- return
-
-
-def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', name='tmp.zip'):
+ return str(file)
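# Usage sketch (illustrative): a no-op when the file already exists locally, otherwise
# the matching release asset is fetched via safe_download():
weights = attempt_download('yolov5s.pt')  # pulls from the latest GitHub release if missing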
+
+
+def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
# Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download()
t = time.time()
- print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='')
- os.remove(name) if os.path.exists(name) else None # remove existing
- os.remove('cookie') if os.path.exists('cookie') else None
+ file = Path(file)
+ cookie = Path('cookie') # gdrive cookie
+ print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
+ file.unlink(missing_ok=True) # remove existing file
+ cookie.unlink(missing_ok=True) # remove existing cookie
# Attempt file download
out = "NUL" if platform.system() == "Windows" else "/dev/null"
- os.system('curl -c ./cookie -s -L "drive.google.com/uc?export=download&id=%s" > %s ' % (id, out))
+ os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
if os.path.exists('cookie'): # large file
- s = 'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm=%s&id=%s" -o %s' % (get_token(), id, name)
+ s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
else: # small file
- s = 'curl -s -L -o %s "drive.google.com/uc?export=download&id=%s"' % (name, id)
+ s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
r = os.system(s) # execute, capture return
- os.remove('cookie') if os.path.exists('cookie') else None
+ cookie.unlink(missing_ok=True) # remove existing cookie
# Error check
if r != 0:
- os.remove(name) if os.path.exists(name) else None # remove partial
+ file.unlink(missing_ok=True) # remove partial
print('Download error ') # raise Exception('Download error')
return r
# Unzip if archive
- if name.endswith('.zip'):
+ if file.suffix == '.zip':
print('unzipping... ', end='')
- os.system('unzip -q %s' % name) # unzip
- os.remove(name) # remove zip to free space
+ os.system(f'unzip -q {file}') # unzip
+ file.unlink() # remove zip to free space
- print('Done (%.1fs)' % (time.time() - t))
+ print(f'Done ({time.time() - t:.1f}s)')
return r
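# Usage sketch: calling with the signature defaults; a non-zero return code means the
# curl download failed and any partial file was removed:
r = gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip')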
diff --git a/utils/loss.py b/utils/loss.py
index 46051f2eae49..9e78df17fdf3 100644
--- a/utils/loss.py
+++ b/utils/loss.py
@@ -85,121 +85,132 @@ def forward(self, pred, true):
return loss
-def compute_loss(p, targets, model): # predictions, targets, model
- device = targets.device
- lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
- tcls, tbox, indices, anchors = build_targets(p, targets, model) # targets
- h = model.hyp # hyperparameters
-
- # Define criteria
- BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) # weight=model.class_weights)
- BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
-
- # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
- cp, cn = smooth_BCE(eps=0.0)
-
- # Focal loss
- g = h['fl_gamma'] # focal loss gamma
- if g > 0:
- BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
-
- # Losses
- nt = 0 # number of targets
- no = len(p) # number of outputs
- balance = [4.0, 1.0, 0.3, 0.1, 0.03] # P3-P7
- for i, pi in enumerate(p): # layer index, layer predictions
- b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
- tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
-
- n = b.shape[0] # number of targets
- if n:
- nt += n # cumulative targets
- ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
-
- # Regression
- pxy = ps[:, :2].sigmoid() * 2. - 0.5
- pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
- pbox = torch.cat((pxy, pwh), 1) # predicted box
- iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)
- lbox += (1.0 - iou).mean() # iou loss
-
- # Objectness
- tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio
-
- # Classification
- if model.nc > 1: # cls loss (only if multiple classes)
- t = torch.full_like(ps[:, 5:], cn, device=device) # targets
- t[range(n), tcls[i]] = cp
- lcls += BCEcls(ps[:, 5:], t) # BCE
-
- # Append targets to text file
- # with open('targets.txt', 'a') as file:
- # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
-
- lobj += BCEobj(pi[..., 4], tobj) * balance[i] # obj loss
-
- s = 3 / no # output count scaling
- lbox *= h['box'] * s
- lobj *= h['obj']
- lcls *= h['cls'] * s
- bs = tobj.shape[0] # batch size
-
- loss = lbox + lobj + lcls
- return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
-
-
-def build_targets(p, targets, model):
- # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
- det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module
- na, nt = det.na, targets.shape[0] # number of anchors, targets
- tcls, tbox, indices, anch = [], [], [], []
- gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
- ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
- targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
-
- g = 0.5 # bias
- off = torch.tensor([[0, 0],
- [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
- # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
- ], device=targets.device).float() * g # offsets
-
- for i in range(det.nl):
- anchors = det.anchors[i]
- gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
-
- # Match targets to anchors
- t = targets * gain
- if nt:
- # Matches
- r = t[:, :, 4:6] / anchors[:, None] # wh ratio
- j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare
- # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
- t = t[j] # filter
-
- # Offsets
+class ComputeLoss:
+ # Compute losses
+ def __init__(self, model, autobalance=False):
+ super(ComputeLoss, self).__init__()
+ device = next(model.parameters()).device # get model device
+ h = model.hyp # hyperparameters
+
+ # Define criteria
+ BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
+ BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
+
+ # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
+ self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
+
+ # Focal loss
+ g = h['fl_gamma'] # focal loss gamma
+ if g > 0:
+ BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
+
+ det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module
+ self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7
+ self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index
+ self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
+ for k in 'na', 'nc', 'nl', 'anchors':
+ setattr(self, k, getattr(det, k))
+
+ def __call__(self, p, targets): # predictions, targets, model
+ device = targets.device
+ lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
+ tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets
+
+ # Losses
+ for i, pi in enumerate(p): # layer index, layer predictions
+ b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
+ tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
+
+ n = b.shape[0] # number of targets
+ if n:
+ ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
+
+ # Regression
+ pxy = ps[:, :2].sigmoid() * 2. - 0.5
+ pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
+ pbox = torch.cat((pxy, pwh), 1) # predicted box
+ iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)
+ lbox += (1.0 - iou).mean() # iou loss
+
+ # Objectness
+ tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio
+
+ # Classification
+ if self.nc > 1: # cls loss (only if multiple classes)
+ t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets
+ t[range(n), tcls[i]] = self.cp
+ lcls += self.BCEcls(ps[:, 5:], t) # BCE
+
+ # Append targets to text file
+ # with open('targets.txt', 'a') as file:
+ # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
+
+ obji = self.BCEobj(pi[..., 4], tobj)
+ lobj += obji * self.balance[i] # obj loss
+ if self.autobalance:
+ self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
+
+ if self.autobalance:
+ self.balance = [x / self.balance[self.ssi] for x in self.balance]
+ lbox *= self.hyp['box']
+ lobj *= self.hyp['obj']
+ lcls *= self.hyp['cls']
+ bs = tobj.shape[0] # batch size
+
+ loss = lbox + lobj + lcls
+ return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
+
+ def build_targets(self, p, targets):
+ # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
+ na, nt = self.na, targets.shape[0] # number of anchors, targets
+ tcls, tbox, indices, anch = [], [], [], []
+ gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
+ ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
+ targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
+
+ g = 0.5 # bias
+ off = torch.tensor([[0, 0],
+ [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
+ # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
+ ], device=targets.device).float() * g # offsets
+
+ for i in range(self.nl):
+ anchors = self.anchors[i]
+ gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
+
+ # Match targets to anchors
+ t = targets * gain
+ if nt:
+ # Matches
+ r = t[:, :, 4:6] / anchors[:, None] # wh ratio
+ j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare
+ # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
+ t = t[j] # filter
+
+ # Offsets
+ gxy = t[:, 2:4] # grid xy
+ gxi = gain[[2, 3]] - gxy # inverse
+ j, k = ((gxy % 1. < g) & (gxy > 1.)).T
+ l, m = ((gxi % 1. < g) & (gxi > 1.)).T
+ j = torch.stack((torch.ones_like(j), j, k, l, m))
+ t = t.repeat((5, 1, 1))[j]
+ offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
+ else:
+ t = targets[0]
+ offsets = 0
+
+ # Define
+ b, c = t[:, :2].long().T # image, class
gxy = t[:, 2:4] # grid xy
- gxi = gain[[2, 3]] - gxy # inverse
- j, k = ((gxy % 1. < g) & (gxy > 1.)).T
- l, m = ((gxi % 1. < g) & (gxi > 1.)).T
- j = torch.stack((torch.ones_like(j), j, k, l, m))
- t = t.repeat((5, 1, 1))[j]
- offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
- else:
- t = targets[0]
- offsets = 0
-
- # Define
- b, c = t[:, :2].long().T # image, class
- gxy = t[:, 2:4] # grid xy
- gwh = t[:, 4:6] # grid wh
- gij = (gxy - offsets).long()
- gi, gj = gij.T # grid xy indices
-
- # Append
- a = t[:, 6].long() # anchor indices
- indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
- tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
- anch.append(anchors[a]) # anchors
- tcls.append(c) # class
-
- return tcls, tbox, indices, anch
+ gwh = t[:, 4:6] # grid wh
+ gij = (gxy - offsets).long()
+ gi, gj = gij.T # grid xy indices
+
+ # Append
+ a = t[:, 6].long() # anchor indices
+ indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
+ tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
+ anch.append(anchors[a]) # anchors
+ tcls.append(c) # class
+
+ return tcls, tbox, indices, anch
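# Usage sketch (illustrative; assumes a built YOLOv5 model with model.hyp and model.gr
# attached as in train.py, plus imgs/targets batches already on the right device):
compute_loss = ComputeLoss(model)  # init once, replaces the old compute_loss() function
pred = model(imgs)                 # forward pass -> per-layer predictions (P3-P7)
loss, loss_items = compute_loss(pred, targets)  # scaled total loss, (lbox, lobj, lcls, total)
loss.backward()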
diff --git a/utils/metrics.py b/utils/metrics.py
index 99d5bcfaf2af..323c84b6c873 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -15,7 +15,7 @@ def fitness(x):
return (x[:, :4] * w).sum(1)
-def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]):
+def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
@@ -35,12 +35,11 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision
# Find unique classes
unique_classes = np.unique(target_cls)
+ nc = unique_classes.shape[0] # number of classes
# Create Precision-Recall curve and compute AP for each class
px, py = np.linspace(0, 1, 1000), [] # for plotting
- pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
- s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
- ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
+ ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
for ci, c in enumerate(unique_classes):
i = pred_cls == c
n_l = (target_cls == c).sum() # number of labels
@@ -55,25 +54,28 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision
# Recall
recall = tpc / (n_l + 1e-16) # recall curve
- r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
+ r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases
# Precision
precision = tpc / (tpc + fpc) # precision curve
- p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
+ p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # precision at each confidence in px
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
- if plot and (j == 0):
+ if plot and j == 0:
py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
- # Compute F1 score (harmonic mean of precision and recall)
+ # Compute F1 (harmonic mean of precision and recall)
f1 = 2 * p * r / (p + r + 1e-16)
-
if plot:
- plot_pr_curve(px, py, ap, save_dir, names)
+ plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
+ plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
+ plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
+ plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')
- return p, r, ap, f1, unique_classes.astype('int32')
+ i = f1.mean(0).argmax() # max F1 index
+ return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')
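# Note (illustrative): P, R and F1 are now reported at the confidence that maximizes mean
# F1 rather than at a fixed pr_score = 0.1. Given f1 with shape (nc, 1000) over
# px = np.linspace(0, 1, 1000), the selection is:
#   i = f1.mean(0).argmax()  # column of the best confidence threshold
#   best_conf = px[i]        # operating point for the returned p[:, i], r[:, i], f1[:, i]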
def compute_ap(recall, precision):
@@ -143,14 +145,14 @@ def process_batch(self, detections, labels):
for i, gc in enumerate(gt_classes):
j = m0 == i
if n and sum(j) == 1:
- self.matrix[gc, detection_classes[m1[j]]] += 1 # correct
+ self.matrix[detection_classes[m1[j]], gc] += 1 # correct
else:
- self.matrix[gc, self.nc] += 1 # background FP
+ self.matrix[self.nc, gc] += 1 # background FP
if n:
for i, dc in enumerate(detection_classes):
if not any(m1 == i):
- self.matrix[self.nc, dc] += 1 # background FN
+ self.matrix[dc, self.nc] += 1 # background FN
def matrix(self):
return self.matrix
@@ -166,8 +168,8 @@ def plot(self, save_dir='', names=()):
sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size
labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels
sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
- xticklabels=names + ['background FN'] if labels else "auto",
- yticklabels=names + ['background FP'] if labels else "auto").set_facecolor((1, 1, 1))
+ xticklabels=names + ['background FP'] if labels else "auto",
+ yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
fig.axes[0].set_xlabel('True')
fig.axes[0].set_ylabel('Predicted')
fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
@@ -181,13 +183,14 @@ def print(self):
# Plots ----------------------------------------------------------------------------------------------------------------
-def plot_pr_curve(px, py, ap, save_dir='.', names=()):
+def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()):
+ # Precision-recall curve
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
py = np.stack(py, axis=1)
- if 0 < len(names) < 21: # show mAP in legend if < 10 classes
+ if 0 < len(names) < 21: # display per-class legend if < 21 classes
for i, y in enumerate(py.T):
- ax.plot(px, y, linewidth=1, label=f'{names[i]} %.3f' % ap[i, 0]) # plot(recall, precision)
+ ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision)
else:
ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision)
@@ -197,4 +200,24 @@ def plot_pr_curve(px, py, ap, save_dir='.', names=()):
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
- fig.savefig(Path(save_dir) / 'precision_recall_curve.png', dpi=250)
+ fig.savefig(Path(save_dir), dpi=250)
+
+
+def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'):
+ # Metric-confidence curve
+ fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
+
+ if 0 < len(names) < 21: # display per-class legend if < 21 classes
+ for i, y in enumerate(py):
+ ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric)
+ else:
+ ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric)
+
+ y = py.mean(0)
+ ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
+ ax.set_xlabel(xlabel)
+ ax.set_ylabel(ylabel)
+ ax.set_xlim(0, 1)
+ ax.set_ylim(0, 1)
+ plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+ fig.savefig(Path(save_dir), dpi=250)
diff --git a/utils/plots.py b/utils/plots.py
index c883ea24f253..8313ef210f90 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -15,8 +15,7 @@
import seaborn as sns
import torch
import yaml
-from PIL import Image, ImageDraw
-from scipy.signal import butter, filtfilt
+from PIL import Image, ImageDraw, ImageFont
from utils.general import xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
@@ -26,12 +25,25 @@
matplotlib.use('Agg') # for writing to files only
-def color_list():
- # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
- def hex2rgb(h):
+class Colors:
+ # Ultralytics color palette https://ultralytics.com/
+ def __init__(self):
+ # hex = matplotlib.colors.TABLEAU_COLORS.values()
+ hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
+ '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
+ self.palette = [self.hex2rgb('#' + c) for c in hex]
+ self.n = len(self.palette)
+
+ def __call__(self, i, bgr=False):
+ c = self.palette[int(i) % self.n]
+ return (c[2], c[1], c[0]) if bgr else c
+
+ @staticmethod
+ def hex2rgb(h): # rgb order (PIL)
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
- return [hex2rgb(h) for h in plt.rcParams['axes.prop_cycle'].by_key()['color']]
+
+colors = Colors() # create instance for 'from utils.plots import colors'
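# Usage sketch: indices wrap modulo the 20-color palette; bgr=True suits OpenCV drawing:
c = colors(2)                # (255, 112, 31), RGB for palette entry 'FF701F'
c_bgr = colors(2, bgr=True)  # (31, 112, 255) for cv2 functions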
def hist2d(x, y, n=100):
@@ -44,6 +56,8 @@ def hist2d(x, y, n=100):
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
+ from scipy.signal import butter, filtfilt
+
# https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
def butter_lowpass(cutoff, fs, order):
nyq = 0.5 * fs
@@ -54,18 +68,32 @@ def butter_lowpass(cutoff, fs, order):
return filtfilt(b, a, data) # forward-backward filter
-def plot_one_box(x, img, color=None, label=None, line_thickness=None):
- # Plots one bounding box on image img
- tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
- color = color or [random.randint(0, 255) for _ in range(3)]
+def plot_one_box(x, im, color=(128, 128, 128), label=None, line_thickness=3):
+ # Plots one bounding box on image 'im' using OpenCV
+ assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_one_box() input image.'
+ tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
- cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
+ cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
- cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
- cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+ cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled
+ cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+
+
+def plot_one_box_PIL(box, im, color=(128, 128, 128), label=None, line_thickness=None):
+ # Plots one bounding box on image 'im' using PIL
+ im = Image.fromarray(im)
+ draw = ImageDraw.Draw(im)
+ line_thickness = line_thickness or max(int(min(im.size) / 200), 2)
+ draw.rectangle(box, width=line_thickness, outline=color) # plot
+ if label:
+ font = ImageFont.truetype("Arial.ttf", size=max(round(max(im.size) / 40), 12))
+ txt_width, txt_height = font.getsize(label)
+ draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=color)
+ draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
+ return np.asarray(im)
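# Usage sketch (hypothetical image path): the OpenCV variant draws in place on a
# contiguous BGR array, while plot_one_box_PIL() returns a new array:
import cv2
import numpy as np
im = np.ascontiguousarray(cv2.imread('data/images/bus.jpg'))
plot_one_box([50, 50, 200, 300], im, color=colors(0, bgr=True), label='person 0.92')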
def plot_wh_methods(): # from utils.plots import *; plot_wh_methods()
@@ -121,7 +149,6 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
h = math.ceil(scale_factor * h)
w = math.ceil(scale_factor * w)
- colors = color_list() # list of colors
mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
for i, img in enumerate(images):
if i == max_subplots: # if last batch has fewer images than we expect
@@ -152,7 +179,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
boxes[[1, 3]] += block_y
for j, box in enumerate(boxes.T):
cls = int(classes[j])
- color = colors[cls % len(colors)]
+ color = colors(cls)
cls = names[cls] if names else cls
if labels or conf[j] > 0.25: # 0.25 conf thresh
label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
@@ -223,43 +250,43 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
plt.savefig('targets.jpg', dpi=200)
-def plot_study_txt(path='study/', x=None): # from utils.plots import *; plot_study_txt()
+def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()
# Plot study.txt generated by test.py
fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
- ax = ax.ravel()
+ # ax = ax.ravel()
fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
- for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]:
+ # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
+ for f in sorted(Path(path).glob('study*.txt')):
y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
x = np.arange(y.shape[1]) if x is None else np.array(x)
s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
- for i in range(7):
- ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
- ax[i].set_title(s[i])
+ # for i in range(7):
+ # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
+ # ax[i].set_title(s[i])
j = y[3].argmax() + 1
- ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
+ ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
- ax2.grid()
- ax2.set_xlim(0, 30)
- ax2.set_ylim(29, 51)
- ax2.set_yticks(np.arange(30, 55, 5))
+ ax2.grid(alpha=0.2)
+ ax2.set_yticks(np.arange(20, 60, 5))
+ ax2.set_xlim(0, 57)
+ ax2.set_ylim(30, 55)
ax2.set_xlabel('GPU Speed (ms/img)')
ax2.set_ylabel('COCO AP val')
ax2.legend(loc='lower right')
- plt.savefig('test_study.png', dpi=300)
+ plt.savefig(str(Path(path).name) + '.png', dpi=300)
-def plot_labels(labels, save_dir=Path(''), loggers=None):
+def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):
# plot dataset labels
print('Plotting labels... ')
c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
nc = int(c.max() + 1) # number of classes
- colors = color_list()
x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
# seaborn correlogram
@@ -270,8 +297,14 @@ def plot_labels(labels, save_dir=Path(''), loggers=None):
# matplotlib labels
matplotlib.use('svg') # faster
ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
- ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
- ax[0].set_xlabel('classes')
+ y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
+ # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195
+ ax[0].set_ylabel('instances')
+ if 0 < len(names) < 30:
+ ax[0].set_xticks(range(len(names)))
+ ax[0].set_xticklabels(names, rotation=90, fontsize=10)
+ else:
+ ax[0].set_xlabel('classes')
sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
@@ -280,7 +313,7 @@ def plot_labels(labels, save_dir=Path(''), loggers=None):
labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
for cls, *box in labels[:1000]:
- ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot
+ ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot
ax[1].imshow(img)
ax[1].axis('off')
@@ -295,13 +328,13 @@ def plot_labels(labels, save_dir=Path(''), loggers=None):
# loggers
for k, v in loggers.items() or {}:
if k == 'wandb' and v:
- v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]})
+ v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)
def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()
# Plot hyperparameter evolution results in evolve.txt
with open(yaml_file) as f:
- hyp = yaml.load(f, Loader=yaml.FullLoader)
+ hyp = yaml.safe_load(f)
x = np.loadtxt('evolve.txt', ndmin=2)
f = fitness(x)
# weights = (f - f.min()) ** 2 # for weighted results
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 75bcb7f9834f..aa54c3cf561e 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -1,11 +1,15 @@
-# PyTorch utils
+# YOLOv5 PyTorch utils
+import datetime
import logging
import math
import os
+import platform
+import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
+from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
@@ -41,9 +45,24 @@ def init_torch_seeds(seed=0):
cudnn.benchmark, cudnn.deterministic = True, False
+def date_modified(path=__file__):
+ # return human-readable file modification date, i.e. '2021-3-26'
+ t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
+ return f'{t.year}-{t.month}-{t.day}'
+
+
+def git_describe(path=Path(__file__).parent): # path must be a directory
+ # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
+ s = f'git -C {path} describe --tags --long --always'
+ try:
+ return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
+ except subprocess.CalledProcessError:
+ return '' # not a git repository
+
+
def select_device(device='', batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
- s = f'Using torch {torch.__version__} ' # string
+ s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string
cpu = device.lower() == 'cpu'
if cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
@@ -51,19 +70,20 @@ def select_device(device='', batch_size=None):
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability
- cuda = torch.cuda.is_available() and not cpu
+ cuda = not cpu and torch.cuda.is_available()
if cuda:
- n = torch.cuda.device_count()
- if n > 1 and batch_size: # check that batch_size is compatible with device_count
+ devices = device.split(',') if device else range(torch.cuda.device_count()) # i.e. 0,1,6,7
+ n = len(devices) # device count
+ if n > 1 and batch_size: # check batch_size is divisible by device_count
assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
space = ' ' * len(s)
- for i, d in enumerate(device.split(',') if device else range(n)):
+ for i, d in enumerate(devices):
p = torch.cuda.get_device_properties(i)
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB
else:
- s += 'CPU'
+ s += 'CPU\n'
- logger.info(f'{s}\n') # skip a line
+ logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe
return torch.device('cuda:0' if cuda else 'cpu')
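# Usage sketch (illustrative): the returned device is always 'cuda:0' when any GPU is
# selected, since CUDA_VISIBLE_DEVICES remaps the requested GPUs:
device = select_device('')                    # first GPU if available, else CPU
device = select_device('cpu')                 # force CPU
device = select_device('0,1', batch_size=16)  # two GPUs; batch size must divide evenly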
@@ -110,13 +130,19 @@ def profile(x, ops, n=100, device=None):
s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters
- print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
+ print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
def is_parallel(model):
+ # Returns True if model is of type DP or DDP
return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
+def de_parallel(model):
+ # De-parallelize a model: returns single-GPU model if model is of type DP or DDP
+ return model.module if is_parallel(model) else model
+
+
def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
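# Usage sketch (illustrative checkpoint path): a typical transfer-learning load that
# keeps only keys whose shapes match, following the pattern used in train.py:
ckpt = torch.load('yolov5s.pt', map_location=device)  # checkpoint dict
csd = intersect_dicts(ckpt['model'].float().state_dict(), model.state_dict(), exclude=['anchor'])
model.load_state_dict(csd, strict=False)  # mismatched keys are simply skipped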
@@ -172,7 +198,7 @@ def fuse_conv_and_bn(conv, bn):
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
- fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
+ fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
# prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
@@ -195,7 +221,7 @@ def model_info(model, verbose=False, img_size=640):
try: # FLOPS
from thop import profile
- stride = int(model.stride.max()) if hasattr(model, 'stride') else 32
+ stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input
flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS
img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
diff --git a/utils/wandb_logging/__init__.py b/utils/wandb_logging/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py
new file mode 100644
index 000000000000..f45a23011f15
--- /dev/null
+++ b/utils/wandb_logging/log_dataset.py
@@ -0,0 +1,24 @@
+import argparse
+
+import yaml
+
+from wandb_utils import WandbLogger
+
+WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
+
+
+def create_dataset_artifact(opt):
+ with open(opt.data) as f:
+ data = yaml.safe_load(f) # data dict
+ logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
+ parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
+ parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
+ opt = parser.parse_args()
+ opt.resume = False # Explicitly disallow resume check for dataset upload job
+
+ create_dataset_artifact(opt)
diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py
new file mode 100644
index 000000000000..57ce9035a777
--- /dev/null
+++ b/utils/wandb_logging/wandb_utils.py
@@ -0,0 +1,318 @@
+"""Utilities and tools for tracking runs with Weights & Biases."""
+import json
+import sys
+from pathlib import Path
+
+import torch
+import yaml
+from tqdm import tqdm
+
+sys.path.append(str(Path(__file__).parent.parent.parent)) # add yolov5/ root directory to path
+from utils.datasets import LoadImagesAndLabels
+from utils.datasets import img2label_paths
+from utils.general import colorstr, xywh2xyxy, check_dataset, check_file
+
+try:
+ import wandb
+ from wandb import init, finish
+except ImportError:
+ wandb = None
+
+WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
+
+
+def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
+ return from_string[len(prefix):]
+
+
+def check_wandb_config_file(data_config_file):
+ wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path
+ if Path(wandb_config).is_file():
+ return wandb_config
+ return data_config_file
+
+
+def get_run_info(run_path):
+ run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
+ run_id = run_path.stem
+ project = run_path.parent.stem
+ entity = run_path.parent.parent.stem
+ model_artifact_name = 'run_' + run_id + '_model'
+ return entity, project, run_id, model_artifact_name
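# Usage sketch (made-up entity/project/run id):
#   get_run_info('wandb-artifact://my-team/YOLOv5/3abc12de')
#   -> ('my-team', 'YOLOv5', '3abc12de', 'run_3abc12de_model')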
+
+
+def check_wandb_resume(opt):
+ if opt.global_rank not in [-1, 0]:
+ process_wandb_config_ddp_mode(opt)
+ if isinstance(opt.resume, str):
+ if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
+ if opt.global_rank not in [-1, 0]: # For resuming DDP runs
+ entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
+ api = wandb.Api()
+ artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest')
+ modeldir = artifact.download()
+ opt.weights = str(Path(modeldir) / "last.pt")
+ return True
+ return None
+
+
+def process_wandb_config_ddp_mode(opt):
+ with open(check_file(opt.data)) as f:
+ data_dict = yaml.safe_load(f) # data dict
+ train_dir, val_dir = None, None
+ if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
+ api = wandb.Api()
+ train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)
+ train_dir = train_artifact.download()
+ train_path = Path(train_dir) / 'data/images/'
+ data_dict['train'] = str(train_path)
+
+ if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX):
+ api = wandb.Api()
+ val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias)
+ val_dir = val_artifact.download()
+ val_path = Path(val_dir) / 'data/images/'
+ data_dict['val'] = str(val_path)
+ if train_dir or val_dir:
+ ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
+ with open(ddp_data_path, 'w') as f:
+ yaml.safe_dump(data_dict, f)
+ opt.data = ddp_data_path
+
+
+class WandbLogger():
+ """Log training runs, datasets, models, and predictions to Weights & Biases.
+
+ This logger sends information to W&B at wandb.ai. By default, this information
+ includes hyperparameters, system configuration and metrics, model metrics,
+ and basic data metrics and analyses.
+
+ By providing additional command line arguments to train.py, datasets,
+ models and predictions can also be logged.
+
+ For more on how this logger is used, see the Weights & Biases documentation:
+ https://docs.wandb.com/guides/integrations/yolov5
+ """
+ def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
+ # Pre-training routine --
+ self.job_type = job_type
+ self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict
+ # It would be more elegant to make a single wandb.init call, but useful config data would then be overwritten by the WandbLogger's own wandb.init call
+ if isinstance(opt.resume, str): # checks resume from artifact
+ if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
+ entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
+ model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name
+ assert wandb, 'install wandb to resume wandb runs'
+ # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config
+ self.wandb_run = wandb.init(id=run_id, project=project, entity=entity, resume='allow')
+ opt.resume = model_artifact_name
+ elif self.wandb:
+ self.wandb_run = wandb.init(config=opt,
+ resume="allow",
+ project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
+ entity=opt.entity,
+ name=name,
+ job_type=job_type,
+ id=run_id) if not wandb.run else wandb.run
+ if self.wandb_run:
+ if self.job_type == 'Training':
+ if not opt.resume:
+ wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict
+ # Info useful for resuming from artifacts
+ self.wandb_run.config.opt = vars(opt)
+ self.wandb_run.config.data_dict = wandb_data_dict
+ self.data_dict = self.setup_training(opt, data_dict)
+ if self.job_type == 'Dataset Creation':
+ self.data_dict = self.check_and_upload_dataset(opt)
+ else:
+ prefix = colorstr('wandb: ')
+ print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")
+
+ def check_and_upload_dataset(self, opt):
+ assert wandb, 'Install wandb to upload dataset'
+ check_dataset(self.data_dict)
+ config_path = self.log_dataset_artifact(check_file(opt.data),
+ opt.single_cls,
+ 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
+ print("Created dataset config file ", config_path)
+ with open(config_path) as f:
+ wandb_data_dict = yaml.safe_load(f)
+ return wandb_data_dict
+
+ def setup_training(self, opt, data_dict):
+ self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants
+ self.bbox_interval = opt.bbox_interval
+ if isinstance(opt.resume, str):
+ modeldir, _ = self.download_model_artifact(opt)
+ if modeldir:
+ self.weights = Path(modeldir) / "last.pt"
+ config = self.wandb_run.config
+ opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
+ self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \
+ config.opt['hyp']
+ data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume
+ if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download
+ self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
+ opt.artifact_alias)
+ self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'),
+ opt.artifact_alias)
+ self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None
+ if self.train_artifact_path is not None:
+ train_path = Path(self.train_artifact_path) / 'data/images/'
+ data_dict['train'] = str(train_path)
+ if self.val_artifact_path is not None:
+ val_path = Path(self.val_artifact_path) / 'data/images/'
+ data_dict['val'] = str(val_path)
+ self.val_table = self.val_artifact.get("val")
+ self.map_val_table_path()
+ if self.val_artifact is not None:
+ self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
+ self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
+ if opt.bbox_interval == -1:
+ self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
+ return data_dict
+
+ def download_dataset_artifact(self, path, alias):
+ if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
+ artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
+ dataset_artifact = wandb.use_artifact(artifact_path.as_posix())
+ assert dataset_artifact is not None, "Error: W&B dataset artifact doesn't exist"
+ datadir = dataset_artifact.download()
+ return datadir, dataset_artifact
+ return None, None
+
+ def download_model_artifact(self, opt):
+ if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
+ model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
+ assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
+ modeldir = model_artifact.download()
+ epochs_trained = model_artifact.metadata.get('epochs_trained')
+ total_epochs = model_artifact.metadata.get('total_epochs')
+ is_finished = total_epochs is None
+ assert not is_finished, 'training is finished, can only resume incomplete runs.'
+ return modeldir, model_artifact
+ return None, None
+
+ def log_model(self, path, opt, epoch, fitness_score, best_model=False):
+ model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
+ 'original_url': str(path),
+ 'epochs_trained': epoch + 1,
+ 'save period': opt.save_period,
+ 'project': opt.project,
+ 'total_epochs': opt.epochs,
+ 'fitness_score': fitness_score
+ })
+ model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
+ wandb.log_artifact(model_artifact,
+ aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
+ print("Saving model artifact on epoch ", epoch + 1)
+
+ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
+ with open(data_file) as f:
+ data = yaml.safe_load(f) # data dict
+ nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
+ names = {k: v for k, v in enumerate(names)} # to index dictionary
+ self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
+ data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None
+ self.val_artifact = self.create_dataset_table(LoadImagesAndLabels(
+ data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None
+ if data.get('train'):
+ data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
+ if data.get('val'):
+ data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
+ path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path
+ data.pop('download', None)
+ with open(path, 'w') as f:
+ yaml.safe_dump(data, f)
+
+ if self.job_type == 'Training': # builds correct artifact pipeline graph
+ self.wandb_run.use_artifact(self.val_artifact)
+ self.wandb_run.use_artifact(self.train_artifact)
+ self.val_artifact.wait()
+ self.val_table = self.val_artifact.get('val')
+ self.map_val_table_path()
+ else:
+ self.wandb_run.log_artifact(self.train_artifact)
+ self.wandb_run.log_artifact(self.val_artifact)
+ return path
+
+ def map_val_table_path(self):
+ self.val_table_map = {}
+ print("Mapping dataset")
+ for i, data in enumerate(tqdm(self.val_table.data)):
+ self.val_table_map[data[3]] = data[0]
+
+ def create_dataset_table(self, dataset, class_to_id, name='dataset'):
+ # TODO: Explore multiprocessing to split this loop in parallel | This is essential for speeding up the logging
+ artifact = wandb.Artifact(name=name, type="dataset")
+ if isinstance(dataset.path, str) and Path(dataset.path).is_dir():
+ img_files = tqdm([dataset.path])
+ else:
+ img_files = tqdm(dataset.img_files)
+ for img_file in img_files:
+ if Path(img_file).is_dir():
+ artifact.add_dir(img_file, name='data/images')
+ labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
+ artifact.add_dir(labels_path, name='data/labels')
+ else:
+ artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
+ label_file = Path(img2label_paths([img_file])[0])
+ if label_file.exists():
+ artifact.add_file(str(label_file), name='data/labels/' + label_file.name)
+ table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
+ class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
+ for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
+ box_data, img_classes = [], {}
+ for cls, *xywh in labels[:, 1:].tolist():
+ cls = int(cls)
+ box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]},
+ "class_id": cls,
+ "box_caption": "%s" % (class_to_id[cls])})
+ img_classes[cls] = class_to_id[cls]
+ boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space
+ table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes),
+ Path(paths).name)
+ artifact.add(table, name)
+ return artifact
+
+ def log_training_progress(self, predn, path, names):
+ if self.val_table and self.result_table:
+ class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
+ box_data = []
+ total_conf = 0
+ for *xyxy, conf, cls in predn.tolist():
+ if conf >= 0.25:
+ box_data.append(
+ {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+ "class_id": int(cls),
+ "box_caption": "%s %.3f" % (names[cls], conf),
+ "scores": {"class_score": conf},
+ "domain": "pixel"})
+ total_conf = total_conf + conf
+ boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
+ id = self.val_table_map[Path(path).name]
+ self.result_table.add_data(self.current_epoch,
+ id,
+ wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
+ total_conf / max(1, len(box_data))
+ )
+
+ def log(self, log_dict):
+ if self.wandb_run:
+ for key, value in log_dict.items():
+ self.log_dict[key] = value
+
+ def end_epoch(self, best_result=False):
+ if self.wandb_run:
+ wandb.log(self.log_dict)
+ self.log_dict = {}
+ if self.result_artifact:
+ train_results = wandb.JoinedTable(self.val_table, self.result_table, "id")
+ self.result_artifact.add(train_results, 'result')
+ wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch),
+ ('best' if best_result else '')])
+ self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
+ self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
+
+ def finish_run(self):
+ if self.wandb_run:
+ if self.log_dict:
+ wandb.log(self.log_dict)
+ wandb.run.finish()