Skip to content

Commit

Permalink
Update bevformer train network
Browse files Browse the repository at this point in the history
  • Loading branch information
Charmve committed Jan 22, 2024
1 parent 14f839b commit 28717b6
Show file tree
Hide file tree
Showing 12 changed files with 1,935 additions and 58 deletions.
31 changes: 21 additions & 10 deletions Chapter07-课程展望与总结/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,8 @@
* @Author: Charmve [email protected]
* @Date: 2023-10-10 10:49:13
* @LastEditors: Charmve [email protected]
* @LastEditTime: 2023-10-10 10:53:07
* @FilePath: /OccNet-Course/Chapter06-课程展望与总结/README.md
* @LastEditTime: 2024-01-22 19:55:01
* @FilePath: /OccNet-Course/Chapter07-课程展望与总结/README.md
* @Version: 1.0.1
* @Blogs: charmve.blog.csdn.net
* @GitHub: https://github.com/Charmve
Expand All @@ -13,16 +13,27 @@
* Licensed under the MIT License.
-->

在本专题课程的课程展望和总结中,主要从OCC与Nerf的关系、端到端的实现、大模型三个方面做未来展望,以及对本课程做一个总结。
在本专题课程的课程展望和总结中,主要从算法框架、数据、仿真和其他四个方面做未来展望,以及对本课程做一个总结。

1. OCC与Nerf的关系
- <h4>算法框架</h4>
- 数据驱动的端到端 UniAD
- 大模型 LMDrive [关于大模型和自动驾驶的几个迷思](关于大模型和自动驾驶的几个迷思.md)
- 世界模型:Drive-WM、DriveDreamer
- 矢量地图在线建图:MapTRv2
- BEV-OCC-Transformer: OccFormer、OccWorld、Occupancy Flow

- []()
- <h4>数据</h4>
- 4D数据自动标注:
- OCC与Nerf联合标注
- [面向BEV感知的4D标注方案](https://zhuanlan.zhihu.com/p/642735557?utm_psn=1706841959639998464)
- 数据生成:DrivingDiffusion、[MagicDrive](https://zhuanlan.zhihu.com/p/675303127)、UrbanSyn

2. 端到端的实现
- <h4>仿真</h4>
- UniSim
- DRIVE Sim

- []()
- <h4>其他</h4>
- 舱驾一体
    - AI 编译器: MLIR、XLA、Triton
- 模型剪枝、模型蒸馏、模型压缩、模型量化(PTQ、QAT)

3. 自动驾驶大模型

- [关于大模型和自动驾驶的几个迷思](关于大模型和自动驾驶的几个迷思.md)
1 change: 1 addition & 0 deletions code/BEVFormer/docs/prepare_dataset.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ unzip can_bus.zip

*We generate custom annotation files which are different from mmdet3d's*
```
export PYTHONPATH=$PYTHONPATH:./
python tools/create_data.py nuscenes --root-path ./data/nuscenes --out-dir ./data/nuscenes --extra-tag nuscenes --version v1.0 --canbus ./data
```

Expand Down
4 changes: 1 addition & 3 deletions code/BEVFormer/projects/configs/bevformer/bevformer_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
'../datasets/custom_nus-3d.py',
'../_base_/default_runtime.py'
]
#

plugin = True
plugin_dir = 'projects/mmdet3d_plugin/'

Expand All @@ -11,8 +11,6 @@
point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
voxel_size = [0.2, 0.2, 8]



img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
# For nuScenes we usually do 10-class detection
Expand Down
10 changes: 5 additions & 5 deletions code/BEVFormer/projects/configs/bevformer/bevformer_small.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
# BEVFormer-small consumes at least 10500M GPU memory
# compared to bevformer_base, bevformer_small has
# smaller BEV: 200*200 -> 150*150
# less encoder layers: 6 -> 3
# smaller input size: 1600*900 -> (1600*900)*0.8
# multi-scale feautres -> single scale features (C5)
# with_cp of backbone = True
# - smaller BEV: 200*200 -> 150*150
# - less encoder layers: 6 -> 3
# - smaller input size: 1600*900 -> (1600*900)*0.8
# - multi-scale features -> single scale features (C5)
# - with_cp of backbone = True

_base_ = [
'../datasets/custom_nus-3d.py',
Expand Down
10 changes: 5 additions & 5 deletions code/BEVFormer/projects/configs/bevformer/bevformer_tiny.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
# BEVFormer-tiny consumes at least 6700M GPU memory
# compared to bevformer_base, bevformer_tiny has
# smaller backbone: R101-DCN -> R50
# smaller BEV: 200*200 -> 50*50
# less encoder layers: 6 -> 3
# smaller input size: 1600*900 -> 800*450
# multi-scale feautres -> single scale features (C5)
# - smaller backbone: R101-DCN -> R50
# - smaller BEV: 200*200 -> 50*50
# - less encoder layers: 6 -> 3
# - smaller input size: 1600*900 -> 800*450
# - multi-scale features -> single scale features (C5)


_base_ = [
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"sample": [
{"configurations": "test1"}
]
}
35 changes: 7 additions & 28 deletions code/BEVFormer/tools/analysis_tools/visual.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,45 +4,32 @@
# ---------------------------------------------

import mmcv
from nuscenes.nuscenes import NuScenes

from PIL import Image
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix
from typing import Tuple, List, Iterable
import matplotlib.pyplot as plt

import numpy as np
from PIL import Image
from matplotlib import rcParams
from matplotlib.axes import Axes
from pyquaternion import Quaternion
from PIL import Image
from matplotlib import rcParams
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from pyquaternion import Quaternion
from tqdm import tqdm

from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, Box
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix
from nuscenes.eval.common.data_classes import EvalBoxes, EvalBox
from nuscenes.eval.detection.data_classes import DetectionBox
from nuscenes.eval.detection.utils import category_to_detection_name
from nuscenes.eval.detection.render import visualize_sample




cams = ['CAM_FRONT',
'CAM_FRONT_RIGHT',
'CAM_BACK_RIGHT',
'CAM_BACK',
'CAM_BACK_LEFT',
'CAM_FRONT_LEFT']

import numpy as np
import matplotlib.pyplot as plt
from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, Box
from PIL import Image
from matplotlib import rcParams


def render_annotation(
anntoken: str,
margin: float = 10,
Expand Down Expand Up @@ -84,6 +71,7 @@ def render_annotation(
fig, axes = plt.subplots(1, num_cam + 1, figsize=(18, 9))
select_cams = [sample_record['data'][cam] for cam in select_cams]
print('bbox in cams:', select_cams)

# Plot LIDAR view.
lidar = sample_record['data']['LIDAR_TOP']
data_path, boxes, camera_intrinsic = nusc.get_sample_data(lidar, selected_anntokens=[anntoken])
Expand Down Expand Up @@ -142,8 +130,6 @@ def render_annotation(
if out_path is not None:
plt.savefig(out_path)



def get_sample_data(sample_data_token: str,
box_vis_level: BoxVisibility = BoxVisibility.ANY,
selected_anntokens=None,
Expand Down Expand Up @@ -205,8 +191,6 @@ def get_sample_data(sample_data_token: str,

return data_path, box_list, cam_intrinsic



def get_predicted_data(sample_data_token: str,
box_vis_level: BoxVisibility = BoxVisibility.ANY,
selected_anntokens=None,
Expand Down Expand Up @@ -269,9 +253,6 @@ def get_predicted_data(sample_data_token: str,

return data_path, box_list, cam_intrinsic




def lidiar_render(sample_token, data,out_path=None):
bbox_gt_list = []
bbox_pred_list = []
Expand Down Expand Up @@ -316,7 +297,6 @@ def lidiar_render(sample_token, data,out_path=None):
print('blue is the predited result')
visualize_sample(nusc, sample_token, gt_annotations, pred_annotations, savepath=out_path+'_bev')


def get_color(category_name: str):
"""
Provides the default colors based on the category names.
Expand Down Expand Up @@ -347,7 +327,6 @@ def get_color(category_name: str):
return nusc.colormap[key]
return [0, 0, 0]


def render_sample_data(
sample_toekn: str,
with_anns: bool = True,
Expand Down Expand Up @@ -471,7 +450,7 @@ def render_sample_data(
if __name__ == '__main__':
nusc = NuScenes(version='v1.0-trainval', dataroot='./data/nuscenes', verbose=True)
# render_annotation('7603b030b42a4b1caa8c443ccc1a7d52')
bevformer_results = mmcv.load('test/bevformer_base/Thu_Jun__9_16_22_37_2022/pts_bbox/results_nusc.json')
bevformer_results = mmcv.load('tests/bevformer_base/20240122/pts_bbox/results_nusc.json')
sample_token_list = list(bevformer_results['results'].keys())
for id in range(0, 10):
render_sample_data(sample_token_list[id], pred_data=bevformer_results, out_path=sample_token_list[id])
2 changes: 1 addition & 1 deletion code/BEVFusion/docker/notebook
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
echo $BASE_URL

if [ `whoami` = "root" ];then
# 启动jupyter并设置对应路径
# 启动jupyter并设置对应路径
/opt/conda/bin/jupyter lab --ip=0.0.0.0 --LabApp.base_url=$BASE_URL --allow-root
else
/opt/conda/bin/jupyter lab --ip=0.0.0.0 --LabApp.base_url=$BASE_URL
Expand Down
4 changes: 2 additions & 2 deletions tools/app/config.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "Qbot VIPs",
"url": "ufund-me.github.io/Qbot",
"name": "OccCourse VIPs",
"url": "maiweiai.site",
"author": "Charmve",
"isNonProfit": true,
"address": {
Expand Down
2 changes: 1 addition & 1 deletion tools/app/myapp.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
# mail_license = os.getenv("MAIL_LICENSE")
mail_license = "szfiwfywaakhieda"
# 收件人邮箱,可以为多个收件人
mail_receivers = ["[email protected]", "zhangwei@qcraft.ai"]
mail_receivers = ["[email protected]", "zhangwei@maiwei.ai"]


@app.route('/')
Expand Down
6 changes: 3 additions & 3 deletions tools/app/pull_issues.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@
def pull_issues(data = data):
# 发送 POST 请求
# https://github.com/Charmve/100days
repo_owner = "UFund-Me"
repo_name = "Qbot"
repo_owner = "Charmve"
repo_name = "OccNet-Course"
url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/issues"
response = re.post(url, headers=headers, json=data)

Expand Down Expand Up @@ -52,7 +52,7 @@ def close_github_issues(issue_url):

if __name__ == '__main__':
issues_data = {
"title": "🌈 {today} 来自Qbot的今日AI选股推送",
"title": "💡 {today} 来自OccCource更新提醒",
"body": "{content}"
}
pull_issues(issues_data)
Loading

0 comments on commit 28717b6

Please sign in to comment.