added mypy integration
naokiyokoyamabd committed Oct 6, 2023
1 parent 66edd96 commit 26518e2
Showing 44 changed files with 409 additions and 371 deletions.
33 changes: 13 additions & 20 deletions .pre-commit-config.yaml
@@ -6,41 +6,34 @@ repos:
hooks:
- id: ruff
args: ['--fix', '--config', 'pyproject.toml']
exclude: 'dreamerv3/.*|grpc_infra/.*'

- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: end-of-file-fixer
exclude: 'dreamerv3/.*|grpc_infra/.*'
- id: trailing-whitespace
exclude: 'dreamerv3/.*|grpc_infra/.*'
- id: check-yaml
exclude: 'dreamerv3/.*|grpc_infra/.*'
- id: check-added-large-files
exclude: 'dreamerv3/.*|grpc_infra/.*'
- id: check-toml
exclude: 'dreamerv3/.*|grpc_infra/.*'

- repo: https://github.com/psf/black
rev: 23.3.0
hooks:
- id: black
language_version: python3.9
args: ['--config', 'pyproject.toml']
exclude: 'dreamerv3/.*|grpc_infra/.*'
verbose: true

# - repo: https://github.com/pre-commit/mirrors-mypy
# rev: v1.2.0
# hooks:
# - id: mypy
# pass_filenames: false
# additional_dependencies:
# - types-protobuf
# - types-requests
# - types-simplejson
# - types-ujson
# - types-PyYAML
# - types-toml
# - types-six
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.2.0
hooks:
- id: mypy
pass_filenames: false
additional_dependencies:
- types-protobuf
- types-requests
- types-simplejson
- types-ujson
- types-PyYAML
- types-toml
- types-six
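
With the mypy hook now active, a quick way to exercise it locally is through pre-commit itself. A minimal sketch, assuming `pre-commit` is already installed in the environment:

```bash
# Install the git hooks once, then run only the mypy hook against every
# file rather than just the staged ones.
pre-commit install
pre-commit run mypy --all-files
```

Note that `pass_filenames: false` in the hook config means pre-commit passes no paths to mypy; mypy instead resolves its own file list from the `files` setting in `pyproject.toml` (changed later in this commit).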
53 changes: 32 additions & 21 deletions README.md
@@ -8,11 +8,40 @@ conda_env_name=vlfm
conda create -n $conda_env_name python=3.9 -y &&
conda activate $conda_env_name
```

Install all the dependencies:
If you are using Habitat and running simulation experiments, install this repo into your environment with the following:
```bash
pip install -e .[habitat]
```
If you are using the Spot robot, install this repo into your environment with the following:
```bash
pip install -e .[reality]
```
Next, clone the following dependencies into the root of this repo:
```bash
git clone [email protected]:WongKinYiu/yolov7.git # if using YOLOv7
git clone [email protected]:IDEA-Research/GroundingDINO.git
```
Follow the install directions for GroundingDINO; a sketch of the typical step is shown below. Nothing needs to be installed for YOLOv7, but it must be cloned into this repo.
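
For reference, GroundingDINO's install typically amounts to an editable pip build from inside the clone; this is a sketch under the assumption that the upstream instructions have not changed:

```bash
# Build GroundingDINO's CUDA extension into the active environment.
# CUDA_HOME must be set; see the next section if it is not.
cd GroundingDINO
pip install -e .
cd ..
```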

### Installing GroundingDINO (only if using conda-installed CUDA)
Only attempt this if the installation instructions in the GroundingDINO repo do not work.

To install GroundingDINO, you will need the `CUDA_HOME` environment variable set. If you are using conda and want a CUDA version compatible with the one used to compile your PyTorch build, the following commands install CUDA and set `CUDA_HOME`:
```bash
# This example is specifically for CUDA 11.8
mamba install \
cub \
thrust \
cuda-runtime \
cudatoolkit=11.8 \
cuda-nvcc==11.8.89 \
-c "nvidia/label/cuda-11.8.0" \
-c nvidia &&
ln -s ${CONDA_PREFIX}/lib/python3.9/site-packages/nvidia/cuda_runtime/include/* ${CONDA_PREFIX}/include/ &&
ln -s ${CONDA_PREFIX}/lib/python3.9/site-packages/nvidia/cusparse/include/* ${CONDA_PREFIX}/include/ &&
ln -s ${CONDA_PREFIX}/lib/python3.9/site-packages/nvidia/cublas/include/* ${CONDA_PREFIX}/include/ &&
ln -s ${CONDA_PREFIX}/lib/python3.9/site-packages/nvidia/cusolver/include/* ${CONDA_PREFIX}/include/ &&
export CUDA_HOME=${CONDA_PREFIX}
```
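
Two sanity checks after the block above, assuming the install succeeded (the conda-installed `nvcc` lands in the environment's `bin/`):

```bash
# nvcc should resolve inside the env and report release 11.8.
"${CUDA_HOME}/bin/nvcc" --version
# The CUDA version your PyTorch build was compiled with, for comparison.
python -c "import torch; print(torch.version.cuda)"
```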

## 2. Downloading the HM3D dataset
@@ -50,29 +79,11 @@ mv objectnav_hm3d_v2 $DATA_DIR/datasets/objectnav/hm3d/v2 &&
rm objectnav_hm3d_v2.zip
```


## 3. Downloading weights for various models
The weights for MobileSAM, GroundingDINO, YOLOv7, and PointNav must be saved to the `data/` directory. They can be downloaded from the following links; an illustrative download sketch follows the list:
- `mobile_sam.pt`: https://github.com/ChaoningZhang/MobileSAM
- `groundingdino_swint_ogc.pth`: https://github.com/IDEA-Research/GroundingDINO
- `yolov7-e6e.pt`: https://github.com/WongKinYiu/yolov7
- `pointnav_weights.pth`:
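
For illustration only, the downloads might look like the sketch below. The asset URLs are assumptions based on each repository's release layout and may have moved, so verify them against the links above; `pointnav_weights.pth` is omitted because no link is given above.

```bash
mkdir -p data &&
wget -O data/mobile_sam.pt \
  https://raw.githubusercontent.com/ChaoningZhang/MobileSAM/master/weights/mobile_sam.pt &&
wget -O data/groundingdino_swint_ogc.pth \
  https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth &&
wget -O data/yolov7-e6e.pt \
  https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e.pt
```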

### Installing GroundingDINO
To install GroundingDINO, you will need `CUDA_HOME` set as an environment variable. If you would like to install a certain version of CUDA that is compatible with the one used to compile your version of pytorch, and you are using conda, you can run the following commands to install CUDA and set `CUDA_HOME`:
```bash
# This example is specifically for CUDA 11.8
mamba install \
cub \
thrust \
cuda-runtime \
cudatoolkit=11.8 \
cuda-nvcc==11.8.89 \
-c "nvidia/label/cuda-11.8.0" \
-c nvidia &&
ln -s ${CONDA_PREFIX}/lib/python3.9/site-packages/nvidia/cuda_runtime/include/* ${CONDA_PREFIX}/include/ &&
ln -s ${CONDA_PREFIX}/lib/python3.9/site-packages/nvidia/cusparse/include/* ${CONDA_PREFIX}/include/ &&
ln -s ${CONDA_PREFIX}/lib/python3.9/site-packages/nvidia/cublas/include/* ${CONDA_PREFIX}/include/ &&
ln -s ${CONDA_PREFIX}/lib/python3.9/site-packages/nvidia/cusolver/include/* ${CONDA_PREFIX}/include/ &&
export CUDA_HOME=${CONDA_PREFIX}
```
## 4. Evaluation within Habitat
8 changes: 7 additions & 1 deletion pyproject.toml
@@ -37,6 +37,12 @@ habitat = [
"habitat-baselines == 0.2.420230405",
"habitat-lab == 0.2.420230405",
]
reality = [
"spot_wrapper @ git+https://github.com/naokiyokoyama/bd_spot_wrapper.git",
"bosdyn-client >= 3.3.2",
"bosdyn-api >= 3.3.2",
"six >= 1.16.0",
]

[project.urls]
"Homepage" = "theaiinstitute.com"
@@ -120,5 +126,5 @@ strict_equality = true
warn_unreachable = true
warn_redundant_casts = true
no_implicit_optional = true
files = ['projects', 'src', 'ws']
files = ['vlfm']
exclude = '^(docker|.*external|.*thirdparty|.*install|.*build|.*_experimental)/'
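
Because `files = ['vlfm']` now lives in the `[tool.mypy]` table of `pyproject.toml`, which mypy reads natively, a bare invocation from the repo root should pick everything up. A minimal sketch:

```bash
# No file arguments needed: the [tool.mypy] table supplies
# files = ['vlfm'] plus the exclude pattern shown above.
mypy
```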
16 changes: 0 additions & 16 deletions scripts/eval_llm_policy.sh

This file was deleted.

12 changes: 7 additions & 5 deletions vlfm/mapping/base_map.py
@@ -1,4 +1,4 @@
from typing import List
from typing import Any, List

import numpy as np

@@ -8,10 +8,12 @@
class BaseMap:
_confidence_mask: np.ndarray = None
_camera_positions: List[np.ndarray] = []
_last_camera_yaw: float = None
_last_camera_yaw: float = 0.0
_map_dtype: np.dtype = np.float32

def __init__(self, size: int = 1000, pixels_per_meter: int = 20, *args, **kwargs):
def __init__(
self, size: int = 1000, pixels_per_meter: int = 20, *args: Any, **kwargs: Any
):
"""
Args:
size: The size of the map in pixels.
@@ -24,14 +26,14 @@ def __init__(self, size: int = 1000, pixels_per_meter: int = 20, *args, **kwargs
self._episode_pixel_origin, self.pixels_per_meter
)

def reset(self):
def reset(self) -> None:
self._map.fill(0)
self._camera_positions = []
self._traj_vis = TrajectoryVisualizer(
self._episode_pixel_origin, self.pixels_per_meter
)

def update_agent_traj(self, robot_xy, robot_heading):
def update_agent_traj(self, robot_xy: np.ndarray, robot_heading: float) -> None:
self._camera_positions.append(robot_xy)
self._last_camera_yaw = robot_heading

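The `_last_camera_yaw` change above illustrates the kind of fix mypy forces throughout this commit: a `float` annotation cannot carry a `None` default unless it is declared `Optional`. A hypothetical repro, matching the `no_implicit_optional = true` setting in `pyproject.toml`:

```bash
cat > /tmp/yaw_demo.py << 'EOF'
class BaseMap:
    _last_camera_yaw: float = None  # the pattern this commit removes
EOF
mypy --no-implicit-optional /tmp/yaw_demo.py
# -> error: Incompatible types in assignment
#    (expression has type "None", variable has type "float")
```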
4 changes: 2 additions & 2 deletions vlfm/mapping/frontier_map.py
@@ -17,12 +17,12 @@ class FrontierMap:
def __init__(self, encoding_type: str = "cosine"):
self.encoder: BLIP2ITMClient = BLIP2ITMClient()

def reset(self):
def reset(self) -> None:
self.frontiers = []

def update(
self, frontier_locations: List[np.ndarray], curr_image: np.ndarray, text: str
):
) -> None:
"""
Takes in a list of frontier coordinates and the current image observation from
the robot. Any stored frontiers that are not present in the given list are
6 changes: 3 additions & 3 deletions vlfm/mapping/object_point_cloud_map.py
@@ -20,7 +20,7 @@ def __init__(self, erosion_size: float) -> None:
self._erosion_size = erosion_size
self.last_target_coord: Union[np.ndarray, None] = None

def reset(self):
def reset(self) -> None:
self.clouds = {}
self.last_target_coord = None

@@ -207,7 +207,7 @@ def _get_closest_point(


def open3d_dbscan_filtering(
points, eps: float = 0.2, min_points: int = 100
points: np.ndarray, eps: float = 0.2, min_points: int = 100
) -> np.ndarray:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
@@ -238,7 +238,7 @@ def open3d_dbscan_filtering(
return largest_cluster_points


def visualize_and_save_point_cloud(point_cloud: np.ndarray, save_path: str):
def visualize_and_save_point_cloud(point_cloud: np.ndarray, save_path: str) -> None:
"""Visualizes an array of 3D points and saves the visualization as a PNG image.
Args:
43 changes: 4 additions & 39 deletions vlfm/mapping/obstacle_map.py
@@ -1,12 +1,9 @@
import json

import cv2
import numpy as np

from frontier_exploration.frontier_detection import detect_frontier_waypoints
from frontier_exploration.utils.fog_of_war import reveal_fog_of_war
from vlfm.mapping.base_map import BaseMap
from vlfm.mapping.value_map import JSON_PATH, KWARGS_JSON
from vlfm.utils.geometry_utils import extract_yaw, get_point_cloud, transform_points
from vlfm.utils.img_utils import fill_small_holes

@@ -44,7 +41,7 @@ def __init__(
kernel_size = int(kernel_size) + (int(kernel_size) % 2 == 0)
self._navigable_kernel = np.ones((kernel_size, kernel_size), np.uint8)

def reset(self):
def reset(self) -> None:
super().reset()
self._navigable_map.fill(0)
self.explored_area.fill(0)
@@ -62,7 +59,7 @@ def update_map(
topdown_fov: float,
explore: bool = True,
update_obstacles: bool = True,
):
) -> None:
"""
Adds all obstacles from the current view to the map. Also updates the area
that the robot has explored so far.
@@ -159,7 +156,7 @@ def update_map(
else:
self.frontiers = self._px_to_xy(self._frontiers_px)

def _get_frontiers(self):
def _get_frontiers(self) -> np.ndarray:
"""Returns the frontiers of the map."""
# Dilate the explored area slightly to prevent small gaps between the explored
# area and the unnavigable area from being detected as frontiers.
@@ -175,7 +172,7 @@ def _get_frontiers(self):
)
return frontiers

def visualize(self):
def visualize(self) -> np.ndarray:
"""Visualizes the map."""
vis_img = np.ones((*self._map.shape[:2], 3), dtype=np.uint8) * 255
# Draw explored area in light green
@@ -204,35 +201,3 @@ def filter_points_by_height(
points: np.ndarray, min_height: float, max_height: float
) -> np.ndarray:
return points[(points[:, 2] >= min_height) & (points[:, 2] <= max_height)]


def replay_from_dir():
with open(KWARGS_JSON, "r") as f:
kwargs = json.load(f)
with open(JSON_PATH, "r") as f:
data = json.load(f)

v = ObstacleMap(
min_height=float(kwargs.get("min_height", 0.15)),
max_height=float(kwargs.get("max_height", 0.88)),
agent_radius=float(kwargs.get("agent_radius", 0.18)),
size=kwargs["size"],
)

sorted_keys = sorted(list(data.keys()))

for img_path in sorted_keys:
tf_camera_to_episodic = np.array(data[img_path]["tf_camera_to_episodic"])
depth = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.0
v.update_map(depth, tf_camera_to_episodic)

img = v.visualize()
cv2.imshow("img", img)
key = cv2.waitKey(0)
if key == ord("q"):
break


if __name__ == "__main__":
replay_from_dir()
quit()
6 changes: 3 additions & 3 deletions vlfm/mapping/traj_visualizer.py
@@ -1,4 +1,4 @@
from typing import List, Union
from typing import Any, List, Union

import cv2
import numpy as np
@@ -19,7 +19,7 @@ def __init__(self, origin_in_img: np.ndarray, pixels_per_meter: float):
self._origin_in_img = origin_in_img
self._pixels_per_meter = pixels_per_meter

def reset(self):
def reset(self) -> None:
self._num_drawn_points = 1
self._cached_path_mask = None

@@ -109,7 +109,7 @@ def _draw_agent(
return img

def draw_circle(
self, img: np.ndarray, position: np.ndarray, **kwargs
self, img: np.ndarray, position: np.ndarray, **kwargs: Any
) -> np.ndarray:
"""Draws the point as a circle on the image and returns it"""
px_position = self._metric_to_pixel(position)
