diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0c8adc7..14a6c1c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,5 +21,5 @@ jobs: path: .cache restore-keys: | mkdocs-material- - - run: pip install mkdocs-material + - run: pip install -r docs-requirements.txt - run: mkdocs gh-deploy --force diff --git a/docs/index.md b/docs/index.md index 1532bb3..ba48108 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,20 +1,84 @@ -# Welcome to MkDocs +# T265 Tracking Camera Python Library +This library helps simplify your interaction with the T265. The camera will be automatically initialized and closed. -For full documentation visit [mkdocs.org](https://www.mkdocs.org). +This is very minimalistic. The point is to provide the simplest possible way of using T265 cameras. -## Commands +## Installation +This library is pip-installable: +```bash +pip install t265 +``` -* `mkdocs new [dir-name]` - Create a new project. -* `mkdocs serve` - Start the live-reloading docs server. -* `mkdocs build` - Build the documentation site. -* `mkdocs -h` - Print help message and exit. -## Project layout - mkdocs.yml # The configuration file. - docs/ - index.md # The documentation homepage. - ... # Other markdown pages, images and other files. +## Ver 0.1.xx +This library handles the frame transformation difference between the pyrealsense2 and ROS. +You can specify a camera by its serial number. If you want to use more than one T265, you can create multiple instances of Tracking(). + + + +## Important Note +!!! important + + Starting [Ver. 2.54.1](https://github.com/IntelRealSense/librealsense/releases/tag/v2.54.1) Intel has dropped support for the T265 entirely. + Hence, this library requires pyrealsense2 earlier than Ver. 2.53.1. + Starting Ver 0.1.3 this is part of requirements.txt. + + + +## How to use +Here is a sample code. For more details, please check the [Tracking APIs](./tracking.md). 
+```python +import time +from t265 import Tracking +my_tracking = Tracking(camera_sn='133122110783') # Provide your camera serial number. It should be on the camera bottom. None will use the first camera found. + +my_tracking.start_tracking() # [Optional] start tracking. It will be called automatically when you call update_pose() first time. + # this will retry connection (Default retry 5 times with 1sec interval) + +N = 10 # will show pose, vel, acc for 10 times +for _ in range(N): + my_tracking.update_pose(wait=True) # wait=True will block the code till you get data from camera. + print(f'Is camera on: {my_tracking.is_camera_on()}') + print(f"Camera_sn: {my_tracking.get_camera_sn()}") + + # Getting pose + print(f'Current pose: {my_tracking.get_translation()}') # getting pose (x,y,z, quat_xyzw) 1 by 7 np.array + print(f"Current pose: {my_tracking.get_translation(frame='ros')}") # getting pose in ros frame (camera/odom/sample) (x,y,z, quat_xyzw) 1 by 7 np.array + print(f'Current pose: {my_tracking.get_translation(rotation=None)}') # getting pose (x,y,z,) 1 by 3 np.array + print(f'Current pose: {my_tracking.get_translation(trans=False, rotation="xyz")}') # getting pose (euler_xyz) 1 by 3 np.array + print(f'Current pose: {my_tracking.get_translation(trans=False, rotation="zyx", degrees=True)}') # getting pose (euler_zyx) 1 by 3 np.array in degrees + print(f"Current pose: {my_tracking.get_matrix()}") # getting pose Transformation matrix 4 by 4 np.array. 
[r11, r12, r13, tx, r21, r22, r23, ty, r31, r32, r33, tz, 0, 0, 0, 1] + print(f"Current pose: {my_tracking.get_matrix(frame='ros')}") # getting pose Transformation matrix in ros frame (camera/odom/sample) 4 by 4 np.array + + # Getting velocity and acceleration + print(f'Current velocity: {my_tracking.get_velocity()}') # getting velocity (linear vel x, y, z, angular vel x, y, z) + print(f'Current acceleration: {my_tracking.get_acceleration()}') # getting acceleration (linear acc x, y, z, angular acc x, y, z) + + time.sleep(1) # loop every 1sec +``` + +## Note + +You don't need to provide a camera serial number. If you don't provide it, the first camera found will be used. If you provide it, it'll error out if the specific camera is not found. + + +You don't need to call `my_tracking.start_tracking()` since it will be called when you call `my_tracking.update_pose()` for the first time. +You can, of course, call it by yourself, but calling it multiple times will throw an error. + +You don't need to call `my_tracking.stop_tracking()` to stop the camera since the class Tracking destructor will call it automatically. +You can, of course, call it by yourself, but calling it multiple times will throw an error. + + +If you need to restart a camera, then you can call stop_tracking(), then call start_tracking(). + + +## Framing +Currently, the library supports 2 frames: t265 hardware and ros (camera/odom/sample). Velocity and acceleration are always in t265 hardware frame (to be implemented). + +!!! note + + Currently only translation is supported. Velocity and acceleration are not supported yet. 
+ -::: t265.Tracking.Tracking diff --git a/docs/tracking.md b/docs/tracking.md new file mode 100644 index 0000000..ad7531d --- /dev/null +++ b/docs/tracking.md @@ -0,0 +1,5 @@ +# Tracking class APIs + + +## APIs +::: t265.Tracking.Tracking diff --git a/mkdocs.yml b/mkdocs.yml index e167535..99bd333 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -119,6 +119,5 @@ extra_javascript: nav: - - Home: - - index.md - - about_us.md + - index.md + - tracking.md diff --git a/t265/Tracking.py b/t265/Tracking.py index be95191..f1b4a09 100644 --- a/t265/Tracking.py +++ b/t265/Tracking.py @@ -35,6 +35,7 @@ def __init__(self, camera_sn=None, connection_retry=DEFAULT_CONNECTION_RETRY, re def start_tracking(self): """ Start tracking with the camera. It will retry for self.connection_retry times. + Tips: This will be called automatically when you call update_pose() for the first time. """ self.pipe.start(self.config) for i in range(self.connection_retry): @@ -71,6 +72,8 @@ def _config_camera(self): def stop_tracking(self): """ Stop tracking with the camera. + + Note: This will be called automatically when Python exits. """ if self.camera_on: self.pipe.stop() @@ -108,7 +111,7 @@ def get_translation(self, frame=None, trans=True, rotation='quat', degrees=False trans (bool): If True, returns the translation. rotation (str): Rotation format. 'quat', or Euler 'xyz', 'xzy', 'yxz', 'yzx', 'zxy', 'zyx', 'ZYX', 'ZXY', 'YXZ', 'YZX', 'XZY', 'XYZ'. - degrees (bool): If True, the Euler angles are returned in degrees. + degrees (bool): If True, the Euler angles are returned in degrees. ignored if rotation is 'quat'. Returns: np.array: [x, y, z, qx, qy, qz, qw] or [x, y, z, rx, ry, rz] for 'xyz' format. 
Follows the input Euler @@ -122,7 +125,7 @@ def get_translation(self, frame=None, trans=True, rotation='quat', degrees=False if T is not None: t = T[0:3, 3].flatten() q = R.from_matrix(T[0:3, 0:3]).as_quat() - ret = self.to_nparray(t, q, trans, rotation, degrees) + ret = self._to_nparray(t, q, trans, rotation, degrees) return ret def _to_single_nparray(self, list_array): @@ -145,9 +148,9 @@ def _get_translation(self, trans=True, rotation='quat', degrees=False): if self.pose: t = self._vector2np(self.pose.get_pose_data().translation) q = self._quat2np(self.pose.get_pose_data().rotation) - return self.to_nparray(t, q, trans, rotation, degrees) + return self._to_nparray(t, q, trans, rotation, degrees) - def to_nparray(self, t, q, trans, rotation, degrees): + def _to_nparray(self, t, q, trans, rotation, degrees): """ Convert translation and rotation to np.array. @@ -279,9 +282,21 @@ def _quat2np(self, quat, order: str = 'xyzw'): # Status functions def is_camera_on(self): + """ + Check if the camera is on. + + Returns: + bool: True if the camera is on. + """ return self.camera_on def get_camera_sn(self): + """ + Get the camera serial number. + + Returns: + str: Camera serial number. + """ self.camera_sn = self.pipe.get_active_profile().get_device().get_info(rs.camera_info.serial_number) return self.camera_sn