From f1c9cd9ea33c4765c430f398901ac7c0539186d3 Mon Sep 17 00:00:00 2001
From: tasada038
Date: Sun, 15 Dec 2024 14:06:16 +0900
Subject: [PATCH] Add zcam-rs and float32-data demos to work with RealSense
 and ROS2

---
 README.md                                     |   6 +-
 ROS2/zenoh-python-float32-data/README.md      |  31 ++
 .../ros2-float32-data.py                      | 103 +++++
 ROS2/zenoh-rest-teleop/README.md              |  27 +-
 ROS2/zenoh-rest-teleop/ros2-tb3-teleop.html   | 419 ++++++++++++++++++
 computer-vision/zcam/zcam-rs-python/README.md |  19 +
 .../zcam/zcam-rs-python/zcapture_rs.py        |  68 +++
 .../zcam/zcam-rs-python/zdisplay_rs.py        |  85 ++++
 8 files changed, 753 insertions(+), 5 deletions(-)
 create mode 100644 ROS2/zenoh-python-float32-data/README.md
 create mode 100644 ROS2/zenoh-python-float32-data/ros2-float32-data.py
 create mode 100644 ROS2/zenoh-rest-teleop/ros2-tb3-teleop.html
 create mode 100644 computer-vision/zcam/zcam-rs-python/README.md
 create mode 100644 computer-vision/zcam/zcam-rs-python/zcapture_rs.py
 create mode 100644 computer-vision/zcam/zcam-rs-python/zdisplay_rs.py

diff --git a/README.md b/README.md
index 091acde..f2b57b4 100644
--- a/README.md
+++ b/README.md
@@ -60,7 +60,7 @@ dashboard. Three subscriber frontends are provided: a Python script using
 [Matplotlib](https://matplotlib.org/), a browser client and a
 [Freeboard](https://freeboard.github.io) configuration.
 
-**computer-vision/zcam/zcam-{python,rust,rest}**: consists of two Zenoh nodes: a
+**computer-vision/zcam/zcam-{python,rust,rest,rs-python}**: consists of two Zenoh nodes: a
 publisher capturing a camera video stream and a subscriber displaying said
 video stream. Both of the Python and Rust implementations use
 [OpenCV](https://opencv.org/) to encode and decode data.
@@ -97,6 +97,10 @@ obstacles and environment boundaries (e.g. walls of a room).
 teleoperation messages by reading keyboard input (i.e. arrow keys). Both of the
 Rust and Python implementations are terminal applications.
 
+**ROS2/zenoh-python-float32-data**: a Zenoh node that publishes
+[Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)
+messages. Float32 messages can carry real-time sensor data such as temperature readings, velocities, or any other numerical values.
+
 **zenoh-home/{light,soil,temp-humi}-sensor**: Zenoh nodes running Zenoh
 [Pico](https://github.com/eclipse-zenoh/zenoh-pico) on an
 [ESP32](https://en.wikipedia.org/wiki/ESP32) board which publish sensor data
diff --git a/ROS2/zenoh-python-float32-data/README.md b/ROS2/zenoh-python-float32-data/README.md
new file mode 100644
index 0000000..c08b249
--- /dev/null
+++ b/ROS2/zenoh-python-float32-data/README.md
@@ -0,0 +1,31 @@
+# A zenoh Python Float32 publisher for ROS2
+
+## **Requirements**
+
+* Python 3.6 minimum
+* A [zenoh router](http://zenoh.io/docs/getting-started/quick-test/)
+* The [zenoh/DDS bridge](https://github.com/eclipse-zenoh/zenoh-plugin-dds#trying-it-out)
+* [zenoh-python](https://github.com/eclipse-zenoh/zenoh-python): install it with `pip install eclipse-zenoh`.
+* [pycdr2](https://pypi.org/project/pycdr2/): install it with `pip install pycdr2`.
+
+-----
+
+## **Usage**
+
+1. Start the zenoh/DDS bridge:
+
+   ```bash
+   zenoh-bridge-dds
+   ```
+
+2. Start the Float32 publisher:
+
+   ```bash
+   python ros2-float32-data.py
+   ```
+
+3. Start RViz2 and add a Plotter2D display to plot the published data:
+
+   ```bash
+   rviz2
+   ```
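+
+To check what the publisher puts on the wire without ROS2, you can run a plain
+zenoh subscriber. A minimal sketch (it reuses the `Float32` dataclass from
+`ros2-float32-data.py` and assumes the default `random_float32` topic):
+
+```python
+import time
+import zenoh
+from dataclasses import dataclass
+from pycdr2 import IdlStruct
+from pycdr2.types import float32
+
+@dataclass
+class Float32(IdlStruct, typename="std_msgs/msg/Float32"):
+    data: float32
+
+def listener(sample):
+    # Deserialize the CDR payload back into a Float32
+    msg = Float32.deserialize(bytes(sample.payload))
+    print(f"Received: {msg.data}")
+
+session = zenoh.open(zenoh.Config())
+sub = session.declare_subscriber("random_float32", listener)
+time.sleep(60)
+session.close()
+```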
diff --git a/ROS2/zenoh-python-float32-data/ros2-float32-data.py b/ROS2/zenoh-python-float32-data/ros2-float32-data.py
new file mode 100644
index 0000000..34da7d1
--- /dev/null
+++ b/ROS2/zenoh-python-float32-data/ros2-float32-data.py
@@ -0,0 +1,103 @@
+#
+# Copyright (c) 2024 ZettaScale Technology Inc.
+#
+# This program and the accompanying materials are made available under the
+# terms of the Eclipse Public License 2.0 which is available at
+# http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+# which is available at https://www.apache.org/licenses/LICENSE-2.0.
+#
+# SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+#
+# Contributors:
+#   The Zenoh Team,
+#
+
+import argparse
+import json
+import random
+import time
+
+import zenoh
+from dataclasses import dataclass
+from pycdr2 import IdlStruct
+from pycdr2.types import int32, uint32, float32
+
+
+@dataclass
+class Time(IdlStruct, typename="Time"):
+    sec: int32
+    nanosec: uint32
+
+
+@dataclass
+class Float32(IdlStruct, typename="std_msgs/msg/Float32"):
+    data: float32
+
+
+def main():
+    # --- Command line argument parsing ---
+    parser = argparse.ArgumentParser(
+        prog='random_float32_publisher',
+        description='zenoh random Float32 publisher example')
+    parser.add_argument('--mode', '-m', dest='mode',
+                        choices=['peer', 'client'],
+                        type=str,
+                        help='The zenoh session mode.')
+    parser.add_argument('--connect', '-e', dest='connect',
+                        metavar='ENDPOINT',
+                        action='append',
+                        type=str,
+                        help='zenoh endpoints to connect to.')
+    parser.add_argument('--listen', '-l', dest='listen',
+                        metavar='ENDPOINT',
+                        action='append',
+                        type=str,
+                        help='zenoh endpoints to listen on.')
+    parser.add_argument('--config', '-c', dest='config',
+                        metavar='FILE',
+                        type=str,
+                        help='A configuration file.')
+    parser.add_argument('--topic', '-t', dest='topic',
+                        default='random_float32',
+                        type=str,
+                        help='The topic to publish Float32 data on.')
+    parser.add_argument('--rate', '-r', dest='rate',
+                        default=10.0,
+                        type=float,
+                        help='The publication rate in Hz.')
+
+    args = parser.parse_args()
+    conf = zenoh.Config.from_file(args.config) if args.config is not None else zenoh.Config()
+    if args.mode is not None:
+        conf.insert_json5("mode", json.dumps(args.mode))
+    if args.connect is not None:
+        conf.insert_json5("connect/endpoints", json.dumps(args.connect))
+    if args.listen is not None:
+        conf.insert_json5("listen/endpoints", json.dumps(args.listen))
+    topic = args.topic
+    rate = args.rate
+
+    # zenoh session initialization
+    print("Opening session...")
+    session = zenoh.open(conf)
+
+    print(f"Publishing random Float32 data on topic '{topic}' at {rate} Hz.")
+
+    def pub_float32(data):
+        print(f"Published: {data} to topic '{topic}'")
+        # serialize() produces the CDR byte stream the zenoh/DDS bridge expects
+        t = Float32(data=float(data))
+        session.put(topic, t.serialize())
+
+    try:
+        interval = 1.0 / rate
+        while True:
+            random_value = random.uniform(-10.0, 10.0)  # random float between -10.0 and 10.0
+            pub_float32(random_value)
+            time.sleep(interval)
+    except KeyboardInterrupt:
+        print("Shutting down...")
+    finally:
+        session.close()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ROS2/zenoh-rest-teleop/README.md b/ROS2/zenoh-rest-teleop/README.md
index 94ae2c7..c2d5999 100644
--- a/ROS2/zenoh-rest-teleop/README.md
+++ b/ROS2/zenoh-rest-teleop/README.md
@@ -37,9 +37,28 @@ via the zenoh REST API, bridged to ROS2.
 * Keep pressing the arrow buttons (or arrows on your keyboard) to publish Twist
   messages (a STOP Twist with 0 speed is published when released).
 * All the messages published on "rosout" via ROS2 will be displayed in the
   bottom box.
+### ros2-tb3-teleop.html
 
-⚠️ Note: this demo depends on the Ros2 Middleware implementation being set to CycloneDDS
-Installation instructions for CycloneDDS below:
-https://docs.ros.org/en/humble/Installation/DDS-Implementations/Working-with-Eclipse-CycloneDDS.html
-Once installed the middleware env var must be set :
+A simple teleop web page that publishes Twist messages and subscribes to logs
+via the zenoh REST API, bridged to ROS2, for a Turtlebot3 simulated in Gazebo.
+
+1. Start the Turtlebot3 Gazebo simulation (clone the turtlebot3_simulations
+   package beforehand):
+
+   ```bash
+   RMW_IMPLEMENTATION=rmw_cyclonedds_cpp ros2 launch turtlebot3_gazebo turtlebot3_world.launch.py
+   ```
+
+2. Start the zenoh/DDS bridge, activating its REST API:
+
+   ```bash
+   zenoh-bridge-ros2dds --rest-http-port 8000
+   ```
+
+3. Open the `ros2-tb3-teleop.html` file in a Web browser.
+
+⚠️ Note: this demo requires the ROS2 middleware implementation to be CycloneDDS.
+Installation instructions for CycloneDDS:
+https://docs.ros.org/en/humble/Installation/DDS-Implementations/Working-with-Eclipse-CycloneDDS.html
+Once installed, the middleware environment variable must be set:
+`RMW_IMPLEMENTATION=rmw_cyclonedds_cpp`
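+
+For reference, the page's Drive buttons boil down to a zenoh REST `PUT` of a
+CDR-encoded Twist. A minimal Python sketch of the same call (it assumes the
+bridge maps the `/cmd_vel` topic to the `cmd_vel` key and that its REST API
+listens on port 8000, as configured above):
+
+```python
+import requests
+from dataclasses import dataclass
+from pycdr2 import IdlStruct
+from pycdr2.types import float64
+
+@dataclass
+class Vector3(IdlStruct, typename="geometry_msgs/msg/Vector3"):
+    x: float64
+    y: float64
+    z: float64
+
+@dataclass
+class Twist(IdlStruct, typename="geometry_msgs/msg/Twist"):
+    linear: Vector3
+    angular: Vector3
+
+# Drive forward at 0.2 m/s; the bridge republishes this as a ROS2 Twist
+twist = Twist(linear=Vector3(x=0.2, y=0.0, z=0.0),
+              angular=Vector3(x=0.0, y=0.0, z=0.0))
+requests.put("http://localhost:8000/cmd_vel", data=twist.serialize())
+```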
diff --git a/ROS2/zenoh-rest-teleop/ros2-tb3-teleop.html b/ROS2/zenoh-rest-teleop/ros2-tb3-teleop.html
new file mode 100644
index 0000000..d17cd97
--- /dev/null
+++ b/ROS2/zenoh-rest-teleop/ros2-tb3-teleop.html
@@ -0,0 +1,419 @@
+<!-- Single-page teleop UI: "Zenoh ROS2 teleop for Turtlebot3" -->
+<!-- "Drive" panel: arrow-button pad that publishes Twist messages -->
+<!-- "Logs" panel: displays the messages received on "rosout" -->
+<!-- "Config" panel: zenoh REST API settings -->
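+<!--
+  The Logs panel reads samples through the REST plugin's server-sent-events
+  interface. A Python sketch of the equivalent subscription (it assumes the
+  bridge's REST API on port 8000 and the `rosout` key):
+
+    import requests
+
+    resp = requests.get("http://localhost:8000/rosout",
+                        headers={"Accept": "text/event-stream"}, stream=True)
+    for line in resp.iter_lines():
+        if line:
+            print(line.decode())  # SSE events carrying JSON-encoded samples
+-->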
diff --git a/computer-vision/zcam/zcam-rs-python/README.md b/computer-vision/zcam/zcam-rs-python/README.md
new file mode 100644
index 0000000..e39fd6a
--- /dev/null
+++ b/computer-vision/zcam/zcam-rs-python/README.md
@@ -0,0 +1,19 @@
+# zcam-rs-python -- Streaming RealSense video with zenoh
+
+This is a simple application that shows how to stream the RGB and depth
+streams of an Intel RealSense camera with [zenoh](http://zenoh.io).
+
+## Dependencies
+
+```bash
+pip3 install eclipse-zenoh opencv-python numpy imutils
+sudo apt-get install librealsense2-dkms librealsense2-utils librealsense2-dev librealsense2-dbg
+pip install pyrealsense2
+```
+
+## Running zcam-rs-python
+
+Run the capture and display scripts in two separate terminals:
+
+```bash
+python3 zcapture_rs.py
+python3 zdisplay_rs.py
+```
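+
+The capture script publishes the depth stream as a colormapped JPEG, which is
+meant for display only. If you need metric depth instead, pyrealsense2 can
+read it per pixel. A minimal sketch (center-pixel distance, assuming the same
+640x480 depth stream as in `zcapture_rs.py`):
+
+```python
+import pyrealsense2 as rs
+
+pipeline = rs.pipeline()
+config = rs.config()
+config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
+pipeline.start(config)
+try:
+    frames = pipeline.wait_for_frames()
+    depth_frame = frames.get_depth_frame()
+    # get_distance() converts the raw z16 value to meters
+    print(f"Center distance: {depth_frame.get_distance(320, 240):.3f} m")
+finally:
+    pipeline.stop()
+```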
diff --git a/computer-vision/zcam/zcam-rs-python/zcapture_rs.py b/computer-vision/zcam/zcam-rs-python/zcapture_rs.py
new file mode 100644
index 0000000..f2b033a
--- /dev/null
+++ b/computer-vision/zcam/zcam-rs-python/zcapture_rs.py
@@ -0,0 +1,68 @@
+import argparse
+import json
+import time
+
+import cv2
+import numpy as np
+import pyrealsense2 as rs
+import zenoh
+
+# Argument settings
+parser = argparse.ArgumentParser(
+    prog='zrs_capture',
+    description='Zenoh RealSense RGB and Depth capture example')
+parser.add_argument('-m', '--mode', type=str, choices=['peer', 'client'],
+                    help='The zenoh session mode.')
+parser.add_argument('-e', '--connect', type=str, metavar='ENDPOINT', action='append',
+                    help='Zenoh endpoints to connect to.')
+parser.add_argument('-l', '--listen', type=str, metavar='ENDPOINT', action='append',
+                    help='Zenoh endpoints to listen on.')
+parser.add_argument('-k', '--key', type=str, default='demo/zcam',
+                    help='Key expression')
+parser.add_argument('-c', '--config', type=str, metavar='FILE',
+                    help='A Zenoh configuration file.')
+args = parser.parse_args()
+
+# Zenoh configuration (apply the mode and endpoints from the command line)
+conf = zenoh.Config.from_file(args.config) if args.config is not None else zenoh.Config()
+if args.mode is not None:
+    conf.insert_json5("mode", json.dumps(args.mode))
+if args.connect is not None:
+    conf.insert_json5("connect/endpoints", json.dumps(args.connect))
+if args.listen is not None:
+    conf.insert_json5("listen/endpoints", json.dumps(args.listen))
+z = zenoh.open(conf)
+
+# RealSense pipeline configuration
+pipeline = rs.pipeline()
+config = rs.config()
+config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)  # RGB
+config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)   # Depth
+
+pipeline.start(config)
+
+print("[INFO] Open RealSense camera...")
+
+# Publish frames to Zenoh
+def publish_frames():
+    while True:
+        # Wait for a coherent pair of frames
+        frames = pipeline.wait_for_frames()
+        color_frame = frames.get_color_frame()
+        depth_frame = frames.get_depth_frame()
+
+        if not color_frame or not depth_frame:
+            continue
+
+        # Get the RGB image and publish it as JPEG
+        rgb_image = np.asanyarray(color_frame.get_data())
+        _, jpeg_rgb = cv2.imencode('.jpg', rgb_image)
+        z.put(args.key + "/rgb", jpeg_rgb.tobytes())
+
+        # Get the Depth image
+        depth_image = np.asanyarray(depth_frame.get_data())
+
+        # Convert the 16-bit depth data to an 8-bit color image
+        # (alpha=0.03 maps the ~0-8.5 m range onto 0-255 before the JET color map)
+        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
+
+        # Encode the Depth image in JPEG format and publish it
+        _, jpeg_depth = cv2.imencode('.jpg', depth_colormap)
+        z.put(args.key + "/depth", jpeg_depth.tobytes())
+
+        time.sleep(0.05)
+
+# Send frames until interrupted
+try:
+    publish_frames()
+except KeyboardInterrupt:
+    pass
+finally:
+    pipeline.stop()
+    z.close()
diff --git a/computer-vision/zcam/zcam-rs-python/zdisplay_rs.py b/computer-vision/zcam/zcam-rs-python/zdisplay_rs.py
new file mode 100644
index 0000000..0c907b8
--- /dev/null
+++ b/computer-vision/zcam/zcam-rs-python/zdisplay_rs.py
@@ -0,0 +1,85 @@
+import argparse
+import json
+import time
+
+import cv2
+import numpy as np
+import zenoh
+
+# Argument parser
+parser = argparse.ArgumentParser(
+    prog='zdisplay',
+    description='zenoh video display example')
+parser.add_argument('-m', '--mode', type=str, choices=['peer', 'client'],
+                    help='The zenoh session mode.')
+parser.add_argument('-e', '--connect', type=str, metavar='ENDPOINT', action='append',
+                    help='zenoh endpoints to connect to.')
+parser.add_argument('-l', '--listen', type=str, metavar='ENDPOINT', action='append',
+                    help='zenoh endpoints to listen on.')
+parser.add_argument('-d', '--delay', type=float, default=0.05,
+                    help='delay between each frame in seconds')
+parser.add_argument('-k', '--key', type=str, default='demo/zcam',
+                    help='key expression')
+parser.add_argument('-c', '--config', type=str, metavar='FILE',
+                    help='A zenoh configuration file.')
+
+args = parser.parse_args()
+
+# Zenoh session setup
+conf = zenoh.Config.from_file(args.config) if args.config is not None else zenoh.Config()
+
+# Setting Zenoh mode (peer or client)
+if args.mode is not None:
+    conf.insert_json5("mode", json.dumps(args.mode))
+
+# Connect to the provided endpoints, if any; otherwise rely on zenoh's
+# default scouting to discover the publisher
+if args.connect is not None:
+    conf.insert_json5("connect/endpoints", json.dumps(args.connect))
+else:
+    print("[INFO] No connect endpoint specified, relying on default scouting.")
+
+# Listen on the provided endpoints, if any
+if args.listen is not None:
+    conf.insert_json5("listen/endpoints", json.dumps(args.listen))
+
+# Dictionary holding the latest image per key (RGB and Depth are kept separate)
+cams = {}
+
+def frames_listener(sample):
+    """Callback function for the Zenoh subscribers"""
+    # Convert the received payload (ZBytes) to a numpy array and decode the JPEG
+    npImage = np.frombuffer(bytes(sample.payload), dtype=np.uint8)
+    matImage = cv2.imdecode(npImage, 1)
+
+    # Update the latest image for this key
+    cams[sample.key_expr] = matImage
+
+# Open Zenoh session
+print('[INFO] Open zenoh session...')
+z = zenoh.open(conf)
+
+# Declare subscribers for RGB and Depth
+sub_rgb = z.declare_subscriber(args.key + "/rgb", frames_listener)
+sub_depth = z.declare_subscriber(args.key + "/depth", frames_listener)
+
+while True:
+    # Display all camera images in separate windows; copy the items since the
+    # listener callback may update the dictionary while we iterate
+    for cam_key, cam_img in list(cams.items()):
+        if cam_img is None:
+            continue
+        cam_key_str = str(cam_key)
+
+        if 'rgb' in cam_key_str:
+            cv2.imshow(f"RGB Camera: {cam_key_str}", cam_img)
+        elif 'depth' in cam_key_str:
+            cv2.imshow(f"Depth Camera: {cam_key_str}", cam_img)
+
+    # Exit on ESC key
+    key = cv2.waitKey(1) & 0xFF
+    if key == 27:
+        break
+
+    # Small delay before processing the next frame
+    time.sleep(args.delay)
+
+# Clean up
+cv2.destroyAllWindows()
+z.close()