diff --git a/feeding_web_app_ros2_test/feeding_web_app_ros2_test/DummyRealSense.py b/feeding_web_app_ros2_test/feeding_web_app_ros2_test/DummyRealSense.py
index 858e4bfa..d0efdaeb 100755
--- a/feeding_web_app_ros2_test/feeding_web_app_ros2_test/DummyRealSense.py
+++ b/feeding_web_app_ros2_test/feeding_web_app_ros2_test/DummyRealSense.py
@@ -85,6 +85,9 @@ def __init__(self):
self.camera_info_publisher = self.create_publisher(
CameraInfo, "~/camera_info", 1
)
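+ # The real RealSense driver also publishes CameraInfo for the aligned depth
+ # stream; mirror that here so downstream nodes can subscribe to it.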
+ self.aligned_depth_camera_info_publisher = self.create_publisher(
+ CameraInfo, "~/aligned_depth/camera_info", 1
+ )
if self.video is not None:
self.num_frames = 0
self.bridge = CvBridge()
@@ -174,6 +177,7 @@ def publish_frames(self):
self.compressed_image_publisher.publish(compressed_frame_msg)
self.aligned_depth_publisher.publish(depth_frame_msg)
self.camera_info_publisher.publish(self.camera_info_msg)
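+ # The dummy depth frames are pixel-aligned to the color frames, so the
+ # same CameraInfo applies to both topics.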
+ self.aligned_depth_camera_info_publisher.publish(self.camera_info_msg)
rate.sleep()
diff --git a/feeding_web_app_ros2_test/feeding_web_app_ros2_test/FaceDetection.py b/feeding_web_app_ros2_test/feeding_web_app_ros2_test/FaceDetection.py
index da9ead4b..cd5da709 100755
--- a/feeding_web_app_ros2_test/feeding_web_app_ros2_test/FaceDetection.py
+++ b/feeding_web_app_ros2_test/feeding_web_app_ros2_test/FaceDetection.py
@@ -7,7 +7,7 @@
import numpy as np
import rclpy
from rclpy.node import Node
-from sensor_msgs.msg import Image
+from sensor_msgs.msg import CompressedImage
from threading import Lock
@@ -65,7 +65,10 @@ def __init__(
# Subscribe to the camera feed
self.subscription = self.create_subscription(
- Image, "camera/color/image_raw", self.camera_callback, 1
+ CompressedImage,
+ "camera/color/image_raw/compressed",
+ self.camera_callback,
+ 1,
)
self.subscription # prevent unused variable warning
@@ -73,7 +76,9 @@ def __init__(
self.publisher_results = self.create_publisher(
FaceDetection, "face_detection", 1
)
- self.publisher_image = self.create_publisher(Image, "face_detection_img", 1)
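+ # Publish the annotated image as a CompressedImage, so web clients can
+ # subscribe directly over rosbridge instead of via web_video_server.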
+ self.publisher_image = self.create_publisher(
+ CompressedImage, "face_detection_img/compressed", 1
+ )
def toggle_face_detection_callback(self, request, response):
"""
@@ -150,15 +155,16 @@ def camera_callback(self, msg):
face_detection_msg.is_face_detected = is_face_detected
if is_face_detected:
# Add a dummy face marker to the sensor_msgs/Image
- cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
+ cv_image = self.bridge.compressed_imgmsg_to_cv2(msg, "bgr8")
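+ # CompressedImage has no width/height fields, so read them off the decoded array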
+ height, width, _ = cv_image.shape
cv2.circle(
cv_image,
- (msg.width // 2, msg.height // 2),
- msg.height // 25,
+ (width // 2, height // 2),
+ height // 25,
(0, 0, 255),
-1,
)
- annotated_msg = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
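+ # Re-encode the annotated frame as JPEG for the compressed topic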
+ annotated_msg = self.bridge.cv2_to_compressed_imgmsg(cv_image, "jpeg")
annotated_img = annotated_msg
# Publish the detected mouth center. The below is a hardcoded
# rough position of the mouth from the side staging location,
@@ -185,7 +191,6 @@ def camera_callback(self, msg):
annotated_img = msg
face_detection_msg.is_mouth_open = open_mouth_detected
self.publisher_results.publish(face_detection_msg)
- self.get_logger().info("Published face detection")
self.publisher_image.publish(annotated_img)
diff --git a/feeding_web_app_ros2_test/feeding_web_app_ros2_test/SegmentFromPoint.py b/feeding_web_app_ros2_test/feeding_web_app_ros2_test/SegmentFromPoint.py
index 0bec0a45..c330d7ef 100644
--- a/feeding_web_app_ros2_test/feeding_web_app_ros2_test/SegmentFromPoint.py
+++ b/feeding_web_app_ros2_test/feeding_web_app_ros2_test/SegmentFromPoint.py
@@ -9,7 +9,7 @@
from rclpy.action import ActionServer, CancelResponse, GoalResponse
from rclpy.executors import MultiThreadedExecutor
from rclpy.node import Node
-from sensor_msgs.msg import CompressedImage, Image, RegionOfInterest
+from sensor_msgs.msg import CompressedImage, RegionOfInterest
from shapely.geometry import MultiPoint
import threading
import time
@@ -34,8 +34,8 @@ def __init__(self, sleep_time=2.0, send_feedback_hz=10):
# Subscribe to the image topic, to store the latest image
self.image_subscriber = self.create_subscription(
- Image,
- "/camera/color/image_raw",
+ CompressedImage,
+ "/camera/color/image_raw/compressed",
self.image_callback,
1,
)
@@ -123,8 +123,8 @@ def segment_image(self, seed_point, result, segmentation_success):
with self.latest_img_msg_lock:
latest_img_msg = self.latest_img_msg
result.header = latest_img_msg.header
- width = latest_img_msg.width
- height = latest_img_msg.height
+ img = self.bridge.compressed_imgmsg_to_cv2(latest_img_msg, "bgr8")
+ # Note: numpy image arrays have shape (height, width, channels)
+ height, width, _ = img.shape
# Sleep (dummy segmentation)
time.sleep(self.sleep_time)
@@ -175,6 +175,10 @@ def segment_image(self, seed_point, result, segmentation_success):
format="jpeg",
data=cv2.imencode(".jpg", mask_img)[1].tostring(),
)
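+ # Also attach the RGB crop of the mask's bounding box, so the UI can
+ # render each detected item without a separate image stream.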
+ mask_msg.rgb_image = CompressedImage(
+ format="jpeg",
+ data=cv2.imencode(".jpg", img[y_min:y_max, x_min:x_max])[1].tostring(),
+ )
mask_msg.item_id = "dummy_food_id_%d" % (i)
mask_msg.confidence = np.random.random()
result.detected_items.append(mask_msg)
diff --git a/feeding_web_app_ros2_test/launch/feeding_web_app_dummy_nodes_launch.xml b/feeding_web_app_ros2_test/launch/feeding_web_app_dummy_nodes_launch.xml
index 467a9a47..ec32a668 100644
--- a/feeding_web_app_ros2_test/launch/feeding_web_app_dummy_nodes_launch.xml
+++ b/feeding_web_app_ros2_test/launch/feeding_web_app_dummy_nodes_launch.xml
@@ -10,8 +10,6 @@
-
-
@@ -23,8 +21,9 @@
+
-
+
diff --git a/feedingwebapp/src/App.jsx b/feedingwebapp/src/App.jsx
index 7c0290eb..779b7a63 100644
--- a/feedingwebapp/src/App.jsx
+++ b/feedingwebapp/src/App.jsx
@@ -22,23 +22,22 @@ import BiteSelectionPointMask from './Pages/Home/BiteSelectionUIStates/BiteSelec
* @param {APP_PAGE} appPage - The current app page. Must be one of the
* states specified in APP_PAGE.
* @param {string} rosbridgeURL - The URL of the rosbridge server.
- * @param {string} webVideoServerURL - The URL of the web_video_server.
* @param {bool} debug - Whether to run it in debug mode or not.
*/
-function getComponentByAppPage(appPage, rosbridgeURL, webVideoServerURL, debug) {
+function getComponentByAppPage(appPage, rosbridgeURL, debug) {
switch (appPage) {
case APP_PAGE.Home:
// Must wrap a component in ROS tags for it to be able to connect to ROS
return (
-
-
+
+
)
case APP_PAGE.Settings:
return (
-
+
)
@@ -57,8 +56,6 @@ function App() {
// Get the rosbridge URL
const rosbridgeURL = 'ws://'.concat(window.location.hostname, ':', process.env.REACT_APP_ROSBRIDGE_PORT)
- // Get the web_video_server URL
- const webVideoServerURL = 'http://'.concat(window.location.hostname, ':', process.env.REACT_APP_WEB_VIDEO_SERVER_PORT)
// Get the debug flag
const debug = process.env.REACT_APP_DEBUG === 'true'
@@ -68,7 +65,7 @@ function App() {
<>
-
+
-
+
}
@@ -93,7 +90,7 @@ function App() {
path='/test_bite_selection_ui/point_mask_selection'
element={
-
+
}
@@ -103,7 +100,7 @@ function App() {
path='/test_bite_selection_ui/food_name_selection'
element={
-
+
}
diff --git a/feedingwebapp/src/Pages/Constants.js b/feedingwebapp/src/Pages/Constants.js
index ebe256fe..dd6ede91 100644
--- a/feedingwebapp/src/Pages/Constants.js
+++ b/feedingwebapp/src/Pages/Constants.js
@@ -48,10 +48,10 @@ NON_MOVING_STATES.add(MEAL_STATE.U_PostMeal)
export { NON_MOVING_STATES }
// The names of the ROS topic(s)
-export const CAMERA_FEED_TOPIC = '/local/camera/color/image_raw'
+export const CAMERA_FEED_TOPIC = '/local/camera/color/image_raw/compressed'
export const FACE_DETECTION_TOPIC = '/face_detection'
export const FACE_DETECTION_TOPIC_MSG = 'ada_feeding_msgs/FaceDetection'
-export const FACE_DETECTION_IMG_TOPIC = '/face_detection_img'
+export const FACE_DETECTION_IMG_TOPIC = '/face_detection_img/compressed'
// States from which, if they fail, it is NOT okay for the user to retry the
// same action.
diff --git a/feedingwebapp/src/Pages/Header/Header.jsx b/feedingwebapp/src/Pages/Header/Header.jsx
index 63f47f19..5e32d6d8 100644
--- a/feedingwebapp/src/Pages/Header/Header.jsx
+++ b/feedingwebapp/src/Pages/Header/Header.jsx
@@ -4,9 +4,6 @@ import React, { useCallback, useEffect, useState } from 'react'
import Navbar from 'react-bootstrap/Navbar'
import Nav from 'react-bootstrap/Nav'
import { useMediaQuery } from 'react-responsive'
-// PropTypes is used to validate that the used props are in fact passed to this
-// Component
-import PropTypes from 'prop-types'
// Toast generates a temporary pop-up with a timeout.
import { ToastContainer /* , toast */ } from 'react-toastify'
import 'react-toastify/dist/ReactToastify.css'
@@ -23,7 +20,7 @@ import LiveVideoModal from './LiveVideoModal'
* clicking "Video", and the ToastContainer popup that specifies when the user
* cannot click Settings.
*/
-const Header = (props) => {
+const Header = () => {
// Create a local state variable to toggle on/off the video
// TODO: Since this local state variable is in the header, the LiveVideoModal
// continues showing even if the state changes. Is this desirable? Perhaps
@@ -166,13 +163,9 @@ const Header = (props) => {
* The LiveVideoModal toggles on and off with the Video button and shows the
* robot's live camera feed.
*/}
- setVideoShow(false)} />
+ setVideoShow(false)} />
>
)
}
-Header.propTypes = {
- // The URL of the ROS web video server
- webVideoServerURL: PropTypes.string.isRequired
-}
export default Header
diff --git a/feedingwebapp/src/Pages/Header/LiveVideoModal.jsx b/feedingwebapp/src/Pages/Header/LiveVideoModal.jsx
index a46c95b7..23f92c97 100644
--- a/feedingwebapp/src/Pages/Header/LiveVideoModal.jsx
+++ b/feedingwebapp/src/Pages/Header/LiveVideoModal.jsx
@@ -49,7 +49,8 @@ function LiveVideoModal(props) {
+ return
}
case MEAL_STATE.U_PlateLocator: {
- return
+ return
}
case MEAL_STATE.R_BiteAcquisition: {
/**
diff --git a/feedingwebapp/src/Pages/Home/Home.jsx b/feedingwebapp/src/Pages/Home/Home.jsx
--- a/feedingwebapp/src/Pages/Home/Home.jsx
+++ b/feedingwebapp/src/Pages/Home/Home.jsx
@@ -153,7 +152,7 @@ function Home(props) {
)
}
case MEAL_STATE.R_DetectingFace: {
- return
+ return
}
case MEAL_STATE.R_MovingToMouth: {
/**
@@ -254,7 +253,6 @@ function Home(props) {
}, [
mealState,
props.debug,
- props.webVideoServerURL,
biteAcquisitionActionInput,
moveAbovePlateActionInput,
moveToMouthActionInput,
@@ -278,9 +276,7 @@ Home.propTypes = {
* Whether to run it in debug mode (e.g., if you aren't simultaneously running
* the robot) or not
*/
- debug: PropTypes.bool,
- // The URL of the web video server
- webVideoServerURL: PropTypes.string.isRequired
+ debug: PropTypes.bool
}
export default Home
diff --git a/feedingwebapp/src/Pages/Home/MealStates/BiteSelection.jsx b/feedingwebapp/src/Pages/Home/MealStates/BiteSelection.jsx
index 0fa9b1ab..91659c9f 100644
--- a/feedingwebapp/src/Pages/Home/MealStates/BiteSelection.jsx
+++ b/feedingwebapp/src/Pages/Home/MealStates/BiteSelection.jsx
@@ -13,10 +13,7 @@ import { useROS, createROSActionClient, callROSAction, destroyActionClient } fro
import { useWindowSize, convertRemToPixels } from '../../../helpers'
import MaskButton from '../../../buttons/MaskButton'
import {
- REALSENSE_WIDTH,
- REALSENSE_HEIGHT,
ROS_ACTIONS_NAMES,
- CAMERA_FEED_TOPIC,
ROS_ACTION_STATUS_CANCEL_GOAL,
ROS_ACTION_STATUS_EXECUTE,
ROS_ACTION_STATUS_SUCCEED,
@@ -328,20 +325,13 @@ const BiteSelection = (props) => {
let maskScaleFactor = Math.min(widthScaleFactor, heightScaleFactor)
// maskScaleFactor = Math.min(maskScaleFactor, 1.0)
- // Get the URL of the image based on the scale factor
- let imgSize = {
- width: Math.round(REALSENSE_WIDTH * maskScaleFactor),
- height: Math.round(REALSENSE_HEIGHT * maskScaleFactor)
- }
- let imgSrc = `${props.webVideoServerURL}/stream?topic=${CAMERA_FEED_TOPIC}&width=${imgSize.width}&height=${imgSize.height}&type=ros_compressed`
return (
{actionResult.detected_items.map((detected_item, i) => (
{
)
}
}
- }, [actionStatus, actionResult, foodItemClicked, isPortrait, windowSize, props.webVideoServerURL, margin])
+ }, [actionStatus, actionResult, foodItemClicked, isPortrait, windowSize, margin])
/** Get the button for continue without acquiring bite
*
@@ -480,12 +470,12 @@ const BiteSelection = (props) => {
style={{
flex: 9,
alignItems: 'center',
+ justifyContent: 'center',
width: '100%',
height: '100%'
}}
>
{
actionStatusText,
renderMaskButtons,
skipAcquisisitionButton,
- props.webVideoServerURL,
videoParentRef,
imageClicked,
props.debug,
@@ -591,9 +580,7 @@ BiteSelection.propTypes = {
* Whether to run it in debug mode (e.g., if you aren't simultaneously running
* the robot) or not
*/
- debug: PropTypes.bool.isRequired,
- // The URL of the web video server
- webVideoServerURL: PropTypes.string.isRequired
+ debug: PropTypes.bool.isRequired
}
export default BiteSelection
diff --git a/feedingwebapp/src/Pages/Home/MealStates/DetectingFace.jsx b/feedingwebapp/src/Pages/Home/MealStates/DetectingFace.jsx
index 8dc12e6e..869f1a33 100644
--- a/feedingwebapp/src/Pages/Home/MealStates/DetectingFace.jsx
+++ b/feedingwebapp/src/Pages/Home/MealStates/DetectingFace.jsx
@@ -3,9 +3,6 @@ import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import Button from 'react-bootstrap/Button'
import { useMediaQuery } from 'react-responsive'
import { View } from 'react-native'
-// PropTypes is used to validate that the used props are in fact passed to this
-// Component
-import PropTypes from 'prop-types'
// Local Imports
import { useROS, createROSService, createROSServiceRequest, subscribeToROSTopic, unsubscribeFromROSTopic } from '../../../ros/ros_helpers'
@@ -26,7 +23,7 @@ import VideoFeed from '../VideoFeed'
* configuration. It displays the output of face detection, and automatically
* moves on to `R_MovingToMouth` when a face is detected.
*/
-const DetectingFace = (props) => {
+const DetectingFace = () => {
// Keep track of whether a mouth has been detected or not
const [mouthDetected, setMouthDetected] = useState(false)
// Get the relevant global variables
@@ -203,14 +200,12 @@ const DetectingFace = (props) => {
}}
>
@@ -238,7 +233,6 @@ const DetectingFace = (props) => {
mouthDetected,
moveToMouthCallback,
moveToMouthImage,
- props.webVideoServerURL,
textFontSize,
buttonHeight,
buttonWidth,
@@ -251,14 +245,4 @@ const DetectingFace = (props) => {
// Render the component
return fullPageView()
}
-DetectingFace.propTypes = {
- /**
- * Whether to run it in debug mode (e.g., if you aren't simulatenously running
- * the robot) or not
- */
- debug: PropTypes.bool.isRequired,
- // The URL of the web video server
- webVideoServerURL: PropTypes.string.isRequired
-}
-
export default DetectingFace
diff --git a/feedingwebapp/src/Pages/Home/MealStates/PlateLocator.jsx b/feedingwebapp/src/Pages/Home/MealStates/PlateLocator.jsx
index bcb2dd93..ce2611ab 100644
--- a/feedingwebapp/src/Pages/Home/MealStates/PlateLocator.jsx
+++ b/feedingwebapp/src/Pages/Home/MealStates/PlateLocator.jsx
@@ -3,9 +3,6 @@ import React, { useCallback, useMemo, useRef } from 'react'
import Button from 'react-bootstrap/Button'
import { useMediaQuery } from 'react-responsive'
import { View } from 'react-native'
-// PropTypes is used to validate that the used props are in fact passed to this
-// Component
-import PropTypes from 'prop-types'
// Local Imports
import '../Home.css'
@@ -20,7 +17,7 @@ import VideoFeed from '../VideoFeed'
* the user to teleoperate the robot with Cartesian Control until the plate
* is satisfactorily in view.
*/
-const PlateLocator = (props) => {
+const PlateLocator = () => {
// Get the relevant global variables
const setMealState = useGlobalState((state) => state.setMealState)
// Get robot motion flag for plate locator
@@ -179,14 +176,7 @@ const PlateLocator = (props) => {
return (
-
+
{directionalArrows()}
@@ -195,9 +185,5 @@ const PlateLocator = (props) => {
)
}
-PlateLocator.propTypes = {
- // The URL of the web video server
- webVideoServerURL: PropTypes.string.isRequired
-}
export default PlateLocator
diff --git a/feedingwebapp/src/Pages/Home/VideoFeed.jsx b/feedingwebapp/src/Pages/Home/VideoFeed.jsx
index 71299504..266da69a 100644
--- a/feedingwebapp/src/Pages/Home/VideoFeed.jsx
+++ b/feedingwebapp/src/Pages/Home/VideoFeed.jsx
@@ -1,11 +1,12 @@
// React Imports
-import React, { useCallback, useEffect, useState } from 'react'
+import React, { useCallback, useEffect, useRef, useState } from 'react'
// PropTypes is used to validate that the used props are in fact passed to this Component
import PropTypes from 'prop-types'
// Local Imports
import { CAMERA_FEED_TOPIC, REALSENSE_WIDTH, REALSENSE_HEIGHT } from '../Constants'
import { useWindowSize } from '../../helpers'
+import { useROS, subscribeToROSTopic, unsubscribeFromROSTopic } from '../../ros/ros_helpers'
/**
* Takes in an imageWidth and imageHeight, and returns a width and height that
@@ -76,6 +77,53 @@ const VideoFeed = (props) => {
const [imgWidth, setImgWidth] = useState(0)
const [imgHeight, setImgHeight] = useState(0)
const [scaleFactor, setScaleFactor] = useState(0.0)
+ // Store the latest image to render
+ const [latestRenderedImg, setLatestRenderedImg] = useState(null)
+
+ /**
+ * Connect to ROS, if not already connected. Put this in useRef to avoid
+ * re-connecting upon re-renders.
+ */
+ const ros = useRef(useROS().ros)
+ // Store the latest image timestamp in a ref to avoid re-generating cameraCallback
+ const latestImageTimestamp = useRef(null)
+
+ /**
+ * Callback for each incoming camera image: stores the image to render,
+ * throttled to at most props.updateRateHz.
+ */
+ const cameraCallback = useCallback(
+ (message) => {
+ // console.log('Got camera message', message)
+ let currTime = message.header.stamp.sec + message.header.stamp.nanosec * 1e-9
+ if (!latestImageTimestamp.current || props.updateRateHz <= 0) {
+ setLatestRenderedImg(message)
+ // Record the render time; without this, throttling would never engage
+ latestImageTimestamp.current = currTime
+ } else {
+ if (currTime - latestImageTimestamp.current >= 1.0 / props.updateRateHz) {
+ setLatestRenderedImg(message)
+ latestImageTimestamp.current = currTime
+ }
+ }
+ },
+ [latestImageTimestamp, setLatestRenderedImg, props.updateRateHz]
+ )
+ useEffect(() => {
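+ // rosbridge delivers CompressedImage.data as a base64-encoded JPEG string,
+ // which can be rendered directly in an img element via a data URL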
+ console.log('subscribing to img topic')
+ let topic = subscribeToROSTopic(ros.current, props.topic, 'sensor_msgs/CompressedImage', cameraCallback)
+ const cleanup = () => {
+ console.log('unsubscribing from img topic')
+ unsubscribeFromROSTopic(topic, cameraCallback)
+ }
+ window.addEventListener('beforeunload', cleanup)
+ /**
+ * In practice, because the values passed in the second argument of
+ * useEffect will not change on re-renders, this return statement will
+ * only be called when the component unmounts.
+ */
+ return () => {
+ window.removeEventListener('beforeunload', cleanup)
+ cleanup()
+ }
+ }, [cameraCallback, props.topic])
// Callback to resize the image based on the parent width and height
const resizeImage = useCallback(() => {
@@ -145,7 +193,7 @@ const VideoFeed = (props) => {
// Render the component
return (
{
VideoFeed.propTypes = {
// The ref to the parent DOM element. Null if the component is not yet mounted
parent: PropTypes.object.isRequired,
- // The URL of the video feed
- webVideoServerURL: PropTypes.string.isRequired,
// The margins around the video feed
marginTop: PropTypes.number.isRequired,
marginBottom: PropTypes.number.isRequired,
@@ -170,8 +216,8 @@ VideoFeed.propTypes = {
marginRight: PropTypes.number.isRequired,
// The topic of the video feed
topic: PropTypes.string.isRequired,
- // The type of the video feed
- type: PropTypes.string.isRequired,
+ // The rate at which to update the video feed, in Hz
+ updateRateHz: PropTypes.number.isRequired,
/**
* An optional callback function for when the user clicks on the video feed.
* This function should take in two parameters, `x` and `y`, which are the
@@ -182,7 +228,7 @@ VideoFeed.propTypes = {
}
VideoFeed.defaultProps = {
topic: CAMERA_FEED_TOPIC,
- type: 'ros_compressed'
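+ // Render at most 10 frames/sec by default; a value <= 0 renders every frame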
+ updateRateHz: 10
}
export default VideoFeed
diff --git a/feedingwebapp/src/buttons/MaskButton.jsx b/feedingwebapp/src/buttons/MaskButton.jsx
index 862427b7..ae500199 100644
--- a/feedingwebapp/src/buttons/MaskButton.jsx
+++ b/feedingwebapp/src/buttons/MaskButton.jsx
@@ -31,7 +31,6 @@ function MaskButton(props) {
// Get the properties
let buttonSize = props.buttonSize
let imgSrc = props.imgSrc
- let imgSize = props.imgSize
let maskSrc = props.maskSrc
let invertMask = props.invertMask
let maskScaleFactor = props.maskScaleFactor
@@ -67,15 +66,11 @@ function MaskButton(props) {
>
@@ -106,11 +101,6 @@ MaskButton.propTypes = {
}).isRequired,
// The image source URL
imgSrc: PropTypes.string.isRequired,
- // The image size
- imgSize: PropTypes.shape({
- width: PropTypes.number.isRequired,
- height: PropTypes.number.isRequired
- }).isRequired,
// The mask source URL
maskSrc: PropTypes.string.isRequired,
// Whether or not to invert the mask