diff --git a/demos/camera_motion/src/demo.py b/demos/camera_motion/src/demo.py
index 42673dd0..cafef3ca 100644
--- a/demos/camera_motion/src/demo.py
+++ b/demos/camera_motion/src/demo.py
@@ -163,10 +163,10 @@ def run():
         help="Pass this flag to draw the paths of the objects (SLOW)",
     )
     parser.add_argument(
-        "--path-history",
+        "--path-drawer-scale",
         type=int,
-        default=20,
-        help="Length of the paths",
+        default=3,
+        help="Canvas (background) scale relative to the frame size for the AbsolutePaths drawer",
     )
     parser.add_argument(
         "--id-size",
@@ -215,7 +215,7 @@ def run():
         fixed_camera = FixedCamera(scale=args.fixed_camera_scale)

     if args.draw_paths:
-        path_drawer = AbsolutePaths(max_history=args.path_history, thickness=2)
+        path_drawer = AbsolutePaths(scale=args.path_drawer_scale)

     video = Video(input_path=input_path)
     show_or_write = (
diff --git a/norfair/drawing/path.py b/norfair/drawing/path.py
index 80294413..c9e80410 100644
--- a/norfair/drawing/path.py
+++ b/norfair/drawing/path.py
@@ -1,8 +1,10 @@
 from collections import defaultdict
 from typing import Callable, Optional, Sequence, Tuple

+import cv2
 import numpy as np

+from norfair.camera_motion import HomographyTransformation, TranslationTransformation
 from norfair.drawing.color import Palette
 from norfair.drawing.drawer import Drawer
 from norfair.tracker import TrackedObject
@@ -128,17 +130,19 @@ class AbsolutePaths:

     Works just like [`Paths`][norfair.drawing.Paths] but supports camera motion.

-    !!! warning
-        This drawer is not optimized so it can be stremely slow. Performance degrades linearly with
-        `max_history * number_of_tracked_objects`.
-
     Parameters
     ----------
     get_points_to_draw : Optional[Callable[[np.array], np.array]], optional
-        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])
-        and returns a list of points for which we want to draw their paths.
-
-        By default it is the mean point of all the points in the tracker.
+        Function that takes a [`TrackedObject`][norfair.tracker.TrackedObject] and returns the list of points
+        (in the absolute coordinate frame) whose paths we want to draw.
+
+        By default, if the object has live points, we average the points with the greatest height (the 'feet').
+    scale : Optional[float], optional
+        Norfair draws the paths on a background canvas in absolute coordinates. This determines how much
+        bigger that canvas is relative to the original frame.
+        After the camera moves, part of the frame may fall outside the canvas if the scale is not large enough.
+    attenuation : Optional[float], optional
+        How fast old points in the path are forgotten (0 = draw all points, 1 = draw only the most recent point).
     thickness : Optional[int], optional
         Thickness of the circles representing the paths of interest.
     color : Optional[Tuple[int, int, int]], optional
         Color of the circles representing the paths of interest.
     radius : Optional[int], optional
         Radius of the circles representing the paths of interest.
-    max_history : int, optional
-        Number of past points to include in the path. High values make the drawing slower
+    path_blend_factor : Optional[float], optional
+        When blending the frame and the canvas (with the paths overdrawn), we do:
+        `frame = path_blend_factor * canvas + frame_blend_factor * frame`.
+    frame_blend_factor : Optional[float], optional
+        Weight of the original frame in the blend described above.

     Examples
     --------
@@ -163,70 +173,156 @@ class AbsolutePaths:

     def __init__(
         self,
+        scale: float = 3,
+        attenuation: float = 0.05,
         get_points_to_draw: Optional[Callable[[np.array], np.array]] = None,
         thickness: Optional[int] = None,
         color: Optional[Tuple[int, int, int]] = None,
         radius: Optional[int] = None,
-        max_history=20,
+        path_blend_factor=2,
+        frame_blend_factor=1,
     ):
+        self.scale = scale
+        self._background = None
+        self._attenuation_factor = 1 - attenuation
         if get_points_to_draw is None:

-            def get_points_to_draw(points):
-                return [np.mean(np.array(points), axis=0)]
+            def get_points_to_draw(obj):
+                # don't draw the object if we haven't seen it recently
+                if not obj.live_points.any():
+                    return []
+
+                # take the points with the greatest y coordinate,
+                # i.e. the lowest points in the image (the 'feet')
+                points_height = obj.estimate[:, 1]
+                feet_indices = np.argwhere(points_height == points_height.max())
+                # average their absolute positions, falling back to relative
+                # coordinates if the tracker has no coordinate transformation
+                try:
+                    return np.mean(
+                        obj.get_estimate(absolute=True)[feet_indices], axis=0
+                    )
+                except ValueError:
+                    return np.mean(obj.estimate[feet_indices], axis=0)

         self.get_points_to_draw = get_points_to_draw
-
         self.radius = radius
         self.thickness = thickness
         self.color = color
-        self.past_points = defaultdict(lambda: [])
-        self.max_history = max_history
-        self.alphas = np.linspace(0.99, 0.01, max_history)
+        self.path_blend_factor = path_blend_factor
+        self.frame_blend_factor = frame_blend_factor

     def draw(self, frame, tracked_objects, coord_transform=None):
+        """
+        Tracked objects know their coordinates in two frames: the current (relative)
+        camera frame and a fixed absolute frame. The `frame` passed here can be the
+        current camera frame, or any other perspective you want to draw the paths on.
+
+        Initialization:
+            1. `top_left` is the coordinate, inside the background canvas, where the
+               corner of the first frame is placed.
+        Logic:
+            1. Draw `track.get_estimate(absolute=True) + top_left` on the background.
+            2. Warp the background with the composition
+               `coord_transform.abs_to_rel o minus_top_left_translation`.
+               If `coord_transform` is None, only apply `minus_top_left_translation`.
+            3. Crop `[:frame.height, :frame.width]` from the result.
+            4. Blend that over `frame`.
+
+        Remark:
+            In any case, `coord_transform` must be the coordinate transformation between
+            the tracker's absolute coordinates (as abs) and `frame`'s coordinates (as rel).
+        """
+
+        # initialize background if necessary
+        if self._background is None:
+            original_size = (
+                frame.shape[1],
+                frame.shape[0],
+            )  # OpenCV format is (width, height)
+
+            scaled_size = tuple(
+                (np.array(original_size) * np.array(self.scale)).round().astype(int)
+            )
+            self._background = np.zeros(
+                [scaled_size[1], scaled_size[0], frame.shape[-1]],
+                frame.dtype,
+            )
+
+            # this is the (x, y) corner of the first passed frame inside the
+            # background; shape[:2] is (height, width), hence the [::-1]
+            self.top_left = (
+                np.array(self._background.shape[:2]) // 2
+                - np.array(frame.shape[:2]) // 2
+            )[::-1]
+        else:
+            # fade the existing paths so old points are gradually forgotten
+            self._background = (self._background * self._attenuation_factor).astype(
+                frame.dtype
+            )

         frame_scale = frame.shape[0] / 100

         if self.radius is None:
             self.radius = int(max(frame_scale * 0.7, 1))
         if self.thickness is None:
             self.thickness = int(max(frame_scale / 7, 1))

-        for obj in tracked_objects:
-            if not obj.live_points.any():
-                continue
-
+        # draw each point on the background, translated by top_left
+        for obj in tracked_objects:
             if self.color is None:
                 color = Palette.choose_color(obj.id)
             else:
                 color = self.color

-            points_to_draw = self.get_points_to_draw(obj.get_estimate(absolute=True))
+            points_to_draw = self.get_points_to_draw(obj)

-            for point in coord_transform.abs_to_rel(points_to_draw):
+            for point in points_to_draw:
                 Drawer.circle(
-                    frame,
-                    position=tuple(point.astype(int)),
+                    self._background,
+                    position=tuple((point + self.top_left).astype(int)),
                     radius=self.radius,
                     color=color,
                     thickness=self.thickness,
                 )

-            last = points_to_draw
-            for i, past_points in enumerate(self.past_points[obj.id]):
-                overlay = frame.copy()
-                last = coord_transform.abs_to_rel(last)
-                for j, point in enumerate(coord_transform.abs_to_rel(past_points)):
-                    Drawer.line(
-                        overlay,
-                        tuple(last[j].astype(int)),
-                        tuple(point.astype(int)),
-                        color=color,
-                        thickness=self.thickness,
-                    )
-                last = past_points
-
-                alpha = self.alphas[i]
-                frame = Drawer.alpha_blend(overlay, frame, alpha=alpha)
-            self.past_points[obj.id].insert(0, points_to_draw)
-            self.past_points[obj.id] = self.past_points[obj.id][: self.max_history]
+        # warp the background with the composition abs_to_rel o minus_top_left_translation,
+        # then crop [:height, :width] to obtain the overlay for the current frame
+        if isinstance(coord_transform, HomographyTransformation):
+            minus_top_left_translation = np.array(
+                [[1, 0, -self.top_left[0]], [0, 1, -self.top_left[1]], [0, 0, 1]]
+            )
+            full_transformation = (
+                coord_transform.homography_matrix @ minus_top_left_translation
+            )
+            background_size_frame = cv2.warpPerspective(
+                self._background,
+                full_transformation,
+                tuple(frame.shape[:2][::-1]),
+                flags=cv2.INTER_LINEAR,
+                borderMode=cv2.BORDER_CONSTANT,
+                borderValue=(0, 0, 0),
+            )
+        elif isinstance(coord_transform, TranslationTransformation):
+            full_transformation = np.array(
+                [
+                    [1, 0, coord_transform.movement_vector[0] - self.top_left[0]],
+                    [0, 1, coord_transform.movement_vector[1] - self.top_left[1]],
+                ]
+            )
+            background_size_frame = cv2.warpAffine(
+                self._background,
+                full_transformation,
+                tuple(frame.shape[:2][::-1]),
+                flags=cv2.INTER_LINEAR,
+                borderMode=cv2.BORDER_CONSTANT,
+                borderValue=(0, 0, 0),
+            )
+        else:
+            background_size_frame = self._background[
+                self.top_left[1] :, self.top_left[0] :
+            ]
+            background_size_frame = background_size_frame[
+                : frame.shape[0], : frame.shape[1]
+            ]
+
+        frame = cv2.addWeighted(
+            frame,
+            self.frame_blend_factor,
+            background_size_frame,
+            self.path_blend_factor,
+            0.0,
+        )
         return frame
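
For context, this is roughly how the reworked drawer is driven end to end. A minimal sketch, assuming norfair's public `Video`, `Tracker` and `MotionEstimator` APIs; `detect()` is a hypothetical placeholder for any detector that returns norfair `Detection` objects:

```python
from norfair import Tracker, Video
from norfair.camera_motion import MotionEstimator
from norfair.drawing.path import AbsolutePaths

video = Video(input_path="video.mp4")
tracker = Tracker(distance_function="euclidean", distance_threshold=100)
motion_estimator = MotionEstimator()
path_drawer = AbsolutePaths(scale=3, attenuation=0.05)

for frame in video:
    detections = detect(frame)  # hypothetical detector
    # maps the tracker's absolute coordinates to this frame's coordinates
    coord_transformations = motion_estimator.update(frame)
    tracked_objects = tracker.update(
        detections=detections, coord_transformations=coord_transformations
    )
    frame = path_drawer.draw(
        frame, tracked_objects, coord_transform=coord_transformations
    )
    video.write(frame)
```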
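The core of the new `draw` is step 2 of its docstring: a single warp whose matrix is the abs-to-rel homography composed with the translation that undoes the canvas offset. A quick numpy sanity check (illustrative values, not from the PR) that the composed matrix `H @ T` equals applying the translation first and the homography second:

```python
import numpy as np

top_left = np.array([640, 360])    # assumed canvas offset (x, y)
H = np.array([[1.02, 0.01, -5.0],  # assumed abs -> rel homography
              [0.00, 0.98, 12.0],
              [0.00, 0.00, 1.0]])
T = np.array([[1, 0, -top_left[0]],
              [0, 1, -top_left[1]],
              [0, 0, 1]])

p_canvas = np.array([700.0, 400.0, 1.0])  # a pixel in the background canvas

# two steps: undo the top_left offset, then map absolute -> relative
p_rel_two_steps = H @ (T @ p_canvas)
# one step: the single matrix handed to cv2.warpPerspective in this PR
p_rel_one_step = (H @ T) @ p_canvas

assert np.allclose(p_rel_two_steps, p_rel_one_step)
print(p_rel_one_step[:2] / p_rel_one_step[2])  # pixel in the output frame
```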
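Because the background is multiplied by `1 - attenuation` once per frame, path intensity now decays exponentially instead of being cut off after `max_history` points. A small helper (an assumption about how one might tune the new parameter, not part of the PR) to translate `attenuation` into a half-life in frames:

```python
import math

def frames_to_half_intensity(attenuation: float) -> float:
    # a drawn point keeps (1 - attenuation) ** n of its intensity after n frames
    return math.log(0.5) / math.log(1 - attenuation)

print(frames_to_half_intensity(0.05))  # ~13.5 frames at the default value
```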