diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b843d92..814f08a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -27,7 +27,7 @@ python app.py
 
 ### Build executable
 
 ```shell
- pyinstaller ./app.py -n VTS_Fullbody_Tracking-0.1.9 --add-data='models/*:models' --add-data='*.png:.' -F -w
+ pyinstaller ./app.py -n VTS_Fullbody_Tracking-0.1.10 --add-data='models/*:models' --add-data="venv/Lib/site-packages/mediapipe/modules/pose_landmark/*:mediapipe/modules/pose_landmark" --add-data="venv/Lib/site-packages/mediapipe/modules/pose_detection/*:mediapipe/modules/pose_detection" --add-data='*.png:.' -F -w
 ```
 
diff --git a/app.py b/app.py
index 80b6113..55c1cf7 100644
--- a/app.py
+++ b/app.py
@@ -1,5 +1,4 @@
 import mediapipe as mp
-from mediapipe.tasks import python
 import numpy as np
 import pyvts
 import asyncio
@@ -8,13 +7,13 @@
 from info import VERSION
 from plugin.ui import window_tracking_configuration, NIZIMA_LIVE, VTUBE_STUDIO
-from plugin.mediapipe import get_bodyparts_values, MediapipeTracking, LIVE_STREAM, IMAGE
+from plugin.mediapipe import get_bodyparts_values, LIVE_STREAM, IMAGE
 from plugin.vtube_studio import connection_vts, create_parameters_vts, send_paramters_vts
 from plugin.nizima import Nizima
 
 RESULT = None
-
+BG_COLOR = (192, 192, 192) # gray
 
 model_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'models/pose_landmarker_full.task'))
 icon_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'icon.png'))
@@ -30,9 +29,10 @@ async def main(settings):
 
     # ----- MEDIAPIPE: LANDMARKER CONFIGURATION -----------
-    mt = MediapipeTracking(mode=settings['tracking_mode'])
-    options = mt.init_mediapipe_options(model_path)
-    PoseLandmarker = mp.tasks.vision.PoseLandmarker
+    mode = settings['tracking_mode']
+    mp_drawing = mp.solutions.drawing_utils
+    mp_drawing_styles = mp.solutions.drawing_styles
+    mp_pose = mp.solutions.pose
 
     connection = False
     software = settings['software']
@@ -72,7 +72,6 @@ async def main(settings):
     # ---- LIVE TRACKING ----------------
     if connection:
         parameters = None
-        timestamp = 0
 
         # -- Camera connection
         camera_setting = settings['camera_id'] if not settings['camera_url'] else settings['camera_url']
@@ -82,39 +81,57 @@ async def main(settings):
            cv2.waitKey(0)
            cv2.destroyAllWindows()
 
-        #print('========== START LIVE TRACKING =========')
-        with PoseLandmarker.create_from_options(options) as landmarker:
+        with mp_pose.Pose(min_detection_confidence=0.5,min_tracking_confidence=0.5) as pose:
+            #print('========== START LIVE TRACKING =========')
             # -- LOOP Through Video
             while cap.isOpened():
-                ret, frame = cap.read()
+                success, frame = cap.read()
 
-                if ret:
-                    timestamp += 1
-                    # Detect pose landmarks from the current frame
+                if success:
+                    # current frame
                     input_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame)
-                    if mt.mode == LIVE_STREAM:
-                        landmarker.detect_async(input_image, timestamp)
-                        RESULT = mt.result
+
+                    # -- POSE DETECTION ----------------
+                    # Detect pose landmarks from the current frame
+                    if mode == LIVE_STREAM:
+                        frame.flags.writeable = False
+                        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                        RESULT = pose.process(image)
                     else:
-                        RESULT = landmarker.detect(input_image) # IMAGE
+                        # Convert the BGR image to RGB before processing.
+                        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                        RESULT = pose.process(image)
 
                     # Display Image for tracking window
                     image = render_image(input_image, settings['preview_enabled'])
 
+                    if RESULT:
+                        # -- DRAW LANDMARKS --------
                     if RESULT.pose_world_landmarks:
                         # Get coordinates
                         parameters = RESULT
-                        image = mt.draw_landmarks_on_image(image, RESULT)
+                        if mode == LIVE_STREAM:
+                            # Draw the pose annotation on the image.
+                            image.flags.writeable = True
+
+                            # Flip the image horizontally for a selfie-view display.
+                            #cv2.imshow('MediaPipe Pose', cv2.flip(image, 1))
+
+                            # Draw pose landmarks on the image.
+                            mp_drawing.draw_landmarks(
+                                image,
+                                parameters.pose_landmarks,
+                                mp_pose.POSE_CONNECTIONS,
+                                landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
                     else:
                         error_pose_estimation(image)
 
                     # - WINDOW : CAMERA TRACKING
                     cv2.imshow(f'VTS FullBody Tracking {VERSION}', image)
 
-                    # SEND DATA TO VTUBE STUDIO
+                    # SEND DATA
                     if parameters:
                         data = get_bodyparts_values(parameters)
-
                         if software == NIZIMA_LIVE:
                             data = [{"Id": key, "Value": value*10} for key, value in data.items()]
                             await nz.set_live_parameter_values(data)
diff --git a/info.py b/info.py
index 0070705..c515e1c 100644
--- a/info.py
+++ b/info.py
@@ -1,2 +1,2 @@
-VERSION = '0.1.9'
+VERSION = '0.1.10'
 ICON_PATH = "icon.png"
\ No newline at end of file
diff --git a/plugin/mediapipe.py b/plugin/mediapipe.py
index c723b6f..7a60654 100644
--- a/plugin/mediapipe.py
+++ b/plugin/mediapipe.py
@@ -95,74 +95,6 @@ def get_part_from_name(i):
             return part
     raise None
 
-
-class MediapipeTracking:
-    def __init__(self, mode=LIVE_STREAM):
-        self.result = None
-        self.options = None
-        self.mode = mode
-
-    def init_mediapipe_options(self, model_path):
-        # Create a PoseLandmarker object
-        BaseOptions = mp.tasks.BaseOptions
-        PoseLandmarkerOptions = mp.tasks.vision.PoseLandmarkerOptions
-        VisionRunningMode = mp.tasks.vision.RunningMode
-
-        if self.mode == LIVE_STREAM:
-            PoseLandmarkerResult = mp.tasks.vision.PoseLandmarkerResult
-
-            def get_result(result: PoseLandmarkerResult, output_image: mp.Image, timestamp_ms: int):
-                print('pose landmarker result: {}'.format(result))
-                self.result = result
-
-            self.options = PoseLandmarkerOptions(
-                base_options=BaseOptions(model_asset_path=model_path),
-                running_mode=VisionRunningMode.LIVE_STREAM,
-                result_callback=get_result # LIVE_STREAM
-            )
-        else:
-            self.options = PoseLandmarkerOptions(
-                base_options=BaseOptions(model_asset_path=model_path),
-                running_mode=VisionRunningMode.IMAGE,
-            )
-        return self.options
-
-    def pose_detection(self, input_image, timestamp=None):
-        # Detect pose landmarks from the current frame
-        pass
-
-    def draw_landmarks_on_image(self, img, detection_result):
-        """
-        Draw landmarks on the input image.
-
-        :param rgb_image: input image
-        :param detection_result: result of landmark detection
-        :param preview: Whether to display the original image
-        :param annotated: Whether to annotate landmarks with their coordinates
-        :return: Image with landmarks
-        """
-
-        # Fetch the image coordinates of the pose landmarks for drawing the pose on the image
-        pose_landmarks_list = detection_result.pose_landmarks
-
-        # - Loop through the detected poses to visualize
-        for idx in range(len(pose_landmarks_list)):
-            pose_landmarks = pose_landmarks_list[idx]
-
-            # -- Draw the pose landmarks
-            pose_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
-            pose_landmarks_proto.landmark.extend([
-                landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in pose_landmarks
-            ])
-            solutions.drawing_utils.draw_landmarks(
-                img,
-                pose_landmarks_proto,
-                solutions.pose.POSE_CONNECTIONS,
-                solutions.drawing_styles.get_default_pose_landmarks_style())
-
-        return img
-
-
 def get_parameters_names():
     bodyparts_names = [part.name for part in BodyParts]
     # Remove unused parameter names
@@ -185,7 +117,7 @@ def get_bodyparts_values(parameters):
     values = {}
 
     # Get coordinates from hip as midpoint
-    parameters_world = parameters.pose_world_landmarks[0]
+    parameters_world = parameters.pose_world_landmarks.landmark#[0]
 
     # Go through each tracked body part
     for bodypart in BodyParts:
@@ -204,7 +136,7 @@ def get_bodyparts_values(parameters):
             i += 1
 
     # Retrieve coordinates from the image
-    parameters_img = parameters.pose_landmarks[0]
+    parameters_img = parameters.pose_landmarks.landmark
 
     values = calcul_body_position(values, parameters_img)
     values = calcul_hips_position(values, parameters_img)
diff --git a/requirements.txt b/requirements.txt
index a6e6210..26cfaf3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-mediapipe
+mediapipe>=0.10.18
 pyvts
 opencv-python
 numpy
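
For reference, a minimal standalone sketch of the `mp.solutions.pose` capture-and-draw loop that the patched `app.py` relies on. The camera index, window title, exit key, and confidence thresholds here are illustrative assumptions, not values taken from the repository.

```python
import cv2
import mediapipe as mp

mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles

cap = cv2.VideoCapture(0)  # assumed default webcam; the app resolves this from its settings
with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            break

        # The Solutions API expects RGB input; OpenCV delivers BGR frames.
        result = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

        # result.pose_landmarks holds image-space landmarks and
        # result.pose_world_landmarks holds metric 3D landmarks; both expose
        # a .landmark list, which is what get_bodyparts_values() now reads.
        if result.pose_landmarks:
            mp_drawing.draw_landmarks(
                frame,
                result.pose_landmarks,
                mp_pose.POSE_CONNECTIONS,
                landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())

        cv2.imshow('Pose preview', frame)
        if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
            break

cap.release()
cv2.destroyAllWindows()
```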