From a1aade78e32c0ae85c31e60b6e2d3d27bf2c9383 Mon Sep 17 00:00:00 2001
From: Sree Praveen Challa <130534146+praveenarjun@users.noreply.github.com>
Date: Wed, 9 Oct 2024 13:50:30 +0000
Subject: [PATCH 1/2] Feature: Using Computer Vision to Detect Bad Posture
---
.../Media Pipe Pose checkpoint.ipynb | 463 ++++++++++++++++++
1 file changed, 463 insertions(+)
create mode 100644 Computer Vision/Bad Posture Detection MediaPipe/Media Pipe Pose checkpoint.ipynb
diff --git a/Computer Vision/Bad Posture Detection MediaPipe/Media Pipe Pose checkpoint.ipynb b/Computer Vision/Bad Posture Detection MediaPipe/Media Pipe Pose checkpoint.ipynb
new file mode 100644
index 00000000..0345579c
--- /dev/null
+++ b/Computer Vision/Bad Posture Detection MediaPipe/Media Pipe Pose checkpoint.ipynb
@@ -0,0 +1,463 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 0. Install and Import Dependencies"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!pip install mediapipe opencv-python"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import cv2\n",
+ "import mediapipe as mp\n",
+ "import numpy as np\n",
+ "mp_drawing = mp.solutions.drawing_utils\n",
+ "mp_pose = mp.solutions.pose"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# VIDEO FEED\n",
+ "cap = cv2.VideoCapture(0)\n",
+ "while cap.isOpened():\n",
+    "    ret, frame = cap.read()\n",
+    "    # Stop when no frame is returned (e.g. the camera is unavailable)\n",
+    "    if not ret:\n",
+    "        break\n",
+ " cv2.imshow('Mediapipe Feed', frame)\n",
+ " \n",
+ " if cv2.waitKey(10) & 0xFF == ord('q'):\n",
+ " break\n",
+ " \n",
+ "cap.release()\n",
+ "cv2.destroyAllWindows()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 1. Make Detections"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "cap = cv2.VideoCapture(0)\n",
+ "## Setup mediapipe instance\n",
+ "with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:\n",
+ " while cap.isOpened():\n",
+    "        ret, frame = cap.read()\n",
+    "        if not ret:\n",
+    "            break\n",
+ " \n",
+ " # Recolor image to RGB\n",
+ " image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
+ " image.flags.writeable = False\n",
+ " \n",
+ " # Make detection\n",
+ " results = pose.process(image)\n",
+ " \n",
+ " # Recolor back to BGR\n",
+ " image.flags.writeable = True\n",
+ " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n",
+ " \n",
+ " # Render detections\n",
+ " mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,\n",
+ " mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=2), \n",
+ " mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2) \n",
+ " ) \n",
+ " \n",
+ " cv2.imshow('Mediapipe Feed', image)\n",
+ "\n",
+ " if cv2.waitKey(10) & 0xFF == ord('q'):\n",
+ " break\n",
+ "\n",
+ " cap.release()\n",
+ " cv2.destroyAllWindows()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "mp_drawing.DrawingSpec??"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 2. Determining Joints"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "cap = cv2.VideoCapture(0)\n",
+ "## Setup mediapipe instance\n",
+ "with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:\n",
+ " while cap.isOpened():\n",
+    "        ret, frame = cap.read()\n",
+    "        if not ret:\n",
+    "            break\n",
+ " \n",
+ " # Recolor image to RGB\n",
+ " image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
+ " image.flags.writeable = False\n",
+ " \n",
+ " # Make detection\n",
+ " results = pose.process(image)\n",
+ " \n",
+ " # Recolor back to BGR\n",
+ " image.flags.writeable = True\n",
+ " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n",
+ " \n",
+ " # Extract landmarks\n",
+ " try:\n",
+ " landmarks = results.pose_landmarks.landmark\n",
+ " print(landmarks)\n",
+    "        except AttributeError:\n",
+    "            # No pose landmarks were detected in this frame\n",
+    "            pass\n",
+ " \n",
+ " \n",
+ " # Render detections\n",
+ " mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,\n",
+ " mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=2), \n",
+ " mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2) \n",
+ " ) \n",
+ " \n",
+ " cv2.imshow('Mediapipe Feed', image)\n",
+ "\n",
+ " if cv2.waitKey(10) & 0xFF == ord('q'):\n",
+ " break\n",
+ "\n",
+ " cap.release()\n",
+ " cv2.destroyAllWindows()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "len(landmarks)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for lndmrk in mp_pose.PoseLandmark:\n",
+ " print(lndmrk)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].visibility"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 3. Calculate Angles"
+ ]
+ },
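+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A quick note on the math used below: `calculate_angle(a, b, c)` returns the angle at the middle point $b$ between the segments $ba$ and $bc$, computed from the landmark coordinates as $\\theta = \\left| \\operatorname{atan2}(c_y - b_y,\\, c_x - b_x) - \\operatorname{atan2}(a_y - b_y,\\, a_x - b_x) \\right| \\cdot \\tfrac{180}{\\pi}$ and folded into the range $[0^\\circ, 180^\\circ]$."
+   ]
+  },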
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def calculate_angle(a,b,c):\n",
+ " a = np.array(a) # First\n",
+ " b = np.array(b) # Mid\n",
+ " c = np.array(c) # End\n",
+ " \n",
+ " radians = np.arctan2(c[1]-b[1], c[0]-b[0]) - np.arctan2(a[1]-b[1], a[0]-b[0])\n",
+ " angle = np.abs(radians*180.0/np.pi)\n",
+ " \n",
+ " if angle >180.0:\n",
+ " angle = 360-angle\n",
+ " \n",
+ " return angle "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "shoulder = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x,landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y]\n",
+ "elbow = [landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].x,landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].y]\n",
+ "wrist = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "shoulder, elbow, wrist"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "calculate_angle(shoulder, elbow, wrist)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Convert the normalized elbow coordinate to pixel coordinates (assumes a 640x480 frame)\n",
+    "tuple(np.multiply(elbow, [640, 480]).astype(int))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "cap = cv2.VideoCapture(0)\n",
+ "## Setup mediapipe instance\n",
+ "with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:\n",
+ " while cap.isOpened():\n",
+    "        ret, frame = cap.read()\n",
+    "        if not ret:\n",
+    "            break\n",
+ " \n",
+ " # Recolor image to RGB\n",
+ " image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
+ " image.flags.writeable = False\n",
+ " \n",
+ " # Make detection\n",
+ " results = pose.process(image)\n",
+ " \n",
+ " # Recolor back to BGR\n",
+ " image.flags.writeable = True\n",
+ " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n",
+ " \n",
+ " # Extract landmarks\n",
+ " try:\n",
+ " landmarks = results.pose_landmarks.landmark\n",
+ " \n",
+ " # Get coordinates\n",
+ " shoulder = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x,landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y]\n",
+ " elbow = [landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].x,landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].y]\n",
+ " wrist = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]\n",
+ " \n",
+ " # Calculate angle\n",
+ " angle = calculate_angle(shoulder, elbow, wrist)\n",
+ " \n",
+ " # Visualize angle\n",
+ " cv2.putText(image, str(angle), \n",
+ " tuple(np.multiply(elbow, [640, 480]).astype(int)), \n",
+ " cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA\n",
+ " )\n",
+ " \n",
+    "        except AttributeError:\n",
+    "            # No pose landmarks were detected in this frame\n",
+    "            pass\n",
+ " \n",
+ " \n",
+ " # Render detections\n",
+ " mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,\n",
+ " mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=2), \n",
+ " mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2) \n",
+ " ) \n",
+ " \n",
+ " cv2.imshow('Mediapipe Feed', image)\n",
+ "\n",
+ " if cv2.waitKey(10) & 0xFF == ord('q'):\n",
+ " break\n",
+ "\n",
+ " cap.release()\n",
+ " cv2.destroyAllWindows()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 4. Curl Counter"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "cap = cv2.VideoCapture(0)\n",
+ "\n",
+ "# Curl counter variables\n",
+ "counter = 0 \n",
+ "stage = None\n",
+ "\n",
+ "## Setup mediapipe instance\n",
+ "with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:\n",
+ " while cap.isOpened():\n",
+    "        ret, frame = cap.read()\n",
+    "        if not ret:\n",
+    "            break\n",
+ " \n",
+ " # Recolor image to RGB\n",
+ " image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
+ " image.flags.writeable = False\n",
+ " \n",
+ " # Make detection\n",
+ " results = pose.process(image)\n",
+ " \n",
+ " # Recolor back to BGR\n",
+ " image.flags.writeable = True\n",
+ " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n",
+ " \n",
+ " # Extract landmarks\n",
+ " try:\n",
+ " landmarks = results.pose_landmarks.landmark\n",
+ " \n",
+ " # Get coordinates\n",
+ " shoulder = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x,landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y]\n",
+ " elbow = [landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].x,landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].y]\n",
+ " wrist = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]\n",
+ " \n",
+ " # Calculate angle\n",
+ " angle = calculate_angle(shoulder, elbow, wrist)\n",
+ " \n",
+ " # Visualize angle\n",
+ " cv2.putText(image, str(angle), \n",
+ " tuple(np.multiply(elbow, [640, 480]).astype(int)), \n",
+ " cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA\n",
+ " )\n",
+ " \n",
+ " # Curl counter logic\n",
+ " if angle > 160:\n",
+ " stage = \"down\"\n",
+ " if angle < 30 and stage =='down':\n",
+ " stage=\"up\"\n",
+ " counter +=1\n",
+ " print(counter)\n",
+ " \n",
+    "        except AttributeError:\n",
+    "            # No pose landmarks were detected in this frame\n",
+    "            pass\n",
+ " \n",
+ " # Render curl counter\n",
+ " # Setup status box\n",
+ " cv2.rectangle(image, (0,0), (225,73), (245,117,16), -1)\n",
+ " \n",
+ " # Rep data\n",
+ " cv2.putText(image, 'REPS', (15,12), \n",
+ " cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1, cv2.LINE_AA)\n",
+ " cv2.putText(image, str(counter), \n",
+ " (10,60), \n",
+ " cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2, cv2.LINE_AA)\n",
+ " \n",
+ " # Stage data\n",
+ " cv2.putText(image, 'STAGE', (65,12), \n",
+ " cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1, cv2.LINE_AA)\n",
+ " cv2.putText(image, stage, \n",
+ " (60,60), \n",
+ " cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2, cv2.LINE_AA)\n",
+ " \n",
+ " \n",
+ " # Render detections\n",
+ " mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,\n",
+ " mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=2), \n",
+ " mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2) \n",
+ " ) \n",
+ " \n",
+ " cv2.imshow('Mediapipe Feed', image)\n",
+ "\n",
+ " if cv2.waitKey(10) & 0xFF == ord('q'):\n",
+ " break\n",
+ "\n",
+ " cap.release()\n",
+ " cv2.destroyAllWindows()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "mediapipe",
+ "language": "python",
+ "name": "mediapipe"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
From 38b3c026d0c84e1ba83ddeb2fc0a42d9cfd0b744 Mon Sep 17 00:00:00 2001
From: Sree Praveen Challa <130534146+praveenarjun@users.noreply.github.com>
Date: Wed, 9 Oct 2024 18:35:50 +0000
Subject: [PATCH 2/2] Update the README
---
.../Bad Posture Detection MediaPipe/README.md | 53 +++++++++++++++++++
1 file changed, 53 insertions(+)
create mode 100644 Computer Vision/Bad Posture Detection MediaPipe/README.md
diff --git a/Computer Vision/Bad Posture Detection MediaPipe/README.md b/Computer Vision/Bad Posture Detection MediaPipe/README.md
new file mode 100644
index 00000000..7bb795e5
--- /dev/null
+++ b/Computer Vision/Bad Posture Detection MediaPipe/README.md
@@ -0,0 +1,53 @@
+# Bad Posture Detection with MediaPipe
+
+This GitHub repository contains a posture detection program that uses [YOLOv5](https://github.com/ultralytics/yolov5), an advanced object detection algorithm, together with MediaPipe to detect and classify lateral sitting postures. The program analyzes the user's sitting posture in real time and provides feedback on whether the posture is good or bad based on predefined criteria; a minimal sketch of this idea follows the feature list below. The goal of this project is to promote healthy sitting habits and prevent the health issues associated with poor posture.
+
+Key Features:
+
+* YOLOv5: The program leverages YOLOv5, an object detection algorithm, to accurately detect the user's
+  sitting posture from a webcam feed.
+* Real-time Posture Detection: The program provides real-time feedback on the user's sitting posture, making it suitable
+ for use in applications such as office ergonomics, fitness, and health monitoring.
+* Good vs. Bad Posture Classification: The program uses a pre-trained model to classify the detected posture as good or
+ bad, enabling users to improve their posture and prevent potential health issues associated with poor sitting habits.
+* Open-source: The program is released under an open-source license, allowing users to access the source code, modify
+ it, and contribute to the project.
+
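+As a rough illustration of the good/bad decision, here is a minimal sketch built on MediaPipe Pose landmarks. It is **not** the repository's trained model or its actual criteria: the ear-over-shoulder rule and the 40° threshold are assumptions chosen only for demonstration.
+
+```python
+import math
+
+import cv2
+import mediapipe as mp
+
+mp_pose = mp.solutions.pose
+
+
+def neck_inclination(ear, shoulder):
+    # Angle (degrees) of the ear-shoulder line from vertical; larger means more forward lean.
+    dx, dy = ear.x - shoulder.x, ear.y - shoulder.y
+    return abs(math.degrees(math.atan2(dx, -dy)))  # image y grows downward
+
+
+cap = cv2.VideoCapture(0)
+with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+        label = "no person"
+        if results.pose_landmarks:
+            lm = results.pose_landmarks.landmark
+            angle = neck_inclination(lm[mp_pose.PoseLandmark.LEFT_EAR.value],
+                                     lm[mp_pose.PoseLandmark.LEFT_SHOULDER.value])
+            label = "good posture" if angle < 40 else "bad posture"  # 40 deg is an arbitrary threshold
+        cv2.putText(frame, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+        cv2.imshow('Posture check', frame)
+        if cv2.waitKey(10) & 0xFF == ord('q'):
+            break
+cap.release()
+cv2.destroyAllWindows()
+```
+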
+### Built With
+
+* Python
+
+# Getting Started
+
+### Prerequisites
+
+* Python 3.9.x
+
+
+
+### Run the program
+
+`python application.py` **or** `python3 application.py`
+
+The default model is loaded if no model file is specified.
+
+
+
+*Fig. 1: YOLOv5s network architecture (based on Liu et al.). The CBS module consists of a Convolutional layer, a Batch Normalization layer, and a Sigmoid Linear Unit (SiLU) activation function. The C3 module consists of three CBS modules and one bottleneck block. The SPPF module consists of two CBS modules and three Max Pooling layers.*
+
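+For readers unfamiliar with the modules named in the caption, the following is a minimal PyTorch sketch of the CBS block it describes (a convolution followed by batch normalization and a SiLU activation). It is illustrative only and not taken from this repository or from YOLOv5's source.
+
+```python
+import torch.nn as nn
+
+
+class CBS(nn.Module):
+    """Conv + BatchNorm + SiLU, as described in the figure caption (illustrative sketch)."""
+
+    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride,
+                              padding=kernel_size // 2, bias=False)
+        self.bn = nn.BatchNorm2d(out_channels)
+        self.act = nn.SiLU()
+
+    def forward(self, x):
+        return self.act(self.bn(self.conv(x)))
+```
+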
+## Model Results
+The validation set contains 80 images (40 sitting_good, 40 sitting_bad). The results are as follows:
+|Class|Images|Instances|Precision|Recall|mAP50|mAP50-95|
+|--|--|--|--|--|--|--|
+|all| 80 | 80 | 0.87 | 0.939 | 0.931 | 0.734 |
+|sitting_good| 40 | 40| 0.884 | 0.954 | 0.908 |0.744 |
+|sitting_bad| 80 | 40 | 0.855 | 0.925 | 0.953 | 0.724 |
+
+F1, Precision, Recall, and Precision-Recall plots:
+