live_translator.py
import joblib
import pandas as pd
import numpy as np
import mediapipe as mp
import cv2
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

""" Loading a pre-trained classifier (uncomment one of the alternatives to compare models) """
# classifier = joblib.load('Trained_models/knn.pkl')
# classifier = joblib.load('Trained_models/logisticreg.pkl')
# classifier = joblib.load('Trained_models/gnb.pkl')
# classifier = joblib.load('Trained_models/svm.pkl')
# classifier = joblib.load('Trained_models/decisiontree.pkl')
classifier = joblib.load('Trained_models/randomforest.pkl')

""" Loading the pre-trained scaler used to normalize the input features """
# The commented-out block below shows how the scaler was originally fitted;
# pandas, StandardScaler and train_test_split are only needed if you re-enable it.
# dataset = pd.read_csv('mcoords_damta.csv')
# X_split = dataset.iloc[:, 1:].values
# Y_split = dataset.iloc[:, 0].values
# X_train, X_test, y_train, y_test = train_test_split(X_split, Y_split, test_size=0.33)
# scaler = StandardScaler().fit(X_train)
scaler = joblib.load('Trained_models/StandardScaler.pkl')
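
# A minimal sketch of how the .pkl artifacts above could have been produced
# (an assumption -- the actual training script is not part of this file):
#
#   from sklearn.ensemble import RandomForestClassifier
#   rf = RandomForestClassifier()
#   rf.fit(scaler.transform(X_train), y_train)
#   joblib.dump(rf, 'Trained_models/randomforest.pkl')
#   joblib.dump(scaler, 'Trained_models/StandardScaler.pkl')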
# Building the hand-landmark detector from MediaPipe's built-in Hands model
exo_landmark = mp.solutions.drawing_utils
exo_landmark_hands = mp.solutions.hands
model = exo_landmark_hands.Hands(
    max_num_hands=1,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5)
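
# Note: each detected hand carries 21 landmarks, with x and y coordinates
# normalized to [0, 1] by image width and height. The loop below flattens
# these into 42-value feature vectors, which is presumably the format the
# classifier and scaler were trained on (an assumption based on this script).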
# Taking webcam input
cap = cv2.VideoCapture(0)
while cap.isOpened():
    success, image = cap.read()
    if not success:
        print("Ignoring empty camera frame.")
        continue
    # Flip the image horizontally for a selfie-view display and convert it
    # from BGR to RGB, which is what MediaPipe expects.
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
    # To improve performance, mark the image as not writeable so it is
    # passed by reference.
    image.flags.writeable = False
    results = model.process(image)
    # Draw the hand annotations on the image: reset writeable to True and
    # convert the image back from RGB to BGR for OpenCV.
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            coords = hand_landmarks.landmark
            exo_landmark.draw_landmarks(image, hand_landmarks, exo_landmark_hands.HAND_CONNECTIONS)
            # Flatten the 21 landmarks into a 42-value feature vector (x, y per landmark)
            coords = list(np.array([[landmark.x, landmark.y] for landmark in coords]).flatten())
            coords = scaler.transform([coords])
            predicted = classifier.predict(coords)
            # Defining the status box
            cv2.rectangle(image, (0, 0), (160, 60), (245, 90, 16), -1)
            # Displaying the predicted class
            cv2.putText(image, 'Predicted Letter',
                        (20, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
            cv2.putText(image, str(predicted[0]),
                        (20, 45), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow('Sign Translator', image)
    # Exit when the Esc key (27) is pressed
    if cv2.waitKey(5) & 0xFF == 27:
        break
cap.release()
cv2.destroyAllWindows()
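
# Usage (a sketch; assumes the Trained_models/ directory sits alongside this
# script and a webcam is available as device 0):
#   python live_translator.py
# Press Esc in the 'Sign Translator' window to quit.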