-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy path: signlanguage.py
34 lines (26 loc) · 1.15 KB
/
signlanguage.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
import sign_language_translator as slt
# --- Text-to-sign translation demo --------------------------------------
# Print the language codes the library supports, so the user can see the
# valid values for the model configuration below.
print(slt.TextLanguageCodes, slt.SignLanguageCodes)

# Rule-based text-to-sign model for English -> American Sign Language:
# tokenizes the text, maps tokens to sign clips, downloads the clips and
# concatenates them into a single video.
model = slt.models.ConcatenativeSynthesis(
    text_language="english", sign_language="us-asl", sign_format="video"
)

# Example text to translate
text = "This is very good."
sign = model.translate(text)  # tokenize, map, download & concatenate
sign.show()
# Strip the sentence's trailing period before adding the extension;
# otherwise the file is saved as "This is very good..mp4".
sign.save(f"{text.rstrip('.')}.mp4")

# The text language can be reconfigured at runtime; re-assigning the same
# value here just demonstrates the setter and its accepted forms.
model.text_language = "english"  # slt.TextLanguageCodes.ENGLISH # slt.languages.text.English()
sign_2 = model.translate("Five hours.")
sign_2.show()
# --- Video-to-landmarks embedding demo -----------------------------------
# Load the sign-to-text model (pytorch) (COMING SOON!)
# translation_model = slt.get_model(slt.ModelCodes.Gesture)

# MediaPipe-based model that extracts body/hand landmarks from video frames.
embedding_model = slt.models.MediaPipeLandmarksModel()

# NOTE(review): hard-coded, user-specific absolute path — replace with a
# relative path or a command-line argument before running on another machine.
VIDEO_PATH = "/Users/aahilali/Desktop/raw_videos/_2FBDaOPYig_1-3-rgb_front.mp4"

# Load the video file
sign = slt.Video(VIDEO_PATH)
# Embed every frame into landmark coordinates.
embedding = embedding_model.embed(sign.iter_frames())

# Translate embeddings to text (Note: This feature might be COMING SOON)
# text = translation_model.translate(embedding)
# print(text)

sign.show()
# Visualize the extracted landmarks using MediaPipe world-coordinate
# connections.
slt.Landmarks(embedding, connections="mediapipe-world").show()