right-teachable.html
<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8">
  <title>Hand Raise Checker</title>
  <link rel="stylesheet" href="styles.css">
</head>
<body onload="init()">
  <div id="header">
    <span>Right side: check if hand is raised</span>
    <!-- <div>
      <button id="start_button" type="button" onclick="init()">Start</button>
    </div> -->
  </div>
  <!-- <div id="blocker"></div> -->
  <div id="right-video">
    <canvas id="right-canvas" width="300" height="600"></canvas>
  </div>
  <div id="label-container"></div>
  <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.3.1/dist/tf.min.js"></script>
  <script src="https://cdn.jsdelivr.net/npm/@teachablemachine/pose@0.8/dist/teachablemachine-pose.min.js"></script>
  <script type="text/javascript">
    // The link to your model, provided by the Teachable Machine export panel.
    // Note: this constant shadows the global URL constructor inside this script,
    // which is why window.URL is used explicitly further down.
    const URL = "https://teachablemachine.withgoogle.com/models/zi53kDWFM/";
    let model, webcam, ctx, labelContainer, maxPredictions;
    let started = false; // currently unused

    async function init() {
      const modelURL = URL + "model.json";
      const metadataURL = URL + "metadata.json";

      // Load the model and metadata.
      // Refer to tmImage.loadFromFiles() in the API to support files from a file picker.
      // Note: the pose library adds a tmPose object to your window (window.tmPose).
      model = await tmPose.load(modelURL, metadataURL);
      maxPredictions = model.getTotalClasses();

      // Set up the webcam.
      const size = 600;
      const flip = false; // whether to flip (mirror) the webcam
      webcam = new tmPose.Webcam(size, size, flip); // width, height, flip
      await webcam.setup(); // request access to the webcam
      await webcam.play();
      window.requestAnimationFrame(loop);

      // Append/get elements in the DOM.
      const canvas = document.getElementById("right-canvas");
      canvas.width = size / 2;
      canvas.height = size;
      ctx = canvas.getContext("2d");
      // ctx.drawImage(webcam.canvas, 150, 300, 300, 600);
      labelContainer = document.getElementById("label-container");
      for (let i = 0; i < maxPredictions; i++) { // one div per class label
        labelContainer.appendChild(document.createElement("div"));
      }
    }
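
    // Note (editor assumption): the display canvas is half the webcam width
    // (300x600), so drawPose() below only shows the left 300 px of the 600x600
    // webcam frame when it draws at (0, 0); presumably this is the "right-side"
    // crop the page is named for.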
    async function loop(timestamp) {
      webcam.update(); // update the webcam frame
      await predict();
      window.requestAnimationFrame(loop);
    }
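
    // Note (editor assumption): loop() is driven by requestAnimationFrame, which
    // typically fires at the display refresh rate (~60 fps), so the
    // loop_count % 500 check in predict() below saves right.txt roughly every
    // eight seconds on a 60 Hz display.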
    let predicts = {};
    let loop_count = 0;

    async function predict() {
      // Prediction #1: run the input through PoseNet.
      // estimatePose can take an image, video or canvas HTML element.
      const { pose, posenetOutput } = await model.estimatePose(webcam.canvas);
      // Prediction #2: run the PoseNet output through the Teachable Machine classification model.
      const prediction = await model.predict(posenetOutput);
      for (let i = 0; i < maxPredictions; i++) {
        const classPrediction =
          prediction[i].className + ": " + prediction[i].probability.toFixed(2);
        labelContainer.childNodes[i].innerHTML = classPrediction;
        predicts[prediction[i].className] = prediction[i].probability.toFixed(2);
        console.log(predicts);
        // localStorage.setItem('test', JSON.stringify(predicts));
        // var fs = require('fs');  // Node-only; not usable in the browser
        // fs.writeFile("test.json", JSON.stringify(predicts));
        // console.log("right: " + labelContainer.childNodes[i].innerHTML)
      }
      // Finally, draw the pose (once per frame, outside the class loop).
      drawPose(pose);

      // Periodically download the latest predictions as right.txt.
      if (loop_count % 500 == 0) {
        const textToBLOB = new Blob([JSON.stringify(predicts)], { type: 'text/plain' });
        const sFileName = 'right.txt'; // the file to save the data to
        const newLink = document.createElement("a");
        newLink.download = sFileName;
        if (window.webkitURL != null) {
          newLink.href = window.webkitURL.createObjectURL(textToBLOB);
        } else {
          newLink.href = window.URL.createObjectURL(textToBLOB);
        }
        // Append and hide the link in both branches so click() behaves consistently.
        newLink.style.display = "none";
        document.body.appendChild(newLink);
        newLink.click();
        document.body.removeChild(newLink);
      }
      loop_count++;
    }
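
    // Example (hypothetical class names): after a few frames, predicts -- and the
    // downloaded right.txt -- hold the latest probability per class as strings, e.g.
    //   {"Hand Raised": "0.97", "Hand Down": "0.03"}
    // The actual class names come from the Teachable Machine model at the URL above.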
    function drawPose(pose) {
      if (webcam.canvas) {
        ctx.drawImage(webcam.canvas, 0, 0);
        // draw the keypoints and skeleton
        if (pose) {
          const minPartConfidence = 0.5;
          tmPose.drawKeypoints(pose.keypoints, minPartConfidence, ctx);
          tmPose.drawSkeleton(pose.keypoints, minPartConfidence, ctx);
        }
      }
    }
  </script>
</body>
</html>