4k.py
from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np

# Path to the detection model blob; an optional first CLI argument overrides it
nnPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_6shave.blob')).resolve().absolute())
if len(sys.argv) > 1:
    nnPath = sys.argv[1]

if not Path(nnPath).exists():
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')
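
# Usage sketch (assuming the script keeps its repo name, 4k.py, and that the
# blob follows the MobileNet-SSD output layout this pipeline expects):
#   python3 4k.py                      # use the default MobileNet-SSD blob
#   python3 4k.py path/to/other.blob   # override the model path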

# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
            "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.createColorCamera()
nn = pipeline.createMobileNetDetectionNetwork()
xoutVideo = pipeline.createXLinkOut()
xoutPreview = pipeline.createXLinkOut()
nnOut = pipeline.createXLinkOut()

xoutVideo.setStreamName("video")
xoutPreview.setStreamName("preview")
nnOut.setStreamName("nn")

# Properties
camRgb.setPreviewSize(300, 300)  # NN input size
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
camRgb.setInterleaved(False)
camRgb.setPreviewKeepAspectRatio(False)
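
# Note: keepAspectRatio=False squeezes the full sensor FOV into the square
# 300x300 preview instead of center-cropping it, so the NN's normalized boxes
# cover the same FOV as the 4K "video" stream and can be drawn directly on it.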

# Define a neural network that will make predictions based on the source frames
nn.setConfidenceThreshold(0.5)
nn.setBlobPath(nnPath)
nn.setNumInferenceThreads(2)
nn.input.setBlocking(False)
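
# With a non-blocking input queue, new preview frames overwrite stale ones while
# the NN is busy, keeping inference near real time instead of backing up.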

# Linking
camRgb.video.link(xoutVideo.input)
camRgb.preview.link(xoutPreview.input)
camRgb.preview.link(nn.input)
nn.out.link(nnOut.input)
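
# Resulting topology:
#   camRgb.video   ------------------> XLinkOut "video"   (full 4K frames)
#   camRgb.preview --+---------------> XLinkOut "preview" (300x300 frames)
#                    +--> nn --------> XLinkOut "nn"      (detections)
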
# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues will be used to get the frames and nn data from the outputs defined above
    qVideo = device.getOutputQueue(name="video", maxSize=4, blocking=False)
    qPreview = device.getOutputQueue(name="preview", maxSize=4, blocking=False)
    qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)

    previewFrame = None
    videoFrame = None
    detections = []

    # NN bounding boxes arrive in normalized <0..1> range - scale them to pixel
    # coordinates using the frame's width/height
    def frameNorm(frame, bbox):
        # bbox is (xmin, ymin, xmax, ymax): even indices are x coordinates and odd
        # indices are y, so scale x by frame width and y by frame height
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
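
    # Worked example: on a 3840x2160 frame, normVals is [3840, 2160, 3840, 2160],
    # so bbox (0.25, 0.5, 0.75, 1.0) maps to the pixel box [960, 1080, 2880, 2160]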

    def displayFrame(name, frame):
        color = (255, 0, 0)
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            cv2.putText(frame, labelMap[detection.label], (bbox[0] + 40, bbox[1] + 80), cv2.FONT_HERSHEY_TRIPLEX, 2, color, 4)
            cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 40, bbox[1] + 160), cv2.FONT_HERSHEY_TRIPLEX, 2, color, 4)
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
        # Show the frame
        cv2.imshow(name, frame)
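
    # displayFrame reads the `detections` list that the main loop below keeps
    # updated, so each shown frame carries the latest available NN results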

    cv2.namedWindow("video", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("video", 1280, 720)
    print("Resize video window with mouse drag!")

    while True:
        # Instead of get() (blocking), use tryGet() (non-blocking), which returns
        # available data or None otherwise
        inVideo = qVideo.tryGet()
        inPreview = qPreview.tryGet()
        inDet = qDet.tryGet()

        if inVideo is not None:
            videoFrame = inVideo.getCvFrame()

        if inPreview is not None:
            previewFrame = inPreview.getCvFrame()

        if inDet is not None:
            detections = inDet.detections
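
        # The three queues are independent, so drawn boxes may trail the 4K frame
        # by a few frames; this example trades exact frame syncing for simplicity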

        if videoFrame is not None:
            displayFrame("video", videoFrame)

        if cv2.waitKey(1) == ord('q'):
            break