Merge pull request #352 from luxonis/develop

Release v2.10.0.0

SzabolcsGergely authored Aug 24, 2021
2 parents de1692a + 9b24e44 commit 11a9a34
Showing 9 changed files with 581 additions and 104 deletions.
2 changes: 1 addition & 1 deletion depthai-core
2 changes: 2 additions & 0 deletions examples/CMakeLists.txt
@@ -127,3 +127,5 @@ add_python_example(imu_rotation_vector imu_rotation_vector.py)
add_python_example(rgb_depth_aligned rgb_depth_aligned.py)
add_python_example(edge_detector edge_detector.py)
add_python_example(script_camera_control script_camera_control.py)
add_python_example(feature_tracker feature_tracker.py)
add_python_example(corner_detector corner_detector.py)
105 changes: 105 additions & 0 deletions examples/corner_detector.py
@@ -0,0 +1,105 @@
#!/usr/bin/env python3

import cv2
import depthai as dai


# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)
featureTrackerRight = pipeline.create(dai.node.FeatureTracker)

xoutPassthroughFrameLeft = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesLeft = pipeline.create(dai.node.XLinkOut)
xoutPassthroughFrameRight = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesRight = pipeline.create(dai.node.XLinkOut)
xinTrackedFeaturesConfig = pipeline.create(dai.node.XLinkIn)

xoutPassthroughFrameLeft.setStreamName("passthroughFrameLeft")
xoutTrackedFeaturesLeft.setStreamName("trackedFeaturesLeft")
xoutPassthroughFrameRight.setStreamName("passthroughFrameRight")
xoutTrackedFeaturesRight.setStreamName("trackedFeaturesRight")
xinTrackedFeaturesConfig.setStreamName("trackedFeaturesConfig")

# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

# Disable optical flow
featureTrackerLeft.initialConfig.setMotionEstimator(False)
featureTrackerRight.initialConfig.setMotionEstimator(False)

# Linking
monoLeft.out.link(featureTrackerLeft.inputImage)
featureTrackerLeft.passthroughInputImage.link(xoutPassthroughFrameLeft.input)
featureTrackerLeft.outputFeatures.link(xoutTrackedFeaturesLeft.input)
xinTrackedFeaturesConfig.out.link(featureTrackerLeft.inputConfig)

monoRight.out.link(featureTrackerRight.inputImage)
featureTrackerRight.passthroughInputImage.link(xoutPassthroughFrameRight.input)
featureTrackerRight.outputFeatures.link(xoutTrackedFeaturesRight.input)
xinTrackedFeaturesConfig.out.link(featureTrackerRight.inputConfig)

featureTrackerConfig = featureTrackerRight.initialConfig.get()

print("Press 's' to switch between Harris and Shi-Thomasi corner detector!")

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues used to receive the results
    passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, False)
    outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, False)
    passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, False)
    outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, False)

    inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig")

    leftWindowName = "left"
    rightWindowName = "right"

    def drawFeatures(frame, features):
        pointColor = (0, 0, 255)
        circleRadius = 2
        for feature in features:
            cv2.circle(frame, (int(feature.position.x), int(feature.position.y)), circleRadius, pointColor, -1, cv2.LINE_AA, 0)

    while True:
        inPassthroughFrameLeft = passthroughImageLeftQueue.get()
        passthroughFrameLeft = inPassthroughFrameLeft.getFrame()
        leftFrame = cv2.cvtColor(passthroughFrameLeft, cv2.COLOR_GRAY2BGR)

        inPassthroughFrameRight = passthroughImageRightQueue.get()
        passthroughFrameRight = inPassthroughFrameRight.getFrame()
        rightFrame = cv2.cvtColor(passthroughFrameRight, cv2.COLOR_GRAY2BGR)

        trackedFeaturesLeft = outputFeaturesLeftQueue.get().trackedFeatures
        drawFeatures(leftFrame, trackedFeaturesLeft)

        trackedFeaturesRight = outputFeaturesRightQueue.get().trackedFeatures
        drawFeatures(rightFrame, trackedFeaturesRight)

        # Show the frame
        cv2.imshow(leftWindowName, leftFrame)
        cv2.imshow(rightWindowName, rightFrame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('s'):
            if featureTrackerConfig.cornerDetector.type == dai.FeatureTrackerConfig.CornerDetector.Type.HARRIS:
                featureTrackerConfig.cornerDetector.type = dai.FeatureTrackerConfig.CornerDetector.Type.SHI_THOMASI
                print("Switching to Shi-Tomasi")
            else:
                featureTrackerConfig.cornerDetector.type = dai.FeatureTrackerConfig.CornerDetector.Type.HARRIS
                print("Switching to Harris")

            cfg = dai.FeatureTrackerConfig()
            cfg.set(featureTrackerConfig)
            inputFeatureTrackerConfigQueue.send(cfg)
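
Note: the runtime switch above round-trips the full config through the XLinkIn stream, and one stream feeds both trackers. A minimal sketch of that round trip on its own, assuming the queue and config objects created in corner_detector.py above:

    # Sketch: force the Shi-Tomasi detector once at runtime (uses only
    # objects already defined in corner_detector.py)
    featureTrackerConfig.cornerDetector.type = dai.FeatureTrackerConfig.CornerDetector.Type.SHI_THOMASI
    cfg = dai.FeatureTrackerConfig()
    cfg.set(featureTrackerConfig)             # copy the tweaked settings into a fresh message
    inputFeatureTrackerConfigQueue.send(cfg)  # both trackers listen on this XLinkIn stream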
173 changes: 173 additions & 0 deletions examples/feature_tracker.py
@@ -0,0 +1,173 @@
#!/usr/bin/env python3

import cv2
import depthai as dai
from collections import deque

class FeatureTrackerDrawer:

    lineColor = (200, 0, 200)
    pointColor = (0, 0, 255)
    circleRadius = 2
    maxTrackedFeaturesPathLength = 30
    # for how many frames the feature is tracked
    trackedFeaturesPathLength = 10

    trackedIDs = None
    trackedFeaturesPath = None

    def onTrackBar(self, val):
        FeatureTrackerDrawer.trackedFeaturesPathLength = val

    def trackFeaturePath(self, features):

        newTrackedIDs = set()
        for currentFeature in features:
            currentID = currentFeature.id
            newTrackedIDs.add(currentID)

            if currentID not in self.trackedFeaturesPath:
                self.trackedFeaturesPath[currentID] = deque()

            path = self.trackedFeaturesPath[currentID]

            path.append(currentFeature.position)
            while len(path) > max(1, FeatureTrackerDrawer.trackedFeaturesPathLength):
                path.popleft()

            self.trackedFeaturesPath[currentID] = path

        featuresToRemove = set()
        for oldId in self.trackedIDs:
            if oldId not in newTrackedIDs:
                featuresToRemove.add(oldId)

        for id in featuresToRemove:
            self.trackedFeaturesPath.pop(id)

        self.trackedIDs = newTrackedIDs

    def drawFeatures(self, img):

        cv2.setTrackbarPos(self.trackbarName, self.windowName, FeatureTrackerDrawer.trackedFeaturesPathLength)

        for featurePath in self.trackedFeaturesPath.values():
            path = featurePath

            for j in range(len(path) - 1):
                src = (int(path[j].x), int(path[j].y))
                dst = (int(path[j + 1].x), int(path[j + 1].y))
                cv2.line(img, src, dst, self.lineColor, 1, cv2.LINE_AA, 0)
            j = len(path) - 1
            cv2.circle(img, (int(path[j].x), int(path[j].y)), self.circleRadius, self.pointColor, -1, cv2.LINE_AA, 0)

    def __init__(self, trackbarName, windowName):
        self.trackbarName = trackbarName
        self.windowName = windowName
        cv2.namedWindow(windowName)
        cv2.createTrackbar(trackbarName, windowName, FeatureTrackerDrawer.trackedFeaturesPathLength, FeatureTrackerDrawer.maxTrackedFeaturesPathLength, self.onTrackBar)
        self.trackedIDs = set()
        self.trackedFeaturesPath = dict()


# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)
featureTrackerRight = pipeline.create(dai.node.FeatureTracker)

xoutPassthroughFrameLeft = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesLeft = pipeline.create(dai.node.XLinkOut)
xoutPassthroughFrameRight = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesRight = pipeline.create(dai.node.XLinkOut)
xinTrackedFeaturesConfig = pipeline.create(dai.node.XLinkIn)

xoutPassthroughFrameLeft.setStreamName("passthroughFrameLeft")
xoutTrackedFeaturesLeft.setStreamName("trackedFeaturesLeft")
xoutPassthroughFrameRight.setStreamName("passthroughFrameRight")
xoutTrackedFeaturesRight.setStreamName("trackedFeaturesRight")
xinTrackedFeaturesConfig.setStreamName("trackedFeaturesConfig")

# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

# Linking
monoLeft.out.link(featureTrackerLeft.inputImage)
featureTrackerLeft.passthroughInputImage.link(xoutPassthroughFrameLeft.input)
featureTrackerLeft.outputFeatures.link(xoutTrackedFeaturesLeft.input)
xinTrackedFeaturesConfig.out.link(featureTrackerLeft.inputConfig)

monoRight.out.link(featureTrackerRight.inputImage)
featureTrackerRight.passthroughInputImage.link(xoutPassthroughFrameRight.input)
featureTrackerRight.outputFeatures.link(xoutTrackedFeaturesRight.input)
xinTrackedFeaturesConfig.out.link(featureTrackerRight.inputConfig)

# By default, the least amount of hardware resources is allocated;
# increasing it improves performance
numShaves = 2
numMemorySlices = 2
featureTrackerLeft.setHardwareResources(numShaves, numMemorySlices)
featureTrackerRight.setHardwareResources(numShaves, numMemorySlices)

featureTrackerConfig = featureTrackerRight.initialConfig.get()
print("Press 's' to switch between Lucas-Kanade optical flow and hardware accelerated motion estimation!")

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues used to receive the results
    passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, False)
    outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, False)
    passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, False)
    outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, False)

    inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig")

    leftWindowName = "left"
    leftFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", leftWindowName)

    rightWindowName = "right"
    rightFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", rightWindowName)

    while True:
        inPassthroughFrameLeft = passthroughImageLeftQueue.get()
        passthroughFrameLeft = inPassthroughFrameLeft.getFrame()
        leftFrame = cv2.cvtColor(passthroughFrameLeft, cv2.COLOR_GRAY2BGR)

        inPassthroughFrameRight = passthroughImageRightQueue.get()
        passthroughFrameRight = inPassthroughFrameRight.getFrame()
        rightFrame = cv2.cvtColor(passthroughFrameRight, cv2.COLOR_GRAY2BGR)

        trackedFeaturesLeft = outputFeaturesLeftQueue.get().trackedFeatures
        leftFeatureDrawer.trackFeaturePath(trackedFeaturesLeft)
        leftFeatureDrawer.drawFeatures(leftFrame)

        trackedFeaturesRight = outputFeaturesRightQueue.get().trackedFeatures
        rightFeatureDrawer.trackFeaturePath(trackedFeaturesRight)
        rightFeatureDrawer.drawFeatures(rightFrame)

        # Show the frame
        cv2.imshow(leftWindowName, leftFrame)
        cv2.imshow(rightWindowName, rightFrame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('s'):
            if featureTrackerConfig.motionEstimator.type == dai.FeatureTrackerConfig.MotionEstimator.Type.LUCAS_KANADE_OPTICAL_FLOW:
                featureTrackerConfig.motionEstimator.type = dai.FeatureTrackerConfig.MotionEstimator.Type.HW_MOTION_ESTIMATION
                print("Switching to hardware accelerated motion estimation")
            else:
                featureTrackerConfig.motionEstimator.type = dai.FeatureTrackerConfig.MotionEstimator.Type.LUCAS_KANADE_OPTICAL_FLOW
                print("Switching to Lucas-Kanade optical flow")

            cfg = dai.FeatureTrackerConfig()
            cfg.set(featureTrackerConfig)
            inputFeatureTrackerConfigQueue.send(cfg)
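
Note: FeatureTrackerDrawer bounds each feature's trail with a per-id deque, trimmed from the left as new positions arrive. A pure-Python sketch of that bookkeeping, with hypothetical stand-in values for the positions and the trail cap:

    from collections import deque

    # Hypothetical stand-ins: one position per frame, trail capped at 10
    path = deque()
    trackedFeaturesPathLength = 10
    for position in range(25):
        path.append(position)
        while len(path) > max(1, trackedFeaturesPathLength):
            path.popleft()
    print(len(path))  # 10 -- only the most recent positions are kept and drawn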
23 changes: 23 additions & 0 deletions examples/install_requirements.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python3
import platform
import sys, os, subprocess
import argparse
import re
@@ -45,7 +46,29 @@ def hasWhitespace(string):
# Check if in virtual environment
in_venv = getattr(sys, "real_prefix", getattr(sys, "base_prefix", sys.prefix)) != sys.prefix
pip_call = [sys.executable, "-m", "pip"]
pip_installed = True
pip_install = pip_call + ["install"]

try:
    subprocess.check_call(pip_call + ["--version"])
except subprocess.CalledProcessError as ex:
    pip_installed = False

if not pip_installed:
    err_str = "Issues with \"pip\" package detected! Follow the official instructions to install - https://pip.pypa.io/en/stable/installation/"
    raise RuntimeError(err_str)

if sys.version_info[0] != 3:
    raise RuntimeError("Examples require Python 3 to run (detected: Python {})".format(sys.version_info[0]))

if platform.machine() == "arm64" and platform.system() == "Darwin":
    err_str = "There are no prebuilt wheels for M1 processors. Please open the following link for a solution - https://discuss.luxonis.com/d/69-running-depthai-on-apple-m1-based-macs"
    raise RuntimeError(err_str)

is_pi = platform.machine().startswith("arm") or platform.machine().startswith("aarch")
if is_pi and sys.version_info[1] in (7, 9):
    print("[WARNING] There are no prebuilt wheels for Python 3.{} for OpenCV, building process on this device may be long and unstable".format(sys.version_info[1]))

if not in_venv:
    pip_install.append("--user")
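
Note: the in_venv check added above relies on virtualenv/venv rewriting the interpreter's sys prefixes. A standalone sketch of the same idiom:

    import sys

    # Outside a virtual environment the prefixes match; inside, they differ
    in_venv = getattr(sys, "real_prefix", getattr(sys, "base_prefix", sys.prefix)) != sys.prefix
    print("virtual environment" if in_venv else "system interpreter")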
