Skip to content

Commit

Permalink
Added parking event testing tools
Browse files Browse the repository at this point in the history
  • Loading branch information
connervieira committed Dec 18, 2024
1 parent d3a9840 commit c3fd09b
Show file tree
Hide file tree
Showing 2 changed files with 154 additions and 0 deletions.
71 changes: 71 additions & 0 deletions tools/parked_event_motion_detect_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
# This script allows you to test your motion detection settings with a real-time video stream.

import os # Required to interact with certain operating system functions
import json # Required to process JSON data
import cv2 # Required to capture video.

predator_root_directory = str(os.path.dirname(os.path.realpath(__file__))) # This variable determines the folder path of the root Predator directory. This should usually automatically recognize itself, but if it doesn't, you can change it manually.

config_file_path = predator_root_directory + "/../config.json" # This is the expected location of the configuration file, relative to this script.
try:
    if (os.path.exists(config_file_path)):
        with open(config_file_path) as config_file: # Use a context manager so the file handle is always closed.
            config = json.load(config_file) # Load the configuration database from config.json
    else:
        print("The configuration file doesn't appear to exist at " + config_file_path + ".")
        exit()
except (json.JSONDecodeError, OSError): # Only catch parse and file-access failures, so unrelated errors (like KeyboardInterrupt) aren't hidden.
    print("The configuration database couldn't be loaded. It may be corrupted.")
    exit()

device = "main" # The name of the capture device (as defined in the configuration) to test.

if (device not in config["dashcam"]["capture"]["video"]["devices"]): # Make sure the selected device is actually defined in the configuration before using it.
    print("The specified device does not exist in the configuration. Be sure to change the 'device' variable in the parked_event_motion_detect_test.py file to the device you want to test.")
    exit()

resolution = [config["dashcam"]["capture"]["video"]["devices"][device]["resolution"]["width"], config["dashcam"]["capture"]["video"]["devices"][device]["resolution"]["height"]] # This determines the resolution that will be used for the video capture device.
capture = cv2.VideoCapture(config["dashcam"]["capture"]["video"]["devices"][device]["index"]) # Open the video capture device configured for this device. (Previously pointed at a developer-local test video file.)
codec = list(config["dashcam"]["capture"]["video"]["devices"][device]["codec"])
capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(codec[0], codec[1], codec[2], codec[3])) # Set the capture codec to the four-character code specified in the configuration.
capture.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0]) # Set the video stream width.
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1]) # Set the video stream height.

background_subtractor = cv2.createBackgroundSubtractorMOG2() # This subtractor learns the static background so moving regions can be isolated.

total_area = resolution[0] * resolution[1] # Calculate the total area of the frame.

output = cv2.VideoWriter("./predator_motion_detect_test.avi", cv2.VideoWriter_fourcc(*'XVID'), 16, (resolution[0], resolution[1])) # Open the video file that annotated frames will be written to.

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # Create the erosion/dilation kernel once, since it never changes between frames.
while True:
    ret, frame = capture.read() # Read the next frame from the capture device.
    if (not ret): # Check to see if the frame was read successfully (for example, a video file source may have ended).
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert the frame to grayscale for motion analysis.
    fgmask = background_subtractor.apply(gray) # Apply background subtraction to isolate moving regions.
    fgmask = cv2.erode(fgmask, kernel, iterations=1) # Erode the mask to remove small specks of noise.
    fgmask = cv2.dilate(fgmask, kernel, iterations=1) # Dilate the mask to restore the surviving regions to roughly their original size.
    contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Find the outlines of the moving regions.

    moving_area = 0 # This will hold the total area (in pixels) of motion detected in this frame.
    for contour in contours:
        moving_area += cv2.contourArea(contour)
        if cv2.contourArea(contour) > 10: # Only draw boxes around contours larger than a few pixels, to reduce visual noise.
            x, y, w, h = cv2.boundingRect(contour)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    moving_percentage = moving_area / total_area # Calculate the percentage of the frame that is in motion.
    moving_percentage_human = "{:.5f}%".format(moving_percentage*100) # Convert the moving percentage to a human-readable string.
    if (moving_area > 0): # Check to see if there is any movement at all.
        if (moving_percentage > float(config["dashcam"]["parked"]["event"]["trigger_motion"]["sensitivity"])): # Check to see if there is movement that exceeds the sensitivity threshold.
            print(str(moving_area) + "\t(" + moving_percentage_human + ")\tTriggered") # Display the movement as both a number and a percentage.
        else:
            print(str(moving_area) + "\t(" + moving_percentage_human + ")") # Display the movement as both a number and a percentage.

    cv2.putText(frame, moving_percentage_human, (10, 30), 2, 0.8, (0, 0, 0)) # Add the main overlay stamp to the video stream.
    output.write(frame) # Save this frame to the video file.
    cv2.imshow('Motion Detection', frame)

    if cv2.waitKey(1) == ord('q'): # Allow the user to press 'q' to exit.
        break

output.release() # Finalize the output video file so it isn't left truncated.
capture.release()
cv2.destroyAllWindows()
83 changes: 83 additions & 0 deletions tools/parked_event_object_recognition_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
# This script allows you to test your object recognition settings with a real-time video stream.

import os # Required to interact with certain operating system functions
import json # Required to process JSON data
import cv2 # Required to capture video.
from ultralytics import YOLO
import numpy

predator_root_directory = str(os.path.dirname(os.path.realpath(__file__))) # This variable determines the folder path of the root Predator directory. This should usually automatically recognize itself, but if it doesn't, you can change it manually.

config_file_path = predator_root_directory + "/../config.json" # This is the expected location of the configuration file, relative to this script.
try:
    if (os.path.exists(config_file_path)):
        with open(config_file_path) as config_file: # Use a context manager so the file handle is always closed.
            config = json.load(config_file) # Load the configuration database from config.json
    else:
        print("The configuration file doesn't appear to exist at " + config_file_path + ".")
        exit()
except (json.JSONDecodeError, OSError): # Only catch parse and file-access failures, so unrelated errors (like KeyboardInterrupt) aren't hidden.
    print("The configuration database couldn't be loaded. It may be corrupted.")
    exit()


model = YOLO(predator_root_directory + "/../assets/models/dashcam_model.pt") # Load the object recognition model relative to this script's location, so the script works regardless of the current working directory.
def predict(frame):
    """Run object recognition on a single video frame.

    Returns a list of dictionaries, one per detected object, each with
    'bbox' (x1/y1/x2/y2 pixel coordinates), 'name' (class label), and
    'conf' (confidence score).
    """
    global model
    results = model(frame)
    class_names = results[0].names # The mapping of class indices to human-readable names.

    detected_objects = [] # This will hold all of the detected objects.
    for result in results:
        for box in result.boxes: # Iterate over each detected bounding box directly, rather than by index.
            coordinates = box.xyxy.numpy().tolist()[0]
            obj = {
                "bbox": {
                    "x1": round(coordinates[0]),
                    "y1": round(coordinates[1]),
                    "x2": round(coordinates[2]),
                    "y2": round(coordinates[3]),
                },
                "name": class_names[int(box.cls.numpy().tolist()[0])],
                "conf": box.conf.numpy().tolist()[0],
            }
            detected_objects.append(obj)
    return detected_objects

device = "main" # The name of the capture device (as defined in the configuration) to test.

if (device not in config["dashcam"]["capture"]["video"]["devices"]): # Make sure the selected device is actually defined in the configuration before using it.
    print("The specified device does not exist in the configuration. Be sure to change the 'device' variable in the parked_event_object_recognition_test.py file to the device you want to test.")
    exit()

resolution = [config["dashcam"]["capture"]["video"]["devices"][device]["resolution"]["width"], config["dashcam"]["capture"]["video"]["devices"][device]["resolution"]["height"]] # This determines the resolution that will be used for the video capture device.
capture = cv2.VideoCapture(config["dashcam"]["capture"]["video"]["devices"][device]["index"]) # Open the video capture device configured for this device. (Previously pointed at a developer-local test video file.)
codec = list(config["dashcam"]["capture"]["video"]["devices"][device]["codec"])
capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(codec[0], codec[1], codec[2], codec[3])) # Set the capture codec to the four-character code specified in the configuration.
capture.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0]) # Set the video stream width.
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1]) # Set the video stream height.

# NOTE(review): the background subtractor and total-area calculation from the motion-detection test were removed here, since object recognition never uses them.

output = cv2.VideoWriter("./predator_object_recognition_test.avi", cv2.VideoWriter_fourcc(*'XVID'), 16, (resolution[0], resolution[1])) # Open the video file that annotated frames will be written to.


while True:
    ret, frame = capture.read() # Read the next frame from the capture device.
    if (not ret): # Check to see if the frame was read successfully (for example, a video file source may have ended).
        break

    detected_objects = predict(frame) # Run object recognition on this frame.
    for element in detected_objects:
        print(element["name"], element["conf"])
        if (element["conf"] >= config["dashcam"]["parked"]["event"]["trigger_object_recognition"]["minimum_confidence"] and element["name"] in config["dashcam"]["parked"]["event"]["trigger_object_recognition"]["objects"]): # Check to see if this object is in the list of target objects.
            print("Detected event.")
            color = config["dashcam"]["parked"]["event"]["label"]["color"]
            cv2.rectangle(frame, (element["bbox"]["x1"], element["bbox"]["y1"]), (element["bbox"]["x2"], element["bbox"]["y2"]), (color[2], color[1], color[0]), 2) # Draw a box around the detected object. The configured color channels are reversed here, presumably because OpenCV expects BGR rather than RGB — confirm against the config format.

    cv2.imshow('Object Detection', frame)

    output.write(frame) # Save this frame to the video file.
    if cv2.waitKey(1) == ord('q'): # Allow the user to press 'q' to exit.
        break

output.release() # Finalize the output video file so it isn't left truncated.
capture.release()
cv2.destroyAllWindows()

0 comments on commit c3fd09b

Please sign in to comment.