% Create a cascade detector object.
faceDetector = vision.CascadeObjectDetector();
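% The detector defaults to a frontal-face model. If it picks up background
% clutter, its properties can be tightened; the values below are only
% illustrative, not tuned:
% faceDetector.MergeThreshold = 6;   % default is 4; higher = fewer false hits
% faceDetector.MinSize = [50 50];    % ignore detections smaller than this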
% Connect to the webcam and set a lower resolution to keep tracking fast.
cam = webcam(1);
cam.Resolution = '320x240';
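% The resolution string must match one of the modes the camera reports in
% cam.AvailableResolutions.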
% Capture a video frame from the webcam and run the face detector.
videoFrame = snapshot(cam);
bbox = step(faceDetector, videoFrame);
% Draw the returned bounding box around the detected face.
videoFrame = insertShape(videoFrame, 'Rectangle', bbox);
figure; imshow(videoFrame); title('Detected face');
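% bbox is an M-by-4 matrix with one [x y width height] row per detected
% face; only the first detection is tracked below.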
% Convert the first box into a list of 4 points
% This is needed to be able to visualize the rotation of the object.
bboxPoints = bbox2points(bbox(1, :));
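% bbox2points returns the four corners as a 4-by-2 matrix of [x y] pairs.
% Unlike the axis-aligned rectangle, these corners can rotate and scale
% under the similarity transform estimated inside the tracking loop.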
% Detect feature points within the first detected face region.
points = detectMinEigenFeatures(rgb2gray(videoFrame), 'ROI', bbox(1, :));
% Display the detected points.
figure, imshow(videoFrame), hold on, title('Detected features');
plot(points);
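% detectMinEigenFeatures implements the Shi-Tomasi minimum-eigenvalue
% corner detector; corner-like features are what the KLT point tracker
% follows reliably from frame to frame.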
% Create a point tracker and enable the bidirectional error constraint to
% make it more robust in the presence of noise and clutter.
pointTracker = vision.PointTracker('MaxBidirectionalError', 2);
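% With MaxBidirectionalError set, the tracker tracks each point forward to
% the new frame and back again; points whose round-trip error exceeds the
% threshold (in pixels) are marked invalid. The default, Inf, disables
% this check.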
% Initialize the tracker with the initial point locations and the initial
% video frame.
points = points.Location;
initialize(pointTracker, points, videoFrame);
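% initialize (and later setPoints) expect an M-by-2 matrix of [x y]
% locations, which is why the Location field is extracted from the
% cornerPoints object above.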
videoPlayer = vision.VideoPlayer('Position',...
[100 100 [size(videoFrame, 2), size(videoFrame, 1)]+30]);
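% Position is [left bottom width height] in pixels, so the player window
% is sized to the frame plus a 30-pixel margin.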
% Make a copy of the points to be used for computing the geometric
% transformation between the points in the previous and the current frames
oldPoints = points;
% Track the face over a fixed number of webcam frames. numFrames is an
% arbitrary cap; adjust as needed.
numFrames = 400;
for k = 1:numFrames
    % Capture the next frame from the webcam.
    videoFrame = snapshot(cam);
    % Track the points. Note that some points may be lost.
    [points, isFound] = step(pointTracker, videoFrame);
    visiblePoints = points(isFound, :);
    oldInliers = oldPoints(isFound, :);
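    % A similarity transform (translation, rotation, uniform scale) has
    % four degrees of freedom, so at least two point pairs are required to
    % estimate it in the branch below.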
    if size(visiblePoints, 1) >= 2 % need at least 2 points
        % Estimate the geometric transformation between the old points
        % and the new points and eliminate outliers. estimateGeometricTransform
        % uses MSAC to discard pairs farther than MaxDistance pixels from
        % their projected locations.
        [xform, oldInliers, visiblePoints] = estimateGeometricTransform(...
            oldInliers, visiblePoints, 'similarity', 'MaxDistance', 4);

        % Apply the transformation to the bounding box points.
        bboxPoints = transformPointsForward(xform, bboxPoints);

        % Insert a bounding box around the object being tracked.
        bboxPolygon = reshape(bboxPoints', 1, []);
        videoFrame = insertShape(videoFrame, 'Polygon', bboxPolygon, ...
            'LineWidth', 2);

        % Display tracked points.
        videoFrame = insertMarker(videoFrame, visiblePoints, '+', ...
            'Color', 'white');

        % Reset the points.
        oldPoints = visiblePoints;
        setPoints(pointTracker, oldPoints);
    end
    % Display the annotated video frame using the video player object.
    step(videoPlayer, videoFrame);
end
% Clean up.
release(videoPlayer);
release(pointTracker);
clear cam;