Skip to content

Commit

Permalink
Update Readme file and Python script.
Browse files Browse the repository at this point in the history
  • Loading branch information
BabakShah committed Nov 5, 2017
1 parent 989aeb0 commit 0babaf2
Show file tree
Hide file tree
Showing 10 changed files with 58,098 additions and 68 deletions.
171 changes: 123 additions & 48 deletions AdvancedLaneLIne.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,82 +9,157 @@
# calibrate camera and remove distortion from the image

def points ():
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Prepare object points
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)
# Prepare object points
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)

# Arrays to store object points and image points
# from the calibration images
# Arrays to store object points and image points
# from the calibration images
objpoints = [] # 3D points in real world space
imgpoints = [] # 2D points in image plane

objpoints = []
imgpoints = []
# Make a list of calibration images
images = glob.glob('./camera_cal_images/*.jpg')

images = glob.glob('./camera_cal_images/*.jpg')
# fig, axs = plt.subplots(5,4, figsize=(16, 11))
# fig.subplots_adjust(hspace = .2, wspace=.001)
# axs = axs.ravel()

for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Go through the list and seach for chess board corners
for i, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (9,6), None)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (9,6), None)

# If found, add object points, image points
if ret == True:
objpoints.append(objp)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)

corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
imgpoints.append(corners2)
# Refining image points
corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
imgpoints.append(corners2)

# Draw and display the corners
img = cv2.drawChessboardCorners(img, (9,6), corners2, ret)
# cv2.imshow('img',img)
# cv2.waitKey(100)

cv2.destroyAllWindows()
return objpoints, imgpoints
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (9,6), corners2, ret)
# axs[i].axis('off')
# axs[i].imshow(img)
# cv2.imshow('img',img)
# cv2.waitKey(100)

cv2.destroyAllWindows()
return objpoints, imgpoints, corners, ret

def calibration(objpoints, imgpoints):
# Read an image
img = cv2.imread('./camera_cal_images/calibration1.jpg')
img_size = (img.shape[1], img.shape[0])

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
# Read an image camera_cal_images
img = cv2.imread('./input_images/test6.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_size = (img.shape[1], img.shape[0])

return mtx, dist
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)

return mtx, dist, img
# './input_images/test6.jpg'
# './camera_cal_images/calibration1.jpg'
def undistortion(mtx, dist):
img = cv2.imread('./camera_cal_images/calibration1.jpg')
undist = cv2.undistort(img, mtx, dist, None, mtx)

return img, undist
img = cv2.imread('./input_images/test6.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
undist = cv2.undistort(img, mtx, dist, None, mtx)

return undist

objpoints, imgpoints = points()
# objpoints, imgpoints, corners = points()

# # print(objpoints)
# # print(imgpoints)
# mtx, dist, img = calibration(objpoints, imgpoints)

print(objpoints)
print(imgpoints)
mtx, dist = calibration(objpoints, imgpoints)


print("mtx: ",mtx)
print("dist: ",dist)
img, undist = undistortion(mtx, dist)
# # print("mtx: ",mtx)
# # print("dist: ",dist)
# undist = undistortion(mtx, dist)

# Visualize undistortion
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
# fig, axs = plt.subplots(1, 2, figsize=(20, 10))

# axs[0].imshow(img)
# axs[0].set_title('Original Image', fontsize=30)

# axs[1].imshow(undist)
# axs[1].set_title('Undistorted Image', fontsize=30)

# fig.tight_layout()
# # mpimg.imsave("test-after.jpg", color_select)
# # plt.imsave("output_images/test_before2.jpg", img)
# # plt.imsave("output_images/test_after2.jpg", undist)
# plt.show()

# nx = 9 # the number of inside corners in x
# ny = 6 # the number of inside corners in y

#===================================================
def unwarp(undist, src, ret):
    """Perspective-warp *undist* to a top-down ("bird's-eye") view.

    Maps the four hand-picked road-trapezoid points in ``src`` onto a
    rectangle inset ``offset`` pixels from the left/right image edges.

    Parameters:
        undist: undistorted image array (color; converted to gray only to
            read its height/width).
        src: np.float32 array of 4 source points, ordered top-left,
            top-right, bottom-left, bottom-right.
        ret: corner-detection success flag from the calibration step; the
            warp is only meaningful when it is truthy.

    Returns:
        (warped, M): the warped image and the 3x3 perspective matrix.

    Raises:
        ValueError: if ``ret`` is falsy.  (The original code fell through
        to ``return warped, M`` with both names unassigned, raising an
        opaque UnboundLocalError instead.)
    """
    # Grayscale copy is used only for its (height, width) shape.
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)

    if not ret:
        # Fail loudly with a clear message rather than UnboundLocalError.
        raise ValueError("unwarp: corner detection failed (ret is falsy)")

    # Horizontal margin of the destination rectangle, in pixels.
    offset = 450
    img_size = (gray.shape[1], gray.shape[0])  # (width, height)

    # Destination points: rectangle spanning full height, inset by
    # `offset` on the left and right.
    dst = np.float32([[offset, 0],
                      [img_size[0] - offset, 0],
                      [offset, img_size[1]],
                      [img_size[0] - offset, img_size[1]]])

    # Transform matrix mapping src -> dst, then apply it.
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(undist, M, img_size, flags=cv2.INTER_LINEAR)

    return warped, M

# Hand-picked perspective-transform source points: a trapezoid on the road
# surface, ordered top-left, top-right, bottom-left, bottom-right.
# NOTE(review): coordinates suggest a 1280x720 frame -- confirm against the
# input images.
src = np.float32([(575,464),
(707,464),
(258,682),
(1049,682)])
# Collect chessboard object/image points from the calibration image set.
# NOTE(review): `corners` and `ret` come from the last calibration image
# processed inside points() -- verify that is the intended behavior.
objpoints, imgpoints, corners, ret = points()

# print(objpoints)
# print(imgpoints)
# Compute the camera matrix and distortion coefficients; calibration()
# also returns the RGB test image it reads internally.
mtx, dist, img = calibration(objpoints, imgpoints)

# print("mtx: ",mtx)
# print("dist: ",dist)
# Remove lens distortion from the test image using the calibration above.
undist = undistortion(mtx, dist)

# Warp the undistorted road image to a top-down view.
unwarped, M = unwarp(undist, src, ret)

# Visualize the three pipeline stages side by side.
fig, axs = plt.subplots(1, 3, figsize=(20, 10))

axs[0].imshow(img)
axs[0].set_title('Original Image', fontsize=30)

axs[1].imshow(undist)
axs[1].set_title('Undistorted Image', fontsize=30)

axs[2].imshow(unwarped)
axs[2].set_title('Unwarped Image', fontsize=30)

fig.tight_layout()
# mpimg.imsave("test-after.jpg", color_select)
# plt.imsave("test_output/test_after.jpg", results)
# plt.imsave("output_images/test_before2.jpg", img)
# plt.imsave("output_images/test_after2.jpg", undist)
plt.show()
49 changes: 29 additions & 20 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,12 +1,9 @@
# SDC-AdvancedLaneLines
Self-driving Car Project: Advanced Lane Lines Detection and Tracking

## Writeup Template


**Advanced Lane Finding Project**

The goals / steps of this project are the following:
The steps of this project are the following:

* Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
* Apply a distortion correction to raw images.
Expand All @@ -31,38 +28,50 @@ The goals / steps of this project are the following:

---

### Writeup / README

#### 1. Provide a Writeup / README that includes all the rubric points and how you addressed each one. You can submit your writeup as markdown or pdf. [Here](https://github.com/udacity/CarND-Advanced-Lane-Lines/blob/master/writeup_template.md) is a template writeup for this project you can use as a guide and a starting point.

You're reading it!

### Camera Calibration
### Pipeline (single images)

#### 1. Briefly state how you computed the camera matrix and distortion coefficients. Provide an example of a distortion corrected calibration image.
#### 1. Camera calibration and distortion-correction

The code for this step is contained in the first code cell of the IPython notebook located in "./examples/example.ipynb" (or in lines # through # of the file called `some_file.py`).
The code for this step is contained in lines 11 through 76 of the file called `AdvancedLaneLIne.py`.

I start by preparing "object points", which will be the (x, y, z) coordinates of the chessboard corners in the world. Here I am assuming the chessboard is fixed on the (x, y) plane at z=0, such that the object points are the same for each calibration image. Thus, `objp` is just a replicated array of coordinates, and `objpoints` will be appended with a copy of it every time I successfully detect all chessboard corners in a test image. `imgpoints` will be appended with the (x, y) pixel position of each of the corners in the image plane with each successful chessboard detection.

I then used the output `objpoints` and `imgpoints` to compute the camera calibration and distortion coefficients using the `cv2.calibrateCamera()` function. I applied this distortion correction to the test image using the `cv2.undistort()` function and obtained this result:

![alt text][image1]

### Pipeline (single images)

For the chessboard image:

<figure>
<center>
<img src="./output_images/compare_chessboard.jpg" alt="Distorted" style="width: 100%;"/>
<!--<figcaption>Distorted image</figcaption>-->
</center>
</figure>
<!--![Distorted](./output_images/test_before.jpg){width= 40%}
*Distorted*
![Undistorted](./output_images/test_after.jpg)
*Undistorted*-->
And for the road image:
<figure>
<center>
<img src="./output_images/compare_road.jpg" alt="Road image" style="width: 100%;"/>
<!--<figcaption>Distorted image</figcaption>-->
</center>
</figure>

<!--
#### 1. Provide an example of a distortion-corrected image.
To demonstrate this step, I will describe how I apply the distortion correction to one of the test images like this one:
![alt text][image2]
![alt text][image2]-->

#### 2. Describe how (and identify where in your code) you used color transforms, gradients or other methods to create a thresholded binary image. Provide an example of a binary image result.
#### 2. Color transformation, gradients to create a thresholded binary image

I used a combination of color and gradient thresholds to generate a binary image (thresholding steps at lines # through # in `another_file.py`). Here's an example of my output for this step. (note: this is not actually from one of the test images)

![alt text][image3]

#### 3. Describe how (and identify where in your code) you performed a perspective transform and provide an example of a transformed image.
#### 3. Perspective transform

The code for my perspective transform includes a function called `warper()`, which appears in lines 1 through 8 in the file `example.py` (output_images/examples/example.py) (or, for example, in the 3rd code cell of the IPython notebook). The `warper()` function takes as inputs an image (`img`), as well as source (`src`) and destination (`dst`) points. I chose to hardcode the source and destination points in the following manner:

Expand Down
21,277 changes: 21,277 additions & 0 deletions output_images/compare_chessboard.ai

Large diffs are not rendered by default.

Binary file added output_images/compare_chessboard.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
36,669 changes: 36,669 additions & 0 deletions output_images/compare_road.ai

Large diffs are not rendered by default.

Binary file added output_images/compare_road.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added output_images/test_after.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added output_images/test_after2.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added output_images/test_before.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added output_images/test_before2.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.

0 comments on commit 0babaf2

Please sign in to comment.