Commit
Updating the readme file and python script.
BabakShah committed Dec 13, 2017
1 parent 059c391 commit a5b4657
Showing 3 changed files with 136 additions and 102 deletions.
136 changes: 97 additions & 39 deletions AdvancedLaneFind.py
@@ -415,8 +415,60 @@ def draw_lane(warped, undist, left_fitx, right_fitx, PerspTransInv, ploty):
    newwarp = cv2.warpPerspective(color_warp, PerspTransInv, (undist.shape[1], undist.shape[0]))
    # Combine the result with the original image
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
    # plt.imshow(result)
    # plt.show()

    print('Lane lines drawn on the image!')
    return result

#======================================================================
#=== radius of curvature and distance =================================
#======================================================================

# Calculates radius of curvature and distance from lane center
def measure_curvature(bin_img, l_fit, r_fit, l_lane_inds, r_lane_inds, ploty):

    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/700 # meters per pixel in x dimension
    left_curverad, right_curverad, center_dist = (0, 0, 0)

    # Define maximum y-value corresponding to the bottom of the image
    y_eval = np.max(ploty)

    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = bin_img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    leftx = nonzerox[l_lane_inds]
    lefty = nonzeroy[l_lane_inds]
    rightx = nonzerox[r_lane_inds]
    righty = nonzeroy[r_lane_inds]

    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)

    # Calculate the new radii of curvature
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])

    # Now our radius of curvature is in meters
    print(left_curverad,'m',right_curverad,'m')
    return left_curverad, right_curverad

#======================================================================
#=== Draw Radius of Curvature =========================================
#======================================================================

def draw_data(img, curv_rad):
    new_img = np.copy(img)
    font = cv2.FONT_HERSHEY_DUPLEX
    text = 'Curve radius: ' + '{:04.2f}'.format(curv_rad) + 'm'
    cv2.putText(new_img, text, (40,70), font, 1.5, (200,255,155), 2, cv2.LINE_AA)
    print('Lane data drawn on the image!')
    return new_img

#==========================================================================
#=== Processing Image Pipeline ============================================
@@ -463,50 +515,56 @@ def process_image(img):
    else:
        left_fitx, right_fitx, left_fit, right_fit, left_lane_inds, right_lane_inds, ploty = next_fit_polynomial(grad_and_color_combined, left_fit, right_fit)

    img_out1 = draw_lane(grad_and_color_combined, undist, left_fitx, right_fitx, PerspTransInv, ploty)
    return img_out1
    # Visualizes undistortion one by one
    # cv2.imshow('FRAME',grad_and_color_combined)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    img_with_lane = draw_lane(grad_and_color_combined, undist, left_fitx, right_fitx, PerspTransInv, ploty)

    rad_left, rad_right = measure_curvature(grad_and_color_combined, left_fitx, right_fitx, left_lane_inds, right_lane_inds, ploty)
    img_with_lane_and_data = draw_data(img_with_lane, (rad_left+rad_right)/2)

    processed_img = cv2.cvtColor(img_with_lane_and_data, cv2.COLOR_BGR2RGB)
    # Visualizes the image with lane line fits and radius data
    cv2.imshow('FRAME', processed_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

#======================================================================
#=== Tracking Lane Lines ==============================================
#======================================================================

# Defines a class to receive the characteristics of each line detection
class Line():
    def __init__(self):
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line
        self.recent_xfitted = []
        #average x values of the fitted line over the last n iterations
        self.bestx = None
        #polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        #polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        #radius of curvature of the line in some units
        self.radius_of_curvature = None
        #distance in meters of vehicle center from the line
        self.line_base_pos = None
        #difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float')
        #x values for detected line pixels
        self.allx = None
        #y values for detected line pixels
        self.ally = None

l_line = Line()
r_line = Line()
# for image in range(0, 5):
# process_image(img)
# image += 1

for image in range(0, 2):
    process_image(img)
    image += 1

# video_output1 = 'challenge_video_output.mp4'
# video_input1 = VideoFileClip('challenge_video.mp4')
# print (video_input1.fps)
# video_input2 = video_input1.subclip(10, 15)
# processed_video = video_input2.fl_image(process_image)

# processed_video.write_videofile(video_output1, audio=False)
102 changes: 39 additions & 63 deletions README.md
@@ -28,7 +28,7 @@ The steps of this project are the following:
I start camera calibration with chessboard images. Using `cv2.findChessboardCorners()` I find the corners of each chessboard, and then draw them on the chessboard images with `cv2.drawChessboardCorners()`, as shown below.

<center>
<img src="./ChessboardsWithCorners.png" alt="Distorted" style="width: 100%;"/>
<img src="./output_images/ChessboardsWithCorners.png" alt="Distorted" style="width: 100%;"/>
</center>

I then prepared "object points", which will be the (x, y, z) coordinates of the chessboard corners in the world. Here I am assuming the chessboard is fixed on the (x, y) plane at z=0, such that the object points are the same for each calibration image. Thus, `objp` is just a replicated array of coordinates, and `objpoints` will be appended with a copy of it every time I successfully detect all chessboard corners in a test image. `imgpoints` will be appended with the (x, y) pixel position of each of the corners in the image plane with each successful chessboard detection. Finally, I used the output `objpoints` and `imgpoints` to compute the camera calibration matrix and distortion coefficients using the `cv2.calibrateCamera()` function.
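
For reference, the corner-finding and calibration steps fit into a short sketch like the one below; the chessboard size, image glob, and variable names here are illustrative assumptions rather than the exact ones used in the script.

```python
import glob
import cv2
import numpy as np

nx, ny = 9, 6  # assumed inner-corner counts of the calibration chessboards
objp = np.zeros((nx * ny, 3), np.float32)
objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)  # (x, y) grid, z = 0

objpoints, imgpoints = [], []  # 3D points in world space, 2D points in image plane
for fname in glob.glob('camera_cal/calibration*.jpg'):  # assumed path
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if ret:
        objpoints.append(objp)
        imgpoints.append(corners)
        cv2.drawChessboardCorners(img, (nx, ny), corners, ret)

# Compute the camera matrix and distortion coefficients, then undistort an image
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)
undistorted = cv2.undistort(img, mtx, dist, None, mtx)
```
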
@@ -68,7 +68,7 @@ I verified that my perspective transform was working as expected by drawing the

#### 3. Combining color and gradient thresholds

I used a combination of color and gradient thresholds to generate a binary image (thresholding steps at lines 227 through 241 in `Clean_AdvancedLaneLine.py`). Here's my output for this step.
I used a combination of color and gradient thresholds to generate a binary image. Here's my output for this step.

The Sobel gradient thresholds:

@@ -88,88 +88,64 @@ And the combined gradient and HLS color thresholds:
<img src="./output_images/Combined1.jpg" alt="Road image" style="width: 100%;"/>
</center>
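
A rough sketch of how such a combination can be built is shown below; the particular channels (x-direction Sobel on grayscale, S channel of HLS) and the threshold values are illustrative assumptions, not necessarily the exact ones in the script.

```python
import cv2
import numpy as np

def combined_threshold(img, sobel_thresh=(20, 100), s_thresh=(170, 255)):
    # Gradient threshold on the x-direction Sobel of the grayscale image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    abs_sobelx = np.absolute(sobelx)
    scaled = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    grad_binary = (scaled >= sobel_thresh[0]) & (scaled <= sobel_thresh[1])

    # Color threshold on the S channel of the HLS color space
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    s_channel = hls[:, :, 2]
    color_binary = (s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])

    # A pixel is kept if either the gradient or the color threshold fires
    combined = np.zeros_like(gray)
    combined[grad_binary | color_binary] = 1
    return combined
```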

#### 4. Identify lane-line pixels and fit their positions with a polynomial

Then I did fit my lane lines with a 2nd order polynomial like this (lines 245 through 361 in `Clean_AdvancedLaneLine.py`):

<!--<center>
<img src="./output_images/SlidingWindow.png" alt="Road image" style="width: 100%;"/>
</center>-->
I then fit my lane lines with a 2nd-order polynomial, like this:

<center>
<img src="./output_images/Histogram.jpg" alt="Road image" style="width: 100%;"/>
</center>
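
In condensed form, the idea is: take a histogram of the bottom half of the warped binary image, use its two peaks as the left and right lane-line base positions, collect the pixels belonging to each line, and fit x = A*y^2 + B*y + C to them. The sketch below compresses the pixel-collection step into a simple left/right split (the real implementation uses a sliding-window search); the names and details here are assumptions.

```python
import numpy as np

def fit_lane_lines(binary_warped):
    # Histogram of the bottom half of the binary warped image: the two peaks
    # give starting x positions for the left and right lane lines
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # A sliding-window search around these bases would collect the lane pixels;
    # here we simply treat all nonzero pixels on each half as that line's pixels.
    nonzeroy, nonzerox = binary_warped.nonzero()
    left_mask = nonzerox < midpoint
    leftx, lefty = nonzerox[left_mask], nonzeroy[left_mask]
    rightx, righty = nonzerox[~left_mask], nonzeroy[~left_mask]

    # Fit a 2nd-order polynomial x = A*y**2 + B*y + C to each line
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]
    return left_fit, right_fit, left_fitx, right_fitx, ploty
```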

<!--<center>
<img src="./output_images/PolyPrevious.png" alt="Road image" style="width: 100%;"/>
</center>-->

#### 5. Calculating the radius of curvature of the lane and the position of the vehicle with respect to center.
#### 5. Calculating the radius of curvature of the lane

The code to calculate the radius of curvature of the lane and the position of the vehicle with respect to center is as follows (lines 363 through 406 in `Clean_AdvancedLaneLine.py`):
The function to calculate the radius of curvature of the lane is as follows:

```python
# Method to determine radius of curvature and distance from lane center
# based on binary image, polynomial fit, and L and R lane pixel indices
def calc_curv_rad_and_center_dist(bin_img, l_fit, r_fit, l_lane_inds, r_lane_inds):
    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 3.048/100 # meters per pixel in y dimension, lane line is 10 ft = 3.048 meters
    xm_per_pix = 3.7/378 # meters per pixel in x dimension, lane width is 12 ft = 3.7 meters
    left_curverad, right_curverad, center_dist = (0, 0, 0)
    # Define y-value where we want radius of curvature
    # I'll choose the maximum y-value, corresponding to the bottom of the image
    h = bin_img.shape[0]
    ploty = np.linspace(0, h-1, h)
    y_eval = np.max(ploty)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = bin_img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Again, extract left and right line pixel positions
    leftx = nonzerox[l_lane_inds]
    lefty = nonzeroy[l_lane_inds]
    rightx = nonzerox[r_lane_inds]
    righty = nonzeroy[r_lane_inds]
    if len(leftx) != 0 and len(rightx) != 0:
        # Fit new polynomials to x,y in world space
        left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
        right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
        # Calculate the new radii of curvature
        left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
        right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
        # Now our radius of curvature is in meters
    # Distance from center is image x midpoint - mean of l_fit and r_fit intercepts
    if r_fit is not None and l_fit is not None:
        car_position = bin_img.shape[1]/2
        l_fit_x_int = l_fit[0]*h**2 + l_fit[1]*h + l_fit[2]
        r_fit_x_int = r_fit[0]*h**2 + r_fit[1]*h + r_fit[2]
        lane_center_position = (r_fit_x_int + l_fit_x_int) /2
        center_dist = (car_position - lane_center_position) * xm_per_pix
    return left_curverad, right_curverad, center_dist

print('...')

rad_l, rad_r, d_center = calc_curv_rad_and_center_dist(exampleImg_bin, left_fit, right_fit, left_lane_inds, right_lane_inds)

print('Radius of curvature for example:', rad_l, 'm,', rad_r, 'm')
print('Distance from lane center for example:', d_center, 'm')

def measure_curvature(bin_img, l_fit, r_fit, l_lane_inds, r_lane_inds, ploty):

    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/700 # meters per pixel in x dimension
    left_curverad, right_curverad, center_dist = (0, 0, 0)

    # Define maximum y-value corresponding to the bottom of the image
    y_eval = np.max(ploty)

    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = bin_img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    leftx = nonzerox[l_lane_inds]
    lefty = nonzeroy[l_lane_inds]
    rightx = nonzerox[r_lane_inds]
    righty = nonzeroy[r_lane_inds]

    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)

    # Calculate the new radii of curvature
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])

    # Now our radius of curvature is in meters
    print(left_curverad,'m',right_curverad,'m')
    return left_curverad, right_curverad
```
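
The `left_curverad`/`right_curverad` expressions above are the standard radius-of-curvature formula for a second-order fit x = Ay² + By + C, evaluated at the bottom of the image (y = `y_eval`), with A and B taken from the polynomial refit in meter units so the result comes out in meters:

```latex
R_{\text{curve}} = \frac{\left(1 + \left(2A\,y_{\text{eval}} + B\right)^2\right)^{3/2}}{\left|2A\right|}
```
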
#### 6. Image of result plotted back down onto the road such that the lane area is identified clearly

With and without radius and lane calculations (lines 410 through 458 in `Clean_AdvancedLaneLine.py`):
With lane radius calculations:

<center>
<img src="./output_images/DrawRadius.jpg" alt="Road image" style="width: 100%;"/>
<img src="./output_images/Output.png" alt="Road image" style="width: 90%;"/>
</center>
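
The overlay itself follows the tail of `draw_lane` shown in the diff above: a lane polygon is drawn on a blank bird's-eye canvas, warped back with the inverse perspective matrix `PerspTransInv`, and blended onto the undistorted frame with `cv2.addWeighted`. Below is a trimmed sketch; the polygon-filling part is an assumption, since only the end of `draw_lane` appears in this commit.

```python
import cv2
import numpy as np

def overlay_lane(undist, warped, left_fitx, right_fitx, ploty, PerspTransInv):
    # Blank canvas in the warped (bird's-eye) space
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Lane polygon between the two fitted lines
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp back to the original perspective and blend with the undistorted image
    newwarp = cv2.warpPerspective(color_warp, PerspTransInv,
                                  (undist.shape[1], undist.shape[0]))
    return cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
```
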
---

### Pipeline (video)

#### 1. Final video output

(lines 460 through 647 in `Clean_AdvancedLaneLine.py`)
Here's a [link to my video result](./project_video_output.mp4)
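
The video is produced with moviepy's `fl_image`, as in the (currently commented-out) block at the end of `AdvancedLaneFind.py`. A minimal sketch follows; the file names here follow the README link above, while the script itself targets `challenge_video.mp4`.

```python
from moviepy.editor import VideoFileClip

clip = VideoFileClip('project_video.mp4')
# fl_image applies the single-image pipeline to every frame of the clip;
# process_image must return an RGB frame for this to work
processed = clip.fl_image(process_image)
processed.write_videofile('project_video_output.mp4', audio=False)
```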

---
Binary file added output_images/Output.png
