I have read a few articles and watched some videos on lane detection, and decided to learn how it works. I'm completely new to OpenCV, so please forgive any naive questions. I took the Udacity open-source project to build lane detection, but I'm not able to execute the code: I get a ValueError that I don't understand.
Code:
import numpy as np
import cv2
import math
import matplotlib.pyplot as plt

def grayscale(img):
    """Applies the Grayscale transform
    This will return an image with only one color channel
    but NOTE: to see the returned image as grayscale
    you should call plt.imshow(gray, cmap='gray')"""
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform"""
    return cv2.Canny(img, low_threshold, high_threshold)

def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel"""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)

def region_of_interest(img, vertices):
    """
    Applies an image mask.
    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    # defining a blank mask to start with
    mask = np.zeros_like(img)
    # defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    # filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    # returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image

def draw_lines(img, lines, color=[255, 0, 0], thickness=10):
    """
    NOTE: this is the function you might want to use as a starting point once you want to
    average/extrapolate the line segments you detect to map out the full
    extent of the lane (going from the result shown in raw-lines-example.mp4
    to that shown in P1_example.mp4).
    Think about things like separating line segments by their
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line. Then, you can average the position of each of
    the lines and extrapolate to the top and bottom of the lane.
    This function draws `lines` with `color` and `thickness`.
    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below
    """
    imshape = img.shape
    left_x1 = []
    left_x2 = []
    right_x1 = []
    right_x2 = []
    y_min = img.shape[0]
    y_max = int(img.shape[0] * 0.611)
    for line in lines:
        for x1, y1, x2, y2 in line:
            if ((y2 - y1) / (x2 - x1)) < 0:
                mc = np.polyfit([x1, x2], [y1, y2], 1)
                left_x1.append(np.int(np.float((y_min - mc[1])) / np.float(mc[0])))
                left_x2.append(np.int(np.float((y_max - mc[1])) / np.float(mc[0])))
                # cv2.line(img, (xone, imshape[0]), (xtwo, 330), color, thickness)
            elif ((y2 - y1) / (x2 - x1)) > 0:
                mc = np.polyfit([x1, x2], [y1, y2], 1)
                right_x1.append(np.int(np.float((y_min - mc[1])) / np.float(mc[0])))
                right_x2.append(np.int(np.float((y_max - mc[1])) / np.float(mc[0])))
                # cv2.line(img, (xone, imshape[0]), (xtwo, 330), color, thickness)
    l_avg_x1 = np.int(np.nanmean(left_x1))
    l_avg_x2 = np.int(np.nanmean(left_x2))
    r_avg_x1 = np.int(np.nanmean(right_x1))
    r_avg_x2 = np.int(np.nanmean(right_x2))
    # print([l_avg_x1, l_avg_x2, r_avg_x1, r_avg_x2])
    cv2.line(img, (l_avg_x1, y_min), (l_avg_x2, y_max), color, thickness)
    cv2.line(img, (r_avg_x1, y_min), (r_avg_x2, y_max), color, thickness)

def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.
    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                            minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros(img.shape, dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img

def process_image(img):
    img_test = grayscale(img)
    img_test = gaussian_blur(img_test, 7)
    img_test = canny(img_test, 50, 150)
    imshape = img.shape
    vertices = np.array([[(100, imshape[0]), (400, 330), (600, 330), (imshape[1], imshape[0])]], dtype=np.int32)
    img_test = region_of_interest(img_test, vertices)
    rho = 2  # distance resolution in pixels of the Hough grid
    theta = np.pi / 180  # angular resolution in radians of the Hough grid
    threshold = 55  # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 40  # minimum number of pixels making up a line
    max_line_gap = 100  # maximum gap in pixels between connectable line segments
    line_image = np.copy(img) * 0  # creating a blank to draw lines on
    img_test = hough_lines(img_test, rho, theta, threshold, min_line_length, max_line_gap)
    return img_test

img = cv2.imread("sy1.jpg")
res = process_image(img)
plt.imshow(res)
The resulting error:
/Users/ViditShah/anaconda/envs/py27/bin/python /Users/ViditShah/Downloads/untitled1/gist.py
/Users/ViditShah/Downloads/untitled1/gist.py:85: RuntimeWarning: Mean of empty slice
r_avg_x1 = np.int(np.nanmean(right_x1))
Traceback (most recent call last):
File "/Users/ViditShah/Downloads/untitled1/gist.py", line 122, in <module>
res = process_image(img)
File "/Users/ViditShah/Downloads/untitled1/gist.py", line 117, in process_image
img_test = hough_lines(img_test, rho, theta, threshold, min_line_length, max_line_gap)
File "/Users/ViditShah/Downloads/untitled1/gist.py", line 100, in hough_lines
draw_lines(line_img, lines)
File "/Users/ViditShah/Downloads/untitled1/gist.py", line 85, in draw_lines
r_avg_x1 = np.int(np.nanmean(right_x1))
ValueError: cannot convert float NaN to integer
Process finished with exit code 1
I'm using Python 2.7.
Please guide me. Yours sincerely, Vidit Shah
One possibility is a division by zero producing NaNs when you calculate the gradient; try filtering out segments where x1 == x2 before computing the slope, as sketched below. That source of error will only crop up rarely, though.

The more important issue is that, given the structure of your code, the threshold in the Hough transform (55) is set too high. If the Hough stage does not identify any lines, the left/right lists in draw_lines stay empty, np.nanmean of an empty list returns NaN, and converting that NaN to an integer raises exactly the ValueError you are seeing.
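As a minimal sketch of that filter (reusing the variable names from your draw_lines; the explicit float cast is there because on Python 2 dividing two integers truncates the slope toward zero):

for line in lines:
    for x1, y1, x2, y2 in line:
        if x2 == x1:
            continue  # vertical segment: the slope is undefined, skip it
        slope = float(y2 - y1) / float(x2 - x1)  # float cast avoids Python 2 integer division
        mc = np.polyfit([x1, x2], [y1, y2], 1)
        if slope < 0:  # negative slope: left lane line (the image y axis points down)
            left_x1.append(int((y_min - mc[1]) / mc[0]))
            left_x2.append(int((y_max - mc[1]) / mc[0]))
        elif slope > 0:  # positive slope: right lane line
            right_x1.append(int((y_min - mc[1]) / mc[0]))
            right_x2.append(int((y_max - mc[1]) / mc[0]))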
You can get around this either by lowering the threshold (at the cost of more spurious detections in the cases where it does work) or by changing something else in your code, for example adding error handling, or pre-processing the image differently so that the Hough step always outputs lines.
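A sketch of the error-handling route, again reusing the names from your draw_lines: return early when cv2.HoughLinesP found nothing (it returns None in that case), and only average and draw a side whose list is non-empty:

if lines is None:
    return  # cv2.HoughLinesP returns None when no line clears the threshold

# ... the slope-filtering loop from above fills the four lists ...

if left_x1 and left_x2:  # np.nanmean of an empty list is NaN, and int(NaN) raises ValueError
    cv2.line(img, (int(np.nanmean(left_x1)), y_min),
             (int(np.nanmean(left_x2)), y_max), color, thickness)
if right_x1 and right_x2:
    cv2.line(img, (int(np.nanmean(right_x1)), y_min),
             (int(np.nanmean(right_x2)), y_max), color, thickness)

With these guards, a frame where the Hough step comes up empty simply produces no overlay instead of crashing, which matters once you run the pipeline on video rather than a single image.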