diff --git a/Bachelorarbeit.tex b/Bachelorarbeit.tex
index c8e264281ec2e67c99cd93b967fc3a0652959491..4c77cad2b018904510c19c695402ec3bfde08290 100644
--- a/Bachelorarbeit.tex
+++ b/Bachelorarbeit.tex
@@ -29,6 +29,7 @@
     chap/implementation,
     chap/fazit,
     chap/ausblick,
+    % chap/anhang,
 }
 %% projects specific data:
@@ -71,4 +72,5 @@
     \listoffigures
     \listoftables
     \lstlistoflistings
+    \include{chap/anhang}
 \end{document}
diff --git a/chap/anhang.tex b/chap/anhang.tex
new file mode 100644
index 0000000000000000000000000000000000000000..24dee9f1a3f458a1763168801f90af138d7a2244
--- /dev/null
+++ b/chap/anhang.tex
@@ -0,0 +1,61 @@
+\appendix
+\chapter{Complete Source Code}
+
+\lstdefinestyle{file}{
+    backgroundcolor=\color{lightgray!20!white},
+    breakatwhitespace=true,
+    breaklines=true,
+    framexleftmargin=2pt,
+    framextopmargin=5pt,
+    framexbottommargin=5pt,
+    numbers=left,
+    numberstyle=\tiny\ttfamily\color{gray},
+    numbersep=5pt,
+    belowcaptionskip=-.5\smallskipamount,
+}
+\newcommand{\fmtTitle}[1]{\hspace*{-1.55cm}\parbox{\textwidth}{\hfill\lstinline{#1}}}
+
+  \section{Script for the Intrinsic Calibration}
+  \lstinputlisting[
+    style=file,
+    title=\fmtTitle{camera_calibration.py},
+    language=Python
+  ]{code/camera_calibration.py}
+
+  \section{ROS Node for Applying the Calibration}
+  \lstinputlisting[
+    style=file,
+    title=\fmtTitle{image_undistort.cpp},
+    language=C++
+  ]{code/undistorter.cpp}
+
+  \section{First Draft of the Lane-Marking Detection in Python}
+  \lstinputlisting[
+    style=file,
+    title=\fmtTitle{demo.py},
+    language=Python
+  ]{code/demo.py}
+
+  \section{Lane-Marking Detection as a ROS Node}
+  \lstinputlisting[
+    style=file,
+    title=\fmtTitle{lane_markings_detection.cpp},
+    language=C++
+  ]{code/lanedetection.cpp}
+  \lstinputlisting[
+    style=file,
+    title=\fmtTitle{lane_markings_detection.hpp},
+    language=C++
+  ]{code/lanedetection.hpp}
+
+  \section{Lane-Marking Detection with a Custom Implementation of the Canny Edge Detector}
+  \lstinputlisting[
+    style=file,
+    title=\fmtTitle{lane_markings_detection_custom.cpp},
+    language=C++
+  ]{code/custom_lanedetection.cpp}
+  \lstinputlisting[
+    style=file,
+    title=\fmtTitle{lane_markings_detection_custom.hpp},
+    language=C++
+  ]{code/custom_lanedetection.hpp}
diff --git a/code/camera_calibration.py b/code/camera_calibration.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a1dc5760b7e6929572ea71078a95e64b46909b9
--- /dev/null
+++ b/code/camera_calibration.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+#
+# This file is documented in the ./README.md file. Please read it for detailed explanations.
+# Usage: `python3 camera_calibration.py` or mark as executable and call directly
+
+# * ----- Package imports: ----- *
+import glob
+import pathlib
+
+import cv2 as cv
+import numpy as np
+import yaml
+
+
+# * ----- Configurations: ----- *
+# * change the following configurations to fit your use case:
+
+# define the grid pattern to look for
+PATTERN = (7,7)
+
+
+
+# * ----- Script code: ----- *
+if __name__ == "__main__":
+
+    # termination criteria
+    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
+
+    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,6,0)
+    objp = np.zeros((PATTERN[0]*PATTERN[1],3), np.float32)
+    objp[:,:2] = np.mgrid[0:PATTERN[0],0:PATTERN[1]].T.reshape(-1,2)
+
+    # Arrays to store object points and image points from all the images.
+    objpoints = [] # 3d points in real world space
+    imgpoints = [] # 2d points in image plane.
+
+    # get all images in current directory
+    folder = pathlib.Path(__file__).parent.resolve()
+    images = glob.glob(f'{folder}/*.png')
+
+    # loop over all images:
+    for fname in images:
+        print(fname)
+        img = cv.imread(fname)
+        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
+
+        # Find the chess board corners
+        ret, corners = cv.findChessboardCorners(gray, PATTERN, None, flags=cv.CALIB_CB_ADAPTIVE_THRESH)
+
+        # If found, add object points, image points (after refining them)
+        if ret:
+            print("valid")
+            objpoints.append(objp)
+            corners2 = cv.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
+            imgpoints.append(corners2)  # use the sub-pixel refined corners
+
+            # Draw and display the corners
+            # cv.drawChessboardCorners(img, (7,6), corners2, ret)
+            # cv.imshow('img', img)
+            # cv.waitKey()
+
+    # get calibration parameters:
+    ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
+
+
+    # --- test ---
+    test_image = images[0]
+    img = cv.imread(test_image)
+    cv.namedWindow("original", cv.WINDOW_NORMAL)
+    cv.imshow('original', img)
+
+    h, w = img.shape[:2]
+    newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
+
+    # undistort
+    dst = cv.undistort(img, mtx, dist, None, newcameramtx)
+    cv.namedWindow("undistorted", cv.WINDOW_NORMAL)
+    cv.imshow('undistorted', dst)
+
+    # crop the image
+    x, y, w, h = roi
+    dst = dst[y:y+h, x:x+w]
+    cv.namedWindow("cropped", cv.WINDOW_NORMAL)
+    cv.imshow('cropped', dst)
+
+
+    print(
+        yaml.dump(
+            {"intrinsic": {"matrix": mtx.tolist(), "distortion": dist.tolist()[0]}},
+            default_flow_style=None,
+        )
+    )
+
+    # calculate re-projection error
+    mean_error = 0
+    for i in range(len(objpoints)):
+        imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
+        error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2)/len(imgpoints2)
+        mean_error += error
+    print(f"total error: {mean_error/len(objpoints)}")
+
+    cv.waitKey()
diff --git a/code/custom_lanedetection.cpp b/code/custom_lanedetection.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..aebd621b21d0998f39ee415d7b69e00db46caf18
--- /dev/null
+++ b/code/custom_lanedetection.cpp
@@ -0,0 +1,332 @@
+#include <ros/ros.h>
+#include <sensor_msgs/Image.h>
+#include <opencv2/opencv.hpp>
+#include <cv_bridge/cv_bridge.h>
+#include <list>
+
+#include "lane_markings_detection.hpp"
+
+
+// global variables needed by callback
+ros::Publisher* image_pub;
+
+
+/**
+ * @brief runs a Canny edge detector on the image and returns an
+ *        orientation classification for each edge pixel
+ *
+ * @param image the image to analyse
+ * @return DataImage<uint8_t>
+ *         An image of identical dimensions with edge pixels marked
+ *         with their classification value
+ */
+DataImage<uint8_t> edgeDetectionClassification(Image image) {
+    DataImage<int> gradients(image.width, image.height);
+    DataImage<float> angles(image.width, image.height);
+    DataImage<uint8_t> canny_edges(image.width, image.height);
+
+    // Compute gradient magnitude and orientation
+    for (int u=1; u < image.height-1; u++) {
+        for (int v=1; v < image.width-1; v++) {
+
+            int dx=0, dy=0;
+            // std::vector<int>::iterator sobel_x = SOBEL_X.begin();
+            // std::vector<int>::iterator sobel_y = SOBEL_Y.begin();
+            for (int y=0; y<3; y++){
+                for (int x=0; x<3; x++){
+                    dx += SOBEL_X[y*3+x] * image[pnt(v+y-1,u+x-1)];
+                    dy += SOBEL_Y[y*3+x] * image[pnt(v+y-1,u+x-1)];
+                }
+            }
+            gradients[pnt(v,u)] = static_cast<int>(sqrt(dx*dx + dy*dy));
+            angles[pnt(v,u)] = static_cast<float>(atan2(dy,dx) / M_PI * 180.0);
+        }
+    }
+
+    // Non-maximum suppression
+    // and classification
+    for (int u=1; u < image.height-1; u++) {
+        for (int v=1; v < image.width-1; v++) {
+            uint8_t negative = 0;
+
+            int grad = gradients[pnt(v,u)];
+            float arc = angles[pnt(v,u)];
+
+            if (arc < 0)
+                negative = 0x10;
+            arc = fabsf(arc);
+
+            if (arc <= 22.5f || arc > 157.5f ) {
+                canny_edges[pnt(v,u)] = negative | V;
+                if (grad < gradients[pnt(v,u-1)] || grad < gradients[pnt(v,u+1)])
+                    gradients[pnt(v,u)] = 0;
+            } else if ( ( !negative && arc < 67.5f ) || ( negative && 112.5f <= arc ) ) {
+                canny_edges[pnt(v,u)] = negative | D1;
+                if (grad < gradients[pnt(v-1,u-1)] || grad < gradients[pnt(v+1,u+1)])
+                    gradients[pnt(v,u)] = 0;
+            } else if ( ( negative && arc < 67.5f ) || ( !negative && 112.5f <= arc ) ) {
+                canny_edges[pnt(v,u)] = negative | D2;
+                if (grad < gradients[pnt(v-1,u+1)] || grad < gradients[pnt(v+1,u-1)])
+                    gradients[pnt(v,u)] = 0;
+            } else if ( 67.5f <= arc && arc < 112.5f ) {
+                canny_edges[pnt(v,u)] = negative | H;
+                if (grad < gradients[pnt(v-1,u)] || grad < gradients[pnt(v+1,u)])
+                    gradients[pnt(v,u)] = 0;
+            } else {
+                ROS_DEBUG("got strange angle: %.2f", arc);
+            }
+        }
+    }
+
+    // thresholding
+    uint8_t T1 = 40;
+    for (int u=1; u < image.height-1; u++)
+        for (int v=1; v < image.width-1; v++) {
+            pnt p(v,u);
+            canny_edges[p] = gradients[p] < T1 ? 0 : canny_edges[p];
+        }
+    return canny_edges;
+}
+
+
+/**
+ * @brief this function gets called for every image that is published
+ *
+ * @param image_msg The published image, gets passed in by ROS
+ */
+void callback_image(sensor_msgs::Image image_msg) {
+
+    ros::Time starttime = ros::Time::now();
+    std::list<Line> all_lines;
+    std::list<Line> left_D1_edges;
+    std::list<Line> right_D1_edges;
+    std::list<Line> left_D2_edges;
+    std::list<Line> right_D2_edges;
+    std::list<LineMarking> markings_found;
+    Image image(image_msg);
+
+
+    // run canny-edge detector and classify edges
+    DataImage<uint8_t> classified_edges = edgeDetectionClassification(image);
+
+
+    // find starting points and follow continuous lines,
+    // creating Line objects
+    for (int u=1; u < classified_edges.height-1; u++) {
+        for (int v=1; v < classified_edges.width-1; v++) {
+            if (!classified_edges[pnt(v,u)])
+                continue;
+
+            // get only classification without direction:
+            uint8_t clsif_org = classified_edges[pnt(v,u)];
+            uint8_t clsif = 0x0f & clsif_org;
+            bool has_neighbour = false;
+
+            switch (clsif) {
+                case V:
+                    if (
+                        V == (0x0f & classified_edges[pnt(v,u-1)]) ||
+                        V == (0x0f & classified_edges[pnt(v-1,u-1)]) ||
+                        V == (0x0f & classified_edges[pnt(v+1,u-1)])
+                    )
+                        has_neighbour = true;
+                    break;
+                case D1:
+                    if (
+                        D1 == (0x0f & classified_edges[pnt(v,u-1)]) ||
+                        D1 == (0x0f & classified_edges[pnt(v+1,u)]) ||
+                        D1 == (0x0f & classified_edges[pnt(v+1,u-1)])
+                    )
+                        has_neighbour = true;
+                    break;
+                case D2:
+                    if (
+                        D2 == (0x0f & classified_edges[pnt(v-1,u)]) ||
+                        D2 == (0x0f & classified_edges[pnt(v,u-1)]) ||
+                        D2 == (0x0f & classified_edges[pnt(v-1,u-1)])
+                    )
+                        has_neighbour = true;
+                    break;
+                case H:
+                    if (
+                        H == (0x0f & classified_edges[pnt(v-1,u)]) ||
+                        H == (0x0f & classified_edges[pnt(v-1,u-1)]) ||
+                        H == (0x0f & classified_edges[pnt(v-1,u+1)])
+                    )
+                        has_neighbour = true;
+                    break;
+            }
+            if ( has_neighbour )
+                continue;
+
+            // no neighbour found means start pixel
+            pnt start(v,u);
+            // follow the line to its end
+            do {
+                classified_edges[pnt(v,u)] = 0;
+                has_neighbour = false;
+                switch (clsif) {
+                    case V:
+                        if ( V == (0x0f & classified_edges[pnt(v,u+1)]) ) {
+                            u++;
+                            has_neighbour = true;
+                        } else if ( V == (0x0f & classified_edges[pnt(v+1,u+1)]) ) {
+                            u++;
+                            v++;
+                            has_neighbour = true;
+                        } else if ( V == (0x0f & classified_edges[pnt(v-1,u+1)]) ) {
+                            u++;
+                            v--;
+                            has_neighbour = true;
+                        }
+                        break;
+                    case D1:
+                        if ( D1 == (0x0f & classified_edges[pnt(v,u+1)]) ) {
+                            u++;
+                            has_neighbour = true;
+                        } else if ( D1 == (0x0f & classified_edges[pnt(v-1,u)]) ) {
+                            v--;
+                            has_neighbour = true;
+                        } else if ( D1 == (0x0f & classified_edges[pnt(v-1,u+1)]) ) {
+                            u++;
+                            v--;
+                            has_neighbour = true;
+                        }
+                        break;
+                    case D2:
+                        if ( D2 == (0x0f & classified_edges[pnt(v+1,u)]) ) {
+                            v++;
+                            has_neighbour = true;
+                        } else if ( D2 == (0x0f & classified_edges[pnt(v,u+1)]) ) {
+                            u++;
+                            has_neighbour = true;
+                        } else if ( D2 == (0x0f & classified_edges[pnt(v+1,u+1)]) ) {
+                            u++;
+                            v++;
+                            has_neighbour = true;
+                        }
+                        break;
+                    case H:
+                        if ( H == (0x0f & classified_edges[pnt(v+1,u)]) ) {
+                            v++;
+                            has_neighbour = true;
+                        } else if ( H == (0x0f & classified_edges[pnt(v+1,u+1)]) ) {
+                            u++;
+                            v++;
+                            has_neighbour = true;
+                        } else if ( H == (0x0f & classified_edges[pnt(v+1,u-1)]) ) {
+                            u--;
+                            v++;
+                            has_neighbour = true;
+                        }
+                        break;
+                }
+            } while ( has_neighbour );
+
+            Line l(start, pnt(v,u), clsif_org);
+            if ( l.length > 5.0 ) {
+                all_lines.push_back(l);
+                if (clsif == D1) {
+                    if ( clsif_org & 0x10 ) {
+                        right_D1_edges.push_back(l);
+                    } else {
+                        left_D1_edges.push_back(l);
+                    }
+                }
+                if (clsif == D2) {
+                    if ( clsif_org & 0x10 ) {
+                        right_D2_edges.push_back(l);
+                    } else {
+                        left_D2_edges.push_back(l);
+                    }
+                }
+            }
+
+            // reset to original coords, so for loop continues where it left off
+            u = start.y;
+            v = start.x;
+        }
+    }
+
+
+    for(const Line& a : left_D1_edges) {
+        for(auto it = right_D1_edges.begin(); it != right_D1_edges.end(); it++ ) {
+            const Line b = *it;
+            if (
+                ( a.start.y - 10 < b.start.y && b.start.y < a.start.y + 10 ) &&
+                ( a.start.x < b.start.x && b.start.x < a.start.x + 35 )
+            ) {
+                markings_found.push_back(LineMarking(a,b, 0x80));
+                right_D1_edges.erase(it);  // remove the matched edge so it cannot be paired again
+                break;
+            }
+        }
+    }
+
+    for(const Line& a : left_D2_edges) {
+        for(auto it = right_D2_edges.begin(); it != right_D2_edges.end(); it++ ) {
+            const Line b = *it;
+            if (
+                ( a.start.y - 20 < b.start.y && b.start.y < a.start.y + 20 ) &&
+                ( a.start.x < b.start.x && b.start.x < a.start.x + 30 )
+            ) {
+                markings_found.push_back(LineMarking(a,b, 0x80));
+                right_D2_edges.erase(it);  // remove the matched edge so it cannot be paired again
+                break;
+            }
+        }
+    }
+
+
+    cv::Mat displayLines = cv::Mat::zeros(cv::Size(image.width, image.height), CV_8U);
+
+
+    for(const Line& l : all_lines) {
+        cv::line(displayLines, l.start, l.end, cv::Scalar(40));
+    }
+
+    for(const LineMarking& a : markings_found) {
+
+        std::vector<std::vector<cv::Point>> contourVec;
+        contourVec.push_back(a.contour);
+        cv::drawContours(displayLines, contourVec, 0, cv::Scalar(128));
+
+        cv::line(displayLines, a.middle->start, a.middle->end, cv::Scalar(255));
+    }
+
+    cv_bridge::CvImage image_displayLines(std_msgs::Header(), "mono8", displayLines);
+    image_pub->publish(image_displayLines.toImageMsg());
+
+    ros::Time endtime = ros::Time::now();
+    ros::Duration time_spend = endtime - starttime;
+    ROS_DEBUG("run took: %fs", time_spend.toSec());
+}
+
+
+
+// node main loop
+int main(int argc, char **argv) {
+    ros::init(argc, argv, "laneMarkingDetection");
+
+    ros::NodeHandle nh;
+    ros::NodeHandle private_nh("~");
+
+    /*
+     * subscribe to camera image creation event
+     */
+    ros::Subscriber sub = nh.subscribe("/img/gray", 10, callback_image);
+
+    /*
+     * advertise publisher topics
+     */
+    ros::Publisher image_publisher = private_nh.advertise<sensor_msgs::Image>("/img/temp", 2);
+    image_pub = &image_publisher;
+
+    /*
+     * start the node
+     */
+    ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Debug);
+    ros::spin();
+
+    return 0;
+}
diff --git a/code/custom_lanedetection.hpp b/code/custom_lanedetection.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..ad511cfb3325f08ec665426e519682001cfe0836
--- /dev/null
+++ b/code/custom_lanedetection.hpp
@@ -0,0 +1,110 @@
+#include <math.h>
+#include <opencv2/opencv.hpp>
+#include <utility>
+#include <sensor_msgs/Image.h>
+
+
+/** definitions for static constants ------------ **/
+// these represent the edge-classes
+#define V 0x01
+#define D1 0x02
+#define D2 0x04
+#define H 0x08
+
+// gradient kernels (simplified Sobel: central differences)
+std::vector<int> SOBEL_X{
+    0, 0, 0,
+    -1, 0, 1,
+    0, 0, 0
+};
+std::vector<int> SOBEL_Y{
+    0, -1, 0,
+    0, 0, 0,
+    0, 1, 0
+};
+
+typedef cv::Point pnt;
+
+// This class is used for copies of images containing some
+// other form of data
+template <class T>
+class DataImage {
+    public:
+        std::vector<T> data;
+        int width;
+        int height;
+
+        DataImage( int width, int height ): data(width*height, 0), width(width), height(height) {};
+
+        T& operator[](pnt const& p) { return data[p.y*width + p.x]; }
+        const T& operator[](pnt const& p) const { return data[p.y*width + p.x]; }
+};
+// This class represents an image.
+// Theoretically it is just a specialization of the `DataImage`
+// class, but I couldn't get that to work, so it's duplicated...
+class Image {
+    public:
+        std::vector<uint8_t> data;
+        int width;
+        int height;
+
+        Image( int width, int height ): data(width*height, 0), width(width), height(height) {};
+        Image( sensor_msgs::Image img ): data(img.data), width(img.width), height(img.height) {};
+
+        uint8_t& operator[](pnt const& p) { return data[p.y*width + p.x]; }
+        const uint8_t& operator[](pnt const& p) const { return data[p.y*width + p.x]; }
+};
+
+
+// this object represents a Line
+// it takes a start- and end-point
+class Line {
+    public:
+        uint8_t classinfo;
+        float length;
+        pnt start;
+        pnt end;
+
+        Line(pnt start, pnt end, uint8_t classinfobyte) {
+            classinfo = classinfobyte;
+            // ensure the start is always the upper point
+            if (start.y <= end.y) {
+                this->start = start;
+                this->end = end;
+            } else {
+                this->start = end;
+                this->end = start;
+            }
+
+            int dx = end.x - start.x;
+            int dy = end.y - start.y;
+            this->length = sqrt(dx*dx + dy*dy);
+        }
+};
+
+// an object representing a lane-marking
+// it needs two edges and calculates the middle line and the contour from that
+class LineMarking {
+    public:
+        std::vector<pnt> contour;
+        Line *middle;
+
+        LineMarking(const Line &a, const Line &b, uint8_t orientation) {
+            contour.push_back(a.start);
+            contour.push_back(a.end);
+            contour.push_back(b.end);
+            contour.push_back(b.start);
+            middle = new Line(
+                pnt(
+                    (a.start.x + b.start.x)/2,
+                    (a.start.y + b.start.y)/2
+                ),
+                pnt(
+                    (a.end.x + b.end.x)/2,
+                    (a.end.y + b.end.y)/2
+                ),
+                orientation
+            );
+        }
+
+};
diff --git a/code/demo.py b/code/demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a404ca432afc6e7288472c9dcc2136227988082
--- /dev/null
+++ b/code/demo.py
@@ -0,0 +1,198 @@
+import cv2
+import numpy as np
+from math import atan2, sqrt, pi
+from copy import copy
+from time import perf_counter
+
+
+sobel_x = np.array([[-1, 0, +1], [-2, 0, +2], [-1, 0, +1]])
+sobel_y = np.array([[-1, -2, -1], [0, 0, 0], [+1, +2, +1]])
+
+V = 0x01
+D1 = 0x02
+D2 = 0x04
+H = 0x08
+
+
+class Line:
+    start: tuple[int, int]
+    end: tuple[int, int]
+    length: float
+    angle: float
+    info: np.uint8
+    color = (255, 255, 255)
+
+    def __init__(
+        self, start: tuple[int, int], end: tuple[int, int], info: np.uint8 = 255
+    ) -> None:
+        if start[0] < end[0]:
+            self.start = start
+            self.end = end
+        else:
+            self.start = end
+            self.end = start
+
+        dx = end[0] - start[0]
+        dy = end[1] - start[1]
+        self.length = sqrt(dx * dx + dy * dy)
+        self.angle = atan2(dy, dx) / pi * 180
+
+        if info <= 0x1f:
+            self.info = info
+            c0 = 255 if info & 0x10 else 0  # build per instance, do not mutate the class attribute
+            c1 = 255 if info & 0x07 else 0
+            c2 = 255 if info & 0x0b else 0
+            self.color = (c0, c1, c2)
+
+
+
+
+class LineMarking:
+    contour: list[tuple]
+    middle: Line
+
+    def __init__(self, a: Line, b: Line, orientation: str) -> None:
+        self.contour = np.array([
+            a.start, a.end, b.end, b.start
+        ])
+        self.middle = Line(
+            (
+                int((a.start[0] + b.start[0])/2),
+                int((a.start[1] + b.start[1])/2),
+            ),
+            (
+                int((a.end[0] + b.end[0])/2),
+                int((a.end[1] + b.end[1])/2),
+            ),
+        )
+
+lines: list[Line] = []
+markings_found: list[LineMarking] = []
+
+
+img = cv2.imread("./image.png")
+img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+
+starttime = perf_counter()
+
+img = cv2.GaussianBlur(img, (3,3), 1.5)
+canny = cv2.Canny(img, 180, 40)
+
+
+pixel_info = np.zeros(img.shape[:2], dtype=np.uint8)
+
+# get classification for every edge pixel
+for (u, v), e in np.ndenumerate(canny[1:-1, 1:-1]):
+    if not e:
+        continue
+
+    u += 1
+    v += 1
+
+    nh = img[u - 1 : u + 2, v - 1 : v + 2]
+    dx = np.sum(nh * sobel_x)
+    dy = np.sum(nh * sobel_y)
+    arc = atan2(dy, dx) / pi * 180
+
+    if arc < 0:
+        pixel_info[u, v] |= 0x10
+    arc = abs(arc)
+
+    if arc >= 157.5 or 22.5 > arc:
+        pixel_info[u, v] |= V
+    elif 22.5 <= arc < 67.5:
+        pixel_info[u, v] |= D1 if not pixel_info[u, v] else D2
+    elif 67.5 <= arc < 112.5:
+        pixel_info[u, v] |= H
+    elif 112.5 <= arc < 157.5:
+        pixel_info[u, v] |= D2 if not pixel_info[u, v] else D1
+
+# find starting points and follow continuous lines,
+# creating Line objects
+for (u, v), e in np.ndenumerate(pixel_info[1:-1, 1:-1]):
+    if not e:
+        continue
+
+    u += 1
+    v += 1
+    info = copy(e)
+    e &= 0x0f
+
+    if e == V:
+        relevant_nh = np.array([[-1, 0], [-1, -1], [-1, 1]])
+    elif e == D1:
+        relevant_nh = np.array([[0, 1], [-1, 0], [-1, 1]])
+    elif e == H:
+        relevant_nh = np.array([[0, -1], [-1, -1], [1, -1]])
+    elif e == D2:
+        relevant_nh = np.array([[-1, 0], [0, -1], [-1, -1]])
+
+    for x, y in relevant_nh:
+        if e == pixel_info[u + x, v + y] & 0x0f:
+            break
+
+    else:  # no neighbour found means start pixel
+        start = (u, v)
+        relevant_nh *= -1
+
+        while True:
+            pixel_info[u, v] = 0
+            for x, y in relevant_nh:
+                if e == pixel_info[u + x, v + y] & 0x0f:
+                    u, v = u + x, v + y
+                    break
+            else:  # no more neighbours
+                break
+
+        l = Line(start, (u, v), info)
+        if l.length > 5:
+            lines.append(l)
+
+
+
+# group edges by class and find pairs
+# creating LineMarking objects
+left_D1_edges = [e for e in lines if e.info & 0x0f == D1 and not e.info & 0x10]
+right_D1_edges = [e for e in lines if e.info & 0x0f == D1 and e.info & 0x10]
+for a in left_D1_edges:
+    for b in right_D1_edges:
+        if (
+            a.start[0] - 20 < b.start[0] < a.start[0] + 20 and
+            a.start[1] < b.start[1] < a.start[1] + 30 #and
+            #a.end[0] - 20 < b.end[0] < a.end[0] + 20 and
+            #a.end[1] < b.end[1] < a.end[1] + 20
+        ):
+            markings_found.append(LineMarking(a,b, "left"))
+            right_D1_edges.remove(b)
+            break
+
+left_D2_edges = [e for e in lines if e.info & 0x0f == D2 and not e.info & 0x10]
+right_D2_edges = [e for e in lines if e.info & 0x0f == D2 and e.info & 0x10]
+for a in left_D2_edges:
+    for b in right_D2_edges:
+        if (
+            a.start[1] - 30 < b.start[1] < a.start[1] + 30 and
+            a.start[0] < b.start[0] < a.start[0] + 30 #and
+            #a.end[0] - 20 < b.end[0] < a.end[0] + 20 and
+            #a.end[1] < b.end[1] < a.end[1] + 20
+        ):
+            markings_found.append(LineMarking(a,b, "right"))
+            right_D2_edges.remove(b)
+            break
+
+
+endtime = perf_counter()
+
+print(f"runtime: {endtime-starttime:.4f}s")
+
+
+# display detected markings:
+out = np.zeros(img.shape + (3,), dtype=np.uint8)
+for mark in markings_found:
+    cv2.drawContours(out, [np.fliplr(mark.contour)], 0, (255,255,0) )
+
+    cv2.line(out, (mark.middle.start[1], mark.middle.start[0]), (mark.middle.end[1], mark.middle.end[0]), (0,255,0))
+
+cv2.imshow("found lines", out)
+cv2.waitKey()
diff --git a/code/lanedetection.cpp b/code/lanedetection.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0fc883a3d356b62165c3f869849ffa49aa5eb2bf
--- /dev/null
+++ b/code/lanedetection.cpp
@@ -0,0 +1,302 @@
+#include <ros/ros.h>
+#include <sensor_msgs/Image.h>
+#include <opencv2/opencv.hpp>
+#include <cv_bridge/cv_bridge.h>
+#include <list>
+
+#include "lane_markings_detection.hpp"
+
+
+// global variables needed by callback
+ros::Publisher* image_pub;
+
+
+/**
+ * @brief runs a Canny edge detector on the image and returns an
+ *        orientation classification for each edge pixel
+ *
+ * @param image the image to analyse
+ * @return cv::Mat
+ *         An image of identical dimensions with edge pixels marked
+ *         with their classification value
+ */
+cv::Mat edgeDetectionClassification(cv::Mat image) {
+
+    cv::Mat canny;
+    cv::Canny(image, canny, 180, 50);
+
+    cv::Mat classified_edges = cv::Mat::zeros(cv::Size(image.cols,image.rows), CV_8U);
+    for (int u=1; u < image.rows-1; u++) {
+        for (int v=1; v < image.cols-1; v++) {
+            if ( ! canny.at<uint8_t>(u,v) )
+                continue;
+
+
+            uint8_t e;
+            int dx=0, dy=0;
+            for (int y=0; y<3; y++) {
+                for (int x=0; x<3; x++) {
+                    e = image.at<uint8_t>(u+y-1,v+x-1);
+                    dx += SOBEL_X[y*3+x] * e;
+                    dy += SOBEL_Y[y*3+x] * e;
+                }
+            }
+            double arc = atan2(dy,dx) / M_PI * 180.0;
+
+            uint8_t clsif = 0;
+            if (arc < 0)
+                clsif = 0x10;
+            arc = fabs(arc);
+
+            if (arc <= 22.5f || arc > 157.5f ) {
+                clsif |= V;
+            } else if ( 67.5f <= arc && arc < 112.5f ) {
+                clsif |= H;
+            } else if ( ( !clsif && arc < 67.5f ) || ( clsif && 112.5f <= arc ) ) {
+                clsif |= D1;
+            } else if ( ( clsif && arc < 67.5f ) || ( !clsif && 112.5f <= arc ) ) {
+                clsif |= D2;
+            } else {
+                ROS_DEBUG("got strange angle: %.2f", arc);
+                continue;
+            }
+            classified_edges.at<uint8_t>(u,v) = clsif;
+        }
+    }
+    return classified_edges;
+}
+
+
+
+void callback_image(sensor_msgs::Image image_msg) {
+
+    ros::Time starttime = ros::Time::now();
+    std::list<Line> all_lines;
+    std::list<Line> left_D1_edges;
+    std::list<Line> right_D1_edges;
+    std::list<Line> left_D2_edges;
+    std::list<Line> right_D2_edges;
+    std::list<LineMarking> markings_found;
+    cv_bridge::CvImagePtr imagePtr = cv_bridge::toCvCopy(image_msg);
+
+
+    cv::Mat classified_edges = edgeDetectionClassification(imagePtr->image);
+
+    // find starting points and follow continuous lines,
+    // creating Line objects
+    for (int u=1; u < classified_edges.rows-1; u++) {
+        for (int v=1; v < classified_edges.cols-1; v++) {
+            uint8_t clsif_org = classified_edges.at<uint8_t>(u,v);
+            if ( ! clsif_org )
+                continue;
+
+            // get only classification without direction:
+            uint8_t clsif = 0x0f & clsif_org;
+
+            bool has_neighbour = false;
+            switch (clsif) {
+                case V:
+                    if (
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u-1,v)) ||
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u-1,v-1)) ||
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u-1,v+1))
+                    )
+                        has_neighbour = true;
+                    break;
+                case D1:
+                    if (
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u,v+1)) ||
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u-1,v)) ||
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u-1,v+1))
+                    )
+                        has_neighbour = true;
+                    break;
+                case H:
+                    if (
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u,v-1)) ||
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u-1,v-1)) ||
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u+1,v-1))
+                    )
+                        has_neighbour = true;
+                    break;
+                case D2:
+                    if (
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u-1,v)) ||
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u,v-1)) ||
+                        clsif == (0x0f & classified_edges.at<uint8_t>(u-1,v-1))
+                    )
+                        has_neighbour = true;
+                    break;
+            }
+            if ( has_neighbour )
+                continue;
+
+            // no neighbour found means start pixel
+            pnt start(v,u);
+            // follow the line to its end
+            do {
+                classified_edges.at<uint8_t>(u,v) = 0;
+                has_neighbour = false;
+                switch (clsif) {
+                    case V:
+                        if ( clsif == (0x0f & classified_edges.at<uint8_t>(u+1,v)) ) {
+                            u++;
+                            has_neighbour = true;
+                        } else if ( clsif == (0x0f & classified_edges.at<uint8_t>(u+1,v+1)) ) {
+                            u++;
+                            v++;
+                            has_neighbour = true;
+                        } else if ( clsif == (0x0f & classified_edges.at<uint8_t>(u+1,v-1)) ) {
+                            u++;
+                            v--;
+                            has_neighbour = true;
+                        }
+                        break;
+                    case D1:
+                        if ( clsif == (0x0f & classified_edges.at<uint8_t>(u+1,v)) ) {
+                            u++;
+                            has_neighbour = true;
+                        } else if ( clsif == (0x0f & classified_edges.at<uint8_t>(u,v-1)) ) {
+                            v--;
+                            has_neighbour = true;
+                        } else if ( clsif == (0x0f & classified_edges.at<uint8_t>(u+1,v-1)) ) {
+                            u++;
+                            v--;
+                            has_neighbour = true;
+                        }
+                        break;
+                    case D2:
+                        if ( clsif == (0x0f & classified_edges.at<uint8_t>(u,v+1)) ) {
+                            v++;
+                            has_neighbour = true;
+                        } else if ( clsif == (0x0f & classified_edges.at<uint8_t>(u+1,v)) ) {
+                            u++;
+                            has_neighbour = true;
+                        } else if ( clsif == (0x0f & classified_edges.at<uint8_t>(u+1,v+1)) ) {
+                            u++;
+                            v++;
+                            has_neighbour = true;
+                        }
+                        break;
+                    case H:
+                        if ( clsif == (0x0f & classified_edges.at<uint8_t>(u,v+1)) ) {
+                            v++;
+                            has_neighbour = true;
+                        } else if ( clsif == (0x0f & classified_edges.at<uint8_t>(u+1,v+1)) ) {
+                            u++;
+                            v++;
+                            has_neighbour = true;
+                        } else if ( clsif == (0x0f & classified_edges.at<uint8_t>(u-1,v+1)) ) {
+                            u--;
+                            v++;
+                            has_neighbour = true;
+                        }
+                        break;
+                }
+            } while ( has_neighbour );
+
+            Line l(start, pnt(v,u), clsif_org);
+            if ( l.length > 5.0 ) {
+                all_lines.push_back(l);
+                if (clsif == D1) {
+                    if ( clsif_org & 0x10 ) {
+                        right_D1_edges.push_back(l);
+                    } else {
+                        left_D1_edges.push_back(l);
+                    }
+                }
+                if (clsif == D2) {
+                    if ( clsif_org & 0x10 ) {
+                        right_D2_edges.push_back(l);
+                    } else {
+                        left_D2_edges.push_back(l);
+                    }
+                }
+            }
+
+            // reset to original coords, so for loop continues where it left off
+            u = start.y;
+            v = start.x;
+        }
+    }
+
+
+    // match pairs and create LineMarking objects
+    for(const Line& a : left_D1_edges) {
+        for(auto it = right_D1_edges.begin(); it != right_D1_edges.end(); it++ ) {
+            const Line b = *it;
+            if (
+                ( a.start.y - 10 < b.start.y && b.start.y < a.start.y + 10 ) &&
+                ( a.start.x < b.start.x && b.start.x < a.start.x + 25 )
+            ) {
+                markings_found.push_back(LineMarking(a,b, D1));
+                right_D1_edges.erase(it);  // remove the matched edge so it cannot be paired again
+                break;
+            }
+        }
+    }
+
+    for(const Line& a : left_D2_edges) {
+        for(auto it = right_D2_edges.begin(); it != right_D2_edges.end(); it++ ) {
+            const Line b = *it;
+            if (
+                ( a.start.y - 10 < b.start.y && b.start.y < a.start.y + 10 ) &&
+                ( a.start.x < b.start.x && b.start.x < a.start.x + 25 )
+            ) {
+                markings_found.push_back(LineMarking(a,b, D2));
+                right_D2_edges.erase(it);  // remove the matched edge so it cannot be paired again
+                break;
+            }
+        }
+    }
+
+    cv::Mat displayLines = cv::Mat::zeros(cv::Size(imagePtr->image.cols, imagePtr->image.rows), CV_8U);
+
+    for(const Line& l : all_lines) {
+        cv::line(displayLines, l.start, l.end, cv::Scalar(40));
+    }
+
+    for(const LineMarking& a : markings_found) {
+
+        std::vector<std::vector<cv::Point>> contourVec;
+        contourVec.push_back(a.contour);
+        cv::drawContours(displayLines, contourVec, 0, cv::Scalar(128));
+
+        cv::line(displayLines, a.middle->start, a.middle->end, cv::Scalar(255));
+    }
+
+
+    cv_bridge::CvImage image_displayLines(std_msgs::Header(), "mono8", displayLines);
+    image_pub->publish(image_displayLines.toImageMsg());
+
+    ros::Time endtime = ros::Time::now();
+    ros::Duration time_spend = endtime - starttime;
+    ROS_DEBUG("run took: %fs", time_spend.toSec());
+}
+
+
+// node main loop
+int main(int argc, char **argv) {
+    ros::init(argc, argv, "lane_marker_detection");
+
+    ros::NodeHandle nh;
+    ros::NodeHandle private_nh("~");
+
+    /*
+     * subscribe to camera image creation event
+     */
+    ros::Subscriber sub = nh.subscribe("/img/gray", 10, callback_image);
+
+    /*
+     * advertise publisher topics
+     */
+    ros::Publisher image_publisher = private_nh.advertise<sensor_msgs::Image>("/img/temp", 2);
+    image_pub = &image_publisher;
+
+    /*
+     * start the node
+     */
+    ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Debug);
+    ros::spin();
+    return 0;
+}
diff --git a/code/lanedetection.hpp b/code/lanedetection.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..af93e1bf3d05123092b6ca3246076aabaa44c08f
--- /dev/null
+++ b/code/lanedetection.hpp
@@ -0,0 +1,98 @@
+#include <math.h>
+#include <opencv2/opencv.hpp>
+#include <cv_bridge/cv_bridge.h>
+#include <sensor_msgs/Image.h>
+
+
+#define V 0x01
+#define D1 0x02
+#define D2 0x04
+#define H 0x08
+
+std::vector<int> SOBEL_X{
+    0, 0, 0,
+    -1, 0, 1,
+    0, 0, 0
+};
+std::vector<int> SOBEL_Y{
+    0, -1, 0,
+    0, 0, 0,
+    0, 1, 0
+};
+
+typedef cv::Point pnt;
+
+template <class T>
+class DataImage {
+    public:
+        std::vector<T> data;
+        int width;
+        int height;
+
+        DataImage( int width, int height ): data(width*height, 0), width(width), height(height) {};
+
+        T& operator[](pnt const& p) { return data[p.y*width + p.x]; }
+        const T& operator[](pnt const& p) const { return data[p.y*width + p.x]; }
+};
+class Image {
+    public:
+        std::vector<uint8_t> data;
+        int width;
+        int height;
+
+        Image( int width, int height ): data(width*height, 0), width(width), height(height) {};
+        Image( sensor_msgs::Image img ): data(img.data), width(img.width), height(img.height) {};
+
+        uint8_t& operator[](pnt const& p) { return data[p.y*width + p.x]; }
+        const uint8_t& operator[](pnt const& p) const { return data[p.y*width + p.x]; }
+};
+
+
+class Line {
+    public:
+        uint8_t classinfo;
+        float length;
+        pnt start;
+        pnt end;
+
+        Line(pnt start, pnt end, uint8_t classinfobyte) {
+            classinfo = classinfobyte;
+            // ensure the start is always the upper point
+            if (start.y <= end.y) {
+                this->start = start;
+                this->end = end;
+            } else {
+                this->start = end;
+                this->end = start;
+            }
+
+            int dx = end.x - start.x;
+            int dy = end.y - start.y;
+            this->length = sqrt(dx*dx + dy*dy);
+        }
+};
+
+class LineMarking {
+    public:
+        std::vector<pnt> contour;
+        Line *middle;
+
+        LineMarking(const Line &a, const Line &b, uint8_t orientation) {
+            contour.push_back(a.start);
+            contour.push_back(a.end);
+            contour.push_back(b.end);
+            contour.push_back(b.start);
+            middle = new Line(
+                pnt(
+                    (a.start.x + b.start.x)/2,
+                    (a.start.y + b.start.y)/2
+                ),
+                pnt(
+                    (a.end.x + b.end.x)/2,
+                    (a.end.y + b.end.y)/2
+                ),
+                orientation
+            );
+        }
+
+};
diff --git a/code/undistorter.cpp b/code/undistorter.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..37eec47e2ff3561345181bdb162df1abfb8be4ea
--- /dev/null
+++ b/code/undistorter.cpp
@@ -0,0 +1,111 @@
+#include <ros/ros.h>
+#include <sensor_msgs/Image.h>
+#include <sensor_msgs/image_encodings.h>
+#include <yaml-cpp/yaml.h>
+#include <opencv2/opencv.hpp>
+#include <cv_bridge/cv_bridge.h>
+
+
+// helper to convert a vector of vectors to a cv::Mat
+// adapted from https://stackoverflow.com/a/29021835/14157785
+template<typename _Tp> static cv::Mat toMat(const std::vector<std::vector<_Tp>> vecIn) {
+    cv::Mat_<_Tp> matOut(vecIn.size(), vecIn.at(0).size());
+    for (int i = 0; i < matOut.rows; ++i)
+        for (int j = 0; j < matOut.cols; ++j)
+            matOut(i, j) = vecIn.at(i).at(j);
+    return matOut;
+}
+
+// global variables needed by callback
+ros::Publisher* pub_colorImage;
+ros::Publisher* pub_grayImage;
+cv::Mat rectifyMapX, rectifyMapY;
+cv::Rect ROI;
+
+
+void callback_undistort_image(sensor_msgs::Image original) {
+    ros::Time starttime = ros::Time::now();
+    cv::Mat undistortedImage;
+
+    // convert from ROS msg-type to opencv matrix
+    cv_bridge::CvImagePtr imagePtr = cv_bridge::toCvCopy(original);
+
+    // apply the calculated rectification maps to undistort the image
+    cv::remap(imagePtr->image, undistortedImage, rectifyMapX, rectifyMapY, cv::INTER_LINEAR);
+
+    // crop relevant section from image
+    undistortedImage = undistortedImage(ROI);
+
+    // convert to gray
+    cv::Mat undistortedImage_gray;
+    cv::cvtColor(undistortedImage, undistortedImage_gray, cv::COLOR_RGB2GRAY);
+
+
+    // publish images
+    cv_bridge::CvImage colorImage(std_msgs::Header(), "rgb8", undistortedImage);
+    pub_colorImage->publish(colorImage.toImageMsg());
+    cv_bridge::CvImage grayImage(std_msgs::Header(), "mono8", undistortedImage_gray);
+    pub_grayImage->publish(grayImage.toImageMsg());
+
+    ROS_DEBUG("run took: %fs", (ros::Time::now()-starttime).toSec());
+}
+
+
+// node main loop
+int main(int argc, char **argv) {
+    ros::init(argc, argv, "img_undistort");
+
+    ros::NodeHandle nh;
+    ros::NodeHandle private_nh("~");
+
+    // ToDo: pass this in as a parameter
+    std::string configFilePath = "/home/jetbot/workspace/videodrive_ws/tools/calibration/calibration.yaml";
+
+    // read yaml calibration file
+    ROS_INFO("Reading calibration parameters from '%s'", configFilePath.c_str());
+    YAML::Node full_config = YAML::LoadFile(configFilePath);
+    YAML::Node camera_config = full_config["cameras"]["default"];
+
+    auto distortion_YAML = camera_config["intrinsic"]["distortion"].as<std::vector<double>>();
+    cv::Mat distortion ( distortion_YAML );
+    ROS_INFO_STREAM("using distortion parameters:\n" << distortion);
+
+    auto cameraMatrix_YAML = camera_config["intrinsic"]["matrix"].as<std::vector<std::vector<double>>>();
+    cv::Mat cameraMatrix = toMat( cameraMatrix_YAML );
+    ROS_INFO_STREAM("using camera matrix:\n" << cameraMatrix);
+
+    cv::Size imageSize(
+        full_config["images"]["size"]["width"].as<int>(),
+        full_config["images"]["size"]["height"].as<int>()
+    );
ROS_INFO("using image Size:\n(%i, %i)", imageSize.width, imageSize.height); + + // get scaled camera matrix + auto scaledCameraMatrix = cv::getOptimalNewCameraMatrix(cameraMatrix, distortion, imageSize, 1, imageSize, &ROI); + ROS_INFO("using ROI:\n(%i, %i, %i, %i)", ROI.x, ROI.y, ROI.width, ROI.height); + + // calculate undistortion mapping and storing globaly + cv::initUndistortRectifyMap(cameraMatrix, distortion, cv::Mat(), scaledCameraMatrix, imageSize, CV_16SC2, rectifyMapX, rectifyMapY); + + + /* + * subscribe to camera image creation event + */ + ros::Subscriber sub = nh.subscribe("/img/raw", 10, callback_undistort_image); + + /* + * advertise publisher topics + */ + ros::Publisher grayImage_publisher = private_nh.advertise<sensor_msgs::Image>("/img/gray", 2); + pub_grayImage = &grayImage_publisher; + ros::Publisher colorImage_publisher = private_nh.advertise<sensor_msgs::Image>("/img/color", 2); + pub_colorImage = &colorImage_publisher; + + /* + * start the node + */ + ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Info); + ros::spin(); + + return 0; +}