更新全覆盖避障代码

pull/8/head
LXX 2024-01-28 19:42:44 +08:00
parent 4ae4558216
commit 754ccb0e47
235 changed files with 115867 additions and 1 deletions

View File

@ -1 +0,0 @@
# This file currently only serves to mark the location of a catkin workspace for tool integration

View File

@ -0,0 +1,48 @@
# default .gitignore file for ipa320 repositories
# master file can be found in https://github.com/ipa320/care-o-bot/blob/indigo_dev/.gitignore
build/
bin/
lib/
# generated docs
*.dox
*.wikidoc
# eclipse
.project
.cproject
# qcreator
qtcreator-*
*.user
# Emacs
.#*
# VI/VIM
*.swp
# point cloud / compiled python files
*.pcd
*.pyc
*.pco
# temporary files
*~
# merge conflict files
*.orig
*BACKUP*
*BASE*
*LOCAL*
*REMOTE*
# Catkin custom files
CATKIN_IGNORE
# result files
*.map
# Kdev
*.kdev4

View File

@ -0,0 +1,8 @@
- git:
local-name: cob_extern
uri: https://github.com/ipa320/cob_extern.git
version: indigo_dev
- git:
local-name: cob_perception_common
uri: https://github.com/ipa320/cob_perception_common.git
version: indigo_dev

View File

@ -0,0 +1,30 @@
language: generic
services:
- docker
notifications:
email:
on_success: change
on_failure: always
env:
global:
- ROS_REPO=ros
matrix:
- ROS_DISTRO=kinetic UPSTREAM_WORKSPACE=file
- ROS_DISTRO=kinetic UPSTREAM_WORKSPACE=debian
- ROS_DISTRO=melodic UPSTREAM_WORKSPACE=file
- ROS_DISTRO=melodic UPSTREAM_WORKSPACE=debian
- ROS_DISTRO=noetic UPSTREAM_WORKSPACE=file
- ROS_DISTRO=noetic UPSTREAM_WORKSPACE=debian
matrix:
allow_failures:
- env: ROS_DISTRO=kinetic UPSTREAM_WORKSPACE=file
- env: ROS_DISTRO=kinetic UPSTREAM_WORKSPACE=debian
- env: ROS_DISTRO=melodic UPSTREAM_WORKSPACE=debian
- env: ROS_DISTRO=noetic UPSTREAM_WORKSPACE=debian
install:
- git clone https://github.com/ros-industrial/industrial_ci.git .ci_config
script:
- .ci_config/travis.sh
# - ./travis.sh # Enable this when you have a package-local script

View File

@ -0,0 +1,37 @@
# ipa_coverage_planning
Algorithms for floor plan segmentation and systematic coverage driving patterns.
If you find this software useful in your work, please cite our corresponding papers:
- R. Bormann, F. Jordan, W. Li, J. Hampp, and M. Hägele. Room Segmentation: Survey, Implementation, and Analysis. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2016. https://ieeexplore.ieee.org/abstract/document/7487234 , https://publica.fraunhofer.de/entities/publication/0bf23149-75d5-4601-bfce-992d91698862/details
- R. Bormann, F. Jordan, J. Hampp, and M. Hägele. Indoor coverage path planning: Survey, implementation, analysis. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), pages 1718–1725, May 2018. https://ieeexplore.ieee.org/abstract/document/8460566 , https://publica.fraunhofer.de/entities/publication/f537c15d-4cbe-4672-9d86-6e756a9ce71b/details
## ROS Distro Support
| | Indigo | Jade | Kinetic | Melodic | Noetic |
|:-------:|:------:|:----:|:-------:|:-------:|:-------:|
| Branch | [`indigo_dev`](https://github.com/ipa320/ipa_coverage_planning/tree/indigo_dev) | [`indigo_dev`](https://github.com/ipa320/ipa_coverage_planning/tree/indigo_dev) | [`indigo_dev`](https://github.com/ipa320/ipa_coverage_planning/tree/indigo_dev) | [`melodic_dev`](https://github.com/ipa320/ipa_coverage_planning/tree/melodic_dev) |[`noetic_dev`](https://github.com/ipa320/ipa_coverage_planning/tree/noetic_dev) |
| Status | not supported | not supported | EOL | supported | supported |
| Version | [version](http://repositories.ros.org/status_page/ros_indigo_default.html?q=ipa_coverage_planning) | [version](http://repositories.ros.org/status_page/ros_jade_default.html?q=ipa_coverage_planning) | [version](http://repositories.ros.org/status_page/ros_kinetic_default.html?q=ipa_coverage_planning) | [version](http://repositories.ros.org/status_page/ros_melodic_default.html?q=ipa_coverage_planning) | [version](http://repositories.ros.org/status_page/ros_noetic_default.html?q=ipa_coverage_planning)
## Travis - Continuous Integration
Status: [![Build Status](https://travis-ci.org/ipa320/ipa_coverage_planning.svg?branch=indigo_dev)](https://travis-ci.org/ipa320/ipa_coverage_planning)
## Quick start
1. Bring up your robot or launch the turtlebot3 simulation
1. Start the room exploration action server:
```
roslaunch ipa_room_exploration room_exploration_action_server.launch
```
1. Start the room exploration action client:
```
roslaunch ipa_room_exploration room_exploration_client.launch robot_env:=your-robot-env use_test_maps:=false
```

View File

@ -0,0 +1,13 @@
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package ipa_building_msgs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
0.2.0 (2017-02-09)
------------------
* added a srv folder
* added a srv for ipa_room_exploration/coverage_check_server
* Contributors: Florian Jordan
0.0.1 (2016-08-06)
------------------
* package set up with first set of msgs
* Contributors: Florian Jordan, Richard Bormann

View File

@ -0,0 +1,79 @@
cmake_minimum_required(VERSION 2.8.3)
project(ipa_building_msgs)
# Message-only package: declares the actions, messages and services shared by the
# ipa_coverage_planning stack. It builds no libraries or executables of its own.

## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
  actionlib
  actionlib_msgs
  genmsg
  geometry_msgs
  message_generation
  sensor_msgs
  std_msgs
)

#######################################
## Declare ROS messages and services ##
#######################################

## Add the Files for actionlib
add_action_files(
  DIRECTORY action
  FILES
  MapSegmentation.action
  FindRoomSequenceWithCheckpoints.action
  RoomExploration.action
)

## Generate messages in the 'msg' folder
add_message_files(
  DIRECTORY
  msg
  FILES
  RoomInformation.msg
  RoomSequence.msg
  dis_info.msg
  dis_info_array.msg
)

## Generate services in the 'srv' folder
add_service_files(
  DIRECTORY
  srv
  FILES
  CheckCoverage.srv
  ExtractAreaMapFromLabeledMap.srv
)

## Generate added messages and services with any dependencies listed here
# NOTE: every package whose types appear inside the .msg/.srv/.action files
# above must be listed here so the generators can resolve them.
generate_messages(
  DEPENDENCIES
  actionlib_msgs
  geometry_msgs
  sensor_msgs
  std_msgs
)

###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if you package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
  CATKIN_DEPENDS
  actionlib_msgs
  geometry_msgs
  message_runtime
  sensor_msgs
  std_msgs
)

include_directories(
  ${catkin_INCLUDE_DIRS}
)

View File

@ -0,0 +1,17 @@
# goal definition
sensor_msgs/Image input_map # floor plan map [mono8 format], 0=obstacle or unknown, 255=free space
float32 map_resolution # the resolution of the map in [meter/cell]
geometry_msgs/Pose map_origin # the origin of the map in [meter]
ipa_building_msgs/RoomInformation[] room_information_in_pixel # room data (min/max coordinates, center coordinates) measured in pixels
float64 robot_radius # the robot footprint radius [m], used for excluding areas from path planning that could not be visited by the robot
geometry_msgs/Pose robot_start_coordinate # current robot location (used to determine the closest checkpoint in the sequence of checkpoints) [in meter]
---
# result definition
ipa_building_msgs/RoomSequence[] checkpoints # sequence of checkpoints, should be ordered in optimized traveling salesman sequence
# (checkpoint = a clique of rooms close [in terms of robot driving distance] to each other,
# and a central [in terms of rooms in the group] checkpoint location)
sensor_msgs/Image sequence_map # map that has the calculated sequence drawn in
---
#feedback definition

View File

@ -0,0 +1,39 @@
# Map Segmentation action
# Provides interface to get the segmented map
# goal definition
sensor_msgs/Image input_map # the action server needs a map as an input image to segment it, IMPORTANT: The algorithm needs a black and white 8bit single-channel image (format 8UC1), which is 0 (black) for obstacles and 255 (white) for free space
float32 map_resolution # the resolution of the map in [meter/cell]
geometry_msgs/Pose map_origin # the origin of the map in [meter]
bool return_format_in_pixel # return room data (see below) in [pixel]
bool return_format_in_meter # return room data (see below) in [meter]
float32 robot_radius # in [meter]; if this variable is set to a value greater than 0, the room centers are chosen at locations that are reachable from neighboring rooms (i.e. not inside the legs of a table surrounded by chairs)
int32 room_segmentation_algorithm # optionally overrides the parameterized segmentation method (if -1 it just takes the preset)
# 0 = take the preset (parameterized) method
# 1 = MorphologicalSegmentation
# 2 = DistanceSegmentation
# 3 = VoronoiSegmentation
# 4 = SemanticSegmentation
# 5 = RandomFieldSegmentation
# 99 = PassThrough (just get a pre-segmented map into the right output format)
---
# result definition
sensor_msgs/Image segmented_map # the action server returns a map segmented into rooms which carry the segment number in every pixel cell, format 32SC1, room labels from 1 to N, room with label i -> access to room_information_in_pixel[i-1]
float32 map_resolution # the resolution of the segmented map in [meter/cell]
geometry_msgs/Pose map_origin # the origin of the segmented map in [meter]
# for the following data: value in pixel can be obtained when the value of [return_format_in_pixel] from goal definition is true
# the computed room centers are not the centroids of the room cells (which could be located outside of the rooms)
# but accessible map pixels located within the center of the largest free space in the room
ipa_building_msgs/RoomInformation[] room_information_in_pixel # room data (min/max coordinates, center coordinates) measured in pixels
# for the following data: value in meter can be obtained when the value of [return_format_in_meter] from goal definition is true
# the computed room centers are not the centroids of the room cells (which could be located outside of the rooms)
# but accessible map pixels located within the center of the largest free space in the room
ipa_building_msgs/RoomInformation[] room_information_in_meter # room data (min/max coordinates, center coordinates) measured in meters
# if wanted the 5th algorithm (vrf) can return single points labeled as a doorway
geometry_msgs/Point32[] doorway_points
---
#feedback definition

View File

@ -0,0 +1,24 @@
# Room Exploration action
# sends an occupancy grid map of a room to the server, which plans a path through it to cover or inspect the whole room by a device or camera
# goal definition
sensor_msgs/Image input_map # the action server needs a map as a input image to segment it,
# IMPORTANT: The algorithm needs a black and white 8bit single-channel image (format 8UC1), which is 0 (black) for obstacles and 255 (white) for free space
# todo: the image needs to be vertically mirrored compared to the map in RViz for using right coordinate systems
# OccupancyGrid map = origin lower left corner, image = origin upper left corner
# todo: take the OccupanyGrid message here instead to avoid confusion and deal with map coordinates in server
float32 map_resolution # the resolution of the map in [meter/cell]
geometry_msgs/Pose map_origin # the origin of the map in [meter], NOTE: rotations are not supported for now
float32 robot_radius # effective robot radius, taking the enlargement of the costmap into account, in [meter]
float32 coverage_radius # radius that is used to plan the coverage planning for the robot and not the field of view, assuming that the part that needs to cover everything (e.g. the cleaning part) can be represented by a fitting circle (e.g. smaller than the actual part to ensure coverage), in [meter]
geometry_msgs/Point32[] field_of_view # the 4 points that define the field of view of the robot, relatively to the robot coordinate system (with x pointing forwards and y pointing to the left), in [meter]
geometry_msgs/Point32 field_of_view_origin # the mounting position of the camera spanning the field of view, relative to the robot center (x-axis points to robot's front side, y-axis points to robot's left side, z-axis upwards), in [meter]
geometry_msgs/Pose2D starting_position # starting pose of the robot in the room coordinate system [meter,meter,rad]
int32 planning_mode # 1 = plans a path for coverage with the robot footprint, 2 = plans a path for coverage with the robot's field of view
---
# result definition
geometry_msgs/Pose2D[] coverage_path # when the server should return the coverage path, this is done returning the points in an array that shows the order of visiting
geometry_msgs/PoseStamped[] coverage_path_pose_stamped # (same content as coverage_path but different format) when the server should return the coverage path, this is done returning the points in an array that shows the order of visiting
---
# feedback definition

View File

@ -0,0 +1,2 @@
geometry_msgs/Polygon room_min_max # first point provides the minimum (x,y)-coordinate values of the rooms, second point provides the maximum (x,y)-coordinate values of the rooms
geometry_msgs/Point32 room_center # provides the (x,y)-coordinate values of the room centers

View File

@ -0,0 +1,5 @@
uint32[] room_indices # indices of those rooms that belong to one group (connected to a checkpoint,
# i.e. the clique of rooms close [in terms of robot driving distance] to each other),
# indices should be ordered in optimized traveling salesman sequence
geometry_msgs/Point32 checkpoint_position_in_pixel # provides the (x,y)-coordinates of the checkpoint location for this group of rooms [in pixel]
geometry_msgs/Point32 checkpoint_position_in_meter # provides the (x,y)-coordinates of the checkpoint location for this group of rooms [in meter]

View File

@ -0,0 +1,4 @@
# Parameters of a single detected obstacle.
# NOTE(review): units are not documented anywhere in this package — presumably
# meters for distance/width/height and radians or degrees for angle; confirm
# against the publisher of this message before relying on them.
float32 distance    # distance from the robot to the detected obstacle
float32 width       # width of the detected obstacle
float32 height      # height of the detected obstacle
float32 angle       # bearing of the obstacle relative to the robot

View File

@ -0,0 +1 @@
# Collection of obstacle detections (see dis_info.msg for the per-obstacle fields).
ipa_building_msgs/dis_info[] dis

View File

@ -0,0 +1,27 @@
<?xml version="1.0"?>
<package format="2">
<name>ipa_building_msgs</name>
<version>0.2.0</version>
<description>
This package contains common message type definitions for building navigation tasks such as floor plan segmentation into rooms or the optimal trajectory through rooms of a building.
</description>
<license>LGPL for academic and non-commercial use; for commercial use, please contact Fraunhofer IPA</license>
<maintainer email="richard.bormann@ipa.fraunhofer.de">Richard Bormann</maintainer>
<author>Richard Bormann</author>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>message_generation</build_depend>
<exec_depend>message_runtime</exec_depend>
<depend>actionlib</depend>
<depend>actionlib_msgs</depend>
<depend>geometry_msgs</depend>
<depend>sensor_msgs</depend>
<depend>std_msgs</depend>
</package>

View File

@ -0,0 +1,12 @@
sensor_msgs/Image input_map # the action server needs a map as an input image to segment it, IMPORTANT: The algorithm needs a black and white 8bit single-channel image, which is 0 (black) for obstacles and 255 (white) for free space
float32 map_resolution # resolution of the given map, in [meter/cell]
geometry_msgs/Pose map_origin # the origin of the map, in [meter]
geometry_msgs/Pose2D[] path # check the coverage along this path of the robot center, in the world frame in [meter]
geometry_msgs/Point32[] field_of_view # the points that define the field of view of the robot, relative to the robot center (x-axis points to robot's front side, y-axis points to robot's left side, z-axis upwards), in [meter]
geometry_msgs/Point32 field_of_view_origin # the mounting position of the camera spanning the field of view, relative to the robot center (x-axis points to robot's front side, y-axis points to robot's left side, z-axis upwards), in [meter]
float32 coverage_radius # radius that is used to plan the coverage planning for the robot and not the field of view, assuming that the part that needs to cover everything (e.g. the cleaning part) can be represented by a fitting circle (e.g. smaller than the actual part to ensure coverage), in [meter]
bool check_for_footprint # determine, if the coverage check should be done for the footprint or the field of view
bool check_number_of_coverages # if set, the server returns a map that shows how often one pixel has been covered during the path, return format: 32bit single-channel image
---
sensor_msgs/Image coverage_map # the map that has the covered areas drawn in, with a value of 255, an 8bit single-channel image
sensor_msgs/Image number_of_coverage_image # the image that carries for each pixel the number of coverages when executing the path, 32bit single-channel image

View File

@ -0,0 +1,14 @@
# The request message provides a segmented map which consists of cells with label 0 for inaccessible areas and other number from 1 to N
# for labeling membership with one of the N segmented areas.
# The return message shall deliver the same map but with only one area (segment_of_interest) labeled as 255 and the remainder labeled
# as inaccessible with 0.
sensor_msgs/Image segmented_map # a map segmented into N areas which carry the respective segment number in every pixel cell,
# format 32SC1, room labels from 1 to N, 0 represents inaccessible cells (e.g. walls)
int32 segment_of_interest # the specific area label of the area of which a map shall be created
---
sensor_msgs/Image segmented_area # a map that only contains the area which is labeled with segment_of_interest in segmented_map,
# the parts of that area are labeled as 255 in this map and the remainder is labeled as inaccessible with 0
# data type is a 8bit single-channel image (format 8UC1)

View File

@ -0,0 +1,202 @@
cmake_minimum_required(VERSION 2.8.3)
project(ipa_building_navigation)

# build with c++11
add_compile_options(-std=c++11)

# Packages needed at run time; the build list below extends this set, so keep
# run-time-only dependencies here and build-only tools in catkin_BUILD_PACKAGES.
set(catkin_RUN_PACKAGES
  actionlib
  cv_bridge
  geometry_msgs
  ipa_building_msgs
  roscpp
  roslib
  sensor_msgs
  visualization_msgs
)
set(catkin_BUILD_PACKAGES
  ${catkin_RUN_PACKAGES}
  dynamic_reconfigure
)

## Find catkin macros and libraries
find_package(catkin REQUIRED COMPONENTS
  ${catkin_BUILD_PACKAGES}
)

# Generate dynamic_reconfigure headers from the .cfg file; targets that include
# them must depend on ${PROJECT_NAME}_gencfg (see the server target below).
generate_dynamic_reconfigure_options(
  cfg/BuildingNavigation.cfg
)

find_package(OpenCV REQUIRED)
find_package(Boost REQUIRED COMPONENTS system thread chrono)

###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if you package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
# important: common/include needs to be here if you have such a directory
catkin_package(
  INCLUDE_DIRS
  common/include
  ros/include
  LIBRARIES
  ${PROJECT_NAME}
  tsp_solvers
  CATKIN_DEPENDS
  ${catkin_RUN_PACKAGES}
  DEPENDS
  # libconcorde_tsp_solver
  OpenCV
  Boost
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
# include_directories(include ${catkin_INCLUDE_DIRS} ${Boost_INCLUDE_DIRS})
# important: common/include also needs to be here if you have it
include_directories(
  common/include
  ros/include
  ${catkin_INCLUDE_DIRS}
  ${OpenCV_INCLUDE_DIRS}
  ${Boost_INCLUDE_DIRS}
)

# TSP library: A* path planning plus the three TSP solver implementations
# (nearest-neighbor, genetic, Concorde).
add_library(tsp_solvers
  common/src/A_star_pathplanner.cpp
  common/src/node.cpp
  common/src/nearest_neighbor_TSP.cpp
  common/src/genetic_TSP.cpp
  common/src/concorde_TSP.cpp
)
target_link_libraries(tsp_solvers
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
  ${Boost_LIBRARIES}
)
# Ensure generated message/config headers exist before compiling this library.
add_dependencies(tsp_solvers
  ${catkin_EXPORTED_TARGETS}
  ${${PROJECT_NAME}_EXPORTED_TARGETS}
)

# package library: clique finding, set cover and trolley placement used by the
# room sequence planner.
add_library(${PROJECT_NAME} STATIC
  common/src/maximal_clique_finder.cpp
  common/src/set_cover_solver.cpp
  common/src/trolley_position_finder.cpp
)
target_link_libraries(${PROJECT_NAME}
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
  ${Boost_LIBRARIES}
)
add_dependencies(${PROJECT_NAME}
  ${catkin_EXPORTED_TARGETS}
  ${${PROJECT_NAME}_EXPORTED_TARGETS}
)
# action provider: the room sequence planning action server node
add_executable(room_sequence_planning_server
  ros/src/room_sequence_planning_action_server.cpp
)
target_link_libraries(room_sequence_planning_server
  tsp_solvers
  ${PROJECT_NAME}
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
  ${Boost_LIBRARIES}
)
# The server includes the dynamic_reconfigure header, hence the extra
# ${PROJECT_NAME}_gencfg dependency (the other targets below do not need it).
add_dependencies(room_sequence_planning_server
  ${catkin_EXPORTED_TARGETS}
  ${${PROJECT_NAME}_EXPORTED_TARGETS}
  ${PROJECT_NAME}_gencfg
)

# client for testing purpose
add_executable(room_sequence_planning_client ros/src/room_sequence_planning_action_client.cpp)
target_link_libraries(room_sequence_planning_client
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
)
add_dependencies(room_sequence_planning_client ${catkin_EXPORTED_TARGETS} ${${PROJECT_NAME}_EXPORTED_TARGETS})

# client for testing purpose
add_executable(room_sequence_planning_evaluation
  ros/src/room_sequence_planning_evaluation.cpp
  common/src/A_star_pathplanner.cpp
  common/src/node.cpp
)
target_link_libraries(room_sequence_planning_evaluation
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
)
add_dependencies(room_sequence_planning_evaluation ${catkin_EXPORTED_TARGETS} ${${PROJECT_NAME}_EXPORTED_TARGETS})

# evaluation
add_executable(TSP_evaluation
  ros/src/boosttest.cpp
)
target_link_libraries(TSP_evaluation
  tsp_solvers
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
)
add_dependencies(TSP_evaluation ${catkin_EXPORTED_TARGETS} ${${PROJECT_NAME}_EXPORTED_TARGETS})

#tester for different functions
#add_executable(a_star_tester ros/src/tester.cpp common/src/A_star_pathplanner.cpp common/src/node.cpp common/src/nearest_neighbor_TSP.cpp common/src/genetic_TSP.cpp common/src/concorde_TSP.cpp common/src/maximal_clique_finder.cpp common/src/set_cover_solver.cpp common/src/trolley_position_finder.cpp)
#target_link_libraries(a_star_tester ${catkin_LIBRARIES} ${OpenCV_LIBRARIES} ${Boost_LIBRARIES})
#add_dependencies(a_star_tester ${catkin_EXPORTED_TARGETS} ${${PROJECT_NAME}_EXPORTED_TARGETS})
#############
## Install ##
#############
# Mark executables and/or libraries for installation
# NOTE(review): room_sequence_planning_evaluation and TSP_evaluation are not
# installed — presumably intentional (evaluation/test tools); confirm.
install(TARGETS ${PROJECT_NAME} tsp_solvers room_sequence_planning_server room_sequence_planning_client #a_star_tester tester
  ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
  LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
  RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)

#uncomment this if you have a common-directory with header-files
install(DIRECTORY common/include/${PROJECT_NAME}/
  DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
  FILES_MATCHING PATTERN "*.h"
  PATTERN ".svn" EXCLUDE
)

#uncomment this if you have header-files in your project
install(DIRECTORY ros/include/${PROJECT_NAME}/
  DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
  FILES_MATCHING PATTERN "*.h"
  PATTERN ".svn" EXCLUDE
)

#install(DIRECTORY scripts
#	DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
#	PATTERN ".svn" EXCLUDE
#)

##uncomment this if you use launch and yaml files
install(DIRECTORY ros/launch
  DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/ros
  PATTERN ".svn" EXCLUDE
)

#install(DIRECTORY common/files
#	DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/common
#	PATTERN ".svn" EXCLUDE
#)

View File

@ -0,0 +1,42 @@
#!/usr/bin/env python
# Dynamic reconfigure definition for the room sequence planning action server.
PACKAGE = "ipa_building_navigation"

from dynamic_reconfigure.parameter_generator_catkin import *

pg = ParameterGenerator()

# --- TSP solver selection ---
solver_choices = pg.enum(
    [
        pg.const("NearestNeighbor", int_t, 1, "Use the nearest neighbor TSP algorithm."),
        pg.const("GeneticSolver", int_t, 2, "Use the genetic TSP algorithm."),
        pg.const("ConcordeSolver", int_t, 3, "Use the Concorde TSP algorithm."),
    ],
    "TSP solver")
pg.add("tsp_solver", int_t, 0, "TSP solver", 3, 1, 3, edit_method=solver_choices)

# --- problem setting ---
setting_choices = pg.enum(
    [
        pg.const("SimpleOrderPlanning", int_t, 1, "Plan the optimal order of a simple set of locations."),
        pg.const("CheckpointBasedPlanning", int_t, 2, "Two-stage planning that creates local cliques of locations (= checkpoints) and determines the optimal order through the members of each clique as well as the optimal order through the cliques."),
    ],
    "Problem setting of the sequence planning problem")
pg.add("problem_setting", int_t, 0, "Problem setting of the sequence planning problem", 2, 1, 2, edit_method=setting_choices)

# --- checkpoint-based sequence planning specifics ---
trolley_choices = pg.enum(
    [
        pg.const("DragTrolleyWhenNecessary", int_t, 1, "Drag the trolley if it is too far away from next room."),
        pg.const("PrecomputeRoomGroupsAndCorrespondingTrolleyPosition", int_t, 2, "Put rooms together in groups and calculate a corresponding trolley positions."),
    ],
    "Trolley placement method (only applies to CheckpointBasedPlanning problem)")
pg.add("planning_method", int_t, 0, "Trolley placement method (only applies to CheckpointBasedPlanning problem)", 2, 1, 2, edit_method=trolley_choices)
pg.add("max_clique_path_length", double_t, 0, "Max A* path length between two rooms that are assigned to the same clique, in [m] (only applies to CheckpointBasedPlanning problem)", 12.0, 0.0)
pg.add("maximum_clique_size", int_t, 0, "Maximal number of nodes belonging to one trolley clique (only applies to CheckpointBasedPlanning problem)", 9001, 0)

# --- general settings ---
pg.add("map_downsampling_factor", double_t, 0, "The map may be downsampled during computations (e.g. of A* path lengths) in order to speed up the algorithm, if set to 1 the map will have original size, if set to 0 the algorithm won't work", 0.25, 0.00001, 1.0)
pg.add("check_accessibility_of_rooms", bool_t, 0, "Tells the sequence planner if it should check the given room centers for accessibility from the starting position", True)
pg.add("return_sequence_map", bool_t, 0, "Tells the server if the map with the sequence drawn in should be returned", False)
pg.add("display_map", bool_t, 0, "Show the resulting map with paths (only if return_sequence_map=true)", False)

exit(pg.generate(PACKAGE, "room_sequence_planning_action_server", "BuildingNavigation"))

View File

@ -0,0 +1,62 @@
// Astar.cpp
// http://en.wikipedia.org/wiki/A*
// Compiler: Dev-C++ 4.9.9.2
// FB - 201012256
#include <iostream>
#include <iomanip>
#include <queue>
#include <string>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <ipa_building_navigation/node.h>
// NOTE(review): #pragma once is conventionally placed at the very top of a header, before the includes.
#pragma once //make sure this header gets included only one time when multiple classes need it in the same project
//according to https://en.wikipedia.org/wiki/Pragma_once this is more efficient than #define include guards
//This class provides an A* path planner, which calculates the path length from one cv::Point to another. It was taken from
// http://code.activestate.com/recipes/577457-a-star-shortest-path-algorithm/ and slightly changed (to use it with OpenCV).
//It needs node.h to work, which provides an implementation of the nodes of the graph.
//
//!!!!!!!!!Important!!!!!!!!!!!!!
//It downsamples the map with the given factor (0 < factor < 1) so the map gets reduced and computation time improves.
//If the factor is set to 1 the map keeps its original size; if it is 0 the algorithm won't work, so make sure not to set it to 0.
//The algorithm also needs the robot radius [m] and the map resolution [m/cell] to calculate the needed
//amount of erosions to include the radius in the planning.
//
class AStarPlanner
{
protected:
	int n;	// search-grid dimension -- presumably the map width in cells; TODO confirm against A_star_pathplanner.cpp
	int m;	// search-grid dimension -- presumably the map height in cells; TODO confirm against A_star_pathplanner.cpp
	std::string route_;	// encoded route of the last search (see pathFind)
	// Runs the A* search from (xStart,yStart) to (xFinish,yFinish) on the given map and returns the route as an encoded string.
	std::string pathFind(const int& xStart, const int& yStart, const int& xFinish, const int& yFinish, const cv::Mat& map);
public:
	AStarPlanner();
	// Draws the encoded route into the map, starting at start_point and advancing step_length per route element.
	void drawRoute(cv::Mat& map, const cv::Point start_point, const std::string& route, double step_length);
	// Converts the encoded route into a sequence of map points, starting at start_point and advancing step_length per route element.
	void getRoute(const cv::Point start_point, const std::string& route, double step_length, std::vector<cv::Point>& route_points);
	// computes the path length between start point and end point
	// (optionally returns the route points when a non-NULL route vector is provided)
	double planPath(const cv::Mat& map, const cv::Point& start_point, const cv::Point& end_point,
			const double downsampling_factor, const double robot_radius, const double map_resolution,
			const int end_point_valid_neighborhood_radius=0, std::vector<cv::Point>* route=NULL);
	// computes the path length between start point and end point, tries first with a downsampled map for fast computation and uses the original map if the first try was not successful
	// if end_point_valid_neighborhood_radius [measured in cell size of downsampled_map] is set greater than 0, then it is sufficient to find a path to a cell within that neighborhood radius to end_point for a success
	double planPath(const cv::Mat& map, const cv::Mat& downsampled_map, const cv::Point& start_point, const cv::Point& end_point, const double downsampling_factor,
			const double robot_radius, const double map_resolution, const int end_point_valid_neighborhood_radius=0, cv::Mat* draw_path_map=NULL,
			std::vector<cv::Point>* route=NULL);
	// Produces a downsampled copy of the map; per the class comment above, the robot radius and map resolution
	// are used to erode the map so the robot footprint is respected during planning.
	void downsampleMap(const cv::Mat& map, cv::Mat& downsampled_map, const double downsampling_factor, const double robot_radius, const double map_resolution);
};

View File

@ -0,0 +1,92 @@
#include "ros/ros.h"
#include <ros/package.h>
#include <iostream>
#include <iomanip>
#include <queue>
#include <string>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <fstream>
#include <ipa_building_navigation/A_star_pathplanner.h>
#include <ipa_building_navigation/distance_matrix.h>
// NOTE(review): #pragma once is conventionally placed at the very top of a header, before the includes.
#pragma once //make sure this header gets included only one time when multiple classes need it in the same project
//according to https://en.wikipedia.org/wiki/Pragma_once this is more efficient than #define include guards
//This class provides an object to solve a given TSP problem using the Concorde TSP solver. This solver can be downloaded at:
// http://www.math.uwaterloo.ca/tsp/concorde.html
//A short explanation on how to build the solver is given at:
// http://www.math.uwaterloo.ca/tsp/concorde/DOC/README.html
//If you have built the solver, navigate to the ./TSP folder and type " ./concorde -h " to see how to use this solver. This class
//uses the Concorde solver via a system call.
//Make sure you have a "/common/files/TSP_order.txt" and a "/common/files/TSPlib_file.txt" file. These files are used to tell
//concorde the current problem and save the output of it.
//
//It needs a symmetrical matrix of path lengths between the nodes and the starting-point index in this matrix.
//If the path from one node to another doesn't exist or the path is from one node to itself, the entry in the matrix must
//be 0 or smaller. So the format for this matrix is:
//		row: node to start from, column: node to go to
//		---  ---
//		| 0.0  1.0  3.5  5.8  1.2 |
//		| 1.0  0.0  2.4  3.3  9.0 |
//		| 3.5  2.4  0.0  7.7 88.0 |
//		| 5.8  3.3  7.7  0.0  0.0 |
//		| 1.2  9.0 88.0  0.0  0.0 |
//		---  ---
class ConcordeTSPSolver
{
protected:
	//A* path planner to find the path lengths from cv::Point to cv::Point
	AStarPlanner pathplanner_;
	//Function to create the necessary TSPlib file that tells Concorde what the problem is.
	void writeToFile(const cv::Mat& pathlength_matrix, const std::string& tsp_lib_filename, const std::string& tsp_order_filename);
	//Function to read the saved TSP order.
	std::vector<int> readFromFile(const std::string& tsp_order_filename);
	//Worker used to compute the distance matrix (signature suggests it runs in a separate thread -- TODO confirm in the .cpp).
	void distance_matrix_thread(DistanceMatrix& distance_matrix_computation, cv::Mat& distance_matrix,
			const cv::Mat& original_map, const std::vector<cv::Point>& points, double downsampling_factor,
			double robot_radius, double map_resolution, AStarPlanner& path_planner);
	bool abort_computation_;	// set by abortComputation() to cancel a running solve
	std::string unique_file_identifier_;	// distinguishes this instance's temporary TSPlib/order files
public:
	//Constructor
	ConcordeTSPSolver();
	//Requests cancellation of a running computation (sets abort_computation_).
	void abortComputation();
	//Functions to solve the TSP. It needs a distance matrix, that shows the path lengths between two nodes of the problem.
	//This matrix has to be symmetrical or else the TSPlib must be changed. The int shows the index in the matrix.
	//There are two functions for different cases:
	//		1. The distance matrix already exists
	//		2. The distance matrix has to be computed and maybe returned
	//with given distance matrix
	std::vector<int> solveConcordeTSP(const cv::Mat& path_length_Matrix, const int start_Node);
	// compute distance matrix and maybe return it
	// this version does not exclude infinite paths from the TSP ordering
	std::vector<int> solveConcordeTSP(const cv::Mat& original_map, const std::vector<cv::Point>& points, double downsampling_factor,
			double robot_radius, double map_resolution, const int start_Node, cv::Mat* distance_matrix = 0);
	// compute TSP from a cleaned distance matrix (does not contain any infinity paths) that has to be computed
	std::vector<int> solveConcordeTSPClean(const cv::Mat& original_map, const std::vector<cv::Point>& points,
			double downsampling_factor, double robot_radius, double map_resolution, const int start_node);
	// compute TSP with pre-computed cleaned distance matrix (does not contain any infinity paths)
	std::vector<int> solveConcordeTSPWithCleanedDistanceMatrix(const cv::Mat& distance_matrix,
			const std::map<int,int>& cleaned_index_to_original_index_mapping, const int start_node);
};

View File

@ -0,0 +1,24 @@
#include "ros/ros.h"
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <list>
#include <vector>
#include <math.h>
#pragma once
// Returns true if the given element occurs in the given vector, false otherwise.
// An empty vector therefore always yields false (std::find on an empty range returns end()).
//
// The container is taken by const reference: the original signature copied the whole
// vector by value on every membership test, which is needless work for large vectors.
// Callers are unaffected by this change.
template<typename T>
inline bool contains(const std::vector<T>& vec, const T& element)
{
	return std::find(vec.begin(), vec.end(), element) != vec.end();
}

View File

@ -0,0 +1,261 @@
#pragma once
#include <vector>
#include <opencv2/opencv.hpp>
#include <ipa_building_navigation/A_star_pathplanner.h>
#include <ipa_building_navigation/timer.h>
// Computes the symmetrical matrix of path lengths between a set of points on an occupancy
// grid map (straight-line check first, A* fallback), and can "clean" such a matrix by
// removing points that cannot be reached from the others.
class DistanceMatrix
{
protected:
	// set to true (via abortComputation()) to make a running constructDistanceMatrix() return early
	bool abort_computation_;

public:
	DistanceMatrix()
	: abort_computation_(false)
	{
	}

	// Requests abortion of a currently running constructDistanceMatrix() call.
	void abortComputation()
	{
		abort_computation_ = true;
	}

	//Function to construct the symmetrical distance matrix from the given points. The rows show from which node to start and
	//the columns to which node to go. If the path between nodes doesn't exist or the node to go to is the same as the one to
	//start from, the entry of the matrix is 0.
	// REMARK: paths is a pointer that points to a 3D vector that has dimensionality NxN in the outer vectors to store
	//         the paths in a matrix manner; it must already be sized NxN by the caller, since paths->at(i).at(j) is
	//         assigned directly below.
	void constructDistanceMatrix(cv::Mat& distance_matrix, const cv::Mat& original_map, const std::vector<cv::Point>& points,
			double downsampling_factor, double robot_radius, double map_resolution, AStarPlanner& path_planner,
			std::vector<std::vector<std::vector<cv::Point> > >* paths=NULL)
	{
		std::cout << "DistanceMatrix::constructDistanceMatrix: Constructing distance matrix..." << std::endl;
		Timer tim;

		//create the distance matrix with the right size
		distance_matrix.create((int)points.size(), (int)points.size(), CV_64F);

		// hack: speed up trick -- coarser downsampling for large problems
		if (points.size()>500)
			downsampling_factor *= 0.5;

		// reduce image size already here to avoid resizing in the planner each time
		// NOTE(review): one_by_downsampling_factor is computed but not used in this function
		const double one_by_downsampling_factor = 1./downsampling_factor;
		cv::Mat downsampled_map;
		path_planner.downsampleMap(original_map, downsampled_map, downsampling_factor, robot_radius, map_resolution);

		// print a simple text progress bar for large problems
		if (points.size()>500)
			std::cout << "0 10 20 30 40 50 60 70 80 90 100" << std::endl;
		// int a=0, b=0;
		for (int i = 0; i < points.size(); i++)
		{
			//std::cout << " a=" << a << " b=" << b << std::endl;
			if (points.size()>500 && i%(std::max(1,(int)points.size()/100))==0)
				std::cout << "." << std::flush;
			//cv::Point current_center = downsampling_factor * points[i];
			for (int j = 0; j < points.size(); j++)
			{
				if (j != i)
				{
					if (j > i) //only compute upper right triangle of matrix, rest is symmetrically added
					{
						if (abort_computation_==true)
							return;

						// try first with direct connecting line (often sufficient and a significant speedup over A*)
						cv::LineIterator it(original_map, points[i], points[j]);
						bool direct_connection = true;
						for (int k=0; k<it.count && direct_connection==true; k++, ++it)
							if (**it < 250)
								direct_connection = false; // if a pixel in between is not accessible, direct connection is not possible
						if (direct_connection == true)
						{
							// compute distance as the Euclidean length of the direct line
							const double length = cv::norm(points[i]-points[j]);
							distance_matrix.at<double>(i, j) = length;
							distance_matrix.at<double>(j, i) = length; //symmetrical-Matrix --> saves half the computation time
							if (paths!=NULL)
							{
								// store path: all pixels along the direct line
								cv::LineIterator it2(original_map, points[i], points[j]);
								std::vector<cv::Point> current_path(it2.count);
								for (int k=0; k<it2.count; k++, ++it2)
									current_path[k] = it2.pos();
								paths->at(i).at(j) = current_path;
								paths->at(j).at(i) = current_path;
							}
							// ++a;
						}
						else
						{
							// no free direct line --> fall back to the A* path planner
							if(paths!=NULL)
							{
								std::vector<cv::Point> current_path;
								double length = path_planner.planPath(original_map, downsampled_map, points[i], points[j], downsampling_factor, 0., map_resolution, 0, NULL, &current_path);
								distance_matrix.at<double>(i, j) = length;
								distance_matrix.at<double>(j, i) = length; //symmetrical-Matrix --> saves half the computation time

								// remap path points to original map size (planner returned downsampled coordinates)
								for(std::vector<cv::Point>::iterator point=current_path.begin(); point!=current_path.end(); ++point)
								{
									point->x = point->x/downsampling_factor;
									point->y = point->y/downsampling_factor;
								}
								paths->at(i).at(j) = current_path;
								paths->at(j).at(i) = current_path;
							}
							else
							{
								double length = path_planner.planPath(original_map, downsampled_map, points[i], points[j], downsampling_factor, 0., map_resolution);
								distance_matrix.at<double>(i, j) = length;
								distance_matrix.at<double>(j, i) = length; //symmetrical-Matrix --> saves half the computation time
							}
							// ++b;
						}
					}
				}
				else
				{
					// path from a node to itself has length 0
					distance_matrix.at<double>(i, j) = 0;
				}
			}
		}
		std::cout << "\nDistance matrix created in " << tim.getElapsedTimeInMilliSec() << " ms" << std::endl;// "\nDistance matrix:\n" << distance_matrix << std::endl;
	}

	// check whether distance matrix contains infinite path lengths and if this is true, create a new distance matrix with maximum size clique of reachable points
	// cleaned_index_to_original_index_mapping --> maps the indices of the cleaned distance_matrix to the original indices of the original distance_matrix
	// Strategy: repeatedly mark the row with the most "infinite" entries (> 1e90) for removal,
	// neutralize its row/column, and stop once no infinite entries remain; then copy the
	// surviving rows/columns into distance_matrix_cleaned.
	void cleanDistanceMatrix(const cv::Mat& distance_matrix, cv::Mat& distance_matrix_cleaned, std::map<int,int>& cleaned_index_to_original_index_mapping)
	{
		// standard: use a 1:1 mapping (input = output)
		cleaned_index_to_original_index_mapping.clear();
		for (int i=0; i<distance_matrix.rows; ++i)
			cleaned_index_to_original_index_mapping[i] = i;
		distance_matrix_cleaned = distance_matrix.clone();
		if (distance_matrix.rows < 1)
			return;

		// entries above this threshold are treated as "no path exists"
		const double max_length = 1e90;
		std::vector<bool> remove_entry(distance_matrix.rows, false); // keeps track on which entries of distance_matrix need to be removed
		// loop until all lines are marked, which need to be removed
		cv::Mat distance_matrix_temp = distance_matrix.clone();
		while (true)
		{
			// count infinite entries in each row of distance matrix
			std::vector<int> infinite_length_entries(distance_matrix_temp.rows, 0);
			for (int i=0; i<distance_matrix_temp.rows; ++i)
				for (int j=0; j<distance_matrix_temp.cols; ++j)
					if (distance_matrix_temp.at<double>(i,j)>max_length)
						infinite_length_entries[i]++;
			// sort rows by their number of infinite entries (multimap iterates keys in ascending order)
			std::multimap<int, int> number_infinite_entries_to_row_index_mapping; // maps number of infinite entries to the corresponding row index
			for (size_t i=0; i<infinite_length_entries.size(); ++i)
				number_infinite_entries_to_row_index_mapping.insert(std::pair<int,int>(infinite_length_entries[i], (int)i));
			// if any row has at least one infinite entry, mark the row with most infinity entries for deletion
			bool mark_line = false;
			int mark_index = -1;
			std::multimap<int, int>::reverse_iterator number_infinite_entries_to_row_index_mapping_last = number_infinite_entries_to_row_index_mapping.rbegin();
			if (number_infinite_entries_to_row_index_mapping_last->first > 0)
			{
				mark_line = true;
				mark_index = number_infinite_entries_to_row_index_mapping_last->second;
				remove_entry[mark_index] = true;
			}
			// neutralize the marked row and column with -1 so they no longer count as infinite
			if (mark_line == true)
			{
				for (int j=0; j<distance_matrix_temp.cols; ++j) // mark row: mark_index
					distance_matrix_temp.at<double>(mark_index, j) = -1.;
				for (int i=0; i<distance_matrix_temp.rows; ++i) // mark col: mark_index
					distance_matrix_temp.at<double>(i, mark_index) = -1.;
			}
			else
				break;
		}

		// count entries to remove
		int number_entries_to_be_removed = 0;
		for (size_t i=0; i<remove_entry.size(); ++i)
			if (remove_entry[i] == true)
				number_entries_to_be_removed++;

		// remove elements from distance matrix if necessary
		if (number_entries_to_be_removed > 0)
		{
			std::cout << " DistanceMatrix::cleanDistanceMatrix: Need to remove " << number_entries_to_be_removed << " elements out of " << distance_matrix.rows << " elements from the distance matrix." << std::endl;
			// setup new distance_matrix
			const int new_size = distance_matrix.rows - number_entries_to_be_removed;
			if (new_size == 0)
			{
				// all points unreachable from each other: keep the original matrix and 1:1 mapping
				std::cout << " DistanceMatrix::cleanDistanceMatrix: Warning: Would need to remove all elements of distance_matrix. Aborting." << std::endl;
				return;
			}
			distance_matrix_cleaned.create(new_size, new_size, CV_64F);
			cleaned_index_to_original_index_mapping.clear();

			// fill new distance_matrix with the surviving rows/columns
			int new_index = 0;
			for (size_t i=0; i<remove_entry.size(); ++i)
			{
				if (remove_entry[i] == false)
				{
					// mapping from new to old indices
					cleaned_index_to_original_index_mapping[new_index] = (int)i;
					// copy values
					int new_j = 0;
					for (size_t j=0; j<remove_entry.size(); ++j)
					{
						if (remove_entry[j] == false)
						{
							distance_matrix_cleaned.at<double>(new_index, new_j) = distance_matrix.at<double>(i,j);
							new_j++;
						}
					}
					new_index++;
				}
			}
			// sanity check: the number of copied rows must equal the computed new size
			if (new_index != new_size)
				std::cout << "##################################################\nDistanceMatrix::cleanDistanceMatrix: Warning: new_index != new_size.\n##################################################" << std::endl;
		}
	}

	// calculate the distance matrix and check whether distance matrix contains infinite path lengths and if this is true,
	// create a new distance matrix with maximum size clique of reachable points
	// start_node --> provide the original start node to the function, it writes the new start node mapped to the new coordinates into it
	// cleaned_index_to_original_index_mapping --> maps the indices of the cleaned distance_matrix to the original indices of the original distance_matrix
	void computeCleanedDistanceMatrix(const cv::Mat& original_map, const std::vector<cv::Point>& points,
			double downsampling_factor, double robot_radius, double map_resolution, AStarPlanner& path_planner,
			cv::Mat& distance_matrix, std::map<int,int>& cleaned_index_to_original_index_mapping, int& start_node)
	{
		std::cout << "DistanceMatrix::computeCleanedDistanceMatrix: Constructing distance matrix..." << std::endl;
		// calculate the distance matrix
		cv::Mat distance_matrix_raw;
		constructDistanceMatrix(distance_matrix_raw, original_map, points, downsampling_factor, robot_radius, map_resolution, path_planner);

		// check whether distance matrix contains infinite path lengths and if this is true, create a new distance matrix with maximum size clique of reachable points
		cleanDistanceMatrix(distance_matrix_raw, distance_matrix, cleaned_index_to_original_index_mapping);

		// re-assign the start node to cleaned indices (use 0 if the original start node was removed from distance_matrix_cleaned)
		int new_start_node = 0;
		for (std::map<int,int>::iterator it=cleaned_index_to_original_index_mapping.begin(); it!=cleaned_index_to_original_index_mapping.end(); ++it)
			if (it->second == start_node)
				new_start_node = it->first;
		start_node = new_start_node;
	}
};

View File

@ -0,0 +1,91 @@
#include "ros/ros.h"
#include <iostream>
#include <iomanip>
#include <string>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <ipa_building_navigation/contains.h>
#include <ipa_building_navigation/nearest_neighbor_TSP.h>
#include <ipa_building_navigation/A_star_pathplanner.h>
#include <ipa_building_navigation/distance_matrix.h>
#pragma once //make sure this header gets included only one time when multiple classes need it in the same project
//regarding to https://en.wikipedia.org/wiki/Pragma_once this is more efficient than #define
//This class provides a solution for the TSP by taking the nearest-neighbor path and applying a genetic algorithm on it.
//It needs a symmetrical matrix of pathlenghts between the nodes and the starting-point index in this matrix.
//If the path from one node to another doesn't exist or the path is from one node to itself, the entry in the matrix must
//be 0 or smaller. so the format for this matrix is:
// row: node to start from, column: node to go to
// --- ---
// | 0.0 1.0 3.5 5.8 1.2 |
// | 1.0 0.0 2.4 3.3 9.0 |
// | 3.5 2.4 0.0 7.7 88.0|
// | 5.8 3.3 7.7 0.0 0.0 |
// | 1.2 9.0 88.0 0.0 0.0 |
// --- ---
class GeneticTSPSolver
{
protected:
	// A* path planner used to compute the path lengths between pairs of cv::Point nodes
	AStarPlanner pathplanner_;

	// Returns the length of the given path by looking the hop lengths up in the path-length matrix.
	double getPathLength(const cv::Mat& path_length_Matrix, std::vector<int> given_path);

	// Returns a mutated (randomly changed) copy of the given parent path.
	std::vector<int> mutatePath(const std::vector<int>& parent_path);

	// Selects the best path among the given candidate paths; 'changed' reports whether the
	// selection differs from before (used for the termination criterion below).
	// NOTE(review): exact semantics of 'changed' assumed from the generation-count members — confirm in the .cpp.
	std::vector<int> getBestPath(const std::vector<std::vector<int> > paths, const cv::Mat& pathlength_Matrix, bool& changed);

	// Thread entry point that computes the distance matrix via the given DistanceMatrix object.
	void distance_matrix_thread(DistanceMatrix& distance_matrix_computation, cv::Mat& distance_matrix,
			const cv::Mat& original_map, const std::vector<cv::Point>& points, double downsampling_factor,
			double robot_radius, double map_resolution, AStarPlanner& path_planner);

	// flag checked by long-running computations; set to true by abortComputation()
	bool abort_computation_;

	//indicates how many generations shall be at least taken in order to optimize the given path
	int min_number_of_generations_;

	//indicates the amount of generations for which the path shouldn't have changed in order to stop the
	//optimization
	int const_generations_number_;

public:
	//constructor
	GeneticTSPSolver(int min_number_of_gens=2300, int const_generations=100);

	// Requests abortion of a currently running TSP computation.
	void abortComputation();

	//Solving-algorithms for the given TSP. It returns a vector of int, which is the order from this solution. The int shows
	//the index in the Matrix. There are two functions for different cases:
	//	1. The distance matrix already exists
	//	2. The distance matrix has to be computed and maybe returned

	//with given distance matrix
	std::vector<int> solveGeneticTSP(const cv::Mat& path_length_Matrix, const int start_Node);

	// compute distance matrix and maybe returning it
	// this version does not exclude infinite paths from the TSP ordering
	std::vector<int> solveGeneticTSP(const cv::Mat& original_map, const std::vector<cv::Point>& points, double downsampling_factor,
			double robot_radius, double map_resolution, const int start_Node, cv::Mat* distance_matrix=0);

	// compute TSP from a cleaned distance matrix (does not contain any infinity paths) that has to be computed
	std::vector<int> solveGeneticTSPClean(const cv::Mat& original_map, const std::vector<cv::Point>& points,
			double downsampling_factor, double robot_radius, double map_resolution, const int start_node);

	// compute TSP with pre-computed cleaned distance matrix (does not contain any infinity paths)
	std::vector<int> solveGeneticTSPWithCleanedDistanceMatrix(const cv::Mat& distance_matrix,
			const std::map<int,int>& cleaned_index_to_original_index_mapping, const int start_node);
};

View File

@ -0,0 +1,69 @@
#include <iostream>
#include <iomanip>
#include <queue>
#include <string>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <fstream>
#include <ipa_building_navigation/contains.h>
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/undirected_graph.hpp>
#include <boost/graph/bron_kerbosch_all_cliques.hpp>
#pragma once //make sure this header gets included only one time when multiple classes need it in the same project
//regarding to https://en.wikipedia.org/wiki/Pragma_once this is more efficient than #define
//This algorithm provides a class that finds all maximal cliques in a given graph. It uses the Bron-Kerbosch Implementation
//in Boost to do this. As input a symmetrical distance-Matrix is needed that shows the pathlenghts from one node to another.
//If the path from one node to another doesn't exist, the entry in the matrix must be 0 or smaller. so the format for this
//Matrix is:
// row: node to start from, column: node to go to
// --- ---
// | 0.0 1.0 3.5 5.8 1.2 |
// | 1.0 0.0 2.4 3.3 9.0 |
// | 3.5 2.4 0.0 7.7 88.0|
// | 5.8 3.3 7.7 0.0 0.0 |
// | 1.2 9.0 88.0 0.0 0.0 |
// --- ---
//It also needs a maximal pathlength that shows the algorithm when the paths are too long and should not be included in the
//Graph. This is neccessary because we assume that you can reach every node from the others and with a Graph in which all
//nodes are connected the only maximal clique are the nodes itself. For example the previous matrix gets
// maxval = 7.0
// --- ---
// | 0.0 1.0 3.5 5.8 1.2 |
// | 1.0 0.0 2.4 3.3 0.0 |
// | 3.5 2.4 0.0 0.0 0.0 |
// | 5.8 3.3 0.0 0.0 0.0 |
// | 1.2 0.0 0.0 0.0 0.0 |
// --- ---
//
//The nodes in the graph are named after their position in the distance-Matrix and the cliques are
// std::vector<int> variables so you can easily acces the right nodes in the matrix outside this class.
class cliqueFinder
{
protected:
	//function that allows to add a vertex with a name to a boost::Graph. See boost's helper.hpp
	//( http://www.boost.org/doc/libs/1_47_0/libs/graph/example/helper.hpp ) for an explanation
	template<typename Graph, typename NameMap, typename VertexMap>
	typename boost::graph_traits<Graph>::vertex_descriptor addNamedVertex(Graph& g, NameMap nm, const std::string& name, VertexMap& vm);

	// Builds a boost graph out of the given distance matrix: one vertex per row/column index,
	// edges for node pairs that remain connected (see file header above for the matrix format).
	template<typename Graph>
	void createGraph(Graph& graph, cv::Mat& distance_Matrix);

	// Sets entries of the distance matrix that exceed maxval to zero, so the corresponding
	// edges are excluded from the graph (see the maxval example in the file header).
	void cutTooLongEdges(cv::Mat& complete_distance_matrix, double maxval);

public:
	cliqueFinder();

	// Returns all maximal cliques of the graph induced by the given distance matrix, after
	// discarding edges longer than maxval. Each clique is a vector of node indices that
	// correspond to rows/columns of the distance matrix.
	std::vector<std::vector<int> > getCliques(const cv::Mat& distance_matrix, double maxval);
};

View File

@ -0,0 +1,69 @@
#include "ros/ros.h"
#include <iostream>
#include <iomanip>
#include <string>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <ipa_building_navigation/contains.h>
#include <ipa_building_navigation/A_star_pathplanner.h>
#include <ipa_building_navigation/distance_matrix.h>
#pragma once //make sure this header gets included only one time when multiple classes need it in the same project
//regarding to https://en.wikipedia.org/wiki/Pragma_once this is more efficient than #define
//This class provides a solution for the TSP by taking the nearest neighbor from the current Point as next Point.
//It needs a symmetrical matrix of pathlenghts between the nodes and the starting-point index in this matrix.
//If the path from one node to another doesn't exist or the path is from one node to itself, the entry in the matrix must
//be 0 or smaller. so the format for this matrix is:
// row: node to start from, column: node to go to
// --- ---
// | 0.0 1.0 3.5 5.8 1.2 |
// | 1.0 0.0 2.4 3.3 9.0 |
// | 3.5 2.4 0.0 7.7 88.0|
// | 5.8 3.3 7.7 0.0 0.0 |
// | 1.2 9.0 88.0 0.0 0.0 |
// --- ---
class NearestNeighborTSPSolver
{
protected:
	// A* path planner used to compute the path lengths between pairs of cv::Point nodes
	AStarPlanner pathplanner_;

//	//Function to construct the distance matrix, showing the pathlength from node to node
//	void NearestNeighborTSPSolver::constructDistanceMatrix(cv::Mat& distance_matrix, const cv::Mat& original_map,
//			const std::vector<cv::Point>& points, double downsampling_factor, double robot_radius, double map_resolution);

public:
	//constructor
	NearestNeighborTSPSolver();

	//Solving-algorithms for the given TSP. It returns a vector of int, which is the order from this solution. The int shows
	//the index in the Matrix. There are two functions for different cases:
	//	1. The distance matrix already exists
	//	2. The distance matrix has to be computed and maybe returned

	//with given distance matrix
	std::vector<int> solveNearestTSP(const cv::Mat& path_length_matrix, const int start_node); //with given distance matrix

	// compute TSP and distance matrix without cleaning it
	// this version does not exclude infinite paths from the TSP ordering
	std::vector<int> solveNearestTSP(const cv::Mat& original_map, const std::vector<cv::Point>& points, double downsampling_factor,
			double robot_radius, double map_resolution, const int start_node, cv::Mat* distance_matrix=0);

	// compute TSP from a cleaned distance matrix (does not contain any infinity paths) that has to be computed
	std::vector<int> solveNearestTSPClean(const cv::Mat& original_map, const std::vector<cv::Point>& points,
			double downsampling_factor, double robot_radius, double map_resolution, const int start_node);

	// compute TSP with pre-computed cleaned distance matrix (does not contain any infinity paths)
	std::vector<int> solveNearestTSPWithCleanedDistanceMatrix(const cv::Mat& distance_matrix,
			const std::map<int,int>& cleaned_index_to_original_index_mapping, const int start_node);
};

View File

@ -0,0 +1,40 @@
#include <iostream>
#include <iomanip>
#include <queue>
#include <string>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <stdio.h>
//This is the object to represent a node in a graph. It is used by A_star_pathplanner.cpp and was adapted from:
// http://code.activestate.com/recipes/577457-a-star-shortest-path-algorithm/
//In the estimate() function the distance from the node to the goal is calculated. Uncomment there which distance calculation
//you want to use (Euclidean, Manhattan or Chebyshev). Euclidean is more precisly but could slow the planner a little bit on
//long paths.
#pragma once //make sure this header gets included only one time when multiple classes need it in the same project
//regarding to https://en.wikipedia.org/wiki/Pragma_once this is more efficient than #define
class NodeAstar
{
protected:
	// current position on the grid
	int xPos_;
	int yPos_;
	// total distance already travelled to reach the node
	int level_;
	// priority=level+remaining distance estimate
	int priority_; // smaller: higher priority

public:
	// xp/yp: node position, d: travelled distance (level), p: initial priority
	NodeAstar(int xp, int yp, int d, int p);

	int getxPos() const;
	int getyPos() const;
	int getLevel() const;
	int getPriority() const;

	// recomputes the priority as level plus the remaining distance estimate to (xDest, yDest)
	void updatePriority(const int& xDest, const int& yDest);

	// updates the travelled distance (level) for one step in direction i
	// NOTE(review): the per-direction step cost (straight vs. diagonal) is defined in the .cpp — confirm there.
	void nextLevel(const int& i); // i: direction

	// estimates the remaining distance to (xDest, yDest); the metric (Euclidean, Manhattan or
	// Chebyshev) is selected by (un)commenting code in the implementation (see header note above).
	const int& estimate(const int& xDest, const int& yDest) const;
};

View File

@ -0,0 +1,72 @@
#include <iostream>
#include <iomanip>
#include <queue>
#include <string>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <stdio.h>
#include <algorithm>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <fstream>
#include <ipa_building_navigation/contains.h>
#include <ipa_building_navigation/A_star_pathplanner.h>
#include <ipa_building_navigation/maximal_clique_finder.h>
#include <ipa_building_navigation/distance_matrix.h>
#pragma once //make sure this header gets included only one time when multiple classes need it in the same project
//regarding to https://en.wikipedia.org/wiki/Pragma_once this is more efficient than #define
//This algorithm provides a class to solve the set-cover problem for given cliques. This is done by using the greedy-search
//algorithm, which takes the clique with most unvisited nodes before the other nodes and removes the nodes in it from the
//unvisited. It repeats this step until no more node hasn't been visited. It then merges cliques together that have at least
//one node in common.
//
//!!!!!!!!!!!!!!!!Important!!!!!!!!!!!!!!!!!
//Make sure that the cliques cover all nodes in the graph or else this algorithm runs into an endless loop. For best results
//take the cliques from a maximal-clique finder like the Bron-Kerbosch algorithm.
class SetCoverSolver
{
protected:
	//A* path planner used to compute the path lengths between pairs of cv::Point nodes
	AStarPlanner pathplanner_;

	//This object finds all maximal cliques in the given graph. It needs a symmetrical distance matrix showing the pathlength
	//from one node to another and a maximal pathlength to cut edges that are larger than this value. See maximal_clique_finder.h
	//for further information.
	cliqueFinder maximal_clique_finder;

	//function to merge groups together, which have at least one node in common
	std::vector<std::vector<int> > mergeGroups(const std::vector<std::vector<int> >& found_groups);

//	//Function to construct the distance matrix, showing the pathlength from node to node
//	void constructDistanceMatrix(cv::Mat& distance_matrix, const cv::Mat& original_map, const std::vector<cv::Point>& points,
//			double downsampling_factor, double robot_radius, double map_resolution);

public:
	//Constructor
	SetCoverSolver();

	//algorithms to solve the set cover problem. There are three functions for different cases:
	//	1. The cliques already have been found
	//	2. The distance matrix already exists
	//	3. The distance matrix has to be computed and may be returned

	//cliques are given
	std::vector<std::vector<int> > solveSetCover(std::vector<std::vector<int> >& given_cliques, const int number_of_nodes,
			const int max_number_of_clique_members, const cv::Mat& distance_matrix);

	//the distance matrix is given
	std::vector<std::vector<int> > solveSetCover(const cv::Mat& distance_matrix, const std::vector<cv::Point>& points,
			const int number_of_nodes, double maximal_pathlength, const int max_number_of_clique_members);

	//the distance matrix has to be computed and may be returned
	std::vector<std::vector<int> > solveSetCover(const cv::Mat& original_map, const std::vector<cv::Point>& points,
			double downsampling_factor, double robot_radius, double map_resolution, double maximal_pathlength, const int max_number_of_clique_members, cv::Mat* distance_matrix=0);
};

View File

@ -0,0 +1,183 @@
/*
* timer.h
*
* Created on: May 13, 2013
* Author: rmb-ce
*/
// from: http://snipplr.com/view/40650/timer-class-for-both-unixlinuxmac-and-windows-system/
//////////////////
// How to Use ////
//////////////////
//#include <iostream>
//#include "timer.h"
//using namespace std;
//int main()
//{
// Timer timer;
//
// // start timer
// timer.start();
//
// // do something
// ...
//
// // stop timer
// timer.stop();
//
// // print the elapsed time in millisec
// cout << timer.getElapsedTimeInMilliSec() << " ms.\n";
//
// return 0;
//}
//////////////////////////////////////////////////////////////////////////////
// Timer.h
// =======
// High Resolution Timer.
// This timer is able to measure the elapsed time with 1 micro-second accuracy
// in both Windows, Linux and Unix system
//
// AUTHOR: Song Ho Ahn (song.ahn@gmail.com)
// CREATED: 2003-01-13
// UPDATED: 2006-01-13
//
// Copyright (c) 2003 Song Ho Ahn
//////////////////////////////////////////////////////////////////////////////
#ifndef TIMER_H_DEF
#define TIMER_H_DEF
#ifdef WIN32 // Windows system specific
//#include <windows.h>
#else // Unix based system specific
#include <sys/time.h>
#endif
#include <stdlib.h>
// High resolution timer with micro-second accuracy for both Windows and
// Unix-like systems. Measuring starts automatically on construction; use
// start()/stop() to delimit an interval and the getElapsedTime*() accessors
// to read it out.
class Timer
{
public:
	// constructs the timer and immediately starts measuring
	Timer()
	{
#ifdef WIN32
		QueryPerformanceFrequency(&ticks_per_second_);
		start_count_.QuadPart = 0;
		end_count_.QuadPart = 0;
#else
		start_count_.tv_sec = start_count_.tv_usec = 0;
		end_count_.tv_sec = end_count_.tv_usec = 0;
#endif
		is_stopped_ = 0;
		start_us_ = 0;
		end_us_ = 0;
		start();
	}

	// destructor (nothing to release)
	~Timer()
	{
	}

	// (re-)starts the measurement: samples the current time as start point
	// and clears the stopped flag
	void start()
	{
		is_stopped_ = 0;
#ifdef WIN32
		QueryPerformanceCounter(&start_count_);
#else
		gettimeofday(&start_count_, NULL);
#endif
	}

	// stops the measurement: samples the current time as end point
	void stop()
	{
		is_stopped_ = 1;
#ifdef WIN32
		QueryPerformanceCounter(&end_count_);
#else
		gettimeofday(&end_count_, NULL);
#endif
	}

	// elapsed time in seconds (alias for getElapsedTimeInSec())
	double getElapsedTime()
	{
		return getElapsedTimeInSec();
	}

	// elapsed time in seconds (micro-seconds divided by 1e6)
	double getElapsedTimeInSec()
	{
		return getElapsedTimeInMicroSec() * 0.000001;
	}

	// elapsed time in milliseconds (micro-seconds divided by 1e3)
	double getElapsedTimeInMilliSec()
	{
		return getElapsedTimeInMicroSec() * 0.001;
	}

	// elapsed time in micro-seconds; all other accessors derive from this.
	// If the timer has not been stopped, the end point is re-sampled now
	// (without setting the stopped flag), so the timer keeps running.
	double getElapsedTimeInMicroSec()
	{
#ifdef WIN32
		if (!is_stopped_)
			QueryPerformanceCounter(&end_count_);
		start_us_ = start_count_.QuadPart * (1000000.0 / ticks_per_second_.QuadPart);
		end_us_ = end_count_.QuadPart * (1000000.0 / ticks_per_second_.QuadPart);
#else
		if (!is_stopped_)
			gettimeofday(&end_count_, NULL);
		start_us_ = (start_count_.tv_sec * 1000000.0) + start_count_.tv_usec;
		end_us_ = (end_count_.tv_sec * 1000000.0) + end_count_.tv_usec;
#endif
		return end_us_ - start_us_;
	}

private:
	double start_us_;	// starting time in micro-seconds
	double end_us_;		// ending time in micro-seconds
	int is_stopped_;	// stop flag: 1 after stop(), 0 while running
#ifdef WIN32
	LARGE_INTEGER ticks_per_second_;	// performance counter frequency (ticks per second)
	LARGE_INTEGER start_count_;
	LARGE_INTEGER end_count_;
#else
	timeval start_count_;
	timeval end_count_;
#endif
};
#endif // TIMER_H_DEF

View File

@ -0,0 +1,58 @@
#include <iostream>
#include <iomanip>
#include <queue>
#include <string>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <fstream>
#include <ipa_building_navigation/contains.h>
#include <ipa_building_navigation/A_star_pathplanner.h>
#pragma once //make sure this header gets included only one time when multiple classes need it in the same project
//regarding to https://en.wikipedia.org/wiki/Pragma_once this is more efficient than #define
//This object provides an algorithm to search for the best trolley position for a group of points. A trolley position is
//the Point where the trolley of a robot should be placed during the cleaning of the group. This is done by searching in the
bounding box of these points for the point which minimizes the pathlength to every group member. If the group has only two
members the algorithm chooses a Point on the optimal path between these two Points that lies in the middle of this path.
//This algorithm needs as input:
// 1. The original occupancy gridmap to get the pathlength between two Points.
// 2. A vector of found groups. This vector stores the group as integer that show the Position of the node in the
// roomcenters vector. To find the groups it is good to find all maximal cliques in the graph and then applying
// a set Cover solver on these.
// 3. A vector of roomcenters that stores the centers of each room. This algorithm was implemented for planning
// the order of cleaning rooms so this variable is called room_centers, but it can store every cv::Point with that
// you want to find the best trolley position.
//	4. A downsampling factor to reduce the size of the map. This is used by the A_star_pathplanner to heavily reduce
//	   calculation time. It has to be in (0, 1]. If it is 1 the map will be taken as it is.
// 5. The Radius of the robot and the map resolution to make sure the A_star pathplanner stays in enough distance to the
// walls and obstacles. (See A_star_pathplanner.cpp for further information)
//Class that computes the best trolley (parking) position for each group of room centers
//(see the algorithm description in the comment block above this class).
class TrolleyPositionFinder
{
protected:
//Object to plan a path from Point A to Point B in a given gridmap
AStarPlanner path_planner_;
//Function to find a trolley position for one group.
//Searches the bounding box of group_points for the cell that minimizes the summed A* pathlength
//to all group members; for two-point groups a point in the middle of the connecting path is chosen.
cv::Point findOneTrolleyPosition(const std::vector<cv::Point> group_points, const cv::Mat& original_map,
const double downsampling_factor, const double robot_radius, const double map_resolution);
public:
//constructor
TrolleyPositionFinder();
//Function to find a trolley position for each group by using the findOneTrolleyPosition function.
//found_groups: per group, the indices into room_centers belonging to that group.
//Returns one trolley position per group, in the same order as found_groups.
std::vector<cv::Point> findTrolleyPositions(const cv::Mat& original_map, const std::vector<std::vector<int> >& found_groups,
const std::vector<cv::Point>& room_centers, const double downsampling_factor, const double robot_radius,
const double map_resolution);
};

View File

@ -0,0 +1,63 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2017 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_building_navigation
*
* \author
* Author: Richard Bormann
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 03.2017
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
// Enumeration of the available TSP solver backends.
// NOTE(review): the explicit numeric values suggest they are referenced externally
// (e.g. in configuration) -- confirm before renumbering.
enum TSPSolvers {TSP_NEAREST_NEIGHBOR=1, TSP_GENETIC=2, TSP_CONCORDE=3};

View File

@ -0,0 +1,67 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2017 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_building_navigation
*
* \author
* Author: Richard Bormann
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 03.2017
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
#include <ipa_building_navigation/tsp_solver_defines.h>
#include <ipa_building_navigation/nearest_neighbor_TSP.h>
#include <ipa_building_navigation/genetic_TSP.h>
#include <ipa_building_navigation/concorde_TSP.h>

View File

@ -0,0 +1,383 @@
#include <ipa_building_navigation/A_star_pathplanner.h>
#include <ipa_building_navigation/timer.h>
const int dir = 8; // number of possible directions to go at any position
// if dir==4
//static int dx[dir]={1, 0, -1, 0};
//static int dy[dir]={0, 1, 0, -1};
// if dir==8
static int dx[dir] =
{ 1, 1, 0, -1, -1, -1, 0, 1 };
static int dy[dir] =
{ 0, 1, 1, 1, 0, -1, -1, -1 };
static int expanding_counter = 0;
// Ordering predicate for the open-node priority queue. std::priority_queue pops the
// "largest" element first, so the comparison is inverted here to make the queue pop
// the node with the SMALLEST priority value (best A* estimate) first.
bool operator<(const NodeAstar& a, const NodeAstar& b)
{
	return b.getPriority() < a.getPriority();
}
// Constructs the planner with a minimal 1x1 grid; the actual map dimensions (m = rows,
// n = cols) are assigned in planPath() before every search.
AStarPlanner::AStarPlanner()
{
	m = 1;
	n = 1;
}
// Renders the given route into 'map' as a sequence of short green line segments.
// 'route' is a string of direction digits ('0'-'7', indices into the dx/dy tables
// above); each digit advances the pen by step_length pixels in that direction.
// The green channel brightens along the path so the travel direction is visible.
void AStarPlanner::drawRoute(cv::Mat& map, const cv::Point start_point, const std::string& route, double step_length)
{
	if (route.empty())
		return;

	const int route_length = (int)route.length();
	int x1 = start_point.x;
	int y1 = start_point.y;
	for (int i = 0; i < route_length; ++i)
	{
		// decode the direction digit into an index into dx/dy
		const int direction = route.at(i) - '0';
		const int x2 = x1 + dx[direction] * step_length;
		const int y2 = y1 + dy[direction] * step_length;
		// fade from dark to brighter green along the route
		const double progress = 0.2 + 0.6 * (double)i / (double)route_length;
		cv::line(map, cv::Point(x1, y1), cv::Point(x2, y2), CV_RGB(0, progress * 255, 0), 1);
		x1 = x2;
		y1 = y2;
	}
}
// Converts the direction-digit string 'route' into a list of waypoints, starting at
// start_point and advancing step_length pixels per digit (see dx/dy above). The start
// point itself is appended first; nothing is appended for an empty route.
void AStarPlanner::getRoute(const cv::Point start_point, const std::string& route,
		double step_length, std::vector<cv::Point>& route_points)
{
	if (route.empty())
		return;

	int x = start_point.x;
	int y = start_point.y;
	route_points.push_back(cv::Point(x, y));
	const int route_length = (int)route.length();
	for (int i = 0; i < route_length; ++i)
	{
		// decode the direction digit and step to the next waypoint
		const int direction = route.at(i) - '0';
		x = x + dx[direction] * step_length;
		y = y + dy[direction] * step_length;
		route_points.push_back(cv::Point(x, y));
	}
}
// Prepares the map for path planning: first erodes the free space so that the planner
// keeps a safety margin of one robot radius to walls and obstacles, then (optionally)
// shrinks the map by downsampling_factor to reduce A* computation time.
void AStarPlanner::downsampleMap(const cv::Mat& map, cv::Mat& downsampled_map, const double downsampling_factor, const double robot_radius, const double map_resolution)
{
	// robot radius [m] divided by resolution [m/pixel] gives the margin in pixels,
	// applied as that many erosion iterations
	const int number_of_erosions = (robot_radius / map_resolution);
	cv::Mat eroded_map;
	cv::erode(map, eroded_map, cv::Mat(), cv::Point(-1, -1), number_of_erosions);

	// shrink the eroded map unless the factor requests the original size
	if (downsampling_factor == 1.)
		downsampled_map = eroded_map;
	else
		cv::resize(eroded_map, downsampled_map, cv::Size(0, 0), downsampling_factor, downsampling_factor, cv::INTER_NEAREST);//_LINEAR);
}
// A-star algorithm.
// Performs an A* grid search from (xStart,yStart) to (xFinish,yFinish) on the given
// (already eroded/downsampled) map. The returned route is a string of direction digits
// '0'-'7' indexing into the dx/dy tables above; an empty string means no path exists.
// The member variables m (rows) and n (cols) must be set by the caller (planPath).
// NOTE(review): the function uses static locals (pq, n0, m0, ...) and is therefore
// not reentrant/thread-safe -- confirm single-threaded use.
std::string AStarPlanner::pathFind(const int & xStart, const int & yStart, const int & xFinish, const int & yFinish, const cv::Mat& map)
{
static std::priority_queue<NodeAstar> pq[2]; // list of open (not-yet-tried) nodes
static int pqi; // pq index
static NodeAstar* n0;
static NodeAstar* m0;
static int i, j, x, y, xdx, ydy;
static char c;
pqi = 0;
// binary obstacle grid in planner coordinates; note the transposed indexing
// (x used as row, y as column), which is applied consistently in this function
cv::Mat map_to_calculate_path(cv::Size(m, n), CV_32S);
// create map from the given eroded map: 0 = free (255 in input), 1 = obstacle
for (int y = 0; y < map.rows; y++)
{
for (int x = 0; x < map.cols; x++)
{
if (map.at<unsigned char>(y, x) == 255)
{
map_to_calculate_path.at<int>(x, y) = 0;
}
else
{
map_to_calculate_path.at<int>(x, y) = 1;
}
}
}
cv::Mat closed_nodes_map(cv::Size(m, n), CV_32S); //map of already tried nodes
cv::Mat open_nodes_map(cv::Size(m, n), CV_32S); // map of open (not-yet-tried) nodes
cv::Mat dir_map(cv::Size(m, n), CV_32S); // map of directions (parent direction per expanded cell)
// initialize the node maps
// NOTE(review): dir_map is not zero-initialized here; it appears to be read only for
// cells written during expansion below -- confirm
for (y = 0; y < closed_nodes_map.rows; y++)
{
for (x = 0; x < closed_nodes_map.cols; x++)
{
closed_nodes_map.at<int>(y, x) = 0;
open_nodes_map.at<int>(y, x) = 0;
}
}
// create the start node and push into list of open nodes
n0 = new NodeAstar(xStart, yStart, 0, 0);
n0->updatePriority(xFinish, yFinish);
pq[pqi].push(*n0);
open_nodes_map.at<int>(xStart, yStart) = n0->getPriority(); // mark it on the open nodes map
//garbage collection (the queue stores a copy)
delete n0;
// A* search
while (!pq[pqi].empty())
{
// get the current node w/ the highest priority
// from the list of open nodes
n0 = new NodeAstar(pq[pqi].top().getxPos(), pq[pqi].top().getyPos(), pq[pqi].top().getLevel(), pq[pqi].top().getPriority());
x = n0->getxPos();
y = n0->getyPos();
pq[pqi].pop(); // remove the node from the open list
open_nodes_map.at<int>(x, y) = 0;
// mark it on the closed nodes map
closed_nodes_map.at<int>(x, y) = 1;
// quit searching when the goal state is reached
//if((*n0).estimate(xFinish, yFinish) == 0)
if (x == xFinish && y == yFinish)
{
// generate the path from finish to start
// by following the directions stored in dir_map (walking backwards)
std::string path = "";
while (!(x == xStart && y == yStart))
{
j = dir_map.at<int>(x, y);
// prepend the opposite direction, since the walk is finish -> start
c = '0' + (j + dir / 2) % dir;
path = c + path;
x += dx[j];
y += dy[j];
}
delete n0;// garbage collection
// empty the leftover nodes
while (!pq[pqi].empty())
pq[pqi].pop();
return path;
}
// generate moves (child nodes) in all possible directions
for (i = 0; i < dir; i++)
{
xdx = x + dx[i];
ydy = y + dy[i];
expanding_counter++;
// skip out-of-bounds, obstacle and already-closed cells
if (!(xdx < 0 || xdx > n - 1 || ydy < 0 || ydy > m - 1 || map_to_calculate_path.at<int>(xdx, ydy) == 1 || closed_nodes_map.at<int>(xdx, ydy) == 1))
{
// generate a child node
m0 = new NodeAstar(xdx, ydy, n0->getLevel(), n0->getPriority());
m0->nextLevel(i);
m0->updatePriority(xFinish, yFinish);
// if it is not in the open list then add into that
if (open_nodes_map.at<int>(xdx, ydy) == 0)
{
open_nodes_map.at<int>(xdx, ydy) = m0->getPriority();
pq[pqi].push(*m0);
// mark its parent node direction
dir_map.at<int>(xdx, ydy) = (i + dir / 2) % dir;
}
else if (open_nodes_map.at<int>(xdx, ydy) > m0->getPriority())
{
// a better path to an already-open node was found:
// update the priority info
open_nodes_map.at<int>(xdx, ydy) = m0->getPriority();
// update the parent direction info
dir_map.at<int>(xdx, ydy) = (i + dir / 2) % dir;
// replace the node
// by emptying one pq to the other one
// except the node to be replaced will be ignored
// and the new node will be pushed in instead
while (!(pq[pqi].top().getxPos() == xdx && pq[pqi].top().getyPos() == ydy))
{
pq[1 - pqi].push(pq[pqi].top());
pq[pqi].pop();
}
pq[pqi].pop(); // remove the wanted node
// empty the larger size pq to the smaller one
if (pq[pqi].size() > pq[1 - pqi].size())
pqi = 1 - pqi;
while (!pq[pqi].empty())
{
pq[1 - pqi].push(pq[pqi].top());
pq[pqi].pop();
}
pqi = 1 - pqi;
pq[pqi].push(*m0); // add the better node instead
}
delete m0; // garbage collection
}
}
delete n0; // garbage collection
}
return ""; // no route found
}
//This is the path planning algorithm for this class. It downsamples the map with the given factor (0 < factor <= 1) so the
//map gets reduced and calculation time gets better. If it is set to 1 the map will have original size, if it is 0 the algorithm
//won't work, so make sure to not set it to 0. The algorithm also needs the robot radius [m] and the map resolution [m/pixel] to
//calculate the needed amount of erosions to include the radius in the planning.
//Returns the length of the found path (scaled back to original-map pixels via the step length), or 1e100 if
//no path exists or start/end lie outside the map. If 'route' is not NULL, the waypoints of the found path
//are appended to it via getRoute().
double AStarPlanner::planPath(const cv::Mat& map, const cv::Point& start_point, const cv::Point& end_point,
const double downsampling_factor, const double robot_radius, const double map_resolution,
const int end_point_valid_neighborhood_radius, std::vector<cv::Point>* route)
{
expanding_counter = 0;
double step_length = 1./downsampling_factor;
//length of the planned path
double path_length = 0;
if(start_point.x == end_point.x && start_point.y == end_point.y)//if the start and end-point are the same return 0
{
return path_length;
}
// check for valid coordinates of start and end points
if (start_point.x < 0 || start_point.x >= map.cols || start_point.y < 0 || start_point.y >= map.rows ||
end_point.x < 0 || end_point.x >= map.cols || end_point.y < 0 || end_point.y >= map.rows)
{
return 1e100;
}
cv::Mat downsampled_map;
downsampleMap(map, downsampled_map, downsampling_factor, robot_radius, map_resolution);
//transform the pixel values to the downsampled ones (truncating towards zero)
int start_x = downsampling_factor * start_point.x;
int start_y = downsampling_factor * start_point.y;
int end_x = downsampling_factor * end_point.x;
int end_y = downsampling_factor * end_point.y;
//set the sizes of the map used by pathFind()
m = downsampled_map.rows;// number of rows of the map
n = downsampled_map.cols;// number of columns of the map
// get the route
//	clock_t start = clock();
route_ = pathFind(start_x, start_y, end_x, end_y, downsampled_map);
if (route_ == "")
{
// no path to the exact goal: if allowed, retry with goal cells on growing square
// rings (only the ring border is tried) around the original goal
if (end_point_valid_neighborhood_radius > 0)
{
for (int r=1; r<=end_point_valid_neighborhood_radius; ++r)
{
for (int dy=-r; dy<=r; ++dy)
{
for (int dx=-r; dx<=r; ++dx)
{
if ((abs(dy)!=r && abs(dx)!=r) || end_x+dx<0 || end_x+dx>=n || end_y+dy<0 || end_y+dy>=m)
continue;
route_ = pathFind(start_x, start_y, end_x+dx, end_y+dy, downsampled_map);
if (route_ != "")
break;
}
if (route_ != "")
break;
}
if (route_ != "")
break;
}
}
if (route_ == "")
{
//			std::cout << "No path from " << start_point << " to " << end_point << " found for map of size " << map.rows << "x" << map.cols << " and downsampling factor " << downsampling_factor << std::endl;
return 1e100; //return extremely large distance as path length if the route could not be generated
}
}
//	clock_t end = clock();
//	double time_elapsed = double(end - start);
// follow the route on the map and update the path length
if (route_.length() > 0)
{
int j;
char c;
int x = start_x;
int y = start_y;
const double straight_step = (1. / downsampling_factor);
const double diagonal_step = (std::sqrt(2.) / downsampling_factor);
for (int i = 0; i < route_.length(); i++)
{
//get the next char of the string and make it an integer, which shows the direction
c = route_.at(i);
j=c-'0';
x = x + dx[j];
y = y + dy[j];
//Update the pathlength with the directions of the path. When the path goes vertical or horizontal add length 1.
//When it goes diagonal add sqrt(2)
if (j == 0 || j == 2 || j == 4 || j == 6)
{
path_length += straight_step;
}
if (j == 1 || j == 3 || j == 5 || j == 7)
{
path_length += diagonal_step;
}
}
}
if(route != NULL)
getRoute(start_point, route_, step_length, *route);
return path_length;
}
// Variant of planPath() that works on a pre-computed downsampled map. The path is first
// searched on the small map (fast); if that fails, the search is repeated on the original
// map as a fallback. Optionally draws the found route into *draw_path_map.
// Returns the path length in original-map pixels, or a value > 1e90 if no path was found.
double AStarPlanner::planPath(const cv::Mat& map, const cv::Mat& downsampled_map, const cv::Point& start_point, const cv::Point& end_point, const double downsampling_factor,
		const double robot_radius, const double map_resolution, const int end_point_valid_neighborhood_radius, cv::Mat* draw_path_map,
		std::vector<cv::Point>* route)
{
	route_ = "";

	// try the downsampled map first; scale coordinates and resulting length accordingly
	double step_length = 1. / downsampling_factor;
	double path_length = step_length * planPath(downsampled_map, downsampling_factor * start_point, downsampling_factor * end_point, 1., 0., map_resolution, end_point_valid_neighborhood_radius, route);

	// fallback: no path found on the small map --> search the original map at full resolution
	if (path_length > 1e90)
	{
		step_length = 1.;
		path_length = planPath(map, start_point, end_point, 1., 0., map_resolution, 1./downsampling_factor * end_point_valid_neighborhood_radius, route);
	}

	if (path_length > 1e90)
		std::cout << "######################### No path found on the originally sized map #######################" << std::endl;
	else if (draw_path_map != NULL)
		drawRoute(*draw_path_map, start_point, route_, step_length);

	return path_length;
}

View File

@ -0,0 +1,317 @@
# include <ipa_building_navigation/concorde_TSP.h>
#include <boost/thread.hpp>
#include <boost/chrono.hpp>
#include <sys/time.h>
//Default constructor
//Starts with the abort flag cleared; a running computation can later be cancelled
//asynchronously via abortComputation().
ConcordeTSPSolver::ConcordeTSPSolver()
: abort_computation_(false)
{
}
// Worker entry point for computing the distance matrix in a separate thread
// (see the map-based solveConcordeTSP overload, which polls the abort flag while this runs).
// NOTE(review): the 'path_planner' parameter is unused; the member 'pathplanner_' is
// passed to the computation instead -- confirm this is intended.
void ConcordeTSPSolver::distance_matrix_thread(DistanceMatrix& distance_matrix_computation, cv::Mat& distance_matrix,
const cv::Mat& original_map, const std::vector<cv::Point>& points, double downsampling_factor,
double robot_radius, double map_resolution, AStarPlanner& path_planner)
{
distance_matrix_computation.constructDistanceMatrix(distance_matrix, original_map, points, downsampling_factor,
robot_radius, map_resolution, pathplanner_);
}
// Requests cancellation: sets the cooperative abort flag and additionally kills any
// running external 'concorde' process so the blocking system() call in solveConcordeTSP
// returns. NOTE(review): 'pidof concorde' matches every concorde process on the machine,
// not only the one started by this instance -- confirm this is acceptable.
void ConcordeTSPSolver::abortComputation()
{
abort_computation_ = true;
// kill concorde if running
const std::string pid_filename = "concorde_tsp_pid" + unique_file_identifier_ + ".txt";
std::string pid_cmd = "pidof concorde > " + pid_filename;
int pid_result = system(pid_cmd.c_str());
std::ifstream pid_reader(pid_filename.c_str());
int value = -1;
std::string line;
if (pid_reader.is_open())
{
// each line of the pidof output may contain several PIDs; kill every one of them
while (getline(pid_reader, line))
{
std::istringstream iss(line);
while (iss >> value)
{
std::cout << "PID of concorde: " << value << std::endl;
std::stringstream ss;
ss << "kill " << value;
std::string kill_cmd = ss.str();
int kill_result = system(kill_cmd.c_str());
std::cout << "kill result: " << kill_result << std::endl;
}
}
pid_reader.close();
remove(pid_filename.c_str());
}
}
//This function generates a file with the current TSP in TSPlib format. This is necessary because concorde needs this file
//as input to solve the TSP. See http://comopt.ifi.uni-heidelberg.de/software/TSPLIB95/ for documentation.
//The distances are written as a full (symmetric) matrix of integers, so fractional path lengths are truncated.
//Additionally the results file 'tsp_order_filename' is cleared so a previous run's order cannot be read by mistake.
void ConcordeTSPSolver::writeToFile(const cv::Mat& pathlength_matrix, const std::string& tsp_lib_filename, const std::string& tsp_order_filename)
{
const std::string path_for_saving_file = tsp_lib_filename; //ros::package::getPath("libconcorde_tsp_solver") + "/common/files/TSPlib_file.txt";
std::ofstream saving_file(path_for_saving_file.c_str());
if (saving_file.is_open())
{
std::cout << "Starting to create the TSPlib file: " << path_for_saving_file << std::endl;
//specify name of the Problem, Type (TSP = symmetrical TSP) and add a comment to the file. Name and Type are necessary, comment is for better understanding when you open the file.
saving_file << "NAME: ipa-building-navigation_" << tsp_lib_filename << std::endl
<< "TYPE: TSP" << std::endl
<< "COMMENT: This is the TSPlib file for using concorde. See http://comopt.ifi.uni-heidelberg.de/software/TSPLIB95/ for documentation."
<< std::endl;
saving_file << "DIMENSION: " << pathlength_matrix.cols << std::endl; //Shows the Dimension of the problem --> the number of nodes (Necessary).
//Write the distance-matrix into the file as a full-matrix.
saving_file << "EDGE_WEIGHT_TYPE: EXPLICIT" << std::endl;
saving_file << "EDGE_WEIGHT_FORMAT: FULL_MATRIX" << std::endl;
saving_file << "EDGE_WEIGHT_SECTION" << std::endl;
for (int row = 0; row < pathlength_matrix.rows; row++)
{
for (int col = 0; col < pathlength_matrix.cols; col++)
{
//truncation to int is required by the TSPLIB integer weight format
saving_file << " "
<< (int) pathlength_matrix.at<double>(row, col);
}
saving_file << std::endl;
}
//shows the end of the file
saving_file << "EOF";
std::cout << "Created the TSPlib file." << std::endl;
saving_file.close();
}
else
{
std::cout << "Saving file '" << path_for_saving_file << "' for concorde could not be opened." << std::endl;
}
// clear results file
std::ofstream reading_file(tsp_order_filename.c_str()); //open file
if (reading_file.is_open())
{
reading_file << "";
reading_file.close();
}
else
{
std::cout << "Could not clear results file '" << tsp_order_filename << "'." << std::endl;
}
}
//Reads the tour computed by concorde from 'tsp_order_filename' and returns the visiting
//order as indices into the distance matrix. The first number in the file is the node
//count and is skipped; every following number is a node index. Returns an empty vector
//if the file cannot be opened.
std::vector<int> ConcordeTSPSolver::readFromFile(const std::string& tsp_order_filename)
{
	std::vector<int> order_vector;	//vector that stores the calculated TSP order
	std::ifstream reading_file(tsp_order_filename.c_str());	//open file
	if (!reading_file.is_open())
	{
		std::cout << "TSP order file '" << tsp_order_filename << "' could not be opened." << std::endl;
		return order_vector;
	}

	std::string line;		//current line of the file
	int line_counter = 0;	//counts the read numbers so the leading node count can be skipped
	while (getline(reading_file, line))
	{
		std::istringstream iss(line);
		int value;
		while (iss >> value)
		{
			if (line_counter > 0)	//the first number only shows the number of nodes and is not part of the order
				order_vector.push_back(value);
			line_counter++;
		}
	}
	reading_file.close();
	return order_vector;
}
//This function solves the given TSP using the systemcall to use the concorde TSP solver. This solver is applied from:
//	http://www.math.uwaterloo.ca/tsp/concorde.html
//First you have to build the Solver in any possible way and if you don't make a ros-package out of it you have to change
//the paths to the right ones. If it is a ros-package the ros::package::getpath() function will find the right path.
//The usage of the solver is: ./concorde [-see below-] [dat_file]
//Navigate to the build Solver and then ./TSP and type ./concorde -h for a short explanation.

//with a given distance matrix
//Returns the optimal visiting order of all nodes, rotated so that it starts at start_Node.
//If the computation is aborted an empty vector is returned; if the solver output is
//invalid the trivial order 0, 1, 2, ... (rotated to start_Node) is returned.
std::vector<int> ConcordeTSPSolver::solveConcordeTSP(const cv::Mat& path_length_matrix, const int start_Node)
{
	// generate a unique filename so concurrent solver runs do not clash on temp files
	timeval time;
	gettimeofday(&time, NULL);
	std::stringstream ss;
	ss << "_" << time.tv_sec << "_" << time.tv_usec;
	unique_file_identifier_ = ss.str();
	const std::string tsp_lib_filename = "TSPlib_file" + unique_file_identifier_ + ".txt";
	const std::string tsp_order_filename = "TSP_order" + unique_file_identifier_ + ".txt";

	std::vector<int> unsorted_order, sorted_order;
	std::cout << "finding optimal order" << std::endl;
	std::cout << "number of nodes: " << path_length_matrix.rows << " start node: " << start_Node << std::endl;
	if (path_length_matrix.rows > 2) //check if the TSP has at least 3 nodes
	{
		//create the TSPlib file
		writeToFile(path_length_matrix, tsp_lib_filename, tsp_order_filename);

		//use concorde to find optimal tour; locate the solver binary via rospack
		std::string bin_folder;
		while (bin_folder.length()==0)
		{
			const std::string temp_file = "temp_libconcorde_path" + unique_file_identifier_ + ".txt";
			try
			{
				std::string cmd = "rospack libs-only-L libconcorde_tsp_solver > " + temp_file;
				int result = system(cmd.c_str());
				std::ifstream file(temp_file.c_str(), std::ifstream::in);
				if (file.is_open())
				{
					file >> bin_folder;
					file.close();
				}
				std::cout << "bin_folder: " << bin_folder << std::endl;
				remove(temp_file.c_str());
			}
			catch (...)
			{
				std::cout << "ConcordeTSPSolver::solveConcordeTSP: ERROR: ['rospack libs-only-L libconcorde_tsp_solver > '" << temp_file << "] failed. Trying again." << std::endl;
			}
		}
		std::string cmd = bin_folder + "/libconcorde_tsp_solver/concorde -o " + "$HOME/.ros/" + tsp_order_filename + " $HOME/.ros/" + tsp_lib_filename;
		if (abort_computation_==true)
			return sorted_order;
		int result = system(cmd.c_str());
		if (abort_computation_==true)
			return sorted_order;
		assert(!result);

		//get order from saving file
		unsorted_order = readFromFile(tsp_order_filename);
	}
	else
	{
		//trivial problems (<= 2 nodes) need no solver
		for(int node = 0; node < path_length_matrix.rows; node++)
			unsorted_order.push_back(node);
	}

	// cleanup files
	remove(tsp_lib_filename.c_str());
	remove(tsp_order_filename.c_str());
	const std::string tsp_lib_sol_filename = "TSPlib_file" + unique_file_identifier_ + ".sol";
	remove(tsp_lib_sol_filename.c_str());
	const std::string tsp_lib_res_filename = "TSPlib_file" + unique_file_identifier_ + ".res";
	remove(tsp_lib_res_filename.c_str());

	std::cout << "finished TSP" << std::endl;

	// if there is an error, just set unsorted order to 0, 1, 2, ...
	// (cast avoids a signed/unsigned comparison)
	if ((int)unsorted_order.size() != path_length_matrix.rows)
	{
		std::cout << "ConcordeTSPSolver::solveConcordeTSP: Warning: Optimized order invalid, taking standard order 1, 2, 3, ..." << std::endl;
		unsorted_order.clear();
		unsorted_order.resize(path_length_matrix.rows);
		for (int i=0; i<path_length_matrix.rows; ++i)
			unsorted_order[i] = i;
	}

	//sort the order with the start_node at the beginning
	// bugfix: start_node_position was previously uninitialized; if start_Node did not
	// occur in unsorted_order, the rotation below read an indeterminate value (undefined
	// behavior). Defaulting to 0 returns the order unrotated in that case.
	unsigned int start_node_position = 0;
	for (unsigned int i = 0; i < unsorted_order.size(); i++) //find position of the start node in the order
	{
		if (unsorted_order[i] == start_Node)
			start_node_position = i;
	}
	for (unsigned int i = start_node_position; i < unsorted_order.size(); i++) //rotate the vector so it starts at start_Node
		sorted_order.push_back(unsorted_order[i]);
	for (unsigned int i = 0; i < start_node_position; i++)
		sorted_order.push_back(unsorted_order[i]);

	return sorted_order;
}
// compute the distance matrix and maybe return it
// this version does not exclude infinite paths from the TSP ordering
// The distance matrix is computed in a worker thread so that an abort request
// (abortComputation()) can be forwarded to the potentially long computation.
std::vector<int> ConcordeTSPSolver::solveConcordeTSP(const cv::Mat& original_map, const std::vector<cv::Point>& points,
double downsampling_factor, double robot_radius, double map_resolution, const int start_Node, cv::Mat* distance_matrix)
{
//calculate the distance matrix
std::cout << "ConcordeTSPSolver::solveConcordeTSP: Constructing distance matrix..." << std::endl;
// if the caller provided a matrix, share its header so the result is handed back to the caller
cv::Mat distance_matrix_ref;
if (distance_matrix != 0)
distance_matrix_ref = *distance_matrix;
DistanceMatrix distance_matrix_computation;
boost::thread t(boost::bind(&ConcordeTSPSolver::distance_matrix_thread, this, boost::ref(distance_matrix_computation),
boost::ref(distance_matrix_ref), boost::cref(original_map), boost::cref(points), downsampling_factor,
robot_radius, map_resolution, boost::ref(pathplanner_)));
// poll for completion every 10 ms so an abort request can interrupt the computation
bool finished = false;
while (finished==false)
{
if (abort_computation_==true)
distance_matrix_computation.abortComputation();
finished = t.try_join_for(boost::chrono::milliseconds(10));
}
//	distance_matrix_computation.constructDistanceMatrix(distance_matrix_ref, original_map, points, downsampling_factor,
//			robot_radius, map_resolution, pathplanner_);
if (abort_computation_==true)
{
std::vector<int> sorted_order;
return sorted_order;
}
return (solveConcordeTSP(distance_matrix_ref, start_Node));
}
// compute TSP from a cleaned distance matrix (does not contain any infinity paths) that has to be computed
std::vector<int> ConcordeTSPSolver::solveConcordeTSPClean(const cv::Mat& original_map, const std::vector<cv::Point>& points,
		double downsampling_factor, double robot_radius, double map_resolution, const int start_node)
{
	// build a distance matrix with all unreachable (infinite-cost) connections removed;
	// the mapping translates indices of the cleaned matrix back to the original point indices
	cv::Mat cleaned_matrix;
	std::map<int,int> cleaned_to_original_indices;
	int mapped_start_node = start_node;
	DistanceMatrix matrix_builder;
	matrix_builder.computeCleanedDistanceMatrix(original_map, points, downsampling_factor, robot_radius, map_resolution, pathplanner_,
			cleaned_matrix, cleaned_to_original_indices, mapped_start_node);

	// solve the TSP on the cleaned matrix and translate the result back to original indices
	return solveConcordeTSPWithCleanedDistanceMatrix(cleaned_matrix, cleaned_to_original_indices, mapped_start_node);
}
// compute TSP with pre-computed cleaned distance matrix (does not contain any infinity paths)
std::vector<int> ConcordeTSPSolver::solveConcordeTSPWithCleanedDistanceMatrix(const cv::Mat& distance_matrix,
		const std::map<int,int>& cleaned_index_to_original_index_mapping, const int start_node)
{
	// solve on the cleaned matrix, then map every index back to its original point index
	std::vector<int> order = solveConcordeTSP(distance_matrix, start_node);
	for (std::vector<int>::iterator it = order.begin(); it != order.end(); ++it)
		*it = cleaned_index_to_original_index_mapping.at(*it);
	return order;
}

View File

@ -0,0 +1,319 @@
#include <ipa_building_navigation/genetic_TSP.h>
#include <boost/thread.hpp>
#include <boost/chrono.hpp>
//Default constructor
//min_number_of_gens: lower bound on the number of generations to evolve.
//const_generations: number of generations the population may stay constant before stopping.
//The abort flag starts cleared; a running computation can be cancelled via abortComputation().
GeneticTSPSolver::GeneticTSPSolver(int min_number_of_gens, int const_generations)
: abort_computation_(false)
{
	const_generations_number_ = const_generations;
	min_number_of_generations_ = min_number_of_gens;
}
// Worker entry point for computing the distance matrix in a separate thread.
// NOTE(review): the 'path_planner' parameter is unused; the member 'pathplanner_' is
// passed to the computation instead -- confirm this is intended (the concorde solver's
// equivalent function in this file behaves the same way).
void GeneticTSPSolver::distance_matrix_thread(DistanceMatrix& distance_matrix_computation, cv::Mat& distance_matrix,
const cv::Mat& original_map, const std::vector<cv::Point>& points, double downsampling_factor,
double robot_radius, double map_resolution, AStarPlanner& path_planner)
{
distance_matrix_computation.constructDistanceMatrix(distance_matrix, original_map, points, downsampling_factor,
robot_radius, map_resolution, pathplanner_);
}
// Requests cancellation of a running computation by setting the cooperative abort
// flag; no thread is interrupted here, the solver loops must check the flag.
void GeneticTSPSolver::abortComputation()
{
abort_computation_ = true;
}
////Function to construct the distance matrix from the given points. See the definition at solveGeneticTSP for the style of this matrix.
//void GeneticTSPSolver::constructDistanceMatrix(cv::Mat& distance_matrix, const cv::Mat& original_map, const int number_of_nodes,
// const std::vector<cv::Point>& points, double downsampling_factor, double robot_radius, double map_resolution)
//{
// //create the distance matrix with the right size
// cv::Mat pathlengths(cv::Size(number_of_nodes, number_of_nodes), CV_64F);
//
// for (int i = 0; i < points.size(); i++)
// {
// cv::Point current_center = points[i];
// for (int p = 0; p < points.size(); p++)
// {
// if (p != i)
// {
// if (p > i) //only compute upper right triangle of matrix, rest is symmetrically added
// {
// cv::Point neighbor = points[p];
// double length = pathplanner_.planPath(original_map, current_center, neighbor, downsampling_factor, robot_radius, map_resolution);
// pathlengths.at<double>(i, p) = length;
// pathlengths.at<double>(p, i) = length; //symmetrical-Matrix --> saves half the computationtime
// }
// }
// else
// {
// pathlengths.at<double>(i, p) = 0;
// }
// }
// }
//
// distance_matrix = pathlengths.clone();
//}
// This function calculates for a given path the length of it. The Path is a vector with ints, that show the index of the
// node in the path. It uses a pathlength Matrix, which should be calculated once.
// This Matrix should save the pathlengths with this logic:
// 1. The rows show from which Node the length is calculated.
// 2. For the columns in a row the Matrix shows the distance to the Node in the column.
// 3. From the node to itself the distance is 0.
// Returns the total length of the given node sequence by summing the matrix
// entries of each consecutive pair (see the matrix format described above).
//  path_length_Matrix: symmetric CV_64F matrix of pairwise path lengths
//  given_path:         sequence of node indices into that matrix
// bug fix: for an empty path the original's (size() - 1) wrapped around to a
// huge unsigned value, causing out-of-bounds reads; paths with fewer than two
// nodes contain no edges, so their length is 0.
double GeneticTSPSolver::getPathLength(const cv::Mat& path_length_Matrix, std::vector<int> given_path)
{
	double length_of_given_path = 0;
	if (given_path.size() < 2)
		return length_of_given_path;
	for (size_t i = 0; i < given_path.size() - 1; i++)
	{
		length_of_given_path += path_length_Matrix.at<double>(given_path[i], given_path[i + 1]);
	}
	return length_of_given_path;
}
// This Function takes the given path and mutates it. A mutation is a random change of the path-order. For example random
// nodes can be switched, or a random intervall of nodes can be inverted. Only the first and last Node can't be changed, because
// they are given from the Main-function.
// Returns a randomly mutated copy of parent_path. A mutation is either a series
// of random node swaps or the inversion of a random interval; the first and
// last entries are never moved because the caller fixes start and end.
// NOTE(review): uses rand(), so results are only reproducible for a fixed
// srand() seed; paths must contain at least 4 nodes or the modulo arguments
// below become non-positive.
std::vector<int> GeneticTSPSolver::mutatePath(const std::vector<int>& parent_path)
{
	std::vector<int> mutated_path;
	std::vector<int> temporary_path;
	std::vector<int> saving_variable_path = parent_path; //saving-variable for the one time mutated path

	//this variable sets which aspect should be changed:
	// 0: random nodes should be switched
	// 1: random intervall should be inverted:
	int what_to_change = (rand() % 2);

	if (what_to_change == 0) //random node-switching
	{
		int number_of_switches = (rand() % (parent_path.size() - 3)) + 1; // Set the number of switches that should be done.
		                                                                 // Because the first needs to be unchanged the number is limited.
		                                                                 // Also at least one change should be done.
		for (int change = 0; change < number_of_switches; change++)
		{
			temporary_path.clear(); //reset the temporary path to fill it with new order
			bool switched = false; //this variable makes sure that the switch has been done
			do
			{
				// pick two interior positions (first/last index excluded)
				int node_one = (rand() % (saving_variable_path.size() - 2)) + 1; //this variables random choose which nodes should be changed
				int node_two = (rand() % (saving_variable_path.size() - 2)) + 1; //The first and last one should be untouched
				if (node_one != node_two) //node can't be switched with himself
				{
					for (int node = 0; node < saving_variable_path.size(); node++) //fill the mutated path with the information
					{
						if (node == node_one)
						{
							temporary_path.push_back(saving_variable_path[node_two]); //add the second node which should be switched
						}
						else if (node == node_two)
						{
							temporary_path.push_back(saving_variable_path[node_one]); //add the first node which should be switched
						}
						else
						{
							temporary_path.push_back(saving_variable_path[node]); //add the nodes as they are
						}
					}
					switched = true;
				}
			} while (!switched);
			saving_variable_path = temporary_path; //save the one time mutated path
		}
		mutated_path = saving_variable_path; //save the finished mutated path
	}
	else if (what_to_change == 1) //random intervall-inverting (always from middle node on)
	{
		bool inverted = false;
		do
		{
			// pick the interval bounds from the interior positions
			int node_one = (rand() % (saving_variable_path.size() - 2)) + 1; //this variables random choose which intervall
			int node_two = (rand() % (saving_variable_path.size() - 2)) + 1; //The first and last one should be untouched
			int inverting_counter = 0; //variable to choose the node based on distance to the node_two
			if (node_one > node_two) //switch variables, if the node_one is bigger than the node_two (easier to work with here)
			{
				int tmp_node = node_one;
				node_one = node_two;
				node_two = tmp_node;
			}
			if (node_one != node_two)
			{
				for (int node = 0; node < parent_path.size(); node++)
				{
					if (node < node_one || node > node_two) //add the nodes as they are in the mutated path
					{
						mutated_path.push_back(parent_path[node]);
					}
					else //invert the intervall
					{
						mutated_path.push_back(parent_path[node_two - inverting_counter]);
						inverting_counter++;
					}
				}
				inverted = true;
			}
		} while (!inverted);
	}
	else
	{
		ROS_INFO("Something was wrong in mutation-function.");
	}
	return mutated_path;
}
//This Function calculates the length of each given path and chooses the shortest one. It uses the getPathLength function.
// Returns the shortest path of the given candidates (lengths are computed with
// getPathLength). The first entry must be the parent path: 'changed' is set to
// true only when some later candidate beats the incumbent, which tells the
// caller whether this generation improved on its parent.
std::vector<int> GeneticTSPSolver::getBestPath(const std::vector<std::vector<int> > paths, const cv::Mat& pathlength_Matrix, bool& changed)
{
	// the parent (index 0) is the incumbent to beat
	std::size_t best_index = 0;
	double shortest_length = getPathLength(pathlength_Matrix, paths[0]);
	for (std::size_t candidate = 1; candidate < paths.size(); ++candidate)
	{
		const double candidate_length = getPathLength(pathlength_Matrix, paths[candidate]);
		if (candidate_length < shortest_length)
		{
			shortest_length = candidate_length;
			best_index = candidate;
			changed = true; // a child outperformed the parent
		}
	}
	return paths[best_index];
}
//This is a solver for the TSP using a genetic algorithm. It calculates a initial path by using the nearest-neighbor
//search. It then applies an evolutional algorithm:
//
// I. Take the parent of the current generation and calculate 8 mutated children of it. A mutation can be a change
// of positions of nodes or the inversion of a intervall. The initial parent is the path from the nearest-neighbor search.
// II. It checks for the 9 paths (parent and children) for the best path (shortest) and takes this path as the parent
// of the new generation.
// III. It repeats the steps I. and II. at least a specified amount of times and then checks if the pathlength
// hasn't changed in the last steps.
//
//As input a symmetrical matrix of pathlenghts is needed. This matrix should save the pathlengths with this logic:
// 1. The rows show from which Node the length is calculated.
// 2. For the columns in a row the Matrix shows the distance to the Node in the column.
// 3. From the node to itself the distance is 0.
//don't compute distance matrix
// Solves the TSP on a precomputed distance matrix. A nearest-neighbor tour
// seeds the evolution; the start node is appended to the end so that the return
// trip to the start is part of the optimized length, and is stripped again
// before returning. Returns the visiting order as indices into the matrix
// (empty if aborted via abortComputation()).
std::vector<int> GeneticTSPSolver::solveGeneticTSP(const cv::Mat& path_length_Matrix, const int start_Node)
{
	std::vector<int> return_vector;
	NearestNeighborTSPSolver nearest_neighbor_solver;
	std::vector<int> calculated_path = nearest_neighbor_solver.solveNearestTSP(path_length_Matrix, start_Node);
	calculated_path.push_back(start_Node); //push the start node at the end, so the reaching of the start at the end is included in the planning

	if(path_length_Matrix.rows > 2) //check if graph has at least three members, if not the algorithm won't work properly
	{
		bool changed_path = false; //this variable checks if the path has been changed in the mutation process
		int changing_counter = const_generations_number_; //this variable is a counter for how many times a path has been the same
		int number_of_generations = 0;

		do
		{
			// external abort request: bail out with an empty result
			if (abort_computation_==true)
				return return_vector;
			number_of_generations++;
			changed_path = false;
			std::vector < std::vector<int> > current_generation_paths; //vector to save the current generation
			current_generation_paths.push_back(calculated_path); //first path is always the parent --> important for checking if the path has changed in getBestPath!!
			for (int child = 0; child < 8; child++) //get 8 children and add them to the vector
			{
				current_generation_paths.push_back(mutatePath(calculated_path));
			}
			calculated_path = getBestPath(current_generation_paths, path_length_Matrix, changed_path); //get the best path of this generation
			// convergence check only starts after the minimum number of generations
			if (number_of_generations >= min_number_of_generations_) //when a specified amount of steps have been done the algorithm checks if the last paths didn't change
			{
				if (changed_path)
				{
					changing_counter = const_generations_number_; //reset the counting-variable
				}
				else
				{
					changing_counter -= 1; //decrease the counting-variable by 1
				}
			}
		} while (changing_counter > 0 || number_of_generations < min_number_of_generations_);
	}

	//return the calculated path without the last node (same as start node)
	for(size_t node = 0; node < calculated_path.size()-1; ++node)
	{
		return_vector.push_back(calculated_path[node]);
	}
	return return_vector;
}
// compute distance matrix and maybe returning it
// this version does not exclude infinite paths from the TSP ordering
// Convenience overload: first builds the pairwise distance matrix for the given
// points on the map, then solves the TSP on it. The matrix construction runs in
// a separate thread so that abortComputation() can interrupt it; the main
// thread polls every 10 ms. If distance_matrix is non-null, the computed matrix
// is exposed through it (cv::Mat headers share their data buffer).
std::vector<int> GeneticTSPSolver::solveGeneticTSP(const cv::Mat& original_map, const std::vector<cv::Point>& points, double downsampling_factor,
		double robot_radius, double map_resolution, const int start_Node, cv::Mat* distance_matrix)
{
	//calculate the distance matrix
	std::cout << "GeneticTSPSolver::solveGeneticTSP: Constructing distance matrix..." << std::endl;
	cv::Mat distance_matrix_ref;
	if (distance_matrix != 0)
		distance_matrix_ref = *distance_matrix;
	DistanceMatrix distance_matrix_computation;
	boost::thread t(boost::bind(&GeneticTSPSolver::distance_matrix_thread, this, boost::ref(distance_matrix_computation),
			boost::ref(distance_matrix_ref), boost::cref(original_map), boost::cref(points), downsampling_factor,
			robot_radius, map_resolution, boost::ref(pathplanner_)));
	bool finished = false;
	while (finished==false)
	{
		// forward an abort request to the running matrix computation, then
		// wait up to 10 ms for the worker thread to finish
		if (abort_computation_==true)
			distance_matrix_computation.abortComputation();
		finished = t.try_join_for(boost::chrono::milliseconds(10));
	}
//	DistanceMatrix distance_matrix_computation;
//	distance_matrix_computation.constructDistanceMatrix(distance_matrix_ref, original_map, points, downsampling_factor, robot_radius, map_resolution, pathplanner_);

	if (abort_computation_==true)
	{
		std::vector<int> return_vector;
		return return_vector;
	}

	return (solveGeneticTSP(distance_matrix_ref, start_Node));
}
// compute TSP from a cleaned distance matrix (does not contain any infinity paths) that has to be computed
// Solves the TSP on a "cleaned" distance matrix, i.e. one from which all
// unreachable (infinite-cost) points were removed, and returns the visiting
// order re-indexed to the original point numbering.
std::vector<int> GeneticTSPSolver::solveGeneticTSPClean(const cv::Mat& original_map, const std::vector<cv::Point>& points,
		double downsampling_factor, double robot_radius, double map_resolution, const int start_node)
{
	// build the cleaned matrix; index_mapping translates cleaned-matrix indices
	// back to the original point indices, and the start node may be remapped too
	cv::Mat cleaned_matrix;
	std::map<int,int> index_mapping;
	int mapped_start_node = start_node;
	DistanceMatrix matrix_builder;
	matrix_builder.computeCleanedDistanceMatrix(original_map, points, downsampling_factor, robot_radius, map_resolution, pathplanner_,
			cleaned_matrix, index_mapping, mapped_start_node);

	// solve on the cleaned matrix and translate the result back
	return solveGeneticTSPWithCleanedDistanceMatrix(cleaned_matrix, index_mapping, mapped_start_node);
}
// compute TSP with pre-computed cleaned distance matrix (does not contain any infinity paths)
// Solves the TSP on an already-cleaned distance matrix (no infinite entries)
// and maps every index of the result back to the original point numbering via
// the provided mapping.
std::vector<int> GeneticTSPSolver::solveGeneticTSPWithCleanedDistanceMatrix(const cv::Mat& distance_matrix,
		const std::map<int,int>& cleaned_index_to_original_index_mapping, const int start_node)
{
	std::vector<int> order = solveGeneticTSP(distance_matrix, start_node);
	// re-index in place: cleaned index -> original index
	for (std::vector<int>::iterator it = order.begin(); it != order.end(); ++it)
		*it = cleaned_index_to_original_index_mapping.at(*it);
	return order;
}

View File

@ -0,0 +1,186 @@
#include <ipa_building_navigation/maximal_clique_finder.h>
using namespace std;
using namespace boost;
//
//***********************Maximal Clique Finder*****************************
//
//This class provides a maximal clique-finder for a given Graph that finds all maximal cliques in this. A maximal clique
//is a subgraph in the given Graph, in which all Nodes are connected to each other and cannot be enlarged by adding other
//Nodes ( https://en.wikipedia.org/wiki/Maximum_clique ). It uses the c++ Boost library by using the implemented Bron-Kerbosch
//function and defining the Graph as a boost::undirected_Graph. Due of a missing documentation for this function see
//
// http://www.boost.org/doc/libs/1_56_0/libs/graph/example/bron_kerbosch_print_cliques.cpp
// http://www.boost.org/doc/libs/1_58_0/libs/graph/example/bron_kerbosch_clique_number.cpp
// http://www.boost.org/doc/libs/1_47_0/libs/graph/example/helper.hpp
// http://stackoverflow.com/questions/23299406/maximum-weighted-clique-in-a-large-graph-with-high-density
//
//for further information.
//As input this function takes a symmetrical Matrix that stores the pathlengths from one node of the graph to another.
//If one Node has no connection to another the element in the matrix is zero, it also is at the main-diagonal.
//!!!!!!!!!!!!!See maximal_clique_finder.h for further information on formatting.!!!!!!!!!!!!!
// File-scope buffer collecting the cliques reported by the Bron-Kerbosch
// visitor below; getCliques() copies and clears it after every run.
// NOTE(review): because this is shared file-static state, getCliques() is not
// reentrant/thread-safe.
static std::vector<std::vector<int> > names_; //vector to save the cliques achieved by Boost

// The Actor type stores the name of each vertex in the graph.
struct Actor
{
	string name; // vertex label; here: the node index rendered as text
};

//define some types that are used for the boost function
typedef undirected_graph<Actor> Graph;
typedef graph_traits<Graph>::vertex_descriptor Vertex;
typedef graph_traits<Graph>::edge_descriptor Edge;
typedef property_map<Graph, string Actor::*>::type NameMap;
// Returns the vertex associated with 'name', creating and registering a new
// vertex in the graph (and in the name->vertex map 'vm') when the name has not
// been seen before. 'nm' is the property map storing each vertex's name.
template<typename Graph, typename NameMap, typename VertexMap>
typename graph_traits<Graph>::vertex_descriptor cliqueFinder::addNamedVertex(Graph& g, NameMap nm, const string& name, VertexMap& vm)
{
	typedef typename graph_traits<Graph>::vertex_descriptor Vertex;

	// try to register the name; 'is_new' tells whether it was unknown so far
	typename VertexMap::iterator position;
	bool is_new = false;
	boost::tie(position, is_new) = vm.insert(make_pair(name, Vertex()));

	if (!is_new)
		return position->second; // name already known: reuse its vertex

	// unseen name: create the vertex, remember it, and store its printable name
	Vertex fresh_vertex = add_vertex(g);
	position->second = fresh_vertex;
	put(nm, fresh_vertex, name);
	return fresh_vertex;
}
//This class is the Visitor for all nodes in the graph, which is used by boost::bron_kerbosch. the function clique() gets
//called everytime a maximal clique was found. This comes from the boost implementation.
template<typename OutputStream>
struct visitor
{
	// scratch buffer holding the node indices of the clique currently reported
	vector<int> current_names;
	visitor()
	{
	}
	// Callback invoked by bron_kerbosch_all_cliques for every maximal clique
	// found. c: the clique (vertex descriptors); g: the graph it belongs to.
	// The clique's node indices are appended to the file-static names_ buffer.
	template<typename Clique, typename Graph>
	void clique(const Clique& c, const Graph& g)
	{
		// Iterate over the clique and print each vertex within it.
		typename Clique::const_iterator i, end = c.end();
		for (i = c.begin(); i != end; ++i)
		{
			// vertex names were created from integer indices, so atoi() restores them
			current_names.push_back(atoi(g[*i].name.c_str()));
		}
		names_.push_back(current_names); //save the clique to return all later
		current_names.clear(); //clear the current clique
	}
};
// Default constructor; the finder keeps no per-instance state.
cliqueFinder::cliqueFinder()
{
}
//This function creates a boost::undirected_graph out of the cutted distance-Matrix. The Graph is used for the
//boost::bron-kerbosch algorihtm.
template<typename Graph>
void cliqueFinder::createGraph(Graph& graph, cv::Mat& distance_Matrix)
{
vector<Vertex> vertexes;
NameMap nmap(get(&Actor::name, graph));
std::map<std::string, Vertex> vert_map;
//add all Nodes to the graph
for (int counter = 0; counter < distance_Matrix.cols; counter++)
{
stringstream current_name;
current_name << counter; //<< counter;
Vertex current_vertex = addNamedVertex(graph, nmap, current_name.str(), vert_map);
vertexes.push_back(current_vertex);
}
//add each Edge if there is a connection between the Nodes
for (int current_vertex = 0; current_vertex < vertexes.size(); current_vertex++)
{
for (int neighbor_node = current_vertex; neighbor_node < vertexes.size(); neighbor_node++)
{
if (distance_Matrix.at<double>(current_vertex, neighbor_node) > 0)
{
add_edge(vertexes[current_vertex], vertexes[neighbor_node], graph);
}
}
}
}
//This function cuts an edge if the distance between the two Nodes is too large. This is neccessary to find possible
//areas in the graph for cliques. If the complete graph is connected only one clique will be found, containing all
//Nodes in the graph, which isn't very useful for planning.
// Removes edges that are longer than maxval by zeroing the corresponding matrix
// entries (a zero entry means "no edge" when the graph is built from the
// matrix). Without this cut a fully connected graph would yield one giant
// clique containing all nodes, which is useless for planning.
void cliqueFinder::cutTooLongEdges(cv::Mat& complete_distance_matrix, double maxval)
{
	for (int y = 0; y < complete_distance_matrix.rows; ++y)
		for (int x = 0; x < complete_distance_matrix.cols; ++x)
			if (complete_distance_matrix.at<double>(y, x) > maxval)
				complete_distance_matrix.at<double>(y, x) = 0;
}
//This function uses the previously described functions and finds all maximal cliques in a given graph. The maxval parameter
//is used to cut edges that are too long. See maximal_clique_finder.h for further information on formatting.
// Finds all maximal cliques of the graph described by distance_matrix after
// cutting every edge longer than maxval. Nodes not contained in any reported
// clique (isolated after the cut) are appended as single-node groups so that
// every node appears in the result. Returns one index vector per clique.
// NOTE(review): results accumulate in the file-static names_ buffer, so this
// function is not reentrant/thread-safe.
std::vector<std::vector<int> > cliqueFinder::getCliques(const cv::Mat& distance_matrix, double maxval)
{
	// Create a graph object
	Graph g;
	//cut the edges if they are too long
	cv::Mat cutted_distance_matrix = distance_matrix.clone();
	cutTooLongEdges(cutted_distance_matrix, maxval);
	//Create a graph out of the cutted distance matrix
	createGraph(g, cutted_distance_matrix);
	//Object needed from boost to return the results
	visitor<ostream> vis;
	// Use the Bron-Kerbosch algorithm to find all cliques
	// (each found clique is appended to the static names_ vector by the visitor)
	bron_kerbosch_all_cliques(g, vis);
	//Make sure that nodes, which are too far away from other nodes are in the clique-vector
	//(a clique has at least two nodes, but in this case it is neccessary to have all nodes in the cliques and
	//nodes that are too far away from others count also as a possible group)
	for (int node = 0; node < distance_matrix.rows; node++)
	{
		bool add = true;
		for (int group = 0; group < names_.size(); group++)
		{
			if (contains(names_[group], node))
			{
				add = false;
			}
		}
		if(add)
		{
			std::vector<int> adding_vector;
			adding_vector.push_back(node);
			names_.push_back(adding_vector);
		}
	}
	//save the names_ vector and clear it for next usage
	std::vector<std::vector<int> > returning_vector(names_);
	names_.clear();
	return returning_vector;
}

View File

@ -0,0 +1,102 @@
#include <ipa_building_navigation/nearest_neighbor_TSP.h>
//Default Constructor
// Default constructor; the solver keeps no configurable state.
NearestNeighborTSPSolver::NearestNeighborTSPSolver()
{
}
//This function calculates the order of the TSP, using the nearest neighbor method. It uses a pathlength Matrix, which
//should be calculated once. This Matrix should save the pathlengths with this logic:
// 1. The rows show from which Node the length is calculated.
// 2. For the columns in a row the Matrix shows the distance to the Node in the column.
// 3. From the node to itself the distance is 0.
// Computes a TSP tour greedily: starting at start_node, repeatedly append the
// nearest not-yet-visited node until all nodes are in the order. Matrix format:
// row = from-node, column = to-node, diagonal = 0; zero off-diagonal entries
// mean "no connection" and are skipped.
//  path_length_matrix: symmetric CV_64F matrix of pairwise path lengths
//  start_node:         index of the node the tour starts at
// Returns the visiting order as indices into the matrix.
// bug fix: the original read next_node uninitialized when no unvisited node had
// a positive, finite distance (undefined behavior); we now use a -1 sentinel
// and fall back to the first unvisited node in that case. The unused local
// 'last_node' was removed.
std::vector<int> NearestNeighborTSPSolver::solveNearestTSP(const cv::Mat& path_length_matrix, const int start_node)
{
	std::vector<int> calculated_order; //solution order
	if(path_length_matrix.rows > 1) //check if clique has more than one member or else this algorithm produces a order of size=3
	{
		std::vector<bool> visited(path_length_matrix.rows, false);
		int current_node = start_node; //index of the currently expanded node
		calculated_order.push_back(current_node);
		visited[current_node] = true;
		//check every Point for the next nearest neighbor and add it to the order
		do
		{
			int next_node = -1; // -1 = no reachable unvisited neighbor found yet
			double min_distance = 1e100; //distance to the current best candidate
			for (int current_neighbor = 0; current_neighbor < path_length_matrix.cols; current_neighbor++)
			{
				if (visited[current_neighbor]==false) //check if current neighbor hasn't been visited yet
				{
					const double length = path_length_matrix.at<double>(current_node, current_neighbor);
					if (length < min_distance && length > 0)
					{
						next_node = current_neighbor;
						min_distance = length;
					}
				}
			}
			// fallback: if no unvisited node is reachable (all remaining entries
			// are 0), take the first unvisited node instead of reading an
			// indeterminate value
			if (next_node == -1)
			{
				for (int candidate = 0; candidate < (int)visited.size(); ++candidate)
				{
					if (visited[candidate] == false)
					{
						next_node = candidate;
						break;
					}
				}
			}
			calculated_order.push_back(next_node); //add the found nearest neighbor to the order-vector
			visited[next_node] = true;
			current_node = next_node;
		} while ((int)calculated_order.size() < path_length_matrix.rows); //done when every node has been placed in the order
	}
	else
	{
		calculated_order.push_back(start_node);
	}
	return calculated_order;
}
// compute TSP and distance matrix without cleaning it
// this version does not exclude infinite paths from the TSP ordering
// Convenience overload: builds the pairwise distance matrix for the given
// points on the map, then runs the nearest-neighbor ordering on it. If the
// caller hands in a matrix pointer, the computation works on that matrix.
// This variant does not exclude infinite paths from the ordering.
std::vector<int> NearestNeighborTSPSolver::solveNearestTSP(const cv::Mat& original_map, const std::vector<cv::Point>& points,
		double downsampling_factor, double robot_radius, double map_resolution, const int start_node, cv::Mat* distance_matrix)
{
	std::cout << "NearestNeighborTSPSolver::solveNearestTSP: Constructing distance matrix..." << std::endl;
	//calculate the distance matrix
	cv::Mat matrix_storage;
	if (distance_matrix != 0)
		matrix_storage = *distance_matrix;
	DistanceMatrix matrix_builder;
	matrix_builder.constructDistanceMatrix(matrix_storage, original_map, points, downsampling_factor, robot_radius, map_resolution, pathplanner_);

	return solveNearestTSP(matrix_storage, start_node);
}
// compute TSP from a cleaned distance matrix (does not contain any infinity paths) that has to be computed
// Solves the nearest-neighbor TSP on a "cleaned" distance matrix, i.e. one from
// which all unreachable (infinite-cost) points were removed, and returns the
// order re-indexed to the original point numbering.
std::vector<int> NearestNeighborTSPSolver::solveNearestTSPClean(const cv::Mat& original_map, const std::vector<cv::Point>& points,
		double downsampling_factor, double robot_radius, double map_resolution, const int start_node)
{
	// build the cleaned matrix; index_mapping translates cleaned-matrix indices
	// back to the original point indices, and the start node may be remapped too
	cv::Mat cleaned_matrix;
	std::map<int,int> index_mapping;
	int mapped_start_node = start_node;
	DistanceMatrix matrix_builder;
	matrix_builder.computeCleanedDistanceMatrix(original_map, points, downsampling_factor, robot_radius, map_resolution, pathplanner_,
			cleaned_matrix, index_mapping, mapped_start_node);

	// solve on the cleaned matrix and translate the result back
	return solveNearestTSPWithCleanedDistanceMatrix(cleaned_matrix, index_mapping, mapped_start_node);
}
// compute TSP with pre-computed cleaned distance matrix (does not contain any infinity paths)
// Solves the nearest-neighbor TSP on an already-cleaned distance matrix (no
// infinite entries) and maps every index of the result back to the original
// point numbering via the provided mapping.
std::vector<int> NearestNeighborTSPSolver::solveNearestTSPWithCleanedDistanceMatrix(const cv::Mat& distance_matrix,
		const std::map<int,int>& cleaned_index_to_original_index_mapping, const int start_node)
{
	std::vector<int> order = solveNearestTSP(distance_matrix, start_node);
	// re-index in place: cleaned index -> original index
	for (std::vector<int>::iterator it = order.begin(); it != order.end(); ++it)
		*it = cleaned_index_to_original_index_mapping.at(*it);
	return order;
}

View File

@ -0,0 +1,63 @@
#include <ipa_building_navigation/node.h>
// Number of movement directions on the grid (8 = including diagonals); used by
// nextLevel() to weight straight vs. diagonal steps.
const int dir_ = 8;

// Constructs an A* search node.
//  xp, yp: cell coordinates of the node in the map
//  d:      accumulated path cost from the start ("level")
//  p:      priority, i.e. cost so far plus heuristic estimate to the goal
NodeAstar::NodeAstar(int xp, int yp, int d, int p)
{
	xPos_ = xp;
	yPos_ = yp;
	level_ = d;
	priority_ = p;
}
// Read-only accessors for the node's state.
int NodeAstar::getxPos() const
{
	return xPos_; // x grid coordinate
}
int NodeAstar::getyPos() const
{
	return yPos_; // y grid coordinate
}
int NodeAstar::getLevel() const
{
	return level_; // accumulated path cost from the start
}
int NodeAstar::getPriority() const
{
	return priority_; // cost so far + heuristic (the A* f-value)
}
// Recomputes the node's A* f-value: accumulated cost (level_) plus the
// heuristic estimate of the remaining distance to the goal cell.
void NodeAstar::updatePriority(const int& xDest, const int& yDest)
{
	priority_ = level_ + estimate(xDest, yDest); // * 10; //A*
}
// give better priority to going straight instead of diagonally
// Adds one step's cost to the accumulated level: with 8 directions a straight
// step (even i) costs 10 and a diagonal step (odd i) costs 14 (integer
// approximation of 10*sqrt(2)); with 4 directions every step costs 10.
void NodeAstar::nextLevel(const int& i) // i: direction
{
	level_ += (dir_ == 8 ? (i % 2 == 0 ? 10 : 14) : 10);
}
// Estimation function for the remaining distance to the goal.
//
//!!!!!!!!!!!!!!!!!!!!!!!Important!!!!!!!!!!!!!!!!!!!!!!!
//Uncomment the method to calculate the distance between this node and the goal you want to use. Euclidean is more precise
//but could take longer to get long paths.
//
// NOTE(review): this returns a const reference to function-local static
// variables — a later call overwrites the value the previous reference points
// to, and the statics make the function non-reentrant/not thread-safe; confirm
// callers consume the result immediately and single-threaded.
const int& NodeAstar::estimate(const int& xDest, const int& yDest) const
{
	static int xd, yd, d;
	xd = xDest - xPos_;
	yd = yDest - yPos_;

	// Euclidean Distance (truncated to int)
	d = static_cast<int>(sqrt(xd * xd + yd * yd));

	// Manhattan distance
	// d = abs(xd) + abs(yd);

	// Chebyshev distance
	// d=std::max(abs(xd), abs(yd));

	return (d);
}

View File

@ -0,0 +1,306 @@
#include <ipa_building_navigation/set_cover_solver.h>
//Default constructor
// Default constructor; the solver keeps no configurable state.
SetCoverSolver::SetCoverSolver()
{
}
////Function to construct the symmetrical distance matrix from the given points. The rows show from which node to start and
////the columns to which node to go. If the path between nodes doesn't exist or the node to go to is the same as the one to
////start from, the entry of the matrix is 0.
//void SetCoverSolver::constructDistanceMatrix(cv::Mat& distance_matrix, const cv::Mat& original_map,
// const std::vector<cv::Point>& points, double downsampling_factor, double robot_radius, double map_resolution)
//{
// //create the distance matrix with the right size
// cv::Mat pathlengths(cv::Size((int)points.size(), (int)points.size()), CV_64F);
//
// // reduce image size already here to avoid resizing in the planner each time
// const double one_by_downsampling_factor = 1./downsampling_factor;
// cv::Mat downsampled_map;
// pathplanner_.downsampleMap(original_map, downsampled_map, downsampling_factor, robot_radius, map_resolution);
//
// for (int i = 0; i < points.size(); i++)
// {
// cv::Point current_center = downsampling_factor * points[i];
// for (int j = 0; j < points.size(); j++)
// {
// if (j != i)
// {
// if (j > i) //only compute upper right triangle of matrix, rest is symmetrically added
// {
// cv::Point neighbor = downsampling_factor * points[j];
// double length = one_by_downsampling_factor * pathplanner_.planPath(downsampled_map, current_center, neighbor, 1., 0., map_resolution);
// pathlengths.at<double>(i, j) = length;
// pathlengths.at<double>(j, i) = length; //symmetrical-Matrix --> saves half the computationtime
// }
// }
// else
// {
// pathlengths.at<double>(i, j) = 0;
// }
// }
// }
//
// distance_matrix = pathlengths.clone();
//}
//This function takes a vector of found nodes and merges them together, if they have at least one node in common.
// Merges node groups that share at least one node. Each input group is visited
// once in order; all later, not-yet-merged groups that overlap with it are
// absorbed into it (without duplicating nodes), and the merged group is
// appended to the result.
std::vector<std::vector<int> > SetCoverSolver::mergeGroups(const std::vector<std::vector<int> >& found_groups)
{
	std::vector<std::vector<int> > merged_result;
	std::vector<int> handled_groups; // indices of groups already merged somewhere

	for (int group_index = 0; group_index < (int)found_groups.size(); ++group_index)
	{
		// a group absorbed by an earlier merge is skipped
		if (contains(handled_groups, group_index))
			continue;
		handled_groups.push_back(group_index);
		std::vector<int> accumulated_group(found_groups[group_index]);

		// collect all later groups that share at least one node with this one
		std::vector<int> groups_to_absorb;
		for (int other_index = 0; other_index < (int)found_groups.size(); ++other_index)
		{
			bool shares_a_node = false;
			if (other_index != group_index && !contains(handled_groups, other_index))
			{
				for (int member = 0; member < (int)found_groups[other_index].size(); ++member)
				{
					if (contains(found_groups[group_index], found_groups[other_index][member]))
						shares_a_node = true;
				}
			}
			if (shares_a_node)
				groups_to_absorb.push_back(other_index);
		}

		// absorb the collected groups, skipping nodes already present
		for (int candidate = 0; candidate < (int)groups_to_absorb.size(); ++candidate)
		{
			handled_groups.push_back(groups_to_absorb[candidate]);
			const std::vector<int>& other_group = found_groups[groups_to_absorb[candidate]];
			for (int member = 0; member < (int)other_group.size(); ++member)
			{
				if (!contains(accumulated_group, other_group[member]))
					accumulated_group.push_back(other_group[member]);
			}
		}

		merged_result.push_back(accumulated_group);
	}
	std::cout << "Finished merging." << std::endl;
	return merged_result;
}
//This functions solves the set-cover Problem ( https://en.wikipedia.org/wiki/Set_cover_problem#Greedy_algorithm ) using
//the greedy-search algorithm. It chooses the clique that has the most uncovered nodes in it first. Then it uses the merge-function
//above to merge groups that have at least one node in common together. The vector stores the indexes of the nodes, which
//are the same as the ones from the clique-solver and also the distance-matrix. The variable max_number_of_clique_members
//implies how many members a clique is allowed to have.
//the cliques are given
// Greedy set-cover over the given cliques: repeatedly choose the clique (of
// allowed size) that covers the most still-uncovered nodes, add it to the
// result, and remove its nodes from every remaining clique and from the set of
// open nodes. If no clique within max_number_of_clique_members covers anything,
// the best oversized clique is split into allowed-size subgraphs by repeatedly
// dropping the node with the largest summed distance to the others.
// Returns groups of node indices that together cover all number_of_nodes nodes.
std::vector<std::vector<int> > SetCoverSolver::solveSetCover(std::vector<std::vector<int> >& given_cliques,
		const int number_of_nodes, const int max_number_of_clique_members, const cv::Mat& distance_matrix)
{
	std::vector < std::vector<int> > minimal_set;

	// deep copy of the cliques so the removal steps below can modify them
	std::vector<std::vector<int> > cliques_for_covering;
	for(size_t clique = 0; clique < given_cliques.size(); clique++)
	{
		std::vector<int> temporary_clique;
		for(size_t node = 0; node < given_cliques[clique].size(); node++)
		{
			temporary_clique.push_back(given_cliques[clique][node]);
		}
		cliques_for_covering.push_back(temporary_clique);
	}

	//Put the nodes in a open-nodes vector. The nodes are named after their position in the room-centers-vector and so every
	//node from 0 to number_of_nodes-1 is in the Graph.
	std::vector<int> open_nodes;
	for (int new_node = 0; new_node < number_of_nodes; new_node++)
	{
		open_nodes.push_back(new_node);
	}

	std::cout << "Starting greedy search for set-cover-problem." << std::endl;

	//Search for the clique with the most unvisited nodes and choose this one before the others. Then remove the nodes of
	//this clique from the unvisited-vector. This is done until no more nodes can be visited.
	do
	{
		int covered_open_nodes;
		int best_covered_counter = 0;
		int best_clique = -1; // -1 = no clique of allowed size covers anything

		// greedy step: among the small-enough cliques pick the one covering
		// the most open nodes
		for (int clique = 0; clique < cliques_for_covering.size(); clique++)
		{
			// skip too big cliques
			if(cliques_for_covering[clique].size() > max_number_of_clique_members)
				continue;
			covered_open_nodes = 0;
			for (int node = 0; node < cliques_for_covering[clique].size(); node++)
			{
				if (contains(open_nodes, cliques_for_covering[clique][node]))
				{
					covered_open_nodes++;
				}
			}
			if (covered_open_nodes > best_covered_counter)
			{
				best_covered_counter = covered_open_nodes;
				best_clique = clique;
			}
		}

		// check if a allowed clique could be found, if not split the biggest clique until it consists of cliques that are of the
		// allowed size
		if(best_clique == -1)
		{
			// repeat the search without the size restriction
			for (int clique = 0; clique < cliques_for_covering.size(); clique++)
			{
				covered_open_nodes = 0;
				for (int node = 0; node < cliques_for_covering[clique].size(); node++)
				{
					if (contains(open_nodes, cliques_for_covering[clique][node]))
					{
						covered_open_nodes++;
					}
				}
				if (covered_open_nodes > best_covered_counter)
				{
					best_covered_counter = covered_open_nodes;
					best_clique = clique;
				}
			}

			// save big clique
			std::vector<int> big_clique = cliques_for_covering[best_clique];

			// iteratively remove nodes far away from the remaining nodes to create small cliques
			bool removed_node = false;
			std::vector<std::vector<int> > found_subgraphs;
			do
			{
				removed_node = false; // reset checking boolean
				std::vector<int> current_subgraph = big_clique;
				// shrink the subgraph until it fits the allowed size
				while(current_subgraph.size() > max_number_of_clique_members)
				{
					removed_node = true;

					// find the node farthest away from the other nodes
					double max_distance = 0.0;
					int worst_node = -1;
					for(size_t node = 0; node < current_subgraph.size(); ++node)
					{
						// compute sum of distances from current node to neighboring nodes
						double current_distance = 0;
						for(size_t neighbor = 0; neighbor < current_subgraph.size(); ++neighbor)
						{
							// don't look at node itself
							if(node == neighbor)
								continue;

							current_distance += distance_matrix.at<double>(current_subgraph[node], current_subgraph[neighbor]);
						}

						// check if sum of distances is worse than the previously found ones
						if(current_distance > max_distance)
						{
							worst_node = node;
							max_distance = current_distance;
						}
					}

					// remove the node farthest away from all other nodes out of the subgraph
					current_subgraph.erase(current_subgraph.begin() + worst_node);
				}

				// save the found subgraph
				found_subgraphs.push_back(current_subgraph);

				// erase the covered nodes from the big clique
				for(size_t node = 0; node < current_subgraph.size(); ++node)
					big_clique.erase(std::remove(big_clique.begin(), big_clique.end(), current_subgraph[node]), big_clique.end());

			}while(removed_node == true && big_clique.size() > 0);

			// add found subgraphs to the minimal set
			for(size_t subgraph = 0; subgraph < found_subgraphs.size(); ++subgraph)
				minimal_set.push_back(found_subgraphs[subgraph]);
		}
		else
		{
			minimal_set.push_back(cliques_for_covering[best_clique]);
		}

		//remove nodes of best clique from all cliques (this is okay because if you remove a node from a clique it stays a clique, it only isn't a maximal clique anymore)
		for(size_t clique = 0; clique < cliques_for_covering.size(); clique++)
		{
			for(int node = 0; node < cliques_for_covering[best_clique].size(); node++)
			{
				if(clique != best_clique)
					cliques_for_covering[clique].erase(std::remove(cliques_for_covering[clique].begin(), cliques_for_covering[clique].end(), cliques_for_covering[best_clique][node]), cliques_for_covering[clique].end());
			}
		}
		// mark the chosen clique's nodes as covered
		for (int node = 0; node < cliques_for_covering[best_clique].size(); node++)
		{
			open_nodes.erase(std::remove(open_nodes.begin(), open_nodes.end(), cliques_for_covering[best_clique][node]), open_nodes.end());
		}
//		cliques_for_covering = new_set;

//		std::cout << open_nodes.size() << std::endl;
	} while (open_nodes.size() > 0);

	std::cout << "Finished greedy search." << std::endl;

//	std::cout << "Starting merging the found groups." << std::endl;
//
//	return mergeGroups(minimal_set);
	return minimal_set;
}
// Overload: the distance matrix is already available, but the cliques still have to be computed.
// Finds all maximal cliques of the graph induced by distance_matrix and maximal_pathlength,
// augments them with one single-node clique per node (so no node can stay uncovered by the
// set cover solver), and then delegates to the clique-based overload.
std::vector<std::vector<int> > SetCoverSolver::solveSetCover(const cv::Mat& distance_matrix, const std::vector<cv::Point>& points,
		const int number_of_nodes, double maximal_pathlength, const int max_number_of_clique_members)
{
	// compute all maximal cliques of the graph
	std::vector<std::vector<int> > cliques = maximal_clique_finder.getCliques(distance_matrix, maximal_pathlength);
	// append a singleton clique for every node, guaranteeing that each node can be covered
	for (int node_index = 0; node_index < number_of_nodes; ++node_index)
		cliques.push_back(std::vector<int>(1, node_index));
	// solve the set cover problem on the assembled clique collection
	return solveSetCover(cliques, number_of_nodes, max_number_of_clique_members, distance_matrix);
}
// Overload: neither the distance matrix nor the cliques are given.
// Builds the A*-based distance matrix for the provided points first (writing into the caller's
// matrix when the optional distance_matrix output pointer is non-null) and then solves the
// set cover problem on it.
std::vector<std::vector<int> > SetCoverSolver::solveSetCover(const cv::Mat& original_map, const std::vector<cv::Point>& points,
		double downsampling_factor, double robot_radius, double map_resolution, double maximal_pathlength, const int max_number_of_clique_members, cv::Mat* distance_matrix)
{
	// compute the distance matrix; share the header with the caller's matrix if one was provided
	cv::Mat computed_distance_matrix;
	if (distance_matrix != 0)
		computed_distance_matrix = *distance_matrix;
	DistanceMatrix matrix_builder;
	matrix_builder.constructDistanceMatrix(computed_distance_matrix, original_map, points, downsampling_factor, robot_radius, map_resolution, pathplanner_);
	// find all maximal cliques on this matrix and run the set cover solver
	return solveSetCover(computed_distance_matrix, points, (int)points.size(), maximal_pathlength, max_number_of_clique_members);
}

View File

@ -0,0 +1,231 @@
#include <ipa_building_navigation/trolley_position_finder.h>
// Default constructor; no members require explicit initialization here.
TrolleyPositionFinder::TrolleyPositionFinder()
{
}
// This function takes one group of room centers and computes the best trolley position for it.
// It performs the following steps:
//   I.   Compute the axis-aligned bounding box of all points in the group, then expand it by a
//        small margin so the best position can also be found when it lies slightly outside the box.
//   II.  Lay a grid of cells over the bounding box. In each cell, select the pixel that is farthest
//        from the closest obstacle (zero) pixel as a trolley-position candidate.
//   III. Among the candidates, choose the one with the smallest total path length to all group
//        points. If the group has exactly two members, prefer the candidate lying closest to the
//        middle of the connecting path between them.
cv::Point TrolleyPositionFinder::findOneTrolleyPosition(const std::vector<cv::Point> group_points, const cv::Mat& original_map,
const double downsampling_factor, const double robot_radius, const double map_resolution)
{
double largening_of_bounding_box = 5; // margin [pixel] by which the bounding box of the room centers is expanded, to make sure the best trolley position is found even if it lies slightly outside the box
double max_x_value = group_points[0].x; // min/max coordinates of the bounding box, initialized with the coordinates of the first point of the group
double min_x_value = group_points[0].x;
double max_y_value = group_points[0].y;
double min_y_value = group_points[0].y;
// create an eroded map, used to reject trolley-position candidates that lie too close to obstacles
cv::Mat eroded_map;
cv::erode(original_map, eroded_map, cv::Mat(), cv::Point(-1, -1), 4);
// create the distance-transformed map used to rate the candidates (distance to the closest obstacle pixel)
cv::Mat temporary_map = original_map.clone();
cv::erode(temporary_map, temporary_map, cv::Mat());
cv::Mat distance_map; // distance-transformed map, type: CV_32FC1
#if CV_MAJOR_VERSION<=3
cv::distanceTransform(temporary_map, distance_map, CV_DIST_L2, 5);
#else
cv::distanceTransform(temporary_map, distance_map, cv::DIST_L2, 5);
#endif
cv::convertScaleAbs(distance_map, distance_map); // conversion to 8 bit image
//
//******************************** I. Get bounding box of the group ********************************
//
// go through each point and track the min/max x/y values --> bounding box
for (int point = 0; point < group_points.size(); point++)
{
if (group_points[point].x > max_x_value)
{
max_x_value = group_points[point].x;
}
if (group_points[point].x < min_x_value)
{
min_x_value = group_points[point].x;
}
if (group_points[point].y > max_y_value)
{
max_y_value = group_points[point].y;
}
if (group_points[point].y < min_y_value)
{
min_y_value = group_points[point].y;
}
}
// expand the bounding box slightly by the defined margin (clamped so the values stay inside the map boundaries)
if (max_x_value + largening_of_bounding_box < original_map.cols)
{
max_x_value += largening_of_bounding_box;
}
if (min_x_value - largening_of_bounding_box > 0)
{
min_x_value -= largening_of_bounding_box;
}
if (max_y_value + largening_of_bounding_box < original_map.rows)
{
max_y_value += largening_of_bounding_box;
}
if (min_y_value - largening_of_bounding_box > 0)
{
min_y_value -= largening_of_bounding_box;
}
//
//******************************** II. Get the candidates for trolley positions ********************************
//
double cell_side_length = 10; // side length [pixel] of one grid cell; one candidate is extracted per cell
double max_x_cell = min_x_value + cell_side_length; // current cell boundaries, initialized to the top-left cell
double min_x_cell = min_x_value;
double max_y_cell = min_y_value + cell_side_length;
double min_y_cell = min_y_value;
bool out_of_y = false; // loop-termination flags: set when the current cell leaves the bounding box
bool out_of_x = false;
std::vector < cv::Point > trolley_position_candidates;
// go through each grid cell and find the candidate for it
do //go from y_min to y_max
{
max_x_cell = min_x_value + cell_side_length; // reset the x-values of the cell to start from the left edge whenever a new row of cells is entered
min_x_cell = min_x_value;
do //go from x_min to x_max
{
out_of_x = false;
double best_x = min_x_cell;
double best_y = min_y_cell;
// go through each pixel of the cell and take the one that is farthest away from the obstacle (zero) pixels
for (int y = min_y_cell; y < max_y_cell; y++)
{
for (int x = min_x_cell; x < max_x_cell; x++)
{
if (distance_map.at<unsigned char>(best_y, best_x) < distance_map.at<unsigned char>(y, x) && eroded_map.at<unsigned char>(y, x) != 0)
{
best_x = x;
best_y = y;
}
}
}
// only accept the candidate if it is far enough away from the map boundary/obstacles
if (eroded_map.at<unsigned char>(best_y, best_x) != 0)
{
trolley_position_candidates.push_back(cv::Point(best_x, best_y));
}
min_x_cell = max_x_cell; // advance to the next cell in x-direction
max_x_cell += cell_side_length;
// if x left the bounding box, continue with the next row of cells
if (min_x_cell > max_x_value)
{
out_of_x = true;
}
} while (!out_of_x);
min_y_cell = max_y_cell; // advance to the next row of cells in y-direction
max_y_cell += cell_side_length;
// if y left the bounding box, all cells have been visited
if (min_y_cell > max_y_value)
{
out_of_y = true;
}
} while (!out_of_y);
//
//***************** III. Find the candidate that minimizes the pathlengths to all group points *****************
//
// variables tracking the best candidate found so far
double best_pathlength = 1e10;
double best_pathlength_point_distance = 1e10;
int best_trolley_candidate = 0;
// reduce image size already here to avoid resizing in the planner each time
const double one_by_downsampling_factor = 1./downsampling_factor;
cv::Mat downsampled_map;
path_planner_.downsampleMap(original_map, downsampled_map, downsampling_factor, robot_radius, map_resolution);
// go through each candidate and calculate the sum of pathlengths to all group points
for (size_t candidate = 0; candidate < trolley_position_candidates.size(); candidate++)
{
cv::Point start_point = downsampling_factor * trolley_position_candidates[candidate];
double current_pathlength = 0;
std::vector<double> pathlengths; // pathlength to each individual group point (used for the two-member case below)
for (int room_center = 0; room_center < group_points.size(); room_center++)
{
cv::Point end_point = downsampling_factor * group_points[room_center];
// plan a path to the current center; scale back to original-map units
double center_pathlength = one_by_downsampling_factor * path_planner_.planPath(downsampled_map, start_point, end_point, 1., 0., map_resolution);
pathlengths.push_back(center_pathlength);
// accumulate the total pathlength
current_pathlength += center_pathlength;
}
// check for the best position that has the shortest pathlength to all centers. A tolerance of 0.05 is added to the
// best distances because the downsampling introduces an error, and with the tolerance better positions can be found.
if (group_points.size() == 2)
{
// if the group only has two members, prefer the candidate that lies in the middle of the connecting
// path between the two points; otherwise an arbitrary point along the path could be chosen
double current_point_distance = std::abs(pathlengths[1] - pathlengths[0]);
// NOTE(review): the accessibility check below tests the previously stored best candidate
// (best_trolley_candidate), not the current candidate — looks suspicious; confirm intent.
if (current_pathlength <= (best_pathlength + 0.05) && current_point_distance <= (best_pathlength_point_distance + 0.05)
&& downsampled_map.at<unsigned char>(downsampling_factor * trolley_position_candidates[best_trolley_candidate]) != 0)
{
best_pathlength_point_distance = current_point_distance;
best_pathlength = current_pathlength;
best_trolley_candidate = candidate;
}
}
else
{
if (current_pathlength <= (best_pathlength + 0.05))
{
best_pathlength = current_pathlength;
best_trolley_candidate = candidate;
}
}
}
return trolley_position_candidates[best_trolley_candidate];
}
// Computes one trolley position for every given room group.
// Groups with at least two members are handed to findOneTrolleyPosition(); for single-member
// groups the room center itself is used as the trolley position.
std::vector<cv::Point> TrolleyPositionFinder::findTrolleyPositions(const cv::Mat& original_map, const std::vector<std::vector<int> >& found_groups,
		const std::vector<cv::Point>& room_centers, const double downsampling_factor, const double robot_radius, const double map_resolution)
{
	std::vector<cv::Point> trolley_positions;
	for (size_t group_index = 0; group_index < found_groups.size(); ++group_index)
	{
		const std::vector<int>& group = found_groups[group_index];
		if (group.size() > 1)
		{
			// gather the room centers belonging to this group and optimize a trolley position for them
			std::vector<cv::Point> member_centers;
			for (size_t member = 0; member < group.size(); ++member)
				member_centers.push_back(room_centers[group[member]]);
			trolley_positions.push_back(findOneTrolleyPosition(member_centers, original_map, downsampling_factor, robot_radius, map_resolution));
		}
		else
		{
			// single-member group: its room center is the trolley position
			trolley_positions.push_back(room_centers[group[0]]);
		}
	}
	return trolley_positions;
}

View File

@ -0,0 +1,37 @@
<?xml version="1.0"?>
<package format="2">
<name>ipa_building_navigation</name>
<version>0.1.0</version>
<description>
This package plans the navigation through the building in the autopnp-scenario. It has the following parts/functions:
1. An A-star pathplanning-algorithm, which finds the shortest way between two Points in a given map. The map should be a gridmap as a OpenCV 8bit grayscale image with 0 as obstacle
and 255 as reachable area.
2. TSP-solving functions, which solve the TSP-Problem for a given, segmented map. The segmented map comes from the functions in the package ipa_room_segmentation with the corresponding roomcenters. These centers need to be visited in minimal time, so a TSP-Solver is applied. There are following algorithms for this implemented:
I. nearest-neighbor: This algorithm takes the current Point and goes to the nearest neighbor, the distance is given by the A-star pathplanner.
II. genetic-solver: This algorithm takes the path from the nearest-neighbor solver and improves it using evolutionary methods. For this, the given path is seen as a parent, which gets 7 children. These children are mutated, meaning that the path of the parent is changed randomly. The mutations can be a random switching of the center order or an inversion of random parts of the path. After these children have been created, the function calculates the length of each path, using the results from the A-star pathplanner, and compares the children and the parent (so 8 paths). The shortest path is chosen and becomes the new parent. This step is done at least 42 times, and then the algorithm checks if the pathlength hasn't changed in the last 10 steps; if so, the path is close to the optimal solution.
III. The Concorde-TSP-solver: The TSP-solving-library Concorde is used. See http://www.math.uwaterloo.ca/tsp/concorde/index.html for further information.
</description>
<author email="florian.jordan@ipa.fraunhofer.de">Florian Jordan</author>
<maintainer email="richard.bormann@ipa.fraunhofer.de">Richard Bormann</maintainer>
<license>LGPL for academic and non-commercial use; for commercial use, please contact Fraunhofer IPA</license>
<url>http://ros.org/wiki/ipa_building_navigation</url>
<buildtool_depend>catkin</buildtool_depend>
<depend>actionlib</depend>
<depend>boost</depend>
<depend>cv_bridge</depend>
<depend>dynamic_reconfigure</depend>
<depend>geometry_msgs</depend>
<depend>ipa_building_msgs</depend>
<!--depend>libconcorde_tsp_solver</depend-->
<depend>libopencv-dev</depend>
<!-- <depend>message_generation</depend> -->
<build_depend>message_generation</build_depend>
<exec_depend>message_runtime</exec_depend>
<depend>roscpp</depend>
<depend>roslib</depend>
<depend>sensor_msgs</depend>
<depend>visualization_msgs</depend>
</package>

View File

@ -0,0 +1,66 @@
# ipa_building_navigation
Algorithms for systematic coverage of different spaces in an optimal traveling order.
If you find this software useful in your work, please cite our corresponding paper:
- R. Bormann, F. Jordan, J. Hampp, and M. Hägele. Indoor coverage path planning: Survey, implementation, analysis. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), pages 1718–1725, May 2018. https://ieeexplore.ieee.org/abstract/document/8460566 , https://publica.fraunhofer.de/entities/publication/f537c15d-4cbe-4672-9d86-6e756a9ce71b/details
# General Procedure
1. Change the algorithm parameters in ros/launch/room_sequence_planning_action_server_params.yaml in ros/launch to the wanted algorithms and settings.
* tsp_solver: Choose which traveling salesman problem (TSP) solver you want to use.
* planning_method: Choose which planning method for the trolley you want to use.
* for both planning methods the parameter **max_clique_path_length** determines how far two rooms can be away from each other until they get separated into two different cliques.
* map_downsampling_factor: The algorithm plans an A-star path through the environment to determine distances between the roomcenters that are used as edge weights for the TSP solver. For this you can set this parameter to reduce the size of the map, which speeds up the pathfinding. The originally sized map is checked, if no path for the downsampled map could be found. **Range**: 0<factor<=1
* check_accessibility_of_rooms: Choose if you want the action server to check which given roomcenters are accessible from the starting position.
* return_sequence_map: If enabled, the server returns an image containing the drawn in cliques, that contain the corresponding roomcenters and trolley positions, and also shows the visiting order of each clique and in each clique.
* display_map: If true, the server displays the sequence map. **REMARK**: Only possible if the sequence map should be returned, since this enables the computation of it.
* maximum_clique_size: This parameter determines how many roomcenters one clique is allowed to have. this can be useful e.g. when you have a limited number of rooms the robot can clean with one battery loading. If you want to allow all cliques, simply set this parameter very high. E.g. for the cases that result from the room-segmentations in http://wiki.ros.org/ipa_room_segmentation a value of **9001** was more than enough. Of course if in your case more than 9001 centers occur, this is not enough, so when setting this parameter you unfortunately have to think of the way you use this package.
2. Start the action server using the file /ros/launch/room_sequence_planning_action_server.launch, which executes the /ros/src/room_sequence_planning_action_server.cpp file. If the server crashes for some reason (e.g. no valid map given by the client) it respawns with a little delay.
3. Start an action client, which sends a goal to the action server, corresponding to the FindRoomSequenceWithCheckpoints.action message, which lies in ipa_building_msgs/action. The goal consists of the following parts
* input_map: The map for which the visiting sequence should be planned, as sensor_msgs/Image. **Has to be a 8-Bit single channel image, with 0 as occupied space and 255 as free space**.
* map_resolution: The resolution the map has been sampled with [meter/cell].
* map_origin: The origin of the map in [meter] to bring the provided map coordinates in absolute coordinates, if wanted.
* room_information_in_pixel: Gives for each room in the map the min/max coordinates of the pixels belonging to this room and the computed center of it. See ipa_building_msgs/msg/RoomInformation.msg for further details.
* robot_radius: The radius of your robot in [meter]. The given map will be eroded corresponding to this radius, s.t. each white pixel in the image can actually be reached by the robot.
* robot_start_coordinate: Determines the position of your robot before executing the computed sequence. This is used to find the room that is closest to this position and starts the sequence there.
4. The action server returns as result the sequence of cliques. This is based on ipa_building_msgs/RoomSequence.msg and gives for each clique
* room_indices: The room indices, corresponding to the positions in room_information_in_pixel, belonging to this clique.
* checkpoint_position_in_pixel/.._in_meter: The trolley location for this clique.
A Client also can change the parameters of the server by using dynamic_reconfigure. This can be done using the dynamic_reconfigure_client.h file that provides a client for this purpose. With client.setConfig("param_name", value) one specific parameter can be changed from the client, see ros/src/room_sequence_planning_action_client.cpp for an example. Of course rqt_reconfigure can also be used to change specific parameters.
The algorithms are implemented in common/src, using the headers in common/include/ipa_building_navigation.
The first planning method is faster than the second one, but may give worse results because of the underlying algorithm. The choice of the TSP solver depends heavily on the scale of your problem. The nearest neighbor solver is significantly faster than the concorde solver, but of course gives bad results in large scale problems. An advantage of our second planning procedure is, that the server divides the problem into smaller subproblems, meaning a TSP over the trolley positions and a TSP for each clique over the rooms belonging to this clique. This reduces the dimensionality for each problem and allows in most cases to get good results with the genetic solver that approximates the best solution, so in most cases this solver should do fine. If you have very large problems with hundreds of rooms or you want exactly the optimal tour and not just an approximation of it, the concorde solver is the best choice.
# Available TSP solvers
1. Nearest Neighbor solver: An approximate TSP solver that starts at the given start-point and iteratively goes to the node that is nearest to the last node of the path. At the end, the path returns to the start-point again to close the tour.
2. Genetic solver: This solver is based on the work of Chatterjee et. al. [1]. The proposed method takes the nearest neighbor path and uses a genetic optimization algorithm to iteratively improve the computed path.
3. Concorde solver: This solver is based on the Concorde TSP solver package, obtained from Applegate et. al. [2], which is free for academic research. It provides an exact TSP solver that has proven to obtain the optimal solution for several large TSPs in a rather short time. Anyway this solver of course is a little bit slower than the other solvers, but gives the optimal solution.
# Available planning algorithms
1. Trolley drag method: This method is very intuitive, but does not produce the best results. The trolley starts at the given robot starting position and stays there for the first clique. Then a TSP over all rooms is solved to get an optimal visiting order. Following the tour, the algorithm checks which room is still in the range you defined from the current trolley location and adds these rooms to one clique. When the next room is too far away from the trolley, a new clique is opened and the trolley is dragged to the room opening it. When the last clique will become larger than the specified max. size of one clique, a new one is also opened. This is done until all rooms have been assigned to cliques.
2. Room-group method: For this method the whole rooms are spanned as a graph, with edges between two different rooms if the path-distance between them is not larger than the specified parameter. In this graph then a set-cover problem is solved, finding the biggest cliques in the graph, i.e. subgraphs in which all nodes are connected to all other nodes in it, to cover the whole graph. This produces several groups that are reachable with the given max. travel distance from each other. For each group then a central trolley position is computed s.t. the travel distance from the trolley to all rooms is minimized. At last, two TSPs are solved, one for the computed trolley positions and one for each room to determine the optimal room visiting order.
# References
[1] Chatterjee, S., Carrera, C., and Lynch, L. A. Genetic algorithms and
traveling salesman problems. European journal of operational research 93, 3 (1996),
490510.
[2] Applegate, D., Bixby, R., Chvatal, V., and Cook, W. Concorde tsp solver.
http://www.math.uwaterloo.ca/tsp/concorde.html, 2006.
In this package an A-star pathplanning algorithm is implemented. It was taken and slightly adapted from:
http://code.activestate.com/recipes/577457-a-star-shortest-path-algorithm/
It was released under the MIT-license (https://en.wikipedia.org/wiki/MIT_License), so it is freely usable for everyone.

View File

@ -0,0 +1,248 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2015 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_segmentation
*
* \author
* Author: Richard Bormann
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 08.2016
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#ifndef _DYNAMIC_RECONFIGURE_CLIENT_H_
#define _DYNAMIC_RECONFIGURE_CLIENT_H_
#include <ros/ros.h>
#include <ros/subscriber.h>
#include <boost/thread/mutex.hpp>
#include <dynamic_reconfigure/DoubleParameter.h>
#include <dynamic_reconfigure/IntParameter.h>
#include <dynamic_reconfigure/Reconfigure.h>
#include <dynamic_reconfigure/Config.h>
/**
 * Convenience client for changing parameters of a remote node via dynamic_reconfigure.
 *
 * On construction it subscribes to the remote node's parameter_updates topic and blocks until
 * the current configuration has been received. Afterwards setConfig() can be used to change a
 * single bool/double/int/string parameter: the cached configuration is updated (or the
 * parameter is appended if unknown) and the complete configuration is sent back to the node
 * through its reconfigure service.
 */
class DynamicReconfigureClient
{
public:
	/**
	 * \param nh node handle used for the parameter_updates subscription
	 * \param dynamic_reconfigure_service_name name of the remote reconfigure service
	 * \param parameter_updates_topic topic on which the remote node publishes its configuration
	 *
	 * Blocks until the first configuration message arrives or ROS is shut down.
	 */
	DynamicReconfigureClient(ros::NodeHandle& nh, const std::string& dynamic_reconfigure_service_name, const std::string& parameter_updates_topic)
	: node_handle_(nh), dynamic_reconfigure_current_config_received_(false), dynamic_reconfigure_service_name_(dynamic_reconfigure_service_name)
	{
		dynamic_reconfigure_sub_ = node_handle_.subscribe(parameter_updates_topic, 1, &DynamicReconfigureClient::dynamic_reconfigure_current_config_callback, this);
		// receive current configuration; abort the wait on shutdown instead of spinning forever
		ros::Duration sleep_rate(0.5);
		while (dynamic_reconfigure_current_config_received_ == false && ros::ok())
		{
			ros::spinOnce();
			sleep_rate.sleep();
		}
	}

	// Returns the locally cached configuration.
	// NOTE(review): the lock is released on return, so the returned reference is not protected
	// against concurrent callback updates — confirm single-threaded usage by callers.
	dynamic_reconfigure::Config& getConfig()
	{
		boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
		return dynamic_reconfigure_config_;
	}

	// Sets a bool parameter and sends the updated configuration to the remote node.
	// Returns false if no configuration has been received yet or the service call failed.
	bool setConfig(const std::string& param_name, const bool param_value)
	{
		boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
		if (dynamic_reconfigure_current_config_received_ == false)
		{
			ROS_WARN("DynamicReconfigureClient: Did not receive the current configuration, yet.");
			return false;
		}
		if (updateOrAppendParameter(dynamic_reconfigure_config_.bools, param_name, param_value) == false)
			ROS_WARN("DynamicReconfigureClient: Parameter %s does not exist. Appending it to the reconfigure message.", param_name.c_str());
		return sendConfiguration(dynamic_reconfigure_config_);
	}

	// Sets a double parameter and sends the updated configuration to the remote node.
	bool setConfig(const std::string& param_name, const double param_value)
	{
		boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
		if (dynamic_reconfigure_current_config_received_ == false)
		{
			ROS_WARN("DynamicReconfigureClient: Did not receive the current configuration, yet.");
			return false;
		}
		if (updateOrAppendParameter(dynamic_reconfigure_config_.doubles, param_name, param_value) == false)
			ROS_WARN("DynamicReconfigureClient: Parameter %s does not exist. Appending it to the reconfigure message.", param_name.c_str());
		return sendConfiguration(dynamic_reconfigure_config_);
	}

	// Sets an int parameter and sends the updated configuration to the remote node.
	bool setConfig(const std::string& param_name, const int param_value)
	{
		boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
		if (dynamic_reconfigure_current_config_received_ == false)
		{
			ROS_WARN("DynamicReconfigureClient: Did not receive the current configuration, yet.");
			return false;
		}
		if (updateOrAppendParameter(dynamic_reconfigure_config_.ints, param_name, param_value) == false)
			ROS_WARN("DynamicReconfigureClient: Parameter %s does not exist. Appending it to the reconfigure message.", param_name.c_str());
		return sendConfiguration(dynamic_reconfigure_config_);
	}

	// Sets a string parameter and sends the updated configuration to the remote node.
	bool setConfig(const std::string& param_name, const std::string& param_value)
	{
		boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
		if (dynamic_reconfigure_current_config_received_ == false)
		{
			ROS_WARN("DynamicReconfigureClient: Did not receive the current configuration, yet.");
			return false;
		}
		if (updateOrAppendParameter(dynamic_reconfigure_config_.strs, param_name, param_value) == false)
			ROS_WARN("DynamicReconfigureClient: Parameter %s does not exist. Appending it to the reconfigure message.", param_name.c_str());
		return sendConfiguration(dynamic_reconfigure_config_);
	}

private:
	// Caches every configuration published by the remote node and marks the config as received.
	void dynamic_reconfigure_current_config_callback(const dynamic_reconfigure::ConfigConstPtr& current_config)
	{
		boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
		dynamic_reconfigure_config_ = *current_config;
		dynamic_reconfigure_current_config_received_ = true;
	}

	// Updates parameter param_name in the given parameter vector (bools/doubles/ints/strs of a
	// dynamic_reconfigure::Config), or appends a new entry if the name is not present.
	// Returns true if the parameter already existed, false if it had to be appended.
	// Caller must hold dynamic_reconfigure_lock_.
	template <typename ParameterVector, typename ValueType>
	bool updateOrAppendParameter(ParameterVector& parameters, const std::string& param_name, const ValueType& param_value)
	{
		for (size_t i = 0; i < parameters.size(); ++i)
		{
			if (param_name.compare(parameters[i].name) == 0)
			{
				parameters[i].value = param_value;
				return true;
			}
		}
		typename ParameterVector::value_type cfg_param;
		cfg_param.name = param_name;
		cfg_param.value = param_value;
		parameters.push_back(cfg_param);
		return false;
	}

	// Sends the given configuration to the remote node's reconfigure service.
	bool sendConfiguration(const dynamic_reconfigure::Config& dynamic_reconfigure_config)
	{
		dynamic_reconfigure::ReconfigureRequest req;
		dynamic_reconfigure::ReconfigureResponse res;
		req.config = dynamic_reconfigure_config;
		const bool success = ros::service::call(dynamic_reconfigure_service_name_, req, res);
		return success;
	}

	// parameters
	ros::NodeHandle node_handle_;
	ros::Subscriber dynamic_reconfigure_sub_;                  // subscription to the parameter_updates topic
	dynamic_reconfigure::Config dynamic_reconfigure_config_;   // locally cached configuration
	bool dynamic_reconfigure_current_config_received_;         // true once the first configuration arrived
	std::string dynamic_reconfigure_service_name_;             // name of the remote reconfigure service
	boost::mutex dynamic_reconfigure_lock_;                    // guards the cached configuration and flag
};
#endif //_DYNAMIC_RECONFIGURE_CLIENT_H_

View File

@ -0,0 +1,165 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2015 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_building_navigation
*
* \author
* Author: Florian Jordan
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 08.2015
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
#include <ros/ros.h>
#include <ros/package.h>
#include <iostream>
#include <iomanip>
#include <queue>
#include <string>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <visualization_msgs/MarkerArray.h>
#include <fstream>
// Dynamic reconfigure
#include <dynamic_reconfigure/server.h>
#include <ipa_building_navigation/BuildingNavigationConfig.h>
//TSP solver
#include <ipa_building_navigation/tsp_solver_defines.h>
#include <ipa_building_navigation/nearest_neighbor_TSP.h>
#include <ipa_building_navigation/genetic_TSP.h>
#include <ipa_building_navigation/concorde_TSP.h>
//Set Cover solver to find room groups
#include <ipa_building_navigation/set_cover_solver.h>
//finder of trolley positions for each room group
#include <ipa_building_navigation/trolley_position_finder.h>
// A* planner
#include <ipa_building_navigation/A_star_pathplanner.h>
// action
#include <actionlib/server/simple_action_server.h>
#include <ipa_building_msgs/FindRoomSequenceWithCheckpointsAction.h>
// Action server that plans an optimal visiting order through a set of room centers on a
// floor-plan map. Supports two problem settings: simple order planning (one TSP over all
// locations) and checkpoint-based planning (rooms are grouped into cliques, each with a
// trolley/checkpoint position, and the order is optimized within and across cliques).
class RoomSequencePlanningServer
{
public:
// Reads all parameters, registers the dynamic reconfigure callback and starts the action server.
RoomSequencePlanningServer(ros::NodeHandle nh, std::string name_of_the_action);
~RoomSequencePlanningServer()
{
}
protected:
//!!Important!!
// define the Nodehandle before the action server, or else the server won't start
ros::NodeHandle node_handle_;
ros::Publisher room_sequence_visualization_pub_; // visualization of the room sequence
visualization_msgs::MarkerArray room_sequence_visualization_msg_;
actionlib::SimpleActionServer<ipa_building_msgs::FindRoomSequenceWithCheckpointsAction> room_sequence_with_checkpoints_server_;
std::string action_name_;
// converter -> pixel to meter for x-coordinate (pixel * resolution + map origin offset)
double convert_pixel_to_meter_for_x_coordinate(const int pixel_valued_object_x, const float map_resolution, const cv::Point2d map_origin)
{
double meter_value_obj_x = (pixel_valued_object_x * map_resolution) + map_origin.x;
return meter_value_obj_x;
}
// converter -> pixel to meter for y-coordinate (pixel * resolution + map origin offset)
double convert_pixel_to_meter_for_y_coordinate(int pixel_valued_object_y, const float map_resolution, const cv::Point2d map_origin)
{
double meter_value_obj_y = (pixel_valued_object_y * map_resolution) + map_origin.y;
return meter_value_obj_y;
}
// execution callback of the action server: plans the room visiting sequence for the given goal
void findRoomSequenceWithCheckpointsServer(const ipa_building_msgs::FindRoomSequenceWithCheckpointsGoalConstPtr &goal);
// returns the index of the position in 'positions' with the shortest A* path from 'start_coordinate'
size_t getNearestLocation(const cv::Mat& floor_plan, const cv::Point start_coordinate, const std::vector<cv::Point>& positions,
const double map_downsampling_factor, const double robot_radius, const double map_resolution);
// publishes the planned sequence as a MarkerArray for RViz visualization
void publishSequenceVisualization(const std::vector<ipa_building_msgs::RoomSequence>& room_sequences, const std::vector<cv::Point>& room_centers,
std::vector< std::vector<int> >& cliques, const double map_resolution, const cv::Point2d& map_origin);
// callback function for dynamic reconfigure
void dynamic_reconfigure_callback(ipa_building_navigation::BuildingNavigationConfig &config, uint32_t level);
dynamic_reconfigure::Server<ipa_building_navigation::BuildingNavigationConfig> room_sequence_planning_dynamic_reconfigure_server_;
// params
int tsp_solver_; // TSP solver: 1 = Nearest Neighbor, 2 = Genetic solver, 3 = Concorde solver
int problem_setting_; // problem setting of the sequence planning problem
// 1 = SimpleOrderPlanning (plan the optimal order of a simple set of locations)
// 2 = CheckpointBasedPlanning (two-stage planning that creates local cliques of locations (= checkpoints) and determines
// the optimal order through the members of each clique as well as the optimal order through the cliques)
int planning_method_; // Method of planning the sequence: 1 = drag trolley if next room is too far away, 2 = calculate cliques as roomgroups with trolleypositions
double max_clique_path_length_; // max A* path length between two rooms that are assigned to the same clique, in [m]
double map_downsampling_factor_; // the map may be downsampled during computations (e.g. of A* path lengths) in order to speed up the algorithm, range of the factor [0 < factor <= 1], if set to 1 the map will have original size, if set to 0 the algorithm won't work
bool check_accessibility_of_rooms_; // boolean to tell the sequence planner if it should check the given room centers for accessibility from the starting position
bool return_sequence_map_; // boolean to tell the server if the map with the sequence drawn in should be returned
int max_clique_size_; // maximal number of nodes belonging to one clique, when planning trolley positions
bool display_map_; // displays the map with paths upon service call (only if return_sequence_map=true)
};

View File

@ -0,0 +1,9 @@
<?xml version="1.0"?>
<launch>
<!-- starts the room sequence planning action server and loads its parameter file; respawns on crash after 2s -->
<node ns="room_sequence_planning" pkg="ipa_building_navigation" type="room_sequence_planning_server" name="room_sequence_planning_server" output="screen" respawn="true" respawn_delay="2">
<rosparam command="load" file="$(find ipa_building_navigation)/ros/launch/room_sequence_planning_action_server_params.yaml"/>
</node>
</launch>

View File

@ -0,0 +1,53 @@
# TSP Solver
# ==========
# indicates which TSP solver should be used
# 1 = Nearest Neighbor
# 2 = Genetic solver
# 3 = Concorde solver
# int
tsp_solver: 3
# Problem Setting
# ===============
# problem setting of the sequence planning problem
# 1 = SimpleOrderPlanning (plan the optimal order of a simple set of locations)
# 2 = CheckpointBasedPlanning (two-stage planning that creates local cliques of locations (= checkpoints) and determines
# the optimal order through the members of each clique as well as the optimal order through the cliques)
# int
problem_setting: 2
# Checkpoint-based Sequence Planning Specifics
# ============================================
# method that is used to plan the trolley positions (only applies to CheckpointBasedPlanning (2) problem_setting)
# 1 = drag the trolley if it is too far away from next room
# 2 = put rooms together in groups and calculate a corresponding trolley position for each group
# int
planning_method: 2
# max A* path length between two rooms that are assigned to the same clique, in [m] (only applies to CheckpointBasedPlanning (2) problem_setting)
# double
max_clique_path_length: 1200.0
# maximal nodes in one clique for one trolley position (only applies to CheckpointBasedPlanning (2) problem_setting)
# int
maximum_clique_size: 9001
# General Settings
# ================
# the map may be downsampled during computations (e.g. of A* path lengths) in order to speed up the algorithm,
# range of the factor [0 < factor <= 1]
# if set to 1 the map will have original size, if set to 0 the algorithm won't work
# double
map_downsampling_factor: 0.25
# boolean to tell the sequence planner if it should check the given room centers for accessibility from the starting position
# bool
check_accessibility_of_rooms: true
# boolean to tell the server if the map with the sequence drawn in should be returned
# bool
return_sequence_map: false
# displays the map with paths upon service call (only if return_sequence_map=true)
# bool
display_map: false

View File

@ -0,0 +1,216 @@
#include "ros/ros.h"
#include <ros/package.h>
#include <iostream>
#include <iomanip>
#include <queue>
#include <string>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <fstream>
#include <time.h>
#include <sys/time.h>
#include <ipa_building_navigation/A_star_pathplanner.h>
#include <ipa_building_navigation/contains.h>
#include <ipa_building_navigation/nearest_neighbor_TSP.h>
#include <ipa_building_navigation/genetic_TSP.h>
#include <ipa_building_navigation/concorde_TSP.h>
#include <ipa_building_navigation/distance_matrix.h>
// Evaluation program for the three TSP solvers (nearest neighbor, genetic, Concorde).
// For increasing numbers of randomly sampled nodes on an empty map, it solves the TSP with
// each solver, draws the resulting tours into images, and logs path lengths and computation
// times into text files under tsp_evaluation/.
int main(int argc, char **argv)
{
srand(time(NULL));
ros::init(argc, argv, "TSPevaluation");
ros::NodeHandle nh;
//define parameters to describe the graph (square map of dimension x dimension pixels)
const int dimension = 600;
//define parameters for solving the TSP
double downsampling = 0.25;
double robot_radius = 0.;
double map_resolution = 0.05;
int start_node = 0;
//saving variables for pathlengths
// NOTE(review): these three vectors are never filled or read below — results are written
// to the stringstreams instead; candidates for removal
std::vector<double> nearest_pathlengths;
std::vector<double> genetic_pathlengths;
std::vector<double> concorde_pathlengths;
//create empty (all white = free space) map to randomly generate points in it
cv::Mat map(dimension, dimension, CV_8UC1, cv::Scalar(255));
//path to save the results
const std::string data_storage_path = "tsp_evaluation/";
const std::string upper_command = "mkdir -p " + data_storage_path;
int return_value = system(upper_command.c_str());
//stringstreams to collect the path lengths and computation times for all runs
std::stringstream pathlength_output;
std::stringstream times_output;
for(int number_of_nodes = 10; number_of_nodes <= 210; number_of_nodes += 20)
{
// one subfolder per problem size, e.g. "tsp_evaluation/10nodes/"
std::stringstream folder_name;
folder_name << number_of_nodes << "nodes";
const std::string evaluation_path = data_storage_path + folder_name.str() + "/";
const std::string command = "mkdir -p " + evaluation_path;
int return_value = system(command.c_str());
//generate random points as nodes for the TSP solvers (30 px margin from the map border)
int point_counter = 0;
std::vector<cv::Point> nodes;
std::cout << "getting random Points" << std::endl;
do
{
int rand_x = (rand() % (dimension-30)) + 30;
int rand_y = (rand() % (dimension-30)) + 30;
// only accept points on free space (white pixels); duplicates are possible
if(map.at<unsigned char>(rand_y, rand_x) == 255)
{
nodes.push_back(cv::Point(rand_x, rand_y));
point_counter++;
}
}while(point_counter < number_of_nodes);
NearestNeighborTSPSolver nearest_solver;
GeneticTSPSolver genetic_solver;
ConcordeTSPSolver concorde_solver;
//solve the TSPs and save the calculation time and orders
cv::Mat distance_matrix;
struct timespec t0, t1, t2, t3; // timestamps around each solver call
//construct the A*-based distance matrix once and reuse it for all three solvers
std::cout << "constructing distance matrix" << std::endl;
AStarPlanner planner;
DistanceMatrix distance_matrix_computation;
distance_matrix_computation.constructDistanceMatrix(distance_matrix, map, nodes, downsampling, robot_radius, map_resolution, planner);
std::cout << "solving TSPs" << std::endl;
clock_gettime(CLOCK_MONOTONIC, &t0);
std::vector<int> nearest_order = nearest_solver.solveNearestTSP(distance_matrix, start_node);
std::cout << "solved nearest TSP" << std::endl;
clock_gettime(CLOCK_MONOTONIC, &t1);
std::vector<int> genetic_order = genetic_solver.solveGeneticTSP(distance_matrix, start_node);
std::cout << "solved genetic TSP" << std::endl;
clock_gettime(CLOCK_MONOTONIC, &t2);
std::vector<int> concorde_order = concorde_solver.solveConcordeTSP(distance_matrix, start_node);
std::cout << "solved concorde TSP" << std::endl;
clock_gettime(CLOCK_MONOTONIC, &t3);
std::cout << "number of nodes in the paths: " << nearest_order.size() << " " << genetic_order.size() << " " << concorde_order.size() << std::endl;
//create color maps to draw the paths in (OpenCV 4 renamed the conversion constants)
cv::Mat nearest_map = map.clone();
#if CV_MAJOR_VERSION<=3
cv::cvtColor(nearest_map, nearest_map, CV_GRAY2BGR);
#else
cv::cvtColor(nearest_map, nearest_map, cv::COLOR_GRAY2BGR);
#endif
cv::Mat genetic_map = nearest_map.clone();
cv::Mat concorde_map = nearest_map.clone();
//draw the order into the maps
// draw the start node as red
std::cout << "starting to draw the maps" << std::endl;
#if CV_MAJOR_VERSION<=3
cv::circle(nearest_map, nodes[nearest_order[0]], 2, CV_RGB(255,0,0), CV_FILLED);
cv::circle(genetic_map, nodes[genetic_order[0]], 2, CV_RGB(255,0,0), CV_FILLED);
cv::circle(concorde_map, nodes[concorde_order[0]], 2, CV_RGB(255,0,0), CV_FILLED);
#else
cv::circle(nearest_map, nodes[nearest_order[0]], 2, CV_RGB(255,0,0), cv::FILLED);
cv::circle(genetic_map, nodes[genetic_order[0]], 2, CV_RGB(255,0,0), cv::FILLED);
cv::circle(concorde_map, nodes[concorde_order[0]], 2, CV_RGB(255,0,0), cv::FILLED);
#endif
// draw the tour edges (light blue lines) and the remaining nodes (black dots)
for(size_t i = 1; i < nearest_order.size(); ++i)
{
cv::line(nearest_map, nodes[nearest_order[i-1]], nodes[nearest_order[i]], CV_RGB(128,128,255), 1);
cv::line(genetic_map, nodes[genetic_order[i-1]], nodes[genetic_order[i]], CV_RGB(128,128,255), 1);
cv::line(concorde_map, nodes[concorde_order[i-1]], nodes[concorde_order[i]], CV_RGB(128,128,255), 1);
#if CV_MAJOR_VERSION<=3
cv::circle(nearest_map, nodes[nearest_order[i]], 2, CV_RGB(0,0,0), CV_FILLED);
cv::circle(genetic_map, nodes[genetic_order[i]], 2, CV_RGB(0,0,0), CV_FILLED);
cv::circle(concorde_map, nodes[concorde_order[i]], 2, CV_RGB(0,0,0), CV_FILLED);
#else
cv::circle(nearest_map, nodes[nearest_order[i]], 2, CV_RGB(0,0,0), cv::FILLED);
cv::circle(genetic_map, nodes[genetic_order[i]], 2, CV_RGB(0,0,0), cv::FILLED);
cv::circle(concorde_map, nodes[concorde_order[i]], 2, CV_RGB(0,0,0), cv::FILLED);
#endif
}
//draw line back to start to close the tour
cv::line(nearest_map, nodes[nearest_order[0]], nodes[nearest_order.back()], CV_RGB(128,128,255), 1);
cv::line(genetic_map, nodes[genetic_order[0]], nodes[genetic_order.back()], CV_RGB(128,128,255), 1);
cv::line(concorde_map, nodes[concorde_order[0]], nodes[concorde_order.back()], CV_RGB(128,128,255), 1);
//save the tour images
std::string nearest_path = evaluation_path + "nearest_order.png";
std::string genetic_path = evaluation_path + "genetic_order.png";
std::string concorde_path = evaluation_path + "concorde_order.png";
cv::imwrite(nearest_path.c_str(), nearest_map);
cv::imwrite(genetic_path.c_str(), genetic_map);
cv::imwrite(concorde_path.c_str(), concorde_map);
std::cout << "saved the maps" << std::endl;
//get the pathlengths for each solver (in the same units as the distance matrix entries)
double nearest_pathlength= 0;
double genetic_pathlength = 0;
double concorde_pathlength = 0;
//sum up the lengths of consecutive tour edges
std::cout << "starting to calculate the pathlengths " << distance_matrix.cols << std::endl;
for(size_t i = 1; i < nearest_order.size(); ++i)
{
nearest_pathlength += distance_matrix.at<double>(nearest_order[i-1], nearest_order[i]);
genetic_pathlength += distance_matrix.at<double>(genetic_order[i-1], genetic_order[i]);
concorde_pathlength += distance_matrix.at<double>(concorde_order[i-1], concorde_order[i]);
std::cout << "done node: " << (int) i << std::endl;
}
//add the closing edge from the last node back to the start node
std::cout << "doing last paths. Indexes: " << nearest_order.back() << " " << genetic_order.back() << " " << concorde_order.back() << std::endl;
int last_index = nearest_order.back();
nearest_pathlength += distance_matrix.at<double>(nearest_order[0], last_index);
std::cout << "finished nearest path" << std::endl;
last_index = genetic_order.back();
genetic_pathlength += distance_matrix.at<double>(genetic_order[0], last_index);
std::cout << "finished genetic path." << std::endl;
last_index = concorde_order.back();
concorde_pathlength += distance_matrix.at<double>(concorde_order[0], last_index);
std::cout << "finished concorde path" << std::endl;
//calculate computation times in seconds from the monotonic timestamps
double nearest_time = (t1.tv_sec - t0.tv_sec) + (double) (t1.tv_nsec - t0.tv_nsec) * 1e-9;
double genetic_time = (t2.tv_sec - t1.tv_sec) + (double) (t2.tv_nsec - t1.tv_nsec) * 1e-9;
double concorde_time = (t3.tv_sec - t2.tv_sec) + (double) (t3.tv_nsec - t2.tv_nsec) * 1e-9;
//append the pathlengths and computation times for this problem size
pathlength_output << "number of nodes: " << number_of_nodes << std::endl
<< nearest_pathlength << std::endl << genetic_pathlength << std::endl << concorde_pathlength << std::endl << std::endl;
times_output << "number of nodes: " << number_of_nodes << std::endl
<< nearest_time << std::endl << genetic_time << std::endl << concorde_time << std::endl << std::endl;
}
// write the collected results to disk
std::string pathlength_log_filename = data_storage_path + "pathlengths.txt";
std::ofstream pathlength_file(pathlength_log_filename.c_str(), std::ios::out);
if (pathlength_file.is_open()==true)
pathlength_file << pathlength_output.str();
pathlength_file.close();
std::cout << "finished to save the pathlengths" << std::endl;
std::string genetic_log_filename = data_storage_path + "times.txt";
std::ofstream genetic_file(genetic_log_filename.c_str(), std::ios::out);
if (genetic_file.is_open()==true)
genetic_file << times_output.str();
genetic_file.close();
std::cout << "finished to save the times" << std::endl;
return 0;
}

View File

@ -0,0 +1,142 @@
#include <ros/ros.h>
#include <ros/package.h>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cv_bridge/cv_bridge.h>
#include <actionlib/client/simple_action_client.h>
#include <actionlib/client/terminal_state.h>
#include <ipa_building_msgs/MapSegmentationAction.h>
#include <ipa_building_msgs/FindRoomSequenceWithCheckpointsAction.h>
#include <ipa_building_navigation/dynamic_reconfigure_client.h>
int main(int argc, char **argv)
{
ros::init(argc, argv, "room_sequence_planning_client");
ros::NodeHandle nh;
std::vector< std::string > map_names;
map_names.push_back("lab_ipa.png");
// map_names.push_back("freiburg_building101.png");
map_names.push_back("freiburg_building52.png");
map_names.push_back("freiburg_building79.png");
// map_names.push_back("intel_map.png");
// map_names.push_back("lab_a.png");
// map_names.push_back("lab_b.png");
map_names.push_back("lab_c.png");
map_names.push_back("lab_d.png");
// map_names.push_back("lab_e.png");
for (size_t image_index = 0; image_index<map_names.size(); ++image_index)
{
std::string image_filename = ros::package::getPath("ipa_room_segmentation") + "/common/files/test_maps/" + map_names[image_index];
cv::Mat map = cv::imread(image_filename.c_str(), 0);
//make non-white pixels black
for (int y = 0; y < map.rows; y++)
{
for (int x = 0; x < map.cols; x++)
{
//find not reachable regions and make them black
if (map.at<unsigned char>(y, x) != 255)
{
map.at<unsigned char>(y, x) = 0;
}
}
}
sensor_msgs::Image map_msg;
cv_bridge::CvImage cv_image;
// cv_image.header.stamp = ros::Time::now();
cv_image.encoding = "mono8";
cv_image.image = map;
cv_image.toImageMsg(map_msg);
// create the action client --> "name of server"
// true causes the client to spin its own thread
actionlib::SimpleActionClient<ipa_building_msgs::MapSegmentationAction> ac_seg("/room_segmentation/room_segmentation_server", true);
ROS_INFO("Waiting for action server '/room_segmentation/room_segmentation_server' to start.");
// wait for the action server to start
ac_seg.waitForServer(); //will wait for infinite time
// set algorithm parameters
ROS_INFO("Action server started, sending goal.");
DynamicReconfigureClient drc_seg(nh, "/room_segmentation/room_segmentation_server/set_parameters", "/room_segmentation/room_segmentation_server/parameter_updates");
drc_seg.setConfig("room_segmentation_algorithm", 1);
// send a goal to the action
ipa_building_msgs::MapSegmentationGoal goal_seg;
goal_seg.input_map = map_msg;
goal_seg.map_origin.position.x = 0;
goal_seg.map_origin.position.y = 0;
goal_seg.map_resolution = 0.05;
goal_seg.return_format_in_meter = false;
goal_seg.return_format_in_pixel = true;
ac_seg.sendGoal(goal_seg);
//wait for the action to return
bool finished_before_timeout = ac_seg.waitForResult(ros::Duration(300.0));
if (finished_before_timeout == false)
{
ROS_ERROR("Timeout on room segmentation.");
return -1;
}
ipa_building_msgs::MapSegmentationResultConstPtr result_seg = ac_seg.getResult();
ROS_INFO("Finished segmentation successfully!");
// solve sequence problem
actionlib::SimpleActionClient<ipa_building_msgs::FindRoomSequenceWithCheckpointsAction> ac_seq("/room_sequence_planning/room_sequence_planning_server", true);
ROS_INFO("Waiting for action server '/room_sequence_planning/room_sequence_planning_server' to start.");
// wait for the action server to start
ac_seq.waitForServer(); //will wait for infinite time
// set algorithm parameters
ROS_INFO("Action server started, sending goal_seq.");
DynamicReconfigureClient drc_seq(nh, "/room_sequence_planning/room_sequence_planning_server/set_parameters", "/room_sequence_planning/room_sequence_planning_server/parameter_updates");
drc_seq.setConfig("planning_method", 1);
drc_seq.setConfig("tsp_solver", 3);
drc_seq.setConfig("return_sequence_map", true);
drc_seq.setConfig("display_map", true);
// send a goal_seg to the action
ipa_building_msgs::FindRoomSequenceWithCheckpointsGoal goal_seq;
goal_seq.input_map = map_msg;
goal_seq.map_resolution = goal_seg.map_resolution;
goal_seq.map_origin.position.x = goal_seg.map_origin.position.x;
goal_seq.map_origin.position.y = goal_seg.map_origin.position.y;
goal_seq.room_information_in_pixel = result_seg->room_information_in_pixel;
goal_seq.robot_radius = 0.3;
cv::Mat map_eroded;
cv::erode(map, map_eroded, cv::Mat(), cv::Point(-1,-1), goal_seq.robot_radius/goal_seq.map_resolution+2);
cv::Mat distance_map; //variable for the distance-transformed map, type: CV_32FC1
cv::distanceTransform(map_eroded, distance_map, CV_DIST_L2, 5);
cv::convertScaleAbs(distance_map, distance_map); // conversion to 8 bit image
bool robot_start_coordinate_set = false;
for (int v=0; v<map_eroded.rows && robot_start_coordinate_set==false; ++v)
for (int u=0; u<map_eroded.cols && robot_start_coordinate_set==false; ++u)
if (map_eroded.at<uchar>(v,u) != 0 && distance_map.at<uchar>(v,u) > 15)
{
goal_seq.robot_start_coordinate.position.x = u*goal_seq.map_resolution + goal_seg.map_origin.position.x;
goal_seq.robot_start_coordinate.position.y = v*goal_seq.map_resolution + goal_seg.map_origin.position.y;
robot_start_coordinate_set = true;
}
ac_seq.sendGoal(goal_seq);
//wait for the action to return
finished_before_timeout = ac_seq.waitForResult(ros::Duration(300.0));
if (finished_before_timeout == false)
{
ROS_ERROR("Timeout on room sequence planning.");
return -1;
}
ROS_INFO("Finished sequence planning successfully!");
}
//exit
return 0;
}

View File

@ -0,0 +1,901 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2015 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_building_navigation
*
* \author
* Author: Florian Jordan
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 08.2015
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#include <ipa_building_navigation/room_sequence_planning_action_server.h>
// Constructor: wires up the visualization publisher and the action server, registers the
// dynamic reconfigure callback, and reads all planner parameters from the private parameter
// namespace, printing each effective value. Invalid enum values are only reported via
// ROS_ERROR; the server still starts.
RoomSequencePlanningServer::RoomSequencePlanningServer(ros::NodeHandle nh, std::string name_of_the_action) :
node_handle_(nh),
room_sequence_with_checkpoints_server_(node_handle_, name_of_the_action, boost::bind(&RoomSequencePlanningServer::findRoomSequenceWithCheckpointsServer, this, _1), false),
action_name_(name_of_the_action)
{
// setup publishers
room_sequence_visualization_pub_ = nh.advertise<visualization_msgs::MarkerArray>("room_sequence_marker", 1);
// start action server
room_sequence_with_checkpoints_server_.start();
// dynamic reconfigure
room_sequence_planning_dynamic_reconfigure_server_.setCallback(boost::bind(&RoomSequencePlanningServer::dynamic_reconfigure_callback, this, _1, _2));
// Parameters
std::cout << "\n------------------------------------\nRoom Sequence Planner Parameters:\n------------------------------------\n";
// TSP solver (1 = nearest neighbor, 2 = genetic, 3 = Concorde; default Concorde)
node_handle_.param("tsp_solver", tsp_solver_, (int)TSP_CONCORDE);
std::cout << "room_sequence_planning/tsp_solver = " << tsp_solver_ << std::endl;
if (tsp_solver_ == TSP_NEAREST_NEIGHBOR)
ROS_INFO("You have chosen the Nearest Neighbor TSP method.");
else if (tsp_solver_ == TSP_GENETIC)
ROS_INFO("You have chosen the Genetic TSP method.");
else if (tsp_solver_ == TSP_CONCORDE)
ROS_INFO("You have chosen the Concorde TSP solver.");
else
ROS_ERROR("Undefined TSP Solver.");
// problem setting (1 = simple order planning, 2 = checkpoint-based planning)
node_handle_.param("problem_setting", problem_setting_, 2);
std::cout << "room_sequence_planning/problem_setting = " << problem_setting_ << std::endl;
if (problem_setting_ == 1)
ROS_INFO("You have chosen the SimpleOrderPlanning setting.");
else if (problem_setting_ == 2)
ROS_INFO("You have chosen the CheckpointBasedPlanning setting.");
else
ROS_ERROR("Undefined problem setting.");
// checkpoint-based sequence planning specifics
if (problem_setting_ == 1)
{
// simple order planning is mapped onto degenerate cliques of size 1
planning_method_ = 1;
max_clique_path_length_ = 0.; // always split into cliques --> each clique contains only one room
max_clique_size_ = 1; // always split into cliques --> each clique contains only one room
}
else if (problem_setting_ == 2)
{
node_handle_.param("planning_method", planning_method_, 2);
std::cout << "room_sequence_planning/planning_method = " << planning_method_ << std::endl;
if (planning_method_ == 1)
ROS_INFO("You have chosen the Trolley dragging method.");
else if (planning_method_ == 2)
ROS_INFO("You have chosen the room group planning method.");
else
ROS_ERROR("Undefined planning method.");
node_handle_.param("max_clique_path_length", max_clique_path_length_, 12.0);
std::cout << "room_sequence_planning/max_clique_path_length = " << max_clique_path_length_ << std::endl;
node_handle_.param("maximum_clique_size", max_clique_size_, 9001);
std::cout << "room_sequence_planning/maximum_clique_size = " << max_clique_size_ << std::endl;
}
else
ROS_ERROR("Undefined problem setting.");
// general settings
node_handle_.param("map_downsampling_factor", map_downsampling_factor_, 0.25);
std::cout << "room_sequence_planning/map_downsampling_factor = " << map_downsampling_factor_ << std::endl;
node_handle_.param("check_accessibility_of_rooms", check_accessibility_of_rooms_, true);
std::cout << "room_sequence_planning/check_accessibility_of_rooms = " << check_accessibility_of_rooms_ << std::endl;
node_handle_.param("return_sequence_map", return_sequence_map_, false);
std::cout << "room_sequence_planning/return_sequence_map = " << return_sequence_map_ << std::endl;
node_handle_.param("display_map", display_map_, false);
std::cout << "room_sequence_planning/display_map = " << display_map_ << std::endl;
}
// callback function for dynamic reconfigure
// Dynamic reconfigure callback: overwrites all planner parameters with the values from the
// reconfigure request and prints the effective configuration. Mirrors the parameter logic of
// the constructor: for problem_setting 1 the clique parameters are forced to degenerate
// single-room cliques; invalid enum values are only reported via ROS_ERROR.
void RoomSequencePlanningServer::dynamic_reconfigure_callback(ipa_building_navigation::BuildingNavigationConfig &config, uint32_t level)
{
std::cout << "######################################################################################" << std::endl;
std::cout << "Dynamic reconfigure request:" << std::endl;
// TSP solver
tsp_solver_ = config.tsp_solver;
std::cout << "room_sequence_planning/tsp_solver = " << tsp_solver_ << std::endl;
if (tsp_solver_ == TSP_NEAREST_NEIGHBOR)
ROS_INFO("You have chosen the Nearest Neighbor TSP method.");
else if (tsp_solver_ == TSP_GENETIC)
ROS_INFO("You have chosen the Genetic TSP method.");
else if (tsp_solver_ == TSP_CONCORDE)
ROS_INFO("You have chosen the Concorde TSP solver.");
else
ROS_ERROR("Undefined TSP Solver.");
// problem setting
problem_setting_ = config.problem_setting;
std::cout << "room_sequence_planning/problem_setting = " << problem_setting_ << std::endl;
if (problem_setting_ == 1)
ROS_INFO("You have chosen the SimpleOrderPlanning setting.");
else if (problem_setting_ == 2)
ROS_INFO("You have chosen the CheckpointBasedPlanning setting.");
else
ROS_ERROR("Undefined problem setting.");
// checkpoint-based sequence planning specifics
if (problem_setting_ == 1)
{
// simple order planning is mapped onto degenerate cliques of size 1
planning_method_ = 1;
max_clique_path_length_ = 0.; // always split into cliques --> each clique contains only one room
max_clique_size_ = 1; // always split into cliques --> each clique contains only one room
}
else if (problem_setting_ == 2)
{
planning_method_ = config.planning_method;
std::cout << "room_sequence_planning/planning_method = " << planning_method_ << std::endl;
if (planning_method_ == 1)
ROS_INFO("You have chosen the Trolley dragging method.");
else if (planning_method_ == 2)
ROS_INFO("You have chosen the room group planning method.");
else
ROS_ERROR("Undefined planning method.");
max_clique_path_length_ = config.max_clique_path_length;
std::cout << "room_sequence_planning/max_clique_path_length = " << max_clique_path_length_ << std::endl;
max_clique_size_ = config.maximum_clique_size;
std::cout << "room_sequence_planning/maximum_clique_size = " << max_clique_size_ << std::endl;
}
else
ROS_ERROR("Undefined problem setting.");
// general settings
map_downsampling_factor_ = config.map_downsampling_factor;
std::cout << "room_sequence_planning/map_downsampling_factor = " << map_downsampling_factor_ << std::endl;
check_accessibility_of_rooms_ = config.check_accessibility_of_rooms;
std::cout << "room_sequence_planning/check_accessibility_of_rooms = " << check_accessibility_of_rooms_ << std::endl;
return_sequence_map_ = config.return_sequence_map;
std::cout << "room_sequence_planning/return_sequence_map = " << return_sequence_map_ << std::endl;
display_map_ = config.display_map;
std::cout << "room_sequence_planning/display_map = " << display_map_ << std::endl;
}
// Action server callback for ipa_building_msgs::FindRoomSequenceWithCheckpoints.
// Converts the received occupancy map into OpenCV format, optionally discards rooms that are
// not reachable from the robot start position, and computes an ordered visiting sequence of
// the room centers grouped into cliques, with one checkpoint ("trolley") position per clique.
// Two planning methods are supported (selected by planning_method_):
//   1 = "drag": solve one global TSP over all room centers, then cut the resulting sequence
//       into cliques whenever the A*-distance to the current trolley position exceeds
//       max_clique_path_length_ (in pixels) or the clique exceeds max_clique_size_;
//   2 = "grouping": group rooms via a set-cover solver, place a trolley position per group,
//       solve a TSP over the trolley positions and then one TSP per clique.
// The TSP backend is chosen via tsp_solver_ (nearest neighbor / genetic / Concorde).
// On success the ordered checkpoints (and, if return_sequence_map_ is set, a drawn sequence
// map) are returned through the action result; on failure the action is aborted.
void RoomSequencePlanningServer::findRoomSequenceWithCheckpointsServer(const ipa_building_msgs::FindRoomSequenceWithCheckpointsGoalConstPtr &goal)
{
ROS_INFO("********Sequence planning started************");
// converting the map msg in cv format
cv_bridge::CvImagePtr cv_ptr_obj;
cv_ptr_obj = cv_bridge::toCvCopy(goal->input_map, sensor_msgs::image_encodings::MONO8);
cv::Mat floor_plan = cv_ptr_obj->image;
//get map origin and convert robot start coordinate to [pixel]
const cv::Point2d map_origin(goal->map_origin.position.x, goal->map_origin.position.y);
cv::Point robot_start_coordinate((goal->robot_start_coordinate.position.x - map_origin.x)/goal->map_resolution, (goal->robot_start_coordinate.position.y - map_origin.y)/goal->map_resolution);
//create a star pathplanner to plan a path from Point A to Point B in a given gridmap
AStarPlanner a_star_path_planner;
//get room centers and check how many of them are reachable
cv::Mat downsampled_map_for_accessibility_checking;
if(check_accessibility_of_rooms_ == true)
{
// downsample the map once so each accessibility query is cheaper
a_star_path_planner.downsampleMap(floor_plan, downsampled_map_for_accessibility_checking, map_downsampling_factor_, goal->robot_radius, goal->map_resolution);
}
std::vector<cv::Point> room_centers; // collect the valid, accessible room_centers
std::map<size_t, size_t> mapping_room_centers_index_to_original_room_index; // maps the index of each entry in room_centers to the original index in goal->room_information_in_pixel
for (size_t i=0; i<goal->room_information_in_pixel.size(); ++i)
{
cv::Point current_center(goal->room_information_in_pixel[i].room_center.x, goal->room_information_in_pixel[i].room_center.y);
if(check_accessibility_of_rooms_ == true)
{
std::cout << "checking for accessibility of rooms" << std::endl;
double length = a_star_path_planner.planPath(floor_plan, downsampled_map_for_accessibility_checking, robot_start_coordinate, current_center, map_downsampling_factor_, 0., goal->map_resolution);
// path lengths >= 1e9 are used as a "no path found" sentinel -> room is treated as inaccessible
if(length < 1e9)
{
room_centers.push_back(current_center);
mapping_room_centers_index_to_original_room_index[room_centers.size()-1] = i;
std::cout << "room " << i << " added, center: " << current_center << std::endl;
}
else
std::cout << "room " << i << " not accessible, center: " << current_center << std::endl;
}
else
{
room_centers.push_back(current_center);
}
}
downsampled_map_for_accessibility_checking.release(); //release not anymore needed space
std::cout << "number of reachable roomcenters: " << room_centers.size() << std::endl;
// abort if no room is reachable at all -- the planners below require at least one center
if(room_centers.size() == 0)
{
ROS_ERROR("No given roomcenter reachable from starting position.");
ipa_building_msgs::FindRoomSequenceWithCheckpointsResult action_result;
room_sequence_with_checkpoints_server_.setAborted(action_result);
return;
}
if(planning_method_ > 0 && planning_method_ < 3)
{
if(planning_method_ == 1)
ROS_INFO("You have chosen the drag planning method.");
if(planning_method_ == 2)
ROS_INFO("You have chosen the grouping planning method.");
}
if(tsp_solver_ > 0 && tsp_solver_ < 4)
{
if(tsp_solver_ == TSP_NEAREST_NEIGHBOR)
ROS_INFO("You have chosen the nearest neighbor solver.");
if(tsp_solver_ == TSP_GENETIC)
ROS_INFO("You have chosen the genetic TSP solver.");
if(tsp_solver_ == TSP_CONCORDE)
ROS_INFO("You have chosen the concorde TSP solver.");
}
//saving vectors needed from both planning methods
std::vector<std::vector<int> > cliques;
std::vector<cv::Point> trolley_positions;
std::vector< std::vector<cv::Point> > room_cliques_as_points;
//image container to draw the sequence in if needed
cv::Mat display;
if(planning_method_ == 1) //Drag Trolley if the next room is too far away
{
std::cout << "Maximal cliquedistance [m]: "<< max_clique_path_length_ << " Maximal cliquedistance [Pixel]: "<< max_clique_path_length_/goal->map_resolution << std::endl;
//calculate the index of the best starting position
size_t optimal_start_position = getNearestLocation(floor_plan, robot_start_coordinate, room_centers, map_downsampling_factor_, goal->robot_radius, goal->map_resolution);
//plan the optimal path trough all given rooms
std::vector<int> optimal_room_sequence;
if(tsp_solver_ == TSP_NEAREST_NEIGHBOR) //nearest neighbor TSP solver
{
NearestNeighborTSPSolver nearest_neighbor_tsp_solver;
optimal_room_sequence = nearest_neighbor_tsp_solver.solveNearestTSP(floor_plan, room_centers, map_downsampling_factor_, goal->robot_radius, goal->map_resolution, (int) optimal_start_position);
}
if(tsp_solver_ == TSP_GENETIC) //genetic TSP solver
{
GeneticTSPSolver genetic_tsp_solver;
optimal_room_sequence = genetic_tsp_solver.solveGeneticTSP(floor_plan, room_centers, map_downsampling_factor_, goal->robot_radius, goal->map_resolution, (int) optimal_start_position);
}
if(tsp_solver_ == TSP_CONCORDE) //concorde TSP solver
{
ConcordeTSPSolver concorde_tsp_solver;
optimal_room_sequence = concorde_tsp_solver.solveConcordeTSP(floor_plan, room_centers, map_downsampling_factor_, goal->robot_radius, goal->map_resolution, (int) optimal_start_position);
}
//put the rooms that are close enough together into the same clique, if a new clique is needed put the first roomcenter as a trolleyposition
std::vector<int> current_clique;
trolley_positions.push_back(robot_start_coordinate); //trolley stands close to robot on startup
//sample down map one time to reduce calculation time
cv::Mat downsampled_map;
a_star_path_planner.downsampleMap(floor_plan, downsampled_map, map_downsampling_factor_, goal->robot_radius, goal->map_resolution);
// NOTE(review): one_by_downsampling_factor is computed but never used in this scope
const double one_by_downsampling_factor = 1 / map_downsampling_factor_;
for(size_t i=0; i<optimal_room_sequence.size(); ++i)
{
double distance_to_trolley = a_star_path_planner.planPath(floor_plan, downsampled_map, trolley_positions.back(), room_centers[optimal_room_sequence[i]], map_downsampling_factor_, 0, goal->map_resolution);
if (distance_to_trolley <= max_clique_path_length_/goal->map_resolution && current_clique.size() < max_clique_size_) //expand current clique by next roomcenter
{
current_clique.push_back(optimal_room_sequence[i]);
}
else //start new clique and put the old clique into the cliques vector
{
cliques.push_back(current_clique);
current_clique.clear();
current_clique.push_back(optimal_room_sequence[i]);
trolley_positions.push_back(room_centers[optimal_room_sequence[i]]);
}
}
//add last clique
cliques.push_back(current_clique);
//fill vector of cv::Point for display purpose
for(size_t i=0; i<cliques.size(); ++i)
{
std::vector<cv::Point> current_clique;
for(size_t j=0; j<cliques[i].size(); ++j)
{
current_clique.push_back(room_centers[cliques[i][j]]);
}
room_cliques_as_points.push_back(current_clique);
}
// draw the planned sequence into a copy of the map (trolley positions in blue/red, rooms in green)
if(return_sequence_map_ == true)
{
cv::cvtColor(floor_plan, display, CV_GRAY2BGR);
for (size_t t=0; t<trolley_positions.size(); ++t)
{
// trolley positions + connections
if (t>0)
{
#if CV_MAJOR_VERSION<=3
cv::circle(display, trolley_positions[t], 5, CV_RGB(0,0,255), CV_FILLED);
#else
cv::circle(display, trolley_positions[t], 5, CV_RGB(0,0,255), cv::FILLED);
#endif
cv::line(display, trolley_positions[t], trolley_positions[t-1], CV_RGB(128,128,255), 1);
}
else
{
#if CV_MAJOR_VERSION<=3
cv::circle(display, trolley_positions[t], 5, CV_RGB(255,0,0), CV_FILLED);
#else
cv::circle(display, trolley_positions[t], 5, CV_RGB(255,0,0), cv::FILLED);
#endif
}
// room positions and connections
for (size_t r=0; r<cliques[t].size(); ++r)
{
#if CV_MAJOR_VERSION<=3
cv::circle(display, room_cliques_as_points[t][r], 3, CV_RGB(0,255,0), CV_FILLED);
#else
cv::circle(display, room_cliques_as_points[t][r], 3, CV_RGB(0,255,0), cv::FILLED);
#endif
if (r==0)
cv::line(display, trolley_positions[t], room_cliques_as_points[t][r], CV_RGB(255,0,0), 1);
else
{
if(r==cliques[t].size()-1)
cv::line(display, room_cliques_as_points[t][r], trolley_positions[t], CV_RGB(255,0,255), 1);
cv::line(display, room_cliques_as_points[t][r-1], room_cliques_as_points[t][r], CV_RGB(128,255,128), 1);
}
}
}
}
// display
if (display_map_ == true && return_sequence_map_ == true)
{
cv::imshow("sequence planning", display);
cv::waitKey();
}
}
else if(planning_method_ == 2) //calculate roomgroups and corresponding trolley positions
{
std::cout << "Maximal cliquedistance [m]: "<< max_clique_path_length_ << " Maximal cliquedistance [Pixel]: "<< max_clique_path_length_/goal->map_resolution << std::endl;
std::cout << "finding trolley positions" << std::endl;
// 1. determine cliques of rooms
SetCoverSolver set_cover_solver;
cliques = set_cover_solver.solveSetCover(floor_plan, room_centers, map_downsampling_factor_, goal->robot_radius, goal->map_resolution, max_clique_path_length_/goal->map_resolution, max_clique_size_);
// 2. determine trolley position within each clique (same indexing as in cliques)
TrolleyPositionFinder trolley_position_finder;
trolley_positions = trolley_position_finder.findTrolleyPositions(floor_plan, cliques, room_centers, map_downsampling_factor_, goal->robot_radius, goal->map_resolution);
std::cout << "Trolley positions within each clique computed" << std::endl;
// 3. determine optimal sequence of trolley positions (solve TSP problem)
//	a) find nearest trolley location to current robot location
//	b) solve the TSP for the trolley positions
// reduce image size already here to avoid resizing in the planner each time
size_t optimal_trolley_start_position = getNearestLocation(floor_plan, robot_start_coordinate, trolley_positions, map_downsampling_factor_, goal->robot_radius, goal->map_resolution);
//solve the TSP
std::vector<int> optimal_trolley_sequence;
std::cout << "finding optimal trolley sequence. Start: " << optimal_trolley_start_position << std::endl;
if(tsp_solver_ == TSP_NEAREST_NEIGHBOR) //nearest neighbor TSP solver
{
NearestNeighborTSPSolver nearest_neighbor_tsp_solver;
optimal_trolley_sequence = nearest_neighbor_tsp_solver.solveNearestTSP(floor_plan, trolley_positions, map_downsampling_factor_, goal->robot_radius, goal->map_resolution, (int) optimal_trolley_start_position);
}
if(tsp_solver_ == TSP_GENETIC) //genetic TSP solver
{
GeneticTSPSolver genetic_tsp_solver;
optimal_trolley_sequence = genetic_tsp_solver.solveGeneticTSP(floor_plan, trolley_positions, map_downsampling_factor_, goal->robot_radius, goal->map_resolution, (int) optimal_trolley_start_position);
}
if(tsp_solver_ == TSP_CONCORDE) //concorde TSP solver
{
ConcordeTSPSolver concorde_tsp_solver;
optimal_trolley_sequence = concorde_tsp_solver.solveConcordeTSP(floor_plan, trolley_positions, map_downsampling_factor_, goal->robot_radius, goal->map_resolution, (int) optimal_trolley_start_position);
}
// 4. determine optimal sequence of rooms with each clique (solve TSP problem)
//	a) find start point for each clique closest to the trolley position
//	b) create a vector< vector <Point> > to fill the groups into the TSP solver
//	c) solve the TSP for each clique
//fill vector of cv::Point for TSP solver
for(size_t i=0; i<cliques.size(); ++i)
{
std::vector<cv::Point> current_clique;
for(size_t j=0; j<cliques[i].size(); ++j)
{
current_clique.push_back(room_centers[cliques[i][j]]);
}
room_cliques_as_points.push_back(current_clique);
}
std::cout << "cliques: " << std::endl;
for(size_t i = 0; i < cliques.size(); ++i)
{
for(size_t j = 0; j < cliques[i].size(); ++j)
{
std::cout << room_cliques_as_points[i][j] << std::endl;
}
std::cout << std::endl;
}
std::cout << std::endl;
std::cout << "trolley indexes: " << std::endl;
for(size_t i = 0; i < optimal_trolley_sequence.size(); ++i)
{
std::cout << optimal_trolley_sequence[i] << std::endl;
}
std::cout << std::endl;
std::cout << "number of trolley positions: " << trolley_positions.size() << " " << optimal_trolley_sequence.size() << std::endl;
//find starting points
std::vector<size_t> clique_starting_points(cliques.size());
for(size_t i=0; i<cliques.size(); ++i)
clique_starting_points[i] = getNearestLocation(floor_plan, trolley_positions[i], room_cliques_as_points[i], map_downsampling_factor_, goal->robot_radius, goal->map_resolution);
//solve TSPs
std::vector< std::vector <int> > optimal_room_sequences(cliques.size());
// Create the TSP solver objects two times and not one time for the whole function because by this way only two objects has to be
// created and else three
if(tsp_solver_ == TSP_NEAREST_NEIGHBOR) //nearest neighbor TSP solver
{
NearestNeighborTSPSolver nearest_neighbor_tsp_solver;
for(size_t i=0; i<cliques.size(); ++i)
{
optimal_room_sequences[i] = nearest_neighbor_tsp_solver.solveNearestTSP(floor_plan, room_cliques_as_points[i], map_downsampling_factor_, goal->robot_radius, goal->map_resolution, clique_starting_points[i]);
std::cout << "done one clique" << std::endl;
}
}
if(tsp_solver_ == TSP_GENETIC) //genetic TSP solver
{
GeneticTSPSolver genetic_tsp_solver;
for(size_t i=0; i<cliques.size(); ++i)
{
optimal_room_sequences[i] = genetic_tsp_solver.solveGeneticTSP(floor_plan, room_cliques_as_points[i], map_downsampling_factor_, goal->robot_radius, goal->map_resolution, clique_starting_points[i]);
std::cout << "done one clique" << std::endl;
}
}
if(tsp_solver_ == TSP_CONCORDE) //concorde TSP solver
{
ConcordeTSPSolver concorde_tsp_solver;
for(size_t i=0; i<cliques.size(); ++i)
{
optimal_room_sequences[i] = concorde_tsp_solver.solveConcordeTSP(floor_plan, room_cliques_as_points[i], map_downsampling_factor_, goal->robot_radius, goal->map_resolution, clique_starting_points[i]);
std::cout << "done one clique" << std::endl;
}
}
// draw the planned sequence into a copy of the map, iterating cliques in their optimized order
if(return_sequence_map_ == true)
{
cv::cvtColor(floor_plan, display, CV_GRAY2BGR);
for (size_t t=0; t<trolley_positions.size(); ++t)
{
// trolley positions + connections
const int ot = optimal_trolley_sequence[t];
//std::cout << "starting to draw one clique. Position: " << trolley_positions[ot] << std::endl;
if (t>0)
{
#if CV_MAJOR_VERSION<=3
cv::circle(display, trolley_positions[ot], 5, CV_RGB(0,0,255), CV_FILLED);
#else
cv::circle(display, trolley_positions[ot], 5, CV_RGB(0,0,255), cv::FILLED);
#endif
cv::line(display, trolley_positions[ot], trolley_positions[optimal_trolley_sequence[t-1]], CV_RGB(128,128,255), 1);
}
else
{
#if CV_MAJOR_VERSION<=3
cv::circle(display, trolley_positions[ot], 5, CV_RGB(255,0,0), CV_FILLED);
#else
cv::circle(display, trolley_positions[ot], 5, CV_RGB(255,0,0), cv::FILLED);
#endif
}
// room positions and connections
for (size_t r=0; r<optimal_room_sequences[ot].size(); ++r)
{
#if CV_MAJOR_VERSION<=3
cv::circle(display, room_cliques_as_points[ot][optimal_room_sequences[ot][r]], 3, CV_RGB(0,255,0), CV_FILLED);
#else
cv::circle(display, room_cliques_as_points[ot][optimal_room_sequences[ot][r]], 3, CV_RGB(0,255,0), cv::FILLED);
#endif
if (r==0)
cv::line(display, trolley_positions[ot], room_cliques_as_points[ot][optimal_room_sequences[ot][r]], CV_RGB(255,0,0), 1);
else
{
if(r==optimal_room_sequences[ot].size()-1)
cv::line(display, room_cliques_as_points[ot][optimal_room_sequences[ot][r]], trolley_positions[ot], CV_RGB(255,0,255), 1);
cv::line(display, room_cliques_as_points[ot][optimal_room_sequences[ot][r-1]], room_cliques_as_points[ot][optimal_room_sequences[ot][r]], CV_RGB(128,255,128), 1);
}
}
//std::cout << "finished to draw one clique" << std::endl;
}
}
// display
if (display_map_ == true && return_sequence_map_ == true)
{
cv::imshow("sequence planning", display);
cv::waitKey();
}
// reorder cliques, trolley positions and rooms into optimal order
std::vector<std::vector<int> > new_cliques_order(cliques.size());
std::vector<cv::Point> new_trolley_positions(trolley_positions.size());
for (size_t i=0; i<optimal_trolley_sequence.size(); ++i)
{
const int oi = optimal_trolley_sequence[i];
new_trolley_positions[i] = trolley_positions[oi];
new_cliques_order[i].resize(optimal_room_sequences[oi].size());
for (size_t j=0; j<optimal_room_sequences[oi].size(); ++j)
new_cliques_order[i][j] = cliques[oi][optimal_room_sequences[oi][j]];
}
cliques = new_cliques_order;
trolley_positions = new_trolley_positions;
}
else
{
ROS_ERROR("Undefined planning method.");
ipa_building_msgs::FindRoomSequenceWithCheckpointsResult action_result;
room_sequence_with_checkpoints_server_.setAborted(action_result);
return;
}
std::cout << "done sequence planning" << std::endl << std::endl;
// return results
// room indices are translated back to the caller's original indexing via the mapping built above
ipa_building_msgs::FindRoomSequenceWithCheckpointsResult action_result;
std::vector<ipa_building_msgs::RoomSequence> room_sequences(cliques.size());
for(size_t i=0; i<cliques.size(); ++i)
{
//convert signed int to unsigned int (necessary for this msg type)
room_sequences[i].room_indices.resize(cliques[i].size());
for (size_t j=0; j<cliques[i].size(); ++j)
room_sequences[i].room_indices[j] = mapping_room_centers_index_to_original_room_index[cliques[i][j]];
room_sequences[i].checkpoint_position_in_pixel.x = trolley_positions[i].x;
room_sequences[i].checkpoint_position_in_pixel.y = trolley_positions[i].y;
room_sequences[i].checkpoint_position_in_pixel.z = 0.;
room_sequences[i].checkpoint_position_in_meter.x = convert_pixel_to_meter_for_x_coordinate(trolley_positions[i].x, goal->map_resolution, map_origin);
room_sequences[i].checkpoint_position_in_meter.y = convert_pixel_to_meter_for_y_coordinate(trolley_positions[i].y, goal->map_resolution, map_origin);
room_sequences[i].checkpoint_position_in_meter.z = 0.;
}
action_result.checkpoints = room_sequences;
if(return_sequence_map_ == true)
{
//converting the cv format in map msg format
cv_bridge::CvImage cv_image;
cv_image.header.stamp = ros::Time::now();
cv_image.encoding = "bgr8";
cv_image.image = display;
cv_image.toImageMsg(action_result.sequence_map);
}
// publish visualization msg for RViz
publishSequenceVisualization(room_sequences, room_centers, cliques, goal->map_resolution, cv::Point2d(goal->map_origin.position.x, goal->map_origin.position.y));
room_sequence_with_checkpoints_server_.setSucceeded(action_result);
//garbage collection
action_result.checkpoints.clear();
ROS_INFO("********Sequence planning finished************");
}
// Returns the index of the entry in positions with the shortest A*-path distance from
// start_coordinate (ties resolved in favor of the earlier index). The map is downsampled
// once up front so the planner does not have to resize it for every query.
size_t RoomSequencePlanningServer::getNearestLocation(const cv::Mat& floor_plan, const cv::Point start_coordinate, const std::vector<cv::Point>& positions,
		const double map_downsampling_factor, const double robot_radius, const double map_resolution)
{
	AStarPlanner planner;
	cv::Mat downsampled_map;
	planner.downsampleMap(floor_plan, downsampled_map, map_downsampling_factor, robot_radius, map_resolution);

	// linear scan for the position with the minimal path length
	size_t best_index = 0;
	double best_distance = 1e10;
	for (size_t candidate = 0; candidate < positions.size(); ++candidate)
	{
		const double path_length = planner.planPath(floor_plan, downsampled_map, start_coordinate, positions[candidate], map_downsampling_factor, 0., map_resolution);
		if (path_length < best_distance)
		{
			best_distance = path_length;
			best_index = candidate;
		}
	}
	return best_index;
}
// Builds and publishes an RViz visualization_msgs::MarkerArray for the planned sequence:
//  - per clique checkpoint: a yellow sphere, a "C<i+1>" text label above it, and (from the
//    second checkpoint on) an arrow from the previous checkpoint to this one;
//  - per room center: an orange sphere, an "R<index+1>" text label, and an arrow from the
//    room center to its clique's checkpoint.
// Marker ids are partitioned per namespace to stay unique: in "room_sequence_checkpoints"
// spheres use [0,n), labels [n,2n), arrows [2n,3n) with n = room_sequences.size(); in
// "room_sequence_rooms" spheres use [0,room_count), labels [room_count,2*room_count),
// arrows [2*room_count,3*room_count).
// NOTE(review): markers use frame id "/map" with a leading slash — confirm this matches the
// tf tree; tf2 rejects frame ids that start with '/'.
void RoomSequencePlanningServer::publishSequenceVisualization(const std::vector<ipa_building_msgs::RoomSequence>& room_sequences, const std::vector<cv::Point>& room_centers,
		std::vector< std::vector<int> >& cliques, const double map_resolution, const cv::Point2d& map_origin)
{
room_sequence_visualization_msg_.markers.clear();
// publish clique centers
int room_count = 0;
for (size_t i=0; i<room_sequences.size(); ++i)
{
// prepare clique center circle message
visualization_msgs::Marker circle;
// Set the frame ID and timestamp. See the TF tutorials for information on these.
circle.header.frame_id = "/map";
circle.header.stamp = ros::Time::now();
// Set the namespace and id for this marker. This serves to create a unique ID
// Any marker sent with the same namespace and id will overwrite the old one
circle.ns = "room_sequence_checkpoints";
circle.id = i;
// Set the marker type. Initially this is CUBE, and cycles between that and SPHERE, ARROW, and CYLINDER
circle.type = visualization_msgs::Marker::SPHERE;
// Set the marker action. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
circle.action = visualization_msgs::Marker::ADD;
// Set the pose of the marker. This is a full 6DOF pose relative to the frame/time specified in the header
circle.pose.position.x = room_sequences[i].checkpoint_position_in_meter.x;
circle.pose.position.y = room_sequences[i].checkpoint_position_in_meter.y;
circle.pose.position.z = room_sequences[i].checkpoint_position_in_meter.z;
circle.pose.orientation.x = 0.0;
circle.pose.orientation.y = 0.0;
circle.pose.orientation.z = 0.0;
circle.pose.orientation.w = 1.0;
// Set the scale of the marker -- 1x1x1 here means 1m on a side
circle.scale.x = 0.20;		// this is the line width
circle.scale.y = 0.20;
circle.scale.z = 0.02;
// Set the color -- be sure to set alpha to something non-zero!
circle.color.r = 1.0f;
circle.color.g = 1.0f;
circle.color.b = 0.0f;
circle.color.a = 0.8;
circle.lifetime = ros::Duration();
room_sequence_visualization_msg_.markers.push_back(circle);
// prepare clique center text message
visualization_msgs::Marker text;
// Set the frame ID and timestamp. See the TF tutorials for information on these.
text.header.frame_id = "/map";
text.header.stamp = ros::Time::now();
// Set the namespace and id for this marker. This serves to create a unique ID
// Any marker sent with the same namespace and id will overwrite the old one
text.ns = "room_sequence_checkpoints";
text.id = room_sequences.size()+i;
// Set the marker type. Initially this is CUBE, and cycles between that and SPHERE, ARROW, and CYLINDER
text.type = visualization_msgs::Marker::TEXT_VIEW_FACING;
// Set the marker action. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
text.action = visualization_msgs::Marker::ADD;
// Set the pose of the marker. This is a full 6DOF pose relative to the frame/time specified in the header
text.pose.position.x = room_sequences[i].checkpoint_position_in_meter.x;
text.pose.position.y = room_sequences[i].checkpoint_position_in_meter.y;
text.pose.position.z = room_sequences[i].checkpoint_position_in_meter.z+0.4;
text.pose.orientation.x = 0.0;
text.pose.orientation.y = 0.0;
text.pose.orientation.z = 0.0;
text.pose.orientation.w = 1.0;
// Set the scale of the marker -- 1x1x1 here means 1m on a side
text.scale.x = 1.5;		// this is the line width
text.scale.y = 1.0;
text.scale.z = 0.5;
// Set the color -- be sure to set alpha to something non-zero!
text.color.r = 1.0f;
text.color.g = 1.0f;
text.color.b = 0.0f;
text.color.a = 0.8;
text.lifetime = ros::Duration();
std::stringstream ss;
ss << "C" << i+1;
text.text = ss.str();
room_sequence_visualization_msg_.markers.push_back(text);
// prepare clique center to center arrow message
if (i>0)
{
visualization_msgs::Marker arrow;
// Set the frame ID and timestamp. See the TF tutorials for information on these.
arrow.header.frame_id = "/map";
arrow.header.stamp = ros::Time::now();
// Set the namespace and id for this marker. This serves to create a unique ID
// Any marker sent with the same namespace and id will overwrite the old one
arrow.ns = "room_sequence_checkpoints";
arrow.id = 2*room_sequences.size()+i;
// Set the marker type. Initially this is CUBE, and cycles between that and SPHERE, ARROW, and CYLINDER
arrow.type = visualization_msgs::Marker::ARROW;
// Set the marker action. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
arrow.action = visualization_msgs::Marker::ADD;
// Set the pose of the marker. This is a full 6DOF pose relative to the frame/time specified in the header
arrow.pose.position.x = 0;
arrow.pose.position.y = 0;
arrow.pose.position.z = 0;
arrow.pose.orientation.x = 0.0;
arrow.pose.orientation.y = 0.0;
arrow.pose.orientation.z = 0.0;
arrow.pose.orientation.w = 1.0;
// arrow endpoints: previous checkpoint -> current checkpoint (visiting order)
arrow.points.resize(2);
arrow.points[0].x = room_sequences[i-1].checkpoint_position_in_meter.x;
arrow.points[0].y = room_sequences[i-1].checkpoint_position_in_meter.y;
arrow.points[0].z = room_sequences[i-1].checkpoint_position_in_meter.z;
arrow.points[1].x = room_sequences[i].checkpoint_position_in_meter.x;
arrow.points[1].y = room_sequences[i].checkpoint_position_in_meter.y;
arrow.points[1].z = room_sequences[i].checkpoint_position_in_meter.z;
// Set the scale of the marker -- 1x1x1 here means 1m on a side
arrow.scale.x = 0.03;		// this is the line width
arrow.scale.y = 0.06;
arrow.scale.z = 0.1;
// Set the color -- be sure to set alpha to something non-zero!
arrow.color.r = 1.0f;
arrow.color.g = 1.0f;
arrow.color.b = 0.0f;
arrow.color.a = 0.8;
arrow.lifetime = ros::Duration();
room_sequence_visualization_msg_.markers.push_back(arrow);
}
room_count += room_sequences[i].room_indices.size();
}
// publish room centers
int room_index = 0;
for (size_t i=0; i<room_sequences.size(); ++i)
{
for (size_t j=0; j<room_sequences[i].room_indices.size(); ++j, ++room_index)
{
// convert the room center from pixel coordinates back to meters for display
cv::Point2d current_room_center(room_centers[cliques[i][j]].x * map_resolution + map_origin.x, room_centers[cliques[i][j]].y * map_resolution + map_origin.y);
// prepare room center circle message
visualization_msgs::Marker circle;
// Set the frame ID and timestamp. See the TF tutorials for information on these.
circle.header.frame_id = "/map";
circle.header.stamp = ros::Time::now();
// Set the namespace and id for this marker. This serves to create a unique ID
// Any marker sent with the same namespace and id will overwrite the old one
circle.ns = "room_sequence_rooms";
circle.id = room_index;
// Set the marker type. Initially this is CUBE, and cycles between that and SPHERE, ARROW, and CYLINDER
circle.type = visualization_msgs::Marker::SPHERE;
// Set the marker action. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
circle.action = visualization_msgs::Marker::ADD;
// Set the pose of the marker. This is a full 6DOF pose relative to the frame/time specified in the header
circle.pose.position.x = current_room_center.x;
circle.pose.position.y = current_room_center.y;
circle.pose.position.z = 0;
circle.pose.orientation.x = 0.0;
circle.pose.orientation.y = 0.0;
circle.pose.orientation.z = 0.0;
circle.pose.orientation.w = 1.0;
// Set the scale of the marker -- 1x1x1 here means 1m on a side
circle.scale.x = 0.20;		// this is the line width
circle.scale.y = 0.20;
circle.scale.z = 0.02;
// Set the color -- be sure to set alpha to something non-zero!
circle.color.r = 1.0f;
circle.color.g = 0.5f;
circle.color.b = 0.0f;
circle.color.a = 0.8;
circle.lifetime = ros::Duration();
room_sequence_visualization_msg_.markers.push_back(circle);
// prepare room center text message
visualization_msgs::Marker text;
// Set the frame ID and timestamp. See the TF tutorials for information on these.
text.header.frame_id = "/map";
text.header.stamp = ros::Time::now();
// Set the namespace and id for this marker. This serves to create a unique ID
// Any marker sent with the same namespace and id will overwrite the old one
text.ns = "room_sequence_rooms";
text.id = room_count+room_index;
// Set the marker type. Initially this is CUBE, and cycles between that and SPHERE, ARROW, and CYLINDER
text.type = visualization_msgs::Marker::TEXT_VIEW_FACING;
// Set the marker action. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
text.action = visualization_msgs::Marker::ADD;
// Set the pose of the marker. This is a full 6DOF pose relative to the frame/time specified in the header
text.pose.position.x = current_room_center.x;
text.pose.position.y = current_room_center.y;
text.pose.position.z = 0.4;
text.pose.orientation.x = 0.0;
text.pose.orientation.y = 0.0;
text.pose.orientation.z = 0.0;
text.pose.orientation.w = 1.0;
// Set the scale of the marker -- 1x1x1 here means 1m on a side
text.scale.x = 1.5;		// this is the line width
text.scale.y = 1.0;
text.scale.z = 0.5;
// Set the color -- be sure to set alpha to something non-zero!
text.color.r = 1.0f;
text.color.g = 0.5f;
text.color.b = 0.0f;
text.color.a = 0.8;
text.lifetime = ros::Duration();
std::stringstream ss;
ss << "R" << room_index+1;
text.text = ss.str();
room_sequence_visualization_msg_.markers.push_back(text);
// prepare room center to clique center line message
visualization_msgs::Marker arrow;
// Set the frame ID and timestamp. See the TF tutorials for information on these.
arrow.header.frame_id = "/map";
arrow.header.stamp = ros::Time::now();
// Set the namespace and id for this marker. This serves to create a unique ID
// Any marker sent with the same namespace and id will overwrite the old one
arrow.ns = "room_sequence_rooms";
arrow.id = 2*room_count+room_index;
// Set the marker type. Initially this is CUBE, and cycles between that and SPHERE, ARROW, and CYLINDER
arrow.type = visualization_msgs::Marker::ARROW;
// Set the marker action. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
arrow.action = visualization_msgs::Marker::ADD;
// Set the pose of the marker. This is a full 6DOF pose relative to the frame/time specified in the header
arrow.pose.position.x = 0;
arrow.pose.position.y = 0;
arrow.pose.position.z = 0;
arrow.pose.orientation.x = 0.0;
arrow.pose.orientation.y = 0.0;
arrow.pose.orientation.z = 0.0;
arrow.pose.orientation.w = 1.0;
// arrow endpoints: room center -> checkpoint of the clique the room belongs to
arrow.points.resize(2);
arrow.points[0].x = current_room_center.x;
arrow.points[0].y = current_room_center.y;
arrow.points[0].z = 0;
arrow.points[1].x = room_sequences[i].checkpoint_position_in_meter.x;
arrow.points[1].y = room_sequences[i].checkpoint_position_in_meter.y;
arrow.points[1].z = room_sequences[i].checkpoint_position_in_meter.z;
// Set the scale of the marker -- 1x1x1 here means 1m on a side
arrow.scale.x = 0.03;		// this is the line width
arrow.scale.y = 0.03;
arrow.scale.z = 0.1;
// Set the color -- be sure to set alpha to something non-zero!
arrow.color.r = 1.0f;
arrow.color.g = 0.5f;
arrow.color.b = 0.0f;
arrow.color.a = 0.8;
arrow.lifetime = ros::Duration();
room_sequence_visualization_msg_.markers.push_back(arrow);
}
}
room_sequence_visualization_pub_.publish(room_sequence_visualization_msg_);
}
// Node entry point: initialize ROS, create the room sequence planning action server
// in the node's private namespace and hand control over to the ROS event loop.
int main(int argc, char **argv)
{
	ros::init(argc, argv, "room_sequence_planning_server");
	ros::NodeHandle private_node_handle("~");

	RoomSequencePlanningServer sequence_planning_server(private_node_handle, ros::this_node::getName());
	ROS_INFO("Action server for room sequence planning has been initialized......");

	ros::spin();	// process action goals until shutdown
	return 0;
}

View File

@ -0,0 +1,215 @@
#include "ros/ros.h"
#include <ros/package.h>
#include <iostream>
#include <iomanip>
#include <queue>
#include <string>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <fstream>
#include <ipa_building_navigation/A_star_pathplanner.h>
#include <ipa_building_navigation/nearest_neighbor_TSP.h>
#include <ipa_building_navigation/genetic_TSP.h>
#include <ipa_building_navigation/concorde_TSP.h>
#include <ipa_building_navigation/maximal_clique_finder.h>
#include <ipa_building_navigation/set_cover_solver.h>
#include <ipa_building_navigation/trolley_position_finder.h>
int main(int argc, char **argv)
{
srand(5); //time(NULL));
ros::init(argc, argv, "a_star_tester");
ros::NodeHandle nh;
cv::Mat map = cv::imread("/home/rmb-fj/Pictures/maps/black_map.png", 0);
AStarPlanner planner;
NearestNeighborTSPSolver TSPsolver;
GeneticTSPSolver genTSPsolver;
ConcordeTSPSolver conTSPsolver;
cliqueFinder finder; //Object to find all maximal cliques for the given map
SetCoverSolver setsolver; //Object to find the groups based on the found cliques
TrolleyPositionFinder tolley_finder;
std::vector < cv::Point > centers;
int n = 9;
double downfactor = 0.25;
double map_resolution_factor = 0.05;
double robot_radius_test = 0.5;
cv::Mat pathlengths(cv::Size(n, n), CV_64F);
cv::Mat distancematrix;
// cv::Mat eroded_map;
//
// cv::erode(map, eroded_map, cv::Mat(), cv::Point(-1, -1), 4);
//Testcenters:
// x: 494 y: 535
// x: 218 y: 176
// x: 152 y: 148
// x: 475 y: 417
// x: 342 y: 333
// x: 283 y: 205
// x: 149 y: 229
// x: 201 y: 456
// x: 286 y: 125
// for (int i = 0; i < n; i++) //add Points for TSP to test the solvers
// {
// bool done = false;
// do
// {
// int x = rand() % map.cols;
// int y = rand() % map.rows;
// if (eroded_map.at<unsigned char>(y, x) == 255)
// {
// centers.push_back(cv::Point(x, y));
// done = true;
// }
// } while (!done);
// }
centers.push_back(cv::Point(494, 535));
centers.push_back(cv::Point(218, 176));
centers.push_back(cv::Point(152, 148));
centers.push_back(cv::Point(475, 417));
centers.push_back(cv::Point(342, 333));
centers.push_back(cv::Point(283, 205));
centers.push_back(cv::Point(149, 229));
centers.push_back(cv::Point(201, 456));
centers.push_back(cv::Point(286, 125));
std::vector<int> nearest_neighbor_order = genTSPsolver.solveGeneticTSP(map, n, centers, downfactor, robot_radius_test, map_resolution_factor, 0);
std::cout << "without distance matrix:" << std::endl;
for (int i = 0; i < nearest_neighbor_order.size(); i++)
{
std::cout << nearest_neighbor_order[i] << std::endl;
}
std::cout << std::endl;
nearest_neighbor_order = genTSPsolver.solveGeneticTSP(map, n, centers, downfactor, robot_radius_test, map_resolution_factor, 0, distancematrix);
std::cout << "without distance matrix, returned:" << std::endl;
for (int i = 0; i < nearest_neighbor_order.size(); i++)
{
std::cout << nearest_neighbor_order[i] << std::endl;
}
std::cout << std::endl;
std::cout << "distance matrix out of solver: " << std::endl;
for (int row = 0; row < distancematrix.rows; row++)
{
for (int col = 0; col < distancematrix.cols; col++)
{
std::cout << distancematrix.at<double>(row, col) << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
// cv::Mat testermap = map.clone();
//
for (int i = 0; i < centers.size(); i++)
{
cv::Point center = centers[i];
for (int p = 0; p < centers.size(); p++)
{
if (p != i)
{
if (p > i) //only compute upper right triangle of matrix, rest is symmetrically added
{
double length = planner.planPath(map, center, centers[p], downfactor, robot_radius_test, map_resolution_factor);
pathlengths.at<double>(i, p) = length;
pathlengths.at<double>(p, i) = length; //symmetrical-Matrix --> saves half the computationtime
}
}
else
{
pathlengths.at<double>(i, p) = 0;
}
}
}
nearest_neighbor_order = genTSPsolver.solveGeneticTSP(pathlengths, 0);
std::cout << "with distance matrix:" << std::endl;
for (int i = 0; i < nearest_neighbor_order.size(); i++)
{
std::cout << nearest_neighbor_order[i] << std::endl;
}
std::cout << std::endl;
std::cout << "distance matrix out of main: " << std::endl;
for (int row = 0; row < pathlengths.rows; row++)
{
for (int col = 0; col < pathlengths.cols; col++)
{
std::cout << pathlengths.at<double>(row, col) << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
std::vector < std::vector<int> > cliques = finder.getCliques(pathlengths, 150.0);
// std::cout << "All maximum cliques in the graph:" << std::endl;
//
// for (int i = 0; i < cliques.size(); i++)
// {
// for (int j = 0; j < cliques[i].size(); j++)
// {
// std::cout << cliques[i][j] << std::endl;
// }
// std::cout << std::endl;
// }
//
ROS_INFO("Starting to solve the setcover problem.");
std::vector<std::vector<int> > groups = setsolver.solveSetCover(cliques, n);
std::vector<std::vector<int> > new_groups = setsolver.solveSetCover(map, n, centers, downfactor, robot_radius_test, map_resolution_factor, 150.0);
ROS_INFO("Starting to find the trolley positions.");
std::vector<cv::Point> trolley_positions = tolley_finder.findTrolleyPositions(map, groups, centers, downfactor, robot_radius_test, map_resolution_factor);
std::vector<cv::Point> new_trolleys = tolley_finder.findTrolleyPositions(map, new_groups, centers, downfactor, robot_radius_test, map_resolution_factor);
std::cout << "groups from new method" << std::endl;
for(int i = 0; i < new_groups.size(); i++)
{
for(int j = 0; j < new_groups[i].size(); j++)
{
std::cout << new_groups[i][j] << std::endl;
}
std::cout << "group done. trolley position: " << new_trolleys[i] << std::endl << std::endl;
}
std::cout << "groups from old method" << std::endl;
for(int i = 0; i < groups.size(); i++)
{
for(int j = 0; j < groups[i].size(); j++)
{
std::cout << groups[i][j] << std::endl;
}
std::cout << "group done. trolley position: " << trolley_positions[i] << std::endl;
}
//
// cv::imwrite("/home/rmb-fj/Pictures/TSP/genetic.png", testermap);
return 0;
}

View File

@ -0,0 +1,7 @@
<?xml version="1.0"?>
<launch>
<!-- starts the test client that sends a request to the room_sequence_planning action server -->
<node ns="room_sequence_planning" pkg="ipa_building_navigation" type="room_sequence_planning_client" name="room_sequence_planning_client" output="screen"/>
</launch>

View File

@ -0,0 +1,7 @@
<?xml version="1.0"?>
<launch>
<!-- -->
<node ns="room_sequence_planning" pkg="ipa_building_navigation" type="room_sequence_planning_evaluation" name="room_sequence_planning_evaluation" output="screen"/>
</launch>

View File

@ -0,0 +1,4 @@
# metapackage CMakeLists: only registers the ipa_coverage_planning metapackage, builds nothing itself
cmake_minimum_required(VERSION 2.8.3)
project(ipa_coverage_planning)
find_package(catkin REQUIRED)
catkin_metapackage()

View File

@ -0,0 +1,22 @@
<?xml version="1.0"?>
<package>
<name>ipa_coverage_planning</name>
<version>0.1.0</version>
<description>This meta-package includes open source software for efficient coverage (cleaning, inspection, ...) of buildings.</description>
<license>LGPL for academic and non-commercial use; for commercial use, please contact Fraunhofer IPA</license>
<!--url type="website">http://ros.org/wiki/autopnp</url-->
<!-- <url type="bugtracker"></url> -->
<author email="florian.jordan@ipa.fraunhofer.de">Florian Jordan</author>
<maintainer email="richard.bormann@ipa.fraunhofer.de">Richard Bormann</maintainer>
<buildtool_depend>catkin</buildtool_depend>
<run_depend>ipa_building_msgs</run_depend>
<run_depend>ipa_building_navigation</run_depend>
<run_depend>ipa_room_exploration</run_depend>
<run_depend>ipa_room_segmentation</run_depend>
<export>
<metapackage/>
</export>
</package>

View File

@ -0,0 +1,15 @@
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package ipa_room_exploration
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
0.1.2 (2017-02-13)
------------------
* rotation of the map in flowNetworkExplorator
* Contributors: Florian Jordan
0.1.1 (2017-02-10)
------------------
* excluded coverage check to standalone service server
* Contributors: Florian Jordan
0.1.0 (2017-02-09)
------------------
* first completely working version of package
* Contributors: Florian Jordan, Richard Bormann

View File

@ -0,0 +1,320 @@
cmake_minimum_required(VERSION 2.8.3)
project(ipa_room_exploration)
# build with c++11
add_compile_options(-std=c++11)
# catkin packages required at run time (and hence also at build time)
set(catkin_RUN_PACKAGES
angles
cob_map_accessibility_analysis
cv_bridge
eigen_conversions
geometry_msgs
ipa_building_msgs
ipa_building_navigation
laser_geometry
move_base_msgs
nav_msgs
roscpp
roslib
sensor_msgs
std_msgs
std_srvs
tf
visualization_msgs
# message_generation
# message_runtime
)
# catkin packages required at build time only
set(catkin_BUILD_PACKAGES
${catkin_RUN_PACKAGES}
dynamic_reconfigure
)
## Find catkin macros and libraries
find_package(catkin REQUIRED COMPONENTS
${catkin_BUILD_PACKAGES}
)
# add_message_files(
# FILES
# dis_info.msg
# dis_info_array.msg
# )
# generate_messages(
# DEPENDENCIES
# std_msgs
# )
# generate the dynamic_reconfigure servers from the cfg scripts
generate_dynamic_reconfigure_options(
cfg/RoomExploration.cfg
cfg/CoverageMonitor.cfg
)
find_package(OpenCV REQUIRED)
find_package(Boost REQUIRED COMPONENTS system chrono thread)
find_package(Eigen3 )
if(NOT EIGEN3_FOUND)
# Fallback to cmake_modules
find_package(cmake_modules REQUIRED)
find_package(Eigen REQUIRED)
set(EIGEN3_INCLUDE_DIRS ${EIGEN_INCLUDE_DIRS})
set(EIGEN3_LIBRARIES ${EIGEN_LIBRARIES}) # Not strictly necessary as Eigen is head only
# Possibly map additional variables to the EIGEN3_ prefix.
else()
set(EIGEN3_INCLUDE_DIRS ${EIGEN3_INCLUDE_DIR})
endif()
# include the FindGUROBI.cmake file to search for Gurobi on the system
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${PROJECT_SOURCE_DIR}/cmake/")
find_package(GUROBI)
# if Gurobi was found, set a flag to use it
if(GUROBI_FOUND)
add_definitions(-DGUROBI_FOUND=${GUROBI_FOUND})
endif()
message(STATUS "Gurobi include dirs: " ${GUROBI_INCLUDE_DIRS})
message(STATUS "Gurobi lib dirs: " ${GUROBI_LIBRARIES})
# Coin-Or and Cbc, included as ubuntu system package, without Cmake-module so include pkg-config file
# !!! Important note: the order of the libraries when linking your executable is important, if it is wrong you get undefined references !!!
find_package(PkgConfig REQUIRED)
pkg_check_modules(CoinUtils REQUIRED coinutils)# coinutils osi-clp clp osi cbc cgl
pkg_check_modules(OsiClp REQUIRED osi-clp)
pkg_check_modules(Clp REQUIRED clp)
pkg_check_modules(Osi REQUIRED osi)
pkg_check_modules(Cbc-lib REQUIRED cbc)
pkg_check_modules(Cgl REQUIRED cgl)
message(STATUS "coinutils include dirs: " ${CoinUtils_INCLUDE_DIRS})
message(STATUS "osi-clp include dirs: " ${OsiClp_INCLUDE_DIRS})
message(STATUS "clp include dirs: " ${Clp_INCLUDE_DIRS})
message(STATUS "osi include dirs: " ${Osi_INCLUDE_DIRS})
message(STATUS "cbc include dirs: " ${Cbc-lib_INCLUDE_DIRS})
message(STATUS "cgl include dirs: " ${Cgl_INCLUDE_DIRS})
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if you package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
# important: common/include needs to be here if you have such a directory
catkin_package(
INCLUDE_DIRS
common/include
ros/include
LIBRARIES
${PROJECT_NAME}
libcoverage_check_server
CATKIN_DEPENDS
${catkin_RUN_PACKAGES}
# message_runtime
DEPENDS
OpenCV
Boost
CoinUtils
OsiClp
Clp
Osi
Cgl
Cbc-lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
# important: common/include also needs to be here if you have it, order of including the coinor-packages relevant, not working when including in wrong order
# NOTE: directory-scoped include_directories is the established catkin style; with plain CMake, target_include_directories would be preferred
include_directories(
common/include
ros/include
${catkin_INCLUDE_DIRS}
${OpenCV_INCLUDE_DIRS}
${Boost_INCLUDE_DIRS}
${EIGEN3_INCLUDE_DIRS}
${GUROBI_INCLUDE_DIRS}
${CoinUtils_INCLUDE_DIRS}
${OsiClp_INCLUDE_DIRS}
${Clp_INCLUDE_DIRS}
${Osi_INCLUDE_DIRS}
${Cgl_INCLUDE_DIRS}
${Cbc-lib_INCLUDE_DIRS}
)
### package library: all exploration algorithms bundled into one static library
add_library(${PROJECT_NAME} STATIC
common/src/grid_point_explorator.cpp
common/src/boustrophedon_explorator.cpp
common/src/neural_network_explorator.cpp
common/src/convex_sensor_placement_explorator.cpp
common/src/energy_functional_explorator.cpp
common/src/flow_network_explorator.cpp
common/src/room_rotator.cpp
common/src/meanshift2d.cpp
)
# link order of the Coin-Or libraries matters (see note above)
target_link_libraries(${PROJECT_NAME}
${catkin_LIBRARIES}
${OpenCV_LIBS}
${Boost_LIBRARIES}
${CoinUtils_LIBRARIES}
${OsiClp_LIBRARIES}
${Clp_LIBRARIES}
${Osi_LIBRARIES}
${Cgl_LIBRARIES}
${Cbc-lib_LIBRARIES}
${GUROBI_LIBRARIES}
libcoverage_check_server
)
# make sure messages/actions and dynamic_reconfigure headers are generated first
add_dependencies(${PROJECT_NAME}
${catkin_EXPORTED_TARGETS}
${${PROJECT_NAME}_EXPORTED_TARGETS}
)
### room exploration action server, note: order of linking the Coin-Or packages important
add_executable(room_exploration_server
ros/src/room_exploration_action_server.cpp
ros/src/fov_to_robot_mapper.cpp
)
target_link_libraries(room_exploration_server
${catkin_LIBRARIES}
${OpenCV_LIBS}
${Boost_LIBRARIES}
${PROJECT_NAME}
libcoverage_check_server
)
add_dependencies(room_exploration_server
${catkin_EXPORTED_TARGETS}
${${PROJECT_NAME}_EXPORTED_TARGETS}
)
### library for coverage checking
# NOTE(review): the target name already starts with "lib", so the built file is liblibcoverage_check_server.*
add_library(libcoverage_check_server
ros/src/coverage_check_server.cpp
)
target_link_libraries(libcoverage_check_server
${catkin_LIBRARIES}
${OpenCV_LIBS}
${Boost_LIBRARIES}
)
add_dependencies(libcoverage_check_server ${catkin_EXPORTED_TARGETS})
### server for coverage checking
add_executable(coverage_check_server
ros/src/coverage_check_server_main.cpp
)
target_link_libraries(coverage_check_server
${catkin_LIBRARIES}
${OpenCV_LIBS}
${Boost_LIBRARIES}
libcoverage_check_server
)
add_dependencies(coverage_check_server ${catkin_EXPORTED_TARGETS})
### server for coverage monitoring
add_executable(coverage_monitor_server
ros/src/coverage_monitor_server.cpp
)
target_link_libraries(coverage_monitor_server
${catkin_LIBRARIES}
${OpenCV_LIBS}
${Boost_LIBRARIES}
libcoverage_check_server
)
add_dependencies(coverage_monitor_server ${catkin_EXPORTED_TARGETS})
### room exploration client
add_executable(room_exploration_client
ros/src/room_exploration_action_client.cpp
)
target_link_libraries(room_exploration_client
${catkin_LIBRARIES}
${OpenCV_LIBS}
${Boost_LIBRARIES}
)
add_dependencies(room_exploration_client
${catkin_EXPORTED_TARGETS}
${${PROJECT_NAME}_EXPORTED_TARGETS}
)
### next goal publisher node
add_executable(next_goal
ros/src/next_goal.cpp
)
target_link_libraries(next_goal
${catkin_LIBRARIES}
${OpenCV_LIBS}
${Boost_LIBRARIES}
)
add_dependencies(next_goal
${catkin_EXPORTED_TARGETS}
${${PROJECT_NAME}_EXPORTED_TARGETS}
)
### subscriber node
add_executable(sub
ros/src/sub.cpp
)
target_link_libraries(sub
${catkin_LIBRARIES}
)
### evaluation of room exploration algorithms
add_executable(room_exploration_evaluation
ros/src/room_exploration_evaluation.cpp
ros/src/fov_to_robot_mapper.cpp
)
target_link_libraries(room_exploration_evaluation
${catkin_LIBRARIES}
${OpenCV_LIBS}
libcoverage_check_server
)
add_dependencies(room_exploration_evaluation
${catkin_EXPORTED_TARGETS}
${${PROJECT_NAME}_EXPORTED_TARGETS}
)
#############
## Install ##
#############
# Mark executables and/or libraries for installation.
# libcoverage_check_server is exported via catkin_package(LIBRARIES ...) and therefore
# must be installed as well, otherwise dependent packages break against the installed package.
install(TARGETS ${PROJECT_NAME} libcoverage_check_server room_exploration_server room_exploration_client
ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)
#uncomment this if you have a common-directory with header-files
install(DIRECTORY common/include/${PROJECT_NAME}/
DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
FILES_MATCHING PATTERN "*.h"
PATTERN ".svn" EXCLUDE
)
#uncomment this if you have header-files in your project
install(DIRECTORY ros/include/${PROJECT_NAME}/
DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
FILES_MATCHING PATTERN "*.h"
PATTERN ".svn" EXCLUDE
)
#install(DIRECTORY scripts
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# PATTERN ".svn" EXCLUDE
#)
# uncomment this if you have launch files
install(DIRECTORY ros/launch
DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/ros
PATTERN ".svn" EXCLUDE
)
#install(DIRECTORY common/files
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/common
# PATTERN ".svn" EXCLUDE
#)

View File

@ -0,0 +1,18 @@
#!/usr/bin/env python
# dynamic_reconfigure config generator for the coverage monitor server
PACKAGE = "ipa_room_exploration"
from dynamic_reconfigure.parameter_generator_catkin import *
gen = ParameterGenerator()
# coordinate frames used to track the robot pose
gen.add("map_frame", str_t, 0, "Name of the map coordinate system.", "map")
gen.add("robot_frame", str_t, 0, "Name of the robot coordinate system.", "base_link")
# geometry of the circular coverage device (e.g. cleaning disc) relative to the robot center
gen.add("coverage_radius", double_t, 0, "Radius of the circular coverage device, in [m].", 0.25, 0.001, 100000.0)
gen.add("coverage_circle_offset_transform_x", double_t, 0, "The offset of the coverage circle from the robot center, [x,y,z] in [m].", 0.29035, -100000.0, 100000.0)
gen.add("coverage_circle_offset_transform_y", double_t, 0, "The offset of the coverage circle from the robot center, [x,y,z] in [m].", -0.114, -100000.0, 100000.0)
gen.add("coverage_circle_offset_transform_z", double_t, 0, "The offset of the coverage circle from the robot center, [x,y,z] in [m].", 0.0, -100000.0, 100000.0)
# switch for trajectory recording
gen.add("robot_trajectory_recording_active", bool_t, 0, "The robot trajectory is only recorded if this flag is true.", False)
exit(gen.generate(PACKAGE, "coverage_monitor_server", "CoverageMonitor"))

View File

@ -0,0 +1,123 @@
#!/usr/bin/env python
# dynamic_reconfigure config generator for the room exploration action server
PACKAGE = "ipa_room_exploration"
from dynamic_reconfigure.parameter_generator_catkin import *
gen = ParameterGenerator()
# Method to plan a path through the room
method_enum = gen.enum([ gen.const("GridPointExplorator", int_t, 1, "Use the grid point exploration algorithm."),
gen.const("BoustrophedonExplorator", int_t, 2, "Use the boustrophedon exploration algorithm."),
gen.const("NeuralNetworkExplorator", int_t, 3, "Use the neural network exploration algorithm."),
gen.const("ConvexSPPExplorator", int_t, 4, "Use the convexSPP exploration algorithm."),
gen.const("FlowNetworkExplorator", int_t, 5, "Use the flowNetwork exploration algorithm."),
gen.const("EnergyFunctionalExplorator", int_t, 6, "Use the energyFunctional exploration algorithm."),
gen.const("VoronoiExplorator", int_t, 7, "Use the voronoi exploration algorithm."),
gen.const("BoustrophedonVariantExplorator", int_t, 8, "Use a variant of the boustrophedon exploration algorithm.")],
"Exploration algorithm")
gen.add("room_exploration_algorithm", int_t, 0, "Exploration method", 2, 1, 8, edit_method=method_enum)
# Parameters on map correction options
# ====================================
# note: stray trailing semicolons removed (harmless in Python but non-idiomatic)
gen.add("map_correction_closing_neighborhood_size", int_t, 0, "Applies a closing operation to neglect inaccessible areas and map errors/artifacts if the map_correction_closing_neighborhood_size parameter is larger than 0. The parameter then specifies the iterations (or neighborhood size) of that closing operation..", 2, -1, 100)
# Parameters specific to the navigation of the robot along the computed coverage trajectory
# =========================================================================================
gen.add("return_path", bool_t, 0, "Boolean used to determine whether the server should return the computed coverage path in the response message.", True)
gen.add("execute_path", bool_t, 0, "Boolean used to determine whether the server should navigate the robot along the computed coverage path.", False)
# double that shows how close the robot can get to a published navigation goal before another gets published
gen.add("goal_eps", double_t, 0, "Distance from robot to published goal to publish the next goal in the path.", 2.0, 1.0)
# Use a dynamic goal distance criterion: the larger the path's curvature, the more accurate the navigation.
# Uses 'goal_eps' param as maximum distance on straight paths and zero distance (accurate goal approaching) on 90deg curves.
gen.add("use_dyn_goal_eps", bool_t, 0, "Use a dynamic goal distance criterion: the larger the path's curvature, the more accurate the navigation.", False)
# Boolean that interrupts the publishing of the navigation goals as long as needed, e.g. when the robot sees a trashbin during a cleaning task and wants to clean it directly and resume later with the coverage path.
gen.add("interrupt_navigation_publishing", bool_t, 0, "Interrupt the publishing of navigation goals as long as needed.", False)
# turn the functionality on/off to revisit areas that haven't been seen during the execution of the coverage path, due to uncertainties or dynamic obstacles
gen.add("revisit_areas", bool_t, 0, "Revisiting not seen areas due to dynamic obstacles on/off.", True)
# min area before previously not seen areas have to be revisited, [m^2]
gen.add("left_sections_min_area", double_t, 0, "Minimal size of left sections to revisit them after one go [m^2].", 0.01, 1e-7)
gen.add("global_costmap_topic", str_t, 0, "The name of the global costmap topic.", "/move_base/global_costmap/costmap")
gen.add("coverage_check_service_name", str_t, 0, "The name of the service to call for a coverage check of the driven trajectory.", "/room_exploration/coverage_check_server/coverage_check")
gen.add("map_frame", str_t, 0, "The name of the map frame, used for tracking of the robot.", "map")
gen.add("camera_frame", str_t, 0, "The name of the camera frame, that is in the same kinematic chain as the map_frame and shows the camera pose.", "base_link")
# Grid point Explorator
# =====================
tsp_solver_enum = gen.enum([ gen.const("NearestNeighborTSP", int_t, 1, "Use the Nearest Neighbor TSP algorithm."),
gen.const("GeneticTSP", int_t, 2, "Use the Genetic TSP solver."),
gen.const("ConcordeTSP", int_t, 3, "Use the Concorde TSP solver.")],
"Indicates which TSP solver should be used.")
gen.add("tsp_solver", int_t, 0, "Exploration method", 3, 1, 3, edit_method=tsp_solver_enum)
gen.add("tsp_solver_timeout", int_t, 0, "A sophisticated solver like Concorde or Genetic can be interrupted if it does not find a solution within this time (in [s]), and then falls back to the nearest neighbor solver.", 600, 1)
# Boustrophedon Explorator
# ========================
gen.add("min_cell_area", double_t, 0, "Minimum area of one cell for the boustrophedon explorator.", 10.0, 1e-7)
# min distance between two points in the created coverage path
gen.add("path_eps", double_t, 0, "Minimal distance between two points on the generated path [pixel].", 2.0, 0)
# the additional offset of the grid to obstacles, i.e. allows to displace the grid by more than the standard half_grid_size from obstacles, in [m]
gen.add("grid_obstacle_offset", double_t, 0, "Allows to displace the grid by more than the standard half_grid_size from obstacles [m].", 0.0, 0)
# maximal allowed shift off the ideal boustrophedon track to both sides for avoiding obstacles on track
# setting max_deviation_from_track=grid_spacing is usually a good choice
# for negative values (e.g. max_deviation_from_track: -1) max_deviation_from_track is automatically set to grid_spacing
# in [pixel]
gen.add("max_deviation_from_track", int_t, 0, "Maximal allowed shift off the ideal boustrophedon track for avoiding obstacles on track, in [pixel]. For negative values max_deviation_from_track is automatically set to grid_spacing.", -1, -1)
# enum for cell visiting order
cell_visiting_order_enum = gen.enum([gen.const("OptimalTSP", int_t, 1, "The optimal visiting order of the cells is determined as TSP problem."),
gen.const("LeftToRight", int_t, 2, "Alternative ordering from left to right (measured on y-coordinates of the cells), visits the cells in a more obvious fashion to the human observer (though it is not optimal).")],
"Cell visiting order")
gen.add("cell_visiting_order", int_t, 0, "Cell visiting order method", 1, 1, 2, edit_method=cell_visiting_order_enum)
# Neural network explorator, see room_exploration_action_server.params.yaml for further details
# =============================================================================================
gen.add("step_size", double_t, 0, "Step size for integrating the state dynamics of the neural network.", 0.008, 0.0001, 1.0)
gen.add("A", int_t, 0, "Decaying parameter that pulls the activity of a neuron closer to zero, larger value means faster decreasing.", 17, 1)
gen.add("B", int_t, 0, "Increasing parameter that tries to increase the activity of a neuron, higher value means a higher desired value and a faster increasing at the beginning.", 5, 1)
gen.add("D", int_t, 0, "Decreasing parameter when the neuron is labeled as obstacle, higher value means faster decreasing.", 7, 1)
gen.add("E", int_t, 0, "External input parameter of one neuron that is used in the dynamics corresponding to if it is an obstacle or uncleaned/cleaned, E>>B.", 80, 1)
gen.add("mu", double_t, 0, "Parameter to set the importance of the states of neighboring neurons to the dynamics, higher value means higher influence.", 1.03, 0.001)
gen.add("delta_theta_weight", double_t, 0, "Parameter to set the importance of the traveleing direction from the previous step and the next step, a higher value means that the robot should turn less.", 0.15, 0.0)
# ConvexSPP explorator
# ====================
# size of one cell when discretizing the free space
gen.add("cell_size", int_t, 0, "Side length of one grid cell, when discretizing the free space.", 0, -1)
gen.add("delta_theta", double_t, 0, "Sampling angle when creating possible sensing poses.", 1.570796, 1e-4)
# flowNetwork explorator
# ======================
gen.add("curvature_factor", double_t, 0, "Factor an arc can be longer than a straight arc.", 1.1, 1.0)
gen.add("max_distance_factor", double_t, 0, "#Factor, an arc can be longer than the maximal distance of the room.", 1.0, 1.0)
exit(gen.generate(PACKAGE, "ipa_room_exploration_action_server", "RoomExploration"))

View File

@ -0,0 +1,62 @@
#### Taken from https://github.com/joschu/trajopt/blob/master/cmake/modules/FindGUROBI.cmake
# - Try to find GUROBI --> not a system package, needs to be searched separately
# Once done this will define
#  GUROBI_FOUND - System has Gurobi
#  GUROBI_INCLUDE_DIRS - The Gurobi include directories
#  GUROBI_LIBRARIES - The libraries needed to use Gurobi
if(GUROBI_INCLUDE_DIR)
  # variables already in cache
  set(GUROBI_FOUND TRUE)
  set(GUROBI_INCLUDE_DIRS "${GUROBI_INCLUDE_DIR}" )
  set(GUROBI_LIBRARIES "${GUROBI_LIBRARY};${GUROBI_CXX_LIBRARY}" )
else()
  # locate the header
  find_path(GUROBI_INCLUDE_DIR
    NAMES gurobi_c++.h
    PATHS "$ENV{GUROBI_HOME}/include"
          "/Library/gurobi702/linux64/include"
  )
  # search for the used version of the Gurobi library
  find_library(GUROBI_LIBRARY
    NAMES gurobi
          gurobi45
          gurobi46
          gurobi50
          gurobi51
          gurobi52
          gurobi55
          gurobi56
          gurobi60
          gurobi70
    PATHS "$ENV{GUROBI_HOME}/lib"
          "/Library/gurobi702/linux64/lib"
  )
  # the C++ wrapper library
  find_library(GUROBI_CXX_LIBRARY
    NAMES gurobi_c++
          libgurobi_c++.a
    PATHS "$ENV{GUROBI_HOME}/lib"
          "/Library/gurobi702/linux64/lib"
  )
  if(GUROBI_LIBRARY AND GUROBI_CXX_LIBRARY AND GUROBI_INCLUDE_DIR)
    set(GUROBI_INCLUDE_DIRS "${GUROBI_INCLUDE_DIR}")
    set(GUROBI_LIBRARIES "${GUROBI_LIBRARY};${GUROBI_CXX_LIBRARY}")
    set(GUROBI_FOUND TRUE)
  else()
    message(STATUS "Gurobi has not been found")
  endif()
  include(FindPackageHandleStandardArgs)
  # handle the QUIETLY and REQUIRED arguments
  find_package_handle_standard_args(GUROBI DEFAULT_MSG
    GUROBI_LIBRARY GUROBI_CXX_LIBRARY GUROBI_INCLUDE_DIR)
  mark_as_advanced(GUROBI_INCLUDE_DIR GUROBI_LIBRARY GUROBI_CXX_LIBRARY)
endif()

View File

@ -0,0 +1,334 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2016 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Florian Jordan
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 11.2016
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <vector>
#include <map>
#include <set>
#include <cmath>
#include <string>
#include <Eigen/Dense>
#include <ipa_building_navigation/concorde_TSP.h>
#include <ipa_building_navigation/genetic_TSP.h>
#include <ipa_room_exploration/meanshift2d.h>
#include <ipa_room_exploration/fov_to_robot_mapper.h>
#include <ipa_room_exploration/room_rotator.h>
#include <ipa_room_exploration/grid.h>
#include <geometry_msgs/Pose2D.h>
#include <geometry_msgs/Polygon.h>
#include <geometry_msgs/Point32.h>
#define PI 3.14159265359
// Class that is used to store cells and obstacles in a certain manner. For this the vertexes are stored as points and
// the edges are stored as vectors in a counter-clockwise manner. The constructor becomes a set of respectively sorted
// points and computes the vectors out of them. Additionally the accessible/visible center of the polygon gets computed,
// to simplify the visiting order later, by using a meanshift algorithm.
class GeneralizedPolygon
{
protected:
// vertexes
std::vector<cv::Point> vertices_;
// accessible center: a central point inside the polygon with maximum distance to walls
cv::Point center_;
// center of bounding rectangle of polygon, may be located outside the polygon, i.e. in an inaccessible area
cv::Point bounding_box_center_;
// min/max coordinates of the axis-aligned bounding box of the polygon, in [pixel]
int max_x_, min_x_, max_y_, min_y_;
// area of the polygon (cell number), in [pixel^2]
double area_;
public:
// constructor: stores the (counter-clockwise sorted) vertices, computes the bounding box, the pixel area and an
// accessible center of the polygon (point with large wall clearance, found via distance transform + mean shift)
// @param vertices  polygon corner points in image coordinates
// @param map_resolution  resolution of the underlying map, in [m/pixel], forwarded to the mean shift room center search
GeneralizedPolygon(const std::vector<cv::Point>& vertices, const double map_resolution)
{
// save given vertexes
vertices_ = vertices;
// get max/min x/y coordinates
max_x_ = 0;
min_x_ = 100000;
max_y_ = 0;
min_y_ = 100000;
for(size_t point=0; point<vertices_.size(); ++point)
{
if(vertices_[point].x > max_x_)
max_x_ = vertices_[point].x;
if(vertices_[point].y > max_y_)
max_y_ = vertices_[point].y;
if(vertices_[point].x < min_x_)
min_x_ = vertices_[point].x;
if(vertices_[point].y < min_y_)
min_y_ = vertices_[point].y;
}
bounding_box_center_.x = (min_x_+max_x_)/2;
bounding_box_center_.y = (min_y_+max_y_)/2;
// compute visible center
MeanShift2D ms;
// paint the polygon filled into a sufficiently large black image
cv::Mat room = cv::Mat::zeros(max_y_+10, max_x_+10, CV_8UC1);
#if CV_MAJOR_VERSION<=3
cv::drawContours(room, std::vector<std::vector<cv::Point> >(1,vertices), -1, cv::Scalar(255), CV_FILLED);	// draw the filled contour
#else
cv::drawContours(room, std::vector<std::vector<cv::Point> >(1,vertices), -1, cv::Scalar(255), cv::FILLED);
#endif
// the number of painted pixels is the polygon area in [pixel^2]
area_ = cv::countNonZero(room);
cv::Mat distance_map;	//variable for the distance-transformed map, type: CV_32FC1
#if CV_MAJOR_VERSION<=3
cv::distanceTransform(room, distance_map, CV_DIST_L2, 5);
#else
cv::distanceTransform(room, distance_map, cv::DIST_L2, 5);
#endif
// find point set with largest distance to obstacles
double min_val = 0., max_val = 0.;
cv::minMaxLoc(distance_map, &min_val, &max_val);
// collect all pixels whose wall clearance is within 5% of the maximum clearance
std::vector<cv::Vec2d> room_cells;
for (int v = 0; v < distance_map.rows; ++v)
for (int u = 0; u < distance_map.cols; ++u)
if (distance_map.at<float>(v, u) > max_val * 0.95f)
room_cells.push_back(cv::Vec2d(u, v));
// use meanshift to find the modes in that set
cv::Vec2d room_center = ms.findRoomCenter(room, room_cells, map_resolution);
// save found center
center_.x = room_center[0];
center_.y = room_center[1];
}
// returns the stored polygon corner points
std::vector<cv::Point> getVertices() const
{
return vertices_;
}
// returns the accessible center computed in the constructor
cv::Point getCenter() const
{
return center_;
}
// returns the bounding box center (may lie outside the polygon)
cv::Point getBoundingBoxCenter() const
{
return bounding_box_center_;
}
// returns the polygon area, in [pixel^2]
double getArea() const
{
return area_;
}
// draws the filled polygon into the provided image (the image is overwritten with a freshly sized canvas)
void drawPolygon(cv::Mat& image, const cv::Scalar& color) const
{
// draw polygon in an black image with necessary size
cv::Mat black_image = cv::Mat(max_y_+10, max_x_+10, CV_8UC1, cv::Scalar(0));
#if CV_MAJOR_VERSION<=3
cv::drawContours(black_image, std::vector<std::vector<cv::Point> >(1,vertices_), -1, color, CV_FILLED);
#else
cv::drawContours(black_image, std::vector<std::vector<cv::Point> >(1,vertices_), -1, color, cv::FILLED);
#endif
// assign drawn map
image = black_image.clone();
}
// outputs the bounding box limits of the polygon
void getMinMaxCoordinates(int& min_x, int& max_x, int& min_y, int& max_y)
{
min_x = min_x_;
max_x = max_x_;
min_y = min_y_;
max_y = max_y_;
}
};
// Structure to save edges of a path on one row, that allows to easily get the order of the edges when planning the
// boustrophedon path.
struct BoustrophedonHorizontalLine
{
// left and right end point of one horizontal sweep-line segment, in image coordinates
cv::Point left_corner_, right_corner_;
};
// Structure for saving several properties of cells obtained from the boustrophedon (Morse) decomposition.
struct BoustrophedonCell
{
	// convenience typedefs for a set of (shared pointers to) neighboring cells
	typedef std::set<boost::shared_ptr<BoustrophedonCell> > BoustrophedonCellSet;
	typedef std::set<boost::shared_ptr<BoustrophedonCell> >::iterator BoustrophedonCellSetIterator;

	int label_;							// label id of the cell
	double area_;						// area of the cell, in [pixel^2]
	cv::Rect bounding_box_;				// bounding box of the cell
	BoustrophedonCellSet neighbors_;	// pointer to neighboring cells

	// constructor: initialize members directly via the initializer list (avoids default construction + assignment)
	BoustrophedonCell(const int label, const double area, const cv::Rect& bounding_box)
		: label_(label), area_(area), bounding_box_(bounding_box)
	{
	}
};
// Class that generates a room exploration path by using the morse cellular decomposition method, proposed by
//
// "H. Choset, E. Acar, A. A. Rizzi and J. Luntz,
// "Exact cellular decompositions in terms of critical points of Morse functions," Robotics and Automation, 2000. Proceedings.
// ICRA '00. IEEE International Conference on, San Francisco, CA, 2000, pp. 2270-2277 vol.3."
//
// This method decomposes the given environment into several cells which don't have any obstacle in it. For each of this
// cell the boustrophedon path is generated, which goes up and down in each cell, see the upper paper for reference.
// This class only produces a static path, regarding the given map in form of a point series. To react on dynamic
// obstacles, one has to do this in upper algorithms.
//
class BoustrophedonExplorer
{
protected:
// pathplanner to check for the next nearest locations
AStarPlanner path_planner_;
// gray value used to mark border pixels of cells (exact usage defined in the implementation file)
static const uchar BORDER_PIXEL_VALUE = 25;
// rotates the original map for a good axis alignment and divides it into Morse cells
// the functions tries two axis alignments with 90deg rotation difference and chooses the one with the lower number of cells
void findBestCellDecomposition(const cv::Mat& room_map, const float map_resolution, const double min_cell_area,
const int min_cell_width, cv::Mat& R, cv::Rect& bbox, cv::Mat& rotated_room_map,
std::vector<GeneralizedPolygon>& cell_polygons, std::vector<cv::Point>& polygon_centers);
// rotates the original map for a good axis alignment and divides it into Morse cells
// @param rotation_offset can be used to put an offset to the computed rotation for good axis alignment, in [rad]
void computeCellDecompositionWithRotation(const cv::Mat& room_map, const float map_resolution, const double min_cell_area,
const int min_cell_width, const double rotation_offset, cv::Mat& R, cv::Rect& bbox, cv::Mat& rotated_room_map,
std::vector<GeneralizedPolygon>& cell_polygons, std::vector<cv::Point>& polygon_centers);
// divides the provided map into Morse cells
void computeCellDecomposition(const cv::Mat& room_map, const float map_resolution, const double min_cell_area,
const int min_cell_width, std::vector<GeneralizedPolygon>& cell_polygons, std::vector<cv::Point>& polygon_centers);	// returns the polygon point array cell_polygons of each cell as well as the center point array polygon_centers of each cell
// merges cells after a cell decomposition according to various criteria specified in function @mergeCellsSelection
// returns the number of cells after merging
int mergeCells(cv::Mat& cell_map, cv::Mat& cell_map_labels, const double min_cell_area, const int min_cell_width);
// implements the selection criterion for cell merging, in this case: too small (area) or too thin (width or height) cells
// are merged with their largest neighboring cell.
void mergeCellsSelection(cv::Mat& cell_map, cv::Mat& cell_map_labels, std::map<int, boost::shared_ptr<BoustrophedonCell> >& cell_index_mapping,
const double min_cell_area, const int min_cell_width);
// executes the merger of minor cell into major cell
void mergeTwoCells(cv::Mat& cell_map, cv::Mat& cell_map_labels, const BoustrophedonCell& minor_cell, BoustrophedonCell& major_cell,
std::map<int, boost::shared_ptr<BoustrophedonCell> >& cell_index_mapping);
// this function corrects obstacles that are one pixel width at 45deg angle, i.e. a 2x2 pixel neighborhood with [0, 255, 255, 0] or [255, 0, 0, 255]
void correctThinWalls(cv::Mat& room_map);
// computes the Boustrophedon path pattern for a single cell
void computeBoustrophedonPath(const cv::Mat& room_map, const float map_resolution, const GeneralizedPolygon& cell,
std::vector<cv::Point2f>& fov_middlepoint_path, cv::Point& robot_pos,
const int grid_spacing_as_int, const int half_grid_spacing_as_int, const double path_eps, const int max_deviation_from_track, const int grid_obstacle_offset=0);
// downsamples a given path original_path to waypoint distances of path_eps and appends the resulting path to downsampled_path
void downsamplePath(const std::vector<cv::Point>& original_path, std::vector<cv::Point>& downsampled_path,
cv::Point& cell_robot_pos, const double path_eps);
// downsamples a given path original_path to waypoint distances of path_eps in reverse order as given in original_path
// and appends the resulting path to downsampled_path
void downsamplePathReverse(const std::vector<cv::Point>& original_path, std::vector<cv::Point>& downsampled_path,
cv::Point& robot_pos, const double path_eps);
// prints the cells stored in cell_index_mapping (debugging aid)
void printCells(std::map<int, boost::shared_ptr<BoustrophedonCell> >& cell_index_mapping);
public:
// constructor
BoustrophedonExplorer();
// Function that creates an exploration path for a given room. The room has to be drawn in a cv::Mat (filled with Bit-uchar),
// with free space drawn white (255) and obstacles as black (0). It returns a series of 2D poses that show to which positions
// the robot should drive at.
void getExplorationPath(const cv::Mat& room_map, std::vector<geometry_msgs::Pose2D>& path, const float map_resolution,
const cv::Point starting_position, const cv::Point2d map_origin, const double grid_spacing_in_pixel,
const double grid_obstacle_offset, const double path_eps, const int cell_visiting_order, const bool plan_for_footprint,
const Eigen::Matrix<float, 2, 1> robot_to_fov_vector, const double min_cell_area, const int max_deviation_from_track);
// order in which the computed cells are visited: either optimized with a TSP solver or simply from left to right
enum CellVisitingOrder {OPTIMAL_TSP=1, LEFT_TO_RIGHT=2};
};
// Variant of the BoustrophedonExplorer that differs only in the cell merging criterion (see mergeCellsSelection).
class BoustrophedonVariantExplorer : public BoustrophedonExplorer
{
protected:
// implements the selection criterion for cell merging, in this case: only large cells with different major axis are not merged.
void mergeCellsSelection(cv::Mat& cell_map, cv::Mat& cell_map_labels, std::map<int, boost::shared_ptr<BoustrophedonCell> >& cell_index_mapping,
const double min_cell_area, const int min_cell_width);
public:
// default constructor and destructor
BoustrophedonVariantExplorer() {};
~BoustrophedonVariantExplorer() {};
};

View File

@ -0,0 +1,162 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2016 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Florian Jordan
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 11.2016
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <vector>
#include <cmath>
#include <string>
#include <fstream>
#include <stdio.h>
// Eigen library for matrix/vector computations
#include <Eigen/Dense>
// Coin-Or Cbc linear programming solver
#include <coin/OsiClpSolverInterface.hpp>
#include <coin/CoinModel.hpp>
#include <coin/CbcModel.hpp>
#include <coin/CbcHeuristicLocal.hpp>
// if available, use Gurobi
#ifdef GUROBI_FOUND
#include "gurobi_c++.h"
#endif
#include <ipa_building_navigation/A_star_pathplanner.h>
#include <ipa_building_navigation/distance_matrix.h>
#include <ipa_building_navigation/nearest_neighbor_TSP.h>
#include <ipa_room_exploration/room_rotator.h>
#include <ipa_room_exploration/fov_to_robot_mapper.h>
#include <ipa_room_exploration/grid.h>
#include <ipa_room_exploration/timer.h>
#include <geometry_msgs/Pose2D.h>
#define PI 3.14159265359
// This class provides a coverage path planning algorithm, based on the work of
//
// Arain, M. A., Cirillo, M., Bennetts, V. H., Schaffernicht, E., Trincavelli, M., & Lilienthal, A. J. (2015, May). Efficient measurement planning for remote gas sensing with mobile robots. In 2015 IEEE International Conference on Robotics and Automation (ICRA) (pp. 3428-3434). IEEE.
//
// In this work originally a gas sensing robot is assumed, but by specifically defining the visibility function one can
// use this work for coverage path planning, especially for planning a route s.t. the field of view covers the free space.
// In the proposed paper the free space gets discretized into cells that need to be covered. To do so an integer linear
// program of the form
// min C^T Ts
// s.t. VC >= 1 (elementwise)
// C[i] = {0, 1}
// is used, with C a column vector, representing if a sensing pose is used (1) or not (0), Ts a column vector storing the
// sensing cost at a specific sensing pose, V a matrix storing what cell is observed from a sensing pose. The first
// constraint ensures that each cell has been seen at least once and the objective function minimizes the number of sensing
// poses. To improve the efficiency of the algorithm a re-weighted convex relaxation method is used to enhance the
// sparsity of the above linear program. This is done by solving
// min (W o C)^T Ts
// s.t. VC >= 1 (elementwise)
// 0 <= C[i] <= 1
// a few times, with calculating W new after each step. When the sparsity of C doesn't change much anymore or a number of
// iterations is reached, the zero elements of C are discarded and the first linear program is solved for the final result.
// This gives a minimal set of sensing poses s.t. all free cells can be observed with these.
//
class convexSPPExplorator
{
protected:
// function that is used to create and solve a Gurobi optimization problem out of the given matrices and vectors, if
// Gurobi was found on the computer
// @param C output vector with the computed variable values (selection of the sensing poses, see class comment)
// @param V visibility matrix storing which cell is observed from which sensing pose (see class comment)
// @param W weight vector W of the re-weighted convex relaxation (see class comment)
template<typename T>
void solveGurobiOptimizationProblem(std::vector<T>& C, const cv::Mat& V, const std::vector<double>* W);
// function that is used to create and solve a Qsopt optimization problem out of the given matrices and vectors
// (parameters as in solveGurobiOptimizationProblem)
template<typename T>
void solveOptimizationProblem(std::vector<T>& C, const cv::Mat& V, const std::vector<double>* W);
// object to find a path trough the chosen sensing poses by doing a repetitive nearest neighbor algorithm
NearestNeighborTSPSolver tsp_solver_;
// object that plans a path from A to B using the Astar method
AStarPlanner path_planner_;
public:
// constructor
convexSPPExplorator();
// Function that creates an exploration path for a given room. The room has to be drawn in a cv::Mat (filled with Bit-uchar),
// with free space drawn white (255) and obstacles as black (0). It returns a series of 2D poses that show to which positions
// the robot should drive at. The footprint stores a polygon that is used to determine the visibility at a specific
// sensing pose. delta_theta provides an angular step to determine candidates for sensing poses.
// map_resolution in [m/cell]
// starting_position starting position of the robot in [px]
// cell_size_pixel the side length of a grid square in [px]
// delta_theta angular sampling step when creating possible sensing poses in [rad]
// fov_corners_meter vector of corner coordinates of the field of view in [m]
// robot_to_fov_vector_meter the center of the field of view measured from the robot center in [m]
// largest_robot_to_footprint_distance_meter is the radius of the robot footprint for footprint planning (plan_for_footprint==true);
// or the largest distance between robot and a field of view corner (plan_for_footprint==false)
// (for fov planning, this may be set 0 and the function computes the maximum distance fov corner)
// plan_for_footprint if true, plan for the robot footprint of given radius (largest_robot_to_footprint_distance_meter);
// if false, plan for the field of view
void getExplorationPath(const cv::Mat& room_map, std::vector<geometry_msgs::Pose2D>& path, const float map_resolution,
const cv::Point starting_position, const cv::Point2d map_origin, const int cell_size_pixel, const double delta_theta,
const std::vector<Eigen::Matrix<float, 2, 1> >& fov_corners_meter, const Eigen::Matrix<float, 2, 1>& robot_to_fov_vector_meter,
const double largest_robot_to_footprint_distance_meter, const uint sparsity_check_range, const bool plan_for_footprint);
};

View File

@ -0,0 +1,150 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2016 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Florian Jordan
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 01.2017
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <vector>
#include <cmath>
#include <string>
#include <Eigen/Dense>
#include <ipa_room_exploration/meanshift2d.h>
#include <ipa_room_exploration/room_rotator.h>
#include <ipa_room_exploration/fov_to_robot_mapper.h>
#include <ipa_room_exploration/grid.h>
#include <geometry_msgs/Pose2D.h>
#include <geometry_msgs/Polygon.h>
#include <geometry_msgs/Point32.h>
#define PI 3.14159265359
const double PI_2_INV = 1./(0.5*PI);
// Node of the exploration grid: stores its position, status flags and pointers to its neighboring nodes.
struct EnergyExploratorNode
{
	cv::Point center_;		// node position
	bool obstacle_;			// true if this node lies on an obstacle
	bool visited_;			// true once this node has been visited
	std::vector<EnergyExploratorNode*> neighbors_;	// adjacent grid nodes

	// Counts how many of the neighboring nodes are not marked as obstacle.
	int countNonObstacleNeighbors()
	{
		int count = 0;
		for (std::size_t i = 0; i < neighbors_.size(); ++i)
		{
			if (!neighbors_[i]->obstacle_)
				++count;
		}
		return count;
	}
};
// Strict weak ordering for cv::Point: sorts by y first, and by x for points on the same row.
// Used as comparator when creating sets of energyExplorationNodes.
struct cv_Point_cmp
{
	bool operator() (const cv::Point& lhs, const cv::Point& rhs) const
	{
		if (lhs.y != rhs.y)
			return lhs.y < rhs.y;
		return lhs.x < rhs.x;
	}
};
// This class provides the functionality of coverage path planning, based on the work in
//
// Bormann Richard, Joshua Hampp, and Martin Hägele. "New brooms sweep clean-an autonomous robotic cleaning assistant for
// professional office cleaning." Robotics and Automation (ICRA), 2015 IEEE International Conference on. IEEE, 2015.
//
// In this method the area that should be covered is discretized into a grid, using the given grid size parameter d_0. Each of these
// resulting nodes should be visited in order to cover the whole free space. The path starts at the corner of the area that is
// closest to the starting position. After the start node, the next node is chosen from the neighbors that minimizes the energy functional
// E(l,n) = d_t(l,n) + d_r(l,n) + N(n),
// where l is the current robot pose, n the to be checked next pose. The summands represent different aspects that need to
// be taken into account. d_t is the translational distance computed as
// d_t(l,n) = ||l_xy-n_xy||_2/d_0,
// where l_xy or n_xy is the vector storing the position of the corresponding location. d_r is the rotational distance, computed as
// d_r(l,n) = 2*(l_theta-n_theta)/pi,
// where l_theta or n_theta are the angles of the corresponding poses. Function N(n) represents half the number of not yet visited
// locations among the 8 neighbors Nb8(n) around n and is computed as
// N(n) = 4 - sum_(k in Nb8(n)) |k ∩ L|/2,
// where L is the number of already visited nodes. If no accessible node in the direct neighborhood could be found, the algorithm
// searches for the next node in the whole grid. This procedure is repeated until all nodes have been visited.
// This class only produces a static path, regarding the given map in form of a point series. To react on dynamic
// obstacles, one has to do this in upper algorithms.
//
class EnergyFunctionalExplorator
{
protected:
// function to compute the energy function for each pair of nodes
// @param location current node/pose l of the robot
// @param neighbor candidate next node n to be evaluated
// @param cell_size_in_pixel grid spacing d_0, in [pixel]
// @param previous_travel_angle travel direction when arriving at location, used for the rotational distance term d_r
double E(const EnergyExploratorNode& location, const EnergyExploratorNode& neighbor, const double cell_size_in_pixel, const double previous_travel_angle);
public:
// constructor
EnergyFunctionalExplorator();
// Function that creates an exploration path for a given room. The room has to be drawn in a cv::Mat (filled with Bit-uchar),
// with free space drawn white (255) and obstacles as black (0). It returns a series of 2D poses that show to which positions
// the robot should drive at.
// @param map_resolution resolution of the map, in [m/cell]
// @param starting_position starting position of the robot, in [pixel]
// @param grid_spacing_in_pixel grid spacing d_0, in [pixel]
// @param plan_for_footprint if true, plan for the robot footprint instead of the field of view
// @param robot_to_fov_vector offset from robot center to the field of view center (presumably in [m] — confirm in implementation)
void getExplorationPath(const cv::Mat& room_map, std::vector<geometry_msgs::Pose2D>& path, const float map_resolution,
const cv::Point starting_position, const cv::Point2d map_origin, const double grid_spacing_in_pixel,
const bool plan_for_footprint, const Eigen::Matrix<float, 2, 1> robot_to_fov_vector);
};

View File

@ -0,0 +1,509 @@
// OpenCV
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
// standard c++ includes
#include <iostream>
#include <vector>
#include <set>
#include <cmath>
#include <string>
#include <fstream>
#include <iostream>
#include <stdio.h>
// Eigen library for matrix/vector computations
#include <Eigen/Dense>
// Coin-Or library with Cbc mixed integer programming solver
#include <coin/OsiClpSolverInterface.hpp>
#include <coin/CoinModel.hpp>
#include <coin/CbcModel.hpp>
#include <coin/CbcHeuristicFPump.hpp>
// Coin-Or library with Clp linear programming solver
#include <coin/ClpSimplex.hpp>
// Boost libraries
#include <boost/config.hpp>
#include <boost/graph/strong_components.hpp>
#include <boost/graph/adjacency_list.hpp>
// package specific includes
#include <ipa_building_navigation/A_star_pathplanner.h>
#include <ipa_building_navigation/distance_matrix.h>
#include <ipa_building_navigation/contains.h>
#include <ipa_room_exploration/fov_to_robot_mapper.h>
#include <ipa_room_exploration/room_rotator.h>
// msgs
#include <geometry_msgs/Pose2D.h>
#include <geometry_msgs/Polygon.h>
#include <geometry_msgs/Point32.h>
// if available, use Gurobi
#ifdef GUROBI_FOUND
#include "gurobi_c++.h"
#endif
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2016 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Florian Jordan
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 11.2016
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
// This struct is used to create arcs for the flow network, used to plan a coverage path through the given map.
struct arcStruct
{
// start and end node positions of the arc, in [pixel]
cv::Point start_point;
cv::Point end_point;
// cost of the arc (the distance between the two connected edges, see class comment below)
double weight;
// intermediate points along the arc's path
std::vector<cv::Point> edge_points;
};
// typedef for boost, defining a directed graph
typedef boost::adjacency_list <boost::vecS, boost::vecS, boost::directedS > directedGraph;
// TODO: update
// This class provides a coverage path planning algorithm based on a flow network. It spans such a network by going through
// the given room map with a sweep line and defining edges of it whenever it touches an obstacle. From this touchpoint
// the edge is generated in a distance along the sweep line, that is equivalent to the given coverage radius, because the
// free space should determine the area that should be covered after this procedure. After this the arcs of the network
// are generated by taking the distance of two edges as its weight (gives two directed edges), if the arc is not too long or
// not straight enough (to reduce the dimensionality). To find a coverage path trough this flow network then a linear
// program of the form
// min c^T*w
//
// s.t. Vc >= 1 (row-wise)
// sum(flows_into_node) - sum(flows_out_of_node) = 0;
// flows_into_nodes: variables corresponding to arcs flowing into an edge/node, excluding the final arcs
// flows_out_of_nodes: variables corresponding to arcs flowing out of an edge/node, excluding the start arcs
// sum(start_flows) = 1
// sum(terminal_flows) = 1
// sum(flows_of_subset) <= |subset|-1; for any node subset S with |S|>=2
// c(i)={0;1}
// is solved, where 3 stages are taken, the initial step to start the path somewhere, the coverage stage in which the arcs
// should cover the most of the area and the final step where the path can be terminated. These three stages are necessary,
// because the second constraint ensures that the path is connected and not consisting of separated parts. The initial step
// then starts the path at the desired position and the final step allows a termination of this flow conservativity constraint.
// The first constraint ensures that each cell of the free space is covered. The fourth constraint simply ensures to start
// at the node that one specifies. The last constraint prevents that the solution consists of different cycles. A cycle would
// fulfill the conservativity constraint, but of course may not be connected to the rest of the path. By saying that the
// number of goen arcs in a subset of nodes has to be smaller than the size of the subset lesser 1, not all arcs can be
// gone together, which prevents cycles. These cosntraints are added in a lazy fashion, meaning that after a solution has
// been obtained it is checked if the constraints have been violated in any way. If so, the constraints are added and the
// model is resolved. This step repeats, until no more cycles, that are not connected to the rest of the path, appear.
//
#pragma once
#define PI 3.14159265359
#ifdef GUROBI_FOUND
class CyclePreventionCallbackClass: public GRBCallback
{
public:
std::vector<GRBVar>* vars;
int n;
std::vector<std::vector<uint> > flows_out_of_nodes, flows_into_nodes;
std::vector<uint> start_arcs;
std::vector<std::vector<double> > lhs;
std::vector<double> rhs;
CyclePreventionCallbackClass(std::vector<GRBVar>* xvars, int xn, const std::vector<std::vector<uint> >& outflows,
const std::vector<std::vector<uint> >& inflows, const std::vector<uint>& start_indices)
{
vars = xvars;
n = xn;
flows_out_of_nodes = outflows;
flows_into_nodes = inflows;
start_arcs = start_indices;
}
protected:
void callback()
{
try
{
if (where==GRB_CB_MIPSOL)
{
// Found an integer feasible solution
std::vector<double> solution(vars->size());
for (int var=0; var<vars->size(); var++)
{
double current_value = GRBCallback::getSolution(vars->operator[](var));
solution[var] = (current_value>=0) ? current_value : 0.0;
// std::cout << solution[var] << std::endl;
}
// check if cycles appear in the solution
// get the arcs that are used in the previously calculated solution
std::set<uint> used_arcs; // set that stores the indices of the arcs corresponding to non-zero elements in the solution
// go trough the start arcs
std::cout << "getting used arcs: ";
// TODO: add again, better reading out of the paths
// for(uint start_arc=0; start_arc<start_arcs.size(); ++start_arc)
// {
//// std::cout << solution[start_arc] << std::endl;
// if(solution[start_arc]>0.01) // precision of the solver
// {
// // insert start index
// used_arcs.insert(start_arcs[start_arc]);
// std::cout << start_arcs[start_arc] << " ";
// }
// }
// go trough the coverage stage
std::cout << "| ";
for(uint cover_arc=start_arcs.size(); cover_arc<start_arcs.size()+n; ++cover_arc)
{
// std::cout << cover_arc << std::endl;
if(solution[cover_arc]>0.01) // precision of the solver
{
// insert index, relative to the first coverage variable
used_arcs.insert(cover_arc-start_arcs.size());
std::cout << cover_arc-start_arcs.size() << " ";
}
}
std::cout << "| ";
// TODO: add again, better reading out of the paths
// go trough the final stage and find the remaining used arcs
// for(uint flow=start_arcs.size()+n; flow<start_arcs.size()+2*n; ++flow)
// {
// if(solution[flow]>0.01) // precision of the solver
// {
// // insert saved outgoing flow index
// used_arcs.insert(flow-start_arcs.size()-n);
// std::cout << flow-start_arcs.size()-n << " ";
// }
// }
std::cout << "got " << used_arcs.size() << " used arcs" << std::endl;
// construct the directed edges out of the used arcs
std::vector<std::vector<int> > directed_edges; // vector that stores the directed edges for each node
for(uint start_node=0; start_node<flows_out_of_nodes.size(); ++start_node)
{
// check if a used arc is flowing out of the current start node
std::vector<uint> originating_flows;
bool originating = false;
for(std::set<uint>::iterator arc=used_arcs.begin(); arc!=used_arcs.end(); ++arc)
{
if(contains(flows_out_of_nodes[start_node], *arc)==true)
{
originating = true;
originating_flows.push_back(*arc);
}
}
// if the arc is originating from this node, find its destination
std::vector<int> current_directed_edges;
if(originating==true)
{
for(uint end_node=0; end_node<flows_into_nodes.size(); ++end_node)
{
if(end_node==start_node)
continue;
for(std::vector<uint>::iterator arc=originating_flows.begin(); arc!=originating_flows.end(); ++arc)
if(contains(flows_into_nodes[end_node], *arc)==true)
current_directed_edges.push_back(end_node);
}
}
// if the current node doesn't flow into another node insert a vector storing -1
if(current_directed_edges.size()==0)
current_directed_edges.push_back(-1);
// save the found used directed edges
directed_edges.push_back(current_directed_edges);
}
// construct the support graph out of the directed edges
directedGraph support_graph(flows_out_of_nodes.size()); // initialize with the right number of edges
int number_of_not_used_nodes = 0;
for(size_t start=0; start<directed_edges.size(); ++start)
{
// std::cout << "node " << start << " has destinations: " << std::endl;
for(size_t end=0; end<directed_edges[start].size(); ++end)
{
// std::cout << directed_edges[start][end] << std::endl;
// if no destination starting from this node could be found ignore this node
if(directed_edges[start][end]==-1)
{
break;
++number_of_not_used_nodes;
}
// add the directed edge
boost::add_edge(start, directed_edges[start][end], support_graph);
}
}
// search for the strongly connected components
std::vector<int> c(flows_into_nodes.size());
int number_of_strong_components = boost::strong_components(support_graph, boost::make_iterator_property_map(c.begin(), boost::get(boost::vertex_index, support_graph), c[0]));
std::cout << "got " << number_of_strong_components << " strongly connected components" << std::endl;
// for (std::vector <int>::iterator i = c.begin(); i != c.end(); ++i)
// std::cout << "Vertex " << i - c.begin() << " is in component " << *i << std::endl;
// check how many cycles there are in the solution (components with a size >= 2)
int number_of_cycles = 0;
std::set<int> done_components; // set that stores the component indices for that the nodes already were found
for(std::vector<int>::iterator comp=c.begin(); comp!=c.end(); ++comp)
{
// don't check a component more than one time
if(done_components.find(*comp)!=done_components.end())
continue;
int elements = std::count(c.begin(), c.end(), *comp);
if(elements>=2)
++number_of_cycles;
// check if a tsp path is computed (number of arcs is same as number of nodes), each arc is a strongly
// connected component itself or all the nodes belong to one strongly connected component
// TODO: add again, better reading out of the paths
// if(elements==used_arcs.size() || elements==flows_out_of_nodes.size())
// number_of_cycles=0;
// add it to done components
done_components.insert(*comp);
}
std::cout << "current number of cycles: " << number_of_cycles << std::endl;
// if more than one cycle appears find it and add the prevention constraint to the problem and resolve it
if(number_of_cycles!=0)
{
// go trough the components and find components with more than 1 element in it
std::vector<std::vector<uint> > cycle_nodes;
done_components.clear();
for (int component=0; component<c.size(); ++component)
{
// check if the component hasn't been done yet
if(done_components.find(c[component])==done_components.end())
{
std::vector<uint> current_component_nodes;
int elements = std::count(c.begin(), c.end(), c[component]);
// std::cout << c[component] << std::endl;
// TODO: if connected to rest of path --> okay, don't add constraint
if(elements>=2)
{
// std::cout << "size: " << elements << std::endl;
for(std::vector<int>::iterator element=c.begin(); element!=c.end(); ++element)
{
if(*element==c[component])
{
current_component_nodes.push_back(element-c.begin());
}
}
// save the found cycle
if(current_component_nodes.size()>0)
cycle_nodes.push_back(current_component_nodes);
// add it to done components
done_components.insert(c[component]);
}
}
}
// add the cycle prevention constraints for each found cycle to the optimization problem
for(size_t cycle=0; cycle<cycle_nodes.size(); ++cycle)
{
// for each cycle find the arcs that lie in it
// std::cout << "size: " << cycle_nodes[cycle].size() << std::endl;
// std::cout << "constraint: " << std::endl;
std::vector<int> cpc_indices;
std::vector<double> cpc_coefficients;
for(size_t node=0; node<cycle_nodes[cycle].size(); ++node)
{
for(std::vector<uint>::iterator outflow=flows_out_of_nodes[cycle_nodes[cycle][node]].begin(); outflow!=flows_out_of_nodes[cycle_nodes[cycle][node]].end(); ++outflow)
{
for(size_t neighbor=0; neighbor<cycle_nodes[cycle].size(); ++neighbor)
{
if(neighbor==node)
continue;
// if a cycle-node contains this arc is incoming node and was used in the solution, it belongs to the subset
if(contains(flows_into_nodes[cycle_nodes[cycle][neighbor]], *outflow)==true && used_arcs.find(*outflow)!=used_arcs.end())
{
cpc_indices.push_back(*outflow+start_arcs.size());
cpc_coefficients.push_back(1.0);
}
}
}
// gather the arcs in outgoing from all neighbors
// for(size_t neighbor=0; neighbor<cycle_nodes[cycle].size(); ++neighbor)
// {
// // for the node itself gather the outflows that belong to the cycle
// if(neighbor==node)
// {
//// std::cout << "node itself: " << cycle_nodes[cycle][neighbor] << std::endl;
// int flow_index = -1;
// for(std::vector<uint>::const_iterator outflow=flows_out_of_nodes[cycle_nodes[cycle][neighbor]].begin(); outflow!=flows_out_of_nodes[cycle_nodes[cycle][neighbor]].end(); ++outflow)
// {
// // if the current arc is used in the solution, search for it in the incoming flows of
// // the other nodes in the cycle
// if(used_arcs.find(*outflow)!=used_arcs.end())
// for(size_t new_node=0; new_node<cycle_nodes[cycle].size(); ++new_node)
// if(contains(flows_into_nodes[cycle_nodes[cycle][new_node]], *outflow)==true)
// flow_index=*outflow;
// }
//
// // if the outflow is an inflow of another cycle node, add its index to the constraint
// if(flow_index!=-1)
// {
// cpc_indices.push_back(flow_index+start_arcs.size());
// cpc_coefficients.push_back(-1.0);
// }
// }
// // for other nodes gather outflows that are not part of the cycle
// else
// {
//// std::cout << "neighbor" << std::endl;
// bool in_cycle = false;
// for(std::vector<uint>::const_iterator outflow=flows_out_of_nodes[cycle_nodes[cycle][neighbor]].begin(); outflow!=flows_out_of_nodes[cycle_nodes[cycle][neighbor]].end(); ++outflow)
// {
// // search for the current flow in the incoming flows of the other nodes in the cycle
// for(size_t new_node=0; new_node<cycle_nodes[cycle].size(); ++new_node)
// if(contains(flows_into_nodes[cycle_nodes[cycle][new_node]], *outflow)==true)
// in_cycle=true;
//
// // if the arc is not in the cycle add its index
// if(in_cycle==false)
// {
// cpc_indices.push_back(*outflow+start_arcs.size());
// cpc_coefficients.push_back(1.0);
// }
//
// // reset the indication boolean
// in_cycle = false;
// }
// }
// }
}
// std::cout << "adding constraint" << std::endl;
// add the constraint
GRBLinExpr current_cpc_constraint;
std::vector<double> current_lhs;
for(size_t var=0; var<cpc_indices.size(); ++var)
{
current_cpc_constraint += cpc_coefficients[var]*vars->operator[](cpc_indices[var]);
current_lhs.push_back(cpc_coefficients[var]*cpc_indices[var]);
// std::cout << cpc_coefficients[var] << "*" << cpc_indices[var] << std::endl;
}
// std::cout << "adding lazy, size of rhs: " << cycle_nodes[cycle].size()-1 << std::endl;
addLazy(current_cpc_constraint<=cycle_nodes[cycle].size()-1);
lhs.push_back(current_lhs);
rhs.push_back(cycle_nodes[cycle].size()-1);
}
}
}
}
catch (GRBException e)
{
std::cout << "Error number: " << e.getErrorCode() << std::endl;
std::cout << e.getMessage() << std::endl;
}
catch (...)
{
std::cout << "Error during callback" << std::endl;
}
}
};
#endif
// Exploration path planner that models complete room coverage as a flow network:
// candidate sensing poses become nodes, feasible transitions become arcs, and an
// integer linear program (solved with Cbc or Gurobi) selects the cheapest arc set
// that covers the room. Produces a static series of 2D poses for the robot to visit.
class FlowNetworkExplorator
{
protected:
	// function that is used to create and solve a Cbc optimization problem out of the given matrices and vectors, using
	// the three-stage ansatz and single-flow cycle prevention constraints
	// NOTE(review): C is passed by non-const reference — presumably it returns the computed decision variables; confirm against the .cpp
	void solveThreeStageOptimizationProblem(std::vector<double>& C, const cv::Mat& V, const std::vector<double>& weights,
			const std::vector<std::vector<uint> >& flows_into_nodes, const std::vector<std::vector<uint> >& flows_out_of_nodes,
			const std::vector<uint>& start_arcs);

	// function that is used to create and solve a Gurobi optimization problem out of the given matrices and vectors, using
	// the three-stage ansatz and lazy generalized cutset inequalities (GCI)
	void solveGurobiOptimizationProblem(std::vector<double>& C, const cv::Mat& V, const std::vector<double>& weights,
			const std::vector<std::vector<uint> >& flows_into_nodes, const std::vector<std::vector<uint> >& flows_out_of_nodes,
			const std::vector<uint>& start_arcs);

	// function that is used to create and solve a Cbc optimization problem out of the given matrices and vectors, using
	// the three-stage ansatz and lazy generalized cutset inequalities (GCI)
	void solveLazyConstraintOptimizationProblem(std::vector<double>& C, const cv::Mat& V, const std::vector<double>& weights,
			const std::vector<std::vector<uint> >& flows_into_nodes, const std::vector<std::vector<uint> >& flows_out_of_nodes,
			const std::vector<uint>& start_arcs);

	// function that checks if the given point is more close enough to any point in the given vector
	// (returns true if point lies within min_distance of at least one entry of points)
	bool pointClose(const std::vector<cv::Point>& points, const cv::Point& point, const double min_distance);

	// object that plans a path from A to B using the Astar method
	AStarPlanner path_planner_;

public:
	// constructor
	FlowNetworkExplorator();

	// Function that creates an exploration path for a given room. The room has to be drawn in a cv::Mat (filled with Bit-uchar),
	// with free space drawn white (255) and obstacles as black (0). It returns a series of 2D poses that show to which positions
	// the robot should drive at. The footprint stores a polygon that is used to determine the visibility at a specific
	// sensing pose. delta_theta provides an angular step to determine candidates for sensing poses.
	void getExplorationPath(const cv::Mat& room_map, std::vector<geometry_msgs::Pose2D>& path, const float map_resolution,
			const cv::Point starting_position, const cv::Point2d map_origin,
			const int cell_size, const Eigen::Matrix<float, 2, 1>& robot_to_fov_middlepoint_vector, const float coverage_radius,
			const bool plan_for_footprint, const double path_eps, const double curvature_factor, const double max_distance_factor);

	// test function
	void testFunc();
};

View File

@ -0,0 +1,461 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2017 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Richard Bormann
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 03.2017
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
#include <vector>
#include <opencv2/opencv.hpp>
// One horizontal sweep line of a boustrophedon coverage grid. The upper track is
// always populated first; the lower track only carries alternative points when an
// obstacle forced a vertical detour to both sides of the nominal line.
class BoustrophedonLine
{
public:
	std::vector<cv::Point> upper_line;	// points of the upper part of the line (always filled first)
	std::vector<cv::Point> lower_line;	// points of the potentially existing lower part of the line
	bool has_two_valid_lines;			// true only if upper and lower concurrently provide valid alternatives, i.e. two individual tracks of places to visit

	// a freshly created line starts as a single-track line
	BoustrophedonLine()
	{
		has_two_valid_lines = false;
	}
};
// The full boustrophedon grid of a room: simply the ordered collection of all
// BoustrophedonLine sweep lines (top to bottom), exposed with std::vector semantics.
class BoustrophedonGrid : public std::vector<BoustrophedonLine>
{
};
// Utility class that lays coverage grids over a room map: either a plain regular grid
// (generateStandardGrid) or a boustrophedon grid of horizontal sweep lines that dodge
// obstacles vertically (generateBoustrophedonGrid).
class GridGenerator
{
public:
	GridGenerator()
	{
	}

	// generates a standard grid with fixed spacing starting at the upper left accessible cell (if the map is provided
	// with walls grown by the size of half robot radius, i.e. the map is eroded by the inaccessible areas) or starting at half grid distance
	// from the walls (if the original map is provided). If the map is provided in its normal appearance, then the start_grid_at_first_free_pixel
	// parameter should be set to false and the first grid center will be placed with appropriate distance to the walls.
	// room_map = (optionally eroded) map of the room in [pixels] (erosion = growing of walls by robot radius --> inaccessible areas are already excluded)
	// cell_centers = the computed grid cell center points in [pixels]
	// cell_size = the grid spacing in [pixels]
	// complete_cell_test = if this is set false, cells are only inserted to the grid if their centers are accessible. If set to true, then the cell
	//                      is created if there are any accessible cells within the cell area. The cell center is however set to one of the accessible pixels.
	// start_grid_at_first_free_pixel = if set to true, it is assumed that room_map is eroded by the inaccessible areas and hence the first cell center
	//                                  should be placed in the upper left pixel. If this is false, the cell center will be placed off the wall with
	//                                  cell_size spacing.
	void generateStandardGrid(const cv::Mat& room_map, std::vector<cv::Point>& cell_centers, const int cell_size,
			const bool complete_cell_test=true, const bool start_grid_at_first_free_pixel=true)
	{
		// determine min and max coordinates of the room (bounding box of all free pixels)
		int min_y = 1000000, max_y = 0, min_x = 1000000, max_x = 0;
		for (int y=0; y<room_map.rows; y++)
		{
			for (int x=0; x<room_map.cols; x++)
			{
				// free pixel found: extend the bounding box of the accessible area
				if (room_map.at<unsigned char>(y,x)==255)
				{
					if(x<min_x)
						min_x = x;
					if(x>max_x)
						max_x = x;
					if(y<min_y)
						min_y = y;
					if(y>max_y)
						max_y = y;
				}
			}
		}

		// if the map was not eroded, the grid cell centers might be placed with cell_size/2 distance from the wall
		const uint16_t half_cell_size = (uint16_t)cell_size/(uint16_t)2;	// just ensure that the rounding is always reproducible
		if (start_grid_at_first_free_pixel == false)
		{
			min_x += half_cell_size;
			min_y += half_cell_size;
		}

		// create the grid
		if (complete_cell_test == true)
		{
			// cells qualify if any accessible pixel exists within the cell area; the
			// center is then moved to a suitable accessible pixel by completeCellTest()
			for(int y=min_y; y<=max_y; y+=cell_size)
			{
				for(int x=min_x; x<=max_x; x+=cell_size)
				{
					cv::Point cell_center(x,y);
					if (completeCellTest(room_map, cell_center, cell_size) == true)
						cell_centers.push_back(cell_center);
				}
			}
		}
		else
		{
			// only create cells where the cell center is accessible
			for (int y=min_y; y<=max_y; y+=cell_size)
				for (int x=min_x; x<=max_x; x+=cell_size)
					if (room_map.at<unsigned char>(y,x)==255)
						cell_centers.push_back(cv::Point(x,y));
		}
	}

	// checks the whole cell for accessible areas and sets cell_center to the cell-center-most accessible point in the cell
	// room_map = the map with inaccessible areas = 0 and accessible areas = 255
	// cell_center = the provided cell center point to check, is updated with a new cell center if the original cell_center is not accessible but some other pixels in the cell around
	// cell_size = the grid spacing in [pixels]
	// returns true if any accessible cell was found in the cell area and then cell_center is returned with an updated value. If the cell does not contain
	// any accessible pixel, the return value is false.
	static bool completeCellTest(const cv::Mat& room_map, cv::Point& cell_center, const int cell_size)
	{
		const int x = cell_center.x;
		const int y = cell_center.y;
		if (room_map.at<unsigned char>(y,x)==255)
		{
			// just take cell center if accessible
			return true;
		}
		else
		{
			const uint16_t half_cell_size = (uint16_t)cell_size/(uint16_t)2;	// just ensure that the rounding is always reproducible
			const bool even_grid_size = ((cell_size%2)==0);

			// check whether there are accessible pixels within the cell
			const int upper_bound = even_grid_size==true ? half_cell_size-1 : half_cell_size;	// adapt the neighborhood accordingly for even and odd grid sizes
			cv::Mat cell_pixels = cv::Mat::zeros(cell_size, cell_size, CV_8UC1);
			int accessible_pixels = 0;
			// copy the accessible pixels of the cell neighborhood into a local patch (clipped at the map borders)
			for (int dy=-half_cell_size; dy<=upper_bound; ++dy)
			{
				for (int dx=-half_cell_size; dx<=upper_bound; ++dx)
				{
					const int nx = x+dx;
					const int ny = y+dy;
					if (nx<0 || nx>=room_map.cols || ny<0 || ny>=room_map.rows)
						continue;
					if (room_map.at<unsigned char>(ny,nx)==255)
					{
						++accessible_pixels;
						cell_pixels.at<unsigned char>(half_cell_size+dy, half_cell_size+dx) = 255;
					}
				}
			}
			// if there are accessible pixels within the cell, find their center and use this as cell center
			if (accessible_pixels>0)
			{
				// use distance transform to find the pixels with maximum distance to obstacles, take from the maximum distance pixels the one
				// closest to the original cell center
				cv::Mat distances;
#if CV_MAJOR_VERSION<=3
				cv::distanceTransform(cell_pixels, distances, CV_DIST_L2, 5);
#else
				cv::distanceTransform(cell_pixels, distances, cv::DIST_L2, 5);
#endif
				double max_distance = 0.;
				cv::minMaxLoc(distances, 0, &max_distance, 0, &cell_center);
				// minMaxLoc yields patch coordinates --> convert back to map coordinates
				cell_center.x += x-half_cell_size;
				cell_center.y += y-half_cell_size;

				// if there are multiple candidates with same max distance, take the one closest to the center
				double min_squared_center_distance = (x-cell_center.x)*(x-cell_center.x) + (y-cell_center.y)*(y-cell_center.y);
				for (int v=0; v<distances.rows; ++v)
				{
					for (int u=0; u<distances.cols; ++u)
					{
						if ((double)distances.at<float>(v,u)==max_distance)
						{
							const double squared_center_distance = (u-half_cell_size)*(u-half_cell_size)+(v-half_cell_size)*(v-half_cell_size);
							if (squared_center_distance < min_squared_center_distance)
							{
								cell_center = cv::Point(x-half_cell_size+u, y-half_cell_size+v);
								min_squared_center_distance = squared_center_distance;
							}
						}
					}
				}

				// display new center point (debug visualization, intentionally kept for development)
//				cv::Mat disp = distances.clone();
//				cv::Mat disp2;
//				cv::normalize(disp, disp, 0, 1., cv::NORM_MINMAX);
//				cv::resize(disp, disp2, cv::Size(), 10., 10., cv::INTER_AREA);
//				cv::circle(disp2, 10*cv::Point(cell_center.x-x+half_cell_size, cell_center.y-y+half_cell_size), 2, cv::Scalar(0), CV_FILLED);
//				cv::imshow("distance transform", disp2);
//				cv::waitKey();

				return true;
			}
		}
		return false;
	}

	// Generates a grid with fixed vertical spacing and variable distance between the points in a line (parameter), s.t. the algorithm may create
	// ordinary grid points or horizontal lines with defined vertical spacing. The generator considers obstacle safety distances and only places
	// grid cells with sufficient distance to obstacles (parameter). If a grid point cannot be placed because it would be located in an obstacle
	// or in the inaccessible area around an obstacle, then the algorithm tries to shift the point vertically up and down up to grid_spacing
	// (parameter) units. As soon as it finds free space, the grid point is set there. It can happen that such a free point exists above and below
	// the original placement of the grid point. Then the two options are store in an upper and lower trajectory and the user must later chose how
	// to deal with these option.
	// room_map = original map of the room as a CV_8UC1 map with 0=obstacles, 255=free space, in [pixels]
	// inflated_room_map = map of the room with inflated obstacles (can be provided, if cv::Mat() is provided, it is computed here with map_inflation_radius)
	// map_inflation_radius = the number of pixels obstacles shall be inflated if no precomputed inflated_room_map is provided (map_inflation_radius can be -1 otherwise), in [pixels]
	// grid_points = a vector of BoustrophedonLine objects, each of them containing line information in upper_line and optionally another line in lower_line if two_valid_lines is true, in [pixels]
	// min_max_map_coordinates = optionally precomputed min/max coordinates (min_x, max_x, min_y, max_y) of the free space in inflated_room_map, if cv::Vec4i(-1,-1,-1,-1) is provided, min/max map coordinates are computed by this function, in [pixels]
	// grid_spacing = the basic distance between two grid cell centers, is used for vertical grid spacing, in [pixels]
	// half_grid_spacing = the rounded half distance between two grid cell centers (the user shall define how it is rounded), in [pixels]
	// grid_spacing_horizontal = this value allows to specify the horizontal basic distance between two grid cell centers, it can be set to grid_spacing if the basic horizontal spacing shall be identical to the vertical spacing, in [pixels]
	// max_deviation_from_track = maximal allowed shift off the track to both sides for avoiding obstacles on track, setting max_deviation_from_track=grid_spacing is usually a good choice, for negative values max_deviation_from_track is set to grid_spacing, in [pixels]
	static void generateBoustrophedonGrid(const cv::Mat& room_map, cv::Mat& inflated_room_map, const int map_inflation_radius,
			BoustrophedonGrid& grid_points, const cv::Vec4i& min_max_map_coordinates, const int grid_spacing, const int half_grid_spacing,
			const int grid_spacing_horizontal, int max_deviation_from_track = -1)
	{
		// negative deviation means "use the default", i.e. allow shifting up to one grid spacing
		if (max_deviation_from_track < 0)
			max_deviation_from_track = grid_spacing;

		// compute inflated_room_map if not provided (size mismatch is used as the "not provided" signal)
		if (inflated_room_map.rows!=room_map.rows || inflated_room_map.cols!=room_map.cols)
			cv::erode(room_map, inflated_room_map, cv::Mat(), cv::Point(-1, -1), map_inflation_radius);

		// compute min/max map coordinates if necessary
		int min_x=inflated_room_map.cols, max_x=-1, min_y=inflated_room_map.rows, max_y=-1;
		if (min_max_map_coordinates[0]==-1 && min_max_map_coordinates[1]==-1 && min_max_map_coordinates[2]==-1 && min_max_map_coordinates[3]==-1)
		{
			for (int v=0; v<inflated_room_map.rows; ++v)
			{
				for (int u=0; u<inflated_room_map.cols; ++u)
				{
					if (inflated_room_map.at<uchar>(v,u) == 255)
					{
						if (min_x > u)
							min_x = u;
						if (max_x < u)
							max_x = u;
						if (min_y > v)
							min_y = v;
						if (max_y < v)
							max_y = v;
					}
				}
			}
		}
		else
		{
			// take the precomputed bounding box
			min_x = min_max_map_coordinates[0];
			max_x = min_max_map_coordinates[1];
			min_y = min_max_map_coordinates[2];
			max_y = min_max_map_coordinates[3];
		}
		// if the room has no accessible cells, hence no min/max coordinates, return
		if ((min_x==inflated_room_map.cols) || (max_x==-1) || (min_y==inflated_room_map.rows) || (max_y==-1))
			return;

		// create grid
		const int squared_grid_spacing_horizontal = grid_spacing_horizontal*grid_spacing_horizontal;
		//std::cout << "((max_y - min_y) <= grid_spacing): min_y=" << min_y << " max_y=" << max_y << " grid_spacing=" << grid_spacing << std::endl;
		int y=min_y;
		// loop through the vertical grid lines with regular grid spacing
		for (; y<=max_y+half_grid_spacing; y += grid_spacing)	// we use max_y+half_grid_spacing as upper bound to cover the bottom-most line as well
		{
			if (y > max_y)		// this should happen at most once for the bottom line
				y = max_y;

			BoustrophedonLine line;
			const cv::Point invalid_point(-1,-1);
			cv::Point last_added_grid_point_above(-10000,-10000), last_added_grid_point_below(-10000,-10000);	// for keeping the horizontal grid distance
			cv::Point last_valid_grid_point_above(-1,-1), last_valid_grid_point_below(-1,-1);	// for adding the rightmost possible point
			// sweep pixel-wise along the line; the horizontal spacing is enforced below by
			// the squared-distance check against the last added point
			for (int x=min_x; x<=max_x; x+=1)
			{
				// points are added to the grid line as follows:
				// 1. if the original point is accessible --> point is added to upper_line, invalid point (-1,-1) is added to lower_line
				// 2. if the original point is not accessible:
				//    a) and no other point in the y-vicinity --> upper_line and lower_line are not updated
				//    b) but some point above and none below --> valid point is added to upper_line, invalid point (-1,-1) is added to lower_line
				//    c) but some point below and none above --> valid point is added to lower_line, invalid point (-1,-1) is added to upper_line
				//    d) but some point below and above are  --> valid points are added to upper_line and lower_line, respectively

				// 1. check accessibility on regular location
				if (inflated_room_map.at<uchar>(y,x)==255)
				{
					if (squaredPointDistance(last_added_grid_point_above,cv::Point(x,y))>=squared_grid_spacing_horizontal)
					{
						line.upper_line.push_back(cv::Point(x,y));
						line.lower_line.push_back(invalid_point);
						last_added_grid_point_above = cv::Point(x,y);
					}
					else
						last_valid_grid_point_above = cv::Point(x,y);	// store this point and add it to the upper line if it was the rightmost point
				}
				// todo: add parameter to switch else branch off
				else	// 2. check accessibility above or below the targeted point
				{
					// check accessibility above the target location
					bool found_above = false;
					int dy = -1;
					for (; dy>-max_deviation_from_track; --dy)
					{
						if (y+dy>=0 && inflated_room_map.at<uchar>(y+dy,x)==255)
						{
							found_above = true;
							break;
						}
					}
					if (found_above == true)
					{
						if (squaredPointDistance(last_added_grid_point_above,cv::Point(x,y+dy))>=squared_grid_spacing_horizontal)
						{
							line.upper_line.push_back(cv::Point(x,y+dy));
							line.lower_line.push_back(invalid_point);	// can be overwritten below if this point also exists
							last_added_grid_point_above = cv::Point(x,y+dy);
						}
						else
							last_valid_grid_point_above = cv::Point(x,y+dy);	// store this point and add it to the upper line if it was the rightmost point
					}

					// check accessibility below the target location
					bool found_below = false;
					dy = 1;
					for (; dy<max_deviation_from_track; ++dy)
					{
						if (y+dy<inflated_room_map.rows && inflated_room_map.at<uchar>(y+dy,x)==255)
						{
							found_below = true;
							break;
						}
					}
					if (found_below == true)
					{
						if (squaredPointDistance(last_added_grid_point_below,cv::Point(x,y+dy))>=squared_grid_spacing_horizontal)
						{
							if (found_above == true)	// update the existing entry (case d: both alternatives exist)
							{
								line.has_two_valid_lines = true;
								line.lower_line.back().x = x;
								line.lower_line.back().y = y+dy;
							}
							else	// create a new entry (case c: only a point below exists)
							{
								line.upper_line.push_back(invalid_point);
								line.lower_line.push_back(cv::Point(x,y+dy));
							}
							last_added_grid_point_below = cv::Point(x,y+dy);
						}
						else
							last_valid_grid_point_below = cv::Point(x,y+dy);	// store this point and add it to the lower line if it was the rightmost point
					}
				}
			}
			// add the rightmost points if available
			if (last_valid_grid_point_above.x > -1 && last_valid_grid_point_above.x > last_added_grid_point_above.x)
			{
				// upper point is valid
				line.upper_line.push_back(last_valid_grid_point_above);
				if (last_valid_grid_point_below.x > -1 && last_valid_grid_point_below.x > last_added_grid_point_below.x)
					line.lower_line.push_back(last_valid_grid_point_below);
				else
					line.lower_line.push_back(invalid_point);
			}
			else
			{
				// upper point is invalid
				if (last_valid_grid_point_below.x > -1 && last_valid_grid_point_below.x > last_added_grid_point_below.x)
				{
					// lower point is valid
					line.upper_line.push_back(invalid_point);
					line.lower_line.push_back(last_valid_grid_point_below);
				}
			}

			// clean the grid line data
			// 1. if there are no valid points --> do not add the line
			// 2. if two_valid_lines is true, there are two individual lines available with places to visit
			// 3. else there is just one valid line with data potentially distributed over upper_line and lower_line
			BoustrophedonLine cleaned_line;
			if (line.upper_line.size()>0 && line.lower_line.size()>0)	// 1. check that there is valid data in the lines
			{
				// 2. if two_valid_lines is true, create two individual lines with places to visit
				if (line.has_two_valid_lines == true)
				{
					cleaned_line.has_two_valid_lines = true;
					for (size_t i=0; i<line.upper_line.size(); ++i)
					{
						if (line.upper_line[i]!=invalid_point)
							cleaned_line.upper_line.push_back(line.upper_line[i]);
						if (line.lower_line[i]!=invalid_point)
							cleaned_line.lower_line.push_back(line.lower_line[i]);
					}
				}
				else	// 3. there is just one valid line that needs to be merged from lower_line and upper_line
				{
					for (size_t i=0; i<line.upper_line.size(); ++i)
					{
						if (line.upper_line[i]!=invalid_point)
							cleaned_line.upper_line.push_back(line.upper_line[i]);	// keep the upper_line as is
						else	// the upper_line does not contain a valid point
							if (line.lower_line[i]!=invalid_point)	// move the valid point from lower to upper line
								cleaned_line.upper_line.push_back(line.lower_line[i]);
					}
				}

				// add cleaned line to the grid
				grid_points.push_back(cleaned_line);
			}
		}
	}

	// squared Euclidean distance between two pixel coordinates (avoids the sqrt of cv::norm)
	static double squaredPointDistance(const cv::Point& p1, const cv::Point& p2)
	{
		return (p1.x-p2.x)*(p1.x-p2.x) + (p1.y-p2.y)*(p1.y-p2.y);
	}
};

View File

@ -0,0 +1,108 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2015 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Florian Jordan
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 03.2016
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <vector>
#include <cmath>
#include <string>
#include <Eigen/Dense>
#include <ipa_building_navigation/tsp_solvers.h>
#include <ipa_building_navigation/distance_matrix.h>
#include <ipa_room_exploration/room_rotator.h>
#include <ipa_room_exploration/fov_to_robot_mapper.h>
#include <ipa_room_exploration/grid.h>
#include <geometry_msgs/Pose2D.h>
#include <geometry_msgs/Polygon.h>
#include <geometry_msgs/Point32.h>
// Class that generates a room exploration path by laying a small grid over the given map and then planning the best path through
// all points, by defining a traveling salesman problem (TSP). This class only produces a static path, regarding the given map
// in form of a point series. To react on dynamic obstacles, one has to do this in upper algorithms.
//
class GridPointExplorator
{
public:
	// constructor
	GridPointExplorator();

	// separate, interruptible thread for the external solvers
	// optimal_order receives the computed visiting order of the grid points
	void tsp_solver_thread_concorde(ConcordeTSPSolver& tsp_solver, std::vector<int>& optimal_order,
			const cv::Mat& distance_matrix, const std::map<int,int>& cleaned_index_to_original_index_mapping, const int start_node);

	// same as above but runs the genetic TSP solver
	void tsp_solver_thread_genetic(GeneticTSPSolver& tsp_solver, std::vector<int>& optimal_order,
			const cv::Mat& distance_matrix, const std::map<int,int>& cleaned_index_to_original_index_mapping, const int start_node);

	// generic solver thread: tsp_solver selects the solver implementation by id
	void tsp_solver_thread(const int tsp_solver, std::vector<int>& optimal_order, const cv::Mat& original_map,
			const std::vector<cv::Point>& points, const double downsampling_factor, const double robot_radius, const double map_resolution,
			const int start_node);

	// Function that creates an exploration path for a given room. The room has to be drawn in a cv::Mat (filled with Bit-uchar),
	// with free space drawn white (255) and obstacles as black (0). It returns a series of 2D poses that show to which positions
	// the robot should drive at.
	void getExplorationPath(const cv::Mat& room_map, std::vector<geometry_msgs::Pose2D>& path, const double map_resolution,
			const cv::Point starting_position, const cv::Point2d map_origin, const int cell_size, const bool plan_for_footprint,
			const Eigen::Matrix<float, 2, 1> robot_to_fov_vector, int tsp_solver, int64_t tsp_solver_timeout);
};

View File

@ -0,0 +1,132 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2016 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Richard Bormann
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 02.2017
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
#include <stddef.h>

#include <algorithm>
#include <utility>
#include <vector>
// Weighted histogram over values of type T in the range [lower_bound, upper_bound].
// Besides the accumulated weight per bin, all raw (value, weight) pairs are stored
// per bin so that a sub-bin-precision peak location can be recovered.
template <typename T>
class Histogram
{
public:
	// lower_bound/upper_bound: value range covered by the histogram
	// histogram_bins: number of equally sized bins (must be >= 1)
	Histogram(const T lower_bound, const T upper_bound, const size_t histogram_bins)
	{
		lower_bound_ = lower_bound;
		upper_bound_ = upper_bound;
		// guard against a degenerate (empty) range to avoid a division by zero
		range_inverse_ = (upper_bound_ > lower_bound_) ? 1.0/(upper_bound_-lower_bound_) : 1.0;
		histogram_bins_ = histogram_bins;
		data_.resize(histogram_bins);
		for (size_t i=0; i<data_.size(); ++i)
			data_[i] = 0.;
		raw_data_.resize(histogram_bins);
	}

	// Adds one sample with the given weight. Values outside [lower_bound, upper_bound]
	// are clamped into the first/last bin.
	void addData(const T val, const double weight=1.0)
	{
		// Compute the bin index in floating point first and clamp before the cast:
		// converting a negative floating point value directly to size_t is undefined
		// behavior (the previous std::max((size_t)0, ...) clamp was a no-op on an
		// unsigned type and could not prevent it).
		const double bin_estimate = (double)((val - lower_bound_) * range_inverse_ * (T)histogram_bins_);
		size_t bin = 0;
		if (bin_estimate > 0.)
			bin = std::min(histogram_bins_-1, (size_t)bin_estimate);
		data_[bin] += weight;
		raw_data_[bin].push_back(std::pair<T, double>(val, weight));
	}

	// Returns the index of the bin with the largest accumulated weight
	// (the first such bin in case of ties; 0 for an empty histogram).
	size_t getMaxBin()
	{
		double max_val = 0.;
		size_t max_bin = 0;
		for (size_t i=0; i<data_.size(); ++i)
		{
			if (max_val < data_[i])
			{
				max_val = data_[i];
				max_bin = i;
			}
		}
		return max_bin;
	}

	// Returns the weighted mean of the raw samples stored in the strongest bin,
	// i.e. a sub-bin-precision estimate of the histogram peak.
	// Returns 0 if no data has been entered yet.
	T getMaxBinPreciseVal()
	{
		if (raw_data_.size() == 0 || raw_data_.size() != data_.size())
			return 0.;
		const size_t max_bin = getMaxBin();
		T sum = 0.;
		T weight_sum = 0.;
		RawData& data = raw_data_[max_bin];
		for (size_t i=0; i<data.size(); ++i)
		{
			sum += data[i].first*data[i].second;
			weight_sum += data[i].second;
		}
		// avoid a division by zero for an empty bin
		if (weight_sum==0)
			weight_sum = 1;
		return sum/weight_sum;
	}

protected:
	typedef std::vector< std::pair< T, double> > RawData;

	std::vector<double> data_;			// stores the histogram (accumulated weight per bin)
	std::vector<RawData> raw_data_;		// stores all entered data pairs (data, weight) for each histogram bin
	T lower_bound_;			// lowest possible value
	T upper_bound_;			// highest possible value
	T range_inverse_;		// = 1.0/(upper_bound_-lower_bound_), or 1.0 for a degenerate range
	size_t histogram_bins_;	// number of histogram bins
};

View File

@ -0,0 +1,78 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2015 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_segmentation
*
* \author
* Author: Richard Bormann
* \author
* Supervised by:
*
* \date Date of creation: 22.07.2015
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#pragma once
// Mean-shift clustering on 2D points (declarations only; the definitions live in the
// corresponding .cpp file, so the notes below are inferred from the signatures —
// verify against the implementation).
class MeanShift2D
{
public:
	MeanShift2D(void) {};
	// Runs mean-shift iterations on 'data' with the given kernel 'bandwidth' and writes the
	// shifted points to 'filtered_data'; runs at most 'maximumIterations' rounds.
	void filter(const std::vector<cv::Vec2d>& data, std::vector<cv::Vec2d>& filtered_data, const double bandwidth, const int maximumIterations=100);
	// Groups the filtered points into convergence points; 'convergence_sets' receives, for each
	// convergence point, the indices of the input points that converged to it.
	// NOTE(review): 'sensitivity' presumably controls the merge distance — confirm in the .cpp.
	void computeConvergencePoints(const std::vector<cv::Vec2d>& filtered_data, std::vector<cv::Vec2d>& convergence_points, std::vector< std::vector<int> >& convergence_sets, const double sensitivity);
	// Estimates a representative center of a room from its cells.
	// map_resolution in [m/cell]
	cv::Vec2d findRoomCenter(const cv::Mat& room_image, const std::vector<cv::Vec2d>& room_cells, double map_resolution);
};

View File

@ -0,0 +1,148 @@
#include <iostream>
#include <list>
#include <string>
#include <vector>
#include <algorithm>
#include <cmath>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <geometry_msgs/Pose2D.h>
#include <geometry_msgs/Polygon.h>
#include <Eigen/Dense>
#include <ipa_room_exploration/neuron_class.h>
#include <ipa_room_exploration/fov_to_robot_mapper.h>
#include <ipa_room_exploration/room_rotator.h>
#include <ipa_room_exploration/grid.h>
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2016 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Florian Jordan
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 11.2016
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
#define PI 3.14159265359
// Definition of the operator == for geometry_msgs::Pose2D, needed for checking how often a
// specific pose already occurs in the path computed so far. Only the x/y position is
// compared; the orientation theta is deliberately ignored.
inline bool operator==(const geometry_msgs::Pose2D& A, const geometry_msgs::Pose2D& B)
{
	return (A.x == B.x) && (A.y == B.y);
}
// This class provides a room explorator based on an artificial neural network. This network is used to compute a
// coverage path s.t. the whole environment is visited at least once. The used method is stated in:
//
// Yang, Simon X., and Chaomin Luo. "A neural network approach to complete coverage path planning." IEEE Transactions on Systems, Man, and Cybernetics, Part B (Cybernetics) 34.1 (2004): 718-724.
//
// In this algorithm the free space gets sampled into several neurons that span a neural network over the space. Each
// neuron then needs to be visited once to clean it. Each neuron has a state that is used later to determine the next
// neuron that needs to be visited. Going then through the space over time produces a path that covers all neurons, see
// the stated paper for reference.
// This class provides the functionality to provide a room map and discretize it into several neurons, based on the given
// sampling distance and the radius of the robot/field-of-view (assuming that the footprint/fov gets approximated by an
// inner circle). After this the coverage path gets computed based on the stated paper. This implementation only provides
// a static path; any reaction to unexpected behavior (e.g. sudden obstacles) needs to be done in an upper program.
//
class NeuralNetworkExplorator
{
protected:
	// grid of neurons sampled over the free space of the given map
	std::vector<std::vector<Neuron> > neurons_;
	// step size used for integrating the states of the neurons
	double step_size_;
	// parameters for the neural network; A_, B_, D_, E_, mu_ parameterize the state update
	// (see the paper above). NOTE(review): delta_theta_weight_ presumably weights direction
	// changes when choosing the next neuron — confirm in the .cpp implementation.
	double A_, B_, D_, E_, mu_, delta_theta_weight_;
public:
	// constructor (defined in the corresponding .cpp file)
	NeuralNetworkExplorator();
	// function to set the step size to a certain value
	void setStepSize(double step_size)
	{
		step_size_ = step_size;
	}
	// function to set all parameters needed for the neural network at once
	void setParameters(double A, double B, double D, double E, double mu, double step_size, double delta_theta_weight)
	{
		A_ = A;
		B_ = B;
		D_ = D;
		E_ = E;
		mu_ = mu;
		step_size_ = step_size;
		delta_theta_weight_ = delta_theta_weight;
	}
	// Function that creates an exploration path for a given room. The room has to be drawn in a cv::Mat (filled with Bit-uchar),
	// with free space drawn white (255) and obstacles as black (0). It returns a series of 2D poses that show to which positions
	// the robot should drive at.
	void getExplorationPath(const cv::Mat& room_map, std::vector<geometry_msgs::Pose2D>& path, const float map_resolution,
			const cv::Point starting_position, const cv::Point2d map_origin, const double grid_spacing_in_pixel,
			const bool plan_for_footprint, const Eigen::Matrix<float, 2, 1> robot_to_fov_vector, bool show_path_evolution=false);
};

View File

@ -0,0 +1,211 @@
#include <iostream>
#include <list>
#include <string>
#include <vector>
#include <algorithm>
#include <cmath>
#include <opencv2/opencv.hpp>
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2016 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Florian Jordan
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 11.2016
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
// This class provides a neuron for an artificial neural network. This network is used to compute a coverage path s.t.
// the whole environment is visited at least once. The used method is stated in:
//
// Yang, Simon X., and Chaomin Luo. "A neural network approach to complete coverage path planning." IEEE Transactions on Systems, Man, and Cybernetics, Part B (Cybernetics) 34.1 (2004): 718-724.
//
// In this algorithm the free space gets sampled into several neurons that span a neural network over the space. Each
// neuron then needs to be visited once to clean it. Each neuron has a state that is used later to determine the next
// neuron that needs to be visited. Going then trough the space over time produces a path that covers all neurons, see
// the stated paper for reference.
// This class additionally contains the position of the neuron, the parameters used to update the state of it and
// booleans to mark if this neuron has already been cleaned or not. The function I() provides an "external input" to the
// neuron, see the paper for reference.
//
class Neuron
{
protected:
// vector that stores the direct neighbors of this neuron
std::vector<Neuron*> neighbors_;
// vector that stores the weights to the neighbors --> used for updating the state
std::vector<double> weights_;
// position of the neuron
cv::Point position_;
// booleans to check if this neuron is cleaned or an obstacle
bool visited_, obstacle_;
// state (activity) of this neuron at current time step and last
double state_, previous_state_;
// parameters used to update the state
double A_, B_, D_, E_, mu_;
// step size for updating the state
double step_size_;
// function to generate the external input
double I()
{
if(obstacle_ == true)
return -1.0*E_;
else if(visited_ == false)
return E_;
else
return 0.0;
}
public:
// constructor
Neuron(cv::Point position, double A, double B, double D, double E, double mu, double step_size, bool obstacle, bool visited=false)
{
state_ = 0;
previous_state_ = 0;
position_ = position;
A_ = A;
B_ = B;
D_ = D;
E_ = E;
mu_ = mu;
step_size_ = step_size;
obstacle_ = obstacle;
visited_ = visited;
}
// function to insert a neighbor
void addNeighbor(Neuron* new_neighbor)
{
// save pointer to neighbor
neighbors_.push_back(new_neighbor);
// calculate distance and corresponding weight to it
cv::Point difference = position_ - new_neighbor->position_;
double distance = cv::norm(difference);
weights_.push_back(mu_/distance);
}
// function to get the position of the neuron
cv::Point getPosition()
{
return position_;
}
// function to get the state of the neuron, return the previous state if wanted
double getState(bool previous=false)
{
if(previous == true)
return previous_state_;
return state_;
}
// function to save the current state as previous state
void saveState()
{
previous_state_ = state_;
}
// function to get the neighbors
void getNeighbors(std::vector<Neuron*>& neighbors)
{
neighbors = neighbors_;
}
// function to mark the neuron as cleaned
void markAsVisited()
{
visited_ = true;
}
// function to check if the current neuron is an obstacle or not
bool isObstacle()
{
return obstacle_;
}
// function to check if the neuron has been visited or not
bool visitedNeuron()
{
return visited_;
}
// function to update the state of the neuron using euler discretization
void updateState()
{
// get external input
const double input = I();
// get the current sum of weights times the state of the neighbor
double weight_sum = 0;
for(size_t neighbor=0; neighbor<neighbors_.size(); ++neighbor)
weight_sum += weights_[neighbor]*std::max(neighbors_[neighbor]->getState(true), 0.0);
// calculate current gradient --> see stated paper from the beginning
double gradient = -A_*state_ + (B_-state_)*(std::max(input, 0.0) + weight_sum) - (D_+state_)*std::max(-1.0*input, 0.0);
// update state using euler method
state_ += step_size_*gradient;
}
};

View File

@ -0,0 +1,104 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2017 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Richard Bormann
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 02.2017
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
#include <ipa_room_exploration/histogram.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
#include <geometry_msgs/Pose2D.h>
class RoomRotator
{
public:
RoomRotator()
{
}
void rotateRoom(const cv::Mat& room_map, cv::Mat& rotated_room_map, const cv::Mat& R, const cv::Rect& bounding_rect);
// compute the affine rotation matrix for rotating a room into parallel alignment with x-axis (longer side of the room is aligned with x-axis)
// R is the transform
// bounding_rect is the ROI of the warped image
// rotation_offset is an optional offset to the determined rotation, in [rad]
// returns the computed optimal rotation angle, in [rad]
double computeRoomRotationMatrix(const cv::Mat& room_map, cv::Mat& R, cv::Rect& bounding_rect, const double map_resolution,
const cv::Point* center=0, const double rotation_offset=0.);
// computes the major direction of the walls from a map (preferably one room)
// the map (room_map, CV_8UC1) is black (0) at impassable areas and white (255) on drivable areas
double computeRoomMainDirection(const cv::Mat& room_map, const double map_resolution);
// transforms a vector of points back to the original map and generates poses
void transformPathBackToOriginalRotation(const std::vector<cv::Point2f>& fov_middlepoint_path, std::vector<geometry_msgs::Pose2D>& path_fov_poses, const cv::Mat& R);
// converts a point path to a pose path with angles
void transformPointPathToPosePath(const std::vector<cv::Point2f>& point_path, std::vector<geometry_msgs::Pose2D>& pose_path);
// converts a point path to a pose path with angles, the points are already stored in pose_path
void transformPointPathToPosePath(std::vector<geometry_msgs::Pose2D>& pose_path);
// get min/max coordinates of free pixels (255)
void getMinMaxCoordinates(const cv::Mat& map, cv::Point& min_room, cv::Point& max_room);
};

View File

@ -0,0 +1,183 @@
/*
* timer.h
*
* Created on: May 13, 2013
* Author: rmb-ce
*/
// from: http://snipplr.com/view/40650/timer-class-for-both-unixlinuxmac-and-windows-system/
//////////////////
// How to Use ////
//////////////////
//#include <iostream>
//#include "timer.h"
//using namespace std;
//int main()
//{
// Timer timer;
//
// // start timer
// timer.start();
//
// // do something
// ...
//
// // stop timer
// timer.stop();
//
// // print the elapsed time in millisec
// cout << timer.getElapsedTimeInMilliSec() << " ms.\n";
//
// return 0;
//}
//////////////////////////////////////////////////////////////////////////////
// Timer.h
// =======
// High Resolution Timer.
// This timer is able to measure the elapsed time with 1 micro-second accuracy
// in both Windows, Linux and Unix system
//
// AUTHOR: Song Ho Ahn (song.ahn@gmail.com)
// CREATED: 2003-01-13
// UPDATED: 2006-01-13
//
// Copyright (c) 2003 Song Ho Ahn
//////////////////////////////////////////////////////////////////////////////
#ifndef TIMER_H_DEF
#define TIMER_H_DEF
#ifdef WIN32 // Windows system specific
//#include <windows.h>
#else // Unix based system specific
#include <sys/time.h>
#endif
#include <stdlib.h>
// High resolution timer (~1 microsecond accuracy) for Windows, Linux and Unix.
class Timer
{
public:
	// sets up the platform specific counters and immediately starts measuring
	Timer()
	{
#ifdef WIN32
		QueryPerformanceFrequency(&frequency);
		startCount.QuadPart = 0;
		endCount.QuadPart = 0;
#else
		startCount.tv_sec = startCount.tv_usec = 0;
		endCount.tv_sec = endCount.tv_usec = 0;
#endif
		stopped = 0;
		startTimeInMicroSec = 0;
		endTimeInMicroSec = 0;
		start();
	}

	~Timer()
	{
	}

	// (re)starts the measurement: records the current time as the start point
	// and clears the stopped flag
	void start()
	{
		stopped = 0;
#ifdef WIN32
		QueryPerformanceCounter(&startCount);
#else
		gettimeofday(&startCount, NULL);
#endif
	}

	// stops the measurement: records the current time as the end point
	void stop()
	{
		stopped = 1;
#ifdef WIN32
		QueryPerformanceCounter(&endCount);
#else
		gettimeofday(&endCount, NULL);
#endif
	}

	// elapsed time in seconds (alias for getElapsedTimeInSec())
	double getElapsedTime()
	{
		return getElapsedTimeInSec();
	}

	// elapsed time in seconds
	double getElapsedTimeInSec()
	{
		return getElapsedTimeInMicroSec() * 0.000001;
	}

	// elapsed time in milliseconds
	double getElapsedTimeInMilliSec()
	{
		return getElapsedTimeInMicroSec() * 0.001;
	}

	// Elapsed time in microseconds; the other getElapsedTime* methods delegate here.
	// While the timer has not been stopped, the end point is refreshed with the
	// current time on every call.
	double getElapsedTimeInMicroSec()
	{
#ifdef WIN32
		if (!stopped)
			QueryPerformanceCounter(&endCount);
		startTimeInMicroSec = startCount.QuadPart * (1000000.0 / frequency.QuadPart);
		endTimeInMicroSec = endCount.QuadPart * (1000000.0 / frequency.QuadPart);
#else
		if (!stopped)
			gettimeofday(&endCount, NULL);
		startTimeInMicroSec = (startCount.tv_sec * 1000000.0) + startCount.tv_usec;
		endTimeInMicroSec = (endCount.tv_sec * 1000000.0) + endCount.tv_usec;
#endif
		return endTimeInMicroSec - startTimeInMicroSec;
	}

protected:
private:
	double startTimeInMicroSec;	// start point in micro-seconds
	double endTimeInMicroSec;	// end point in micro-seconds
	int stopped;				// flag: has stop() been called since the last start()
#ifdef WIN32
	LARGE_INTEGER frequency;	// ticks per second
	LARGE_INTEGER startCount;
	LARGE_INTEGER endCount;
#else
	timeval startCount;
	timeval endCount;
#endif
};
#endif // TIMER_H_DEF

View File

@ -0,0 +1,596 @@
#include <ipa_room_exploration/convex_sensor_placement_explorator.h>
// Constructor: nothing to initialize here — all inputs are passed per call to getExplorationPath().
convexSPPExplorator::convexSPPExplorator()
{
}
// Function that is used to create and solve a Gurobi optimization problem out of the given matrices and vectors, if
// Gurobi was found on the computer (no-op otherwise, guarded by GUROBI_FOUND).
// C: output vector of optimization variables; C.size() determines the number of variables and on return C[i]
//    carries the value of variable i in the computed solution.
// V: coverage matrix (uchar), V(row,col)==1 iff cell 'row' can be covered from candidate pose 'col'.
// W: optional weight vector (same length as C); if provided, the variables become continuous in [0,1] with these
//    objective weights (re-weighted convex relaxation stage), otherwise binary variables with weight 1 are used.
template<typename T>
void convexSPPExplorator::solveGurobiOptimizationProblem(std::vector<T>& C, const cv::Mat& V, const std::vector<double>* W)
{
#ifdef GUROBI_FOUND
	std::cout << "Creating and solving linear program with Gurobi." << std::endl;
	// initialize the problem
	GRBEnv *env = new GRBEnv();
	GRBModel model = GRBModel(*env);

	// vector that stores the variables of the problem
	std::vector<GRBVar> optimization_variables;

	// add the optimization variables to the problem
	int number_of_variables = 0;
	for(size_t var=0; var<C.size(); ++var) // initial stage
	{
		if(W != NULL) // if a weight-vector is provided, use it to set the weights for the variables
		{
			GRBVar current_variable = model.addVar(0.0, 1.0, W->operator[](var), GRB_CONTINUOUS);
			optimization_variables.push_back(current_variable);
			++number_of_variables;
		}
		else
		{
			GRBVar current_variable = model.addVar(0.0, 1.0, 1.0, GRB_BINARY);
			optimization_variables.push_back(current_variable);
			++number_of_variables;
		}
	}
	std::cout << "number of variables in the problem: " << number_of_variables << std::endl;

	// inequality constraints to ensure that every position has been seen at least once
	// NOTE(review): V.rows/V.cols are int; the size_t loop counters cause signed/unsigned
	// comparison warnings but are safe here since the dimensions are non-negative.
	for(size_t row=0; row<V.rows; ++row)
	{
		// gather the indices of the variables that are used in this constraint (row), i.e. where V[row][column] == 1
		std::vector<int> variable_indices;
		for(size_t col=0; col<V.cols; ++col)
			if(V.at<uchar>(row, col) == 1)
				variable_indices.push_back((int) col);

		// add the constraint, if the current cell can be covered by the given arcs, indices=1 in this constraint
		if(variable_indices.size()>0)
		{
			GRBLinExpr current_coverage_constraint;
			for(size_t var=0; var<variable_indices.size(); ++var)
				current_coverage_constraint += optimization_variables[variable_indices[var]];
			model.addConstr(current_coverage_constraint>=1);
		}
	}

	// solve the optimization
	model.optimize();

	// retrieve solution
	std::cout << "retrieving solution" << std::endl;
	for(size_t var=0; var<number_of_variables; ++var)
	{
		C[var]= optimization_variables[var].get(GRB_DoubleAttr_X);
	}

	// garbage collection
	// NOTE(review): 'env' is deleted here while 'model' (constructed from *env) is still alive;
	// the GRBModel destructor runs after this delete at the end of the scope. Verify against the
	// Gurobi C++ API whether a model may outlive its environment — if not, 'model' should be
	// placed in an inner scope so it is destroyed before the environment is freed.
	delete env;
#endif
}
// Function that creates a Coin-OR CBC optimization problem and solves it, using the given matrices and vectors.
// This is the fallback solver used when Gurobi is not available.
// C: output vector of optimization variables; C.size() determines the number of variables and on return C[i]
//    carries the value of variable i in the computed solution.
// V: coverage matrix (uchar), V(row,col)==1 iff cell 'row' can be covered from candidate pose 'col'.
// W: optional weight vector; if provided, the variables are continuous in [0,1] with these objective weights
//    (re-weighted convex relaxation stage), otherwise integer variables in [0,1] with weight 1 are used.
template<typename T>
void convexSPPExplorator::solveOptimizationProblem(std::vector<T>& C, const cv::Mat& V, const std::vector<double>* W)
{
	// initialize the problem
	CoinModel problem_builder;

	ROS_INFO("Creating and solving linear program.");

	// add the optimization variables to the problem
	int rval;	// NOTE(review): unused variable
	for(size_t variable=0; variable<C.size(); ++variable)
	{
		if(W != NULL) // if a weight-vector is provided, use it to set the weights for the variables
		{
			problem_builder.setColBounds(variable, 0.0, 1.0);
			problem_builder.setObjective(variable, W->operator[](variable));
		}
		else
		{
			problem_builder.setColBounds(variable, 0.0, 1.0);
			problem_builder.setObjective(variable, 1.0);
			problem_builder.setInteger(variable);
		}
	}

	// inequality constraints to ensure that every position has been seen at least once
	for(size_t row=0; row<V.rows; ++row)
	{
		// gather the indices of the variables that are used in this constraint (row), i.e. where V[row][column] == 1
		std::vector<int> variable_indices;
		for(size_t col=0; col<V.cols; ++col)
			if(V.at<uchar>(row, col) == 1)
				variable_indices.push_back((int) col);

		// all indices are 1 in this constraint
		std::vector<double> variable_coefficients(variable_indices.size(), 1.0);

		// add the constraint (coverage of this cell >= 1)
		// NOTE(review): unlike the Gurobi variant above, the row is added even when
		// variable_indices is empty; &variable_indices[0] on an empty vector is undefined
		// behavior — verify that every free cell always has at least one covering pose.
		problem_builder.addRow((int) variable_indices.size(), &variable_indices[0], &variable_coefficients[0], 1.0);
	}

	// load the created LP problem to the solver
	OsiClpSolverInterface LP_solver;
	OsiClpSolverInterface* solver_pointer = &LP_solver;
	solver_pointer->loadFromCoinModel(problem_builder);

	// testing: dump the problem in LP format to file "lin_cpp_prog.lp"
	solver_pointer->writeLp("lin_cpp_prog", "lp");

	// solve the created optimization problem
	CbcModel model(*solver_pointer);
	model.solver()->setHintParam(OsiDoReducePrint, true, OsiHintTry);
	model.initialSolve();
	model.branchAndBound();

	// retrieve solution
	const double * solution = model.solver()->getColSolution();
	for(size_t res=0; res<C.size(); ++res)
	{
		C[res] = solution[res];
	}
}
// Function that is used to get a coverage path that covers the free space of the given map. It is programmed according to
//
// Arain, M. A., Cirillo, M., Bennetts, V. H., Schaffernicht, E., Trincavelli, M., & Lilienthal, A. J. (2015, May).
// Efficient measurement planning for remote gas sensing with mobile robots.
// In 2015 IEEE International Conference on Robotics and Automation (ICRA) (pp. 3428-3434). IEEE.
//
// In this paper a linear program is used to get the minimal set of sensing poses to check the whole area for gas, but it
// can also be used to do coverage path planning by defining the visibility function in a certain way. To do so the
// following steps are done
// I. Discretize the given map into cells by using the given cell size. The free cells are the ones that have a center
// that is a white pixel in the map (value=255), i.e. belongs to the free space. After this compute the candidate
// sensing poses, which determine the candidate poses the robot can later take.
// II. Construct the matrices that are used in the linear program. These are:
// W: weight matrix for the re-weighted convex relaxation method
// V: visibility matrix that stores 1 if cell i can be observed from candidate pose j (V[i,j] = 1) or 0 else
// (when planning for footprint: if cell i can be covered from pose j)
// III. Solve the optimization problems in the following order
// 1. Iteratively solve the weighted optimization problem to approximate the problem by a convex optimization. This
// speeds up the solution and is done until the sparsity of the optimization variables doesn't change anymore,
// i.e. converged, or a specific number of iterations is reached. To measure the sparsity a l^0_eps measure is
// used, that checks |{i: c[i] <= eps}|. In each step the weights are adapted with respect to the previous solution.
// 2. After the convex relaxation procedure is finished the candidate poses that are not chosen previously, i.e.
// those that have an corresponding optimization variable equal to 0, are discarded and the matrices of the
// optimization problem are updated. With the reduced problem the original unweighted problem is solved to get
// the final solution, that shows which candidate poses should be visited to observe/visit the whole free space.
// IV. Read out the chosen sensing poses (those corresponding to a variable of the solution equal to one) and create a
// path through all of them. The path is created by applying a repetitive nearest neighbor algorithm. This algorithm
// solves a TSP for the chosen poses, with each pose being the start node once. Out of the computed paths then the
// shortest is chosen, which is a Hamiltonian cycle through the graph. After this path has been obtained, determine
// the pose in the cycle that is closest to the start position, which becomes the start of the fov-path.
void convexSPPExplorator::getExplorationPath(const cv::Mat& room_map, std::vector<geometry_msgs::Pose2D>& path,
const float map_resolution, const cv::Point starting_position, const cv::Point2d map_origin,
const int cell_size_pixel, const double delta_theta, const std::vector<Eigen::Matrix<float, 2, 1> >& fov_corners_meter,
const Eigen::Matrix<float, 2, 1>& robot_to_fov_vector_meter, const double largest_robot_to_footprint_distance_meter,
const uint sparsity_check_range, const bool plan_for_footprint)
{
const int half_cell_size = cell_size_pixel/2;
// *********************** I. Find the main directions of the map and rotate it in this manner. ***********************
// rotate map
cv::Mat R;
cv::Rect bbox;
cv::Mat rotated_room_map;
RoomRotator room_rotation;
const double room_rotation_angle = room_rotation.computeRoomRotationMatrix(room_map, R, bbox, map_resolution);
room_rotation.rotateRoom(room_map, rotated_room_map, R, bbox);
// transform starting position
std::vector<cv::Point> starting_point_vector(1, starting_position); // opencv syntax
cv::transform(starting_point_vector, starting_point_vector, R);
cv::Point rotated_starting_position = starting_point_vector[0];
// ************* II. Go trough the map and discretize it. *************
// get cells
// compute the basic Boustrophedon grid lines
std::vector<cv::Point> cell_centers_rotated, cell_centers; // in [pixels]
cv::Mat inflated_rotated_room_map;
BoustrophedonGrid grid_lines;
GridGenerator::generateBoustrophedonGrid(rotated_room_map, inflated_rotated_room_map, half_cell_size, grid_lines, cv::Vec4i(-1, -1, -1, -1),
cell_size_pixel, half_cell_size, cell_size_pixel);
// convert grid points format
for (BoustrophedonGrid::iterator line=grid_lines.begin(); line!=grid_lines.end(); ++line)
{
cell_centers_rotated.insert(cell_centers_rotated.end(), line->upper_line.begin(), line->upper_line.end());
if (line->has_two_valid_lines == true)
cell_centers_rotated.insert(cell_centers_rotated.end(), line->lower_line.begin(), line->lower_line.end());
}
// rotate back to original image
cv::Mat R_inv;
cv::invertAffineTransform(R, R_inv);
std::vector<cv::Point> fov_middlepoint_path_transformed;
cv::transform(cell_centers_rotated, cell_centers, R_inv);
// // print grid
// cv::Mat point_map = room_map.clone();
// for(std::vector<cv::Point>::iterator point = cell_centers.begin(); point != cell_centers.end(); ++point)
// {
// cv::circle(point_map, *point, 2, cv::Scalar(127), CV_FILLED);
// std::cout << " - " << *point << "\n";
// }
// cv::imshow("grid", point_map);
// cv::waitKey();
// get candidate sensing poses
std::vector<geometry_msgs::Pose2D> candidate_sensing_poses;
double delta_angle = (plan_for_footprint == true ? 4.*PI : delta_theta);
for(std::vector<cv::Point>::iterator center=cell_centers.begin(); center!=cell_centers.end(); ++center)
{
for(double angle=room_rotation_angle; angle<2.0*PI+room_rotation_angle; angle+=delta_angle)
{
// create and save pose
geometry_msgs::Pose2D candidate_pose;
candidate_pose.x = center->x;
candidate_pose.y = center->y;
candidate_pose.theta = angle;
while (candidate_pose.theta < -PI)
candidate_pose.theta += 2*PI;
while (candidate_pose.theta > PI)
candidate_pose.theta -= 2*PI;
candidate_sensing_poses.push_back(candidate_pose);
}
}
// ************* III. Construct the matrices needed in the linear program. *************
// set or compute largest_robot_to_footprint_distance_pixel depending on plan_for_footprint
double max_dist=0.;
if (plan_for_footprint==true || largest_robot_to_footprint_distance_meter > 0.)
max_dist = largest_robot_to_footprint_distance_meter;
else
{
// find largest_robot_to_footprint_distance_pixel by checking the fov corners
for (size_t i=0; i<fov_corners_meter.size(); ++i)
{
const double dist = fov_corners_meter[i].norm();
if (dist > max_dist)
max_dist = dist;
}
}
const double largest_robot_to_footprint_distance_pixel = max_dist / map_resolution;
const double cell_outcircle_radius_pixel = cell_size_pixel/sqrt(2);
// construct W
int number_of_candidates=candidate_sensing_poses.size();
std::vector<double> W(number_of_candidates, 1.0); // initial weights
// construct V
cv::Mat V = cv::Mat::zeros(cell_centers.size(), number_of_candidates, CV_8U); // binary variables
// check observable cells from each candidate pose
const double map_resolution_inverse = 1./map_resolution;
for(std::vector<geometry_msgs::Pose2D>::iterator pose=candidate_sensing_poses.begin(); pose!=candidate_sensing_poses.end(); ++pose)
{
// get the transformed field of view
// get the rotation matrix
const float sin_theta = std::sin(pose->theta);
const float cos_theta = std::cos(pose->theta);
Eigen::Matrix<float, 2, 2> R_fov;
R_fov << cos_theta, -sin_theta, sin_theta, cos_theta;
// transform field of view points, if the planning should be done for the field of view
std::vector<cv::Point> transformed_fov_points;
Eigen::Matrix<float, 2, 1> pose_as_matrix;
if(plan_for_footprint==false)
{
pose_as_matrix << (pose->x*map_resolution)+map_origin.x, (pose->y*map_resolution)+map_origin.y; // convert to [meter]
for(size_t point = 0; point < fov_corners_meter.size(); ++point)
{
// linear transformation
Eigen::Matrix<float, 2, 1> transformed_vector = pose_as_matrix + R_fov * fov_corners_meter[point];
// save the transformed point as cv::Point, also check if map borders are satisfied and transform it into pixel
// values
cv::Point current_point = cv::Point((transformed_vector(0, 0) - map_origin.x)*map_resolution_inverse, (transformed_vector(1, 0) - map_origin.y)*map_resolution_inverse);
current_point.x = std::max(current_point.x, 0);
current_point.y = std::max(current_point.y, 0);
current_point.x = std::min(current_point.x, room_map.cols);
current_point.y = std::min(current_point.y, room_map.rows);
transformed_fov_points.push_back(current_point);
}
}
// for each pose check the cells that are closer than the max distance from robot to fov-corner and more far away
// than the min distance, also only check points that span an angle to the robot-to-fov vector smaller than the
// max found angle to the corners
// when planning for the robot footprint simply check if its distance to the pose is at most the given coverage radius
for(std::vector<cv::Point>::iterator neighbor=cell_centers.begin(); neighbor!=cell_centers.end(); ++neighbor)
{
// compute pose to neighbor vector
Eigen::Matrix<float, 2, 1> pose_to_neighbor;
pose_to_neighbor << neighbor->x-pose->x, neighbor->y-pose->y;
double distance = pose_to_neighbor.norm();
// if neighbor is in the possible distance range check it further, distances given in [pixel]
if(plan_for_footprint==false && distance<=largest_robot_to_footprint_distance_pixel)
{
if(cv::pointPolygonTest(transformed_fov_points, *neighbor, false) >= 0) // point inside
{
// check if the line from the robot pose to the neighbor crosses an obstacle, if so it is not observable from the pose
cv::LineIterator border_line(room_map, cv::Point(pose->x, pose->y), *neighbor, 8); // opencv implementation of bresenham algorithm, 8: color, irrelevant
bool hit_obstacle = false;
for(size_t i = 0; i < border_line.count; ++i, ++border_line)
if(room_map.at<uchar>(border_line.pos()) == 0)
hit_obstacle = true;
if(hit_obstacle == false)
{
V.at<uchar>(neighbor-cell_centers.begin(), pose-candidate_sensing_poses.begin()) = 1;
}
else // neighbor cell not observable
V.at<uchar>(neighbor-cell_centers.begin(), pose-candidate_sensing_poses.begin()) = 0;
}
else // neighbor cell outside the field of view
V.at<uchar>(neighbor-cell_centers.begin(), pose-candidate_sensing_poses.begin()) = 0;
}
// check if neighbor is covered by footprint when planning for it
else if(plan_for_footprint==true && (distance+cell_outcircle_radius_pixel)<=largest_robot_to_footprint_distance_pixel)
{
// check if the line from the robot pose to the neighbor crosses an obstacle, if so it is not observable from the pose
cv::LineIterator border_line(room_map, cv::Point(pose->x, pose->y), *neighbor, 8); // opencv implementation of bresenham algorithm, 8: color, irrelevant
bool hit_obstacle = false;
for(size_t i = 0; i < border_line.count; ++i, ++border_line)
if(room_map.at<uchar>(border_line.pos()) == 0)
hit_obstacle = true;
if(hit_obstacle == false)
V.at<uchar>(neighbor-cell_centers.begin(), pose-candidate_sensing_poses.begin()) = 1;
else // neighbor cell not observable
V.at<uchar>(neighbor-cell_centers.begin(), pose-candidate_sensing_poses.begin()) = 0;
}
else // point not in the right range to be inside the fov
V.at<uchar>(neighbor-cell_centers.begin(), pose-candidate_sensing_poses.begin()) = 0;
}
}
std::cout << "number of optimization variables: " << W.size() << std::endl;
// testing
// for(size_t i=0; i<cell_centers.size(); ++i)
// {
// cv::Mat black_map = cv::Mat(room_map.rows, room_map.cols, room_map.type(), cv::Scalar(0));
// cv::circle(black_map, cell_centers[i], 2, cv::Scalar(127), CV_FILLED);
// for(size_t j=0; j<V.cols; ++j)
// {
// if(V.at<uchar>(i, j) == 1)
// {
// cv::circle(black_map, cv::Point(candidate_sensing_poses[j].x, candidate_sensing_poses[j].y), 2, cv::Scalar(100), CV_FILLED);
// cv::imshow("candidates", black_map);
// cv::waitKey();
// }
// }
// }
// ************* IV. Solve the different linear problems. *************
// 1. solve the weighted optimization problem until a convergence in the sparsity is reached or a defined number of
// iterations is reached
std::vector<double> C(W.size()); //initialize the objective vector
bool sparsity_converged = false; // boolean to check, if the sparsity of C has converged to a certain value
double weight_epsilon = 0.0; // parameter that is used to update the weights after one solution has been obtained
uint number_of_iterations = 0;
std::vector<uint> sparsity_measures; // vector that stores the computed sparsity measures to check convergence
const double euler_constant = std::exp(1.0);
Timer tim;
do
{
// increase number of iterations
++number_of_iterations;
// solve optimization of the current step
#ifdef GUROBI_FOUND
solveGurobiOptimizationProblem(C, V, &W);
#else
solveOptimizationProblem(C, V, &W);
#endif
// update epsilon and W
const int exponent = 1 + (number_of_iterations - 1)*0.1;
weight_epsilon = std::pow(1./(euler_constant-1.), exponent);
for(size_t weight=0; weight<W.size(); ++weight)
W[weight] = weight_epsilon/(weight_epsilon + C[weight]);
// measure sparsity of C to check terminal condition, used measure: l^0_eps (|{i: c[i] <= eps}|)
uint sparsity_measure = 0;
for(size_t variable=0; variable<C.size(); ++variable)
if(C[variable]<=0.01)
++sparsity_measure;
sparsity_measures.push_back(sparsity_measure);
// check terminal condition, i.e. if the sparsity hasn't improved in the last n steps using l^0_eps measure,
// if enough iterations have been done yet
if(sparsity_measures.size() >= sparsity_check_range)
{
uint number_of_last_measure = 0;
for(std::vector<uint>::reverse_iterator measure=sparsity_measures.rbegin(); measure!=sparsity_measures.rbegin()+sparsity_check_range && measure!=sparsity_measures.rend(); ++measure)
if(*measure >= sparsity_measures.back())
++number_of_last_measure;
if(number_of_last_measure == sparsity_check_range)
sparsity_converged = true;
}
std::cout << "Iteration: " << number_of_iterations << ", sparsity: " << sparsity_measures.back() << std::endl;
} while(sparsity_converged == false && number_of_iterations <= 150 && tim.getElapsedTimeInSec() < 1200); // wait no longer than 20 minutes
// 2. Reduce the optimization problem by discarding the candidate poses that correspond to an optimization variable
// equal to 0, i.e. those that are not considered any further.
uint new_number_of_variables = 0;
cv::Mat V_reduced = cv::Mat(cell_centers.size(), 1, CV_8U); // initialize one column because opencv wants it this way, add other columns later
std::vector<geometry_msgs::Pose2D> reduced_sensing_candidates;
for(std::vector<double>::iterator result=C.begin(); result!=C.end(); ++result)
{
if(*result != 0.0)
{
// increase number of optimization variables
++new_number_of_variables;
// gather column corresponding to this candidate pose and add it to the new observability matrix
cv::Mat column = V.col(result-C.begin());
cv::hconcat(V_reduced, column, V_reduced);
// save the new possible sensing candidate
reduced_sensing_candidates.push_back(candidate_sensing_poses[result-C.begin()]);
}
}
// remove the first initial column
V_reduced = V_reduced.colRange(1, V_reduced.cols);
// solve the final optimization problem
std::cout << "new_number_of_variables=" << new_number_of_variables << std::endl;
std::vector<int> C_reduced(new_number_of_variables);
#ifdef GUROBI_FOUND
solveGurobiOptimizationProblem(C_reduced, V_reduced, NULL);
#else
solveOptimizationProblem(C_reduced, V_reduced, NULL);
#endif
// ************* V. Retrieve solution and find a path trough the chosen poses. *************
// read out solution
std::vector<cv::Point> chosen_positions; // vector to determine the tsp solution, in [pixels]
std::vector<geometry_msgs::Pose2D> chosen_poses; // in [px,px,rad]
for(std::vector<int>::iterator result=C_reduced.begin(); result!=C_reduced.end(); ++result)
{
if(*result == 1)
{
chosen_poses.push_back(reduced_sensing_candidates[result-C_reduced.begin()]);
chosen_positions.push_back(cv::Point(reduced_sensing_candidates[result-C_reduced.begin()].x, reduced_sensing_candidates[result-C_reduced.begin()].y));
}
}
if (chosen_positions.size()==0)
{
std::cout << "Convex SPP: chosen_positions is empty." << std::endl;
return;
}
// get the distance matrix
ROS_INFO("Constructing distance matrix");
cv::Mat distance_matrix;
DistanceMatrix distance_matrix_computation;
distance_matrix_computation.constructDistanceMatrix(distance_matrix, room_map, chosen_positions, 0.25, 0.0, map_resolution, path_planner_);
// go trough the matrix and multiply the distances between the poses with a factor corresponding to the difference of
// the travel angle from the first pose to the other pose
for(size_t pose=0; pose<chosen_poses.size(); ++pose)
{
for(size_t neighbor=0; neighbor<chosen_poses.size(); ++neighbor)
{
// don't look at pose to itself
if(pose!=neighbor)
{
// get travel angle from current pose to next pose
cv::Point current_point = cv::Point(chosen_poses[pose].x, chosen_poses[pose].y);
cv::Point next_point = cv::Point(chosen_poses[neighbor].x, chosen_poses[neighbor].y);
cv::Point vector = next_point - current_point;
float angle = std::atan2(vector.y, vector.x);
// abs angle distance between poses
double delta_theta = std::abs(chosen_poses[pose].theta - angle);
// compute penalizing function
double penalizing_factor = 1 + (delta_theta/PI);
// update distances
distance_matrix.at<double>(pose, neighbor) = penalizing_factor*distance_matrix.at<double>(pose, neighbor);
distance_matrix.at<double>(neighbor, pose) = penalizing_factor*distance_matrix.at<double>(neighbor, pose); //symmetrical-Matrix --> saves half the computation time
}
}
}
// do the repetitive nearest neighbor algorithm
ROS_INFO("Solving TSP with repetitive nearest neighbor");
std::vector<int> best_order;
double min_distance = 1e9;
if (chosen_positions.size()>100)
std::cout << "0 10 20 30 40 50 60 70 80 90 100" << std::endl;
for(int start=0; start<chosen_positions.size(); ++start)
{
if (chosen_positions.size()>500 && start%(std::max(1,(int)chosen_positions.size()/100))==0)
std::cout << "." << std::flush;
// obtain nearest neighbor solution for this start
std::vector<int> current_order = tsp_solver_.solveNearestTSP(distance_matrix, start);
// get pathlength for this solution
double current_pathlength = 0.0;
for(size_t vertex=0; vertex<current_order.size(); ++vertex)
current_pathlength += distance_matrix.at<double>(current_order[vertex], current_order[(vertex+1)%current_order.size()]);
// if current path is better than previous one save it
if(current_pathlength < min_distance)
{
min_distance = current_pathlength;
best_order = current_order;
}
}
std::cout << std::endl;
// find the node that is closest to the starting position
min_distance = 1e9;
int starting_index = 0;
for(std::vector<cv::Point>::iterator position=chosen_positions.begin(); position!=chosen_positions.end(); ++position)
{
// calculate current distance
double current_distance = cv::norm(rotated_starting_position - *position);
// check if current length is smaller than optimal
if(current_distance < min_distance)
{
min_distance = current_distance;
starting_index = position-chosen_positions.begin();
}
}
// create the path starting from the found start
std::vector<cv::Point2f> fov_poses;
std::vector<int>::iterator start = std::find(best_order.begin(), best_order.end(), starting_index); // obtain iterator to index in best order to start path creation from there
for(size_t pose=start-best_order.begin(); path.size()!=chosen_poses.size() && fov_poses.size()!=chosen_poses.size(); ++pose)
{
// check if end has been reached, if true start from the beginning again
if(pose == best_order.size())
pose = 0;
// insert pose mapped to global coordinates
geometry_msgs::Pose2D current_pose;
if (plan_for_footprint == false)
{
// take the viewing directions as computed for fov mode, convert locations from pixels to meters
current_pose.x = (chosen_poses[best_order[pose]].x*map_resolution)+map_origin.x;
current_pose.y = (chosen_poses[best_order[pose]].y*map_resolution)+map_origin.y;
current_pose.theta = chosen_poses[best_order[pose]].theta;
path.push_back(current_pose);
}
else
{
// for footprint planning the viewing direction has to be computed from the trajectory
fov_poses.push_back(cv::Point2f(chosen_poses[best_order[pose]].x, chosen_poses[best_order[pose]].y));
}
}
// *********************** VI. Get the robot path out of the fov path. ***********************
// determine the correct viewing angles for the poses (footprint planning just used one fixed dummy direction)
if (plan_for_footprint == true)
{
// compute viewing directions
room_rotation.transformPointPathToPosePath(fov_poses, path);
// convert to meters
for (size_t i=0; i<path.size(); ++i)
{
path[i].x = path[i].x * map_resolution + map_origin.x;
path[i].y = path[i].y * map_resolution + map_origin.y;
}
}
}

View File

@ -0,0 +1,388 @@
#include <ipa_room_exploration/energy_functional_explorator.h>
// Default constructor - no members require explicit initialization here.
EnergyFunctionalExplorator::EnergyFunctionalExplorator()
{

}
// Computes the energy functional E(location, neighbor) used to rank candidate next nodes during path construction.
// Lower values are better. The functional is the sum of four terms (cf. Bormann et al., ICRA 2015):
//	1. normalized translational distance from location to neighbor (in cell units),
//	2. absolute rotational distance between the previous travel direction and the direction towards neighbor
//	   (normalized by PI_2_INV, a project-defined constant),
//	3. a neighborhood term rewarding neighbors whose free neighbors have already been visited (4 - 0.5 * #visited),
//	4. a wall term rewarding neighbors adjacent to obstacles (0.72 - 0.09 * #wall neighbors).
// Parameters:
//	location - the node the robot currently stands on
//	neighbor - the candidate node to be evaluated
//	cell_size_in_pixel - grid cell edge length in [pixel], used to normalize the translational distance
//	previous_travel_angle - travel direction of the previous path step in [rad]
// Returns the energy value of moving from location to neighbor.
double EnergyFunctionalExplorator::E(const EnergyExploratorNode& location, const EnergyExploratorNode& neighbor, const double cell_size_in_pixel, const double previous_travel_angle)
{
	// accumulate in double (the return type) instead of float to avoid needless precision loss
	double energy_functional = 0.0;

	// 1. translational distance, normalized to cell units
	cv::Point diff = neighbor.center_ - location.center_;
	energy_functional += cv::norm(diff)/cell_size_in_pixel;

	// 2. rotational distance: angle difference between the previous travel direction and the direction to the neighbor,
	//    normalized to (-PI, PI] before taking the absolute value
	const double travel_angle_to_node = std::atan2(diff.y, diff.x);
	double diff_angle = travel_angle_to_node - previous_travel_angle;
	while (diff_angle < -PI)
		diff_angle += 2*PI;
	while (diff_angle > PI)
		diff_angle -= 2*PI;
	energy_functional += std::abs(diff_angle)*PI_2_INV;

	// 3. neighboring function: the more free neighbors of 'neighbor' have already been visited, the smaller this term
	int visited_neighbors = 0;
	for(std::vector<EnergyExploratorNode*>::const_iterator n=neighbor.neighbors_.begin(); n!=neighbor.neighbors_.end(); ++n)
		if ((*n)->obstacle_ == false && (*n)->visited_ == true)
			++visited_neighbors;
	energy_functional += 4. - 0.5*visited_neighbors;

	// 4. wall term: prefer nodes adjacent to obstacles (keeps the path along walls)
	int wall_points = 0;
	for(std::vector<EnergyExploratorNode*>::const_iterator n=neighbor.neighbors_.begin(); n!=neighbor.neighbors_.end(); ++n)
		if ((*n)->obstacle_ == true)
			++wall_points;
	energy_functional += 0.72 - 0.09*wall_points;

	//std::cout << "E: " << cv::norm(diff)/cell_size << " + " << std::abs(diff_angle)*PI_2_INV << " + " << 4. - 0.5*visited_neighbors << " + " << 0.72 - 0.09*wall_points << "   angles: " << travel_angle_to_node << ", " << previous_travel_angle << " diff ang: " << diff_angle << std::endl;

	return energy_functional;
}
// Function that plans a coverage path trough the given map, using the method proposed in
//
// Bormann Richard, Joshua Hampp, and Martin Hägele. "New brooms sweep clean-an autonomous robotic cleaning assistant for
// professional office cleaning." Robotics and Automation (ICRA), 2015 IEEE International Conference on. IEEE, 2015.
//
// This method discretizes the free space that should be covered into several nodes. Each of the nodes has to be covered in order
// to cover the whole area. The path starts at the node that is closest to the given starting position and chooses the next node as
// the one that minimizes the energy functional provided in the paper above. To do this, the following steps are done.
// I. The free area gets discretized into several nodes, using the given cell_size parameter, starting at the upper left white pixel of
// the room. Whenever the overlaid grid then hits a white pixel, this point is added as a node. Then after all nodes have been found
// the direct 8 neighbors for each node are found, which will be used later in the energy functional.
// II. After all nodes have been found, the coverage path is computed.
//		i. The start node gets chosen as the one that is closest to the given starting position and is an edge of the given room, i.e.
//			a node that has fewer than 4 neighbors.
// ii. The next node is then iteratively chosen from the directly neighboring ones, by finding the node that minimizes the given
// energy functional and wasn't visited before.
// iii.If in the neighborhood no accessible point could be found, search for the next node in the whole grid to continue the path.
// iv. This procedure is repeated, until all created nodes have been covered.
// III. If wanted use the given vector from the robot middlepoint to the fov middlepoint to map the fov poses to the
// robot footprint poses. To do so simply a vector transformation is applied. If the computed robot pose is not in the
// free space, another accessible point is generated by finding it on the radius around the fov middlepoint s.t.
// the distance to the last robot position is minimized. If this is not wanted one has to set the corresponding
// Boolean to false (shows that the path planning should be done for the robot footprint).
//
void EnergyFunctionalExplorator::getExplorationPath(const cv::Mat& room_map, std::vector<geometry_msgs::Pose2D>& path, const float map_resolution,
const cv::Point starting_position, const cv::Point2d map_origin, const double grid_spacing_in_pixel,
const bool plan_for_footprint, const Eigen::Matrix<float, 2, 1> robot_to_fov_vector)
{
const int grid_spacing_as_int = std::floor(grid_spacing_in_pixel);
const int half_grid_spacing_as_int = std::floor(grid_spacing_in_pixel*0.5);
// *********************** I. Find the main directions of the map and rotate it in this manner. ***********************
cv::Mat R;
cv::Rect bbox;
cv::Mat rotated_room_map;
RoomRotator room_rotation;
room_rotation.computeRoomRotationMatrix(room_map, R, bbox, map_resolution);
room_rotation.rotateRoom(room_map, rotated_room_map, R, bbox);
// compute min/max room coordinates
cv::Point min_room(1000000, 1000000), max_room(0, 0);
for (int v=0; v<rotated_room_map.rows; ++v)
{
for (int u=0; u<rotated_room_map.cols; ++u)
{
if (rotated_room_map.at<uchar>(v,u)==255)
{
min_room.x = std::min(min_room.x, u);
min_room.y = std::min(min_room.y, v);
max_room.x = std::max(max_room.x, u);
max_room.y = std::max(max_room.y, v);
}
}
}
cv::Mat inflated_rotated_room_map;
cv::erode(rotated_room_map, inflated_rotated_room_map, cv::Mat(), cv::Point(-1, -1), half_grid_spacing_as_int);
// *********************** II. Find the nodes and their neighbors ***********************
// get the nodes in the free space
std::vector<std::vector<EnergyExploratorNode> > nodes; // 2-dimensional vector to easily find the neighbors
int number_of_nodes = 0;
// // compute min/max room coordinates
// int min_y = 1000000, min_x = 1000000;
// for (int v=0; v<rotated_room_map.rows; ++v)
// {
// for (int u=0; u<rotated_room_map.cols; ++u)
// {
// if (rotated_room_map.at<uchar>(v,u)==255)
// {
// min_x = std::min(min_x, u);
// min_y = std::min(min_y, v);
// }
// }
// }
// if (min_x-grid_spacing_as_int > 0)
// min_x -= grid_spacing_as_int;
// if (min_y-grid_spacing_as_int > 0)
// min_y -= grid_spacing_as_int;
// todo: create grid in external class - it is the same in all approaches
// todo: if first/last row or column in grid has accessible areas but center is inaccessible, create a node in the accessible area
for(int y=min_room.y+half_grid_spacing_as_int; y<max_room.y; y+=grid_spacing_as_int)
{
// for the current row create a new set of neurons to span the network over time
std::vector<EnergyExploratorNode> current_row;
for(int x=min_room.x+half_grid_spacing_as_int; x<max_room.x; x+=grid_spacing_as_int)
{
// create node if the current point is in the free space
EnergyExploratorNode current_node;
current_node.center_ = cv::Point(x,y);
//if(rotated_room_map.at<uchar>(y,x) == 255) // could make sense to test all pixels of the cell, not only the center
if (GridGenerator::completeCellTest(inflated_rotated_room_map, current_node.center_, grid_spacing_as_int) == true)
{
current_node.obstacle_ = false;
current_node.visited_ = false;
++number_of_nodes;
}
// add the obstacle nodes as already visited
else
{
current_node.obstacle_ = true;
current_node.visited_ = true;
++number_of_nodes;
}
current_row.push_back(current_node);
}
// insert the current row into grid
nodes.push_back(current_row);
}
std::cout << "found " << number_of_nodes << " nodes" << std::endl;
// find the neighbors for each node
EnergyExploratorNode* first_accessible_node = 0;
std::vector<EnergyExploratorNode*> corner_nodes; // vector that stores the corner nodes, i.e. nodes with 3 or less neighbors
for(size_t row=0; row<nodes.size(); ++row)
{
for(size_t column=0; column<nodes[row].size(); ++column)
{
std::vector<EnergyExploratorNode*>& current_neighbors = nodes[row][column].neighbors_;
for(int dy=-1; dy<=1; ++dy)
{
// don't exceed the current row
if(row+dy < 0 || row+dy >= nodes.size())
continue;
// get the neighbors left from the current neuron
if(column > 0)
current_neighbors.push_back(&nodes[row+dy][column-1]);
// get the nodes on the same column as the current neuron
if(dy != 0)
current_neighbors.push_back(&nodes[row+dy][column]);
// get the nodes right from the current neuron
if(column < nodes[row].size()-1)
current_neighbors.push_back(&nodes[row+dy][column+1]);
}
// check if the current node is a corner, i.e. nodes that have 3 or less neighbors that are not obstacles
int non_obstacle_neighbors = nodes[row][column].countNonObstacleNeighbors();
if(non_obstacle_neighbors<=3 && nodes[row][column].obstacle_==false)
corner_nodes.push_back(&nodes[row][column]);
if (first_accessible_node==0 && nodes[row][column].obstacle_==false)
first_accessible_node = &nodes[row][column];
}
}
std::cout << "found neighbors, corners: " << corner_nodes.size() << std::endl;
if (first_accessible_node == 0)
{
std::cout << "Warning: there are no accessible points in this room." << std::endl;
return;
}
// // testing
// cv::Mat test_map = rotated_room_map.clone();
// for (size_t i=0; i<nodes.size(); ++i)
// for (size_t j=0; j<nodes[i].size(); ++j)
// if (nodes[i][j].obstacle_==false)
// cv::circle(test_map, nodes[i][j].center_, 2, cv::Scalar(127), CV_FILLED);
// cv::imshow("grid", test_map);
// cv::waitKey();
// for(size_t i=0; i<nodes.size(); ++i)
// {
// for(size_t j=0; j<nodes[i].size(); ++j)
// {
// cv::Mat test_map = rotated_room_map.clone();
//
// std::vector<EnergyExploratorNode*> neighbors = nodes[i][j].neighbors_;
// for(std::vector<EnergyExploratorNode*>::iterator n=neighbors.begin(); n!=neighbors.end(); ++n)
// cv::circle(test_map, (*n)->center_, 2, cv::Scalar(127), CV_FILLED);
//
// cv::imshow("neighbors", test_map);
// cv::waitKey();
// }
// }
// *********************** III. Plan the coverage path ***********************
// i. find the start node of the path as a corner that is closest to the starting position
std::vector<cv::Point> starting_point_vector(1, starting_position); // opencv syntax
cv::transform(starting_point_vector, starting_point_vector, R);
const cv::Point rotated_starting_point = starting_point_vector[0]; // Point that keeps track of the last point after the boustrophedon path in each cell
EnergyExploratorNode* start_node = first_accessible_node;
double min_distance = 1e10;
for(std::vector<EnergyExploratorNode*>::iterator corner=corner_nodes.begin(); corner!=corner_nodes.end(); ++corner)
{
cv::Point diff = (*corner)->center_ - rotated_starting_point;
double current_distance = diff.x*diff.x+diff.y*diff.y;
if(current_distance<=min_distance)
{
start_node = *corner;
min_distance = current_distance;
}
}
std::cout << "start node: " << start_node->center_ << std::endl;
// insert start node into coverage path
std::vector<cv::Point2f> fov_coverage_path;
fov_coverage_path.push_back(cv::Point2f(start_node->center_.x, start_node->center_.y));
start_node->visited_ = true; // mark visited nodes as obstacles
// ii. starting at the start node, find the coverage path, by choosing the node that min. the energy functional
EnergyExploratorNode* last_node = start_node;
double previous_travel_angle = 0; //always use x-direction in the rotated map //std::atan2(rotated_starting_point.y-start_node->center_.y, rotated_starting_point.x-start_node->center_.x);
for(std::vector<EnergyExploratorNode*>::iterator neighbor=last_node->neighbors_.begin(); neighbor!=last_node->neighbors_.end(); ++neighbor)
{
if ((*neighbor)->obstacle_==false && (*neighbor)->center_.y==start_node->center_.y && (*neighbor)->center_.x>start_node->center_.x)
{
previous_travel_angle = 0;
break;
}
if ((*neighbor)->obstacle_==false && (*neighbor)->center_.y==start_node->center_.y && (*neighbor)->center_.x<start_node->center_.x)
{
previous_travel_angle = PI;
break;
}
if ((*neighbor)->obstacle_==false && (*neighbor)->center_.y<start_node->center_.y && (*neighbor)->center_.x==start_node->center_.x)
{
previous_travel_angle = -0.5*PI;
}
if ((*neighbor)->obstacle_==false && (*neighbor)->center_.y>start_node->center_.y && (*neighbor)->center_.x==start_node->center_.x)
{
previous_travel_angle = 0.5*PI;
}
}
// cv::Mat path_map = rotated_room_map.clone();
// cv::circle(path_map, fov_coverage_path[0], 2, cv::Scalar(100), CV_FILLED);
do
{
//std::cout << "Point: " << last_node->center_ << std::endl;
// check the direct neighbors, if at least one is not already visited
std::vector<EnergyExploratorNode*> not_visited_neighbors;
for(std::vector<EnergyExploratorNode*>::iterator neighbor=last_node->neighbors_.begin(); neighbor!=last_node->neighbors_.end(); ++neighbor)
if ((*neighbor)->obstacle_ == false && (*neighbor)->visited_ == false)
not_visited_neighbors.push_back(*neighbor);
// if there are not visited direct neighbors, find the one of them that minimizes the energy functional
double min_energy = 1e10;
EnergyExploratorNode* next_node = 0;
if (not_visited_neighbors.size() > 0)
{
// find best neighbor
for (std::vector<EnergyExploratorNode*>::iterator candidate=not_visited_neighbors.begin(); candidate!=not_visited_neighbors.end(); ++candidate)
{
const double current_energy = E(*last_node, **candidate, grid_spacing_in_pixel, previous_travel_angle);
//std::cout << "Neighbor: " << (*candidate)->center_ << " energy: " << current_energy << std::endl;
if(current_energy < min_energy)
{
min_energy = current_energy;
next_node = *candidate;
}
}
}
// if no direct neighbor is unvisited, search for the next node in all unvisited nodes
else
{
// find best next node
for (size_t row=0; row<nodes.size(); ++row)
{
for (size_t col=0; col<nodes[row].size(); ++col)
{
// only check free nodes and not visited ones
if (nodes[row][col].obstacle_==false && nodes[row][col].visited_==false)
{
// check if current node has a better energy
const double current_energy = E(*last_node, nodes[row][col], grid_spacing_in_pixel, previous_travel_angle);
if (current_energy < min_energy)
{
min_energy = current_energy;
next_node = &nodes[row][col];
}
}
}
}
if (next_node == 0)
break; // stop if all nodes have been visited
}
// add next node to path and set robot location
previous_travel_angle = std::atan2(next_node->center_.y-last_node->center_.y, next_node->center_.x-last_node->center_.x);
fov_coverage_path.push_back(cv::Point2f(next_node->center_.x, next_node->center_.y));
next_node->visited_ = true; // mark visited nodes as obstacles
// cv::circle(path_map, next_node->center_, 2, cv::Scalar(100), CV_FILLED);
// cv::line(path_map, next_node->center_, last_node->center_, cv::Scalar(127));
// cv::imshow("path", path_map);
// cv::waitKey();
last_node = next_node;
} while (true);
// transform the calculated path back to the originally rotated map
std::vector<geometry_msgs::Pose2D> fov_poses;
room_rotation.transformPathBackToOriginalRotation(fov_coverage_path, fov_poses, R);
// // go trough the found fov-path and compute the angles of the poses s.t. it points to the next pose that should be visited
// for(unsigned int point_index=0; point_index<fov_coverage_path.size(); ++point_index)
// {
// // get the vector from the current point to the next point
// geometry_msgs::Pose2D current_point = fov_coverage_path[point_index];
// geometry_msgs::Pose2D next_point = fov_coverage_path[(point_index+1)%(fov_coverage_path.size())];
//
// float angle = std::atan2(next_point.y-current_point.y, next_point.x-current_point.x);
//
// // save the found angle
// fov_coverage_path[point_index].theta = angle;
// }
// if the path should be planned for the footprint, transform the image points to the map coordinates
if(plan_for_footprint==true)
{
for(std::vector<geometry_msgs::Pose2D>::iterator pose=fov_poses.begin(); pose!=fov_poses.end(); ++pose)
{
geometry_msgs::Pose2D current_pose;
current_pose.x = (pose->x * map_resolution) + map_origin.x;
current_pose.y = (pose->y * map_resolution) + map_origin.y;
current_pose.theta = pose->theta;
path.push_back(current_pose);
}
return;
}
// // testing
// cv::Mat path_map = rotated_room_map.clone();
// cv::circle(path_map, fov_coverage_path[0], 2, cv::Scalar(100), CV_FILLED);
// for(std::vector<cv::Point>::iterator path_node=fov_coverage_path.begin()+1; path_node!=fov_coverage_path.end(); ++path_node)
// {
// cv::circle(path_map, *path_node, 2, cv::Scalar(100), CV_FILLED);
// cv::line(path_map, *path_node, *(path_node-1), cv::Scalar(127));
// cv::imshow("path", path_map);
// cv::waitKey();
// }
// cv::imshow("path", path_map);
// cv::waitKey();
// ****************** IV. Map the found fov path to the robot path ******************
//mapPath(room_map, path, fov_poses, robot_to_fov_vector, map_resolution, map_origin, starting_position);
ROS_INFO("Starting to map from field of view pose to robot pose");
cv::Point robot_starting_position = (fov_poses.size()>0 ? cv::Point(fov_poses[0].x, fov_poses[0].y) : starting_position);
cv::Mat inflated_room_map;
cv::erode(room_map, inflated_room_map, cv::Mat(), cv::Point(-1, -1), half_grid_spacing_as_int);
mapPath(inflated_room_map, path, fov_poses, robot_to_fov_vector, map_resolution, map_origin, robot_starting_position);
}

View File

@ -0,0 +1,262 @@
#include <ipa_room_exploration/grid_point_explorator.h>
#include <boost/thread.hpp>
#include <boost/chrono.hpp>
#include <pthread.h>
#include <signal.h>
// Default constructor. No members to initialize here: all planning parameters
// (cell size, TSP solver selection, timeouts, ...) are passed per call to
// getExplorationPath().
GridPointExplorator::GridPointExplorator()
{
}
// Thread entry point that runs the Concorde TSP solver on a pre-computed, cleaned distance matrix.
// Intended to be launched via boost::thread so the caller can enforce a timeout with try_join_for().
// tsp_solver = solver instance owned by the caller (kept accessible so the caller can invoke abortComputation() on timeout)
// optimal_order = output: visiting order of the nodes; left unchanged if the thread is interrupted before completion
// distance_matrix = cleaned pairwise travel-cost matrix
// cleaned_index_to_original_index_mapping = maps indices of the cleaned matrix back to the original node indices
// start_node = index of the node the tour starts at
void GridPointExplorator::tsp_solver_thread_concorde(ConcordeTSPSolver& tsp_solver, std::vector<int>& optimal_order,
		const cv::Mat& distance_matrix, const std::map<int,int>& cleaned_index_to_original_index_mapping, const int start_node)
{
	try
	{
		optimal_order = tsp_solver.solveConcordeTSPWithCleanedDistanceMatrix(distance_matrix, cleaned_index_to_original_index_mapping, start_node);
	}
	catch (boost::thread_interrupted&)
	{
		// interruption is requested by the caller on timeout; just log and let the thread end cleanly
		std::cout << "GridPointExplorator::tsp_solver_thread_concorde: Thread with Concorde TSP solver was interrupted." << std::endl;
	}
	std::cout << "GridPointExplorator::tsp_solver_thread_concorde: finished TSP with solver 3=Concorde and optimal_order.size=" << optimal_order.size() << std::endl;
}
// Thread entry point that runs the genetic TSP solver on a pre-computed, cleaned distance matrix.
// Intended to be launched via boost::thread so the caller can enforce a timeout with try_join_for().
// tsp_solver = solver instance owned by the caller (kept accessible so the caller can invoke abortComputation() on timeout)
// optimal_order = output: visiting order of the nodes; left unchanged if the thread is interrupted before completion
// distance_matrix = cleaned pairwise travel-cost matrix
// cleaned_index_to_original_index_mapping = maps indices of the cleaned matrix back to the original node indices
// start_node = index of the node the tour starts at
void GridPointExplorator::tsp_solver_thread_genetic(GeneticTSPSolver& tsp_solver, std::vector<int>& optimal_order,
		const cv::Mat& distance_matrix, const std::map<int,int>& cleaned_index_to_original_index_mapping, const int start_node)
{
	try
	{
		optimal_order = tsp_solver.solveGeneticTSPWithCleanedDistanceMatrix(distance_matrix, cleaned_index_to_original_index_mapping, start_node);
	}
	catch (boost::thread_interrupted&)
	{
		// interruption is requested by the caller on timeout; just log and let the thread end cleanly
		std::cout << "GridPointExplorator::tsp_solver_thread_genetic: Thread with Genetic TSP solver was interrupted." << std::endl;
	}
	std::cout << "GridPointExplorator::tsp_solver_thread_genetic: finished TSP with solver 2=Genetic and optimal_order.size=" << optimal_order.size() << std::endl;
}
// Thread entry point that dispatches to the TSP solver selected by tsp_solver
// (nearest neighbor, genetic or Concorde) and computes a visiting order over
// the given points directly from the map.
// tsp_solver = one of TSP_NEAREST_NEIGHBOR, TSP_GENETIC, TSP_CONCORDE; anything else is logged as an error
// optimal_order = output: visiting order of the points; left unchanged if the thread is interrupted
// original_map, points, downsampling_factor, robot_radius, map_resolution = forwarded to the selected solver
// start_node = index of the point the tour starts at
void GridPointExplorator::tsp_solver_thread(const int tsp_solver, std::vector<int>& optimal_order, const cv::Mat& original_map,
		const std::vector<cv::Point>& points, const double downsampling_factor, const double robot_radius, const double map_resolution,
		const int start_node)
{
	try
	{
		switch (tsp_solver)
		{
		case TSP_NEAREST_NEIGHBOR:
		{
			NearestNeighborTSPSolver solver;
			optimal_order = solver.solveNearestTSP(original_map, points, downsampling_factor, robot_radius, map_resolution, start_node, 0);
			break;
		}
		case TSP_GENETIC:
		{
			GeneticTSPSolver solver;
			optimal_order = solver.solveGeneticTSP(original_map, points, downsampling_factor, robot_radius, map_resolution, start_node, 0);
			break;
		}
		case TSP_CONCORDE:
		{
			ConcordeTSPSolver solver;
			optimal_order = solver.solveConcordeTSP(original_map, points, downsampling_factor, robot_radius, map_resolution, start_node, 0);
			break;
		}
		default:
			std::cout << "GridPointExplorator::tsp_solver_thread: Error: tsp_solver " << tsp_solver << " is undefined." << std::endl;
			break;
		}
	}
	catch (boost::thread_interrupted&)
	{
		// interruption is requested by the caller on timeout; just log and let the thread end cleanly
		std::cout << "GridPointExplorator::tsp_solver_thread: Thread with TSP solver was interrupted." << std::endl;
	}
	std::cout << "GridPointExplorator::tsp_solver_thread: finished TSP with solver " << tsp_solver << " and optimal_order.size=" << optimal_order.size() << std::endl;
}
// Function to create a static pose series that has the goal to inspect the complete floor of the given room.
// This is done in the following steps:
//	I.	The map is rotated into its main direction so the grid aligns with the dominant walls.
//	II.	A grid is laid over the given map with a spacing given by cell_size. All grid points that do not lie on or
//		near an obstacle become candidate path points.
//	III.	An optimal order of the previously found points is planned by solving a Traveling-Salesman-Problem (TSP).
//		Depending on this order, the angle of each pose is computed from the vector to the following point.
//	IV.	Unless planning for the footprint, the field-of-view poses are mapped to robot poses.
// room_map = expects to receive the original, not inflated room map (8-bit, 255 = free space)
// path = output: the computed poses in world coordinates [m], using map_resolution and map_origin
// map_resolution = resolution of the map in [m/pixel]
// starting_position = current robot position in pixel coordinates of room_map
// cell_size = grid spacing in [pixel]
// plan_for_footprint = if true, the grid poses are returned directly (converted to world coordinates); no fov-to-robot mapping
// robot_to_fov_vector = vector from the robot center to the field-of-view center, used by mapPath()
// tsp_solver = TSP_NEAREST_NEIGHBOR, TSP_GENETIC or TSP_CONCORDE
// tsp_solver_timeout = timeout in [s] for the genetic/Concorde solvers; 0 or negative disables the timeout
void GridPointExplorator::getExplorationPath(const cv::Mat& room_map, std::vector<geometry_msgs::Pose2D>& path, const double map_resolution,
		const cv::Point starting_position, const cv::Point2d map_origin, const int cell_size, const bool plan_for_footprint,
		const Eigen::Matrix<float, 2, 1> robot_to_fov_vector, int tsp_solver, int64_t tsp_solver_timeout)
{
	const int half_cell_size = cell_size/2;

	// *********************** I. Find the main directions of the map and rotate it in this manner. ***********************
	// rotate map
	cv::Mat R;
	cv::Rect bbox;
	cv::Mat rotated_room_map;
	RoomRotator room_rotation;
	room_rotation.computeRoomRotationMatrix(room_map, R, bbox, map_resolution);
	room_rotation.rotateRoom(room_map, rotated_room_map, R, bbox);

	// transform starting position into the rotated frame
	std::vector<cv::Point> starting_point_vector(1, starting_position); // opencv syntax
	cv::transform(starting_point_vector, starting_point_vector, R);
	cv::Point rotated_starting_position = starting_point_vector[0];

	//******************* II. Get grid points *************************
	// vector to store all found points
	std::vector<cv::Point> grid_points;

	// compute the basic Boustrophedon grid lines
	cv::Mat inflated_rotated_room_map;
	BoustrophedonGrid grid_lines;
	GridGenerator::generateBoustrophedonGrid(rotated_room_map, inflated_rotated_room_map, half_cell_size, grid_lines, cv::Vec4i(-1, -1, -1, -1),
			cell_size, half_cell_size, cell_size-1);	// using cell_size-1 instead of cell_size for grid_spacing_horizontal helps
														// the TSP planner to avoid unnecessary rotations by following a preferred direction

	// flatten the grid lines into one point list (lower lines only exist for wide rows)
	for (BoustrophedonGrid::iterator line=grid_lines.begin(); line!=grid_lines.end(); ++line)
	{
		grid_points.insert(grid_points.end(), line->upper_line.begin(), line->upper_line.end());
		if (line->has_two_valid_lines == true)
			grid_points.insert(grid_points.end(), line->lower_line.begin(), line->lower_line.end());
	}

	//******************* III. Plan a path trough the found points *************************
	// find the index of the point which is closest to the starting position
	int min_index = 0;
	double min_distance = 1e10;
	for(std::vector<cv::Point>::iterator point = grid_points.begin(); point != grid_points.end(); ++point)
	{
		double distance = cv::norm(rotated_starting_position - *point);
		if(distance < min_distance)
		{
			min_distance = distance;
			min_index = point - grid_points.begin();
		}
	}

	// solve the Traveling Salesman Problem
	std::cout << "Finding optimal order of the " << grid_points.size() << " found points. Start-index: " << min_index << std::endl;
	const double map_downsampling_factor = 0.25;

	// compute distance matrix for TSP up front (outside of the time limit for solving the TSP)
	cv::Mat distance_matrix_cleaned;
	std::map<int,int> cleaned_index_to_original_index_mapping;	// maps the indices of the cleaned distance_matrix to the original indices of the original distance_matrix
	AStarPlanner path_planner;
	DistanceMatrix distance_matrix_computation;
	distance_matrix_computation.computeCleanedDistanceMatrix(rotated_room_map, grid_points, map_downsampling_factor, 0.0, map_resolution, path_planner,
			distance_matrix_cleaned, cleaned_index_to_original_index_mapping, min_index);

	// solve the TSP with the requested solver; genetic/Concorde run in a separate thread so they can be aborted on timeout
	bool finished = false;
	std::vector<int> optimal_order;
	if (tsp_solver == TSP_CONCORDE)
	{
		// start TSP solver in extra thread
		ConcordeTSPSolver tsp_solve;
		boost::thread t(boost::bind(&GridPointExplorator::tsp_solver_thread_concorde, this, boost::ref(tsp_solve), boost::ref(optimal_order),
				boost::cref(distance_matrix_cleaned), boost::cref(cleaned_index_to_original_index_mapping), min_index));
		if (tsp_solver_timeout > 0)
		{
			finished = t.try_join_for(boost::chrono::seconds(tsp_solver_timeout));
			if (finished == false)
			{
				tsp_solve.abortComputation();
				std::cout << "GridPointExplorator::getExplorationPath: INFO: Terminated tsp_solver " << tsp_solver << " because of time out. Taking the Nearest Neighbor TSP instead." << std::endl;
			}
		}
		else
			finished = true;
		t.join();
	}
	else if (tsp_solver == TSP_GENETIC)
	{
		// start TSP solver in extra thread
		GeneticTSPSolver tsp_solve;
		boost::thread t(boost::bind(&GridPointExplorator::tsp_solver_thread_genetic, this, boost::ref(tsp_solve), boost::ref(optimal_order),
				boost::cref(distance_matrix_cleaned), boost::cref(cleaned_index_to_original_index_mapping), min_index));
		if (tsp_solver_timeout > 0)
		{
			finished = t.try_join_for(boost::chrono::seconds(tsp_solver_timeout));
			if (finished == false)
			{
				tsp_solve.abortComputation();
				std::cout << "GridPointExplorator::getExplorationPath: INFO: Terminated tsp_solver " << tsp_solver << " because of time out. Taking the Nearest Neighbor TSP instead." << std::endl;
			}
		}
		else
			finished = true;
		t.join();
	}

	// use nearest neighbor TSP if requested directly, or as fall back if the other approach timed out
	if (tsp_solver==TSP_NEAREST_NEIGHBOR || finished==false)
	{
		NearestNeighborTSPSolver tsp_solve;
		optimal_order = tsp_solve.solveNearestTSPWithCleanedDistanceMatrix(distance_matrix_cleaned, cleaned_index_to_original_index_mapping, min_index);
		std::cout << "GridPointExplorator::getExplorationPath: finished TSP with solver 1 and optimal_order.size=" << optimal_order.size() << std::endl;
	}

	// rearrange the found points in the optimal order and convert them to the right format
	std::vector<cv::Point2f> fov_middlepoint_path(optimal_order.size());
	for(unsigned int point_index = 0; point_index < optimal_order.size(); ++point_index)
		fov_middlepoint_path[point_index] = cv::Point2f(grid_points[optimal_order[point_index]].x, grid_points[optimal_order[point_index]].y);

	// transform the calculated path back to the originally rotated map and create poses with an angle
	std::vector<geometry_msgs::Pose2D> path_fov_poses;
	room_rotation.transformPathBackToOriginalRotation(fov_middlepoint_path, path_fov_poses, R);

	// if the path should be planned for the robot footprint, convert the poses to world coordinates and return here
	if(plan_for_footprint == true)
	{
		for(std::vector<geometry_msgs::Pose2D>::iterator pose=path_fov_poses.begin(); pose != path_fov_poses.end(); ++pose)
		{
			geometry_msgs::Pose2D current_pose;
			current_pose.x = (pose->x * map_resolution) + map_origin.x;
			current_pose.y = (pose->y * map_resolution) + map_origin.y;
			current_pose.theta = pose->theta;
			path.push_back(current_pose);
		}
		return;
	}

	// *********************** IV. Get the robot path out of the fov path. ***********************
	// go trough all computed fov poses and compute the corresponding robot pose
	ROS_INFO("Starting to map from field of view pose to robot pose");
	cv::Point robot_starting_position = (path_fov_poses.size()>0 ? cv::Point(path_fov_poses[0].x, path_fov_poses[0].y) : starting_position);
	cv::Mat inflated_room_map;
	// erode the map so mapPath() only places robot poses with sufficient obstacle clearance
	cv::erode(room_map, inflated_room_map, cv::Mat(), cv::Point(-1, -1), half_cell_size);
	mapPath(inflated_room_map, path, path_fov_poses, robot_to_fov_vector, map_resolution, map_origin, robot_starting_position);
}

View File

@ -0,0 +1,161 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2015 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_segmentation
*
* \author
* Author: Richard Bormann
* \author
* Supervised by:
*
* \date Date of creation: 22.07.2015
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#include "ipa_room_exploration/meanshift2d.h"
#include "ipa_room_exploration/fast_math.h"
// Applies mean shift filtering with a Gaussian-style kernel to the 2D samples
// in data and writes the shifted positions to filtered_data.
// Every sample is iteratively replaced by the weighted mean of all samples,
// where the weight of sample j w.r.t. sample i is exp(-bandwidth * |x_j-x_i|^2).
// bandwidth = scale of the kernel (larger values -> narrower kernel)
// maximumIterations = fixed number of mean shift rounds to run
void MeanShift2D::filter(const std::vector<cv::Vec2d>& data, std::vector<cv::Vec2d>& filtered_data, const double bandwidth, const int maximumIterations)
{
	filtered_data = data;
	// scratch buffer receiving the updated positions of one iteration
	std::vector<cv::Vec2d> shifted(data.size());

	for (int iteration = 0; iteration < maximumIterations; ++iteration)
	{
		for (size_t i = 0; i < filtered_data.size(); ++i)
		{
			cv::Vec2d weighted_sum(0., 0.);
			double weight_sum = 0.;
			for (size_t j = 0; j < filtered_data.size(); ++j)
			{
				const cv::Vec2d offset = filtered_data[j] - filtered_data[i];
				const double weight = fasterexp(-bandwidth * (offset[0]*offset[0] + offset[1]*offset[1]));
				weighted_sum += weight * filtered_data[j];
				weight_sum += weight;
			}
			// weight_sum >= 1 always holds since the j==i term contributes exp(0)=1
			shifted[i] = weighted_sum / weight_sum;
		}
		// commit the whole iteration at once so updates within a round do not influence each other
		filtered_data = shifted;
	}
}
// Clusters the mean-shift-filtered samples by their convergence points.
// A sample joins the first existing cluster whose center is closer than
// sensitivity (updating that center as a running mean of its members);
// otherwise a new cluster is opened at the sample's position.
// filtered_data = mean shift filtered samples; must not be empty
// convergence_points = output: one center per cluster
// convergence_sets = output: per cluster, the indices of its member samples
void MeanShift2D::computeConvergencePoints(const std::vector<cv::Vec2d>& filtered_data, std::vector<cv::Vec2d>& convergence_points, std::vector< std::vector<int> >& convergence_sets, const double sensitivity)
{
	// seed the clustering with the first sample
	convergence_sets.resize(1, std::vector<int>(1, 0));
	convergence_points.resize(1, filtered_data[0]);

	for (int i=1; i<(int)filtered_data.size(); i++)
	{
		// look for an existing cluster close enough to absorb this sample
		int matching_cluster = -1;
		for (int j=0; j<(int)convergence_points.size(); j++)
		{
			if (cv::norm(filtered_data[i]-convergence_points[j]) < sensitivity)
			{
				matching_cluster = j;
				break;
			}
		}

		if (matching_cluster >= 0)
		{
			std::vector<int>& members = convergence_sets[matching_cluster];
			members.push_back(i);
			// incremental update of the cluster center (running mean over its members)
			convergence_points[matching_cluster] = (convergence_points[matching_cluster]*(members.size()-1.) + filtered_data[i]) / (double)members.size();
		}
		else
		{
			// no cluster nearby: open a new one at this sample
			convergence_sets.push_back(std::vector<int>(1, i));
			convergence_points.push_back(filtered_data[i]);
		}
	}
}
// Estimates a representative center point of a room from the given set of cell
// coordinates (in pixels):
//  1. the cells are (optionally downsampled and) mean shift filtered,
//  2. if the plain mean of the filtered cells lies on free space (value 255 in
//     room_image), it is returned directly,
//  3. otherwise the convergence clusters of the filtered cells are computed and
//     the center of the cluster with the largest support is returned.
// room_image = 8-bit room map, 255 = free space
// room_cells = pixel coordinates of the room's cells; an empty input yields (0,0)
// map_resolution = map resolution in [m/pixel], used to derive the mean shift bandwidth
cv::Vec2d MeanShift2D::findRoomCenter(const cv::Mat& room_image, const std::vector<cv::Vec2d>& room_cells, double map_resolution)
{
	// guard against empty input: computeConvergencePoints() below accesses
	// filtered_room_cells[0] and would otherwise run into undefined behavior
	if (room_cells.empty() == true)
		return cv::Vec2d(0., 0.);

	// downsample data if too big (keep roughly 1000 samples)
	std::vector<cv::Vec2d> room_cells_sampled;
	if (room_cells.size() > 1000)
	{
		const int factor = room_cells.size()/1000;
		for (size_t i=0; i<room_cells.size(); ++i)
			if ((int)i % factor == 0)
				room_cells_sampled.push_back(room_cells[i]);
	}
	else
		room_cells_sampled = room_cells;

	// mean shift filter
	const double bandwidth = map_resolution/2.;	// parameter
	std::vector<cv::Vec2d> filtered_room_cells;
	filter(room_cells_sampled, filtered_room_cells, bandwidth, 100);

	// take mean of filtering result in simple rooms, i.e. if no obstacle is located at the computed cell
	// NOTE(review): assumes the mean lies within room_image bounds — holds as long as room_cells only contains in-image coordinates
	cv::Scalar mean_coordinates = cv::mean(filtered_room_cells);
	cv::Vec2d room_center(mean_coordinates[0], mean_coordinates[1]);
	if (room_image.at<uchar>(room_center[1], room_center[0]) == 255)
		return room_center;

	// otherwise compute convergence sets of the filtering results and return the mean of the largest convergence set
	// determine convergence points
	std::vector<cv::Vec2d> convergence_points;
	std::vector< std::vector<int> > convergence_sets;
	computeConvergencePoints(filtered_room_cells, convergence_points, convergence_sets, 1./bandwidth*0.1);

	// select convergence point with largest point support
	size_t max_index = 0;
	size_t max_size = 0;
	for (size_t i=0; i<convergence_sets.size(); ++i)
	{
		if (convergence_sets[i].size() > max_size)
		{
			max_size = convergence_sets[i].size();
			max_index = i;
		}
	}
	return convergence_points[max_index];
}

View File

@ -0,0 +1,367 @@
#include <ipa_room_exploration/neural_network_explorator.h>
// Default constructor: initializes the neural network parameters with the
// empirically chosen default values (TODO: expose them as parameters).
NeuralNetworkExplorator::NeuralNetworkExplorator()
	: step_size_(0.008),			// 0.008
	  A_(17),						// 17
	  B_(5),						// 5
	  D_(7),						// 7
	  E_(80),						// E >> B, 80
	  mu_(1.03),					// 1.03
	  delta_theta_weight_(0.15)		// 0.15
{
}
// Function that calculates an exploration path trough the given map s.t. everything has been covered by the robot-footprint
// or the field of view. The algorithm is programmed after
//
// Yang, Simon X., and Chaomin Luo. "A neural network approach to complete coverage path planning." IEEE Transactions on Systems, Man, and Cybernetics, Part B (Cybernetics) 34.1 (2004): 718-724.
//
// and uses a artificial neural network to produce the path. For this the following steps are done:
// I. Construct the neural network by sampling the given map, using the given fitting circle radius as step size. This is
// done because it allows that when all neurons (the samples) are covered the whole space have been cleaned/inspected.
// II. Starting with the given robot pose go trough the found network. At every time-step choose the next Neuron by
// solving x_n = max(x_j + c*y_j), with c as a positive scalar and y_j a function penalizing movements of the robot
// into a direction he is currently not pointing at. At every time-step an update of the states of the neurons is done.
// After this step a path consisting of poses for the fov middlepoint is obtained. If the plan should be planned for
// the robot footprint, then the algorithm returns this path and is finished.
// III. If wanted use the given vector from the robot middlepoint to the fov middlepoint to map the fov poses to the
// robot footprint poses. To do so simply a vector transformation is applied. If the computed robot pose is not in the
// free space, another accessible point is generated by finding it on the radius around the fov middlepoint s.t.
// the distance to the last robot position is minimized. If this is not wanted one has to set the corresponding
// Boolean to false (shows that the path planning should be done for the robot footprint).
void NeuralNetworkExplorator::getExplorationPath(const cv::Mat& room_map, std::vector<geometry_msgs::Pose2D>& path, const float map_resolution,
const cv::Point starting_position, const cv::Point2d map_origin, const double grid_spacing_in_pixel,
const bool plan_for_footprint, const Eigen::Matrix<float, 2, 1> robot_to_fov_vector, bool show_path_computation)
{
const int grid_spacing_as_int = std::floor(grid_spacing_in_pixel);
const int half_grid_spacing_as_int = std::floor(grid_spacing_in_pixel*0.5);
// *********************** I. Find the main directions of the map and rotate it in this manner. ***********************
cv::Mat R;
cv::Rect bbox;
cv::Mat rotated_room_map;
RoomRotator room_rotation;
room_rotation.computeRoomRotationMatrix(room_map, R, bbox, map_resolution);
room_rotation.rotateRoom(room_map, rotated_room_map, R, bbox);
// compute min/max room coordinates
cv::Point min_room(1000000, 1000000), max_room(0, 0);
for (int v=0; v<rotated_room_map.rows; ++v)
{
for (int u=0; u<rotated_room_map.cols; ++u)
{
if (rotated_room_map.at<uchar>(v,u)==255)
{
min_room.x = std::min(min_room.x, u);
min_room.y = std::min(min_room.y, v);
max_room.x = std::max(max_room.x, u);
max_room.y = std::max(max_room.y, v);
}
}
}
cv::Mat inflated_rotated_room_map;
cv::erode(rotated_room_map, inflated_rotated_room_map, cv::Mat(), cv::Point(-1, -1), half_grid_spacing_as_int);
// ****************** II. Create the neural network ******************
// reset previously computed neurons
neurons_.clear();
// go trough the map and create the neurons
int number_of_free_neurons = 0;
for(int y=min_room.y+half_grid_spacing_as_int; y<max_room.y; y+=grid_spacing_as_int)
{
// for the current row create a new set of neurons to span the network over time
std::vector<Neuron> current_network_row;
for(int x=min_room.x+half_grid_spacing_as_int; x<max_room.x; x+=grid_spacing_as_int)
{
// create free neuron
cv::Point cell_center(x,y);
if (GridGenerator::completeCellTest(inflated_rotated_room_map, cell_center, grid_spacing_as_int) == true)
//if(rotated_room_map.at<uchar>(y,x) == 255)
{
Neuron current_neuron(cell_center, A_, B_, D_, E_, mu_, step_size_, false);
current_network_row.push_back(current_neuron);
++number_of_free_neurons;
}
else // obstacle neuron
{
Neuron current_neuron(cell_center, A_, B_, D_, E_, mu_, step_size_, true);
current_network_row.push_back(current_neuron);
}
}
// insert the current row into the network
neurons_.push_back(current_network_row);
}
// todo: do not limit to direct neighbors but cycle through all neurons for finding the best next
// go trough the found neurons and get the direct neighbors of each
for(size_t row=0; row<neurons_.size(); ++row)
{
for(size_t column=0; column<neurons_[row].size(); ++column)
{
for(int dy=-1; dy<=1; ++dy)
{
// don't exceed the current row
if(row+dy < 0 || row+dy >= neurons_.size())
continue;
// get the neighbors left from the current neuron
if(column > 0)
neurons_[row][column].addNeighbor(&neurons_[row+dy][column-1]);
// get the neurons on the same column as the current neuron
if(dy != 0)
neurons_[row][column].addNeighbor(&neurons_[row+dy][column]);
// get the neurons right from the current neuron
if(column < neurons_[row].size()-1)
neurons_[row][column].addNeighbor(&neurons_[row+dy][column+1]);
}
}
}
// testing
// cv::Mat black_map = cv::Mat(rotated_room_map.rows, rotated_room_map.cols, rotated_room_map.type(), cv::Scalar(0));
// for(size_t i=0; i<neurons_.size(); ++i)
// {
// for(size_t j=0; j<neurons_[i].size(); ++j)
// {
// std::vector<Neuron*> neighbors;
// neurons_[i][j].getNeighbors(neighbors);
// for(size_t k=0; k<neighbors.size(); ++k)
// {
// cv::circle(black_map, neighbors[k]->getPosition(), 2, cv::Scalar(127), CV_FILLED);
// }
// }
// }
// cv::imshow("neighbors", black_map);
// cv::waitKey();
// ****************** III. Find the coverage path ******************
// mark the first non-obstacle neuron as starting node
Neuron* starting_neuron = 0;
bool found = false;
for(size_t row=0; row<neurons_.size(); ++row)
{
for(size_t column=0; column<neurons_[row].size(); ++column)
{
if(neurons_[row][column].isObstacle() == false && found == false)
{
found = true;
starting_neuron = &neurons_[row][column];
break;
}
}
if(found == true)
break;
}
if (starting_neuron==0)
{
std::cout << "Warning: there are no accessible points in this room." << std::endl;
return;
}
starting_neuron->markAsVisited();
// initial updates of the states to mark obstacles and unvisited free neurons as such
for(size_t init=1; init<=100; ++init)
{
for(size_t row=0; row<neurons_.size(); ++row)
for(size_t column=0; column<neurons_[row].size(); ++column)
neurons_[row][column].saveState();
for(size_t row=0; row<neurons_.size(); ++row)
for(size_t column=0; column<neurons_[row].size(); ++column)
neurons_[row][column].updateState();
}
// testing
// cv::Mat black_map = cv::Mat(rotated_room_map.rows, rotated_room_map.cols, CV_32F, cv::Scalar(0));
// for(size_t row=0; row<neurons_.size(); ++row)
// {
// for(size_t column=0; column<neurons_[row].size(); ++column)
// {
// std::cout << neurons_[row][column].getState(false) << " ";
// black_map.at<float>(row*fitting_radius_as_int, column*fitting_radius_as_int) = (float) 1000.0*neurons_[row][column].getState(false);
// }
// std::cout << std::endl;
// }
// std::cout << std::endl;
// cv::namedWindow("states", cv::WINDOW_NORMAL);
// cv::imshow("states", black_map);
// cv::resizeWindow("states", 600, 600);
// cv::waitKey();
// iteratively choose the next neuron until all neurons have been visited or the algorithm is stuck in a
// limit cycle like path (i.e. the same neurons get visited over and over)
int visited_neurons = 1;
bool stuck_in_cycle = false;
std::vector<cv::Point> fov_coverage_path;
fov_coverage_path.push_back(cv::Point(starting_neuron->getPosition().x, starting_neuron->getPosition().y));
double previous_traveling_angle = 0.0; // save the travel direction to the current neuron to determine the next neuron
cv::Mat black_map = rotated_room_map.clone();
Neuron* previous_neuron = starting_neuron;
int loop_counter = 0;
do
{
//std::cout << "Point: " << previous_neuron->getPosition() << std::endl;
++loop_counter;
// get the current neighbors and choose the next out of them
std::vector<Neuron*> neighbors;
previous_neuron->getNeighbors(neighbors);
Neuron* next_neuron = 0;
// go through the neighbors and find the next one
double max_value = -1e10, travel_angle = 0.0, best_angle = 0.0;
for(size_t neighbor=0; neighbor<neighbors.size(); ++neighbor)
{
// get travel angle to this neuron
travel_angle = std::atan2(neighbors[neighbor]->getPosition().y-previous_neuron->getPosition().y, neighbors[neighbor]->getPosition().x-previous_neuron->getPosition().x);
// compute penalizing function y_j
double diff_angle = travel_angle - previous_traveling_angle;
while (diff_angle < -PI)
diff_angle += 2*PI;
while (diff_angle > PI)
diff_angle -= 2*PI;
double y = 1 - (std::abs(diff_angle)/PI);
// compute transition function value
//std::cout << " Neighbor: " << neighbors[neighbor]->getPosition() << " " << neighbors[neighbor]->getState(false) << ", " << delta_theta_weight_ * y << std::endl;
double trans_fct_value = neighbors[neighbor]->getState(false) + delta_theta_weight_ * y;
// check if neighbor is next neuron to be visited
if(trans_fct_value > max_value && rotated_room_map.at<uchar>(neighbors[neighbor]->getPosition()) != 0)
{
max_value = trans_fct_value;
next_neuron = neighbors[neighbor];
best_angle = travel_angle;
}
}
// catch errors
if (next_neuron == 0)
{
if (loop_counter <= 20)
continue;
else
break;
}
loop_counter = 0;
// if the next neuron was previously uncleaned, increase number of visited neurons
if(next_neuron->visitedNeuron() == false)
++visited_neurons;
// mark next neuron as visited
next_neuron->markAsVisited();
previous_traveling_angle = best_angle;
// add neuron to path
const cv::Point current_pose(next_neuron->getPosition().x, next_neuron->getPosition().y);
fov_coverage_path.push_back(current_pose);
// check the fov path for a limit cycle by searching the path for the next neuron, if it occurs too often
// and the previous/following neuron is always the same the algorithm probably is stuck in a cycle
int number_of_neuron_in_path = 0;
for(std::vector<cv::Point>::iterator pose=fov_coverage_path.begin(); pose!=fov_coverage_path.end(); ++pose)
if(*pose==current_pose)
++number_of_neuron_in_path;
if(number_of_neuron_in_path >= 20)
{
// check number of previous neuron
cv::Point previous_pose = fov_coverage_path[fov_coverage_path.size()-2];
int number_of_previous_neuron_in_path = 0;
for(std::vector<cv::Point>::iterator pose=fov_coverage_path.begin()+1; pose!=fov_coverage_path.end()-1; ++pose)
{
// check if the the previous pose always has the current pose as neighbor
if(*pose==previous_pose)
{
if(*(pose+1)==current_pose)
++number_of_previous_neuron_in_path;
else if(*(pose-1)==current_pose)
++number_of_previous_neuron_in_path;
}
}
//if(number_of_previous_neuron_in_path >= number_of_neuron_in_path)
if(number_of_previous_neuron_in_path >= 20)
{
std::cout << "Warning: the algorithm is probably stuck in a cycle. Aborting." << std::endl;
stuck_in_cycle = true;
}
}
// update the states of the network
for (int i=0; i<100; ++i)
{
for(size_t row=0; row<neurons_.size(); ++row)
for(size_t column=0; column<neurons_[row].size(); ++column)
neurons_[row][column].saveState();
for(size_t row=0; row<neurons_.size(); ++row)
for(size_t column=0; column<neurons_[row].size(); ++column)
neurons_[row][column].updateState();
}
// printing of the path computation
if(show_path_computation == true)
{
#if CV_MAJOR_VERSION<=3
cv::circle(black_map, next_neuron->getPosition(), 2, cv::Scalar((visited_neurons*5)%250), CV_FILLED);
#else
cv::circle(black_map, next_neuron->getPosition(), 2, cv::Scalar((visited_neurons*5)%250), cv::FILLED);
#endif
cv::line(black_map, previous_neuron->getPosition(), next_neuron->getPosition(), cv::Scalar(128), 1);
cv::imshow("next_neuron", black_map);
cv::waitKey();
}
// save neuron that has been visited
previous_neuron = next_neuron;
} while (visited_neurons < number_of_free_neurons && stuck_in_cycle == false); //TODO: test terminal condition
// transform the calculated path back to the originally rotated map
std::vector<geometry_msgs::Pose2D> fov_poses;
std::vector<cv::Point2f> fov_coverage_path_2f(fov_coverage_path.size());
for (size_t i=0; i<fov_coverage_path.size(); ++i)
fov_coverage_path_2f[i] = cv::Point2f(fov_coverage_path[i].x, fov_coverage_path[i].y);
room_rotation.transformPathBackToOriginalRotation(fov_coverage_path_2f, fov_poses, R);
// // go trough the found fov-path and compute the angles of the poses s.t. it points to the next pose that should be visited
// for(unsigned int point_index=0; point_index<fov_path.size(); ++point_index)
// {
// // get the vector from the current point to the next point
// geometry_msgs::Pose2D current_point = fov_path[point_index];
// geometry_msgs::Pose2D next_point = fov_path[(point_index+1)%(fov_path.size())];
//
// float angle = std::atan2(next_point.y - current_point.y, next_point.x - current_point.x);
//
// // save the found angle
// fov_path[point_index].theta = angle;
// }
// if the path should be planned for the robot footprint create the path and return here
if(plan_for_footprint == true)
{
for(std::vector<geometry_msgs::Pose2D>::iterator pose=fov_poses.begin(); pose != fov_poses.end(); ++pose)
{
geometry_msgs::Pose2D current_pose;
current_pose.x = (pose->x * map_resolution) + map_origin.x;
current_pose.y = (pose->y * map_resolution) + map_origin.y;
current_pose.theta = pose->theta;
path.push_back(current_pose);
}
return;
}
// ****************** III. Map the found fov path to the robot path ******************
// go trough all computed fov poses and compute the corresponding robot pose
ROS_INFO("Starting to map from field of view pose to robot pose");
cv::Point robot_starting_position = (fov_poses.size()>0 ? cv::Point(fov_poses[0].x, fov_poses[0].y) : starting_position);
cv::Mat inflated_room_map;
cv::erode(room_map, inflated_room_map, cv::Mat(), cv::Point(-1, -1), half_grid_spacing_as_int);
mapPath(inflated_room_map, path, fov_poses, robot_to_fov_vector, map_resolution, map_origin, robot_starting_position);
}

View File

@ -0,0 +1,237 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2017 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Richard Bormann
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 02.2017
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#include <ipa_room_exploration/room_rotator.h>
// Warps the given room map with the affine transform R (as computed by computeRoomRotationMatrix())
// into an image of the size of bounding_rect and re-binarizes the result.
// room_map: CV_8UC1 map, 0 = obstacle, 255 = free space
// rotated_room_map: output, binary (0/255) rotated map
// R: 2x3 affine rotation matrix
// bounding_rect: bounding box of the rotated image, determines the output size
void RoomRotator::rotateRoom(const cv::Mat& room_map, cv::Mat& rotated_room_map, const cv::Mat& R, const cv::Rect& bounding_rect)
{
// rotate the image
cv::warpAffine(room_map, rotated_room_map, R, bounding_rect.size(), cv::INTER_AREA);
// apply a binary filter to create a binary image, also use a closing operator to smooth the output (the rotation might produce
// black pixels reaching into the white area that were not there before, causing new, wrong cells to open)
// (the threshold macro name changed between OpenCV 3 and 4, hence the version switch)
#if CV_MAJOR_VERSION<=3
cv::threshold(rotated_room_map, rotated_room_map, 127, 255, CV_THRESH_BINARY);
#else
cv::threshold(rotated_room_map, rotated_room_map, 127, 255, cv::THRESH_BINARY);
#endif
// this should not be used because it removes smaller obstacles like thin walls from the room and hence lets a planner generate paths through walls
// cv::dilate(rotated_room_map, rotated_room_map, cv::Mat(), cv::Point(-1,-1), 1);
// cv::erode(rotated_room_map, rotated_room_map, cv::Mat(), cv::Point(-1,-1), 1);
}
// Computes the affine transform R (2x3) that rotates the room map so that its main wall
// direction becomes parallel to the x-axis, together with the bounding box of the warped image.
// room_map: CV_8UC1 map, 0 = obstacle, 255 = free space
// R: output, the affine rotation matrix
// bounding_rect: output, the ROI/size of the warped image
// center: optional rotation center; if NULL, the centroid of the room's bounding box is used
// rotation_offset: additional angle [rad] added on top of the detected main direction
// returns the applied rotation angle in [rad]
double RoomRotator::computeRoomRotationMatrix(const cv::Mat& room_map, cv::Mat& R, cv::Rect& bounding_rect,
const double map_resolution, const cv::Point* center, const double rotation_offset)
{
// angle that aligns the most frequent wall gradient with the x-axis
const double rotation_angle = computeRoomMainDirection(room_map, map_resolution) + rotation_offset;
std::cout << "RoomRotator::computeRoomRotationMatrix: main rotation angle: " << rotation_angle << std::endl;
// choose the rotation center: either the caller-provided point or the centroid of the room's bounding box
cv::Point rotation_center;
if (center != 0)
rotation_center = *center;
else
{
cv::Point min_room, max_room;
getMinMaxCoordinates(room_map, min_room, max_room);
rotation_center.x = 0.5*(min_room.x+max_room.x);
rotation_center.y = 0.5*(min_room.y+max_room.y);
}
// build the rotation matrix (OpenCV expects the angle in degrees)
const double rotation_angle_degrees = (rotation_angle*180)/CV_PI;
R = cv::getRotationMatrix2D(rotation_center, rotation_angle_degrees, 1.0);
// determine the bounding rectangle to find the size of the warped image
bounding_rect = cv::RotatedRect(rotation_center, room_map.size(), rotation_angle_degrees).boundingRect();
// shift the transform so the rotated content is centered inside the new image
R.at<double>(0,2) += 0.5*bounding_rect.width - rotation_center.x;
R.at<double>(1,2) += 0.5*bounding_rect.height - rotation_center.y;
return rotation_angle;
}
// Computes the major wall direction [rad] of a map (preferably one room).
// A Canny edge image is searched for line segments with a probabilistic Hough transform,
// relaxing the minimum line length until at least 4 segments are found. The segments vote,
// weighted by their length, into an orientation histogram over [0, pi); the angle of the
// strongest bin is returned.
// room_map: CV_8UC1 map, black (0) at impassable areas, white (255) on drivable areas
// map_resolution: in [m/cell]
double RoomRotator::computeRoomMainDirection(const cv::Mat& room_map, const double map_resolution)
{
const double map_resolution_inverse = 1./map_resolution;
// compute Hough transform on edge image of the map; relax the minimum line length until enough lines are found
cv::Mat edge_map;
cv::Canny(room_map, edge_map, 50, 150, 3);
std::vector<cv::Vec4i> lines;
double min_line_length = 1.0; // in [m]
for (; min_line_length > 0.1; min_line_length -= 0.2)
{
cv::HoughLinesP(edge_map, lines, 1, CV_PI/180, min_line_length*map_resolution_inverse, min_line_length*map_resolution_inverse, 1.5*min_line_length*map_resolution_inverse);
if (lines.size() >= 4)
break;
}
// setup a histogram on the line directions weighted by their length to determine the major direction
Histogram<double> direction_histogram(0, CV_PI, 36);
for (size_t i=0; i<lines.size(); ++i)
{
double dx = lines[i][2] - lines[i][0];
double dy = lines[i][3] - lines[i][1];
if(dy*dy+dx*dx > 0.0)
{
// normalize the direction to [0, pi) since a wall has no front/back orientation
double current_direction = std::atan2(dy, dx);
while (current_direction < 0.)
current_direction += CV_PI;
while (current_direction > CV_PI)
current_direction -= CV_PI;
direction_histogram.addData(current_direction, sqrt(dy*dy+dx*dx));
}
}
return direction_histogram.getMaxBinPreciseVal();
}
// Maps a path that was computed in the rotated map back into the original map frame and
// converts the resulting points into poses with a driving direction.
// fov_middlepoint_path: path points in the rotated frame, in [pixels]
// path_fov_poses: output, poses in the original frame with theta set (see transformPointPathToPosePath)
// R: the 2x3 affine transform that was used to rotate the map (it is inverted here)
void RoomRotator::transformPathBackToOriginalRotation(const std::vector<cv::Point2f>& fov_middlepoint_path, std::vector<geometry_msgs::Pose2D>& path_fov_poses, const cv::Mat& R)
{
path_fov_poses.clear();
// apply the inverse of the room rotation to every path point
cv::Mat R_inverse;
cv::invertAffineTransform(R, R_inverse);
std::vector<cv::Point2f> original_frame_path;
cv::transform(fov_middlepoint_path, original_frame_path, R_inverse);
// attach orientations pointing along the path
transformPointPathToPosePath(original_frame_path, path_fov_poses);
}
// Converts a point path into a pose path: each pose receives the heading from its predecessor
// to itself, while the very first pose points towards the second point (if one exists).
// Points whose direction vector is zero (i.e. duplicates of their reference point, or a
// lone single point) are dropped from the output.
// point_path: input path points
// pose_path: output poses (x, y, theta); not cleared here, poses are appended
void RoomRotator::transformPointPathToPosePath(const std::vector<cv::Point2f>& point_path, std::vector<geometry_msgs::Pose2D>& pose_path)
{
for (size_t i = 0; i < point_path.size(); ++i)
{
const cv::Point2f& point = point_path[i];
// direction: from the previous point to this one; the first point borrows the
// direction towards its successor instead
cv::Point2f direction(0, 0);
if (i > 0)
direction = point - point_path[i-1];
else if (point_path.size() >= 2)
direction = point_path[1] - point;
// only sample different points: skip entries that do not move
if (direction.x == 0 && direction.y == 0)
continue;
geometry_msgs::Pose2D pose;
pose.x = point.x;
pose.y = point.y;
pose.theta = std::atan2(direction.y, direction.x);
pose_path.push_back(pose);
}
}
// In-place variant: recomputes the orientations of an existing pose path from the positions
// of consecutive poses (existing theta values are discarded; duplicate positions are dropped,
// see the two-argument overload).
void RoomRotator::transformPointPathToPosePath(std::vector<geometry_msgs::Pose2D>& pose_path)
{
// collect the positions, then rebuild the pose path with proper angles
std::vector<cv::Point2f> positions;
positions.reserve(pose_path.size());
for (std::vector<geometry_msgs::Pose2D>::const_iterator it = pose_path.begin(); it != pose_path.end(); ++it)
positions.push_back(cv::Point2f(it->x, it->y));
pose_path.clear();
transformPointPathToPosePath(positions, pose_path);
}
// Determines the axis-aligned bounding box of all free (255) pixels in the map.
// min_room/max_room receive the smallest/largest (x,y) coordinates of free space.
// Note: if the map contains no free pixel, min_room stays at INT_MAX and max_room at (0,0).
void RoomRotator::getMinMaxCoordinates(const cv::Mat& map, cv::Point& min_room, cv::Point& max_room)
{
min_room = cv::Point(std::numeric_limits<int>::max(), std::numeric_limits<int>::max());
max_room = cv::Point(0, 0);
for (int y = 0; y < map.rows; ++y)
{
const uchar* row = map.ptr<uchar>(y);
for (int x = 0; x < map.cols; ++x)
{
if (row[x] == 255)
{
if (x < min_room.x) min_room.x = x;
if (y < min_room.y) min_room.y = y;
if (x > max_room.x) max_room.x = x;
if (y > max_room.y) max_room.y = y;
}
}
}
}

View File

@ -0,0 +1,48 @@
<?xml version="1.0"?>
<package format="2">
<name>ipa_room_exploration</name>
<version>0.1.2</version>
<description>
This package plans the navigation in a room such that the floor of this room gets inspected completely for dirt.
</description>
<author email="florian.jordan@ipa.fraunhofer.de">Florian Jordan</author>
<maintainer email="richard.bormann@ipa.fraunhofer.de">Richard Bormann</maintainer>
<license>LGPL for academic and non-commercial use; for commercial use, please contact Fraunhofer IPA</license>
<url>http://ros.org/wiki/ipa_room_exploration</url>
<buildtool_depend>catkin</buildtool_depend>
<depend>angles</depend>
<depend>boost</depend>
<depend>cmake_modules</depend>
<depend>cob_map_accessibility_analysis</depend>
<depend>coinor-libcbc-dev</depend>
<depend>coinor-libcbc3</depend>
<depend>coinor-libcgl-dev</depend>
<depend>coinor-libclp-dev</depend>
<depend>coinor-libcoinutils-dev</depend>
<depend>coinor-libosi-dev</depend>
<depend>cv_bridge</depend>
<depend>dynamic_reconfigure</depend>
<!--depend>eigen</depend-->
<depend>eigen_conversions</depend>
<depend>geometry_msgs</depend>
<depend>ipa_building_msgs</depend>
<depend>ipa_building_navigation</depend>
<depend>laser_geometry</depend>
<!--depend>libconcorde_tsp_solver</depend-->
<depend>libopencv-dev</depend>
<depend>move_base_msgs</depend>
<depend>nav_msgs</depend>
<depend>roscpp</depend>
<depend>roslib</depend>
<depend>sensor_msgs</depend>
<depend>std_msgs</depend>
<depend>std_srvs</depend>
<depend>tf</depend>
<depend>visualization_msgs</depend>
<build_depend>message_generation</build_depend>
<exec_depend>message_runtime</exec_depend>
</package>

View File

@ -0,0 +1,29 @@
# ipa_room_exploration
Algorithms for systematic coverage driving patterns.
If you find this software useful in your work, please cite our corresponding paper:
- R. Bormann, F. Jordan, J. Hampp, and M. Hägele. Indoor coverage path planning: Survey, implementation, analysis. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), pages 1718–1725, May 2018. https://ieeexplore.ieee.org/abstract/document/8460566 , https://publica.fraunhofer.de/entities/publication/f537c15d-4cbe-4672-9d86-6e756a9ce71b/details
# General Procedure
1. Change the algorithm parameters in ros/launch/room_room_exploration_action_server_params.yaml to the desired algorithms and settings.
* room_exploration_algorithm: Choose which exploration algorithm you want to use.
* plan_for_footprint: Choose if you want to plan the coverage path for the field of view (fov) or the robot footprint. When planning for the fov, the generated path is transformed to robot poses, s.t. the given fov follows the generated path.
* revisit_areas: Choose if you want to revisit areas that haven't been covered by the robot or the fov during the execution of the coverage path.
* left_sections_min_area: When the revisiting of areas is enabled, this parameter defines how small uncovered areas can be before they will not be taken into account any further. This is useful when you prefer a fast execution over a very detailed coverage. This parameter is in [meter^2].
* goal_eps: This parameter allows the action server to publish a new navigation goal when the robot is within a specific range around the current goal. This makes the path smoother, but of course less exact. If you don't want this feature, simply set this parameter to 0. This parameter is in [pixel].
* Each exploration algorithm has its own specific parameters, see the ros/launch/room_room_exploration_action_server_params.yaml for further descriptions of these.
2. Start the action server using the file /ros/launch/room_exploration_action_server.launch, which executes the /ros/src/room_exploration_action_server.cpp file. If the server crashes for some reason (e.g. no valid map given by the client) it respawns with a little delay.
3. Start an action client, which sends a goal to the action server, corresponding to the RoomExploration.action message, which lies in ipa_building_msgs/action. The goal consists of the following parts
* input_map: The map of the whole area the robot moves in, as sensor_msgs/Image. **Has to be a 8-Bit single channel image, with 0 as occupied space and 255 as free space**.
* map_resolution: The resolution of the map as float32 in [meter/cell]
* map_origin: The origin of the map, will be used to transform the computed map-coordinates into real-world-coordinates.
* robot_radius: The radius of the robot as float32, used to find the areas that the robot can access, in [meter].
* coverage_radius: The radius of the device that should cover the defined area as float32. This can be a cleaning device or an inspection tool, the value should be given in [meter].
* field_of_view: Array of 4 geometry_msgs/Points32 that define the field of view or footprint (points around robot origin) of the robot. Used to check what areas have been left uncovered during the execution (only used when the action server shall execute the path).
* field_of_view_origin: The mounting position of the camera spanning the field of view, relative to the robot center (x-axis points to robot's front side, y-axis points to robot's left side, z-axis upwards), in [meter].
* starting_position: The position at which the robot shall start the execution of the path in the room coordinate system [meter, meter, rad].
* planning_mode: Int32 that controls if the robot footprint or the defined field-of-view shall cover the whole area (1: footprint, 2: fov).

View File

@ -0,0 +1,15 @@
cleaning_costmap:
global_frame: map
robot_base_frame: base_footprint
update_frequency: 1.0
publish_frequency: 1.0
static_map: true
rolling_window: false
resolution: 0.05
transform_tolerance: 1.0
inflation_radius: 0.15
map_type: costmap
cleaning_plan_nodehandle:
size_of_cell: 3
grid_covered_value: 200

View File

@ -0,0 +1,13 @@
obstacle_range: 2.5 #2.5
raytrace_range: 3.0 #3.0
footprint: [[-0.205, -0.155], [-0.205, 0.155], [0.077, 0.155], [0.077, -0.155]]
#footprint_inflation: 0.01
# robot_radius: 0.175 #0.175
max_obstacle_height: 0.6
min_obstacle_height: 0.0
observation_sources: scan # sources of observation data
scan: {data_type: LaserScan, topic: /scan, marking: true, clearing: true, expected_update_rate: 0}
# if set to false,then obstacle layer take unknow space to be free.
obstacle_layer:
track_unknown_space: true

View File

@ -0,0 +1,12 @@
obstacle_range: 3.0
raytrace_range: 3.5
footprint: [[-0.105, -0.105], [-0.105, 0.105], [0.041, 0.105], [0.041, -0.105]]
#robot_radius: 0.105
inflation_radius: 0.5
cost_scaling_factor: 4.0
map_type: costmap
observation_sources: scan
scan: {sensor_frame: base_scan, data_type: LaserScan, topic: scan, marking: true, clearing: true}

View File

@ -0,0 +1,22 @@
obstacle_range: 3.0
raytrace_range: 3.5
footprint: [[-0.205, -0.155], [-0.205, 0.155], [0.077, 0.155], [0.077, -0.155]]
#robot_radius: 0.17
inflation_radius: 0.3
cost_scaling_factor: 5.0
map_type: costmap
observation_sources: scan
scan: {sensor_frame: base_scan, data_type: LaserScan, topic: scan, marking: true, clearing: true}
sonar_layer:
enabled: true
no_readings_timeout: 0.0 # if 0, this parameter has no effect; otherwise, if no sonar data is received within this period, the sonar layer warns the user and the layer is not used
clear_threshold: 0.8 # cells with an occupancy probability below this value are marked as free
mark_threshold: 0.9 # cells with an occupancy probability above this value are marked as occupied
# topics: ["/robot0/sonar_0","/robot0/sonar_1","/robot0/sonar_2","/robot0/sonar_3"] # sonar topics
clear_on_max_reading: true # whether readings at or beyond the sonar's maximum range clear cells
# observation_sources: scan
# scan: {sensor_frame: base_scan, data_type: LaserScan, topic: sonar, marking: true, clearing: true}

View File

@ -0,0 +1,12 @@
obstacle_range: 3.0
raytrace_range: 3.5
footprint: [[-0.205, -0.155], [-0.205, 0.155], [0.077, 0.155], [0.077, -0.155]]
#robot_radius: 0.17
inflation_radius: 0.3
cost_scaling_factor: 5.0
map_type: costmap
observation_sources: scan
scan: {sensor_frame: base_scan, data_type: LaserScan, topic: scan, marking: true, clearing: true}

View File

@ -0,0 +1,15 @@
global_costmap:
global_frame: map
robot_base_frame: base_footprint
update_frequency: 10.0
publish_frequency: 10.0
transform_tolerance: 0.5
static_map: true
# plugins:
# - {name: static_layer, type: "costmap_2d::StaticLayer"}
# - {name: obstacle_layer, type: "costmap_2d::ObstacleLayer"}
# - {name: sonar_layer, type:"range_sensor_layer::RangeSensorLayer"}
# - {name: inflation_layer, type: "costmap_2d::InflationLayer"}

View File

@ -0,0 +1,66 @@
// Ros
#include <ros/ros.h>
// OpenCV
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
// Eigen library
#include <Eigen/Dense>
// c++ standard libraries
#include <iostream>
#include <list>
#include <string>
#include <vector>
#include <algorithm>
#include <cmath>
// services
#include <ipa_building_msgs/CheckCoverage.h>
// Class that provides a service server to check which areas have been covered, when going along the given poses. It returns an image that
// has all covered areas drawn in as 127.
// REMARK: The given map has to be a 8 bit single channel image, with 0 as obstacle and 255 as free space drawn in.
// If wanted, the server can check how often a pixel has been covered during the execution. When this is wished, an image is returned, in
// which the number of observations (or coverages) is assigned to each pixel (32 bit image in this case).
// Provides a ROS service (and a ROS-independent library interface) that checks which map areas
// have been covered when traveling along a given sequence of robot poses; covered cells are
// drawn into the returned map as 127. Optionally counts per-pixel coverage frequency.
class CoverageCheckServer
{
protected:
// node handle
ros::NodeHandle node_handle_;
// Function to draw the covered areas into the given map. This is done by going through all given robot-poses and calculating
// the field of view. The field of view is given in robot base coordinates (x-axis shows to the front and y-axis to left side).
// The function then calculates the field_of_view in the global frame by using the given robot pose.
// After this the function does a ray casting from the field of view origin (i.e. the camera location expressed in the robot base coordinate system)
// to each cell of the field of view in order to verify whether the cell has been obstructed by an obstacle and could not be observed.
// This ensures that no point is wrongly classified as seen.
// @param fov_origin The mounting position of the camera spanning the field of view, given in robot base coordinates, in [m]
void drawCoveredPointsPolygon(cv::Mat& reachable_areas_map, const std::vector<cv::Point3d>& robot_poses,
const std::vector<Eigen::Matrix<float, 2, 1>>& field_of_view_points, const Eigen::Matrix<float, 2, 1>& fov_origin,
const float map_resolution, const cv::Point2d map_origin, cv::Mat* number_of_coverages_image=NULL);
// Function that takes the given robot poses and draws the circular footprint with coverage_radius at these positions into the given map.
// Used when the server should plan a coverage path for the robot coverage area (a circle). This drawing function does not test occlusions
// since a footprint can usually be assumed to reach all covered map positions (otherwise it would be a collision).
void drawCoveredPointsCircle(cv::Mat& reachable_areas_map, const std::vector<cv::Point3d>& robot_poses,
const double coverage_radius, const float map_resolution,
const cv::Point2d map_origin, cv::Mat* number_of_coverages_image=NULL);
// reduces image coordinates of a point to valid values inside the image, i.e. between [0,rows-1]/[0,cols-1]
cv::Point clampImageCoordinates(const cv::Point& p, const int rows, const int cols);
// ros server object
ros::ServiceServer coverage_check_server_;
public:
// constructors; the NodeHandle variant presumably advertises the coverage check service
// (NOTE(review): confirm against the implementation which constructor sets up coverage_check_server_)
CoverageCheckServer();
CoverageCheckServer(ros::NodeHandle nh);
// callback function for the server; unpacks the request and delegates to the library interface below
bool checkCoverage(ipa_building_msgs::CheckCoverageRequest& request, ipa_building_msgs::CheckCoverageResponse& response);
// ROS-independent coverage check library interface
// map: CV_8UC1, 0 = obstacle, 255 = free space (see file comment); coverage_map receives covered cells as 127
bool checkCoverage(const cv::Mat& map, const float map_resolution, const cv::Point2d& map_origin, const std::vector<cv::Point3d>& path,
const std::vector<Eigen::Matrix<float, 2, 1> >& field_of_view, const Eigen::Matrix<float, 2, 1>& fov_origin, const float coverage_radius,
const bool check_for_footprint, const bool check_number_of_coverages, cv::Mat& coverage_map, cv::Mat& number_of_coverage_image);
};

View File

@ -0,0 +1,248 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2015 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_segmentation
*
* \author
* Author: Richard Bormann
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 08.2016
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#ifndef _DYNAMIC_RECONFIGURE_CLIENT_H_
#define _DYNAMIC_RECONFIGURE_CLIENT_H_
#include <ros/ros.h>
#include <ros/subscriber.h>
#include <boost/thread/mutex.hpp>
#include <dynamic_reconfigure/DoubleParameter.h>
#include <dynamic_reconfigure/IntParameter.h>
#include <dynamic_reconfigure/Reconfigure.h>
#include <dynamic_reconfigure/Config.h>
class DynamicReconfigureClient
{
public:
DynamicReconfigureClient(ros::NodeHandle& nh, const std::string& dynamic_reconfigure_service_name, const std::string& parameter_updates_topic)
: dynamic_reconfigure_current_config_received_(false), node_handle_(nh), dynamic_reconfigure_service_name_(dynamic_reconfigure_service_name)
{
dynamic_reconfigure_sub_ = node_handle_.subscribe(parameter_updates_topic, 1, &DynamicReconfigureClient::dynamic_reconfigure_current_config_callback, this);
// receive current configuration
ros::Duration sleep_rate(0.5);
while (dynamic_reconfigure_current_config_received_ == false)
{
ros::spinOnce();
sleep_rate.sleep();
}
}
dynamic_reconfigure::Config& getConfig()
{
boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
return dynamic_reconfigure_config_;
}
bool setConfig(const std::string& param_name, const bool param_value)
{
boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
if (dynamic_reconfigure_current_config_received_ == false)
{
ROS_WARN("DynamicReconfigureClient: Did not receive the current configuration, yet.");
return false;
}
bool found = false;
for (size_t i=0; i<dynamic_reconfigure_config_.bools.size(); ++i)
{
if (param_name.compare(dynamic_reconfigure_config_.bools[i].name) == 0)
{
dynamic_reconfigure_config_.bools[i].value = param_value;
found = true;
break;
}
}
if (found == false)
{
ROS_WARN("DynamicReconfigureClient: Parameter %s does not exist. Appending it to the reconfigure message.", param_name.c_str());
dynamic_reconfigure::BoolParameter cfg_param;
cfg_param.name = param_name;
cfg_param.value = param_value;
dynamic_reconfigure_config_.bools.push_back(cfg_param);
}
return sendConfiguration(dynamic_reconfigure_config_);
}
bool setConfig(const std::string& param_name, const double param_value)
{
boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
if (dynamic_reconfigure_current_config_received_ == false)
{
ROS_WARN("DynamicReconfigureClient: Did not receive the current configuration, yet.");
return false;
}
bool found = false;
for (size_t i=0; i<dynamic_reconfigure_config_.doubles.size(); ++i)
{
if (param_name.compare(dynamic_reconfigure_config_.doubles[i].name) == 0)
{
dynamic_reconfigure_config_.doubles[i].value = param_value;
found = true;
break;
}
}
if (found == false)
{
ROS_WARN("DynamicReconfigureClient: Parameter %s does not exist. Appending it to the reconfigure message.", param_name.c_str());
dynamic_reconfigure::DoubleParameter cfg_param;
cfg_param.name = param_name;
cfg_param.value = param_value;
dynamic_reconfigure_config_.doubles.push_back(cfg_param);
}
return sendConfiguration(dynamic_reconfigure_config_);
}
bool setConfig(const std::string& param_name, const int param_value)
{
boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
if (dynamic_reconfigure_current_config_received_ == false)
{
ROS_WARN("DynamicReconfigureClient: Did not receive the current configuration, yet.");
return false;
}
bool found = false;
for (size_t i=0; i<dynamic_reconfigure_config_.ints.size(); ++i)
{
if (param_name.compare(dynamic_reconfigure_config_.ints[i].name) == 0)
{
dynamic_reconfigure_config_.ints[i].value = param_value;
found = true;
break;
}
}
if (found == false)
{
ROS_WARN("DynamicReconfigureClient: Parameter %s does not exist. Appending it to the reconfigure message.", param_name.c_str());
dynamic_reconfigure::IntParameter cfg_param;
cfg_param.name = param_name;
cfg_param.value = param_value;
dynamic_reconfigure_config_.ints.push_back(cfg_param);
}
return sendConfiguration(dynamic_reconfigure_config_);
}
bool setConfig(const std::string& param_name, const std::string& param_value)
{
boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
if (dynamic_reconfigure_current_config_received_ == false)
{
ROS_WARN("DynamicReconfigureClient: Did not receive the current configuration, yet.");
return false;
}
bool found = false;
for (size_t i=0; i<dynamic_reconfigure_config_.strs.size(); ++i)
{
if (param_name.compare(dynamic_reconfigure_config_.strs[i].name) == 0)
{
dynamic_reconfigure_config_.strs[i].value = param_value;
found = true;
break;
}
}
if (found == false)
{
ROS_WARN("DynamicReconfigureClient: Parameter %s does not exist. Appending it to the reconfigure message.", param_name.c_str());
dynamic_reconfigure::StrParameter cfg_param;
cfg_param.name = param_name;
cfg_param.value = param_value;
dynamic_reconfigure_config_.strs.push_back(cfg_param);
}
return sendConfiguration(dynamic_reconfigure_config_);
}
private:
void dynamic_reconfigure_current_config_callback(const dynamic_reconfigure::ConfigConstPtr& current_config)
{
boost::mutex::scoped_lock lock(dynamic_reconfigure_lock_);
dynamic_reconfigure_config_ = *current_config;
dynamic_reconfigure_current_config_received_ = true;
}
bool sendConfiguration(const dynamic_reconfigure::Config& dynamic_reconfigure_config)
{
dynamic_reconfigure::ReconfigureRequest req;
dynamic_reconfigure::ReconfigureResponse res;
req.config = dynamic_reconfigure_config;
const bool success = ros::service::call(dynamic_reconfigure_service_name_, req, res);
return success;
}
// parameters
ros::NodeHandle node_handle_;
ros::Subscriber dynamic_reconfigure_sub_;
dynamic_reconfigure::Config dynamic_reconfigure_config_;
bool dynamic_reconfigure_current_config_received_;
std::string dynamic_reconfigure_service_name_;
boost::mutex dynamic_reconfigure_lock_;
};
#endif //_DYNAMIC_RECONFIGURE_CLIENT_H_

View File

@ -0,0 +1,102 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2016 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Florian Jordan, Richard Bormann
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 11.2016
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
// c++ standard libraries
#include <iostream>
#include <list>
#include <string>
#include <vector>
#include <algorithm>
#include <cmath>
// Ros
#include <ros/ros.h>
// service
#include <cob_map_accessibility_analysis/map_accessibility_analysis.h>
// OpenCv
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
// msgs
#include <geometry_msgs/Pose2D.h>
#include <geometry_msgs/Polygon.h>
// Eigen library
#include <Eigen/Dense>
// specific from this package
#include <ipa_building_navigation/A_star_pathplanner.h>
#define PI 3.14159265359

// Maps a given field-of-view (fov) path to a robot path using the given parameters.
// A simple vector operation is applied per pose. If the computed robot pose is not in free space,
// another accessible point is generated by searching on the radius around the fov middlepoint
// such that the distance to the last robot position is minimized.
// Important: the room map needs to be an unsigned char single-channel image; if inaccessible
// areas should be excluded, provide the inflated map.
//   room_map            unsigned char single-channel map of the room
//   robot_path          output: computed robot poses, in [m,m,rad]
//   fov_path            input field-of-view poses to be mapped to robot poses
//   robot_to_fov_vector vector from the robot center to the fov center, in [m]
//   map_resolution      map resolution -- presumably in [m/pixel]; TODO confirm units
//   map_origin          origin of the map, in [m]
//   starting_point      starting position of the robot (integer cv::Point, so presumably in
//                       pixel/map coordinates -- TODO confirm)
void mapPath(const cv::Mat& room_map, std::vector<geometry_msgs::Pose2D>& robot_path,
		const std::vector<geometry_msgs::Pose2D>& fov_path, const Eigen::Matrix<float, 2, 1>& robot_to_fov_vector,
		const double map_resolution, const cv::Point2d map_origin, const cv::Point& starting_point);

// Computes the field-of-view center and the radius of the maximum incircle of a given
// field-of-view quadrilateral.
//   fov_corners_meter                     corners of the fov polygon, in [m]
//   fitting_circle_radius_in_meter        output: radius of the maximum incircle, in [m]
//   fitting_circle_center_point_in_meter  output: center of the incircle; this is also considered
//                                         the center of the field of view, because around this point
//                                         the maximum-radius incircle that is still inside the fov
//                                         can be found
//   fov_resolution                        resolution of the fov center and incircle computations,
//                                         in [pixels/m]
void computeFOVCenterAndRadius(const std::vector<Eigen::Matrix<float, 2, 1> >& fov_corners_meter, float& fitting_circle_radius_in_meter,
		Eigen::Matrix<float, 2, 1>& fitting_circle_center_point_in_meter, const double fov_resolution=1000);

View File

@ -0,0 +1,325 @@
/*!
*****************************************************************
* \file
*
* \note
* Copyright (c) 2016 \n
* Fraunhofer Institute for Manufacturing Engineering
* and Automation (IPA) \n\n
*
*****************************************************************
*
* \note
* Project name: Care-O-bot
* \note
* ROS stack name: autopnp
* \note
* ROS package name: ipa_room_exploration
*
* \author
* Author: Florian Jordan
* \author
* Supervised by: Richard Bormann
*
* \date Date of creation: 03.2016
*
* \brief
*
*
*****************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer. \n
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. \n
* - Neither the name of the Fraunhofer Institute for Manufacturing
* Engineering and Automation (IPA) nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission. \n
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License LGPL as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License LGPL for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License LGPL along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
#pragma once
// Ros specific
#include <ros/ros.h>
#include <actionlib/server/simple_action_server.h>
#include <move_base_msgs/MoveBaseAction.h>
#include <actionlib/client/simple_action_client.h>
#include <ros/time.h>
#include <cv_bridge/cv_bridge.h>
#include <tf/transform_listener.h>
#include <dynamic_reconfigure/server.h>
// OpenCV specific
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
// Eigen library
#include <Eigen/Dense>
#include <eigen_conversions/eigen_msg.h>
// standard c++ libraries
#include <iostream>
#include <list>
#include <string>
#include <vector>
#include <algorithm>
#include <cmath>
// services and actions
#include <ipa_building_msgs/RoomExplorationAction.h>
#include <cob_map_accessibility_analysis/CheckPerimeterAccessibility.h>
#include <ipa_building_msgs/CheckCoverage.h>
// messages
#include <geometry_msgs/Pose2D.h>
#include <geometry_msgs/PoseStamped.h>
#include <geometry_msgs/Polygon.h>
#include <geometry_msgs/Point32.h>
#include <nav_msgs/Path.h>
#include <nav_msgs/OccupancyGrid.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>
// specific from this package
#include <ipa_building_navigation/concorde_TSP.h>
#include <ipa_room_exploration/RoomExplorationConfig.h>
#include <ipa_room_exploration/grid_point_explorator.h>
#include <ipa_room_exploration/boustrophedon_explorator.h>
#include <ipa_room_exploration/neural_network_explorator.h>
#include <ipa_room_exploration/convex_sensor_placement_explorator.h>
#include <ipa_room_exploration/flow_network_explorator.h>
#include <ipa_room_exploration/fov_to_robot_mapper.h>
#include <ipa_room_exploration/energy_functional_explorator.h>
#include <ipa_room_exploration/voronoi.hpp>
#include <ipa_room_exploration/room_rotator.h>
#include <ipa_room_exploration/coverage_check_server.h>
#include <geometry_msgs/Twist.h>
#include "std_msgs/String.h"
#include <actionlib_msgs/GoalID.h>
#include "ipa_building_msgs/dis_info.h"
#include "ipa_building_msgs/dis_info_array.h"
// NOTE(review): PI is also #defined with the same value in fov_to_robot_mapper.h;
// consider a single shared constant (e.g. M_PI) to avoid duplicated macro definitions.
#define PI 3.14159265359

// Convenience alias for an actionlib client that sends navigation goals to move_base.
typedef actionlib::SimpleActionClient<move_base_msgs::MoveBaseAction> MoveBaseClient;
// Action server that computes a complete-coverage ("exploration") path through a given room map
// using one of several planning algorithms, optionally executes it by sending goals to move_base,
// and performs simple reactive obstacle avoidance via direct velocity commands
// (see stereo_CB()/cmd_CB()/turn()/gostraight() below).
class RoomExplorationServer
{
protected:

	int planning_mode_;		// 1 = plans a path for coverage with the robot footprint, 2 = plans a path for coverage with the robot's field of view

	ros::Publisher path_pub_;		// a publisher sending the path as a nav_msgs::Path before executing
	ros::Publisher cmd_vel_pub_;	// publishes geometry_msgs::Twist commands directly to the base, used by turn()/gostraight()/publishZeroVelocity() -- presumably on the cmd_vel topic; TODO confirm
	ros::Publisher cancel_pub_;		// publishes actionlib_msgs::GoalID messages, presumably to cancel the active move_base goal during an avoidance maneuver; TODO confirm topic
	geometry_msgs::Twist cmd_vel_msg;	// scratch velocity message filled before publishing on cmd_vel_pub_
	actionlib_msgs::GoalID cancel_msg;	// scratch cancel message published via cancel_pub_
	geometry_msgs::Twist cmd_vel_;		// presumably stores the last velocity command received in cmd_CB(); TODO confirm

	GridPointExplorator grid_point_planner;					// object that uses the grid point method to plan a path through a room
	BoustrophedonExplorer boustrophedon_explorer_;			// object that uses the boustrophedon exploration method to plan a path through the room
	NeuralNetworkExplorator neural_network_explorator_;		// object that uses the neural network method to create an exploration path
	convexSPPExplorator convex_SPP_explorator_;				// object that uses the convex SPP exploration method to create an exploration path
	FlowNetworkExplorator flow_network_explorator_;			// object that uses the flow network exploration method to create an exploration path
	EnergyFunctionalExplorator energy_functional_explorator_;	// object that uses the energy functional exploration method to create an exploration path
	BoustrophedonVariantExplorer boustrophedon_variant_explorer_;	// object that uses the boustrophedon variant exploration method to plan a path through the room

	// parameters
	int room_exploration_algorithm_;	// variable to specify which algorithm is going to be used to plan a path
										// 1: grid point explorator
										// 2: boustrophedon explorator
										// 3: neural network explorator
										// 4: convexSPP explorator
										// 5: flowNetwork explorator
										// 6: energyFunctional explorator
										// 7: Voronoi explorator
										// 8: boustrophedon variant explorator
	bool display_trajectory_;		// display final trajectory plan step by step

	// parameters on map correction
	int map_correction_closing_neighborhood_size_;	// Applies a closing operation to neglect inaccessible areas and map errors/artifacts if the
													// map_correction_closing_neighborhood_size parameter is larger than 0.
													// The parameter then specifies the iterations (or neighborhood size) of that closing operation.

	// parameters specific to the navigation of the robot along the computed coverage trajectory
	bool return_path_;		// boolean used to determine if the server should return the computed coverage path in the response message
	bool execute_path_;		// boolean used to determine whether the server should navigate the robot along the computed coverage path
	double goal_eps_;		// distance between the published navigation goal and the robot to publish the next
							// navigation goal in the path
	bool use_dyn_goal_eps_;	// using a dynamic goal distance criterion: the larger the path's curvature, the more accurate the navigation
	bool interrupt_navigation_publishing_;	// variable that interrupts the publishing of navigation goals as long as needed, e.g. when during the execution
											// of the coverage path a trash bin is found and one wants to empty it directly and resume the path later
	bool revisit_areas_;	// variable that turns functionality on/off to revisit areas that haven't been seen during the
							// execution of the coverage path, due to uncertainties or dynamic obstacles
	double left_sections_min_area_;	// variable to determine the minimal area that not seen sections must have before they
									// are revisited after one run through the room
	std::string global_costmap_topic_;			// name of the global costmap topic
	std::string coverage_check_service_name_;	// name of the service to call for a coverage check of the driven trajectory
	std::string map_frame_;		// string that carries the name of the map frame, used for tracking of the robot
	std::string camera_frame_;	// string that carries the name of the camera frame, that is in the same kinematic chain as the map_frame and shows the camera pose

	// parameters specific to the grid point explorator
	int tsp_solver_;	// indicates which TSP solver should be used
						// 1 = Nearest Neighbor
						// 2 = Genetic solver
						// 3 = Concorde solver
	int64_t tsp_solver_timeout_;	// a sophisticated solver like Concorde or Genetic can be interrupted if it does not find a solution within this time, in [s], and then falls back to the nearest neighbor solver

	// parameters specific for the boustrophedon explorator
	double min_cell_area_;			// minimal area a cell can have, when using the boustrophedon explorator
	double path_eps_;				// the distance between points when generating a path
	double grid_obstacle_offset_;	// in [m], the additional offset of the grid to obstacles, i.e. allows to displace the grid by more than the standard half_grid_size from obstacles
	int max_deviation_from_track_;	// in [pixel], maximal allowed shift off the ideal boustrophedon track to both sides for avoiding obstacles on track
									// setting max_deviation_from_track=grid_spacing is usually a good choice
									// for negative values (e.g. max_deviation_from_track: -1) max_deviation_from_track is automatically set to grid_spacing
	int cell_visiting_order_;	// cell visiting order
								// 1 = optimal visiting order of the cells determined as TSP problem
								// 2 = alternative ordering from left to right (measured on y-coordinates of the cells), visits the cells in a more obvious fashion to the human observer (though it is not optimal)

	// parameters specific for the neural network explorator, see "A Neural Network Approach to Complete Coverage Path Planning" from Simon X. Yang and Chaomin Luo
	double step_size_;	// step size for integrating the state dynamics
	int A_;				// decaying parameter that pulls the activity of a neuron closer to zero, larger value means faster decreasing
	int B_;				// increasing parameter that tries to increase the activity of a neuron when it's not too big already, higher value means a higher desired value and a faster increasing at the beginning
	int D_;				// decreasing parameter when the neuron is labeled as obstacle, higher value means faster decreasing
	int E_;				// external input parameter of one neuron that is used in the dynamics corresponding to if it is an obstacle or uncleaned/cleaned, E>>B
	double mu_;			// parameter to set the importance of the states of neighboring neurons to the dynamics, higher value means higher influence
	double delta_theta_weight_;	// parameter to set the importance of the traveling direction from the previous step and the next step, a higher value means that the robot should turn less

	// parameters specific for the convexSPP explorator
	int cell_size_;			// size of one cell that is used to discretize the free space
	double delta_theta_;	// sampling angle when creating possible sensing poses in the convexSPP explorator

	// parameters specific for the flowNetwork explorator
	double curvature_factor_;		// double that shows the factor, an arc can be longer than a straight arc when using the flowNetwork explorator
	double max_distance_factor_;	// double that shows how much an arc can be longer than the maximal distance of the room, which is determined by the min/max coordinates that are set in the goal

	// callback function for dynamic reconfigure
	void dynamic_reconfigure_callback(ipa_room_exploration::RoomExplorationConfig &config, uint32_t level);

	// this is the execution function used by the action server
	void exploreRoom(const ipa_building_msgs::RoomExplorationGoalConstPtr &goal);

	// remove unconnected, i.e. inaccessible, parts of the room (i.e. obstructed by furniture), only keep the room part with the largest area
	bool removeUnconnectedRoomParts(cv::Mat& room_map);

	// clean path from subsequent double occurrences of the same pose
	// min_dist_squared is the squared minimum distance between two points on the trajectory, in [pixel] (i.e. grid cells)
	void downsampleTrajectory(const std::vector<geometry_msgs::Pose2D>& path_uncleaned, std::vector<geometry_msgs::Pose2D>& path, const double min_dist_squared);

	// execute the planned trajectory and drive to unexplored areas after moving along the computed path
	void navigateExplorationPath(const std::vector<geometry_msgs::Pose2D>& exploration_path, const std::vector<geometry_msgs::Point32>& field_of_view,
			const geometry_msgs::Point32& field_of_view_origin, const double coverage_radius, const double distance_robot_fov_middlepoint,
			const float map_resolution, const geometry_msgs::Pose& map_origin, const double grid_spacing_in_pixel, const double map_height);

	// function to publish a navigation goal, it returns true if the goal could be reached
	// eps is used to define an epsilon neighborhood around the goal in which a new nav_goal gets published
	// --> may smooth the process, move_base often slows before and stops at the goal
	bool publishNavigationGoal(const geometry_msgs::Pose2D& nav_goal, const std::string map_frame,
			const std::string camera_frame, std::vector<geometry_msgs::Pose2D>& robot_poses,
			const double robot_to_fov_middlepoint_distance, const double eps = 0.0,
			const bool perimeter_check = false);

	// converter -> pixel to meter for the x coordinate: meters = pixels * resolution + origin.x
	double convertPixelToMeterForXCoordinate(const int pixel_valued_object_x, const float map_resolution, const cv::Point2d map_origin)
	{
		double meter_value_obj_x = (pixel_valued_object_x * map_resolution) + map_origin.x;
		return meter_value_obj_x;
	}

	// converter -> pixel to meter for the y coordinate: meters = pixels * resolution + origin.y
	double convertPixelToMeterForYCoordinate(int pixel_valued_object_y, const float map_resolution, const cv::Point2d map_origin)
	{
		double meter_value_obj_y = (pixel_valued_object_y * map_resolution) + map_origin.y;
		return meter_value_obj_y;
	}

	// function to transform the given map in a way s.t. the OpenCV and room coordinate systems are the same
	// --> the map_saver from ROS saves maps as images with the origin lying in the lower left corner, but OpenCV assumes
	// that the origin is in the upper left corner; also they are rotated around the image x-axis with respect to each other
	// (note: the method name keeps the original spelling "Cordinates" for API compatibility)
	void transformImageToRoomCordinates(cv::Mat& map)
	{
		cv::Point2f src_center(map.cols/2.0F, map.rows/2.0F);
		cv::Mat rot_mat = getRotationMatrix2D(src_center, 180, 1.0);	// rotate the image by 180 deg around its center
		cv::Mat dst;
		cv::warpAffine(map, dst, rot_mat, map.size());
		cv::flip(dst, map, 1);	// mirror around the vertical axis; combined with the rotation, the net effect is a vertical flip
	}

	// function to create an occupancyGrid map out of a given cv::Mat
	// NOTE(review): only width/height/data are filled here; map.info.resolution and map.info.origin
	// are left unset and must be provided by the caller -- confirm all call sites do this.
	void matToMap(nav_msgs::OccupancyGrid &map, const cv::Mat &mat)
	{
		map.info.width = mat.cols;
		map.info.height = mat.rows;
		map.data.resize(mat.cols*mat.rows);

		for(int x=0; x<mat.cols; x++)
			for(int y=0; y<mat.rows; y++)
				map.data[y*mat.cols+x] = mat.at<int8_t>(y,x)?0:100;	// nonzero pixel -> 0 (free), zero pixel -> 100 (occupied)
	}

	// function to create a cv::Mat out of a given occupancyGrid map
	// NOTE(review): the matrix is allocated as CV_8U (unsigned) but accessed via at<int8_t> (signed);
	// the element sizes match so this works in practice, but the types are inconsistent.
	void mapToMat(const nav_msgs::OccupancyGrid &map, cv::Mat &mat)
	{
		mat = cv::Mat(map.info.height, map.info.width, CV_8U);

		for(int x=0; x<mat.cols; x++)
			for(int y=0; y<mat.rows; y++)
				mat.at<int8_t>(y,x) = map.data[y*mat.cols+x];
	}

	// !!Important!!
	// define the NodeHandle before the action server, or else the server won't start
	//
	ros::NodeHandle node_handle_;
	actionlib::SimpleActionServer<ipa_building_msgs::RoomExplorationAction> room_exploration_server_;
	dynamic_reconfigure::Server<ipa_room_exploration::RoomExplorationConfig> room_exploration_dynamic_reconfigure_server_;

private:
	void turn(int lr,int time);		// rotate in place; lr presumably selects the turn direction (rad/s), time in units of 0.1 s -- TODO confirm
	void gostraight(int time);		// drive straight ahead for 'time' units of 0.1 s
	void publishZeroVelocity();		// publishes a velocity command of zero to the base (stops the robot)
	bool flag_=0;					// presumably set when stereo_CB() detects a near obstacle so that an avoidance maneuver is triggered -- TODO confirm

public:
	// planning mode of the coverage path: footprint-based or field-of-view-based (see planning_mode_)
	enum PlanningMode {PLAN_FOR_FOOTPRINT=1, PLAN_FOR_FOV=2};

	ros::Subscriber obstacles_sub_;	// subscribes to obstacle distance information (ipa_building_msgs::dis_info_array), handled in stereo_CB()
	ros::Subscriber cmd_vel_sub_;	// subscribes to velocity commands (geometry_msgs::Twist), handled in cmd_CB()

	// callback for obstacle distance messages from the stereo camera; presumably triggers the avoidance behavior -- TODO confirm
	void stereo_CB(const ipa_building_msgs::dis_info_array::ConstPtr& flag);

	// callback for incoming velocity command messages
	void cmd_CB(const geometry_msgs::Twist::ConstPtr& cmd_vel);

	// initialize the action-server
	RoomExplorationServer(ros::NodeHandle nh, std::string name_of_the_action);

	// default destructor for the class
	~RoomExplorationServer(void)
	{
	}
};

View File

@ -0,0 +1,46 @@
<launch>
  <!-- Arguments -->
  <arg name="scan_topic" default="scan"/>
  <arg name="initial_pose_x" default="0.0"/>
  <arg name="initial_pose_y" default="0.0"/>
  <arg name="initial_pose_a" default="0.0"/>

  <!-- AMCL: adaptive Monte Carlo localization of the robot against the static map -->
  <node pkg="amcl" type="amcl" name="amcl">

    <!-- particle filter parameters -->
    <param name="min_particles" value="500"/>
    <param name="max_particles" value="3000"/>
    <param name="kld_err" value="0.02"/>
    <param name="update_min_d" value="0.20"/>
    <param name="update_min_a" value="0.20"/>
    <param name="resample_interval" value="1"/>
    <param name="transform_tolerance" value="0.5"/>
    <!-- recovery alphas of 0.0 disable the random-particle recovery behavior -->
    <param name="recovery_alpha_slow" value="0.00"/>
    <param name="recovery_alpha_fast" value="0.00"/>
    <param name="initial_pose_x" value="$(arg initial_pose_x)"/>
    <param name="initial_pose_y" value="$(arg initial_pose_y)"/>
    <param name="initial_pose_a" value="$(arg initial_pose_a)"/>
    <param name="gui_publish_rate" value="50.0"/>
    <remap from="scan" to="$(arg scan_topic)"/>

    <!-- laser sensor model parameters (3.5 m max range matches the TurtleBot3 LDS lidar) -->
    <param name="laser_max_range" value="3.5"/>
    <param name="laser_max_beams" value="180"/>
    <param name="laser_z_hit" value="0.5"/>
    <param name="laser_z_short" value="0.05"/>
    <param name="laser_z_max" value="0.05"/>
    <param name="laser_z_rand" value="0.5"/>
    <param name="laser_sigma_hit" value="0.2"/>
    <param name="laser_lambda_short" value="0.1"/>
    <param name="laser_likelihood_max_dist" value="2.0"/>
    <param name="laser_model_type" value="likelihood_field"/>

    <!-- differential-drive odometry motion model parameters -->
    <param name="odom_model_type" value="diff"/>
    <param name="odom_alpha1" value="0.1"/>
    <param name="odom_alpha2" value="0.1"/>
    <param name="odom_alpha3" value="0.1"/>
    <param name="odom_alpha4" value="0.1"/>
    <param name="odom_frame_id" value="odom"/>
    <param name="base_frame_id" value="base_footprint"/>
  </node>
</launch>

View File

@ -0,0 +1,28 @@
<?xml version="1.0"?>
<launch>
  <!-- robot model type; the TurtleBot3 "waffle" is used here -->
  <arg name="model" value="waffle" doc="model type [burger, waffle, waffle_pi]"/>
  <!-- <arg name="model" value="burger" doc="model type [burger, waffle, waffle_pi]"/> -->
  <arg name="configuration_basename" default="turtlebot3_lds_2d.lua"/>
  <!-- <arg name="map_file" default="$(find clean_robot)/maps/clean_room.yaml"/> -->
  <arg name="map_file" default="$(find ipa_room_exploration)/ros/maps/sim.yaml"/>
  <arg name="open_rviz" default="true"/>

  <!-- start the simulation environment (robot description + Gazebo world) -->
  <include file="$(find turtlebot3_bringup)/launch/turtlebot3_remote.launch">
    <arg name="model" value="$(arg model)" />
  </include>
  <include file="$(find ipa_room_exploration)/ros/launch/gazebo.launch"/>

  <!-- start the navigation stack, used for goal-point path planning and for avoiding small obstacles -->
  <include file="$(find ipa_room_exploration)/ros/launch/turtlebot3_navigation.launch">
    <arg name="model" value="$(arg model)" />
    <arg name="map_file" value="$(arg map_file)"/>
  </include>

  <!-- cleaning (coverage) path planner - currently disabled -->
  <!-- <node pkg="ipa_room_exploration" type="room_exploration_client" respawn="false" name="room_exploration_client" output="screen" clear_params="true">
  <rosparam file="$(find ipa_room_exploration)/ros/config/costmap_common_params.yaml" command="load" ns="cleaning_costmap" />
  <rosparam file="$(find ipa_room_exploration)/ros/config/cleaning_costmap_params.yaml" command="load" />
  </node> -->

  <!-- sends goal points to the navigation stack along the computed cleaning path - currently disabled -->
  <!-- tolerance_goal sets how close the robot must get to the current goal before the next goal is published -->
  <!-- <param name="/NextGoal/tolerance_goal" value="0.25" />
  <node pkg="ipa_room_exploration" type="next_goal" respawn="true" name="next_goal" output="screen" /> -->
</launch>

View File

@ -0,0 +1,14 @@
<?xml version="1.0"?>
<launch>
  <!-- start map accessibility analysis: offers services (e.g. perimeter accessibility checks)
       that tell whether map positions are reachable by the robot -->
  <node ns="room_exploration" pkg="cob_map_accessibility_analysis" type="map_accessibility_analysis_server" name="map_accessibility_analysis" output="log"> <!-- ns=namespace (arbitrary), type=name of executable, name=node name (arbitrary) -->
    <!--launch-prefix="/usr/bin/gdb"-->
    <rosparam command="load" file="$(find ipa_room_exploration)/ros/launch/cob_map_accessibility_analysis_params.yaml"/>
    <!-- wire the node's private map/obstacle inputs to the global map and the move_base costmap topics -->
    <remap from="~map" to="/map"/>
    <remap from="~obstacles" to="/move_base/global_costmap/obstacles"/>
    <remap from="~inflated_obstacles" to="/move_base/local_costmap/inflated_obstacles"/>
  </node>
</launch>

View File

@ -0,0 +1,19 @@
# Parameters for the cob_map_accessibility_analysis server.

# if true, the path to a goal position must be accessible as well (not only the goal pose itself)
# bool
approach_path_accessibility_check: true

# link name of the map in tf
# NOTE(review): tf2 rejects frame ids with a leading slash -- confirm "/map" works with the ROS version in use
# string
map_link_name: "/map"

# link name of the robot base in tf
# string
robot_base_link_name: "/base_footprint"

# update rate for incoming messages about obstacles (provided in [Hz])
# double
obstacle_topic_update_rate: 5.0

# publish the inflated map for viewing/debugging purposes
# bool
publish_inflated_map: false

Some files were not shown because too many files have changed in this diff Show More