# CleanRobot-UESTC/后处理/物体形状/main.py

import copy
import math
import cv2
import numpy as np
from matplotlib import pyplot
from numpy import where, mean
from scipy import optimize
from sklearn.cluster import OPTICS
from config import opt
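# Post-processing pipeline for the semantic occupancy map (as used in __main__ below):
#   1. process_map()  cleans the map, deskews the walls and closes doorways
#   2. get_labels()   splits the closed map into rooms and classifies each room's scene
#   3. obj_outline()  clusters the semantic points of each object and draws its shape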
# Map image pre-processing
def process_map(img, opt):
def get_wall_area(map, stats, area):
'''
. map    image from which the wall is extracted
. stats  connected-component statistics
. area   area threshold (px)
'''
temp = []
for i in range(1, num_labels):
if stats[i, 4] > area:
temp.append(i)
wall_area = np.zeros((map.shape[0], map.shape[1]), np.uint8)
for i in range(0, len(temp)):
mask = labels == temp[i]
wall_area[:, :][mask] = 255
return wall_area
def get_NONwall_area(map, stats, area):
'''
. map    image from which the non-wall (semantic) points are extracted
. stats  connected-component statistics
. area   area threshold (px)
'''
temp = []
for i in range(1, num_labels):
if stats[i, 4] <= area:
temp.append(i)
wall_area = np.zeros((map.shape[0], map.shape[1]), np.uint8)
for i in range(0, len(temp)):
mask = labels == temp[i]
wall_area[:, :][mask] = 255
return wall_area
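# The two helpers above split the binarised map by connected-component area:
# stats[i, 4] is cv2.CC_STAT_AREA, so components larger than `area` are kept as
# wall and the remaining small components as semantic points.  A minimal call
# sketch (400 px is an assumed threshold; the real value comes from opt.acreage):
#   num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(map, connectivity=8)
#   walls = get_wall_area(map, stats, 400)
#   points = get_NONwall_area(map, stats, 400)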
def get_rotate_angle(wall_area, length, opt):
edges = cv2.Canny(wall_area, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 180, length)
# output = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
alfa = 0
for line in lines:
rho = line[0][0]
theta = line[0][1]
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * (a))
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * (a))
alfa = alfa + np.mod(theta, 90 / 360 * np.pi)  # accumulate the angle folded into [0, pi/4)
# print(theta)
# cv2.line(output, (x1, y1), (x2, y2), (0, 0, 255), 2)
wall_area_ = rotate_bound(wall_area, -np.mod(theta, 90 / 360 * np.pi) / np.pi * 180)  # deskew the image
# wall_area_ = cv2.dilate(wall_area_, kernel)  # erode/dilate to remove semantic points on the wall
connect_line = connect_door(wall_area_, opt.k, opt.m)  # close off the rooms
connect_line_ = rotate_bound(connect_line, np.mod(theta, 90 / 360 * np.pi) / np.pi * 180)
# cv2.namedWindow("wall", 0)
# cv2.imshow("wall", output)
# cv2.waitKey(0)
alfa = alfa / lines.shape[0]  # mean folded line angle (rad)
print(alfa)
# cv2.namedWindow("wall", 0)
# cv2.imshow("wall", output)
# cv2.waitKey(0)
return connect_line_
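# get_rotate_angle works on the Hough lines of the wall image: each cv2.HoughLines
# entry is (rho, theta) with theta in radians, and the wall image is deskewed by
# the angle folded into [0, 45 deg) (np.mod(theta, pi / 4)) before the doorway-closing
# scan, then rotated back.  Only the result computed for the last line in the loop
# is returned.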
def rotate_bound(image, angle):
'''
. Rotate an image about its centre
. @param image  image as read by OpenCV
. @param angle  rotation angle in degrees
'''
# img = cv2.imread("img/1.jpg")
(h, w) = image.shape[:2]  # shape is (height, width[, channels]); only the first two values are used
# Build the rotation matrix (the negated angle rotates clockwise).
# Arg 1: centre of rotation; arg 2: angle, positive = counter-clockwise; arg 3: isotropic scale factor.
M = cv2.getRotationMatrix2D((w / 2, h / 2), -angle, 1.0)
# The bounding dimensions are not recomputed and the matrix is not adjusted for
# translation; the rotation is applied on the original canvas and returned.
return cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))  # borderValue omitted; defaults to black
# return M
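# Note: because cv2.warpAffine is called with the original (width, height),
# pixels rotated outside the original canvas are clipped rather than preserved;
# for the small deskew angles used here the loss is usually negligible.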
def get_map(img, ratio):
"""
:param img:
:param ratio: 获取地图2表示有效面积的2倍
:return:
"""
ratio = (ratio - 1) / 2
b = np.nonzero(img)
x_min = np.min(b[0])
y_min = np.min(b[1])
x_max = np.max(b[0])
y_max = np.max(b[1])
x_l = x_max - x_min
y_l = y_max - y_min
if x_l < 150:
ratio = ratio + 0.5
# map = np.zeros((x_max - x_min, y_max - y_min))
map = img[int(max(x_min - ratio * x_l, 0)):int(min(x_max + x_l * ratio, img.shape[0])),
          int(max(0, y_min - ratio * y_l)):int(min(img.shape[1], ratio * y_l + y_max))]
'''
Remove the unoccupied area
'''
map[map < 70] = 255
map[map <= 100] = 0
map[map == 0] = 1
map[map == 255] = 0
map[map == 1] = 255
"墙壁灰度值为1"
return map
def draw_rgb(map):
def rgb_semantics(number):
    # (R, G, B) colour for each semantic intensity; unknown intensities fall back to (5, 5, 5)
    colors = {
        115: (232, 221, 203),  # object 0
        116: (205, 179, 128),  # object 1
        117: (3, 101, 100),    # object 2
        118: (3, 54, 73),      # object 3
        119: (3, 22, 52),      # object 4
        120: (237, 222, 139),  # object 5
        121: (251, 178, 23),   # object 6
        150: (96, 143, 159),   # object 7
        151: (1, 77, 103),     # object 8
        152: (254, 67, 101),   # object 9
        153: (252, 157, 154),  # object 10
        154: (249, 205, 173),  # object 11
        155: (200, 200, 169),  # object 12
        156: (131, 175, 155),  # object 13
        200: (229, 187, 129),  # object 14
        201: (161, 23, 21),    # object 15
        202: (118, 77, 57),    # object 16
        203: (17, 63, 61),     # object 17
        204: (60, 79, 57),     # object 18
        205: (95, 92, 51),     # object 19
        206: (179, 214, 110),  # object 20
        207: (227, 160, 93),   # object 21
        208: (178, 190, 126),  # object 22
        209: (56, 13, 49),     # object 23
    }
    output = np.array(colors.get(number, (5, 5, 5)), np.uint8)
    return output
output = np.zeros((map.shape[0], map.shape[1], 3), np.uint8)
output_b = copy.copy(map)
output_g = copy.copy(map)
output_r = copy.copy(map)
for i in range(np.array(Intensity_value).shape[0]):
intensity = Intensity_value[i]
output_r[map == intensity] = rgb_semantics(intensity)[0]
output_g[map == intensity] = rgb_semantics(intensity)[1]
output_b[map == intensity] = rgb_semantics(intensity)[2]
output[:, :, 0] = output_b
output[:, :, 1] = output_g
output[:, :, 2] = output_r
return output
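# Usage sketch for draw_rgb (it reads Intensity_value from the enclosing scope;
# the input is assumed to be the semantic-point image, e.g. NON_wall_area below):
#   rgb = draw_rgb(NON_wall_area)
#   cv2.imwrite("semantic_rgb.png", rgb)   # hypothetical output name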
# Row scan: when a row contains enough wall pixels, gaps of at most k pixels between them are filled with 255
# Connect the doorways so each room becomes a closed region
def connect_door(wall_area, k, m):
"""
:param wall_area: binary wall image
:param k: door width (maximum gap, in pixels, that gets filled)
:param m: minimum wall length (in pixels) for a row/column to be processed
:return:
"""
def edge_connection(img, size, k, m):
for i in range(size):
Yi = np.where(img[i, :] > 220)
# print(Yi)
if len(Yi[0]) >= m:  # adjustable (minimum wall length)
for j in range(0, len(Yi[0]) - 1):
if Yi[0][j + 1] - Yi[0][j] <= k:
img[i, Yi[0][j]:Yi[0][j + 1]] = 255
return img
img = copy.copy(wall_area)
g = edge_connection(img, img.shape[0], k, m)  # k is the door width, m is the minimum length to count as wall
g = cv2.rotate(g, 0)
g = edge_connection(g, img.shape[1], k, m)
g = cv2.rotate(g, 2)
g = g.astype(np.uint8)
return g
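# connect_door fills horizontal gaps first (rows that already contain at least m
# wall pixels get gaps of up to k pixels bridged), then rotates the image 90
# degrees clockwise (cv2.rotate code 0), repeats the scan for the former columns,
# and finally rotates back counter-clockwise (code 2).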
'Define the semantic intensity values'
Intensity_value = [115, 116, 117, 118, 119, 120, 121, 150, 151, 152, 153,
154, 155, 156, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209]
# Intensity_value = [153]
map = get_map(img, 1.2)
'Interpolation to increase the resolution'
map = cv2.resize(map, (map.shape[1] * 2, map.shape[0] * 2), interpolation=cv2.INTER_NEAREST)
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(map, connectivity=8)
wall_area = get_wall_area(map, stats, opt.acreage)  # extract the wall area
NON_wall_area = get_NONwall_area(map, stats, opt.acreage)
NON_wall_area = NON_wall_area / 255
NON_wall_area = NON_wall_area * map
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
wall_area = cv2.dilate(wall_area, kernel)
connect_line_ = get_rotate_angle(wall_area, opt.length, opt)  # deskew, connect doorways, rotate back
# wall_area_ = rotate_bound(wall_area, -alfa / np.pi * 180)  # deskew the image
# # wall_area_ = cv2.dilate(wall_area_, kernel)  # erode/dilate to remove semantic points on the wall
#
# connect_line = connect_door(wall_area_, opt.k, opt.m)  # close off the rooms
# connect_line_ = rotate_bound(connect_line, alfa / np.pi * 180)
connect_line_[connect_line_ < 150] = 0
connect_line_[connect_line_ > 0] = 255
result = connect_line_ + NON_wall_area
result[result > 255] = 255
result = result.astype(np.uint8)
result[result == 0] = 254
result[result == 255] = 0
result[result == 254] = 255
# result = filter_wrong_point(result, Intensity_value)
# output_rgb = draw_rgb(NON_wall_area)
# connect_line_rgb = np.zeros((map.shape[0], map.shape[1], 3), np.uint8)
# connect_line_rgb[:, :, 0] = copy.copy(connect_line_)
# connect_line_rgb[:, :, 1] = copy.copy(connect_line_)
# connect_line_rgb[:, :, 2] = copy.copy(connect_line_)
# connect_line_rgb[connect_line_rgb < 65] = 0
# result_rgb = connect_line_rgb + output_rgb
# cv2.namedWindow("input_map", 0)
# cv2.imshow("input_map", map)
# cv2.namedWindow("wall", 0)
# cv2.imshow("wall", connect_line)
# cv2.namedWindow("map", 0)
# cv2.imshow("map", map)
# cv2.namedWindow("output", 0)
# cv2.imshow('output', result_rgb)
# cv2.namedWindow("result", 0)
# cv2.imshow('result', result)
# cv2.waitKey()
# cv2.destroyAllWindows()
# cv2.imwrite("result_rgb.png", result_rgb)
cv2.imwrite("result.png", result)
return result
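# Minimal usage sketch for process_map (assumes config.opt provides the
# acreage, length, k and m fields referenced above):
#   raw = cv2.imread("map.png", 0)
#   cleaned = process_map(raw, opt)   # also writes result.png to the working directory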
# Draw object shapes
def obj_outline(map, Semantic_map, opt):
'''
. Draw the shapes of the semantic objects
. @param map           semantic map
. @param Semantic_map  scene-classification image
. @param opt           parameters
'''
# Per-object parameters for each semantic intensity
def get_parameter(item, opt):
    '''
    . Get the parameters of an object
    . @param item  intensity value of the object
    . @param opt   parameter object; expects min_samples_<item>, max_eps_<item>,
    .              shape_<item> (0 = circle, 1 = rectangle) and
    .              size_<item> (0 = rotated contour rectangle, 1 = axis-aligned rectangle)
    '''
    # (R, G, B) colour per semantic intensity (same values as the original per-item branches)
    colors = {
        115: [232, 221, 203], 116: [205, 179, 128], 117: [3, 101, 100],
        118: [3, 54, 73], 119: [3, 22, 52], 120: [237, 22, 139],
        121: [251, 178, 23], 150: [96, 143, 159], 151: [1, 77, 103],
        152: [254, 67, 101], 153: [252, 157, 154], 154: [249, 205, 173],
        155: [200, 200, 169], 156: [131, 175, 155], 200: [229, 187, 129],
        201: [161, 23, 21], 202: [118, 77, 57], 203: [17, 63, 61],
        204: [60, 79, 57], 205: [95, 92, 51], 209: [56, 13, 49],
    }
    min_samples = getattr(opt, 'min_samples_%d' % item)
    max_eps = getattr(opt, 'max_eps_%d' % item)
    shape = getattr(opt, 'shape_%d' % item)  # 0 = circle, 1 = rectangle
    size = getattr(opt, 'size_%d' % item)    # 0 = follow the contour, 1 = draw an axis-aligned rectangle
    color = colors[item]
    return min_samples, max_eps, shape, size, color
# Draw the outline of an object
def draw_outline(X, item, min_samples, max_eps, shape, color, size, output):
'''
. Draw the outlines of one object class
. @param X            row/column coordinates of the semantic object's pixels
. @param item         intensity value of the semantic object
. @param min_samples  minimum number of samples for clustering
. @param max_eps      maximum clustering radius
. @param shape        shape to draw: 0 = circle, 1 = rectangle
. @param color        colour of the semantic object
. @param size         outline mode: 0 = follow the contour (rotated rectangle), 1 = axis-aligned rectangle
. @param output       map image on which the previous object shapes have already been drawn
'''
# OPTICS clustering
# X = X.astype(np.uint8)
yhat = OPTICS(min_samples=min_samples, max_eps=max_eps, cluster_method='dbscan').fit_predict(X)
# Find the unique clusters
clusters = np.unique(yhat)
# Scatter plot
# Create a scatter plot for the samples of each cluster
# for cluster in clusters:
#     # row indices of the samples in this cluster
#     row_ix = where(yhat == cluster)
#     # scatter these samples
#     pyplot.scatter(X[row_ix, 0], X[row_ix, 1])
#     # show the plot
#     pyplot.show()
'''
Find the minimum bounding rectangle (draw the shape)
'''
color = color[::-1]
for cluster in clusters:
# Row indices of the samples in this cluster; extract each object instance from the map
if (cluster == -1):
continue
row_ix = where(yhat == cluster)
# print(row_ix[0].shape[0])
# Minimum number of points a cluster must exceed, per semantic intensity
# (clusters at or below the threshold are skipped; unlisted intensities use 1)
min_pts = {115: 2, 116: 4, 117: 4, 118: 2, 119: 2, 120: 2, 121: 4,
           150: 4, 151: 4, 152: 4, 153: 2, 154: 2, 155: 2, 156: 2,
           200: 2, 201: 2, 202: 2, 203: 2, 204: 2, 205: 2, 209: 2}
if row_ix[0].shape[0] <= min_pts.get(item, 1):
    continue
obj = np.zeros((map.shape[0], map.shape[1]), np.uint8)
row = X[row_ix, 0].astype(int)
col = X[row_ix, 1].astype(int)
obj[row, col] = 1
# cv2.namedWindow("obj", 0)
# cv2.imshow('obj',obj)
if (shape == 1):
# compute the bounding rectangle
box = outer_rectangle(obj, size)
# color = color[::-1]
# draw
if (size == 0):
# optionally shrink the bounding rectangle
# vertices = shrink_rectangle(box)
vertices = box
output = cv2.drawContours(output, [vertices], 0, color, -1, lineType=cv2.LINE_4)
elif (size == 1):
# optionally shrink the bounding rectangle
# vertices = shrink_rectangle(box)
vertices = box
cv2.rectangle(output, (vertices[0, 0], vertices[0, 1]), (vertices[2, 0], vertices[2, 1]), color, -1)
elif (shape == 0):
circle_x = col
circle_y = row
center, radius = fit_circle(circle_x, circle_y)
# binary, contours, hierarchy = cv2.findContours(obj, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# center, radius = outer_circle(obj)
cv2.circle(output, center, radius, color, -1)
return output
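# OPTICS(cluster_method='dbscan') extracts DBSCAN-style clusters with eps equal
# to max_eps when no explicit eps is given; samples labelled -1 are noise and are
# skipped above.  Standalone sketch (min_samples=3 and max_eps=5 are assumed
# values; the real ones come from get_parameter):
#   yhat = OPTICS(min_samples=3, max_eps=5, cluster_method='dbscan').fit_predict(X)
#   clusters = np.unique(yhat[yhat != -1])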
# Compute the minimum bounding rectangle
def outer_rectangle(obj, size):
'''
. Compute the minimum bounding rectangle
. @param obj   binary mask with the object's pixel positions
. @param size  outline mode: 0 = rotated (contour) rectangle, 1 = axis-aligned rectangle
'''
binary, contours, hierarchy = cv2.findContours(obj, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contour = []
for cont in contours:
contour.extend(cont)
if (size == 0):
min_rect = cv2.minAreaRect(np.array(contour))
box = cv2.boxPoints(min_rect)
# box = np.round(box)
box = np.int0(box)
# Order the corner points
temp = np.where(box == np.min(box[:, 0]))  # column 0 of box is the image column, column 1 is the image row
# print(temp[0].shape[0])
if temp[0].shape[0] > 1:
left = np.min(box[:, 0])
right = np.max(box[:, 0])
up = np.min(box[:, 1])
down = np.max(box[:, 1])
top_point_x = left
top_point_y = up
right_point_x = right
right_point_y = up
bottom_point_x = right
bottom_point_y = down
left_point_x = left
left_point_y = down
vertices = np.array(
[[top_point_x, top_point_y], [right_point_x, right_point_y], [bottom_point_x, bottom_point_y],
[left_point_x, left_point_y]])
return box
left_point_x = np.min(box[:, 0])
right_point_x = np.max(box[:, 0])
top_point_y = np.min(box[:, 1])
bottom_point_y = np.max(box[:, 1])
left_point_y = box[:, 1][np.where(box[:, 0] == left_point_x)][0]
right_point_y = box[:, 1][np.where(box[:, 0] == right_point_x)][0]
top_point_x = box[:, 0][np.where(box[:, 1] == top_point_y)][0]
bottom_point_x = box[:, 0][np.where(box[:, 1] == bottom_point_y)][0]
vertices = np.array(
[[top_point_x, top_point_y], [right_point_x, right_point_y], [bottom_point_x, bottom_point_y],
[left_point_x, left_point_y]])
return box
elif (size == 1):
x, y, w, h = cv2.boundingRect(np.array(contour))
vertices = np.array([[x, y], [x + w, y], [x + w, y + h],
[x, y + h]])
return vertices
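# Note: the three-value unpacking of cv2.findContours above follows the
# OpenCV 3.x API; OpenCV >= 4 returns only (contours, hierarchy).  The two
# rectangle variants used here are:
#   rect = cv2.minAreaRect(contour)          # rotated minimum-area rectangle ((cx, cy), (w, h), angle)
#   box = np.int0(cv2.boxPoints(rect))       # its four corner points
#   x, y, w, h = cv2.boundingRect(contour)   # axis-aligned bounding rectangle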
# Compute the minimum enclosing circle
def outer_circle(obj):
'''
. Compute the minimum enclosing circle
. @param obj  binary mask with the object's pixel positions
'''
binary, contours, hierarchy = cv2.findContours(obj, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contour = []
for cont in contours:
contour.extend(cont)
(x, y), radius = cv2.minEnclosingCircle(np.array(contour))
center = (int(round(x)), int(round(y)))
radius = int(round(radius))
print(x, y, radius)
return center, radius
# Least-squares circle fit
def fit_circle(x, y):
    '''
    . Fit a circle to the given points by least squares
    . @param x  columns of the semantic object's pixels
    . @param y  rows of the semantic object's pixels
    '''
    # flatten so the residual vector passed to leastsq is one-dimensional
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()

    def calc_R(xc, yc):
        # distance of every point to a candidate centre (xc, yc)
        return np.sqrt((x - xc) ** 2 + (y - yc) ** 2)

    def f_2(c):
        # residuals: deviation of the distances from their mean
        Ri = calc_R(*c)
        return Ri - Ri.mean()

    x_m = mean(x)
    y_m = mean(y)
    center_estimate = x_m, y_m
    center_2, _ = optimize.leastsq(f_2, center_estimate)
    xc_2, yc_2 = center_2
    Ri_2 = calc_R(xc_2, yc_2)
    # radius of the fitted circle
    R_2 = Ri_2.mean()
    center = (int(round(xc_2)), int(round(yc_2)))
    radius = int(round(R_2))
    print(xc_2, yc_2, radius)
    return center, radius
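# The least-squares fit minimises the spread of the point-to-centre distances
# (Ri - mean(Ri)), so the fitted radius is the mean distance to the optimised
# centre.  Sketch with hypothetical points on a circle of radius 2 around (2, 2):
#   fit_circle(np.array([0, 4, 2, 2]), np.array([2, 2, 0, 4]))   # -> ((2, 2), 2)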
# Shrink the bounding rectangle inwards
def shrink_rectangle(box):
'''
. Shrink the bounding rectangle inwards by one pixel on each side
. @param box  corner points of the object's bounding rectangle
'''
temp = np.where(box == np.min(box[:, 0]))  # column 0 of box is the image column, column 1 is the image row
# print(temp[0].shape[0])
if temp[0].shape[0] > 1:
left = np.min(box[:, 0])
right = np.max(box[:, 0])
up = np.min(box[:, 1])
down = np.max(box[:, 1])
top_point_x = left + 1
top_point_y = up + 1
right_point_x = right - 1
right_point_y = up + 1
bottom_point_x = right - 1
bottom_point_y = down - 1
left_point_x = left + 1
left_point_y = down - 1
vertices = np.array(
[[top_point_x, top_point_y], [right_point_x, right_point_y], [bottom_point_x, bottom_point_y],
[left_point_x, left_point_y]])
return vertices
left_point_x = np.min(box[:, 0])
right_point_x = np.max(box[:, 0])
top_point_y = np.min(box[:, 1])
bottom_point_y = np.max(box[:, 1])
left_point_y = box[:, 1][np.where(box[:, 0] == left_point_x)][0]
right_point_y = box[:, 1][np.where(box[:, 0] == right_point_x)][0]
top_point_x = box[:, 0][np.where(box[:, 1] == top_point_y)][0]
bottom_point_x = box[:, 0][np.where(box[:, 1] == bottom_point_y)][0]
vertices = np.array(
[[top_point_x, top_point_y], [right_point_x, right_point_y], [bottom_point_x, bottom_point_y],
[left_point_x, left_point_y]])
# print(vertices)
if top_point_x > bottom_point_x:
vertices[0, 0] = vertices[0, 0] - 1
vertices[0, 1] = vertices[0, 1] + 1
vertices[2, 0] = vertices[2, 0] + 1
vertices[2, 1] = vertices[2, 1] - 1
if top_point_x < bottom_point_x:
vertices[0, 0] = vertices[0, 0] + 1
vertices[0, 1] = vertices[0, 1] + 1
vertices[2, 0] = vertices[2, 0] - 1
vertices[2, 1] = vertices[2, 1] - 1
if right_point_y > left_point_y:
vertices[1, 0] = vertices[1, 0] - 1
vertices[1, 1] = vertices[1, 1] - 1
vertices[3, 0] = vertices[3, 0] + 1
vertices[3, 1] = vertices[3, 1] + 1
if right_point_y < left_point_y:
vertices[1, 0] = vertices[1, 0] - 1
vertices[1, 1] = vertices[1, 1] + 1
vertices[3, 0] = vertices[3, 0] + 1
vertices[3, 1] = vertices[3, 1] - 1
return vertices
# Add a legend entry
def draw_legend(img, item, color, k):
obj_name = {'115': 'Dog basin', '116': 'Bar chair base', '117': 'Fan base', '118': 'Washing machine',
'119': 'Refrigerator', '120': 'Toilet', '121': 'Weighing scale', '150': 'Wire', '152': 'Desk',
'153': 'Carpet', '154': 'Rag', '155': 'Tea table', '156': 'TV cabinet', '200': 'Slippers',
'201': 'Sock', '202': 'Wardrobe', '203': 'Bed', '204': 'Sofa', '205': 'Chair'}
color = color[::-1]
text = obj_name[str(item)]
font = cv2.FONT_HERSHEY_DUPLEX
# cv2.rectangle(img, (output.shape[1] - 140, 10 + k * 17), (output.shape[1] - 120, 20 + k * 17), color, -1)
# cv2.putText(img, text, (output.shape[1] - 100, 20 + k * 17), font, 0.4, color, 1, cv2.LINE_AA)
cv2.rectangle(img, (10, 10 + k * 17), (30, 20 + k * 17), color, -1)
cv2.putText(img, text, (40, 20 + k * 17), font, 0.4, color, 1, cv2.LINE_AA)
return img
# Intensity values of the semantic objects to draw
# obj_value = [152, 121, 200, 115, 205, 117, 203]
# obj_value = [115, 116, 117, 118, 119, 120, 121, 150, 151, 152, 153,
# 154, 155, 156, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209]
# obj_value = [152, 117]
obj_value = []
obj_legend = []
# output is the final image onto which all object shapes are drawn
output = Semantic_map
# output = np.zeros((map.shape[0], map.shape[1], 3), np.uint8)
# output[:, :, 0] = map
# output[:, :, 1] = map
# output[:, :, 2] = map
# cv2.namedWindow("map", 0)
# cv2.imshow("map", map)
k = 0
# Draw the shape of each object class
for item in iter(obj_value):
if ((map == item).any()):
# min_samples, max_eps, shape, size, color = get_parameter(item)
min_samples, max_eps, shape, size, color = get_parameter(item, opt)
y, x = np.where(map == item)  # y is the row index, x is the column index
X = np.zeros((x.shape[0], 2))
X[:, 0] = y
X[:, 1] = x
output = draw_outline(X, item, min_samples, max_eps, shape, color, size, output)
if item not in obj_legend:
output = draw_legend(output, item, color, k)
k = k + 1
img1 = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)  # grayscale copy, convenient for measuring sizes
return output
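# Usage sketch for obj_outline (as in __main__ below): `map` is the cleaned map
# from process_map and `Semantic_map` the scene-coloured image from get_labels.
# Note that obj_value above is currently an empty list, so no shapes are drawn
# unless intensities are added to it.
#   out = obj_outline(map, Semantic_map, opt)
#   cv2.imwrite("output.png", out)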
# Scene recognition
def get_labels(img):
def get_wall_area(map, stats, area):
'''
. map    image from which the wall is extracted
. stats  connected-component statistics
. area   area threshold (px)
'''
temp = []
for i in range(1, num_labels):
if stats[i, 4] > area:  # the i-th component counts as wall if its area exceeds the threshold
temp.append(i)
wall_area = np.zeros((map.shape[0], map.shape[1]), np.uint8)
for i in range(0, len(temp)):
mask = labels == temp[i]
wall_area[:, :][mask] = 255
return wall_area
# def draw_legend(img,scene,color,k):
# obj_name = {'0':'Dog basin','1':'Fan base','2':'Weighing scale','3':'Desk','4':'Slippers'}
# color = color[::-1]
# text = obj_name[str(scene)]
# font = cv2.FONT_HERSHEY_DUPLEX
# cv2.rectangle(img, (output.shape[1] - 140, 50+k*17), (output.shape[1] - 120, 60+k*17), color, -1)
# cv2.putText(img, text, (output.shape[1] - 100, 60+k*17), font, 0.4, color, 1, cv2.LINE_AA)
# return img
def Scene(obj_label):
'''
:param obj_label: indices (into Intensity_value) of the objects found in the room
:return: index into ["bedroom", "livingroom", "bathroom", "kitchen", "unknown"]
'''
print(obj_label)
Intensity_value = [115, 116, 117, 118, 119, 120, 121, 150, 151, 152, 153,
154, 155, 156, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209]
p_sce = np.ones((1, 5)) * 0.25
p_sce[0][4] = 0
p_obj_sce = np.array([[0, 0.2, 0, 0],
[0, 0, 0, 0],
[0.2, 0.4, 0, 0.01],
[0.01, 0.04, 0.8, 0],
[0.02, 0.2, 0, 0.8],
[0, 0, 0.99, 0],
[0.6, 0.4, 0.1, 0],
[0.6, 0.6, 0, 0.1],
[0.4, 0.4, 0, 0],
[0, 0.4, 0, 0],
[0, 0, 0, 0],
[0.1, 0.1, 0.2, 0.1],
[0.1, 0.7, 0, 0],
[0.4, 0.4, 0, 0],
[0.45, 0.34, 0.32, 0],
[0.27, 0.26, 0, 0],
[0.8, 0, 0, 0],
[0.99, 0, 0, 0],
[0.05, 0.9, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
])
p_temp = 1.0
p_obj = 0.3  # confidence of the object detector
pre_scene = np.zeros((1, 5))
if (len(obj_label) == 0):
pre_scene[0][4] = 1
else:
for i in range(4):
for j in range(len(obj_label)):
k = obj_label[j]
sum_temp = np.sum(p_obj_sce[k])
if (sum_temp == 0):
pre_scene[0][4] = 1
continue
m = p_obj_sce[k, i]
p_temp = (1.0 - (m / sum_temp * p_obj)) * p_temp
pre_scene[0][i] = 1 - p_temp
p_temp = 1.0
if np.max(pre_scene[0, 0:4]) > 0.1:
pre_scene[0][4] = 0
sce_index = np.argmax(pre_scene)
print(pre_scene)
return sce_index
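# Scene scoring: for every known scene i the score is accumulated as
#   pre_scene[i] = 1 - prod_j (1 - p_obj * p_obj_sce[k_j, i] / sum(p_obj_sce[k_j]))
# over the detected objects k_j, with p_obj = 0.3 as the assumed detector
# confidence.  The "unknown" score (index 4) is set to 1 when an object has no
# scene association and cleared again once any scene score exceeds 0.1.
# Note: p_obj_sce has 23 rows while Intensity_value lists 24 intensities, so an
# object with index 23 (intensity 209) would index past the table.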
Intensity_value = [115, 116, 117, 118, 119, 120, 121, 150, 151, 152, 153,
154, 155, 156, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209]
'Keep only the wall space'
temp = copy.copy(img)
temp[temp == 0] = 254
temp[temp == 255] = 0
temp[temp == 254] = 255
temp[temp < 254] = 0
Semantic_map = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
temp = temp.astype(np.uint8)
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(temp, connectivity=8)
wall_area = get_wall_area(img, stats, 400)
wall_area[wall_area == 0] = 254
wall_area[wall_area == 255] = 0
wall_area[wall_area == 254] = 255
wall_area_color = np.zeros((wall_area.shape[0], wall_area.shape[1], 3), np.uint8)
wall_area_color[:, :, 0] = wall_area
wall_area_color[:, :, 1] = wall_area
wall_area_color[:, :, 2] = wall_area
'Extract the rooms'
room_num_labels, room_labels, room_stats, room_centroids = cv2.connectedComponentsWithStats(wall_area,
connectivity=8)
k = 0
scene_legend = []
for i in range(2, room_num_labels):
if room_stats[i, 4] < 200:
continue
room = np.zeros((wall_area.shape[0], wall_area.shape[1]))
obj_label = []
mask = room_labels == i
room[:, :][mask] = 1
Semantic_area = room * img
# cv2.imshow('area', Semantic_area)
# cv2.waitKey(1000)
Semantic_area = list(Semantic_area.flatten())
Semantic_area = list(set(Semantic_area))
Semantic_area.sort()
for i in range(len(Semantic_area)):
if Semantic_area[i] in Intensity_value:
obj_label.append(Intensity_value.index(Semantic_area[i]))
# obj_label_test = []  # for testing
scene = Scene(obj_label)
print(scene)
# print(obj_label)
if scene == 0:
Semantic_map[:, :, 0][mask] = 70
Semantic_map[:, :, 1][mask] = 80
Semantic_map[:, :, 2][mask] = 90
color = [70, 80, 90]
if scene == 1:
Semantic_map[:, :, 0][mask] = 100
Semantic_map[:, :, 1][mask] = 180
Semantic_map[:, :, 2][mask] = 120
color = [100, 180, 120]
if scene == 2:
Semantic_map[:, :, 0][mask] = 210
Semantic_map[:, :, 1][mask] = 67
Semantic_map[:, :, 2][mask] = 170
color = [210, 67, 170]
if scene == 3:
Semantic_map[:, :, 0][mask] = 150
Semantic_map[:, :, 1][mask] = 48
Semantic_map[:, :, 2][mask] = 88
color = [150, 48, 88]
# if scene == 4:
# Semantic_map[:, :, 0][mask] = 134
# Semantic_map[:, :, 1][mask] = 231
# Semantic_map[:, :, 2][mask] = 143
# color = [134, 231, 143]
# if scene not in scene_legend:
# scene_legend.append(scene)
# Semantic_map = draw_legend(Semantic_map, scene, color, k)
# k = k + 1
Semantic_map = Semantic_map + wall_area_color
return Semantic_map
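# Usage sketch for get_labels: it expects the cleaned map produced by
# process_map (walls drawn as 0, free space as 255, semantic intensities kept)
# and returns a BGR image with one colour per classified room plus the walls.
#   Semantic_map = get_labels(process_map(cv2.imread("map.png", 0), opt))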
if __name__ == "__main__":
img = cv2.imread("map.png", 0)
cv2.flip(img, 0, img)  # flip the image vertically
map = process_map(img, opt)
'''
Manually assign semantics to the bed, washing machine and toilet
'''
# bed
map[276:292, 212:224] = 203
map[276:288, 282:296] = 203
map[332:334, 208:210] = 255
map[330:344, 210:220] = 203
map[326:344, 282:294] = 203
# washing machine
map[368:392, 438:448] = 118
# toilet
map[340:356, 428:448] = 120
# socks
map[238:244, 531:533] = 201
map[242:244, 529:533] = 201
# wire
map[259:263, 509:511] = 150
map[261:263, 507:509] = 150
cv2.imwrite("mappp.jpg", map)
Semantic_map = get_labels(map)
output = obj_outline(map, Semantic_map, opt)
cv2.imwrite('output.png', output)
cv2.namedWindow("output", 0)
cv2.imshow("output", output)
cv2.waitKey(0)