Puppy
1. Roscore
```bash
roscore
```
2. Bringup
```bash
export TURTLEBOT3_MODEL=${TB3_MODEL}
roslaunch turtlebot3_bringup turtlebot3_robot.launch
```
3. usb_camera
```bash
roslaunch turtlebot3_custom usb_cam.launch
```
4. SLAM and sensor fusion
```bash
export TURTLEBOT3_MODEL=burger
roslaunch turtlebot3_slam turtlebot3_slam.launch
rosrun sensor_fusion Puppy.py
rosrun sensor_fusion yolo_visualization.py
```
Before starting the two sensor_fusion nodes, it is worth confirming that both sensor topics are actually publishing; a quick check is sketched below.
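The check below is a minimal sketch and not part of the sensor_fusion package: the node name topic_check is made up here, and it simply blocks until one message arrives on each of the two topics used by Puppy.py (/usb_cam/image_raw and /scan).

```python
#!/usr/bin/env python3
# Minimal readiness check (hypothetical helper, not part of the original package):
# block until one message has arrived on each sensor topic before launching the fusion nodes.
import rospy
from sensor_msgs.msg import Image, LaserScan

rospy.init_node('topic_check', anonymous=True)
rospy.wait_for_message('/usb_cam/image_raw', Image, timeout=10.0)
rospy.wait_for_message('/scan', LaserScan, timeout=10.0)
rospy.loginfo("Camera and LiDAR topics are live; the sensor_fusion nodes can be started.")
```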
Code
```python
#!/usr/bin/env python3
import rospy
from sensor_msgs.msg import LaserScan, Image
from geometry_msgs.msg import Twist
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import cv2
from message_filters import ApproximateTimeSynchronizer, Subscriber
from ultralytics import YOLO


class TurtlebotYOLODetector:
    def __init__(self):
        self.bridge = CvBridge()

        # Subscribers for time-synchronized camera and LiDAR data
        self.image_sub = Subscriber("/usb_cam/image_raw", Image)
        self.scan_sub = Subscriber("/scan", LaserScan)
        self.fusion_pub = rospy.Publisher("/fusion_image", Image, queue_size=1)

        # Publisher for TurtleBot velocity commands
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)

        # Approximate time synchronization of image and scan messages
        self.ts = ApproximateTimeSynchronizer([self.image_sub, self.scan_sub], 10, 0.1)
        self.ts.registerCallback(self.sync_callback)

        # Precomputed color map for distance visualization
        self.color_map = self.create_color_map()

        # Load the YOLOv8 model
        try:
            self.yolo_model = YOLO('yolov8n.pt')
            rospy.loginfo("YOLOv8 model loaded successfully")
        except Exception as e:
            rospy.logerr(f"Failed to load YOLOv8 model: {e}")
            self.yolo_model = None

        # Control parameters
        self.target_distance = 0.5       # target distance (meters)
        self.angular_speed = 0.3         # angular speed
        self.linear_speed = 0.05         # linear speed
        self.centering_threshold = 40    # centering tolerance (pixels)

    def sync_callback(self, image_msg, scan_msg):
        try:
            image_data = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")
            self.process_image(image_data, scan_msg)
        except CvBridgeError as e:
            rospy.logerr(f"CvBridge Error: {e}")

    def process_image(self, image_data, scan_msg):
        height, width = image_data.shape[:2]
        center_x = width // 2

        if self.yolo_model is not None:
            results = self.yolo_model(image_data)
            annotated_image = results[0].plot()

            person_detected = False
            for result in results:
                boxes = result.boxes.cpu().numpy()
                for box in boxes:
                    cls = int(box.cls[0])
                    if result.names[cls] == 'person':
                        x1, y1, x2, y2 = box.xyxy[0].astype(int)
                        person_center_x = (x1 + x2) // 2

                        # Keep the person centered in the image
                        if abs(person_center_x - center_x) > self.centering_threshold:
                            if person_center_x < center_x:
                                self.rotate_robot(1)   # rotate right
                                rospy.loginfo("Rotating right to center person")
                            else:
                                self.rotate_robot(-1)  # rotate left
                                rospy.loginfo("Rotating left to center person")
                        else:
                            self.adjust_distance(scan_msg)
                            rospy.loginfo("Person centered, adjusting distance")

                        person_detected = True
                        break  # only handle the first detected person
                if person_detected:
                    break

            if not person_detected:
                self.stop_robot()
                rospy.loginfo("No person detected, stopping robot")
        else:
            annotated_image = image_data.copy()
            rospy.logwarn("YOLO model not available, using original image without detections")

        # Process the LiDAR data and fuse it with the image
        fusion_image = self.fuse_data(annotated_image, scan_msg)

        # Publish the fused result
        try:
            fusion_msg = self.bridge.cv2_to_imgmsg(fusion_image, "bgr8")
            self.fusion_pub.publish(fusion_msg)
        except CvBridgeError as e:
            rospy.logerr(f"CvBridge Error: {e}")

    def rotate_robot(self, direction):
        twist = Twist()
        twist.angular.z = self.angular_speed * direction
        self.cmd_vel_pub.publish(twist)

    def adjust_distance(self, scan_msg):
        # Measure the forward distance (0-degree direction)
        front_distance = scan_msg.ranges[0]
        twist = Twist()

        if front_distance > self.target_distance + 0.1:
            twist.linear.x = self.linear_speed   # move forward
            rospy.loginfo(f"Moving forward, current distance: {front_distance:.2f}m")
        elif front_distance < self.target_distance - 0.1:
            twist.linear.x = -self.linear_speed  # move backward
            rospy.loginfo(f"Moving backward, current distance: {front_distance:.2f}m")
        else:
            twist.linear.x = 0                   # stop
            rospy.loginfo(f"Target distance reached: {front_distance:.2f}m")

        self.cmd_vel_pub.publish(twist)

    def stop_robot(self):
        twist = Twist()
        self.cmd_vel_pub.publish(twist)

    def fuse_data(self, image_data, scan_msg):
        height, width = image_data.shape[:2]
        fusion_image = image_data.copy()

        angles = np.arange(len(scan_msg.ranges)) * scan_msg.angle_increment + scan_msg.angle_min
        degrees = np.degrees(angles)

        # Restrict to the front-facing angular range
        valid_indices = np.logical_or(
            (degrees >= 0) & (degrees <= 15),
            (degrees >= 345) & (degrees < 360)
        )
        valid_degrees = degrees[valid_indices]
        valid_ranges = np.array(scan_msg.ranges)[valid_indices]

        if len(valid_ranges) == 0:
            rospy.logwarn("No valid range data")
            return fusion_image

        # Convert LiDAR angles to image coordinates
        u = np.zeros_like(valid_degrees)
        u[valid_degrees <= 15] = ((-valid_degrees[valid_degrees <= 15] + 15) / 15 * (width / 2)).astype(int)
        u[valid_degrees >= 345] = ((-valid_degrees[valid_degrees >= 345] + 360) / 15 * (width / 2) + (width / 2)).astype(int)
        v = np.full_like(u, height // 2, dtype=int)

        for ui, vi, range_val in zip(u, v, valid_ranges):
            if not np.isfinite(range_val):
                continue  # skip inf/NaN readings returned for missed beams
            if 0 <= ui < width:
                color_index = min(int(range_val * 63), 255)
                color = tuple(map(int, self.color_map[color_index]))
                # Draw a rectangle around the projected point
                x_min, x_max = max(0, int(ui) - 10), min(width, int(ui) + 12)
                y_min, y_max = max(0, int(vi) - 25), min(height, int(vi) + 26)
                cv2.rectangle(fusion_image, (x_min, y_min), (x_max, y_max), color, 2)
                # Draw the projected point itself
                cv2.circle(fusion_image, (int(ui), int(vi)), 2, color, -1)

        return fusion_image

    def create_color_map(self):
        color_map = np.zeros((256, 3), dtype=np.uint8)
        for i in range(256):
            normalized_distance = i / 255.0
            hue = 20  # orange hue
            saturation = 255
            value = int(255 * (1 - normalized_distance))
            color = cv2.cvtColor(np.uint8([[[hue, saturation, value]]]), cv2.COLOR_HSV2BGR)[0][0]
            color_map[i] = color
        return color_map


if __name__ == '__main__':
    rospy.init_node('turtlebot_yolo_detector', anonymous=True)
    try:
        detector = TurtlebotYOLODetector()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
```
Version 2
```python
#!/usr/bin/env python3
import rospy
from sensor_msgs.msg import LaserScan, Image
from geometry_msgs.msg import Twist
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import cv2
from message_filters import ApproximateTimeSynchronizer, Subscriber
from ultralytics import YOLO


class TurtlebotYOLODetector:
    def __init__(self):
        self.bridge = CvBridge()

        # Subscribers for time-synchronized camera and LiDAR data
        self.image_sub = Subscriber("/usb_cam/image_raw", Image)
        self.scan_sub = Subscriber("/scan", LaserScan)
        self.fusion_pub = rospy.Publisher("/fusion_image", Image, queue_size=1)

        # Publisher for TurtleBot velocity commands
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)

        # Approximate time synchronization of image and scan messages
        self.ts = ApproximateTimeSynchronizer([self.image_sub, self.scan_sub], 10, 0.1)
        self.ts.registerCallback(self.sync_callback)

        # Precomputed color map for distance visualization
        self.color_map = self.create_color_map()

        # Load the YOLOv8 model
        try:
            self.yolo_model = YOLO('yolov8n.pt')
            rospy.loginfo("YOLOv8 model loaded successfully")
        except Exception as e:
            rospy.logerr(f"Failed to load YOLOv8 model: {e}")
            self.yolo_model = None

        # Control parameters (modified in this version)
        self.target_distance = 0.5       # target distance changed to 50 cm
        self.angular_speed = 0.3         # angular speed
        self.linear_speed = 0.1          # linear speed increased to 0.1 m/s
        self.centering_threshold = 40    # centering tolerance (pixels)

    def sync_callback(self, image_msg, scan_msg):
        try:
            image_data = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")
            self.process_image(image_data, scan_msg)
        except CvBridgeError as e:
            rospy.logerr(f"CvBridge Error: {e}")

    def process_image(self, image_data, scan_msg):
        height, width = image_data.shape[:2]
        center_x = width // 2

        if self.yolo_model is not None:
            results = self.yolo_model(image_data)
            annotated_image = results[0].plot()

            person_detected = False
            for result in results:
                boxes = result.boxes.cpu().numpy()
                for box in boxes:
                    cls = int(box.cls[0])
                    if result.names[cls] == 'person':
                        x1, y1, x2, y2 = box.xyxy[0].astype(int)
                        person_center_x = (x1 + x2) // 2

                        # Keep the person centered in the image
                        if abs(person_center_x - center_x) > self.centering_threshold:
                            if person_center_x < center_x:
                                self.rotate_robot(1)   # rotate right
                                rospy.loginfo("Rotating right to center person")
                            else:
                                self.rotate_robot(-1)  # rotate left
                                rospy.loginfo("Rotating left to center person")
                        else:
                            self.adjust_distance(scan_msg)
                            rospy.loginfo("Person centered, adjusting distance")

                        person_detected = True
                        break  # only handle the first detected person
                if person_detected:
                    break

            if not person_detected:
                self.stop_robot()
                rospy.loginfo("No person detected, stopping robot")
        else:
            annotated_image = image_data.copy()
            rospy.logwarn("YOLO model not available, using original image without detections")

        # Process the LiDAR data and fuse it with the image
        fusion_image = self.fuse_data(annotated_image, scan_msg)

        # Publish the fused result
        try:
            fusion_msg = self.bridge.cv2_to_imgmsg(fusion_image, "bgr8")
            self.fusion_pub.publish(fusion_msg)
        except CvBridgeError as e:
            rospy.logerr(f"CvBridge Error: {e}")

    def rotate_robot(self, direction):
        twist = Twist()
        twist.angular.z = self.angular_speed * direction
        self.cmd_vel_pub.publish(twist)

    def adjust_distance(self, scan_msg):
        # Measure the forward distance (0-degree direction)
        front_distance = scan_msg.ranges[0]
        twist = Twist()
        distance_diff = front_distance - self.target_distance

        if abs(distance_diff) > 0.05:  # only move when the error exceeds 5 cm
            # Speed proportional to the distance error, capped at self.linear_speed
            speed = min(abs(distance_diff), self.linear_speed)
            twist.linear.x = speed if distance_diff > 0 else -speed
            action = "forward" if distance_diff > 0 else "backward"
            rospy.loginfo(f"Moving {action}, current distance: {front_distance:.2f}m, speed: {speed:.2f}m/s")
        else:
            twist.linear.x = 0  # stop
            rospy.loginfo(f"Target distance reached: {front_distance:.2f}m")

        self.cmd_vel_pub.publish(twist)

    def stop_robot(self):
        twist = Twist()
        self.cmd_vel_pub.publish(twist)

    def fuse_data(self, image_data, scan_msg):
        height, width = image_data.shape[:2]
        fusion_image = image_data.copy()

        angles = np.arange(len(scan_msg.ranges)) * scan_msg.angle_increment + scan_msg.angle_min
        degrees = np.degrees(angles)

        # Restrict to the front-facing angular range
        valid_indices = np.logical_or(
            (degrees >= 0) & (degrees <= 15),
            (degrees >= 345) & (degrees < 360)
        )
        valid_degrees = degrees[valid_indices]
        valid_ranges = np.array(scan_msg.ranges)[valid_indices]

        if len(valid_ranges) == 0:
            rospy.logwarn("No valid range data")
            return fusion_image

        # Convert LiDAR angles to image coordinates
        u = np.zeros_like(valid_degrees)
        u[valid_degrees <= 15] = ((-valid_degrees[valid_degrees <= 15] + 15) / 15 * (width / 2)).astype(int)
        u[valid_degrees >= 345] = ((-valid_degrees[valid_degrees >= 345] + 360) / 15 * (width / 2) + (width / 2)).astype(int)
        v = np.full_like(u, height // 2, dtype=int)

        for ui, vi, range_val in zip(u, v, valid_ranges):
            if 0 <= ui < width:
                # Draw a green rectangle with the measured distance as text (modified in this version)
                x_min, x_max = max(0, int(ui) - 10), min(width, int(ui) + 12)
                y_min, y_max = max(0, int(vi) - 25), min(height, int(vi) + 26)
                cv2.rectangle(fusion_image, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
                # Distance label (one decimal place)
                distance_text = f"{range_val:.1f}"
                text_size = cv2.getTextSize(distance_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
                text_x = int((x_min + x_max - text_size[0]) / 2)
                text_y = int((y_min + y_max + text_size[1]) / 2)
                cv2.putText(fusion_image, distance_text, (text_x, text_y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

        return fusion_image

    def create_color_map(self):
        color_map = np.zeros((256, 3), dtype=np.uint8)
        for i in range(256):
            normalized_distance = i / 255.0
            hue = 20  # orange hue
            saturation = 255
            value = int(255 * (1 - normalized_distance))
            color = cv2.cvtColor(np.uint8([[[hue, saturation, value]]]), cv2.COLOR_HSV2BGR)[0][0]
            color_map[i] = color
        return color_map


if __name__ == '__main__':
    rospy.init_node('turtlebot_yolo_detector', anonymous=True)
    try:
        detector = TurtlebotYOLODetector()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
```
This Python script implements the TurtlebotYOLODetector class, which combines YOLO object detection with LiDAR data for person following and distance control. The node subscribes to the camera image and laser scan streams through ROS message filters with approximate time synchronization, so each processing step works on a matched image/scan pair. The fuse_data method maps LiDAR ranges onto image coordinates for a 30-degree front-facing field of view (0-15 degrees and 345-360 degrees) and overlays the distance information on the camera feed as green rectangles with distance labels. The adjust_distance method reads the forward range (0-degree direction) and adjusts the linear velocity to hold a target distance of 50 cm: the commanded speed is proportional to the distance error, capped at 0.1 m/s, and no motion is commanded when the error is within 5 cm, which keeps the behavior smooth and avoids oscillation. Together with the YOLO-based centering logic and the continuous publish/subscribe loop, this gives real-time visualization of the fused sensor data and simple, stable person-following behavior.
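To make the LiDAR-to-image projection concrete, here is a small standalone sketch of the angle-to-pixel mapping used in fuse_data. The image width of 640 px is assumed for illustration only; in the node the width is taken from the incoming camera frame.

```python
# Standalone sketch of the fuse_data angle-to-pixel mapping (image width assumed to be 640 px).
import numpy as np

width = 640
degrees = np.array([0.0, 7.5, 15.0, 345.0, 352.5])  # sample LiDAR angles in the front FOV

u = np.zeros_like(degrees)
left = degrees <= 15    # 0..15 deg: one half of the front FOV
right = degrees >= 345  # 345..360 deg: the other half
u[left] = (15 - degrees[left]) / 15 * (width / 2)                   # 0 deg -> image center, 15 deg -> left edge
u[right] = (360 - degrees[right]) / 15 * (width / 2) + (width / 2)  # ~360 deg -> center, 345 deg -> right edge

for d, px in zip(degrees, u):
    print(f"{d:6.1f} deg -> u = {px:5.1f} px")
# 0.0 -> 320.0, 7.5 -> 160.0, 15.0 -> 0.0, 345.0 -> 640.0, 352.5 -> 480.0
```

Each beam is then drawn at the fixed row v = height // 2, so the overlay appears as a horizontal strip of markers across the middle of the image.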