Extrinsic Calibration of a Real Collaborative Robot Arm and a Realsense Camera in Python (Eye-to-Hand)

Table of Contents

  • Preface
  • 1. Capturing images of the calibration board with the depth camera
  • 2. Computing the pose of the depth camera relative to the robot base frame (extrinsic calibration)
  • Summary


Preface

This article covers the extrinsic (hand-eye) calibration between an Elite (艾利特) collaborative robot arm and a Realsense depth camera: capturing calibration images and computing the camera extrinsics.

1. Capturing Images of the Calibration Board with the Depth Camera

Press the Space key to save an image; each time an image is saved, record the Cartesian pose of the robot end-effector in the base frame (a pose-logging sketch follows the capture script below).

import cv2
import numpy as np
import pyrealsense2 as rs
import os

# configure and start the color stream
pipe = rs.pipeline()
cfg = rs.config()
cfg.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
i = 0
profile = pipe.start(cfg)
os.makedirs('img_wcbd_11.26', exist_ok=True)  # make sure the output folder exists
while True:
    # grab a color frame
    frameset = pipe.wait_for_frames()
    color_frame = frameset.get_color_frame()
    color_img = np.asanyarray(color_frame.get_data())
    # swap the R and B channels: the stream is RGB, but OpenCV expects BGR
    t = color_img[:, :, 2].copy()
    color_img[:, :, 2] = color_img[:, :, 0]
    color_img[:, :, 0] = t
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('RealSense', color_img)
    k = cv2.waitKey(1)
    # Esc to quit
    if k == 27:
        cv2.destroyAllWindows()
        break
    # Space to save the current frame
    elif k == ord(' '):
        i = i + 1
        # cv2.imwrite(os.path.join("E:\\Realsense\\pic_capture", str(i) + '.jpg'), color_img)
        cv2.imwrite(os.path.join('img_wcbd_11.26', str(i) + '.jpg'), color_img)
        print("Frame {} captured".format(i))
pipe.stop()
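
The flange pose still has to be recorded at every capture. As a convenience, the sketch below shows one way to append it to a CSV at the same moment an image is saved, so image i and pose row i stay matched; read_tcp_pose() is a hypothetical placeholder for whatever call your robot controller or SDK actually provides and is not part of the original workflow.

import csv

def read_tcp_pose():
    # Placeholder: return (x, y, z, rx, ry, rz) of the flange in the base frame.
    # Replace with the call offered by your robot controller / SDK.
    raise NotImplementedError

def log_pose(csv_path, frame_idx):
    # one row per saved image: index, position (mm), RPY angles (rad)
    pose = read_tcp_pose()
    with open(csv_path, 'a', newline='') as f:
        csv.writer(f).writerow([frame_idx, *pose])

# Inside the capture loop, right after cv2.imwrite(...):
# log_pose('img_wcbd_11.26/poses.csv', i)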

2. Computing the Pose of the Depth Camera Relative to the Robot Base Frame (Extrinsic Calibration)

Adjust the camera intrinsics, the chessboard parameters, and the recorded robot end-effector poses to match your own setup; the sketch directly below shows one way to read the camera's factory intrinsics.
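
If the intrinsics have not been obtained from a separate calibration, pyrealsense2 can report the factory intrinsics of the active color stream. This is a minimal sketch, assuming the same stream settings as the capture script above; fx, fy, cx and cy in the calibration script correspond to intr.fx, intr.fy, intr.ppx and intr.ppy.

import pyrealsense2 as rs

pipe = rs.pipeline()
cfg = rs.config()
cfg.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
profile = pipe.start(cfg)

# factory intrinsics of the active color stream profile
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
print('fx =', intr.fx, 'fy =', intr.fy, 'cx =', intr.ppx, 'cy =', intr.ppy)
print('distortion:', intr.model, intr.coeffs)

pipe.stop()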

import cv2
import numpy as np
from math import cos, sin, pi  # pi is only needed if the degree-to-radian conversion below is enabled

"""
Parameters
"""
num = 10  # number of images used for calibration
# camera intrinsics
fx = 915.757
fy = 914.598
cx = 648.262
cy = 350.743
K = np.array([[fx, 0, cx],
              [0, fy, cy],
              [0, 0, 1]], dtype=np.float64)
# chessboard parameters
chess_board_x_num = 8   # inner corners along x (number of squares minus one)
chess_board_y_num = 5   # inner corners along y (number of squares minus one)
chess_board_len = 26.5  # side length of one square in mm -- change to match your board
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)  # termination criteria for corner refinement
"""
根据欧拉角计算旋转矩阵
"""
#用于根据欧拉角计算旋转矩阵
def myRPY2R_robot(x, y, z):
 Rx = np.array([[1, 0, 0], [0, cos(x), -sin(x)], [0, sin(x), cos(x)]])
 Ry = np.array([[cos(y), 0, sin(y)], [0, 1, 0], [-sin(y), 0, cos(y)]])
 Rz = np.array([[cos(z), -sin(z), 0], [sin(z), cos(z), 0], [0, 0, 1]])
 R = Rz@Ry@Rx #@表示矩阵乘法
 return R
"""
根据平移和旋转计算齐次矩阵
"""
#用于根据位姿计算变换矩阵
def pose_robot(x, y, z, Tx, Ty, Tz): #旋转角度:x,y,z;平移分量:Tx,Ty,Tz
 # thetaX = x / 180 * pi
 # thetaY = y / 180 * pi
 # thetaZ = z / 180 * pi
 thetaX = x
 thetaY = y
 thetaZ = z
 R = myRPY2R_robot(thetaX,thetaY,thetaZ)
 t = np.array([[Tx], [Ty], [Tz]])
 RT1 = np.column_stack([R, t]) # 列合并
 RT1 = np.row_stack((RT1, np.array([0,0,0,1])))
 return RT1
"""
根据棋盘格图像获取标定板相对于相机的位姿
"""
#用来从棋盘格图片得到相机外参
def get_RT_from_chessboard(img_path,chess_board_x_num,chess_board_y_num,K,chess_board_len):
 img=cv2.imread(img_path)
 gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
 # ret, corners = cv2.findChessboardCornersSB(gray, (chess_board_y_num, chess_board_x_num), None)
 ret, corners = cv2.findChessboardCornersSB(gray, (chess_board_x_num, chess_board_y_num), None) #调换以后方向才对
 if ret:
 # 精细查找角点
 corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
 # 显示角点
 # cv2.drawChessboardCorners(img,(chess_board_y_num, chess_board_x_num), corners2, ret)
 cv2.drawChessboardCorners(img, (chess_board_x_num, chess_board_y_num), corners2, ret) #调换以后方向才对
 cv2.imshow("img", img)
 cv2.waitKey(600)
 corner_points=np.zeros((2,corners.shape[0]),dtype=np.float64)
 for i in range(corners.shape[0]):
 corner_points[:,i]=corners[i,0,:]
 object_points=np.zeros((3,chess_board_x_num*chess_board_y_num),dtype=np.float64)
 flag=0
 for i in range(chess_board_y_num):
 for j in range(chess_board_x_num):
 object_points[:2,flag]=np.array([(11-j-1)*chess_board_len,(8-i-1)*chess_board_len])
 flag+=1
 retval,rvec,tvec = cv2.solvePnP(object_points.T,corner_points.T, K, distCoeffs=None)
 RT=np.column_stack(((cv2.Rodrigues(rvec))[0],tvec))
 RT = np.row_stack((RT, np.array([0, 0, 0, 1])))
 return RT
"""
标定板2相机
"""
#计算board to cam 变换矩阵
R_all_chess_to_cam_1=[]
T_all_chess_to_cam_1=[]
for i in range(num):
 image_path='img_wcbd_11.26/{}.jpg'.format(i+1)
 RT=get_RT_from_chessboard(image_path, chess_board_x_num, chess_board_y_num, K, chess_board_len)
 R_all_chess_to_cam_1.append(RT[:3,:3])
 T_all_chess_to_cam_1.append(RT[:3, 3].reshape((3,1)))
"""
末端2基座
"""
#位姿参数,修改
pose=np.array([[-622.4452510680815,524.9752698923884,156.06834188650686],
 [-627.8618566771845,520.3385452748523,155.95649892911888],
 [-627.8603117576374,520.3395366048145,155.96015182265455],
 [-608.5479357433793,542.7989266050945,155.9582535151922],
 [-608.7390060992511,543.0210484916622,151.91097181613594],
 [-602.8760786176158,549.5229480943764,151.91097181613594],
 [-603.552411795536,550.3470321566062,133.70817720248263],
 [-611.5465863717345,541.1627275715787,134.34846121499896],
 [-611.5443671647014,541.1628776214664,134.34718510333377],
 [-602.7423615676076,550.5760656441264,143.8503303749094]])
ang=np.array([[-3.0398882702599774,1.044591855511297,-0.7693595597257228],
 [-3.066247439671175,1.0454779707979422,-0.7692025753376092],
 [-3.122619548308192,1.047022325344027,-0.8343332090498904],
 [-3.122607921681094,1.0470154640492697,-0.8706462420205126],
 [-3.122629578093413,1.0420052892962037,-0.8706604603523166],
 [-3.122629578093413,1.0420052892962037,-0.8813929731338211],
 [-3.083008314702459,1.018524644841849,-0.8348392811215427],
 [-3.1262532894037527,1.0331396840313745,-0.8336434111432051],
 [3.119411368295909,1.033064358179091,-0.8773344026002032],
 [-3.11584155786669,1.0667034468346508,-0.8769577446455527]])
R_all_end_to_base_1 = []
T_all_end_to_base_1 = []
# build the base-to-end transforms (note the extra inversion)
for i in range(num):
    RT_ = pose_robot(ang[i, 0], ang[i, 1], ang[i, 2], pose[i, 0], pose[i, 1], pose[i, 2])  # end-to-base
    RT = np.linalg.inv(RT_)  # invert: in the eye-to-hand case calibrateHandEye must be fed base-to-end (base2gripper) poses
    R_all_end_to_base_1.append(RT[:3, :3])
    # T_all_end_to_base_1.append(RT[:3, 3].reshape((3, 1)))
    T_all_end_to_base_1.append(RT[:3, 3])
"""
外参标定
"""
R,T=cv2.calibrateHandEye(R_all_end_to_base_1,T_all_end_to_base_1,R_all_chess_to_cam_1,T_all_chess_to_cam_1)#手眼标定
RT=np.column_stack((R,T))
RT = np.row_stack((RT, np.array([0, 0, 0, 1])))#即为cam to end变换矩阵
print('相机相对于末端的变换矩阵为:')
print(RT)
"""
结果验证
"""
#结果验证,原则上来说,每次结果相差较小
for i in range(num):
 RT_end_to_base=np.column_stack((R_all_end_to_base_1[i],T_all_end_to_base_1[i]))
 RT_end_to_base=np.row_stack((RT_end_to_base,np.array([0,0,0,1])))
 RT_chess_to_cam=np.column_stack((R_all_chess_to_cam_1[i],T_all_chess_to_cam_1[i]))
 RT_chess_to_cam=np.row_stack((RT_chess_to_cam,np.array([0,0,0,1])))
 RT_cam_to_end=np.column_stack((R,T))
 RT_cam_to_end=np.row_stack((RT_cam_to_end,np.array([0,0,0,1])))
 RT_chess_to_base=RT_end_to_base@RT_cam_to_end@RT_chess_to_cam #即为固定的棋盘格相对于机器人基坐标系位姿
 RT_chess_to_base=np.linalg.inv(RT_chess_to_base)
 print('第',i,'次')
 print(RT_chess_to_base[:3,:])
 print('')
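
Once RT (the cam-to-base matrix printed above) is available, the typical use in an eye-to-hand setup is mapping a point measured in the camera frame into robot base coordinates so the arm can be sent to it. The lines below are a minimal sketch that continues from the script above; p_cam is just an assumed example point, in the same millimetre units as the calibration.

# map a point from camera coordinates to robot base coordinates using the cam-to-base matrix RT
p_cam = np.array([12.0, -30.0, 450.0])   # example point in the camera frame, mm
p_cam_h = np.append(p_cam, 1.0)          # homogeneous coordinates
p_base = (RT @ p_cam_h)[:3]              # the same point expressed in the robot base frame
print('point in base frame (mm):', p_base)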


Summary

1. In the eye-to-hand case the end-effector poses must be inverted (base-to-end) before calling calibrateHandEye; the sketch below writes this inversion out explicitly.

2. The chessboard corner counts are the number of squares minus one in each direction.

3. The pattern size passed to the corner detector must be (chess_board_x_num, chess_board_y_num), i.e. (columns, rows), so that the drawn corners are traversed row by row rather than column by column.
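
As a reference for point 1, here is a minimal sketch of that inversion written out explicitly rather than with np.linalg.inv: for a rigid transform (R, t) the inverse is (Rᵀ, -Rᵀt), and feeding these base-to-gripper poses to cv2.calibrateHandEye makes its output the camera-to-base transform.

import numpy as np

def invert_rt(R, t):
    # invert a rigid transform given rotation R (3x3) and translation t
    R_inv = R.T
    t_inv = -R_inv @ np.asarray(t, dtype=np.float64).reshape(3, 1)
    return R_inv, t_inv

# example: turn an end(gripper)-to-base pose into the base-to-gripper pose
# expected by calibrateHandEye in the eye-to-hand configuration
R_g2b = np.eye(3)
t_g2b = np.array([100.0, 0.0, 50.0])
R_b2g, t_b2g = invert_rt(R_g2b, t_g2b)
print(R_b2g, t_b2g.ravel())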
Author: 梦之船. Original article: https://blog.csdn.net/m0_54795435/article/details/128054017
