Commit 69fc18d5 authored by Vincent POLLET

Compute extrinsics only with respect to reference, save output with previous format

parent 08c619ff
Pipeline #50704 failed in 3 minutes and 34 seconds
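The change relies on the fact that reference-relative extrinsics are enough: once every camera's pose is stored with respect to the single reference stream, the transform between any two cameras can be recovered by composing the stored 4x4 matrices, so the exhaustive camera-pair loop removed below is no longer needed. A minimal sketch, assuming each stored H maps points from a camera's frame into the reference frame (the convention suggested by, but not stated in, the code below):

import numpy as np

def pairwise_extrinsics(H_a_ref, H_b_ref):
    # Both inputs are 4x4 homogeneous transforms taking points from a
    # camera's frame into the reference frame: X_ref = H @ X_cam.
    # The A-to-B transform follows by composition: X_b = inv(H_b_ref) @ H_a_ref @ X_a.
    return np.linalg.inv(H_b_ref) @ H_a_ref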
@@ -363,6 +363,8 @@ def parse_arguments():
help="An absolute path to the JSON file containing the calibration configuration.")
parser.add_argument("-i", "--intrinsics-only", type=bool, default=False,
help="Compute intrinsics only.")
parser.add_argument("-r", "--reference", type=str, required=True,
help="Stream name in the data files of camera to use as reference.")
parser.add_argument("-o", "--output_file", type=str, default=None,
help="Output calibration JSON file.")
parser.add_argument("-v", "--verbosity", action="count", default=0,
@@ -428,8 +430,8 @@ def main():
err, cam_mat, dist_coefs = compute_intrinsics(im_pts, image_size, ids=ids, charuco_board=charuco_board, params=config['intrinsics'])
else:
err, cam_mat, dist_coefs = compute_intrinsics(im_pts, image_size, obj_pts=obj_pts, params=config['intrinsics'])
calibration[camera]["cam_mat"] = cam_mat
calibration[camera]["distortion"] = dist_coefs
calibration[camera]["camera_matrix"] = cam_mat
calibration[camera]["distortion_coefs"] = dist_coefs
if verbosity > 0:
print("\n=== Intrinsics {} ===".format(camera))
print("Camera matrix \n {} \n Distortion coeffs \n {} \n".format(cam_mat, dist_coefs))
@@ -446,63 +448,67 @@ def main():
print("\n=== Extrinsics {}-{} dataframe ===".format(camera, camera_2))
display_df(df_extrinsics)
# Then the extrinsics between camera pair are computed
# Then the extrinsics between the reference camera and the others are computed
reference = args.reference
if intrinsics_only:
return
stream = [k for k, v in calibration.items() if v is not None]
for idx, camera in enumerate(stream):
image_size = image_data[camera]
for camera_2 in stream:# or with itertools.tee()
if camera_2 == camera:
continue
if image_data[camera_2] != image_size:
raise ValueError("Image size between {} and {} does not match.".format(camera, camera_2))
if not "df_extrinsics_{}".format(camera_2) in calibration[camera].keys():
continue
df = calibration[camera]["df_extrinsics_{}".format(camera_2)]
cam_mat1 = calibration[camera]["cam_mat"]
cam_mat2 = calibration[camera_2]["cam_mat"]
dist_coefs1 = calibration[camera]["distortion"]
dist_coefs2 = calibration[camera_2]["distortion"]
im_pts1, obj_pts, ids = define_calibration_points(df[camera], object_points, pattern_type)
im_pts2, obj_pts2, ids2 = define_calibration_points(df[camera_2], object_points, pattern_type)
for i1, i2, o1, o2 in zip(ids, ids2, obj_pts, obj_pts2):
if np.linalg.norm(o1 - o2) != 0.0:
raise ValueError("Difference between both object points")
if np.linalg.norm(i1 - i2) != 0.0:
raise ValueError("Difference between both ids")
err, R, T = compute_relative_extrinsics(obj_pts,
im_pts1, cam_mat1, dist_coefs1,
im_pts2, cam_mat2, dist_coefs2,
image_size)
H = np.eye(4)
H[0:3, 0:3] = R
H[0:3, 3] = T.T
calibration[camera]["extrinsics_{}".format(camera_2)] = H
if verbosity > 0:
print("\n=== Extrinsics {}-{} ===".format(camera, camera_2))
print("Rotation \n {} \nTranslation \n {} \n".format(R, T.T))
print("Reprojection error \n {}\n".format(err))
# for camera_2 in stream:# or with itertools.tee()
# if camera_2 == camera:
# continue
if image_data[reference] != image_size:
raise ValueError("Image size between {} and {} does not match.".format(camera, reference))
if not "df_extrinsics_{}".format(reference) in calibration[camera].keys():
continue
df = calibration[camera]["df_extrinsics_{}".format(reference)]
cam_mat1 = calibration[camera]["camera_matrix"]
cam_mat2 = calibration[reference]["camera_matrix"]
dist_coefs1 = calibration[camera]["distortion_coefs"]
dist_coefs2 = calibration[reference]["distortion_coefs"]
im_pts1, obj_pts, ids = define_calibration_points(df[camera], object_points, pattern_type)
im_pts2, obj_pts2, ids2 = define_calibration_points(df[reference], object_points, pattern_type)
for i1, i2, o1, o2 in zip(ids, ids2, obj_pts, obj_pts2):
if np.linalg.norm(o1 - o2) != 0.0:
raise ValueError("Difference between both object points")
if np.linalg.norm(i1 - i2) != 0.0:
raise ValueError("Difference between both ids")
err, R, T = compute_relative_extrinsics(obj_pts,
im_pts1, cam_mat1, dist_coefs1,
im_pts2, cam_mat2, dist_coefs2,
image_size)
H = np.eye(4)
H[0:3, 0:3] = R
H[0:3, 3] = T.T
calibration[camera]["extrinsics_{}".format(reference)] = H
calibration[camera]["relative_rotation"] = R
calibration[camera]["relative_translation"] = T
if verbosity > 0:
print("\n=== Extrinsics {}-{} ===".format(camera, reference))
print("Rotation \n {} \nTranslation \n {} \n".format(R, T.T))
print("Reprojection error \n {}\n".format(err))
if args.output_file is not None:
data = {}
for camera in stream:
# As all values are ndarrays, tolist() is used to serialize the values
data[camera] = {k:v.tolist() for k,v in calibration[camera].items() if "df_" not in k}
data[camera] = {k:v.tolist() for k,v in calibration[camera].items() if "df_" not in k and "extrinsics_" not in k}
data[camera]["markers"] = None # markers should be computed with linear calibration
data[camera]["reference"] = reference
with open(args.output_file, 'w') as f:
json.dump(data, f, indent=4, sort_keys=True)
print("{} written.".format(args.output_file))
for reference in config["display"]["references"]:
poses = {}
#reference = "left"
for camera in stream:
if camera ==reference:
poses[reference] = np.eye(4)
else:
poses[camera] = calibration[reference]["extrinsics_{}".format(camera)]
display_cameras_poses(poses, config['display'])
poses = {}
for camera in stream:
if camera == reference:
poses[reference] = np.eye(4)
else:
poses[camera] = calibration[camera]["extrinsics_{}".format(reference)]
display_cameras_poses(poses, config['display'])
if __name__ == "__main__":
......
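compute_relative_extrinsics is likewise not shown in this commit; the sketch below gives one plausible reading, assuming it wraps cv2.stereoCalibrate with intrinsics held fixed and packs the resulting rotation and translation into a 4x4 homogeneous matrix exactly as the loop above does. Names are illustrative:

import cv2
import numpy as np

def relative_extrinsics_sketch(obj_pts, im_pts_cam, K_cam, d_cam,
                               im_pts_ref, K_ref, d_ref, image_size):
    # Stereo calibration with fixed intrinsics returns R, T mapping points
    # from the first camera's frame into the second (reference) frame.
    err, _, _, _, _, R, T, _, _ = cv2.stereoCalibrate(
        obj_pts, im_pts_cam, im_pts_ref, K_cam, d_cam, K_ref, d_ref,
        image_size, flags=cv2.CALIB_FIX_INTRINSIC)
    H = np.eye(4)
    H[0:3, 0:3] = R
    H[0:3, 3] = T.ravel()
    return err, R, T, H

On the output side, the previous file format is preserved by dropping the intermediate dataframes and the per-pair 4x4 entries: only list-converted arrays (tolist()), the markers placeholder, and the name of the reference stream are written, and the arrays can be rebuilt with numpy.array() when the JSON is read back.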
{
"capture_directory" : "/Users/david/Desktop/calib_trim/",
"stream_config" : "/Users/david/Cloud/idiap/candy/candy/candy/config/candy_stream_config.json",
"capture_directory" : "/idiap/project/candy/calib_trim/",
"stream_config" : "/idiap/temp/vpollet/projects/batl/bob.ip.stereo/bob/ip/stereo/config/minibatl_data_config.json",
"pattern_type": "charuco",
"pattern_rows": 10,
"pattern_columns": 7,
@@ -47,7 +47,6 @@
},
"display":
{
"references" : ["left"],
"axe_x" : [-25, 25],
"axe_y" : [-25, 25],
"axe_z" : [-10, 10]
......
@@ -153,6 +153,8 @@ class StreamWarp(bob.io.stream.StreamFilter):
:obj:`numpy.ndarray`
`data` warped to `warp_to` image shape.
"""
if self.warp_to.camera.markers is None or self.camera.markers is None:
raise ValueError("Camera markers are not defined! Did you run the linear calibration of your cameras?")
self.markers = (self.warp_to.camera.markers, self.camera.markers) # camera added to Stream by add_stream_camera
self.warp_transform = transform.ProjectiveTransform()
self.warp_transform.estimate(*self.markers)
......
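In the StreamWarp change, estimate(*self.markers) fits a homography from the warp_to image's marker coordinates to this camera's marker coordinates. A small self-contained sketch with made-up marker positions, assuming the warped image is then produced with skimage's transform.warp using this transform as the inverse map (the actual warp call falls outside the lines shown here):

import numpy as np
from skimage import transform

# Illustrative marker correspondences: (x, y) positions of the same physical
# markers in the target (warp_to) image and in this camera's image.
warp_to_markers = np.array([[0, 0], [100, 0], [100, 100], [0, 100]], dtype=float)
camera_markers = np.array([[4, 2], [97, 5], [95, 103], [1, 98]], dtype=float)

tform = transform.ProjectiveTransform()
tform.estimate(warp_to_markers, camera_markers)  # target coords -> camera coords

image = np.random.rand(120, 120)  # stand-in for this camera's frame
# warp() interprets tform as the map from output coordinates back to input
# coordinates, so the result is the camera image resampled onto the warp_to geometry.
warped = transform.warp(image, tform, output_shape=(110, 110))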