diff --git a/README.rst b/README.rst
index 8ed3f013495801efba0108e5c613588781d9ed4f..72c762aa54e8c4874eb3d6a255d19a5baf77b2d3 100644
--- a/README.rst
+++ b/README.rst
@@ -20,6 +20,11 @@ Bob IP Stereo
 This package is part of the signal-processing and machine learning toolbox
 Bob_.
 
+This package implements stereo mapping and projection functionalities and provides
+them as ``bob.io.stream.StreamFilter`` to easily integrate them in processing
+pipelines.
+
+
 Installation
 ------------
 
@@ -29,6 +34,41 @@ package, run::
 
   $ conda install bob.ip.stereo
 
+Example
+-------
+
+Here is an example of how to use the package to load data recorded with 3 different cameras, build a depth map using
+2 of them and use it to project the third camera's data.
+
+.. code-block:: python
+
+    # Import Stream and StreamFile classes
+    # Generic processing filters are available through the Stream class
+    from bob.io.stream import StreamFile, Stream
+
+    # Import bob.ip.stereo: this updates the Stream class with the stereo filters
+    # It also updates the StreamFile class to allow setting camera configuration information.
+    import bob.ip.stereo
+
+    f = StreamFile(
+        resource_path("test/data/input_example.h5", "bob.io.stream"),
+        resource_path("config/idiap_face_streams.json", "bob.io.stream"),
+    )
+    f.set_camera_configs(resource_path("config/idiap_face_calibration.json"))
+
+    # streams for stereo and projection tests
+    color = Stream("color", f)
+    nir_left = Stream("nir_left_stereo", f).adjust(color)
+    nir_right = Stream("nir_right_stereo", f).adjust(color)
+
+    # reproject operations
+    map_3d = nir_left.stereo(nir_right)
+    depth = map_3d.select(channel=2).colormap(colormap="jet")
+    rep_color = color.reproject(nir_left, nir_right, map_3d)
+
+    # Use the stereo data:
+    rep_color[0:2]  # ...
+
+
 Contact
 -------
 
diff --git a/bob/ip/stereo/calibration.py b/bob/ip/stereo/calibration.py
index ea921178963caabdba5a86c3d1a14ad155f1975e..469619a8730e2384d1a91e128298dba955716cc3 100644
--- a/bob/ip/stereo/calibration.py
+++ b/bob/ip/stereo/calibration.py
@@ -1,20 +1,52 @@
 #!/usr/bin/env python3
+"""Calibration script computing intrinsic and extrinsic parameters of cameras from capture files.
+
+This script performs the following steps:
+1. For all capture files specified in the configuration json, detect the pattern points (first on the image; if that
+fails, the image is processed (grey-closing, thresholding) and detection is tried again).
+2. Using the frames where the pattern points were detected in step 1: estimate the intrinsic parameters for each stream
+(camera) in the capture files.
+3. Again using the pattern points detected in step 1, the extrinsic parameters of the cameras are estimated with
+respect to the reference.
+4. The camera poses are displayed, and the results are saved to a json file.
+"""
+
 import argparse
 import os
 import json
+import warnings
+
 import numpy as np
 from scipy.ndimage.morphology import grey_closing
-from matplotlib import pyplot as plt
 import pandas as pd
 import cv2
+from matplotlib import pyplot as plt
 
 from bob.io.stream import Stream, StreamFile
 from bob.io.image import bob_to_opencvbgr
 
 
 def preprocess_image(image, closing_size=15, threshold=201):
-    """ """
+    """Image processing to improve pattern detection for calibration.
+
+    Performs grey closing and adaptive thresholding to enhance pattern visibility in the calibration image.
+
+    Parameters
+    ----------
+    image : :obj:`numpy.ndarray`
+        Calibration image. (uint16)
+    closing_size : int
+        Grey closing size, by default 15.
+    threshold : int
+        Adaptive threshold block size passed to opencv's adaptiveThreshold, by default 201.
+
+    Returns
+    -------
+    image : :obj:`numpy.ndarray`
+        Processed image. (uint8)
+    """
+
     image = grey_closing(image, closing_size)
     image = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, threshold, 1).astype(
         np.uint8
@@ -22,133 +54,155 @@ def preprocess_image(image, closing_size=15, threshold=201):
     return image
 
 
-def detect_chessboard_corners(image, prep_im, pattern_size, verbosity):
-    """ """
-    if prep_im is None:
-        ret, corners = cv2.findChessboardCorners(image, pattern_size, 0)
-    else:
-        ret, corners = cv2.findChessboardCorners(prep_im, pattern_size, 0)
+def detect_chessboard_corners(image, pattern_size, verbosity):
+    """Detect the chessboard pattern corners in a calibration image.
+
+    This function returns the coordinates of the corners in between the chessboard squares, in the coordinate system of
+    the image pixels.
+    With a chessboard pattern, either all corners are found, or the detection fails and None is returned.
+
+    Parameters
+    ----------
+    image : :obj:`numpy.ndarray`
+        Calibration image. (uint8)
+    pattern_size : tuple(int, int)
+        Number of rows, columns in the pattern. These are the number of corners in between the board squares, so it is 1
+        less than the actual number of squares in a row/column.
+    verbosity : int
+        If verbosity is strictly greater than 2, the detected chessboard will be plotted on the image.
+
+    Returns
+    -------
+    im_points : :obj:`numpy.ndarray`
+        Array of the detected corner coordinates in the image.
+    """
+
+    ret, corners = cv2.findChessboardCorners(image, pattern_size, 0)
     im_points = None
     if ret:
         criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 300, 0.0001)
         im_points = cv2.cornerSubPix(image, corners, (11, 11), (-1, -1), criteria)
     if verbosity > 2:
-        if ret:
-            im = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
-        else:
-            im = cv2.cvtColor(prep_im, cv2.COLOR_GRAY2BGR)
-        draw_image = cv2.drawChessboardCorners(im, pattern_size, im_points, True)
-        plt.imshow(draw_image, vmin=0, vmax=255)
+        draw_image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
+        draw_image = cv2.drawChessboardCorners(draw_image, pattern_size, im_points, True)
+        plt.imshow(draw_image, cmap="gray", vmin=0, vmax=255)
         plt.show()
-    return ret, im_points
+    return im_points
 
 
-def detect_charuco_corners(image, prep_im, charuco_board, verbosity):
-    """ """
-    corners, ids = None, None
-    if prep_im is None:
-        markers, ids, rejected_points = cv2.aruco.detectMarkers(image, charuco_board.dictionary)
-    else:
-        markers, ids, rejected_points = cv2.aruco.detectMarkers(prep_im, charuco_board.dictionary)
+def detect_charuco_corners(image, charuco_board, verbosity):
+    """Detect charuco pattern corners in a calibration image.
+
+    This function returns the coordinates of the corners in between the charuco board squares, in the coordinate system
+    of the image pixels.
+    The charuco detection method is able to detect the individual charuco symbols in the pattern, therefore the
+    detection may return only a subset of the points when not all pattern symbols are detected. In order to know which
+    symbols were detected, the function also returns the identifiers of the detected points.
+
+    Note: This function detects the charuco pattern inside the board squares, therefore the pattern size of the board
+    (#rows, #cols) corresponds to the number of squares, not the number of corners in between squares.
+
+    Parameters
+    ----------
+    image : :obj:`numpy.ndarray`
+        Calibration image (grayscale, uint8)
+    charuco_board : :obj:`cv2.aruco_CharucoBoard`
+        Dictionary-like structure defining the charuco patterns.
+    verbosity : int
+        If verbosity is strictly greater than 2, the detected patterns will be plotted on the image.
+
+    Returns
+    -------
+    im_points : :obj:`numpy.ndarray`
+        Array of the detected corner coordinates in the image. Shape: (#detected_pattern, 1, 2)
+    ids : :obj:`numpy.ndarray`
+        Identifiers of the detected patterns. Shape: (#detected_pattern, 1)
+    """
+
+    im_points, ids = None, None
+    markers, ids, rejected_points = cv2.aruco.detectMarkers(image, charuco_board.dictionary)
+
     if ids is not None:
         if len(ids) < 3:
             ids = None
             markers = None
-            ret = False
         else:
-            ret, corners, ids = cv2.aruco.interpolateCornersCharuco(markers, ids, image, charuco_board)
-    else:
-        ret = False
+            _, im_points, ids = cv2.aruco.interpolateCornersCharuco(markers, ids, image, charuco_board)
     if verbosity > 2:
-        if ret:
-            im = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
-        else:
-            im = cv2.cvtColor(prep_im, cv2.COLOR_GRAY2BGR)
-        im = cv2.aruco.drawDetectedCornersCharuco(im, corners, ids)
-        plt.imshow(im, cmap="gray", vmin=0, vmax=255)
+        draw_image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
+        draw_image = cv2.aruco.drawDetectedCornersCharuco(draw_image, im_points, ids)
+        plt.imshow(draw_image, cmap="gray", vmin=0, vmax=255)
         plt.show()
-    return ret, corners, ids
-
-
-def compute_intrinsics(im_pts, image_size, obj_pts=None, ids=None, charuco_board=None, params=None):
-    """ """
-    flags = 0
-    cam_m, dist_coefs = None, None
-    if params["use_intrinsic_guess"]:
-        flags += cv2.CALIB_USE_INTRINSIC_GUESS
-        fx, fy = params["intrinsic_guess"]["f"]
-        cx, cy = params["intrinsic_guess"]["center_point"]
-        dist_coefs = params["intrinsic_guess"]["dist_coefs"]
-        cam_m = np.array(
-            [
-                [fx, 0, cx],
-                [0, fy, cy],
-                [0, 0, 1],
-            ]
-        )
-    if params["fix_aspect_ratio"]:
-        flags += cv2.CALIB_FIX_ASPECT_RATIO
-    if params["fix_center_point"]:
-        flags += cv2.CALIB_FIX_PRINCIPAL_POINT
-    if params["fix_focal_length"]:
-        flags += cv2.CALIB_FIX_FOCAL_LENGTH
-    if params["zero_tang_dist"]:
-        flags += cv2.CALIB_ZERO_TANGENT_DIST
-
-    image_size = (image_size[1], image_size[0])
-    if not isinstance(im_pts, list):
-        im_pts = [im_pts]
-
-    if charuco_board is not None:
-        if not isinstance(ids, list):
-            ids = [ids]
-        reprojection_error, cam_mat, dist_coefs, r, t = cv2.aruco.calibrateCameraCharuco(
-            im_pts, ids, charuco_board, image_size, cam_m, dist_coefs, flags=flags
-        )
-
-    else:
-        if not isinstance(obj_pts, list):
-            obj_pts = [obj_pts]
-        reprojection_error, cam_mat, dist_coefs, r, t = cv2.calibrateCamera(
-            obj_pts, im_pts, image_size, cam_m, dist_coefs, flags=flags
-        )
-    return reprojection_error, cam_mat, dist_coefs
-
-
-def compute_relative_extrinsics(obj_pts, im_pts1, cam_m1, dist_1, im_pts2, cam_m2, dist_2, image_size=None):
-    """ """
-    R, T, E, F = None, None, None, None
-    criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 1000, 1e-5)
-    flags = cv2.CALIB_FIX_INTRINSIC
-    # In cv2.stereoCalibrate, image_size is used to initialize the intrinsics matrix. As the intrinsics are
-    # already computed, the flags can be set to cv2.CALIB_FIX_INTRINSIC
-    (reprojection_error, _, _, _, _, R, T, E, F) = cv2.stereoCalibrate(
-        obj_pts,
-        im_pts1,
-        im_pts2,
-        cam_m1,
-        dist_1,
-        cam_m2,
-        dist_2,
-        image_size,
-        R,
-        T,
-        E,
-        F,
-        flags=flags,
-        criteria=criteria,
-    )
-    if R is None or T is None:
-        print("WARNING: No target pair was detected in the same capture")
-    return reprojection_error, R, T
 
 
 def detect_patterns(
-    directory_path, data_config_path, pattern_type, pattern_size, charuco_board=None, params=None, verbosity=0
+    directory_path, data_config_path, pattern_type, pattern_size, params, charuco_board=None, verbosity=0
 ):
+    """Detects chessboard or charuco patterns in the capture files.
+
+    For all files in the calibration parameters (json loaded in `params`), this function detects the pattern and returns
+    the pattern point coordinates for each stream in the files. The results (image points and IDs for charuco) are
+    stored in a DataFrame. This function also returns a dictionary with the image shape per camera, which is used to
+    initialize the camera intrinsic matrices.
+
+    If the pattern is a chessboard, the same number of corners is found for every detected pattern. For a charuco board,
+    the detection can find a subset of the symbols present on the pattern, so the number of points is not always the
+    same. The IDs of the detected symbols are then required to estimate the camera parameters.
+
+    The pattern points dataframe has the following structure:
+
+                       cameraXXX   cameraYYY   cameraZZZ
+
+    fileAA:frameAA     ptrn_pts    ptrn_pts    ptrn_pts
+    fileBB:frameBB     ptrn_pts    ptrn_pts    ptrn_pts
+    fileCC:frameCC     ptrn_pts    ptrn_pts    ptrn_pts
+
+    Notes
+    -----
+    Color images (in bob's format) are first converted to opencv's grayscale format.
+
+    The detection is first run on the raw images. If it fails, it is tried on the pre-processed images.
+
+    Parameters
+    ----------
+    directory_path : str
+        Path to the directory containing the calibration data.
+    data_config_path : str
+        Path to the json file describing the data (names, shape, ...).
+    pattern_type : str
+        "charuco" or "chessboard": Type of pattern to detect.
+    pattern_size : :obj:`tuple` of int
+        Number of rows, number of columns in the pattern.
+    params : dict
+        Calibration parameters (image processing parameters, etc...)
+    charuco_board : :obj:`cv2.aruco_CharucoBoard`
+        Dictionary-like structure defining the charuco pattern, by default None. Required if pattern_type is "charuco".
+    verbosity : int
+        Verbosity level, higher for more output, by default 0.
+
+    Returns
+    -------
+    pattern_points : :obj:`pandas.DataFrame`
+        Dataframe containing the detected pattern points in the data.
+    image_size : dict
+        Shape of the image for each stream in the data, which is required to initialize the camera parameter
+        estimation.
+
+    Raises
+    ------
+    ValueError
+        If the charuco board is not specified when the pattern_type is "charuco".
+    ValueError
+        If the streams in the data files do not have the same number of frames.
+    ValueError
+        If the length of the specified frames list doesn't match the number of calibration files.
+    ValueError
+        If the specified pattern type is not supported. (Only "chessboard" and "charuco" are supported.)
+    """
 
     if charuco_board is None and pattern_type == "charuco":
         raise ValueError("Charuco board not specified.")
@@ -167,30 +221,15 @@ def detect_patterns(
 
         streams = [Stream(stream, f) for stream in f.get_available_streams()]
 
-        # check that all streams have the same number of frames
-        if any(stream.shape[0] != streams[0].shape[0] for stream in streams):
-            file_name = os.path.join("processed_calibration_data/dark_demosaiced", pattern_type)
-            error_str = (
-                "Capture files must contain synced data: "
-                + "all streams must have the same number of frames!"
-                + "\n"
-                + "Streams with shapes "
-            )
-            for stream in f.get_available_streams():
-                error_str = error_str + str(stream.shape) + " "
-            error_str = error_str + "\n" + "in file " + file_name
-            raise ValueError(error_str)
-
-        # add columns for streams, if not already present in dataframe
+        # Prepare dictionary to store the detection results for each camera in the data file.
         for stream in streams:
            if stream.name not in capture_dict.keys():
                capture_dict[stream.name] = {}
-                image_size[stream.name] = {}
 
        # Get the indices of frames to use in data files
-        if frames is None:
+        if frames is None:  # take all frames
            frame_idx = range(streams[0].shape[0])
-        elif isinstance(frames, list):
+        elif isinstance(frames, list):  # use the frames specified for this file
            if len(frames) != len(files):
                raise ValueError("List of frames to use does not match number of data files.")
            frame_idx = frames[file_idx]
@@ -213,9 +252,18 @@ def detect_patterns(
 
             # if detection fails, pattern_points are set to None
             if pattern_type == "chessboard":
-                ret, ptrn_pts = detect_chessboard_corners(image, prep_image, pattern_size, verbosity)
+                ptrn_pts = detect_chessboard_corners(image, pattern_size, verbosity)
+                if ptrn_pts is None:
+                    warnings.warn(
+                        "Detection failed for "
+                        + stream.name
+                        + " in "
+                        + capture_name
+                        + ". Now trying on preprocessed image."
+                    )
+                    ptrn_pts = detect_chessboard_corners(prep_image, pattern_size, verbosity)
                 capture_dict[stream.name][capture_name] = ptrn_pts
-                if ret:
+                if ptrn_pts is not None:
                     print("Capture {} / stream : {} : Detect chessboard corners.".format(capture_name, stream.name))
                 else:
                     print(
@@ -225,7 +273,16 @@
                     )
 
             elif pattern_type == "charuco":
-                ret, ptrn_pts, ids = detect_charuco_corners(image, prep_image, charuco_board, verbosity)
+                ptrn_pts, ids = detect_charuco_corners(image, charuco_board, verbosity)
+                if ptrn_pts is None:
+                    warnings.warn(
+                        "Detection failed for "
+                        + stream.name
+                        + " in "
+                        + capture_name
+                        + ". Now trying on preprocessed image."
+                    )
+                    ptrn_pts, ids = detect_charuco_corners(prep_image, charuco_board, verbosity)
                 if ids is not None:
                     capture_dict[stream.name][capture_name] = (ids, ptrn_pts)
                     print("Capture {} / stream : {} : Detect charuco corners.".format(capture_name, stream.name))
@@ -243,10 +300,70 @@ def detect_patterns(
 
     return pattern_points, image_size
 
 
+def create_3Dpoints(pattern_size, square_size):
+    """Returns the 3D coordinates of the board corners, in the coordinate system of the calibration pattern.
+
+    With a pattern size (X,Y) or (rows, cols), the order of the corners iterates along X first, before Y.
+    The 3D object points are created as follows: (0,0,0), ..., (X-1,0,0), (0,1,0), ..., (X-1,Y-1,0).
+    The 3rd coordinate is always 0, since all the points are in the plane of the board.
+    Warning: a charuco board with a pattern size (X,Y) is not equivalent to a pattern (Y,X) rotated by +/- 90 degrees,
+    as it has different aruco markers on its board.
+
+    The coordinates are then scaled by the size of a square in the pattern, to use metric distances. (This scale
+    carries over to the translation distances between the cameras in the extrinsic parameters estimation.)
+
+    Parameters
+    ----------
+    pattern_size : :obj:`tuple` of int
+        Number of rows, number of columns in the pattern. These are the number of corners in between the pattern
+        squares, which is 1 less than the number of squares.
+    square_size : float
+        Width (or height) of a square in the pattern. The unit of this distance (eg, mm) will be the unit of distance
+        in the calibration results.
+
+    Returns
+    -------
+    object_points : :obj:`numpy.ndarray`
+        Coordinates of the pattern corners, with respect to the board.
+    """
+
+    rows, cols = pattern_size
+    object_points = np.zeros((rows * cols, 3), np.float32)
+    for i in range(0, cols):
+        for j in range(0, rows):
+            object_points[i * rows + j] = [j, i, 0]
+
+    object_points *= square_size
+    return object_points
+
 
 def get_valid_frames_for_intrinsic_calibration(detection_df, camera, pattern_type):
+    """Filters the detection df to keep only the frames that allow estimating the intrinsic parameters of `camera`.
+
+    A frame in the calibration file has images for multiple streams. However, it is possible that the pattern was not
+    detected for a stream in a frame. This function filters out the frames for which the pattern was not detected for
+    the `camera` stream, returning only valid frames for the intrinsic parameter estimation of `camera`.
+
+    Parameters
+    ----------
+    detection_df : :obj:`pandas.DataFrame`
+        DataFrame to filter, containing the detected points coordinates (and ID if charuco pattern) for all frames in
+        all calibration files.
+    camera : str
+        Camera whose intrinsic parameters will be estimated.
+    pattern_type : str
+        "chessboard" or "charuco": type of pattern.
+
+    Returns
+    -------
+    detection_df : :obj:`pandas.DataFrame`
+        Filtered dataframe with only valid frames for intrinsic parameter estimation of `camera`.
+    """
+
+    # Drop the frames where the pattern was not detected at all.
     detection_df = detection_df[~detection_df[camera].isnull()][camera]
-    # If charuco board, filter capture with len(corners) < 4
+    # When using a charuco board, only a subset of the pattern symbols can be detected. We filter out the frames where
+    # less than 4 corners were detected, because at least 4 are required for intrinsic parameter estimation.
    index_to_delete = []
    if pattern_type == "charuco":
        for capture_id, capture in detection_df.iteritems():
@@ -257,57 +374,263 @@ def get_valid_frames_for_intrinsic_calibration(detection_df, camera, pattern_typ
    return detection_df
 
 
+def format_calibration_points_from_df(df, object_points, pattern_type):
+    """Format the detected pattern points in `df` and the object points into the appropriate format for parameter estimation.
+
+    This is a utility function to take the image points in `df`, the corresponding 3D points in `object_points`,
+    and output them in the format required by opencv's camera parameters estimation functions.
+    `df` contains the image points (calibration points in the coordinate system of the image pixels), and
+    `object_points` the pattern points in the coordinate system of the pattern. If a chessboard pattern is used, this
+    function simply returns the points as separate arrays. If a charuco board is used, the function also selects the
+    object points corresponding to the detected image points.
+
+    Parameters
+    ----------
+    df : pandas.DataFrame
+        Dataframe containing the detected image points to use for calibration of a camera. The index is the capture
+        filename and frame, the values are the image points coordinates. If a charuco pattern is used, the values are a
+        tuple of the image points ids and coordinates.
+    object_points : numpy.ndarray
+        3D coordinates of the pattern points, in the coordinate system of the pattern. shape: (#points, 3)
+    pattern_type : str
+        "chessboard" or "charuco". Type of the pattern used for calibration.
+
+    Returns
+    -------
+    im_pts : list
+        Image points list. The length of the list is the number of capture files. Each element in the list is an array
+        of detected image points for this file. shape: (#points, 1, 2)
+    obj_pts : list
+        Object points list. The object points are the coordinates, in the coordinate system of the pattern, of the
+        detected image points.
+    ids : list
+        List of the points ids. If chessboard, the list is empty (the points don't have an id).
+    """
+
+    obj_pts, im_pts, ids = [], [], []
+    for capture_id, capture in df.iteritems():
+        if pattern_type == "charuco":
+            ids.append(capture[0])  # ids of the points
+            im_pts.append(capture[1])  # coordinates of the points
+        else:
+            im_pts.append(capture)
+
+    # If charuco board, mask all 3D points in the board not detected in the ids for each frame
+    if pattern_type == "charuco":
+        for frame in range(0, len(im_pts)):
+            list_id = ids[frame]  # get the ids of the detected image points
+            obj_pts.append(
+                object_points[list_id, :]
+            )  # select only the object points corresponding to detected image points.
+    else:
+        obj_pts = [object_points for _ in range(0, len(im_pts))]  # if chessboard, take them all
+
+    return im_pts, obj_pts, ids
+
+
+def compute_intrinsics(im_pts, image_size, params, obj_pts=None, ids=None, charuco_board=None):
+    """Estimates the intrinsic parameters of a camera.
+
+    This function is basically a wrapper of opencv's calibration function `calibrateCameraCharuco` or `calibrateCamera`.
+    For more details on the format or meaning of the return values, please consult opencv's documentation, eg:
+    https://docs.opencv.org/4.5.0/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d
+    https://docs.opencv.org/4.5.0/d9/d6a/group__aruco.html#ga54cf81c2e39119a84101258338aa7383
+
+    Parameters
+    ----------
+    im_pts : list
+        Coordinates of the detected pattern corners, in the image pixels coordinate system. Length of the list is the
+        number of calibration capture files. Each element is an array of image points coordinates
+        (shape: (#points, 1, 2))
+    image_size : dict
+        Shape of the images of the camera whose parameters are computed.
+    params : dict
+        Parameters for the intrinsic parameters estimation algorithms. These are the parameters in the "intrinsics"
+        field of the config passed to this script.
+    obj_pts : list
+        List of detected pattern points, in the coordinate system of the pattern, by default None. This is required if
+        the pattern used was a chessboard pattern, but is not necessary for charuco patterns: the charuco calibration
+        function builds the object points coordinates using the detected points ids (For a given size, a pattern always
+        has the same points in the same place).
+    ids : list
+        Ids of the image points, by default None. This is only required with a charuco pattern.
+    charuco_board : :obj:`cv2.aruco_CharucoBoard`
+        Dictionary-like structure defining the charuco pattern, by default None. Required if pattern_type is "charuco".
+
+    Returns
+    -------
+    reprojection_error : float
+        Reprojection error
+    cam_mat : :obj:`numpy.ndarray`
+        opencv 3x3 camera matrix: [[fx,  0, cx],
+                                   [ 0, fy, cy],
+                                   [ 0,  0,  1]]
+    dist_coefs : :obj:`numpy.ndarray`
+        Opencv's distortion coefficients (length depends on the pattern type and estimation algorithm parameters)
+    """
+
+    # set the flags and parameters to pass to opencv's functions
+    flags = 0
+    cam_m, dist_coefs = None, None
+    if params["use_intrinsic_guess"]:
+        flags += cv2.CALIB_USE_INTRINSIC_GUESS
+        fx, fy = params["intrinsic_guess"]["f"]
+        cx, cy = params["intrinsic_guess"]["center_point"]
+        dist_coefs = params["intrinsic_guess"]["dist_coefs"]
+        cam_m = np.array(
+            [
+                [fx, 0, cx],
+                [0, fy, cy],
+                [0, 0, 1],
+            ]
+        )
+    if params["fix_aspect_ratio"]:
+        flags += cv2.CALIB_FIX_ASPECT_RATIO
+    if params["fix_center_point"]:
+        flags += cv2.CALIB_FIX_PRINCIPAL_POINT
+    if params["fix_focal_length"]:
+        flags += cv2.CALIB_FIX_FOCAL_LENGTH
+    if params["zero_tang_dist"]:
+        flags += cv2.CALIB_ZERO_TANGENT_DIST
+
+    image_size = (image_size[1], image_size[0])
+    if not isinstance(im_pts, list):
+        im_pts = [im_pts]
+
+    if charuco_board is not None:  # Using a charuco pattern
+        if not isinstance(ids, list):
+            ids = [ids]
+        reprojection_error, cam_mat, dist_coefs, r, t = cv2.aruco.calibrateCameraCharuco(
+            im_pts, ids, charuco_board, image_size, cam_m, dist_coefs, flags=flags
+        )
+
+    else:
+        if not isinstance(obj_pts, list):
+            obj_pts = [obj_pts]
+        reprojection_error, cam_mat, dist_coefs, r, t = cv2.calibrateCamera(
+            obj_pts, im_pts, image_size, cam_m, dist_coefs, flags=flags
+        )
+    return reprojection_error, cam_mat, dist_coefs
+
+
 def get_valid_frames_for_extrinsic_calibration(detection_df, camera_1, camera_2, pattern_type):
-    # df = detection_df.copy()
+    """Select the frames in which enough pattern points were detected to estimate extrinsic parameters between 2 cameras.
+
+    If a chessboard pattern is used, the detection either finds all points, or none, so the function just selects the
+    frames where the detection was successful for both cameras.
+    For a charuco pattern, the detection can detect a subset of the pattern points. In this case, the function selects
+    the frames with enough matching points (ie with the same id) detected in both cameras.
+
+    Parameters
+    ----------
+    detection_df : :obj:`pandas.DataFrame`
+        DataFrame to filter, containing the detected points coordinates (and ID if charuco pattern) for all frames in
+        all calibration files.
+    camera_1 : str
+        First camera whose extrinsic parameters will be estimated.
+    camera_2 : str
+        Second camera whose extrinsic parameters will be estimated.
+    pattern_type : str
+        "chessboard" or "charuco": type of pattern.
+
+    Returns
+    -------
+    detection_df : :obj:`pandas.DataFrame`
+        Filtered dataframe with only valid frames for extrinsic parameter estimation between `camera_1` and `camera_2`.
+    """
+
+    # subselect only the 2 cameras
     detection_df = detection_df.loc[:, [camera_1, camera_2]]
+    # drop any row where pattern points were not detected for a camera
     detection_df = detection_df[(~detection_df[camera_1].isnull()) & (~detection_df[camera_2].isnull())]
-    # If charuco board, filter only matching ids for each frame
+    # If a charuco board is used, the pattern points are not necessarily all detected. We need to remove those detected
+    # in one camera but not the other.
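+    # e.g. if camera_1 detected corner ids [2, 5, 7] and camera_2 detected [5, 7, 9], only corners 5 and 7 are kept
+    # for that frame (hypothetical ids, for illustration only).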
    index_to_delete = []
    if pattern_type == "charuco":
        for capture_id, capture in detection_df.iterrows():
+            # capture[cam][0] are the pattern ids, capture[cam][1] are the pattern coordinates
+            # ids of pattern points in camera 1 also in camera 2
            mask_camera_1 = np.in1d(capture[camera_1][0], capture[camera_2][0])
+            # ids of pattern points in camera 2 also in camera 1
            mask_camera_2 = np.in1d(capture[camera_2][0], capture[camera_1][0])
            if np.count_nonzero(mask_camera_1) < 4 or np.count_nonzero(mask_camera_2) < 4:
+                # Not enough matching pattern points were detected, remove this capture from the df.
                index_to_delete.append(capture_id)
            else:
+                # select the pattern points that were detected for both cameras
                capture[camera_1] = (capture[camera_1][0][mask_camera_1], capture[camera_1][1][mask_camera_1])
                capture[camera_2] = (capture[camera_2][0][mask_camera_2], capture[camera_2][1][mask_camera_2])
    detection_df = detection_df.drop(index_to_delete, axis=0)
    return detection_df
 
 
-def create_3Dpoints(pattern_size, square_size):
-    # create object points
-    rows, cols = pattern_size
-    object_points = np.zeros((rows * cols, 3), np.float32)
-    for i in range(0, cols):
-        for j in range(0, rows):
-            object_points[i * rows + j] = [cols - 1 - i, j, 0]
-    object_points *= square_size
-    return object_points
-
-
-def define_calibration_points(df, object_points, pattern_type):
-    obj_pts, im_pts, ids = [], [], []
-    for capture_id, capture in df.iteritems():
-        if pattern_type == "charuco":
-            ids.append(capture[0])
-            im_pts.append(capture[1])
-        else:
-            im_pts.append(capture)
+def compute_relative_extrinsics(obj_pts, im_pts1, cam_m1, dist_1, im_pts2, cam_m2, dist_2, image_size=None):
+    """Compute extrinsic parameters of camera 1 with respect to camera 2.
+
+    This function is basically a wrapper around opencv's stereoCalibrate function. For more information on the output
+    values and their meaning, please refer to opencv's documentation, eg:
+    https://docs.opencv.org/4.5.0/d9/d0c/group__calib3d.html#ga91018d80e2a93ade37539f01e6f07de5
+
+    Parameters
+    ----------
+    obj_pts : list
+        Object points list: Coordinates of the detected pattern points in the coordinate system of the pattern
+    im_pts1 : list
+        Image points of camera 1: the coordinates of the pattern points in the coordinate system of the camera 1 pixels.
+        Each element of the list is a numpy array with shape (#points, 1, 2)
+    cam_m1 : :obj:`numpy.ndarray`
+        Camera matrix of camera 1.
+    dist_1 : :obj:`numpy.ndarray`
+        Distortion coefficients of camera 1.
+    im_pts2 : list
+        Image points of camera 2: the coordinates of the pattern points in the coordinate system of the camera 2 pixels.
+        Each element of the list is a numpy array with shape (#points, 1, 2)
+    cam_m2 : :obj:`numpy.ndarray`
+        Camera matrix of camera 2.
+    dist_2 : :obj:`numpy.ndarray`
+        Distortion coefficients of camera 2.
+    image_size : tuple
+        Shape of the image, used only to initialize the camera intrinsic matrices, by default None.
+
+    Returns
+    -------
+    reprojection_error : float
+        Reprojection error
+    R : :obj:`numpy.ndarray`
+        Relative rotation matrix of camera 1 with respect to camera 2. shape: (3, 3)
+    T : :obj:`numpy.ndarray`
+        Relative translation of camera 1 with respect to camera 2. shape: (3, 1)
+    """
 
-    # If charuco board, mask all 3D points in the board not detected in the ids for each frame
-    if pattern_type == "charuco":
-        for frame in range(0, len(im_pts)):
-            list_id = ids[frame]
-            obj_pts.append(object_points[list_id, :])
-    else:
-        obj_pts = [object_points for _ in range(0, len(im_pts))]
-    return im_pts, obj_pts, ids
+    R, T, E, F = None, None, None, None
+    criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 1000, 1e-5)
+    flags = cv2.CALIB_FIX_INTRINSIC
+    # In cv2.stereoCalibrate, image_size is used to initialize the intrinsics matrix. As the intrinsics are
+    # already computed, the flags can be set to cv2.CALIB_FIX_INTRINSIC
+    (reprojection_error, _, _, _, _, R, T, E, F) = cv2.stereoCalibrate(
+        obj_pts,
+        im_pts1,
+        im_pts2,
+        cam_m1,
+        dist_1,
+        cam_m2,
+        dist_2,
+        image_size,
+        R,
+        T,
+        E,
+        F,
+        flags=flags,
+        criteria=criteria,
+    )
+    if R is None or T is None:
+        print("WARNING: No target pair was detected in the same capture")
+    return reprojection_error, R, T
 
 
-# only for debugging, will be removed further
+# only for debugging
 def display_df(df):
    for i, col in df.iteritems():
        print(i)
@@ -329,11 +652,24 @@
 
 def display_cameras_poses(camera_poses, ax_limits):
+    """Plot the camera poses.
+
+    Parameters
+    ----------
+    camera_poses : dict
+        Camera poses: joint rotation-translation matrix H = [R|T] of the camera with respect to the reference.
+    ax_limits : dict
+        Limit values for the (matplotlib) axes of the plot. Defined in the "display" section of the configuration file.
+    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    ax.set_xlim(ax_limits["axe_x"])
    ax.set_ylim(ax_limits["axe_y"])
    ax.set_zlim(ax_limits["axe_z"])
+    ax.set_xlabel("$X$")
+    ax.set_ylabel("$Y$")
+    ax.set_zlabel("$Z$")
+    ax.view_init(elev=135, azim=90)
    for camera, H in camera_poses.items():
        R = H[0:3, 0:3]
        p = H[0:3, 3]
@@ -393,6 +729,9 @@ def main():
    if pattern_type == "charuco":
        pattern_size = (config["pattern_rows"], config["pattern_columns"])
    if pattern_type == "chessboard":
+        # The chessboard detection counts the number of corners in between the chessboard squares, which is 1 less than
+        # the number of squares in a line or column.
+        # The charuco point detection counts the number of squares, so we don't need to change it.
        pattern_size = (config["pattern_rows"] - 1, config["pattern_columns"] - 1)
 
    charuco_board = None
@@ -405,6 +744,7 @@ def main():
        pattern_size[0], pattern_size[1], square_size, marker_size, aruco_dict
    )
 
+    # Run the pattern detection in all specified files in the calibration configuration.
    pts, image_data = detect_patterns(
        capture_dir_path,
        data_config_path,
@@ -415,17 +755,20 @@ def main():
        verbosity=verbosity,
    )
 
-    # Create 3D object points for 1 frame
-    if pattern_type == "chessboard":
-        object_points = create_3Dpoints(pattern_size, square_size)
-    else:
+    # Create 3D object points of the pattern corners, in the coordinate system of the calibration pattern
+    if pattern_type == "charuco":
+        # The intrinsic parameter estimation functions from opencv use the number of corners between the pattern
+        # (charuco or chessboard) squares. The pattern detection however uses the number of squares, which is one more,
+        # so we need to reduce the pattern size by one.
pattern_size = (pattern_size[0] - 1, pattern_size[1] - 1) - object_points = create_3Dpoints(pattern_size, square_size) + object_points = create_3Dpoints(pattern_size, square_size) + + # Set print options for debuging np.set_printoptions(formatter={"float": lambda x: "{0:0.3f}".format(x)}) # First all dataframe must be set and intrinsics computed for each camera stream = [k for k in image_data.keys()] - calibration = dict.fromkeys(stream) + calibration = dict.fromkeys(stream) # To store the calibration results for each camera for idx, camera in enumerate(stream): image_size = image_data[camera] df_intrinsics = get_valid_frames_for_intrinsic_calibration(pts, camera, pattern_type) @@ -437,7 +780,7 @@ def main(): print("\n=== Intrinsics {} dataframe ===".format(camera)) display_df(df_intrinsics) calibration[camera] = {} - im_pts, obj_pts, ids = define_calibration_points(df_intrinsics, object_points, pattern_type) + im_pts, obj_pts, ids = format_calibration_points_from_df(df_intrinsics, object_points, pattern_type) if pattern_type == "charuco": err, cam_mat, dist_coefs = compute_intrinsics( im_pts, image_size, ids=ids, charuco_board=charuco_board, params=config["intrinsics"] @@ -453,6 +796,13 @@ def main(): print("Camera matrix \n {} \n Distortion coeffs \n {} \n".format(cam_mat, dist_coefs)) print("Reprojection error \n {}\n".format(err)) + reference = args.reference + if intrinsics_only: + return + if reference not in stream: + raise ValueError("Reference camera " + reference + " is not in the available streams " + str(stream)) + + # Get the frames that can be used for extrinsic parameters estimation if not intrinsics_only: for idx, camera in enumerate(stream): for camera_2 in stream: @@ -464,37 +814,33 @@ def main(): print("\n=== Extrinsics {}-{} dataframe ===".format(camera, camera_2)) display_df(df_extrinsics) - # Then the extrinsics between the reference camera and the others are computed - reference = args.reference - if intrinsics_only: - return + # Then the extrinsics between the cameras with respect to the reference are computed stream = [k for k, v in calibration.items() if v is not None] for idx, camera in enumerate(stream): image_size = image_data[camera] - # for camera_2 in stream:# or with itertools.tee() - # if camera_2 == camera: - # continue if not "df_extrinsics_{}".format(reference) in calibration[camera].keys(): + if reference != camera: + warnings.warn( + "Can not perform extrinsic parameters estimation for cameras " + + camera + + " and " + + reference + + ", because no frames provided enough detected pattern points." 
+ ) continue df = calibration[camera]["df_extrinsics_{}".format(reference)] cam_mat1 = calibration[camera]["camera_matrix"] cam_mat2 = calibration[reference]["camera_matrix"] dist_coefs1 = calibration[camera]["distortion_coefs"] dist_coefs2 = calibration[reference]["distortion_coefs"] - im_pts1, obj_pts, ids = define_calibration_points(df[camera], object_points, pattern_type) - im_pts2, obj_pts2, ids2 = define_calibration_points(df[reference], object_points, pattern_type) - for i1, i2, o1, o2 in zip(ids, ids2, obj_pts, obj_pts2): - if np.linalg.norm(o1 - o2) != 0.0: - raise ValueError("Difference between both object points") - if np.linalg.norm(i1 - i2) != 0.0: - raise ValueError("Difference between both ids") - err, R, T = compute_relative_extrinsics( - obj_pts, im_pts1, cam_mat1, dist_coefs1, im_pts2, cam_mat2, dist_coefs2 - ) + im_pts1, obj_pts, ids = format_calibration_points_from_df(df[camera], object_points, pattern_type) + im_pts2, obj_pts2, ids2 = format_calibration_points_from_df(df[reference], object_points, pattern_type) + err, R, T = compute_relative_extrinsics(obj_pts, im_pts1, cam_mat1, dist_coefs1, im_pts2, cam_mat2, dist_coefs2) + # Build the camera poses H: combination of R and T. H = np.eye(4) H[0:3, 0:3] = R - H[0:3, 3] = T.T + H[0:3, 3] = T.squeeze() # T shape is (3, 1), doesn't broadcast to (3,) without squeeze calibration[camera]["extrinsics_{}".format(reference)] = H calibration[camera]["relative_rotation"] = R calibration[camera]["relative_translation"] = T @@ -503,6 +849,7 @@ def main(): print("Rotation \n {} \nTranslation \n {} \n".format(R, T.T)) print("Reprojection error \n {}\n".format(err)) + # Write results in format of calibration file if args.output_file is not None: data = {} for camera in stream: diff --git a/bob/ip/stereo/stereo.py b/bob/ip/stereo/stereo.py index db2e0487003763e87e65254b908d61f1ad446443..8de76a0a58b3f9bf5c4124ed1bc3bb7454a227b3 100644 --- a/bob/ip/stereo/stereo.py +++ b/bob/ip/stereo/stereo.py @@ -286,7 +286,7 @@ def main(): im_left = bob.io.image.load(args.left_image) im_right = bob.io.image.load(args.right_image) - im_out = stereo_match(im_left, img_right, CameraPair()) + im_out = stereo_match(im_left, im_right, CameraPair()) # rescale and write file diff --git a/bob/ip/stereo/stereo_stream_filters.py b/bob/ip/stereo/stereo_stream_filters.py index 721470538d8eeefcb1b532a946b012e504cad37a..f155ad8430552c6685f0c1e99c96985e5695ff3c 100644 --- a/bob/ip/stereo/stereo_stream_filters.py +++ b/bob/ip/stereo/stereo_stream_filters.py @@ -153,7 +153,7 @@ class StreamWarp(bob.io.stream.StreamFilter): :obj:`numpy.ndarray` `data` warped to `warp_to` image shape. """ - if self.warp_to.camera.markers is None or self.camear.markers is None: + if self.warp_to.camera.markers is None or self.camera.markers is None: raise ValueError("Camera markers are not defined ! 
Did you run linear calibration of your cameras ?")
        self.markers = (self.warp_to.camera.markers, self.camera.markers)  # camera added to Stream by add_stream_camera
        self.warp_transform = transform.ProjectiveTransform()
diff --git a/conda/meta.yaml b/conda/meta.yaml
index f0944b53e154869291a266f0a2206a3616ef9e09..41555de73c8f7b16bea9055319bcb085b269d943 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -34,6 +34,7 @@ requirements:
    - opencv {{ opencv }}
    - scikit-image {{ scikit_image }}
    - pandas {{ pandas }}
+    - matplotlib {{ matplotlib }}
    - bob.io.image
    - bob.ip.color
    - bob.io.stream
@@ -43,6 +44,7 @@ requirements:
    - {{ pin_compatible('numpy') }}
    - {{ pin_compatible('opencv') }}
    - {{ pin_compatible('pandas') }}
+    - {{ pin_compatible('matplotlib') }}
 
 test:
  imports:
diff --git a/doc/calibration.rst b/doc/calibration.rst
new file mode 100644
index 0000000000000000000000000000000000000000..22699688bd71864fa6956597e6504096ad673a75
--- /dev/null
+++ b/doc/calibration.rst
@@ -0,0 +1,228 @@
+.. _bob.ip.stereo.calibration:
+
+------------------
+Camera Calibration
+------------------
+
+Camera calibration consists in estimating the parameters of a camera, using a set of images of a pattern from the
+camera. The calibration procedure implemented in ``calibration.py`` uses capture files containing synchronous images of
+several cameras to first estimate the intrinsic, then the extrinsic, parameters of the cameras. The script mostly wraps
+opencv's algorithms to use with ``bob.io.stream`` data files, therefore it is highly recommended to read opencv's
+documentation before using this script.
+
+
+Camera Parameters
+-----------------
+
+The intrinsic parameters of a camera are the parameters that do not depend on the camera's environment. They are composed
+of the camera matrix - which describes the transformation between a 3D point in the world coordinate system, to a 2D
+pixel in an image plane, assuming a distortion free projection - and the distortion coefficients of a camera. Opencv's
+camera and distortion model is explained `here
+`_.
+
+The extrinsic parameters are the rotation and translation of the camera representing the change of referential from the
+world coordinate to the camera coordinate system. Usually, the position and orientation of one camera, the reference, is
+used as the world coordinate system. In that case, the extrinsic parameters of the other cameras encode the relative
+position and rotation of these cameras with respect to the reference, which enables stereo processing.
+
+
+Intrinsic Parameters Estimation
+-------------------------------
+
+To estimate the intrinsic parameters of a camera, an image of a pattern is captured. The pattern contains points whose
+positions and distances with respect to each other are known. The first type of patterns to be used were the checkerboard
+patterns: the points of interest of the patterns are the inner corners of the checkerboard. The points are aligned, and
+the distance between the points is given by the length of a checkerboard square. However, in an image of the pattern,
+the checkerboard corners are not aligned due to distortion, which allows estimating the distortion coefficients
+of the camera's lens. The number of pixels between corners maps to the distance in the real world (square length) and
+allows estimating the camera matrix.
+
+Opencv implements methods to detect checkerboard corners, which give precise measurements of the corners in the
+image, as is required for intrinsic parameters estimation. However these methods are not robust to occlusion: if a part
+of the pattern is hidden, the detection fails. To circumvent this problem, a Charuco pattern can be used: the Charuco
+pattern has unique (for a given pattern size) Aruco symbols placed in its white squares. The symbols allow to
+individually identify each corner, so the detection can work even when not all corners are visible.
+
+See opencv's documentation for more explanations on Charuco patterns and how to generate them:
+`Detection of ArUco Markers
+`_,
+`Detection of ChArUco Corners
+`_,
+`Calibration with ArUco and ChArUco
+`_.
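+
+As a rough illustration of this detect-then-calibrate workflow, here is a minimal sketch (not the code of
+``calibration.py``): it assumes an ``opencv-contrib`` build with the 4.x ``cv2.aruco`` API, and the board geometry and
+file names below are made-up placeholders:
+
+.. code-block:: python
+
+    import cv2
+
+    # Hypothetical charuco board: 10x7 squares of 25 mm, with 10.5 mm aruco markers.
+    aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100)
+    board = cv2.aruco.CharucoBoard_create(10, 7, 25.0, 10.5, aruco_dict)
+
+    all_corners, all_ids, image_size = [], [], None
+    for path in ["capture_0.png", "capture_1.png"]:  # placeholder file names
+        gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
+        image_size = gray.shape[::-1]  # (width, height)
+        # Detect the individual aruco markers, then interpolate the charuco corners.
+        markers, ids, _ = cv2.aruco.detectMarkers(gray, aruco_dict)
+        if ids is None or len(ids) < 3:
+            continue  # not enough markers detected in this image
+        n, corners, corner_ids = cv2.aruco.interpolateCornersCharuco(markers, ids, gray, board)
+        if n >= 4:  # at least 4 corners are needed for calibration
+            all_corners.append(corners)
+            all_ids.append(corner_ids)
+
+    # Estimate the camera matrix and distortion coefficients from all detections.
+    err, cam_mat, dist_coefs, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(
+        all_corners, all_ids, board, image_size, None, None
+    )
+    print("reprojection error:", err)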
+
+Extrinsic Parameters Estimation
+-------------------------------
+
+Once the intrinsic parameters of a camera are known, we are able to "rectify" the camera's image, that is to correct
+the distortion and align the images. When this is done, we can tackle the problem of finding the
+position of the camera with respect to the real world coordinate system. Indeed, the mapping between a 3D point in the
+real world and the corresponding 2D point in the camera's pixels is the projection parametrized by the camera's pose.
+Given enough pairs of (3D, 2D) points, the projection parameters can be estimated: this is the so-called
+Perspective-n-Point (PnP) problem.
+`Opencv's PnP solver documentation
+`_.
+
+.. note::
+
+    The PnP algorithm works with a set of points whose coordinates are known in the coordinate system of the camera's
+    images (pixel position in the image) and in the world coordinate system. The world coordinate system origin and
+    orientation can be arbitrarily chosen; the result of the PnP will be the camera pose with respect to that choice.
+    The simplest choice is to place the coordinate system along the checkerboard axes: that way the 3D coordinates of
+    the corners are (``square_number_along_X`` * ``square_length``, ``square_number_along_Y`` * ``square_length``, 0).
+
+    A minimum of 4 points is needed for the PnP algorithm, however more points help improve the precision and
+    robustness to outliers.
+
+
+However, we are not so much interested in knowing one camera's pose as in knowing the position of one camera with
+respect to another. In opencv, this is performed by the `stereo calibrate
+`_ function, which
+performs the following steps:
+
+* Receive a list of images for each camera, of the same pattern positions (for instance, the images were taken
+  simultaneously with both cameras looking at the pattern).
+* For each pair of images from camera 1 and camera 2, solve the PnP problem for each camera. Then from the poses of the
+  2 cameras with respect to the pattern, compute the pose of one camera with respect to the other.
+* Average the poses obtained for each image pair in the previous step.
+* Fine-tune the pose by optimizing the reprojection error of all points in both cameras. (Levenberg–Marquardt optimizer)
+
+
+.. Note::
+
+    The ``stereoCalibrate`` function is also able to optimize the intrinsic parameters of the cameras. However, as the
+    number of parameters to optimize becomes big, the results may not be satisfactory. It is often preferable to first
+    find the intrinsic parameters of each camera, then fix them and optimize the extrinsic parameters.
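+
+Here is a minimal synthetic sketch of such a call (not the code of ``calibration.py``; the board geometry, intrinsics
+and poses below are made-up values, chosen only so that the snippet is self-contained and runnable):
+
+.. code-block:: python
+
+    import cv2
+    import numpy as np
+
+    # A 9x6 board with 25 mm squares, seen by two identical cameras 60 mm apart.
+    rows, cols, square = 6, 9, 25.0
+    obj = np.zeros((rows * cols, 3), np.float32)
+    obj[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2) * square
+
+    cam_m = np.array([[1200.0, 0.0, 640.0], [0.0, 1200.0, 480.0], [0.0, 0.0, 1.0]])
+    dist = np.zeros(5)
+    image_size = (1280, 960)
+
+    # Board pose in camera 1; camera 2 sees the board shifted by T_true (no relative rotation).
+    rvec = np.array([0.1, -0.1, 0.05])
+    tvec1 = np.array([-100.0, -70.0, 500.0])
+    T_true = np.array([-60.0, 0.0, 0.0])
+
+    im1, _ = cv2.projectPoints(obj, rvec, tvec1, cam_m, dist)
+    im2, _ = cv2.projectPoints(obj, rvec, tvec1 + T_true, cam_m, dist)
+
+    # The intrinsics are fixed: only the relative pose (R, T) is optimized.
+    err, _, _, _, _, R, T, E, F = cv2.stereoCalibrate(
+        [obj], [im1.astype(np.float32)], [im2.astype(np.float32)],
+        cam_m, dist, cam_m, dist, image_size,
+        flags=cv2.CALIB_FIX_INTRINSIC,
+    )
+    print(err, T.ravel())  # T should recover T_true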
+
+
+.. image:: img/stereo_diagram.png
+
+
+
+``calibration.py``
+------------------
+
+The calibration script performs the following steps:
+
+* Find all the capture files in the input folder. The capture files are ``hdf5`` files containing a dataset per camera,
+  and one or more frames per dataset. If specified in the config, only 1 frame will be loaded.
+* For each frame of each camera, run the detection to find the pattern points.
+* For each camera, using the frames where the pattern could be detected in the previous step, estimate the camera's
+  intrinsic parameters.
+* Finally, the extrinsic parameters of all cameras with respect to a reference (passed as argument) are computed, using
+  the frames where enough pattern points for both cameras were detected in the second step. The poses of the cameras are
+  then displayed, and the calibration results are written to a ``json`` file.
+
+
+The calibration script can be called with:
+
+.. code-block:: bash
+
+    $ python calibration.py -c calibration_config.json -r ref_camera -o output.json -v
+    usage: calibration.py [-h] [-c CONFIG] [-i INTRINSICS_ONLY] -r REFERENCE
+                          [-o OUTPUT_FILE] [-v]
+
+    Calibration script computing intrinsic and extrinsic parameters of cameras
+    from capture files. This script performs the following steps: 1. For all
+    capture files specified in the configuration json, detect the pattern points
+    (first on the image; if that fails, the image is processed (grey-closing,
+    thresholding) and detection is tried again). 2. Using the frames where the
+    pattern points were detected in step 1: estimate the intrinsic parameters
+    for each stream (camera) in the capture files. 3. Again using the pattern
+    points detected in step 1, the extrinsic parameters of the cameras are
+    estimated with respect to the reference. 4. The camera poses are displayed,
+    and the results are saved to a json file.
+
+    optional arguments:
+      -h, --help            show this help message and exit
+      -c CONFIG, --config CONFIG
+                            An absolute path to the JSON file containing the
+                            calibration configuration.
+      -i INTRINSICS_ONLY, --intrinsics-only INTRINSICS_ONLY
+                            Compute intrinsics only.
+      -r REFERENCE, --reference REFERENCE
+                            Stream name in the data files of camera to use as
+                            reference.
+      -o OUTPUT_FILE, --output_file OUTPUT_FILE
+                            Output calibration JSON file.
+      -v, --verbosity       Output verbosity: -v output calibration result, -vv
+                            output the dataframe, -vvv plots the target detection.
+
+An example of a calibration configuration file is given below. The ``-r`` switch is used to specify the camera to use
+as the reference for extrinsic parameters estimation. The ``-v`` switch controls the verbosity of the script: setting
+``-vvv`` will for instance display the detected pattern points in each frame, to help debugging.
+
+
+Calibration configuration example:
+
+.. code-block:: json
+
+    {
+        "capture_directory" : "/path/to/capture/folder/",
+        "stream_config" : "/path/to/data/streams/config.json",
+        "pattern_type": "charuco",
+        "pattern_rows": 10,
+        "pattern_columns": 7,
+        "square_size" : 25,
+        "marker_size" : 10.5,
+        "threshold" : 151,
+        "closing_size" : 2,
+        "intrinsics":
+        {
+            "fix_center_point" : false,
+            "fix_aspect_ratio" : false,
+            "fix_focal_length" : false,
+            "zero_tang_dist" : false,
+            "use_intrinsic_guess" : false,
+            "intrinsic_guess" :
+            {
+                "f" : [1200, 1200],
+                "center_point" : [540.5, 768],
+                "dist_coefs" : [0 ,0, 0, 0, 0]
+            }
+        },
+        "frames_list": {
+            "file_1.h5" : [0],
+            "file_2.h5" : [2]
+        },
+        "display":
+        {
+            "axe_x" : [-60, 60],
+            "axe_y" : [-60, 60],
+            "axe_z" : [-60, 60]
+        }
+    }
+
+| - ``capture_directory`` : Path to the folder containing the capture files.
+| - ``stream_config`` : Path to the configuration files describing the streams in the capture files. (stream
+  configuration files from ``bob.io.stream``)
+| - ``pattern_type`` : Type of pattern to detect in the captures. Only ``chessboard`` and ``charuco`` are currently
+  supported.
+| - ``pattern_rows`` : Number of squares in the Y axis in the target.
+| - ``pattern_columns`` : Number of squares in the X axis in the target.
+| - ``square_size`` : Length of a square in the pattern. This is used as a scale for the real world distances, for
+  instance the distance between the cameras in the extrinsic parameters will have the same unit as the square size.
+| - ``marker_size`` : Size of a marker in a charuco pattern.
+| - ``threshold`` : Threshold to use for image processing (Adaptive Thresholding) to help with the pattern detection.
+| - ``closing_size`` : Size of the grey closing for image processing to help with the pattern detection.
+| - ``intrinsics`` : Regroups the parameters of the algorithm used for intrinsic parameters estimation, that is the
+  parameters of opencv's ``calibrateCameraCharuco`` or ``calibrateCamera`` functions.
+| - ``fix_center_point`` : The principal point is not changed during the global optimization. It stays at the center or
+  at the location specified in the intrinsic guess if the guess is used.
+| - ``fix_aspect_ratio`` : The functions consider only fy as a free parameter. The ratio fx/fy stays the same as in the
+  input cameraMatrix. (fx, fy: focal lengths in the camera matrix).
+| - ``fix_focal_length`` : Fix fx and fy (focal lengths in the camera matrix).
+| - ``zero_tang_dist`` : Tangential distortion coefficients are fixed to 0.
+| - ``use_intrinsic_guess`` : Use the ``intrinsic_guess`` as a starting point for the algorithm.
+| - ``intrinsic_guess`` : Starting point for the intrinsic parameter values.
+| - ``frames_list`` : Dictionary whose keys are the capture files, and values are a list of frames to use in this file.
+  **Only the files listed here will be used for the calibration.**
+| - ``display`` : Axis limits when displaying the camera poses.
+
+
+=================
+ Calibration API
+=================
+
+.. automodule:: bob.ip.stereo.calibration
\ No newline at end of file
diff --git a/doc/conf.py b/doc/conf.py
index 93267b47030c31e40547bdc0d5fcec993ec2b923..bb070e4bb7aa46229a2de93ec80885fadca3681e 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -233,6 +233,8 @@ if os.path.exists(sphinx_requirements):
 else:
    intersphinx_mapping = link_documentation()
 
+intersphinx_mapping['pandas'] = ('http://pandas.pydata.org/pandas-docs/dev', None)
+
 # We want to remove all private (i.e. _.
or __.__) members # that are not in the list of accepted functions accepted_private_functions = ['__array__'] @@ -253,4 +255,4 @@ def member_function_test(app, what, name, obj, skip, options): def setup(app): - app.connect('autodoc-skip-member', member_function_test) \ No newline at end of file + app.connect('autodoc-skip-member', member_function_test) diff --git a/doc/extra-intersphinx.txt b/doc/extra-intersphinx.txt new file mode 100644 index 0000000000000000000000000000000000000000..0ad3b26d9431e5fe5959987aca72f512ba6d2f44 --- /dev/null +++ b/doc/extra-intersphinx.txt @@ -0,0 +1,2 @@ +pandas +cv2 \ No newline at end of file diff --git a/doc/img/stereo_diagram.png b/doc/img/stereo_diagram.png new file mode 100644 index 0000000000000000000000000000000000000000..afcf62274fc9b6cc333e5abbaa99e331883d7eda Binary files /dev/null and b/doc/img/stereo_diagram.png differ diff --git a/doc/index.rst b/doc/index.rst index 26d4b8bf69064df8e62e8ecab7d130afcad76aff..60e605e0831e402f94378d1b8037f027d2f14a13 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -17,8 +17,9 @@ Users Guide .. toctree:: :maxdepth: 2 - api - warp_calibration + calibration.rst + warp_calibration.rst + api.rst .. todolist:: diff --git a/doc/nitpick-exceptions.txt b/doc/nitpick-exceptions.txt new file mode 100644 index 0000000000000000000000000000000000000000..411ff1e333430fb96cacd62147ec97db488b613e --- /dev/null +++ b/doc/nitpick-exceptions.txt @@ -0,0 +1 @@ +py:obj cv2.aruco_CharucoBoard diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..9d0c6ece3445d335bb0fa3b838fdeb8964a77047 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools", "wheel", "bob.extension", "pybind11~=2.6.1"] +build-backend = "setuptools.build_meta" diff --git a/requirements.txt b/requirements.txt index bdfd4740f4b6988981aff8fe337067b71c556017..7915fdda43d653392d094fa24fa5541735b24d1b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,7 @@ numpy # opencv We need opencv, but the opencv package is broken and can't be installed with pip. Install it with conda. scikit-image pandas +matplotlib bob.io.image bob.ip.color bob.io.stream diff --git a/setup.py b/setup.py index ecb9e12a0687359c416b4d67c2711d8f6813c033..74c6bb061727349e3abdcb6fe4ab3d8cf94105ca 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,8 @@ bob_packages = [] from setuptools import setup, dist, find_packages, distutils dist.Distribution(dict(setup_requires=setup_packages + bob_packages)) -from bob.extension import Extension, build_ext +# from bob.extension import Extension, build_ext +from pybind11.setup_helpers import Pybind11Extension, build_ext import sys @@ -110,7 +111,7 @@ setup( [ # The second extension contains the actual C++ code and the Python bindings - Extension("bob.ip.stereo._library", + Pybind11Extension("bob.ip.stereo._library", # list of files compiled into this extension [ # the pure C++ code