Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- """
- (*)~---------------------------------------------------------------------------
- Pupil - eye tracking platform
- Copyright (C) 2012-2019 Pupil Labs
- Distributed under the terms of the GNU
- Lesser General Public License (LGPL v3.0).
- See COPYING and COPYING.LESSER for license details.
- ---------------------------------------------------------------------------~(*)
- """
- import json
- import os
- import cv2
- import numpy as np
# Cluster directory containing the exported PI extrinsic calibration data.
export_dir = "/cluster/users/Ching/datasets/PI_extrinsic"
# JSON file mapping scene-camera serial numbers to 4x4 transformation
# matrices relative to the "golden" reference headset (loaded by Transformation).
path = os.path.join(export_dir, "matrix_diff_to_golden.json")
class Transformation:
    """Map 3d world points from a specific scene camera into the "golden"
    reference headset coordinate system.

    On construction, the per-camera 4x4 transformation matrices are loaded
    once from ``matrix_diff_to_golden.json``, keyed by scene-camera serial
    number.
    """

    def __init__(self):
        # Load {scene_sn: 4x4 matrix} describing each camera's offset from
        # the golden headset.
        with open(path, "r") as file:
            raw = json.load(file)
        self._matrix_diff_to_golden = {
            serial: np.array(matrix) for serial, matrix in raw.items()
        }

    def calculate_points_2d_undist_golden(self, points_3d_world, scene_sn):
        """Calculate N undistorted points in golden headset coordinates.

        :param points_3d_world: detected 3d gaze target points (unit: mm),
            shape: (N x 3)
        :param scene_sn: serial number of the scene camera
        :return: undistorted points in golden coordinate, shape: (N x 2)

        Example:

        >>> t = Transformation()
        >>> points = [[0.0, 0.0, 1000.0], [-1000.0, 1000.0, 2000.0]]
        >>> t.calculate_points_2d_undist_golden(points, "krxdw")
        """
        world = np.array(points_3d_world, dtype=np.float64)
        golden_3d = self._transform_points_3d_world_to_golden(world, scene_sn)
        return _normalize_points(golden_3d)

    def _transform_points_3d_world_to_golden(self, points_3d_world, scene_sn):
        # Look up this camera's 4x4 matrix and apply it to the points.
        matrix = self._matrix_diff_to_golden[scene_sn]
        return _transform_points(matrix, points_3d_world)
- def _transform_points(transform_matrix, points_3d_cam1):
- """
- Transform 3d points from cam1 coordinate to cam2 coordinate
- :param points_3d_cam1: 3d points in cam1 coordinate, shape: (N x 3)
- :param transform_matrix: transform_matrix of cam2 in cam1 coordinate,
- shape: (4, 4)
- :return: 3d points in cam2 coordinate, shape: (N x 3)
- """
- points_3d_cam1.shape = -1, 3
- points_3d_cam1_h = cv2.convertPointsToHomogeneous(points_3d_cam1).reshape(-1, 4)
- points_3d_cam2_h = np.matmul(transform_matrix, points_3d_cam1_h.T).T
- points_3d_cam2 = cv2.convertPointsFromHomogeneous(points_3d_cam2_h)
- points_3d_cam2.shape = -1, 3
- return points_3d_cam2
def _inverse_extrinsic(extrinsic):
    """Return the 6-element extrinsic (rvec|tvec) that undoes *extrinsic*."""
    rot, trans = _split_extrinsic(extrinsic)
    # Negating a Rodrigues rotation vector yields the transposed rotation
    # matrix, i.e. the inverse rotation.
    rot_inv = -rot
    # Inverse translation: t' = -R^T @ t
    rotation_matrix = cv2.Rodrigues(rot.copy())[0]
    trans_inv = np.matmul(-rotation_matrix.T, trans)
    return _merge_extrinsic(rot_inv, trans_inv)
def _convert_extrinsic_to_matrix(extrinsic):
    """Convert a 6-element extrinsic (rvec|tvec) into a 4x4 homogeneous matrix."""
    rvec, tvec = _split_extrinsic(extrinsic)
    matrix = np.eye(4, dtype=np.float64)
    # Top-left 3x3: rotation matrix from the Rodrigues vector;
    # last column: translation. Bottom row stays [0, 0, 0, 1].
    matrix[:3, :3] = cv2.Rodrigues(rvec.copy())[0]
    matrix[:3, 3] = tvec
    return matrix
- def _split_extrinsic(extrinsic):
- extrinsic = np.array(extrinsic.copy(), dtype=np.float64)
- assert extrinsic.size == 6
- rotation = extrinsic.ravel()[0:3]
- translation = extrinsic.ravel()[3:6]
- return rotation, translation
- def _merge_extrinsic(rotation, translation):
- assert rotation.size == 3 and translation.size == 3
- extrinsic = np.concatenate((rotation.ravel(), translation.ravel()))
- return extrinsic
- def _normalize_points(points_3d):
- return points_3d[:, :-1] / points_3d[:, -1][:, np.newaxis]
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement