Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- $ rg camera
- smplifyx/cmd_parser.py
- 101: parser.add_argument('--camera_type', type=str, default='persp',
- 103: help='The type of camera used')
- 168: help='Which joints to use for initializing the camera')
- 174: ' the initial depth of the camera. The format' +
- 236: ' z coordinate of the camera translation')
- smplifyx/main.py
- 38:from camera import create_camera
- 123: # Create the camera object
- 125: camera = create_camera(focal_length_x=focal_length,
- 130: if hasattr(camera, 'rotation'):
- 131: camera.rotation.requires_grad = False
- 178: camera = camera.to(device=device)
- 247: camera=camera,
- smplifyx/mesh_viewer.py
- 50: camera_pose = np.eye(4)
- 51: camera_pose[:3, 3] = np.array([0, 0, 3])
- 52: self.scene.add(pc, pose=camera_pose)
- smplifyx/camera.py
- 35:def create_camera(camera_type='persp', **kwargs):
- 36: if camera_type.lower() == 'persp':
- 39: raise ValueError('Unknown camera type: {}'.format(camera_type))
- 54: # the camera matrix
- 97: camera_mat = torch.zeros([self.batch_size, 2, 2],
- 99: camera_mat[:, 0, 0] = self.focal_length_x
- 100: camera_mat[:, 1, 1] = self.focal_length_y
- 102: camera_transform = transform_mat(self.rotation,
- 111: [camera_transform, points_h])
- 115: img_points = torch.einsum('bki,bji->bjk', [camera_mat, img_points]) \
- smplifyx/fitting.py
- 47: ''' Initializes the camera translation vector
- 57: the camera translation
- 59: The focal length of the camera
- 70: The vector with the estimated camera location
- 217: optimizer, body_model, camera=None,
- 246: total_loss = loss(body_model_output, camera=camera,
- 275: elif loss_type == 'camera_init':
- 365: def forward(self, body_model_output, camera, gt_joints, joints_conf,
- 369: projected_joints = camera(body_model_output.joints)
- 486: def forward(self, body_model_output, camera, gt_joints,
- 489: projected_joints = camera(body_model_output.joints)
- 501: camera.translation[:, 2] - self.trans_estimation[:, 2]).pow(2))
- smplifyx/fit_single_frame.py
- 52: camera,
- 266: # The indices of the joints used for the initialization of the camera
- 276: camera_loss = fitting.create_loss('camera_init',
- 281: camera_loss.trans_estimation[:] = init_t
- 314: camera_loss.reset_loss_weights({'data_weight': data_weight})
- 327: # Update the value of the translation of the camera as well as
- 330: camera.translation[:] = init_t.view_as(camera.translation)
- 331: camera.center[:] = torch.tensor([W, H], dtype=dtype) * 0.5
- 333: # Re-enable gradient calculation for the camera translation
- 334: camera.translation.requires_grad = True
- 336: camera_opt_params = [camera.translation, body_model.global_orient]
- 338: camera_optimizer, camera_create_graph = optim_factory.create_optimizer(
- 339: camera_opt_params,
- 343: fit_camera = monitor.create_fitting_closure(
- 344: camera_optimizer, body_model, camera, gt_joints,
- 345: camera_loss, create_graph=camera_create_graph,
- 350: # Step 1: Optimize over the torso joints the camera translation
- 352: # of the camera and the initial pose of the body model.
- 353: camera_init_start = time.time()
- 354: cam_init_loss_val = monitor.run_fitting(camera_optimizer,
- 355: fit_camera,
- 356: camera_opt_params, body_model,
- 365: time.time() - camera_init_start))
- 427: camera=camera, gt_joints=gt_joints,
- 467: result = {'camera_' + str(key): val.detach().cpu().numpy()
- 468: for key, val in camera.named_parameters()}
- 524: camera_center = camera.center.detach().cpu().numpy().squeeze()
- 525: camera_transl = camera.translation.detach().cpu().numpy().squeeze()
- 528: camera_transl[0] *= -1.0
- 530: camera_pose = np.eye(4)
- 531: camera_pose[:3, 3] = camera_transl
- 533: camera = pyrender.camera.IntrinsicsCamera(
- 535: cx=camera_center[0], cy=camera_center[1])
- 536: scene.add(camera, pose=camera_pose)
- 4938/31772MB(smplifyx)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement