###############################################################################
## PROJECT PARAMETERS ##
###############################################################################
# Configure your project parameters here.
#
# IMPORTANT:
# If a parameter is not found here, Pose2Sim will look for its value in the
# Config.toml file of the level above. This way, you can set global
# instructions for the Session and alter them for specific Participants or Trials.
#
# If you wish to override a parameter for a specific trial or participant,
# edit its Config.toml file by uncommenting the relevant section (e.g., [project])
# and setting the value you want (e.g., frame_range = [10,300]); or uncomment
# [filtering.butterworth] and set cut_off_frequency = 10, etc.
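#
# For instance, a trial-level Config.toml overriding only those two values
# (a minimal sketch; the keys shown are the only ones that need to appear) would be:
#
# [project]
# frame_range = [10,300]
#
# [filtering.butterworth]
# cut_off_frequency = 10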
[project]
multi_person = false # true for trials with multiple participants. If false, only the person with the lowest reprojection error is analyzed.
participant_height = 'auto' # 'auto', float (e.g., 1.72), or list of floats (e.g., [1.72, 1.40]) # meters # Only used for marker augmentation
participant_mass = 70.0 # float (e.g., 70.0), or list of floats (e.g., [70.0, 63.5]) # kg # Only used for marker augmentation and scaling; no impact on results unless you go on to compute forces rather than only kinematics
frame_rate = 'auto' # fps # int or 'auto'. If 'auto', the frame rate is read from the video (or defaults to 60 fps if you work with images)
frame_range = [] # For example [10,300], or [] for all frames.
## If cameras are not synchronized, this designates the frame range of the camera with the shortest recording time
## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
[pose]
vid_img_extension = 'mp4' # any video or image extension
pose_model = 'Body_with_feet' #With RTMLib:
# - Body_with_feet (default HALPE_26 model),
# - Whole_body_wrist (COCO_133: body + feet + 2 hand_points),
# - Whole_body (COCO_133: body + feet + hands),
# - Body (COCO_17). Marker augmentation won't work, but kinematic analysis will,
# - Hand (HAND_21, only lightweight mode. Potentially better results with Whole_body),
# - Face (FACE_106),
# - Animal (ANIMAL2D_17)
# /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, then refer to the documentation to convert the output files if needed
# /!\ For Face and Animal, use mode="""{dictionary}""", and find the corresponding .onnx model at https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose
#With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
#With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
#With mediapipe: BLAZEPOSE
#With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133
#With deeplabcut: CUSTOM. See example at the end of the file
mode = 'balanced' # 'lightweight', 'balanced', 'performance',
# or """{dictionary}""" (see below)
# A dictionary (WITHIN THREE DOUBLE QUOTES) allows you to manually select the person detection (if top_down approach) and/or pose estimation models (see https://github.com/Tau-J/rtmlib).
# Models can be local paths or URLs.
# Make sure the input_sizes are within square brackets, and that they are in the opposite order from the one in the model path (for example, it would be [192,256] for rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip).
# If your pose_model is not provided in skeletons.py, you may have to create your own (see example at the end of the file).
# Example, equivalent to mode='balanced':
# mode = """{'det_class':'YOLOX',
# 'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_m_8xb8-300e_humanart-c2c7a14a.zip',
# 'det_input_size':[640, 640],
# 'pose_class':'RTMPose',
# 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip',
# 'pose_input_size':[192,256]}"""
# Example with one-stage RTMO model (Requires pose_model = 'Body'):
# mode = """{'pose_class':'RTMO',
# 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip',
# 'pose_input_size':[640, 640]}"""
# Example with animal pose estimation:
# mode = """{'pose_class':'RTMPose',
# 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.zip',
# 'pose_input_size':[256,256]}"""
det_frequency = 4 # Run person detection only every N frames; in between, previously detected bounding boxes are tracked (pose estimation is still run on all frames).
# Must be 1 or greater; can be as high as you want in simple, uncrowded cases. Much faster, but might be less accurate.
device = 'auto' # 'auto', 'CPU', 'CUDA', 'MPS', 'ROCM'
backend = 'auto' # 'auto', 'openvino', 'onnxruntime', 'opencv'
tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower but more robust in difficult configurations
deepsort_params = """{'max_age':30, 'n_init':3, 'nms_max_overlap':0.8, 'max_cosine_distance':0.3, 'nn_budget':200, 'max_iou_distance':0.8}""" # """{dictionary between 3 double quotes}"""
# More robust in crowded scenes, but can be tricky to parametrize. More information at: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51
# Note: For faster and more robust tracking, use {'embedder_gpu': True, 'embedder':'torchreid'}, which uses the GPU and runs osnet_ain_x1_0 by default. Requires `pip install torch torchvision torchreid gdown tensorboard`
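# For example, a full assignment using the torchreid embedder might look like this
# (a sketch; parameter names follow deep_sort_realtime, adjust values to your setup):
# deepsort_params = """{'max_age':30, 'n_init':3, 'embedder':'torchreid', 'embedder_gpu': True}"""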
display_detection = true
overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
save_video = 'to_video' # 'to_video', 'to_images', 'none', or a list such as ['to_video', 'to_images']
output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
[synchronization]
display_sync_plots = true # true or false (lowercase)
keypoints_to_consider = 'all' # 'all' if all points should be considered, for example if the participant did not perform any particular sharp movement. In this case, the capture needs to be at least 5-10 seconds long
# ['RWrist', 'RElbow'] list of keypoint names if you want to specify keypoints with a sharp vertical motion.
approx_time_maxspeed = 'auto' # 'auto' if you want to consider the whole capture (default, slower if long sequences)
# [10.0, 2.0, 8.0, 11.0] list of times (seconds) if you want to specify the approximate time of a clear vertical event for each camera
time_range_around_maxspeed = 2.0 # Search for best correlation in the range [approx_time_maxspeed - time_range_around_maxspeed, approx_time_maxspeed + time_range_around_maxspeed]
likelihood_threshold = 0.4 # Keypoints whose likelihood is below likelihood_threshold are filtered out
filter_cutoff = 6 # time series are smoothed to get coherent time-lagged correlation
filter_order = 4
# Take heart, calibration is not that complicated once you get the hang of it!
[calibration]
calibration_type = 'convert' # 'convert' or 'calculate'
[calibration.convert]
convert_from = 'qualisys' # 'caliscope', 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
[calibration.convert.caliscope] # No parameter needed
[calibration.convert.qualisys]
binning_factor = 1 # Usually 1, except when filming in 540p, where it is usually 2
[calibration.convert.optitrack] # See readme for instructions
[calibration.convert.vicon] # No parameter needed
[calibration.convert.opencap] # No parameter needed
[calibration.convert.easymocap] # No parameter needed
[calibration.convert.biocv] # No parameter needed
[calibration.convert.anipose] # No parameter needed
[calibration.convert.freemocap] # No parameter needed
[calibration.calculate]
# Camera properties, theoretically need to be calculated only once in a camera lifetime
[calibration.calculate.intrinsics]
overwrite_intrinsics = false # set to false if you don't want to recalculate intrinsic parameters
show_detection_intrinsics = true # true or false (lowercase)
intrinsics_extension = 'jpg' # any video or image extension
extract_every_N_sec = 1 # if video, extract frames every N seconds (can be < 1)
intrinsics_corners_nb = [4,7]
intrinsics_square_size = 60 # mm
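# N.B.: Corner counts conventionally refer to the board's internal corners
# (one fewer than the number of squares along each side), e.g., [4,7] for a board of 5x8 squares.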
# Camera placements, need to be done before every session
[calibration.calculate.extrinsics]
calculate_extrinsics = true # true or false (lowercase)
extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
# 'board' should be large enough to be detected when laid on the floor. Not recommended.
# 'scene' involves manually clicking points of known coordinates in the scene. Usually more accurate if the points are spread out.
# 'keypoints' uses automatic pose estimation of a person freely walking and waving their arms in the scene. Slightly less accurate; requires synchronized cameras.
moving_cameras = false # Not implemented yet
[calibration.calculate.extrinsics.board]
show_reprojection_error = true # true or false (lowercase)
extrinsics_extension = 'png' # any video or image extension
extrinsics_corners_nb = [4,7] # [H,W] rather than [W,H]
extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
[calibration.calculate.extrinsics.scene]
show_reprojection_error = true # true or false (lowercase)
extrinsics_extension = 'png' # any video or image extension
# List of 3D coordinates to be manually labelled on images. The points may also all lie in a 2D plane.
# In meters -> unlike for intrinsics, NOT in mm!
object_coords_3d = [[-2.0, 0.3, 0.0],
[-2.0 , 0.0, 0.0],
[-2.0, 0.0, 0.05],
[-2.0, -0.3 , 0.0],
[0.0, 0.3, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.05],
[0.0, -0.3, 0.0]]
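# Example of a purely planar alternative (hypothetical coordinates): four points
# marked on the floor at the corners of a 1 m square, with z = 0 everywhere:
# object_coords_3d = [[0.0, 0.0, 0.0],
#                     [1.0, 0.0, 0.0],
#                     [1.0, 1.0, 0.0],
#                     [0.0, 1.0, 0.0]]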
[calibration.calculate.extrinsics.keypoints]
# Coming soon!
[personAssociation]
likelihood_threshold_association = 0.3
[personAssociation.single_person]
reproj_error_threshold_association = 20 # px
tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeletons.py
# and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' or 'RShoulder')
[personAssociation.multi_person]
reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
min_affinity = 0.2 # affinity below which a correspondence is ignored
[triangulation]
reproj_error_threshold_triangulation = 15 # px
likelihood_threshold_triangulation = 0.3
min_cameras_for_triangulation = 2
interpolation = 'linear' # 'linear', 'slinear', 'quadratic', 'cubic', or 'none'
# 'none' if you don't want to interpolate missing points
interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
fill_large_gaps_with = 'last_value' # 'last_value', 'nan', or 'zeros'
show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
handle_LR_swap = false # Better with few cameras (e.g., fewer than 4) and a risk of limb swapping (e.g., a camera facing the sagittal plane); otherwise slightly less accurate and slower
undistort_points = false # Better if the image is distorted (parallel lines look curved at the edges, or at least one distortion parameter > 10^-2), but unnecessary (and slightly slower) if distortion is low
make_c3d = true # save triangulated data in c3d format in addition to trc
[filtering]
type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
display_figures = true # true or false (lowercase)
make_c3d = true # also save triangulated data in c3d format
[filtering.butterworth]
order = 4
cut_off_frequency = 6 # Hz
[filtering.kalman]
# How much more do you trust the triangulation results (measurements) than the previous data (process, assuming constant acceleration)?
trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
smooth = true # should be true, unless you need real-time filtering
[filtering.butterworth_on_speed]
order = 4
cut_off_frequency = 10 # Hz
[filtering.gaussian]
sigma_kernel = 2 # px
[filtering.LOESS]
nb_values_used = 30 # = fraction of data used * nb frames
[filtering.median]
kernel_size = 9
[markerAugmentation]
## Requires the following markers: ["Neck", "RShoulder", "LShoulder", "RHip", "LHip", "RKnee", "LKnee",
## "RAnkle", "LAnkle", "RHeel", "LHeel", "RSmallToe", "LSmallToe",
## "RBigToe", "LBigToe", "RElbow", "LElbow", "RWrist", "LWrist"]
make_c3d = true # save triangulated data in c3d format in addition to trc
[kinematics]
use_augmentation = true # true or false (lowercase) # Set to true if you want to use the model with augmented markers
use_contacts_muscles = true # true or false (lowercase) # If true, contact spheres and muscles are added to the model
right_left_symmetry = true # true or false (lowercase) # Set to false only if you have good reasons to think the participant is not symmetrical (e.g. prosthetic limb)
default_height = 1.7 # meters # If automatic height calculation did not work, this value is used to scale the model
remove_individual_scaling_setup = true # true or false (lowercase) # If true, the individual scaling setup files are removed to avoid clutter
remove_individual_IK_setup = true # true or false (lowercase) # If true, the individual IK setup files are removed to avoid clutter
fastest_frames_to_remove_percent = 0.1 # Frames with high speed are considered outliers
close_to_zero_speed_m = 0.2 # Summed over all keypoints: about 50 px/frame or 0.2 m/frame
large_hip_knee_angles = 45 # Hip and knee angles below this value are considered imprecise
trimmed_extrema_percent = 0.5 # Proportion of the most extreme segment values to remove before calculating their mean
[logging]
use_custom_logging = false # if integrated in an API that already has logging
# CUSTOM skeleton
# If you use a model with different keypoints and/or different ordering
# Useful if you trained your own model, from DeepLabCut or MMPose for example.
# Make sure the ids are set in the right order and start from zero.
#
# If you want to perform inverse kinematics, you will also need to create an OpenSim model
# and add to its markerset the location where you expect the triangulated keypoints to be detected.
#
# In this example, CUSTOM reproduces the HALPE_26 skeleton (default skeletons are stored in skeletons.py).
# You can create as many custom skeletons as you want, just add them further down and rename them.
#
# Check your model hierarchy with:
# from anytree import Node, RenderTree
# for pre, _, node in RenderTree(model):
#     print(f'{pre}{node.name} id={node.id}')
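# where `model` is an anytree Node such as one of the default skeletons
# (assuming they can be imported from skeletons.py):
# from Pose2Sim.skeletons import HALPE_26 as model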
[pose.CUSTOM]
name = "Hip"
id = 19
[[pose.CUSTOM.children]]
name = "RHip"
id = 12
[[pose.CUSTOM.children.children]]
name = "RKnee"
id = 14
[[pose.CUSTOM.children.children.children]]
name = "RAnkle"
id = 16
[[pose.CUSTOM.children.children.children.children]]
name = "RBigToe"
id = 21
[[pose.CUSTOM.children.children.children.children.children]]
name = "RSmallToe"
id = 23
[[pose.CUSTOM.children.children.children.children]]
name = "RHeel"
id = 25
[[pose.CUSTOM.children]]
name = "LHip"
id = 11
[[pose.CUSTOM.children.children]]
name = "LKnee"
id = 13
[[pose.CUSTOM.children.children.children]]
name = "LAnkle"
id = 15
[[pose.CUSTOM.children.children.children.children]]
name = "LBigToe"
id = 20
[[pose.CUSTOM.children.children.children.children.children]]
name = "LSmallToe"
id = 22
[[pose.CUSTOM.children.children.children.children]]
name = "LHeel"
id = 24
[[pose.CUSTOM.children]]
name = "Neck"
id = 18
[[pose.CUSTOM.children.children]]
name = "Head"
id = 17
[[pose.CUSTOM.children.children.children]]
name = "Nose"
id = 0
[[pose.CUSTOM.children.children]]
name = "RShoulder"
id = 6
[[pose.CUSTOM.children.children.children]]
name = "RElbow"
id = 8
[[pose.CUSTOM.children.children.children.children]]
name = "RWrist"
id = 10
[[pose.CUSTOM.children.children]]
name = "LShoulder"
id = 5
[[pose.CUSTOM.children.children.children]]
name = "LElbow"
id = 7
[[pose.CUSTOM.children.children.children.children]]
name = "LWrist"
id = 9
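# A second custom skeleton would follow the same pattern under its own name,
# e.g. (a minimal, hypothetical sketch, kept commented out):
# [pose.MY_SKELETON]
# name = "Hip"
# id = 0
# [[pose.MY_SKELETON.children]]
# name = "Neck"
# id = 1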