diff --git a/README.md b/README.md index 282551ec0..8400e4241 100644 --- a/README.md +++ b/README.md @@ -2098,7 +2098,7 @@ Let us take a look at the structure of the code. First, tau_min, tau_max, and ta to -1, 1 and 0 if the integer `actuator_type` (a parameter of the `prepare_ocp` function) equals 1. In this case, the dynamics function used is `DynamicsFcn.TORQUE_ACTIVATIONS_DRIVEN`. -### The [trampo_quaternions.py](./bioptim/examples/torque_driven_ocp/trampo_quaternions.py) file +### The [example_quaternions.py](./bioptim/examples/torque_driven_ocp/example_quaternions.py) file This example uses a representation of a human body by a trunk_leg segment and two arms. It is designed to show how to use a model that has quaternions in their degrees of freedom. diff --git a/bioptim/__init__.py b/bioptim/__init__.py index ffdaa892b..982d99801 100644 --- a/bioptim/__init__.py +++ b/bioptim/__init__.py @@ -188,6 +188,7 @@ from .limits.path_conditions import BoundsList, InitialGuessList from .limits.fatigue_path_conditions import FatigueBounds, FatigueInitialGuess from .limits.penalty_controller import PenaltyController +from .limits.penalty_helpers import PenaltyHelpers from .misc.enums import ( Axis, Node, diff --git a/bioptim/dynamics/configure_new_variable.py b/bioptim/dynamics/configure_new_variable.py index 57b698a9d..ac5bc6ad0 100644 --- a/bioptim/dynamics/configure_new_variable.py +++ b/bioptim/dynamics/configure_new_variable.py @@ -418,9 +418,11 @@ def _declare_cx_and_plot(self): ) if not self.skip_plot: self.nlp.plot[f"{self.name}_states"] = CustomPlot( - lambda t0, phases_dt, node_idx, x, u, p, a: x[self.nlp.states.key_index(self.name), :] - if x.any() - else np.ndarray((cx[0][0].shape[0], 1)) * np.nan, + lambda t0, phases_dt, node_idx, x, u, p, a: ( + x[self.nlp.states.key_index(self.name), :] + if x.any() + else np.ndarray((cx[0][0].shape[0], 1)) * np.nan + ), plot_type=PlotType.INTEGRATED, axes_idx=self.axes_idx, legend=self.legend, @@ -453,15 +455,19 @@ def _declare_cx_and_plot(self): plot_type = PlotType.PLOT if self.nlp.control_type == ControlType.LINEAR_CONTINUOUS else PlotType.STEP if not self.skip_plot: self.nlp.plot[f"{self.name}_controls"] = CustomPlot( - lambda t0, phases_dt, node_idx, x, u, p, a: u[self.nlp.controls.key_index(self.name), :] - if u.any() - else np.ndarray((cx[0][0].shape[0], 1)) * np.nan, + lambda t0, phases_dt, node_idx, x, u, p, a: ( + u[self.nlp.controls.key_index(self.name), :] + if u.any() + else np.ndarray((cx[0][0].shape[0], 1)) * np.nan + ), plot_type=plot_type, axes_idx=self.axes_idx, legend=self.legend, - combine_to=f"{self.name}_states" - if self.as_states and self.combine_state_control_plot - else self.combine_name, + combine_to=( + f"{self.name}_states" + if self.as_states and self.combine_state_control_plot + else self.combine_name + ), ) if self.as_states_dot: @@ -601,9 +607,9 @@ def _manage_fatigue_to_new_variable( var_names_with_suffix[-1], name_elements, ocp, nlp, as_states, as_controls, skip_plot=True ) nlp.plot[f"{var_names_with_suffix[-1]}_controls"] = CustomPlot( - lambda t0, phases_dt, node_idx, x, u, p, a, key: u[nlp.controls.key_index(key), :] - if u.any() - else np.ndarray((len(name_elements), 1)) * np.nan, + lambda t0, phases_dt, node_idx, x, u, p, a, key: ( + u[nlp.controls.key_index(key), :] if u.any() else np.ndarray((len(name_elements), 1)) * np.nan + ), plot_type=PlotType.STEP, combine_to=control_plot_name, key=var_names_with_suffix[-1], @@ -612,9 +618,9 @@ def _manage_fatigue_to_new_variable( elif i == 0: 
NewVariableConfiguration(f"{name}", name_elements, ocp, nlp, as_states, as_controls, skip_plot=True) nlp.plot[f"{name}_controls"] = CustomPlot( - lambda t0, phases_dt, node_idx, x, u, p, a, key: u[nlp.controls.key_index(key), :] - if u.any() - else np.ndarray((len(name_elements), 1)) * np.nan, + lambda t0, phases_dt, node_idx, x, u, p, a, key: ( + u[nlp.controls.key_index(key), :] if u.any() else np.ndarray((len(name_elements), 1)) * np.nan + ), plot_type=PlotType.STEP, combine_to=control_plot_name, key=f"{name}", @@ -625,9 +631,9 @@ def _manage_fatigue_to_new_variable( name_tp = f"{var_names_with_suffix[-1]}_{params}" NewVariableConfiguration(name_tp, name_elements, ocp, nlp, True, False, skip_plot=True) nlp.plot[name_tp] = CustomPlot( - lambda t0, phases_dt, node_idx, x, u, p, a, key, mod: mod * x[nlp.states.key_index(key), :] - if x.any() - else np.ndarray((len(name_elements), 1)) * np.nan, + lambda t0, phases_dt, node_idx, x, u, p, a, key, mod: ( + mod * x[nlp.states.key_index(key), :] if x.any() else np.ndarray((len(name_elements), 1)) * np.nan + ), plot_type=PlotType.INTEGRATED, combine_to=fatigue_plot_name, key=name_tp, diff --git a/bioptim/dynamics/configure_problem.py b/bioptim/dynamics/configure_problem.py index 2f3fc43ed..8ae7a12db 100644 --- a/bioptim/dynamics/configure_problem.py +++ b/bioptim/dynamics/configure_problem.py @@ -298,18 +298,157 @@ def torque_driven( phase=nlp.phase_idx, ) + @staticmethod + def torque_driven_free_floating_base( + ocp, + nlp, + with_contact: bool = False, + with_passive_torque: bool = False, + with_ligament: bool = False, + with_friction: bool = False, + external_forces: list = None, + ): + """ + Configure the dynamics for a torque driven program with a free floating base. + This version of the torque driven dynamics avoids defining a mapping to force the root to generate null forces and torques. + (states are q_root, q_joints, qdot_root, and qdot_joints, controls are tau_joints) + Please note that it was not meant to be used with quaternions yet. 
+ + Parameters + ---------- + ocp: OptimalControlProgram + A reference to the ocp + nlp: NonLinearProgram + A reference to the phase + with_contact: bool + If the dynamic with contact should be used + with_passive_torque: bool + If the dynamic with passive torque should be used + with_ligament: bool + If the dynamic with ligament should be used + with_friction: bool + If the dynamic with joint friction should be used (friction = coefficients * qdot) + external_forces: list[Any] + A list of external forces + """ + + _check_contacts_in_biorbd_model(with_contact, nlp.model.nb_contacts, nlp.phase_idx) + _check_external_forces_format(external_forces, nlp.ns, nlp.phase_idx) + _check_external_forces_and_phase_dynamics(external_forces, nlp.phase_dynamics, nlp.phase_idx) + + nb_q = nlp.model.nb_q + nb_qdot = nlp.model.nb_qdot + nb_root = nlp.model.nb_root + + # Declared rigidbody states and controls + name_q_roots = [str(i) for i in range(nb_root)] + ConfigureProblem.configure_new_variable( + "q_roots", + name_q_roots, + ocp, + nlp, + as_states=True, + as_controls=False, + as_states_dot=False, + ) + + name_q_joints = [str(i) for i in range(nb_root, nb_q)] + ConfigureProblem.configure_new_variable( + "q_joints", + name_q_joints, + ocp, + nlp, + as_states=True, + as_controls=False, + as_states_dot=False, + ) + + ConfigureProblem.configure_new_variable( + "qdot_roots", + name_q_roots, + ocp, + nlp, + as_states=True, + as_controls=False, + as_states_dot=True, + ) + + name_qdot_joints = [str(i) for i in range(nb_root, nb_qdot)] + ConfigureProblem.configure_new_variable( + "qdot_joints", + name_qdot_joints, + ocp, + nlp, + as_states=True, + as_controls=False, + as_states_dot=True, + ) + + ConfigureProblem.configure_new_variable( + "qddot_roots", + name_q_roots, + ocp, + nlp, + as_states=False, + as_controls=False, + as_states_dot=True, + ) + + ConfigureProblem.configure_new_variable( + "qddot_joints", + name_qdot_joints, + ocp, + nlp, + as_states=False, + as_controls=False, + as_states_dot=True, + ) + + ConfigureProblem.configure_new_variable( + "tau_joints", + name_qdot_joints, + ocp, + nlp, + as_states=False, + as_controls=True, + as_states_dot=False, + ) + + # TODO: add implicit constraints + soft contacts + fatigue + + # Configure the actual ODE of the dynamics + if nlp.dynamics_type.dynamic_function: + ConfigureProblem.configure_dynamics_function(ocp, nlp, DynamicsFunctions.custom) + else: + ConfigureProblem.configure_dynamics_function( + ocp, + nlp, + DynamicsFunctions.torque_driven_free_floating_base, + with_contact=with_contact, + with_passive_torque=with_passive_torque, + with_ligament=with_ligament, + with_friction=with_friction, + external_forces=external_forces, + ) + + # Configure the contact forces + if with_contact: + ConfigureProblem.configure_contact_function( + ocp, nlp, DynamicsFunctions.forces_from_torque_driven, external_forces=external_forces + ) + @staticmethod def stochastic_torque_driven( ocp, nlp, problem_type, with_contact: bool = False, - with_friction: bool = True, + with_friction: bool = False, with_cholesky: bool = False, initial_matrix: DM = None, ): """ - Configure the dynamics for a torque driven program (states are q and qdot, controls are tau) + Configure the dynamics for a torque driven stochastic program (states are q and qdot, controls are tau) Parameters ---------- @@ -375,6 +514,80 @@ def stochastic_torque_driven( with_friction=with_friction, ) + @staticmethod + def stochastic_torque_driven_free_floating_base( + ocp, + nlp, + problem_type, + with_contact: bool = 
False, + with_friction: bool = False, + with_cholesky: bool = False, + initial_matrix: DM = None, + ): + """ + Configure the dynamics for a stochastic torque driven program with a free floating base. + (states are q_roots, q_joints, qdot_roots, and qdot_joints, controls are tau_joints) + + Parameters + ---------- + ocp: OptimalControlProgram + A reference to the ocp + nlp: NonLinearProgram + A reference to the phase + with_contact: bool + If the dynamic with contact should be used + with_friction: bool + If the dynamic with joint friction should be used (friction = coefficient * qdot) + """ + n_noised_tau = nlp.model.n_noised_controls + n_noise = nlp.model.motor_noise_magnitude.shape[0] + nlp.model.sensory_noise_magnitude.shape[0] + n_noised_states = nlp.model.n_noised_states + + # Stochastic variables + ConfigureProblem.configure_stochastic_k( + ocp, nlp, n_noised_controls=n_noised_tau, n_references=nlp.model.n_references + ) + ConfigureProblem.configure_stochastic_ref(ocp, nlp, n_references=nlp.model.n_references) + n_collocation_points = 1 + if isinstance(problem_type, SocpType.COLLOCATION): + n_collocation_points += problem_type.polynomial_degree + ConfigureProblem.configure_stochastic_m( + ocp, nlp, n_noised_states=n_noised_states, n_collocation_points=n_collocation_points + ) + + if isinstance(problem_type, SocpType.TRAPEZOIDAL_EXPLICIT): + if initial_matrix is None: + raise RuntimeError( + "The initial value for the covariance matrix must be provided for TRAPEZOIDAL_EXPLICIT" + ) + ConfigureProblem.configure_stochastic_cov_explicit( + ocp, nlp, n_noised_states=n_noised_states, initial_matrix=initial_matrix + ) + else: + if with_cholesky: + ConfigureProblem.configure_stochastic_cholesky_cov(ocp, nlp, n_noised_states=n_noised_states) + else: + ConfigureProblem.configure_stochastic_cov_implicit(ocp, nlp, n_noised_states=n_noised_states) + + if isinstance(problem_type, SocpType.TRAPEZOIDAL_IMPLICIT): + ConfigureProblem.configure_stochastic_a(ocp, nlp, n_noised_states=n_noised_states) + ConfigureProblem.configure_stochastic_c(ocp, nlp, n_noised_states=n_noised_states, n_noise=n_noise) + + ConfigureProblem.torque_driven_free_floating_base( + ocp=ocp, + nlp=nlp, + with_contact=with_contact, + with_friction=with_friction, + ) + + ConfigureProblem.configure_dynamics_function( + ocp, + nlp, + DynamicsFunctions.stochastic_torque_driven_free_floating_base, + with_contact=with_contact, + with_friction=with_friction, + ) + @staticmethod def torque_derivative_driven( ocp, @@ -1576,7 +1789,9 @@ class DynamicsFcn(FcnEnum): """ TORQUE_DRIVEN = (ConfigureProblem.torque_driven,) + TORQUE_DRIVEN_FREE_FLOATING_BASE = (ConfigureProblem.torque_driven_free_floating_base,) STOCHASTIC_TORQUE_DRIVEN = (ConfigureProblem.stochastic_torque_driven,) + STOCHASTIC_TORQUE_DRIVEN_FREE_FLOATING_BASE = (ConfigureProblem.stochastic_torque_driven_free_floating_base,) TORQUE_DERIVATIVE_DRIVEN = (ConfigureProblem.torque_derivative_driven,) TORQUE_ACTIVATIONS_DRIVEN = (ConfigureProblem.torque_activations_driven,) JOINTS_ACCELERATION_DRIVEN = (ConfigureProblem.joints_acceleration_driven,) diff --git a/bioptim/dynamics/dynamics_functions.py b/bioptim/dynamics/dynamics_functions.py index e6f63c22d..2373a6fe3 100644 --- a/bioptim/dynamics/dynamics_functions.py +++ b/bioptim/dynamics/dynamics_functions.py @@ -3,9 +3,8 @@ from ..misc.enums import RigidBodyDynamics, DefectType from .fatigue.fatigue_dynamics import FatigueList from ..optimization.optimization_variable import OptimizationVariable -from 
..optimization.non_linear_program import NonLinearProgram from .dynamics_evaluation import DynamicsEvaluation -from ..models.protocols.stochastic_biomodel import StochasticBioModel +from ..misc.mapping import BiMapping class DynamicsFunctions: @@ -189,6 +188,83 @@ def torque_driven( return DynamicsEvaluation(dxdt, defects) + @staticmethod + def torque_driven_free_floating_base( + time: MX.sym, + states: MX.sym, + controls: MX.sym, + parameters: MX.sym, + algebraic_states: MX.sym, + nlp, + with_contact: bool, + with_passive_torque: bool, + with_ligament: bool, + with_friction: bool, + external_forces: list = None, + ) -> DynamicsEvaluation: + """ + Forward dynamics driven by joint torques without actuation of the free floating base, optional external forces can be declared. + + Parameters + ---------- + time: MX.sym + The time of the system + states: MX.sym + The state of the system + controls: MX.sym + The controls of the system + parameters: MX.sym + The parameters of the system + algebraic_states: MX.sym + The algebraic states of the system + nlp: NonLinearProgram + The definition of the system + with_contact: bool + If the dynamic with contact should be used + with_passive_torque: bool + If the dynamic with passive torque should be used + with_ligament: bool + If the dynamic with ligament should be used + with_friction: bool + If the dynamic with friction should be used + external_forces: list[Any] + The external forces + + Returns + ---------- + DynamicsEvaluation + The derivative of the states and the defects of the implicit dynamics + """ + + q_roots = DynamicsFunctions.get(nlp.states["q_roots"], states) + q_joints = DynamicsFunctions.get(nlp.states["q_joints"], states) + qdot_roots = DynamicsFunctions.get(nlp.states["qdot_roots"], states) + qdot_joints = DynamicsFunctions.get(nlp.states["qdot_joints"], states) + tau_joints = DynamicsFunctions.get(nlp.controls["tau_joints"], controls) + + q_full = vertcat(q_roots, q_joints) + qdot_full = vertcat(qdot_roots, qdot_joints) + dq = DynamicsFunctions.compute_qdot(nlp, q_full, qdot_full) + n_q = nlp.model.nb_q + n_qdot = nlp.model.nb_qdot + + tau_joints = ( + tau_joints + nlp.model.passive_joint_torque(q_full, qdot_full) if with_passive_torque else tau_joints + ) + tau_joints = tau_joints + nlp.model.ligament_joint_torque(q_full, qdot_full) if with_ligament else tau_joints + tau_joints = tau_joints + nlp.model.friction_coefficients @ qdot_joints if with_friction else tau_joints + + tau_full = vertcat(MX.zeros(nlp.model.nb_root), tau_joints) + + ddq = DynamicsFunctions.forward_dynamics(nlp, q_full, qdot_full, tau_full, with_contact, external_forces) + dxdt = MX(n_q + n_qdot, ddq.shape[1]) + dxdt[:n_q, :] = horzcat(*[dq for _ in range(ddq.shape[1])]) + dxdt[n_q:, :] = ddq + + defects = None + + return DynamicsEvaluation(dxdt, defects) + @staticmethod def stochastic_torque_driven( time: MX.sym, @@ -201,7 +277,7 @@ def stochastic_torque_driven( with_friction: bool, ) -> DynamicsEvaluation: """ - Forward dynamics subject to motor and sensory noise driven by joint torques, optional external forces can be declared. + Forward dynamics subject to motor and sensory noise driven by torques, optional external forces can be declared. 
Parameters ---------- @@ -214,7 +290,7 @@ def stochastic_torque_driven( parameters: MX.sym The parameters of the system algebraic_states: MX.sym - The algebraic states of the system + The algebraic states variables of the system nlp: NonLinearProgram The definition of the system with_contact: bool @@ -232,24 +308,16 @@ def stochastic_torque_driven( qdot = DynamicsFunctions.get(nlp.states["qdot"], states) tau = DynamicsFunctions.get(nlp.controls["tau"], controls) - ref = DynamicsFunctions.get(nlp.algebraic_states["ref"], algebraic_states) - k = DynamicsFunctions.get(nlp.algebraic_states["k"], algebraic_states) - k_matrix = StochasticBioModel.reshape_to_matrix(k, nlp.model.matrix_shape_k) - - sensory_input = nlp.model.sensory_reference(states, controls, parameters, algebraic_states, nlp) - - mapped_motor_noise = parameters[nlp.parameters["motor_noise"].index] - mapped_sensory_noise = parameters[nlp.parameters["sensory_noise"].index] - mapped_sensory_feedback_torque = nlp.model.compute_torques_from_noise_and_feedback( - mapped_sensory_noise, k_matrix, sensory_input, ref + tau += nlp.model.compute_torques_from_noise_and_feedback( + nlp=nlp, + time=time, + states=states, + controls=controls, + parameters=parameters, + algebraic_states=algebraic_states, + sensory_noise=nlp.parameters["sensory_noise"].mx, + motor_noise=nlp.parameters["motor_noise"].mx, ) - - if "tau" in nlp.model.motor_noise_mapping.keys(): - mapped_motor_noise = nlp.model.motor_noise_mapping["tau"].to_second.map(nlp.parameters["motor_noise"].mx) - mapped_sensory_feedback_torque = nlp.model.motor_noise_mapping["tau"].to_second.map( - mapped_sensory_feedback_torque - ) - tau += mapped_motor_noise + mapped_sensory_feedback_torque tau = tau + nlp.model.friction_coefficients @ qdot if with_friction else tau dq = DynamicsFunctions.compute_qdot(nlp, q, qdot) @@ -261,7 +329,78 @@ def stochastic_torque_driven( return DynamicsEvaluation(dxdt=dxdt, defects=None) @staticmethod - def __get_fatigable_tau(nlp: NonLinearProgram, states: MX, controls: MX, fatigue: FatigueList) -> MX: + def stochastic_torque_driven_free_floating_base( + time: MX.sym, + states: MX.sym, + controls: MX.sym, + parameters: MX.sym, + algebraic_states: MX.sym, + nlp, + with_contact: bool, + with_friction: bool, + ) -> DynamicsEvaluation: + """ + Forward dynamics subject to motor and sensory noise driven by joint torques, optional external forces can be declared. 
+ + Parameters + ---------- + time: MX.sym + The time + states: MX.sym + The state of the system + controls: MX.sym + The controls of the system + parameters: MX.sym + The parameters of the system + algebraic_states: MX.sym + The algebraic states of the system + nlp: NonLinearProgram + The definition of the system + with_contact: bool + If the dynamic with contact should be used + with_friction: bool + If the dynamic with friction should be used + + Returns + ---------- + DynamicsEvaluation + The derivative of the states and the defects of the implicit dynamics + """ + + q_roots = DynamicsFunctions.get(nlp.states["q_roots"], states) + q_joints = DynamicsFunctions.get(nlp.states["q_joints"], states) + qdot_roots = DynamicsFunctions.get(nlp.states["qdot_roots"], states) + qdot_joints = DynamicsFunctions.get(nlp.states["qdot_joints"], states) + tau_joints = DynamicsFunctions.get(nlp.controls["tau_joints"], controls) + + q_full = vertcat(q_roots, q_joints) + qdot_full = vertcat(qdot_roots, qdot_joints) + n_q = q_full.shape[0] + + tau_joints += nlp.model.compute_torques_from_noise_and_feedback( + nlp=nlp, + time=time, + states=states, + controls=controls, + parameters=parameters, + algebraic_states=algebraic_states, + motor_noise=nlp.parameters["motor_noise"].mx, + sensory_noise=nlp.parameters["sensory_noise"].mx, + ) + tau_joints = tau_joints + nlp.model.friction_coefficients @ qdot_joints if with_friction else tau_joints + + tau_full = vertcat(MX.zeros(nlp.model.nb_root), tau_joints) + + dq = DynamicsFunctions.compute_qdot(nlp, q_full, qdot_full) + ddq = DynamicsFunctions.forward_dynamics(nlp, q_full, qdot_full, tau_full, with_contact) + dxdt = MX(nlp.states.shape, ddq.shape[1]) + dxdt[:n_q, :] = horzcat(*[dq for _ in range(ddq.shape[1])]) + dxdt[n_q:, :] = ddq + + return DynamicsEvaluation(dxdt=dxdt, defects=None) + + @staticmethod + def __get_fatigable_tau(nlp, states: MX, controls: MX, fatigue: FatigueList) -> MX: """ Apply the forward dynamics including (or not) the torque fatigue @@ -280,9 +419,8 @@ def __get_fatigable_tau(nlp: NonLinearProgram, states: MX, controls: MX, fatigue ------- The generalized accelerations """ - tau_var, tau_mx = (nlp.controls, controls) if "tau" in nlp.controls else (nlp.states, states) - tau = DynamicsFunctions.get(tau_var["tau"], tau_mx) + tau = nlp.get_var_from_states_or_controls("tau", states, controls) if fatigue is not None and "tau" in fatigue: tau_fatigue = fatigue["tau"] tau_suffix = fatigue["tau"].suffix @@ -504,13 +642,9 @@ def forces_from_torque_driven( The contact forces that ensure no acceleration at these contact points """ - q_nlp, q_var = (nlp.states["q"], states) if "q" in nlp.states else (nlp.controls["q"], controls) - qdot_nlp, qdot_var = (nlp.states["qdot"], states) if "qdot" in nlp.states else (nlp.controls["qdot"], controls) - tau_nlp, tau_var = (nlp.states["tau"], states) if "tau" in nlp.states else (nlp.controls["tau"], controls) - - q = DynamicsFunctions.get(q_nlp, q_var) - qdot = DynamicsFunctions.get(qdot_nlp, qdot_var) - tau = DynamicsFunctions.get(tau_nlp, tau_var) + q = nlp.get_var_from_states_or_controls("q", states, controls) + qdot = nlp.get_var_from_states_or_controls("qdot", states, controls) + tau = nlp.get_var_from_states_or_controls("tau", states, controls) tau = tau + nlp.model.passive_joint_torque(q, qdot) if with_passive_torque else tau tau = tau + nlp.model.ligament_joint_torque(q, qdot) if with_ligament else tau @@ -557,13 +691,9 @@ def forces_from_torque_activation_driven( MX.sym The contact forces that ensure no 
acceleration at these contact points """ - - q_nlp, q_var = (nlp.states["q"], states) if "q" in nlp.states else (nlp.controls["q"], controls) - qdot_nlp, qdot_var = (nlp.states["qdot"], states) if "qdot" in nlp.states else (nlp.controls["qdot"], controls) - tau_nlp, tau_var = (nlp.states["tau"], states) if "tau" in nlp.states else (nlp.controls["tau"], controls) - q = DynamicsFunctions.get(q_nlp, q_var) - qdot = DynamicsFunctions.get(qdot_nlp, qdot_var) - tau_activations = DynamicsFunctions.get(tau_nlp, tau_var) + q = nlp.get_var_from_states_or_controls("q", states, controls) + qdot = nlp.get_var_from_states_or_controls("qdot", states, controls) + tau_activations = nlp.get_var_from_states_or_controls("tau", states, controls) tau = nlp.model.torque(tau_activations, q, qdot) tau = tau + nlp.model.passive_joint_torque(q, qdot) if with_passive_torque else tau tau = tau + nlp.model.ligament_joint_torque(q, qdot) if with_ligament else tau @@ -624,14 +754,12 @@ def muscles_driven( The derivative of the states and the defects of the implicit dynamics """ - q = DynamicsFunctions.get(nlp.states["q"], states) - qdot = DynamicsFunctions.get(nlp.states["qdot"], states) + q = nlp.get_var_from_states_or_controls("q", states, controls) + qdot = nlp.get_var_from_states_or_controls("qdot", states, controls) residual_tau = ( DynamicsFunctions.__get_fatigable_tau(nlp, states, controls, fatigue) if with_residual_torque else None ) - - mus_act_nlp, mus_act = (nlp.states, states) if "muscles" in nlp.states else (nlp.controls, controls) - mus_activations = DynamicsFunctions.get(mus_act_nlp["muscles"], mus_act) + mus_activations = nlp.get_var_from_states_or_controls("muscles", states, controls) fatigue_states = None if fatigue is not None and "muscles" in fatigue: mus_fatigue = fatigue["muscles"] @@ -758,12 +886,10 @@ def forces_from_muscle_driven( The contact forces that ensure no acceleration at these contact points """ - q = DynamicsFunctions.get(nlp.states["q"], states) - qdot = DynamicsFunctions.get(nlp.states["qdot"], states) - residual_tau = DynamicsFunctions.get(nlp.controls["tau"], controls) if "tau" in nlp.controls else None - - mus_act_nlp, mus_act = (nlp.states, states) if "muscles" in nlp.states else (nlp.controls, controls) - mus_activations = DynamicsFunctions.get(mus_act_nlp["muscles"], mus_act) + q = nlp.get_var_from_states_or_controls("q", states, controls) + qdot = nlp.get_var_from_states_or_controls("qdot", states, controls) + residual_tau = nlp.get_var_from_states_or_controls("tau", states, controls) if "tau" in nlp.controls else None + mus_activations = nlp.get_var_from_states_or_controls("muscles", states, controls) muscles_tau = DynamicsFunctions.compute_tau_from_muscle(nlp, q, qdot, mus_activations) tau = muscles_tau + residual_tau if residual_tau is not None else muscles_tau @@ -810,9 +936,9 @@ def joints_acceleration_driven( if rigidbody_dynamics != RigidBodyDynamics.ODE: raise NotImplementedError("Implicit dynamics not implemented yet.") - q = DynamicsFunctions.get(nlp.states["q"], states) - qdot = DynamicsFunctions.get(nlp.states["qdot"], states) - qddot_joints = DynamicsFunctions.get(nlp.controls["qddot_joints"], controls) + q = nlp.get_var_from_states_or_controls("q", states, controls) + qdot = nlp.get_var_from_states_or_controls("qdot", states, controls) + qddot_joints = nlp.get_var_from_states_or_controls("qddot", states, controls) qddot_root = nlp.model.forward_dynamics_free_floating_base(q, qdot, qddot_joints) qddot_reordered = nlp.model.reorder_qddot_root_joints(qddot_root, 
qddot_joints)
@@ -840,13 +966,14 @@ def joints_acceleration_driven(
             )
         )
 
-        defects[
-            qdot_mapped.shape[0] : (qdot_mapped.shape[0] + qddot_root_mapped.shape[0]), :
-        ] = floating_base_constraint
-        defects[
-            (qdot_mapped.shape[0] + qddot_root_mapped.shape[0]) :, :
-        ] = qddot_joints_mapped - nlp.variable_mappings["qddot_joints"].to_first.map(
-            DynamicsFunctions.get(nlp.states_dot["qddot_joints"], nlp.states_dot.mx_reduced)
+        defects[qdot_mapped.shape[0] : (qdot_mapped.shape[0] + qddot_root_mapped.shape[0]), :] = (
+            floating_base_constraint
+        )
+        defects[(qdot_mapped.shape[0] + qddot_root_mapped.shape[0]) :, :] = (
+            qddot_joints_mapped
+            - nlp.variable_mappings["qddot_joints"].to_first.map(
+                DynamicsFunctions.get(nlp.states_dot["qddot_joints"], nlp.states_dot.mx_reduced)
+            )
         )
 
         return DynamicsEvaluation(dxdt=vertcat(qdot_mapped, qddot_mapped), defects=defects)
@@ -889,7 +1016,7 @@ def apply_parameters(parameters: MX.sym, nlp):
             param.function[0](nlp.model, parameters[param.index], **param.params)
 
     @staticmethod
-    def compute_qdot(nlp: NonLinearProgram, q: MX | SX, qdot: MX | SX):
+    def compute_qdot(nlp, q: MX | SX, qdot: MX | SX):
         """
         Easy accessor to derivative of q
 
@@ -907,12 +1034,24 @@ def compute_qdot(nlp: NonLinearProgram, q: MX | SX, qdot: MX | SX):
         The derivative of q
         """
 
-        q_nlp = nlp.states["q"] if "q" in nlp.states else nlp.controls["q"]
-        return q_nlp.mapping.to_first.map(nlp.model.reshape_qdot(q, qdot))
+        if "q" in nlp.states:
+            mapping = nlp.states["q"].mapping
+        elif "q_roots" in nlp.states and "q_joints" in nlp.states:
+            mapping = BiMapping(
+                to_first=list(nlp.states["q_roots"].mapping.to_first.map_idx)
+                + [i + nlp.model.nb_root for i in nlp.states["q_joints"].mapping.to_first.map_idx],
+                to_second=list(nlp.states["q_roots"].mapping.to_second.map_idx)
+                + [i + nlp.model.nb_root for i in nlp.states["q_joints"].mapping.to_second.map_idx],
+            )
+        elif "q" in nlp.controls:
+            mapping = nlp.controls["q"].mapping
+        else:
+            raise RuntimeError("Your q key combination was not found in states or controls")
+        return mapping.to_first.map(nlp.model.reshape_qdot(q, qdot))
 
     @staticmethod
     def forward_dynamics(
-        nlp: NonLinearProgram,
+        nlp,
         q: MX | SX,
         qdot: MX | SX,
         tau: MX | SX,
@@ -940,7 +1079,12 @@ def forward_dynamics(
        -------
        The derivative of qdot
        """
-        qdot_var = nlp.states["qdot"] if "qdot" in nlp.states else nlp.controls["qdot"]
+        if "qdot" in nlp.states:
+            qdot_var_mapping = nlp.states["qdot"].mapping.to_first
+        elif "qdot" in nlp.controls:
+            qdot_var_mapping = nlp.controls["qdot"].mapping.to_first
+        else:
+            qdot_var_mapping = BiMapping([i for i in range(qdot.shape[0])], [i for i in range(qdot.shape[0])]).to_first
 
         if external_forces is None:
             if with_contact:
                 qddot = nlp.model.constrained_forward_dynamics(q, qdot, tau)
             else:
                 qddot = nlp.model.forward_dynamics(q, qdot, tau)
-            return qdot_var.mapping.to_first.map(qddot)
+            return qdot_var_mapping.map(qddot)
         else:
-            dxdt = MX(len(qdot_var.mapping.to_first), nlp.ns)
+            dxdt = MX(len(qdot_var_mapping), nlp.ns)
             # Todo: Should be added to pass f_ext in controls (as a symoblic value)
             # this would avoid to create multiple equations of motions per node
             for i, f_ext in enumerate(external_forces):
                 if with_contact:
                     qddot = nlp.model.constrained_forward_dynamics(q, qdot, tau, f_ext)
                 else:
                     qddot = nlp.model.forward_dynamics(q, qdot, tau, f_ext)
-                dxdt[:, i] = qdot_var.mapping.to_first.map(qddot)
+                dxdt[:, i] = qdot_var_mapping.map(qddot)
             return dxdt
 
     @staticmethod
     def inverse_dynamics(
-        nlp: NonLinearProgram,
+        nlp,
         q: MX | SX,
         qdot: MX | SX,
         qddot: MX
| SX, @@ -1010,7 +1154,7 @@ def inverse_dynamics( return tau # We ignore on purpose the mapping to keep zeros in the defects of the dynamic. @staticmethod - def compute_muscle_dot(nlp: NonLinearProgram, muscle_excitations: MX | SX): + def compute_muscle_dot(nlp, muscle_excitations: MX | SX): """ Easy accessor to derivative of muscle activations @@ -1030,7 +1174,7 @@ def compute_muscle_dot(nlp: NonLinearProgram, muscle_excitations: MX | SX): @staticmethod def compute_tau_from_muscle( - nlp: NonLinearProgram, + nlp, q: MX | SX, qdot: MX | SX, muscle_activations: MX | SX, @@ -1072,7 +1216,7 @@ def holonomic_torque_driven( controls: MX.sym, parameters: MX.sym, algebraic_states: MX.sym, - nlp: NonLinearProgram, + nlp, external_forces: list = None, ) -> DynamicsEvaluation: """ diff --git a/bioptim/dynamics/ode_solver.py b/bioptim/dynamics/ode_solver.py index d7e4f1e23..8e74ae1fe 100644 --- a/bioptim/dynamics/ode_solver.py +++ b/bioptim/dynamics/ode_solver.py @@ -204,9 +204,11 @@ def initialize_integrator(self, ocp, nlp, dynamics_index: int, node_index: int, "param": self.param_ode(nlp), "ode": nlp.dynamics_func[dynamics_index], # TODO this actually checks "not nlp.implicit_dynamics_func" (or that nlp.implicit_dynamics_func == []) - "implicit_ode": nlp.implicit_dynamics_func[dynamics_index] - if len(nlp.implicit_dynamics_func) > 0 - else nlp.implicit_dynamics_func, + "implicit_ode": ( + nlp.implicit_dynamics_func[dynamics_index] + if len(nlp.implicit_dynamics_func) > 0 + else nlp.implicit_dynamics_func + ), } return nlp.ode_solver.integrator(ode, ode_opt) diff --git a/bioptim/examples/__main__.py b/bioptim/examples/__main__.py index 54fdfbfea..d4c9a0a31 100644 --- a/bioptim/examples/__main__.py +++ b/bioptim/examples/__main__.py @@ -122,7 +122,7 @@ ("spring load", "spring_load.py"), ("Track markers 2D pendulum", "track_markers_2D_pendulum.py"), ("Track markers with torque actuators", "track_markers_with_torque_actuators.py"), - ("Trampo quaternions", "trampo_quaternions.py"), + ("quaternions", "example_quaternions.py"), ("Soft contact", "example_soft_contact.py"), ] ), diff --git a/bioptim/examples/custom_model/custom_package/custom_dynamics.py b/bioptim/examples/custom_model/custom_package/custom_dynamics.py index e502723a3..6d50ea7e3 100644 --- a/bioptim/examples/custom_model/custom_package/custom_dynamics.py +++ b/bioptim/examples/custom_model/custom_package/custom_dynamics.py @@ -3,6 +3,7 @@ but it is possible to use bioptim without biorbd. This is an example of how to use bioptim with a custom dynamics. """ + from casadi import MX, vertcat from bioptim import ( diff --git a/bioptim/examples/custom_model/custom_package/my_model.py b/bioptim/examples/custom_model/custom_package/my_model.py index 8bd68ed43..8ee6c8e4b 100644 --- a/bioptim/examples/custom_model/custom_package/my_model.py +++ b/bioptim/examples/custom_model/custom_package/my_model.py @@ -2,6 +2,9 @@ This script implements a custom model to work with bioptim. Bioptim has a deep connection with biorbd, but it is possible to use bioptim without biorbd. This is an example of how to use bioptim with a custom model. 
""" + +from typing import Callable + import numpy as np from casadi import sin, MX from typing import Callable diff --git a/bioptim/examples/custom_model/main.py b/bioptim/examples/custom_model/main.py index 151806ad2..55452e65e 100644 --- a/bioptim/examples/custom_model/main.py +++ b/bioptim/examples/custom_model/main.py @@ -2,6 +2,7 @@ This script doesn't use biorbd This an example of how to use bioptim to solve a simple pendulum problem """ + import numpy as np from bioptim import ( diff --git a/bioptim/examples/discrete_mechanics_and_optimal_control/example_variational_integrator_pendulum.py b/bioptim/examples/discrete_mechanics_and_optimal_control/example_variational_integrator_pendulum.py index 659ff133a..c4e0ecc0a 100644 --- a/bioptim/examples/discrete_mechanics_and_optimal_control/example_variational_integrator_pendulum.py +++ b/bioptim/examples/discrete_mechanics_and_optimal_control/example_variational_integrator_pendulum.py @@ -3,6 +3,7 @@ The simulation is a pendulum simulation, its behaviour should be the same as the one in bioptim/examples/getting_started/pendulum.py """ + from bioptim import ( BoundsList, InitialGuessList, diff --git a/bioptim/examples/discrete_mechanics_and_optimal_control/example_variational_integrator_with_holonomic_constraints_pendulum.py b/bioptim/examples/discrete_mechanics_and_optimal_control/example_variational_integrator_with_holonomic_constraints_pendulum.py index 3ce9ec510..ddb423a1a 100644 --- a/bioptim/examples/discrete_mechanics_and_optimal_control/example_variational_integrator_with_holonomic_constraints_pendulum.py +++ b/bioptim/examples/discrete_mechanics_and_optimal_control/example_variational_integrator_with_holonomic_constraints_pendulum.py @@ -4,6 +4,7 @@ constrains the z-distance between two markers to remain null. The behaviour of the pendulum should be the same as the one in bioptim/examples/getting_started/pendulum.py """ + from bioptim import ( BoundsList, HolonomicConstraintsFcn, diff --git a/bioptim/examples/getting_started/example_multinode_objective.py b/bioptim/examples/getting_started/example_multinode_objective.py index a886cbb8b..461f23b5d 100644 --- a/bioptim/examples/getting_started/example_multinode_objective.py +++ b/bioptim/examples/getting_started/example_multinode_objective.py @@ -2,6 +2,7 @@ This example shows how to use multinode_objectives. It replicates the results from getting_started/pendulum.py """ + import platform from casadi import MX, sum1 diff --git a/bioptim/examples/getting_started/example_multistart.py b/bioptim/examples/getting_started/example_multistart.py index 9485fa87e..0577dc3c8 100644 --- a/bioptim/examples/getting_started/example_multistart.py +++ b/bioptim/examples/getting_started/example_multistart.py @@ -2,6 +2,7 @@ An example of how to use multi-start to find local minima from different initial guesses. This example is a variation of the pendulum example in getting_started/pendulum.py. """ + import pickle import os import shutil diff --git a/bioptim/examples/holonomic_constraints/two_pendulums.py b/bioptim/examples/holonomic_constraints/two_pendulums.py index 1e04df847..0081193be 100644 --- a/bioptim/examples/holonomic_constraints/two_pendulums.py +++ b/bioptim/examples/holonomic_constraints/two_pendulums.py @@ -3,6 +3,7 @@ The simulation is two single pendulum that are forced to be coherent with a holonomic constraint. It is then a double pendulum simulation. 
""" + import matplotlib.pyplot as plt import numpy as np diff --git a/bioptim/examples/stochastic_optimal_control/arm_reaching_muscle_driven.py b/bioptim/examples/stochastic_optimal_control/arm_reaching_muscle_driven.py index 000ab285f..964da6ed0 100644 --- a/bioptim/examples/stochastic_optimal_control/arm_reaching_muscle_driven.py +++ b/bioptim/examples/stochastic_optimal_control/arm_reaching_muscle_driven.py @@ -48,6 +48,7 @@ def sensory_reference( + time: cas.MX | cas.SX, states: cas.MX | cas.SX, controls: cas.MX | cas.SX, parameters: cas.MX | cas.SX, @@ -64,6 +65,7 @@ def sensory_reference( def stochastic_forward_dynamics( + time: cas.MX | cas.SX, states: cas.MX | cas.SX, controls: cas.MX | cas.SX, parameters: cas.MX | cas.SX, @@ -90,7 +92,7 @@ def stochastic_forward_dynamics( k = DynamicsFunctions.get(nlp.algebraic_states["k"], algebraic_states) k_matrix = StochasticBioModel.reshape_to_matrix(k, nlp.model.matrix_shape_k) - hand_pos_velo = nlp.model.sensory_reference(states, controls, parameters, algebraic_states, nlp) + hand_pos_velo = nlp.model.sensory_reference(time, states, controls, parameters, algebraic_states, nlp) mus_excitations_fb += nlp.model.get_excitation_with_feedback(k_matrix, hand_pos_velo, ref, sensory_noise) noise_torque = motor_noise @@ -191,6 +193,7 @@ def get_cov_mat(nlp, node_index): cov_matrix = StochasticBioModel.reshape_to_matrix(cov_sym, nlp.model.matrix_shape_cov) dx = stochastic_forward_dynamics( + nlp.time_cx, nlp.states.mx, nlp.controls.mx, nlp.parameters.mx, @@ -295,6 +298,7 @@ def expected_feedback_effort(controllers: list[PenaltyController], sensory_noise # Compute the expected effort hand_pos_velo = controllers[0].model.sensory_reference( + controllers[0].time.cx, controllers[0].states.cx, controllers[0].controls.cx, controllers[0].parameters.cx, @@ -326,6 +330,7 @@ def zero_acceleration(controller: PenaltyController, force_field_magnitude: floa No acceleration of the joints at the beginning and end of the movement. 
""" dx = stochastic_forward_dynamics( + controller.time.cx, controller.states.cx, controller.controls.cx, controller.parameters.cx, @@ -464,6 +469,7 @@ def prepare_socp( dynamics.add( configure_stochastic_optimal_control_problem, dynamic_function=lambda time, states, controls, parameters, algebraic_states, nlp, with_noise: stochastic_forward_dynamics( + time, states, controls, parameters, @@ -696,12 +702,15 @@ def main(): hand_pos_fcn = cas.Function("hand_pos", [q_sym], [model.end_effector_position(q_sym)]) hand_vel_fcn = cas.Function("hand_vel", [q_sym, qdot_sym], [model.end_effector_velocity(q_sym, qdot_sym)]) + time = socp.nlp[0].time.cx + dt = socp.nlp[0].dt.cx states = socp.nlp[0].states.cx controls = socp.nlp[0].controls.cx parameters = socp.nlp[0].parameters.cx algebraic_states = socp.nlp[0].algebraic_states.cx nlp = socp.nlp[0] out = stochastic_forward_dynamics( + cas.vertcat(time, time + dt), states, controls, parameters, @@ -710,7 +719,7 @@ def main(): force_field_magnitude=force_field_magnitude, with_noise=True, ) - dyn_fun = cas.Function("dyn_fun", [states, controls, parameters, algebraic_states], [out.dxdt]) + dyn_fun = cas.Function("dyn_fun", [dt, time, states, controls, parameters, algebraic_states], [out.dxdt]) fig, axs = plt.subplots(3, 2) n_simulations = 30 @@ -719,6 +728,7 @@ def main(): mus_activation_simulated = np.zeros((n_simulations, 6, n_shooting + 1)) hand_pos_simulated = np.zeros((n_simulations, 2, n_shooting + 1)) hand_vel_simulated = np.zeros((n_simulations, 2, n_shooting + 1)) + dt_actual = final_time / n_shooting for i_simulation in range(n_simulations): motor_noise = np.random.normal(0, motor_noise_std, (2, n_shooting + 1)) wPq = np.random.normal(0, wPq_std, (2, n_shooting + 1)) @@ -739,9 +749,23 @@ def main(): ) u = excitations_sol[:, i_node] s = algebraic_states_sol[:, i_node] - k1 = dyn_fun(x_prev, u, [], s, motor_noise[:, i_node], sensory_noise[:, i_node]) + k1 = dyn_fun( + cas.vertcat(dt_actual * i_node, dt_actual), + x_prev, + u, + [], + s, + motor_noise[:, i_node], + sensory_noise[:, i_node], + ) x_next = x_prev + dt * dyn_fun( - x_prev + dt / 2 * k1, u, [], s, motor_noise[:, i_node], sensory_noise[:, i_node] + cas.vertcat(dt_actual * i_node, dt_actual), + x_prev + dt / 2 * k1, + u, + [], + s, + motor_noise[:, i_node], + sensory_noise[:, i_node], ) q_simulated[i_simulation, :, i_node + 1] = np.reshape(x_next[:2], (2,)) qdot_simulated[i_simulation, :, i_node + 1] = np.reshape(x_next[2:4], (2,)) diff --git a/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_collocations.py b/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_collocations.py index 5cb6371e8..b373cf926 100644 --- a/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_collocations.py +++ b/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_collocations.py @@ -34,6 +34,7 @@ def sensory_reference( + time: cas.MX | cas.SX, states: cas.MX | cas.SX, controls: cas.MX | cas.SX, parameters: cas.MX | cas.SX, @@ -60,6 +61,9 @@ def prepare_socp( motor_noise_magnitude: cas.DM, sensory_noise_magnitude: cas.DM, example_type=ExampleType.CIRCLE, + q_opt: np.ndarray = None, + qdot_opt: np.ndarray = None, + tau_opt: np.ndarray = None, use_sx=False, ) -> StochasticOptimalControlProgram: """ @@ -90,7 +94,14 @@ def prepare_socp( The OptimalControlProgram ready to be solved """ - problem_type = SocpType.COLLOCATION(polynomial_degree=polynomial_degree, method="legendre") + auto_initialization = True if q_opt is not None else False + 
initial_cov = cas.DM_eye(4) * np.array([1e-4, 1e-4, 1e-7, 1e-7]) + problem_type = SocpType.COLLOCATION( + polynomial_degree=polynomial_degree, + method="legendre", + auto_initialization=auto_initialization, + initial_cov=initial_cov, + ) bio_model = StochasticBiorbdModel( biorbd_model_path, @@ -98,6 +109,7 @@ def prepare_socp( motor_noise_magnitude=motor_noise_magnitude, sensory_reference=sensory_reference, n_references=4, # This number must be in agreement with what is declared in sensory_reference + n_feedbacks=4, n_noised_states=4, n_noised_controls=2, n_collocation_points=polynomial_degree + 1, @@ -171,7 +183,12 @@ def prepare_socp( # Dynamics dynamics = DynamicsList() - dynamics.add(DynamicsFcn.STOCHASTIC_TORQUE_DRIVEN, problem_type=problem_type, expand_dynamics=True) + dynamics.add( + DynamicsFcn.STOCHASTIC_TORQUE_DRIVEN, + problem_type=problem_type, + expand_dynamics=True, + with_friction=True, + ) x_bounds = BoundsList() x_bounds.add("q", min_bound=[-cas.inf] * n_q, max_bound=[cas.inf] * n_q, interpolation=InterpolationType.CONSTANT) @@ -191,13 +208,20 @@ def prepare_socp( states_init[n_states:, :] = 0.01 x_init = InitialGuessList() - x_init.add("q", initial_guess=states_init[:n_q, :], interpolation=InterpolationType.EACH_FRAME) - x_init.add("qdot", initial_guess=states_init[n_q : n_q + n_qdot, :], interpolation=InterpolationType.EACH_FRAME) + if q_opt is not None: + x_init.add("q", initial_guess=q_opt, interpolation=InterpolationType.ALL_POINTS) + x_init.add("qdot", initial_guess=qdot_opt, interpolation=InterpolationType.ALL_POINTS) + else: + x_init.add("q", initial_guess=states_init[:n_q, :], interpolation=InterpolationType.EACH_FRAME) + x_init.add("qdot", initial_guess=states_init[n_q : n_q + n_qdot, :], interpolation=InterpolationType.EACH_FRAME) controls_init = np.ones((n_tau, n_shooting + 1)) * 0.01 u_init = InitialGuessList() - u_init.add("tau", initial_guess=controls_init, interpolation=InterpolationType.EACH_FRAME) + if tau_opt is not None: + u_init.add("tau", initial_guess=tau_opt, interpolation=InterpolationType.EACH_FRAME) + else: + u_init.add("tau", initial_guess=controls_init, interpolation=InterpolationType.EACH_FRAME) a_init = InitialGuessList() a_bounds = BoundsList() @@ -206,33 +230,25 @@ def prepare_socp( n_m = 4 * 4 * (3 + 1) n_cov = 4 * 4 - a_init.add("k", initial_guess=[0.01] * n_k, interpolation=InterpolationType.CONSTANT) - a_bounds.add("k", min_bound=[-cas.inf] * n_k, max_bound=[cas.inf] * n_k, interpolation=InterpolationType.CONSTANT) + if q_opt is None: + a_init.add("k", initial_guess=[0.01] * n_k, interpolation=InterpolationType.CONSTANT) + a_init.add("ref", initial_guess=[0.01] * n_ref, interpolation=InterpolationType.CONSTANT) + a_init.add("m", initial_guess=[0.01] * n_m, interpolation=InterpolationType.CONSTANT) - a_init.add("ref", initial_guess=[0.01] * n_ref, interpolation=InterpolationType.CONSTANT) + idx = 0 + cov_init_vector = np.zeros((n_states * n_states, 1)) + for i in range(n_states): + for j in range(n_states): + cov_init_vector[idx] = initial_cov[i, j] + a_init.add("cov", initial_guess=cov_init_vector, interpolation=InterpolationType.CONSTANT) + + a_bounds.add("k", min_bound=[-cas.inf] * n_k, max_bound=[cas.inf] * n_k, interpolation=InterpolationType.CONSTANT) a_bounds.add( "ref", min_bound=[-cas.inf] * n_ref, max_bound=[cas.inf] * n_ref, interpolation=InterpolationType.CONSTANT ) - - a_init.add("m", initial_guess=[0.01] * n_m, interpolation=InterpolationType.CONSTANT) a_bounds.add("m", min_bound=[-cas.inf] * n_m, max_bound=[cas.inf] * n_m, 
interpolation=InterpolationType.CONSTANT) - - cov_init = cas.DM_eye(n_states) * np.array([1e-4, 1e-4, 1e-7, 1e-7]) - idx = 0 - cov_init_vector = np.zeros((n_states * n_states, 1)) - for i in range(n_states): - for j in range(n_states): - cov_init_vector[idx] = cov_init[i, j] - a_init.add( - "cov", - initial_guess=cov_init_vector, - interpolation=InterpolationType.CONSTANT, - ) a_bounds.add( - "cov", - min_bound=[-cas.inf] * n_cov, - max_bound=[cas.inf] * n_cov, - interpolation=InterpolationType.CONSTANT, + "cov", min_bound=[-cas.inf] * n_cov, max_bound=[cas.inf] * n_cov, interpolation=InterpolationType.CONSTANT ) return StochasticOptimalControlProgram( diff --git a/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_explicit.py b/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_explicit.py index b5eddb359..1cefec5e6 100644 --- a/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_explicit.py +++ b/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_explicit.py @@ -38,10 +38,13 @@ ) from bioptim.examples.stochastic_optimal_control.arm_reaching_torque_driven_implicit import ExampleType -from bioptim.examples.stochastic_optimal_control.common import dynamics_torque_driven_with_feedbacks +from bioptim.examples.stochastic_optimal_control.common import ( + dynamics_torque_driven_with_feedbacks, +) def stochastic_forward_dynamics( + time: cas.MX | cas.SX, states: cas.MX | cas.SX, controls: cas.MX | cas.SX, parameters: cas.MX | cas.SX, @@ -73,7 +76,7 @@ def stochastic_forward_dynamics( qdddot = DynamicsFunctions.get(nlp.controls["qdddot"], controls) dqdot_constraint = dynamics_torque_driven_with_feedbacks( - states, controls, parameters, algebraic_states, nlp, with_noise=with_noise + time, states, controls, parameters, algebraic_states, nlp, with_noise=with_noise ) defects = cas.vertcat(dqdot_constraint - qddot) @@ -102,13 +105,14 @@ def configure_stochastic_optimal_control_problem(ocp: OptimalControlProgram, nlp ocp, nlp, dyn_func=lambda time, states, controls, parameters, algebraic_states, nlp: nlp.dynamics_type.dynamic_function( - states, controls, parameters, algebraic_states, nlp, with_noise=False + time, states, controls, parameters, algebraic_states, nlp, with_noise=False ), ) ConfigureProblem.configure_dynamics_function( ocp, nlp, dyn_func=lambda time, states, controls, parameters, algebraic_states, nlp: nlp.dynamics_type.dynamic_function( + time, states, controls, parameters, @@ -133,6 +137,7 @@ def minimize_uncertainty(controllers: list[PenaltyController], key: str) -> cas. 
def sensory_reference( + time: cas.MX | cas.SX, states: cas.MX | cas.SX, controls: cas.MX | cas.SX, parameters: cas.MX | cas.SX, @@ -181,7 +186,7 @@ def get_cov_mat(nlp, node_index, use_sx): cov_matrix = StochasticBioModel.reshape_to_matrix(cov_sym, nlp.model.matrix_shape_cov) dx = stochastic_forward_dynamics( - nlp.states.mx, nlp.controls.mx, nlp.parameters, nlp.algebraic_states.mx, nlp, with_noise=True + nlp.time_cx, nlp.states.mx, nlp.controls.mx, nlp.parameters.mx, nlp.algebraic_states.mx, nlp, with_noise=True ) dx.dxdt = cas.Function( "tp", @@ -280,6 +285,7 @@ def expected_feedback_effort(controllers: list[PenaltyController]) -> cas.MX: # Compute the expected effort trace_k_sensor_k = cas.trace(k_matrix @ sensory_noise_matrix @ k_matrix.T) estimated_ref = controllers[0].model.sensory_reference( + controllers[0].time.cx, controllers[0].states.cx, controllers[0].controls.cx, controllers[0].parameters.cx, @@ -345,6 +351,7 @@ def prepare_socp( bio_model = StochasticBiorbdModel( biorbd_model_path, n_references=4, + n_feedbacks=4, n_noised_states=6, n_noised_controls=2, sensory_noise_magnitude=sensory_noise_magnitude, diff --git a/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_implicit.py b/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_implicit.py index 92ca188b4..d912fa891 100644 --- a/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_implicit.py +++ b/bioptim/examples/stochastic_optimal_control/arm_reaching_torque_driven_implicit.py @@ -45,6 +45,7 @@ class ExampleType(Enum): def sensory_reference( + time: cas.MX | cas.SX, states: cas.MX | cas.SX, controls: cas.MX | cas.SX, parameters: cas.MX | cas.SX, @@ -108,6 +109,7 @@ def prepare_socp( motor_noise_magnitude=motor_noise_magnitude, sensory_reference=sensory_reference, n_references=4, # This number must be in agreement with what is declared in sensory_reference + n_feedbacks=4, n_noised_states=4, n_noised_controls=2, friction_coefficients=np.array([[0.05, 0.025], [0.025, 0.05]]), @@ -185,6 +187,7 @@ def prepare_socp( with_cholesky=with_cholesky, expand_dynamics=False, phase_dynamics=PhaseDynamics.ONE_PER_NODE, + with_friction=True, ) states_min = np.ones((n_states, n_shooting + 1)) * -cas.inf diff --git a/bioptim/examples/stochastic_optimal_control/common.py b/bioptim/examples/stochastic_optimal_control/common.py index 6b58ef37d..c6009742d 100644 --- a/bioptim/examples/stochastic_optimal_control/common.py +++ b/bioptim/examples/stochastic_optimal_control/common.py @@ -10,7 +10,7 @@ import matplotlib.transforms as transforms -def dynamics_torque_driven_with_feedbacks(states, controls, parameters, algebraic_states, nlp, with_noise): +def dynamics_torque_driven_with_feedbacks(time, states, controls, parameters, algebraic_states, nlp, with_noise): q = DynamicsFunctions.get(nlp.states["q"], states) qdot = DynamicsFunctions.get(nlp.states["qdot"], states) tau = DynamicsFunctions.get(nlp.controls["tau"], controls) @@ -24,7 +24,8 @@ def dynamics_torque_driven_with_feedbacks(states, controls, parameters, algebrai motor_noise = nlp.parameters["motor_noise"].mx sensory_noise = nlp.parameters["sensory_noise"].mx - end_effector = nlp.model.sensory_reference(states, controls, parameters, algebraic_states, nlp) + end_effector = nlp.model.sensory_reference(time, states, controls, parameters, algebraic_states, nlp) + tau_feedback = get_excitation_with_feedback(k_matrix, end_effector, ref, sensory_noise) tau_force_field = get_force_field(q, nlp.model.force_field_magnitude) @@ -244,6 
+245,10 @@ reshape_to_matrix(var, shape):
     """
     Restore the matrix form of the variables
     """
+
+    if var.shape[0] != shape[0] * shape[1]:
+        raise RuntimeError(f"Cannot reshape: the variable shape is {var.shape} and the expected shape is {shape}")
+
     shape_0, shape_1 = shape
     if isinstance(var, np.ndarray):
         matrix = np.zeros((shape_0, shape_1))
diff --git a/bioptim/examples/stochastic_optimal_control/leuven_arm_model.py b/bioptim/examples/stochastic_optimal_control/leuven_arm_model.py
index ede766ded..a969935ad 100644
--- a/bioptim/examples/stochastic_optimal_control/leuven_arm_model.py
+++ b/bioptim/examples/stochastic_optimal_control/leuven_arm_model.py
@@ -3,10 +3,29 @@
 """
 from typing import Callable
 
-from casadi import MX, SX, DM, sqrt, sin, cos, repmat, exp, log, horzcat, vertcat
+from casadi import MX, DM, sqrt, sin, cos, repmat, exp, log, horzcat, vertcat
 import numpy as np
 
-from bioptim import NonLinearProgram, DynamicsFunctions
+from bioptim import DynamicsFunctions, StochasticBioModel
+
+
+def _compute_torques_from_noise_and_feedback_default(
+    nlp, time, states, controls, parameters, algebraic_states, sensory_noise, motor_noise
+):
+    tau_nominal = DynamicsFunctions.get(nlp.controls["tau"], controls)
+
+    ref = DynamicsFunctions.get(nlp.algebraic_states["ref"], algebraic_states)
+    k = DynamicsFunctions.get(nlp.algebraic_states["k"], algebraic_states)
+    k_matrix = StochasticBioModel.reshape_to_matrix(k, nlp.model.matrix_shape_k)
+
+    sensory_input = nlp.model.sensory_reference(time, states, controls, parameters, algebraic_states, nlp)
+    tau_fb = k_matrix @ ((sensory_input - ref) + sensory_noise)
+
+    tau_motor_noise = motor_noise
+
+    tau = tau_nominal + tau_fb + tau_motor_noise
+
+    return tau
 
 
 class LeuvenArmModel:
@@ -18,14 +37,18 @@ def __init__(
         self,
         sensory_noise_magnitude: np.ndarray | DM,
         motor_noise_magnitude: np.ndarray | DM,
-        sensory_reference: callable,
+        sensory_reference: Callable,
+        compute_torques_from_noise_and_feedback: Callable = _compute_torques_from_noise_and_feedback_default,
     ):
         self.motor_noise_magnitude = motor_noise_magnitude
         self.sensory_noise_magnitude = sensory_noise_magnitude
         self.sensory_reference = sensory_reference
+        self.compute_torques_from_noise_and_feedback = compute_torques_from_noise_and_feedback
+
         n_noised_controls = 6
         n_references = 4
+        self.n_feedbacks = 4
         n_noised_states = 10
         n_noise = motor_noise_magnitude.shape[0] + sensory_noise_magnitude.shape[0]
         self.matrix_shape_k = (n_noised_controls, n_references)
diff --git a/bioptim/examples/torque_driven_ocp/example_quaternions.py b/bioptim/examples/torque_driven_ocp/example_quaternions.py
new file mode 100644
index 000000000..984deeede
--- /dev/null
+++ b/bioptim/examples/torque_driven_ocp/example_quaternions.py
@@ -0,0 +1,275 @@
+"""
+This example uses a representation of a human body by a trunk-leg segment and two arms whose orientation is represented
+using quaternions.
+The goal of the OCP is to elevate the position of the trunk in an environment without gravity with minimal effort.
+It is designed to show how to use a model that has quaternions in its degrees of freedom.
+""" + +import platform + +import numpy as np +import biorbd_casadi as biorbd +from casadi import MX, Function +from bioptim import ( + BiorbdModel, + OptimalControlProgram, + DynamicsList, + DynamicsFcn, + ObjectiveList, + ObjectiveFcn, + BoundsList, + InitialGuessList, + InterpolationType, + OdeSolver, + OdeSolverBase, + Node, + Solver, + PhaseDynamics, + ConstraintList, + ConstraintFcn, +) + + +def eul2quat(eul: np.ndarray) -> np.ndarray: + """ + Converts Euler angles to quaternion. It assumes a sequence angle of XYZ + + Parameters + ---------- + eul: np.ndarray + The 3 angles of sequence XYZ + + Returns + ------- + The quaternion associated to the Euler angles in the format [W, X, Y, Z] + """ + eul_sym = MX.sym("eul", 3) + Quat = Function("Quaternion_fromEulerAngles", [eul_sym], [biorbd.Quaternion.fromXYZAngles(eul_sym).to_mx()])(eul) + return Quat + + +def quat2eul(quat: np.ndarray) -> np.ndarray: + """ + Converts quaternion to Euler angles. It assumes a sequence angle of XYZ + + Parameters + ---------- + quat: np.ndarray + The quaternion in the format [W, X, Y, Z] + + Returns + ------- + The Euler angles associated to the quaternion in the format [X, Y, Z] + """ + quat_sym = MX.sym("quat", 4) + quat_biorbd = biorbd.Quaternion(quat_sym[3], quat_sym[0], quat_sym[1], quat_sym[2]) + eul_mx = biorbd.Rotation.toEulerAngles(biorbd.Quaternion.toMatrix(quat_biorbd), "xyz").to_mx() + eul = Function("EulerAngles_fromQuaternion", [quat_sym], [eul_mx])(quat) + return eul + + +def euler_dot2omega(eul: np.ndarray, eul_dot: np.ndarray, quat: np.ndarray) -> np.ndarray: + """ + Converts Euler angle rates to body velocity. + + Parameters + ---------- + eul: np.ndarray + The 3 angles of sequence XYZ + eul_dot: np.ndarray + The 3 angle rates of sequence XYZ + quat: np.ndarray + The associated quaternion + + Returns + ------- + The angular velocity associated to the Euler angles in the format [X, Y, Z] + """ + + eul_sym = MX.sym("eul", 3) + eul_dot_sym = MX.sym("eul_dot", 3) + quat_sym = MX.sym("quat", 4) + quat_biorbd = biorbd.Quaternion(quat_sym[3], quat_sym[0], quat_sym[1], quat_sym[2]) + omega_mx = biorbd.Quaternion.eulerDotToOmega(quat_biorbd, eul_sym, eul_dot_sym, "xyz").to_mx() + omega = Function("omega", [quat_sym, eul_sym, eul_dot_sym], [omega_mx])(quat, eul, eul_dot) + return omega + + +def joint_angles_rate2body_velcities(q: np.ndarray, eul_dot: np.ndarray) -> np.ndarray: + """ + Converts joint angle rate to body velocity because of quaternions. + + Parameters + ---------- + q: np.ndarray + The generalized coordinates + eul_dot: np.ndarray + The desired Euler joint angle rate + + Returns + ------- + The body velocities + """ + right_arm_omega = np.array( + euler_dot2omega(eul=quat2eul(q[[6, 7, 8, 12]]), eul_dot=eul_dot[6:9], quat=q[[6, 7, 8, 12]]) + ).reshape( + -1, + ) + left_arm_omega = np.array( + euler_dot2omega(eul=quat2eul(q[[9, 10, 11, 13]]), eul_dot=eul_dot[9:12], quat=q[[9, 10, 11, 13]]) + ).reshape(-1) + qdot = np.hstack((right_arm_omega, left_arm_omega)) + return qdot + + +def define_x_init(bio_model) -> np.ndarray: + """ + Defines the initial guess for the states. + The intial guess for the quaternion of the arms are based on the positions of the arms in Euler angles. 
+ """ + x = np.vstack((np.zeros((bio_model.nb_q, 2)), np.ones((bio_model.nb_qdot, 2)))) + right_arm_init = np.zeros((3, 2)) + right_arm_init[1, 0] = -np.pi + 0.01 + right_arm_init[1, 1] = 0 + left_arm_init = np.zeros((3, 2)) + left_arm_init[1, 0] = np.pi - 0.01 + left_arm_init[1, 1] = 0 + for i in range(2): + right_arm_quaterion = eul2quat(right_arm_init[:, i]) + left_arm_quaterion = eul2quat(left_arm_init[:, i]) + x[6:9, i] = np.reshape(right_arm_quaterion[1:], 3) + x[12, i] = right_arm_quaterion[0] + x[9:12, i] = np.reshape(left_arm_quaterion[1:], 3) + x[13, i] = left_arm_quaterion[0] + return x + + +def prepare_ocp( + biorbd_model_path: str, + n_shooting: int, + final_time: float, + ode_solver: OdeSolverBase = OdeSolver.RK4(), + phase_dynamics: PhaseDynamics = PhaseDynamics.SHARED_DURING_THE_PHASE, + expand_dynamics: bool = True, +) -> OptimalControlProgram: + """ + Prepare the ocp + + Parameters + ---------- + biorbd_model_path: str + The path to the bioMod file + n_shooting: int + The number of shooting points + final_time: float + The time at the final node + ode_solver: OdeSolverBase + The ode solver to use + phase_dynamics: PhaseDynamics + If the dynamics equation within a phase is unique or changes at each node. + PhaseDynamics.SHARED_DURING_THE_PHASE is much faster, but lacks the capability to have changing dynamics within + a phase. A good example of when PhaseDynamics.ONE_PER_NODE should be used is when different external forces + are applied at each node + expand_dynamics: bool + If the dynamics function should be expanded. Please note, this will solve the problem faster, but will slow down + the declaration of the OCP, so it is a trade-off. Also depending on the solver, it may or may not work + (for instance IRK is not compatible with expanded dynamics) + + Returns + ------- + The OptimalControlProgram ready to be solved + """ + + bio_model = BiorbdModel(biorbd_model_path) + + # Add objective functions + objective_functions = ObjectiveList() + objective_functions.add( + ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau_joints", node=Node.ALL_SHOOTING, weight=100 + ) + objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_TIME, weight=1) + + # Add constraints + constraints = ConstraintList() + constraints.add( + ConstraintFcn.SUPERIMPOSE_MARKERS, node=Node.START, first_marker="Target_START", second_marker="Neck" + ) + constraints.add( + ConstraintFcn.SUPERIMPOSE_MARKERS, + node=Node.END, + first_marker="Target_END", + second_marker="Neck", + min_bound=0, + max_bound=np.inf, + ) + + # Dynamics + dynamics = DynamicsList() + dynamics.add( + DynamicsFcn.TORQUE_DRIVEN_FREE_FLOATING_BASE, expand_dynamics=expand_dynamics, phase_dynamics=phase_dynamics + ) + + # Define control path constraint + n_root = bio_model.nb_root + n_q = bio_model.nb_q + n_qdot = bio_model.nb_qdot + n_tau = bio_model.nb_tau - n_root + tau_min, tau_max = -100, 100 + u_bounds = BoundsList() + u_bounds["tau_joints"] = [tau_min] * n_tau, [tau_max] * n_tau + + # Initial guesses + x_init = InitialGuessList() + x = define_x_init(bio_model) + x_init.add("q_roots", x[:n_root, :], interpolation=InterpolationType.LINEAR) + x_init.add("q_joints", x[n_root:n_q, :], interpolation=InterpolationType.LINEAR) + x_init.add("qdot_roots", x[n_q : n_q + n_root, :], interpolation=InterpolationType.LINEAR) + x_init.add("qdot_joints", x[n_q + n_root :, :], interpolation=InterpolationType.LINEAR) + + # Path constraint + x_bounds = BoundsList() + x_bounds["q_roots"] = bio_model.bounds_from_ranges("q_roots") + x_bounds["q_joints"] = 
bio_model.bounds_from_ranges("q_joints") + x_bounds["qdot_roots"] = bio_model.bounds_from_ranges("qdot_roots") + x_bounds["qdot_joints"] = bio_model.bounds_from_ranges("qdot_joints") + x_bounds["q_roots"][:, 0] = 0 + x_bounds["qdot_roots"][:, 0] = 0 + x_bounds["q_joints"][:, 0] = x_init["q_joints"].init[:, 0] + omega_arms = joint_angles_rate2body_velcities( + np.hstack((np.zeros((n_root,)), x_init["q_joints"].init[:, 0])), np.zeros((n_qdot,)) + ) + x_bounds["qdot_joints"].min[:, 0] = omega_arms - 0.1 + x_bounds["qdot_joints"].max[:, 0] = omega_arms + 0.1 + + return OptimalControlProgram( + bio_model, + dynamics, + n_shooting, + final_time, + x_bounds=x_bounds, + u_bounds=u_bounds, + x_init=x_init, + objective_functions=objective_functions, + constraints=constraints, + ode_solver=ode_solver, + ) + + +def main(): + """ + Prepares and solves an ocp that has quaternion in it. Animates the results + """ + + n_shooting = 6 + ocp = prepare_ocp("models/trunk_and_2arm_quaternion.bioMod", n_shooting=n_shooting, final_time=0.25) + sol = ocp.solve(Solver.IPOPT(show_online_optim=platform.system() == "Linux")) + + # --- Show results --- # + # sol.graphs() + # If you get an error message in animate with quaternions, it is due to the interpolation of quaternions in bioviz. + # To avoid problems, specify the number of frames to be the same as the number of shooting points + sol.animate(n_frames=n_shooting + 1, show_gravity_vector=False) + + +if __name__ == "__main__": + main() diff --git a/bioptim/examples/torque_driven_ocp/minimize_maximum_torque_by_extra_parameter.py b/bioptim/examples/torque_driven_ocp/minimize_maximum_torque_by_extra_parameter.py index a99445a68..9c1dc7095 100644 --- a/bioptim/examples/torque_driven_ocp/minimize_maximum_torque_by_extra_parameter.py +++ b/bioptim/examples/torque_driven_ocp/minimize_maximum_torque_by_extra_parameter.py @@ -31,7 +31,7 @@ def custom_constraint_parameters(controller: PenaltyController) -> MX: - tau = controller.controls["tau"].cx_start + tau = controller.controls["tau_joints"].cx_start max_param = controller.parameters["max_tau"].cx val = max_param - tau return val @@ -50,11 +50,9 @@ def prepare_ocp( n_shooting = (40, 40) final_time = (1, 1) tau_min, tau_max, tau_init = -300, 300, 0 - - # Mapping - tau_mappings = BiMappingList() - tau_mappings.add("tau", to_second=[None, 0], to_first=[1], phase=0) - tau_mappings.add("tau", to_second=[None, 0], to_first=[1], phase=1) + n_root = bio_model[0].nb_root + n_q = bio_model[0].nb_q + n_tau = n_q - n_root # Define the parameter to optimize parameters = ParameterList() @@ -83,8 +81,8 @@ def prepare_ocp( objective_functions = ObjectiveList() objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_TIME, weight=1, phase=0, min_bound=0.1, max_bound=3) objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_TIME, weight=1, phase=1, min_bound=0.1, max_bound=3) - objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau", weight=10, phase=0) - objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau", weight=10, phase=1) + objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau_joints", weight=10, phase=0) + objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau_joints", weight=10, phase=1) # Constraints constraints = ConstraintList() @@ -92,56 +90,53 @@ def prepare_ocp( # Dynamics dynamics = DynamicsList() - dynamics.add(DynamicsFcn.TORQUE_DRIVEN, with_contact=False) - dynamics.add(DynamicsFcn.TORQUE_DRIVEN, with_contact=False) + 
dynamics.add(DynamicsFcn.TORQUE_DRIVEN_FREE_FLOATING_BASE, with_contact=False) + dynamics.add(DynamicsFcn.TORQUE_DRIVEN_FREE_FLOATING_BASE, with_contact=False) # Path constraint x_bounds = BoundsList() - x_bounds.add(key="q", bounds=bio_model[0].bounds_from_ranges("q"), phase=0) - x_bounds.add(key="qdot", bounds=bio_model[0].bounds_from_ranges("qdot"), phase=0) - x_bounds.add(key="q", bounds=bio_model[1].bounds_from_ranges("q"), phase=1) - x_bounds.add(key="qdot", bounds=bio_model[1].bounds_from_ranges("qdot"), phase=1) + q_roots_min = bio_model[0].bounds_from_ranges("q").min[:n_root, :] + q_roots_max = bio_model[0].bounds_from_ranges("q").max[:n_root, :] + q_joints_min = bio_model[0].bounds_from_ranges("q").min[n_root:, :] + q_joints_max = bio_model[0].bounds_from_ranges("q").max[n_root:, :] + qdot_roots_min = bio_model[0].bounds_from_ranges("qdot").min[:n_root, :] + qdot_roots_max = bio_model[0].bounds_from_ranges("qdot").max[:n_root, :] + qdot_joints_min = bio_model[0].bounds_from_ranges("qdot").min[n_root:, :] + qdot_joints_max = bio_model[0].bounds_from_ranges("qdot").max[n_root:, :] + x_bounds.add("q_roots", min_bound=q_roots_min, max_bound=q_roots_max, phase=0) + x_bounds.add("q_joints", min_bound=q_joints_min, max_bound=q_joints_max, phase=0) + x_bounds.add("qdot_roots", min_bound=qdot_roots_min, max_bound=qdot_roots_max, phase=0) + x_bounds.add("qdot_joints", min_bound=qdot_joints_min, max_bound=qdot_joints_max, phase=0) + x_bounds.add("q_roots", min_bound=q_roots_min, max_bound=q_roots_max, phase=1) + x_bounds.add("q_joints", min_bound=q_joints_min, max_bound=q_joints_max, phase=1) + x_bounds.add("qdot_roots", min_bound=qdot_roots_min, max_bound=qdot_roots_max, phase=1) + x_bounds.add("qdot_joints", min_bound=qdot_joints_min, max_bound=qdot_joints_max, phase=1) # change model bound for -pi, pi for i in range(len(bio_model)): - x_bounds[i]["q"].min[1, :] = -np.pi - x_bounds[i]["q"].max[1, :] = np.pi + x_bounds[i]["q_joints"].min[0, :] = -np.pi + x_bounds[i]["q_joints"].max[0, :] = np.pi # Phase 0 - x_bounds[0]["q"][0, 0] = np.pi - x_bounds[0]["q"][1, 0] = 0 - x_bounds[0]["q"].min[1, -1] = 6 * np.pi / 8 - 0.1 - x_bounds[0]["q"].max[1, -1] = 6 * np.pi / 8 + 0.1 + x_bounds[0]["q_roots"][0, 0] = np.pi + x_bounds[0]["q_joints"][0, 0] = 0 + x_bounds[0]["q_joints"].min[0, -1] = 6 * np.pi / 8 - 0.1 + x_bounds[0]["q_joints"].max[0, -1] = 6 * np.pi / 8 + 0.1 # Phase 1 - x_bounds[1]["q"][0, -1] = 3 * np.pi - x_bounds[1]["q"][1, -1] = 0 - - # Initial guess - x_init = InitialGuessList() - x_init.add(key="q", initial_guess=[0] * bio_model[0].nb_q, phase=0) - x_init.add(key="qdot", initial_guess=[0] * bio_model[0].nb_qdot, phase=0) - x_init.add(key="q", initial_guess=[1] * bio_model[1].nb_q, phase=1) - x_init.add(key="qdot", initial_guess=[1] * bio_model[1].nb_qdot, phase=1) + x_bounds[1]["q_roots"][0, -1] = 3 * np.pi + x_bounds[1]["q_joints"][0, -1] = 0 # Define control path constraint - n_tau = len(tau_mappings[0]["tau"].to_first) u_bounds = BoundsList() - u_bounds.add(key="tau", min_bound=[tau_min] * n_tau, max_bound=[tau_max] * n_tau, phase=0) - u_bounds.add(key="tau", min_bound=[tau_min] * n_tau, max_bound=[tau_max] * n_tau, phase=1) - - # Control initial guess - u_init = InitialGuessList() - u_init.add(key="tau", initial_guess=[tau_init] * n_tau, phase=0) - u_init.add(key="tau", initial_guess=[tau_init] * n_tau, phase=1) + u_bounds.add(key="tau_joints", min_bound=[tau_min] * n_tau, max_bound=[tau_max] * n_tau, phase=0) + u_bounds.add(key="tau_joints", min_bound=[tau_min] * n_tau, 
max_bound=[tau_max] * n_tau, phase=1) return OptimalControlProgram( bio_model, dynamics, n_shooting, final_time, - x_init=x_init, - u_init=u_init, x_bounds=x_bounds, u_bounds=u_bounds, objective_functions=objective_functions, @@ -150,7 +145,6 @@ def prepare_ocp( parameter_bounds=parameter_bounds, parameter_init=parameter_init, constraints=constraints, - variable_mappings=tau_mappings, parameters=parameters, ) diff --git a/bioptim/examples/torque_driven_ocp/models/TruncAnd2Arm_Quaternion.bioMod b/bioptim/examples/torque_driven_ocp/models/trunk_and_2arm.bioMod similarity index 95% rename from bioptim/examples/torque_driven_ocp/models/TruncAnd2Arm_Quaternion.bioMod rename to bioptim/examples/torque_driven_ocp/models/trunk_and_2arm.bioMod index 6c3f0df55..05d9cbb92 100644 --- a/bioptim/examples/torque_driven_ocp/models/TruncAnd2Arm_Quaternion.bioMod +++ b/bioptim/examples/torque_driven_ocp/models/trunk_and_2arm.bioMod @@ -29,8 +29,8 @@ endsegment segment RightArm parent Pelvis - RT 0 0 0 xyz 0.1875 0.0205 0.4719 - rotations q + RT 0 0 0 xyz -0.1875 0.0205 0.4719 + rotations y mass 3.106 inertia 0.0910283 0.00000 0.00000 @@ -58,7 +58,7 @@ endsegment segment LeftArm parent Pelvis RT 0 0 0 xyz 0.1875 0.0205 0.4719 - rotations q + rotations y mass 3.106 inertia 0.0910283 0.00000 0.00000 diff --git a/bioptim/examples/torque_driven_ocp/models/trunk_and_2arm_quaternion.bioMod b/bioptim/examples/torque_driven_ocp/models/trunk_and_2arm_quaternion.bioMod new file mode 100644 index 000000000..4e7d252dd --- /dev/null +++ b/bioptim/examples/torque_driven_ocp/models/trunk_and_2arm_quaternion.bioMod @@ -0,0 +1,99 @@ +version 3 +root_actuated 0 +external_forces 0 + +gravity 0 0 0 + +segment Global +endsegment + +marker Target_START + parent Global + position 0 0 1.0 + endmarker + +marker Target_END + parent Global + position 0 0 1.03 + endmarker + +segment Pelvis + translations xyz + rotations xyz + mass 58.801 + inertia + 7.85829 0.00000 0.00000 + 0.00000 8.12771 0.00000 + 0.00000 0.00000 0.51802 + com 0 0 0.09410 + mesh 0 0 -1 + mesh 0 0 1 + mesh 0 0 -1 +endsegment + + // Markers + marker Leg + parent Pelvis + position 0 0 -1 + technical 1 + endmarker + marker Neck + parent Pelvis + position 0 0 1 + endmarker + + +segment RightArm + parent Pelvis + RT 0 0 0 xyz -0.1875 0.0205 0.4719 + rotations q + mass 3.106 + inertia + 0.0910283 0.00000 0.00000 + 0.00000 0.0909983 0.00000 + 0.00000 0.00000 0.00292 + com 0 0 -0.233088 + mesh 0 0 -0.4662 + mesh 0 0 0 + mesh 0 0 -0.4662 +endsegment + + // Markers + marker RightShoulder + parent RightArm + position 0 0 0 + technical 1 + endmarker + marker RightWrist + parent RightArm + position 0 0 -0.4662 + technical 1 + endmarker + + +segment LeftArm + parent Pelvis + RT 0 0 0 xyz 0.1875 0.0205 0.4719 + rotations q + mass 3.106 + inertia + 0.0910283 0.00000 0.00000 + 0.00000 0.0909983 0.00000 + 0.00000 0.00000 0.00292 + com 0 0 -0.233088 + mesh 0 0 -0.4662 + mesh 0 0 0 + mesh 0 0 -0.4662 +endsegment + + // Markers + marker LeftShoulder + parent LeftArm + position 0 0 0 + technical 1 + endmarker + marker LeftWrist + parent LeftArm + position 0 0 -0.4662 + technical 1 + endmarker diff --git a/bioptim/examples/torque_driven_ocp/torque_driven_free_floating_base.py b/bioptim/examples/torque_driven_ocp/torque_driven_free_floating_base.py new file mode 100644 index 000000000..4a6a53a65 --- /dev/null +++ b/bioptim/examples/torque_driven_ocp/torque_driven_free_floating_base.py @@ -0,0 +1,193 @@ +""" +This file examplifies how to use the torque driven free floating base dynamics. 
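+With this dynamics, the states are split into q_roots/q_joints and qdot_roots/qdot_joints and the only control is
+tau_joints, so the floating base is left unactuated by construction. A minimal sketch (the bound values are
+illustrative, see prepare_ocp below for the ones actually used):
+
+    dynamics.add(DynamicsFcn.TORQUE_DRIVEN_FREE_FLOATING_BASE)
+    n_tau = bio_model.nb_tau - bio_model.nb_root  # only the joints are actuated
+    u_bounds.add("tau_joints", min_bound=[-100] * n_tau, max_bound=[100] * n_tau,
+                 interpolation=InterpolationType.CONSTANT)
+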
+The advantage of this formulation is that yo do not need to specify a mapping to constrain the root to be unactuated. +This specific problem generates one somersault in straight position with 1 twist. +""" + +import numpy as np +import platform + +from bioptim import ( + BiorbdModel, + OptimalControlProgram, + DynamicsList, + DynamicsFcn, + BoundsList, + InitialGuessList, + ObjectiveList, + ObjectiveFcn, + InterpolationType, + BiMappingList, + Solver, + Node, +) + + +def prepare_ocp(biorbd_model_path: str): + """ + This function build the optimal control program. + + Parameters + ---------- + biorbd_model_path: str + The path to the bioMod file + + Returns + ------- + The OptimalControlProgram ready to be solved + """ + + # Declaration of generic elements + n_shooting = [10] + phase_time = [1.0] + + bio_model = BiorbdModel(biorbd_model_path) + + # Declaration of the objectives + objective_functions = ObjectiveList() + objective_functions.add( + objective=ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, + key="tau_joints", + weight=1.0, + ) + objective_functions.add( + objective=ObjectiveFcn.Mayer.MINIMIZE_TIME, + min_bound=0.5, + max_bound=1.5, + node=Node.END, + weight=-0.001, + ) + + # Declaration of the dynamics function used during integration + dynamics = DynamicsList() + dynamics.add(DynamicsFcn.TORQUE_DRIVEN_FREE_FLOATING_BASE) + + # Declaration of optimization variables bounds and initial guesses + # Path constraint + x_bounds = BoundsList() + x_initial_guesses = InitialGuessList() + + u_bounds = BoundsList() + u_initial_guesses = InitialGuessList() + + x_bounds.add( + "q_roots", + min_bound=[ + [0.0, -1.0, -0.1], + [0.0, -1.0, -0.1], + [0.0, -0.1, -0.1], + [0.0, -0.1, 2 * np.pi - 0.1], + [0.0, -np.pi / 4, -np.pi / 4], + [0.0, -0.1, -0.1], + ], + max_bound=[ + [0.0, 1.0, 0.1], + [0.0, 1.0, 0.1], + [0.0, 10.0, 0.1], + [0.0, 2 * np.pi + 0.1, 2 * np.pi + 0.1], + [0.0, np.pi / 4, np.pi / 4], + [0.0, np.pi + 0.1, np.pi + 0.1], + ], + ) + x_bounds.add( + "q_joints", + min_bound=[ + [2.9, -0.05, -0.05], + [-2.9, -3.0, -3.0], + ], + max_bound=[ + [2.9, 3.0, 3.0], + [-2.9, 0.05, 0.05], + ], + ) + + x_bounds.add( + "qdot_roots", + min_bound=[ + [-0.5, -10.0, -10.0], + [-0.5, -10.0, -10.0], + [5.0, -100.0, -100.0], + [0.5, 0.5, 0.5], + [0.0, -100.0, -100.0], + [0.0, -100.0, -100.0], + ], + max_bound=[ + [0.5, 10.0, 10.0], + [0.5, 10.0, 10.0], + [10.0, 100.0, 100.0], + [20.0, 20.0, 20.0], + [-0.0, 100.0, 100.0], + [-0.0, 100.0, 100.0], + ], + ) + x_bounds.add( + "qdot_joints", + min_bound=[ + [0.0, -100.0, -100.0], + [0.0, -100.0, -100.0], + ], + max_bound=[ + [-0.0, 100.0, 100.0], + [-0.0, 100.0, 100.0], + ], + ) + + x_initial_guesses.add( + "q_roots", + initial_guess=[ + [0.0, 0.0], + [0.0, 0.0], + [0.0, 0.0], + [0.0, 2 * np.pi], + [0.0, 0.0], + [0.0, np.pi], + ], + interpolation=InterpolationType.LINEAR, + ) + x_initial_guesses.add( + "q_joints", + initial_guess=[ + [2.9, 0.0], + [-2.9, 0.0], + ], + interpolation=InterpolationType.LINEAR, + ) + + x_initial_guesses.add( + "qdot_roots", + initial_guess=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + interpolation=InterpolationType.CONSTANT, + ) + x_initial_guesses.add( + "qdot_joints", + initial_guess=[0.0, 0.0], + interpolation=InterpolationType.CONSTANT, + ) + + u_bounds.add("tau_joints", min_bound=[-100, -100], max_bound=[100, 100], interpolation=InterpolationType.CONSTANT) + + return OptimalControlProgram( + bio_model=bio_model, + n_shooting=n_shooting, + phase_time=phase_time, + dynamics=dynamics, + x_bounds=x_bounds, + u_bounds=u_bounds, + 
x_init=x_initial_guesses, + u_init=u_initial_guesses, + objective_functions=objective_functions, + use_sx=False, + ) + + +if __name__ == "__main__": + # --- Prepare the ocp --- # + ocp = prepare_ocp(biorbd_model_path="models/trunk_and_2arm.bioMod") + + # --- Solve the ocp --- # + solver = Solver.IPOPT(show_online_optim=platform.system() == "Linux") + sol = ocp.solve(solver=solver) + + # --- Show results --- # + # sol.graphs() + sol.animate() diff --git a/bioptim/examples/torque_driven_ocp/trampo_quaternions.py b/bioptim/examples/torque_driven_ocp/trampo_quaternions.py index 0c65c712f..75a6450d4 100644 --- a/bioptim/examples/torque_driven_ocp/trampo_quaternions.py +++ b/bioptim/examples/torque_driven_ocp/trampo_quaternions.py @@ -143,7 +143,7 @@ def main(): Prepares and solves an ocp that has quaternion in it. Animates the results """ - ocp = prepare_ocp("models/TruncAnd2Arm_Quaternion.bioMod", n_shooting=25, final_time=0.25) + ocp = prepare_ocp("models/trunk_and_2arm_quaternion.bioMod", n_shooting=25, final_time=0.25) sol = ocp.solve(Solver.IPOPT(show_online_optim=platform.system() == "Linux"), expand_during_shake_tree=False) # Print the last solution diff --git a/bioptim/gui/plot.py b/bioptim/gui/plot.py index b60fad52a..01ba1fda4 100644 --- a/bioptim/gui/plot.py +++ b/bioptim/gui/plot.py @@ -380,12 +380,14 @@ def legend_without_duplicate_labels(ax): else: nb_subplots = max( [ - max( - len(nlp.plot[variable].phase_mappings.to_first.map_idx), - max(nlp.plot[variable].phase_mappings.to_first.map_idx) + 1, + ( + max( + len(nlp.plot[variable].phase_mappings.to_first.map_idx), + max(nlp.plot[variable].phase_mappings.to_first.map_idx) + 1, + ) + if variable in nlp.plot + else 0 ) - if variable in nlp.plot - else 0 for nlp in self.ocp.nlp ] ) diff --git a/bioptim/limits/constraints.py b/bioptim/limits/constraints.py index 031f73311..78000f6bd 100644 --- a/bioptim/limits/constraints.py +++ b/bioptim/limits/constraints.py @@ -298,26 +298,24 @@ def torque_max_from_q_and_qdot( if min_torque and min_torque < 0: raise ValueError("min_torque cannot be negative in tau_max_from_actuators") - bound = controller.model.tau_max(controller.states["q"].mx, controller.states["qdot"].mx) + bound = controller.model.tau_max(controller.q.mx, controller.qdot.mx) min_bound = controller.mx_to_cx( "min_bound", - controller.controls["tau"].mapping.to_first.map(bound[1]), - controller.states["q"], - controller.states["qdot"], + controller.tau.mapping.to_first.map(bound[1]), + controller.q, + controller.qdot, ) max_bound = controller.mx_to_cx( "max_bound", - controller.controls["tau"].mapping.to_first.map(bound[0]), - controller.states["q"], - controller.states["qdot"], + controller.tau.mapping.to_first.map(bound[0]), + controller.q, + controller.qdot, ) if min_torque: min_bound = if_else(lt(min_bound, min_torque), min_torque, min_bound) max_bound = if_else(lt(max_bound, min_torque), min_torque, max_bound) - value = vertcat( - controller.controls["tau"].cx_start + min_bound, controller.controls["tau"].cx_start - max_bound - ) + value = vertcat(controller.tau.cx_start + min_bound, controller.tau.cx_start - max_bound) if constraint.rows is None or isinstance(constraint.rows, (tuple, list, np.ndarray)): n_rows = value.shape[0] // 2 @@ -415,10 +413,10 @@ def qddot_equals_forward_dynamics( Since the function does nothing, we can safely ignore any argument """ - q = controller.states["q"].mx - qdot = controller.states["qdot"].mx + q = controller.q.mx + qdot = controller.qdot.mx passive_torque = 
controller.model.passive_joint_torque(q, qdot) - tau = controller.states["tau"].mx if "tau" in controller.states else controller.controls["tau"].mx + tau = controller.states["tau"].mx if "tau" in controller.states else controller.tau.mx tau = tau + passive_torque if with_passive_torque else tau tau = tau + controller.model.ligament_joint_torque(q, qdot) if with_ligament else tau @@ -464,9 +462,9 @@ def tau_equals_inverse_dynamics( Since the function does nothing, we can safely ignore any argument """ - q = controller.states["q"].mx - qdot = controller.states["qdot"].mx - tau = controller.states["tau"].mx if "tau" in controller.states else controller.controls["tau"].mx + q = controller.q.mx + qdot = controller.qdot.mx + tau = controller.states["tau"].mx if "tau" in controller.states else controller.tau.mx qddot = controller.states["qddot"].mx if "qddot" in controller.states else controller.controls["qddot"].mx passive_torque = controller.model.passive_joint_torque(q, qdot) tau = tau + passive_torque if with_passive_torque else tau @@ -515,8 +513,8 @@ def implicit_marker_acceleration( Since the function does nothing, we can safely ignore any argument """ - q = controller.states["q"].mx - qdot = controller.states["qdot"].mx + q = controller.q.mx + qdot = controller.qdot.mx qddot = controller.states["qddot"].mx if "qddot" in controller.states else controller.controls["qddot"].mx # TODO get the index of the marker @@ -553,8 +551,8 @@ def tau_from_muscle_equal_inverse_dynamics( Since the function does nothing, we can safely ignore any argument """ - q = controller.states["q"].mx - qdot = controller.states["qdot"].mx + q = controller.q.mx + qdot = controller.qdot.mx muscle_activations = controller.controls["muscles"].mx muscles_states = controller.model.state_set() passive_torque = controller.model.passive_joint_torque(q, qdot) @@ -624,7 +622,6 @@ def stochastic_covariance_matrix_continuity_implicit( It is explained in more details here: https://doi.org/10.1109/CDC.2013.6761121 P_k+1 = M_k @ (dg/dx @ P @ dg/dx + dg/dw @ sigma_w @ dg/dw) @ M_k """ - # TODO: Charbie -> This is only True for x=[q, qdot], u=[tau] (have to think on how to generalize it) if not controller.get_nlp.is_stochastic: raise RuntimeError("This function is only valid for stochastic problems") @@ -661,9 +658,7 @@ def stochastic_covariance_matrix_continuity_implicit( cov_next = m_matrix @ (dg_dx @ cov_matrix @ dg_dx.T + dg_dw @ sigma_w @ dg_dw.T) @ m_matrix.T cov_implicit_deffect = cov_next - cov_matrix - penalty.expand = ( - controller.get_nlp.dynamics_type.expand_dynamics - ) # TODO: Charbie -> should this be always true? 
+ penalty.expand = controller.get_nlp.dynamics_type.expand_dynamics penalty.explicit_derivative = True penalty.multi_thread = True @@ -713,8 +708,7 @@ def stochastic_df_dx_implicit( DF_DX_fun = Function( "DF_DX_fun", [ - controller.time.mx, - controller.dt.mx, + vertcat(controller.time.mx, controller.dt.mx), q_root, q_joints, qdot_root, @@ -731,12 +725,11 @@ def stochastic_df_dx_implicit( parameters[controller.parameters["sensory_noise"].index] = controller.model.sensory_noise_magnitude DF_DX = DF_DX_fun( - controller.time.cx, - controller.dt.cx, - controller.states["q"].cx[:nb_root], - controller.states["q"].cx[nb_root:], - controller.states["qdot"].cx[:nb_root], - controller.states["qdot"].cx[nb_root:], + vertcat(controller.time.cx, controller.dt.cx), + controller.q.cx[:nb_root], + controller.q.cx[nb_root:], + controller.qdot.cx[:nb_root], + controller.qdot.cx[nb_root:], controller.controls.cx, parameters, controller.algebraic_states.cx, @@ -763,32 +756,19 @@ def stochastic_helper_matrix_collocation( if not controller.get_nlp.is_stochastic: raise RuntimeError("This function is only valid for stochastic problems") - polynomial_degree = controller.get_nlp.ode_solver.polynomial_degree - Mc, _ = ConstraintFunction.Functions.collocation_jacobians( + Mc, _, _, _, _, _ = ConstraintFunction.Functions.collocation_jacobians( penalty, controller, - polynomial_degree, ) - nb_root = controller.model.nb_root - nu = controller.model.nb_q - nb_root - z_joints = horzcat(*(controller.states.cx_intermediates_list)) - parameters = controller.parameters.cx parameters[controller.parameters["motor_noise"].index] = controller.model.motor_noise_magnitude parameters[controller.parameters["sensory_noise"].index] = controller.model.sensory_noise_magnitude constraint = Mc( - controller.time.cx, - controller.dt.cx, - controller.states.cx_start[:nb_root], # x_q_root - controller.states.cx_start[nb_root : nb_root + nu], # x_q_joints - controller.states.cx_start[nb_root + nu : 2 * nb_root + nu], # x_qdot_root - controller.states.cx_start[2 * nb_root + nu : 2 * (nb_root + nu)], # x_qdot_joints - z_joints[:nb_root, :], # z_q_root - z_joints[nb_root : nb_root + nu, :], # z_q_joints - z_joints[nb_root + nu : 2 * nb_root + nu, :], # z_qdot_root - z_joints[2 * nb_root + nu : 2 * (nb_root + nu), :], # z_qdot_joints + vertcat(controller.time.cx, controller.dt.cx), + controller.states.cx_start, + horzcat(*(controller.states.cx_intermediates_list)), controller.controls.cx_start, parameters, controller.algebraic_states.cx_start, @@ -806,41 +786,27 @@ def stochastic_covariance_matrix_continuity_collocation( It is explained in more details here: https://doi.org/10.1109/CDC.2013.6761121 P_k+1 = M_k @ (dg/dx @ P_k @ dg/dx + dg/dw @ sigma_w @ dg/dw) @ M_k """ - # TODO: Charbie -> This is only True for x=[q, qdot], u=[tau] (have to think on how to generalize it) if not controller.get_nlp.is_stochastic: raise RuntimeError("This function is only valid for stochastic problems") - polynomial_degree = controller.get_nlp.ode_solver.polynomial_degree - _, Pf = ConstraintFunction.Functions.collocation_jacobians( + _, Pf, _, _, _, _ = ConstraintFunction.Functions.collocation_jacobians( penalty, controller, - polynomial_degree, ) cov_matrix_next = StochasticBioModel.reshape_to_matrix( controller.algebraic_states["cov"].cx_end, controller.model.matrix_shape_cov ) - nb_root = controller.model.nb_root - nu = controller.model.nb_q - nb_root - z_joints = horzcat(*(controller.states.cx_intermediates_list)) - parameters = controller.parameters.cx 
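+        # The motor and sensory noise parameters are purely symbolic in the OCP, so their known magnitudes are
+        # substituted before evaluating Pf; the propagated covariance then follows
+        # P_k+1 = M_k @ (dg/dx @ P_k @ dg/dx + dg/dw @ sigma_w @ dg/dw) @ M_k with the model's actual noise levels.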
parameters[controller.parameters["motor_noise"].index] = controller.model.motor_noise_magnitude parameters[controller.parameters["sensory_noise"].index] = controller.model.sensory_noise_magnitude cov_next_computed = Pf( - controller.time.cx, - controller.dt.cx, - controller.states.cx_start[:nb_root], # x_q_root - controller.states.cx_start[nb_root : nb_root + nu], # x_q_joints - controller.states.cx_start[nb_root + nu : 2 * nb_root + nu], # x_qdot_root - controller.states.cx_start[2 * nb_root + nu : 2 * (nb_root + nu)], # x_qdot_joints - z_joints[:nb_root, :], # z_q_root - z_joints[nb_root : nb_root + nu, :], # z_q_joints - z_joints[nb_root + nu : 2 * nb_root + nu, :], # z_qdot_root - z_joints[2 * nb_root + nu : 2 * (nb_root + nu), :], # z_qdot_joints + vertcat(controller.time.cx, controller.dt.cx), + controller.states.cx_start, + horzcat(*(controller.states.cx_intermediates_list)), controller.controls.cx_start, parameters, controller.algebraic_states.cx_start, @@ -852,6 +818,7 @@ def stochastic_covariance_matrix_continuity_collocation( penalty.explicit_derivative = True penalty.multi_thread = True + penalty.integrate = True return out_vector @@ -863,8 +830,10 @@ def stochastic_mean_sensory_input_equals_reference( """ Get the error between the hand position and the reference. """ + ref = controller.algebraic_states["ref"].cx_start sensory_input = controller.model.sensory_reference( + time=controller.time.mx, states=controller.states.mx, controls=controller.controls.mx, parameters=controller.parameters.mx, @@ -875,6 +844,8 @@ def stochastic_mean_sensory_input_equals_reference( sensory_input = Function( "tp", [ + controller.time.mx, + controller.dt.mx, controller.states.mx, controller.controls.mx, controller.parameters.mx, @@ -882,13 +853,15 @@ def stochastic_mean_sensory_input_equals_reference( ], [sensory_input], )( + controller.time.cx, + controller.dt.cx, controller.states.cx_start, controller.controls.cx_start, controller.parameters.cx, controller.algebraic_states.cx_start, ) - return sensory_input - ref + return sensory_input[: controller.model.n_feedbacks] - ref[: controller.model.n_feedbacks] @staticmethod def symmetric_matrix( @@ -933,7 +906,7 @@ def semidefinite_positive_matrix( return diagonal_terms @staticmethod - def collocation_jacobians(penalty, controller, polynomial_degree): + def collocation_jacobians(penalty, controller): """ This function computes the jacobians of the collocation equation and of the continuity equation with respect to the collocation points and the noise """ @@ -948,55 +921,29 @@ def collocation_jacobians(penalty, controller, polynomial_degree): controller.algebraic_states["m"].cx_start, controller.model.matrix_shape_m ) - nb_root = controller.model.nb_root - nu = controller.model.nb_q - nb_root - joints_index = list(range(nb_root, nb_root + nu)) + list(range(2 * nb_root + nu, 2 * (nb_root + nu))) - - x_q_root = controller.cx.sym("x_q_root", nb_root, 1) - x_q_joints = controller.cx.sym("x_q_joints", nu, 1) - x_qdot_root = controller.cx.sym("x_qdot_root", nb_root, 1) - x_qdot_joints = controller.cx.sym("x_qdot_joints", nu, 1) - z_q_root = controller.cx.sym("z_q_root", nb_root, polynomial_degree + 1) - z_q_joints = controller.cx.sym("z_q_joints", nu, polynomial_degree + 1) - z_qdot_root = controller.cx.sym("z_qdot_root", nb_root, polynomial_degree + 1) - z_qdot_joints = controller.cx.sym("z_qdot_joints", nu, polynomial_degree + 1) - x_full_cx = vertcat(x_q_root, x_q_joints, x_qdot_root, x_qdot_joints) - z_full_cx = vertcat(z_q_root, z_q_joints, z_qdot_root, 
z_qdot_joints) - xf, _, defects = controller.integrate_extra_dynamics(0).function( vertcat(controller.time.cx, controller.time.cx + controller.dt.cx), - horzcat(x_full_cx, z_full_cx), + horzcat(controller.states.cx, horzcat(*controller.states.cx_intermediates_list)), controller.controls.cx, controller.parameters.cx, controller.algebraic_states.cx, ) - G_joints = [x_full_cx - z_full_cx[:, 0]] - nx = 2 * (nb_root + nu) - for i in range(controller.ode_solver.polynomial_degree): - idx = [j + i * nx for j in joints_index] - G_joints.append(defects[idx]) - G_joints = vertcat(*G_joints) + initial_defect = controller.states.cx_start - controller.states.cx_intermediates_list[0] + defects = vertcat(initial_defect, defects) - Gdx = jacobian(G_joints, horzcat(x_q_joints, x_qdot_joints)) - Gdz = jacobian(G_joints, horzcat(z_q_joints, z_qdot_joints)) - Gdw = jacobian(G_joints, vertcat(motor_noise, sensory_noise)) - Fdz = jacobian(xf, horzcat(z_q_joints, z_qdot_joints)) + Gdx = jacobian(defects, controller.states.cx) + Gdz = jacobian(defects, horzcat(*controller.states.cx_intermediates_list)) + Gdw = jacobian(defects, vertcat(motor_noise, sensory_noise)) + Fdz = jacobian(xf, horzcat(*controller.states.cx_intermediates_list)) # Constraint Equality defining M Mc = Function( "M_cons", [ - controller.time.cx, - controller.dt.cx, - x_q_root, - x_q_joints, - x_qdot_root, - x_qdot_joints, - z_q_root, - z_q_joints, - z_qdot_root, - z_qdot_joints, + vertcat(controller.time.cx, controller.dt.cx), + controller.states.cx_start, + horzcat(*controller.states.cx_intermediates_list), controller.controls.cx_start, controller.parameters.cx_start, controller.algebraic_states.cx_start, @@ -1010,16 +957,9 @@ def collocation_jacobians(penalty, controller, polynomial_degree): Pf = Function( "P_next", [ - controller.time.cx, - controller.dt.cx, - x_q_root, - x_q_joints, - x_qdot_root, - x_qdot_joints, - z_q_root, - z_q_joints, - z_qdot_root, - z_qdot_joints, + vertcat(controller.time.cx, controller.dt.cx), + controller.states.cx_start, + horzcat(*controller.states.cx_intermediates_list), controller.controls.cx_start, controller.parameters.cx_start, controller.algebraic_states.cx_start, @@ -1029,7 +969,59 @@ def collocation_jacobians(penalty, controller, polynomial_degree): if penalty.expand: Pf = Pf.expand() - return Mc, Pf + Gdx_fun = Function( + "Gdx_fun", + [ + vertcat(controller.time.cx, controller.dt.cx), + controller.states.cx_start, + horzcat(*controller.states.cx_intermediates_list), + controller.controls.cx_start, + controller.parameters.cx_start, + controller.algebraic_states.cx_start, + ], + [Gdx], + ) + + Gdz_fun = Function( + "Gdz_fun", + [ + vertcat(controller.time.cx, controller.dt.cx), + controller.states.cx_start, + horzcat(*controller.states.cx_intermediates_list), + controller.controls.cx_start, + controller.parameters.cx_start, + controller.algebraic_states.cx_start, + ], + [Gdz], + ) + + Gdw_fun = Function( + "Gdw_fun", + [ + vertcat(controller.time.cx, controller.dt.cx), + controller.states.cx_start, + horzcat(*controller.states.cx_intermediates_list), + controller.controls.cx_start, + controller.parameters.cx_start, + controller.algebraic_states.cx_start, + ], + [Gdw], + ) + + Fdz_fun = Function( + "Fdz_fun", + [ + vertcat(controller.time.cx, controller.dt.cx), + controller.states.cx_start, + horzcat(*controller.states.cx_intermediates_list), + controller.controls.cx_start, + controller.parameters.cx_start, + controller.algebraic_states.cx_start, + ], + [Fdz], + ) + + return Mc, Pf, Gdx_fun, Gdz_fun, 
Gdw_fun, Fdz_fun @staticmethod def get_dt(_): diff --git a/bioptim/limits/multinode_penalty.py b/bioptim/limits/multinode_penalty.py index 0950ea397..349477c6e 100644 --- a/bioptim/limits/multinode_penalty.py +++ b/bioptim/limits/multinode_penalty.py @@ -208,7 +208,7 @@ def algebraic_states_equality( key: str = "all", ): """ - The most common continuity function, that is the covariance before equals covariance after for algebraic_states ocp + Continuity function, that is the algebraic states before algebraic states after for algebraic_states ocp Parameters ---------- diff --git a/bioptim/limits/path_conditions.py b/bioptim/limits/path_conditions.py index 16af66b10..fad3e1b9d 100644 --- a/bioptim/limits/path_conditions.py +++ b/bioptim/limits/path_conditions.py @@ -941,9 +941,12 @@ def __init__( super(NoisedInitialGuess, self).__init__( key=key, initial_guess=self.noised_initial_guess, - interpolation=interpolation - if interpolation == InterpolationType.ALL_POINTS # interpolation should always be done at each data point - else InterpolationType.EACH_FRAME, + interpolation=( + interpolation + if interpolation + == InterpolationType.ALL_POINTS # interpolation should always be done at each data point + else InterpolationType.EACH_FRAME + ), **parameters, ) @@ -1052,9 +1055,9 @@ def _create_noise_matrix( init_instance = InitialGuess( "noised", initial_guess_matrix, - interpolation=interpolation - if interpolation == InterpolationType.ALL_POINTS - else InterpolationType.EACH_FRAME, + interpolation=( + interpolation if interpolation == InterpolationType.ALL_POINTS else InterpolationType.EACH_FRAME + ), ) self.noised_initial_guess = init_instance.init + self.noise diff --git a/bioptim/limits/penalty.py b/bioptim/limits/penalty.py index df31adbbb..cfdb86a15 100644 --- a/bioptim/limits/penalty.py +++ b/bioptim/limits/penalty.py @@ -10,6 +10,7 @@ from ..misc.enums import Node, Axis, ControlType, QuadratureRule from ..misc.mapping import BiMapping from ..models.protocols.stochastic_biomodel import StochasticBioModel +from ..models.biorbd.biorbd_model import BiorbdModel class PenaltyFunctionAbstract: @@ -129,14 +130,12 @@ def minimize_power( controls = controller.controls[key_control].cx_start # select only actuacted states if key_control == "tau": - return controls * controller.states["qdot"].cx_start + return controls * controller.qdot.cx_start elif key_control == "muscles": - q_mx = controller.states["q"].mx - qdot_mx = controller.states["qdot"].mx + q_mx = controller.q.mx + qdot_mx = controller.qdot.mx muscles_dot = controller.model.muscle_velocity(q_mx, qdot_mx) - objective = controller.mx_to_cx( - "muscle_velocity", muscles_dot, controller.states["q"], controller.states["qdot"] - ) + objective = controller.mx_to_cx("muscle_velocity", muscles_dot, controller.q, controller.qdot) return controls * objective @@ -182,7 +181,6 @@ def stochastic_minimize_expected_feedback_efforts(penalty: PenaltyOption, contro # create the casadi function to be evaluated # Get the symbolic variables - ref = controller.algebraic_states["ref"].cx_start if "cholesky_cov" in controller.algebraic_states.keys(): l_cov_matrix = StochasticBioModel.reshape_to_cholesky_matrix( controller.algebraic_states["cholesky_cov"].cx_start, controller.model.matrix_shape_cov_cholesky @@ -201,77 +199,41 @@ def stochastic_minimize_expected_feedback_efforts(penalty: PenaltyOption, contro controller.algebraic_states["k"].cx_start, controller.model.matrix_shape_k ) - nb_root = controller.model.nb_root - nu = controller.model.nb_q - 
controller.model.nb_root - - q_root_mx = MX.sym("q_root", nb_root, 1) - q_joints_mx = MX.sym("q_joints", nu, 1) - qdot_root_mx = MX.sym("qdot_root", nb_root, 1) - qdot_joints_mx = MX.sym("qdot_joints", nu, 1) - q_root = controller.cx.sym("q_root", nb_root, 1) - q_joints = controller.cx.sym("q_joints", nu, 1) - qdot_root = controller.cx.sym("qdot_root", nb_root, 1) - qdot_joints = controller.cx.sym("qdot_joints", nu, 1) - # Compute the expected effort trace_k_sensor_k = trace(k_matrix @ sensory_noise_matrix @ k_matrix.T) - ee = controller.model.sensory_reference( - states=vertcat(q_root_mx, q_joints_mx, qdot_root_mx, qdot_joints_mx), + + e_fb_mx = controller.model.compute_torques_from_noise_and_feedback( + nlp=controller.get_nlp, + time=controller.time.mx, + states=controller.states.mx, controls=controller.controls.mx, parameters=controller.parameters.mx, algebraic_states=controller.algebraic_states.mx, - nlp=controller.get_nlp, + sensory_noise=controller.model.sensory_noise_magnitude, + motor_noise=controller.model.motor_noise_magnitude, ) - # ee to cx - ee = Function( - "tp", + e_fb = Function( + "e_fb_tp", [ - q_root_mx, - q_joints_mx, - qdot_root_mx, - qdot_joints_mx, + vertcat(controller.time.mx, controller.dt.mx), + controller.states.mx, controller.controls.mx, controller.parameters.mx, controller.algebraic_states.mx, ], - [ee], + [e_fb_mx], )( - q_root, - q_joints, - qdot_root, - qdot_joints, + vertcat(controller.time.cx, controller.dt.cx), + controller.states.cx_start, controller.controls.cx_start, controller.parameters.cx_start, controller.algebraic_states.cx_start, ) - e_fb = k_matrix @ ((ee - ref) + controller.model.sensory_noise_magnitude) - jac_e_fb_x = jacobian(e_fb, vertcat(q_joints, qdot_joints)) + jac_e_fb_x = jacobian(e_fb, controller.states.cx_start) - fun_jac_e_fb_x = Function( - "jac_e_fb_x", - [ - q_root, - q_joints, - qdot_root, - qdot_joints, - controller.controls_scaled.cx_start, - controller.parameters.cx_start, - controller.algebraic_states_scaled.cx_start, - ], - [jac_e_fb_x], - ) + trace_jac_p_jack = trace(jac_e_fb_x @ cov_matrix @ jac_e_fb_x.T) - eval_jac_e_fb_x = fun_jac_e_fb_x( - controller.states.cx_start[:nb_root], - controller.states.cx_start[nb_root : nb_root + nu], - controller.states.cx_start[nb_root + nu : 2 * nb_root + nu], - controller.states.cx_start[2 * nb_root + nu : 2 * (nb_root + nu)], - controller.controls_scaled.cx_start, - controller.parameters.cx_start, - controller.algebraic_states_scaled.cx_start, - ) - trace_jac_p_jack = trace(eval_jac_e_fb_x @ cov_matrix @ eval_jac_e_fb_x.T) expectedEffort_fb_mx = trace_jac_p_jack + trace_k_sensor_k return expectedEffort_fb_mx @@ -322,6 +284,8 @@ def minimize_markers( The index or name of the segment to use as reference. 
Default [None] is the global coordinate system """ + controller.model: BiorbdModel + # Adjust the cols and rows PenaltyFunctionAbstract.set_idx_columns(penalty, controller, marker_index, "marker") PenaltyFunctionAbstract.set_axes_rows(penalty, axes) @@ -330,20 +294,20 @@ def minimize_markers( penalty.plot_target = False # Compute the position of the marker in the requested reference frame (None for global) - q = controller.states["q"].mx + q = controller.q model = controller.model jcs_t = ( biorbd.RotoTrans() if reference_jcs is None - else model.homogeneous_matrices_in_global(q, reference_jcs, inverse=True) + else model.biorbd_homogeneous_matrices_in_global(q.mx, reference_jcs, inverse=True) ) markers = [] - for m in model.markers(q): + for m in model.markers(q.mx): markers_in_jcs = jcs_t.to_mx() @ vertcat(m, 1) markers = horzcat(markers, markers_in_jcs[:3]) - markers_objective = controller.mx_to_cx("markers", markers, controller.states["q"]) + markers_objective = controller.mx_to_cx("markers", markers, controller.q) return markers_objective @staticmethod @@ -381,14 +345,12 @@ def minimize_markers_velocity( penalty.quadratic = True if penalty.quadratic is None else penalty.quadratic # Add the penalty in the requested reference frame. None for global - q_mx = controller.states["q"].mx - qdot_mx = controller.states["qdot"].mx + q_mx = controller.q.mx + qdot_mx = controller.qdot.mx markers = horzcat(*controller.model.marker_velocities(q_mx, qdot_mx, reference_index=reference_jcs)) - markers_objective = controller.mx_to_cx( - "markers_velocity", markers, controller.states["q"], controller.states["qdot"] - ) + markers_objective = controller.mx_to_cx("markers_velocity", markers, controller.q, controller.qdot) return markers_objective @staticmethod @@ -423,8 +385,8 @@ def minimize_markers_acceleration( PenaltyFunctionAbstract.set_axes_rows(penalty, axes) penalty.quadratic = True if penalty.quadratic is None else penalty.quadratic - q_mx = controller.states["q"].mx - qdot_mx = controller.states["qdot"].mx + q_mx = controller.q.mx + qdot_mx = controller.qdot.mx qddot_mx = PenaltyFunctionAbstract._get_qddot(controller, "mx") markers = horzcat( @@ -472,14 +434,14 @@ def superimpose_markers( PenaltyFunctionAbstract.set_axes_rows(penalty, axes) penalty.quadratic = True if penalty.quadratic is None else penalty.quadratic - diff_markers = controller.model.marker( - controller.states["q"].mx, second_marker_idx - ) - controller.model.marker(controller.states["q"].mx, first_marker_idx) + diff_markers = controller.model.marker(controller.q.mx, second_marker_idx) - controller.model.marker( + controller.q.mx, first_marker_idx + ) return controller.mx_to_cx( f"diff_markers", diff_markers, - controller.states["q"], + controller.q, ) @staticmethod @@ -520,9 +482,7 @@ def superimpose_markers_velocity( PenaltyFunctionAbstract.set_axes_rows(penalty, axes) penalty.quadratic = True if penalty.quadratic is None else penalty.quadratic - marker_velocity = controller.model.marker_velocities( - controller.states["q"].mx, controller.states["qdot"].mx - ) + marker_velocity = controller.model.marker_velocities(controller.q.mx, controller.qdot.mx) marker_1 = marker_velocity[first_marker_idx][:] marker_2 = marker_velocity[second_marker_idx][:] @@ -531,8 +491,8 @@ def superimpose_markers_velocity( return controller.mx_to_cx( f"diff_markers", diff_markers, - controller.states["q"], - controller.states["qdot"], + controller.q, + controller.qdot, ) @staticmethod @@ -656,12 +616,10 @@ def minimize_predicted_com_height(_: 
PenaltyOption, controller: PenaltyControlle """ g = controller.model.gravity[2] - com = controller.model.center_of_mass(controller.states["q"].mx) - com_dot = controller.model.center_of_mass_velocity(controller.states["q"].mx, controller.states["qdot"].mx) + com = controller.model.center_of_mass(controller.q.mx) + com_dot = controller.model.center_of_mass_velocity(controller.q.mx, controller.qdot.mx) com_height = (com_dot[2] * com_dot[2]) / (2 * -g) + com[2] - com_height_cx = controller.mx_to_cx( - "com_height", com_height, controller.states["q"], controller.states["qdot"] - ) + com_height_cx = controller.mx_to_cx("com_height", com_height, controller.q, controller.qdot) return com_height_cx @staticmethod @@ -685,7 +643,7 @@ def minimize_com_position(penalty: PenaltyOption, controller: PenaltyController, PenaltyFunctionAbstract.set_axes_rows(penalty, axes) penalty.quadratic = True if penalty.quadratic is None else penalty.quadratic - com_cx = controller.mx_to_cx("com", controller.model.center_of_mass, controller.states["q"]) + com_cx = controller.mx_to_cx("com", controller.model.center_of_mass, controller.q) return com_cx @staticmethod @@ -710,7 +668,7 @@ def minimize_com_velocity(penalty: PenaltyOption, controller: PenaltyController, penalty.quadratic = True if penalty.quadratic is None else penalty.quadratic com_dot_cx = controller.mx_to_cx( - "com_dot", controller.model.center_of_mass_velocity, controller.states["q"], controller.states["qdot"] + "com_dot", controller.model.center_of_mass_velocity, controller.q, controller.qdot ) return com_dot_cx @@ -735,8 +693,8 @@ def minimize_com_acceleration(penalty: PenaltyOption, controller: PenaltyControl PenaltyFunctionAbstract.set_axes_rows(penalty, axes) penalty.quadratic = True if penalty.quadratic is None else penalty.quadratic - q_mx = controller.states["q"].mx - qdot_mx = controller.states["qdot"].mx + q_mx = controller.q.mx + qdot_mx = controller.qdot.mx qddot_mx = PenaltyFunctionAbstract._get_qddot(controller, "mx") marker = controller.model.center_of_mass_acceleration(q_mx, qdot_mx, qddot_mx) @@ -765,8 +723,8 @@ def minimize_angular_momentum(penalty: PenaltyOption, controller: PenaltyControl angular_momentum_cx = controller.mx_to_cx( "angular_momentum", controller.model.angular_momentum, - controller.states["q"], - controller.states["qdot"], + controller.q, + controller.qdot, ) return angular_momentum_cx @@ -793,8 +751,8 @@ def minimize_linear_momentum(penalty: PenaltyOption, controller: PenaltyControll com_velocity = controller.mx_to_cx( "com_velocity", controller.model.center_of_mass_velocity, - controller.states["q"], - controller.states["qdot"], + controller.q, + controller.qdot, ) if isinstance(com_velocity, SX): mass = Function("mass", [], [controller.model.mass]).expand() @@ -905,11 +863,11 @@ def track_segment_with_custom_rt( "The track_segment_with_custom_rt penalty can only be called with a BiorbdModel" ) model: BiorbdModel = controller.model - r_seg_transposed = model.model.globalJCS(controller.states["q"].mx, segment_index).rot().transpose() - r_rt = model.model.RT(controller.states["q"].mx, rt).rot() + r_seg_transposed = model.model.globalJCS(controller.q.mx, segment_index).rot().transpose() + r_rt = model.model.RT(controller.q.mx, rt).rot() angles_diff = biorbd.Rotation.toEulerAngles(r_seg_transposed * r_rt, "zyx").to_mx() - angle_objective = controller.mx_to_cx(f"track_segment", angles_diff, controller.states["q"]) + angle_objective = controller.mx_to_cx(f"track_segment", angles_diff, controller.q) return angle_objective 
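+    # Note on the controller.q / controller.qdot accessors used in the penalties above: they are properties of
+    # PenaltyController (added in penalty_controller.py in this changeset). Roughly, they return states["q"] /
+    # states["qdot"] when those keys exist and otherwise rebuild the full vectors from the free-floating-base split,
+    # e.g. vertcat(states["q_roots"].cx_start, states["q_joints"].cx_start), so the same penalty code works with
+    # both TORQUE_DRIVEN and TORQUE_DRIVEN_FREE_FLOATING_BASE dynamics.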
@staticmethod @@ -947,8 +905,8 @@ def track_marker_with_segment_axis( segment_idx = controller.model.segment_index(segment) if isinstance(segment, str) else segment # Get the marker in rt reference frame - marker = controller.model.marker(controller.states["q"].mx, marker_idx, segment_idx) - marker_objective = controller.mx_to_cx("marker", marker, controller.states["q"]) + marker = controller.model.marker(controller.q.mx, marker_idx, segment_idx) + marker_objective = controller.mx_to_cx("marker", marker, controller.q) # To align an axis, the other must be equal to 0 if not penalty.rows_is_set: @@ -995,7 +953,7 @@ def minimize_segment_rotation( if not isinstance(controller.model, BiorbdModel): raise NotImplementedError("The minimize_segment_rotation penalty can only be called with a BiorbdModel") model: BiorbdModel = controller.model - jcs_segment = model.model.globalJCS(controller.states["q"].mx, segment_idx).rot() + jcs_segment = model.model.globalJCS(controller.q.mx, segment_idx).rot() angles_segment = biorbd.Rotation.toEulerAngles(jcs_segment, "xyz").to_mx() if axes is None: @@ -1005,9 +963,7 @@ def minimize_segment_rotation( if not isinstance(ax, Axis): raise RuntimeError("axes must be a list of bioptim.Axis") - segment_rotation_objective = controller.mx_to_cx( - "segment_rotation", angles_segment[axes], controller.states["q"] - ) + segment_rotation_objective = controller.mx_to_cx("segment_rotation", angles_segment[axes], controller.q) return segment_rotation_objective @@ -1044,9 +1000,7 @@ def minimize_segment_velocity( "The minimize_segments_velocity penalty can only be called with a BiorbdModel" ) model: BiorbdModel = controller.model - segment_angular_velocity = model.segment_angular_velocity( - controller.states["q"].mx, controller.states["qdot"].mx, segment_idx - ) + segment_angular_velocity = model.segment_angular_velocity(controller.q.mx, controller.qdot.mx, segment_idx) if axes is None: axes = [Axis.X, Axis.Y, Axis.Z] @@ -1058,8 +1012,8 @@ def minimize_segment_velocity( segment_velocity_objective = controller.mx_to_cx( "segment_velocity", segment_angular_velocity[axes], - controller.states["q"], - controller.states["qdot"], + controller.q, + controller.qdot, ) return segment_velocity_objective @@ -1120,10 +1074,10 @@ def track_vector_orientations_from_markers( else vector_1_marker_1 ) - vector_0_marker_0_position = controller.model.marker(controller.states["q"].mx, vector_0_marker_0_idx) - vector_0_marker_1_position = controller.model.marker(controller.states["q"].mx, vector_0_marker_1_idx) - vector_1_marker_0_position = controller.model.marker(controller.states["q"].mx, vector_1_marker_0_idx) - vector_1_marker_1_position = controller.model.marker(controller.states["q"].mx, vector_1_marker_1_idx) + vector_0_marker_0_position = controller.model.marker(controller.q.mx, vector_0_marker_0_idx) + vector_0_marker_1_position = controller.model.marker(controller.q.mx, vector_0_marker_1_idx) + vector_1_marker_0_position = controller.model.marker(controller.q.mx, vector_1_marker_0_idx) + vector_1_marker_1_position = controller.model.marker(controller.q.mx, vector_1_marker_1_idx) vector_0 = vector_0_marker_1_position - vector_0_marker_0_position vector_1 = vector_1_marker_1_position - vector_1_marker_0_position @@ -1131,7 +1085,7 @@ def track_vector_orientations_from_markers( cross_prod_norm = sqrt(cross_prod[0] ** 2 + cross_prod[1] ** 2 + cross_prod[2] ** 2) out = atan2(cross_prod_norm, dot(vector_0, vector_1)) - return controller.mx_to_cx("vector_orientations_difference", out, 
controller.states["q"]) + return controller.mx_to_cx("vector_orientations_difference", out, controller.q) @staticmethod def state_continuity(penalty: PenaltyOption, controller: PenaltyController | list): @@ -1439,7 +1393,7 @@ def _get_qddot(controller: PenaltyController, attribute: str): getattr(controller.controls, attribute), getattr(controller.parameters, attribute), getattr(controller.algebraic_states, attribute), - )[controller.states["qdot"].index, :] + )[controller.qdot.index, :] source = controller.states if "qddot" in controller.states else controller.controls return getattr(source["qddot"], attribute) @@ -1473,7 +1427,7 @@ def _get_markers_acceleration(controller: PenaltyController, markers, CoM=False) markers, controller.time, controller.parameters, - controller.states["q"], - controller.states["qdot"], + controller.q, + controller.qdot, last_param, ) diff --git a/bioptim/limits/penalty_controller.py b/bioptim/limits/penalty_controller.py index 699c97d90..a5044fee9 100644 --- a/bioptim/limits/penalty_controller.py +++ b/bioptim/limits/penalty_controller.py @@ -342,6 +342,61 @@ def parameters_scaled(self) -> OptimizationVariableList: """ return self._nlp.parameters.scaled + @property + def q(self) -> OptimizationVariable: + if "q" in self.states: + return self.states["q"] + elif "q_roots" in self.states and "q_joints" in self.states: + cx_start = vertcat(self.states["q_roots"].cx_start, self.states["q_joints"].cx_start) + q_parent_list = OptimizationVariableList( + self._nlp.cx, self._nlp.phase_dynamics == PhaseDynamics.SHARED_DURING_THE_PHASE + ) + q_parent_list._cx_start = cx_start + q = OptimizationVariable( + name="q", + mx=vertcat(self.states["q_roots"].mx, self.states["q_joints"].mx), + cx_start=cx_start, + index=[i for i in range(self.states["q_roots"].shape + self.states["q_joints"].shape)], + mapping=BiMapping( + [i for i in range(self.states["q_roots"].shape + self.states["q_joints"].shape)], + [i for i in range(self.states["q_roots"].shape + self.states["q_joints"].shape)], + ), + parent_list=q_parent_list, + ) + return q + else: + raise RuntimeError("q is not defined in the states") + + @property + def qdot(self) -> OptimizationVariable: + if "qdot" in self.states: + return self.states["qdot"] + elif "qdot_roots" in self.states and "qdot_joints" in self.states: + cx_start = vertcat(self.states["qdot_roots"].cx_start, self.states["qdot_joints"].cx_start) + qdot_parent_list = OptimizationVariableList( + self._nlp.cx, self._nlp.phase_dynamics == PhaseDynamics.SHARED_DURING_THE_PHASE + ) + qdot_parent_list._cx_start = cx_start + qdot = OptimizationVariable( + name="qdot", + mx=vertcat(self.states["qdot_roots"].mx, self.states["qdot_joints"].mx), + cx_start=cx_start, + index=[i for i in range(self.states["qdot_roots"].shape + self.states["qdot_joints"].shape)], + mapping=BiMapping( + [i for i in range(self.states["qdot_roots"].shape + self.states["qdot_joints"].shape)], + [i for i in range(self.states["qdot_roots"].shape + self.states["qdot_joints"].shape)], + ), + parent_list=qdot_parent_list, + ) + return qdot + + @property + def tau(self) -> OptimizationVariable: + if "tau" in self.controls: + return self.controls["tau"] + elif "tau_joints" in self.controls: + return self.controls["tau_joints"] + def copy(self): return PenaltyController( self.ocp, diff --git a/bioptim/limits/penalty_option.py b/bioptim/limits/penalty_option.py index f29b3ab03..4bb6fa468 100644 --- a/bioptim/limits/penalty_option.py +++ b/bioptim/limits/penalty_option.py @@ -314,7 +314,7 @@ def 
transform_penalty_to_stochastic(self, controller: PenaltyController, fcn, st # TODO: Charbie -> This is just a first implementation (x=[q, qdot]), it should then be generalized - nx = controller.states["q"].cx_start.shape[0] + nx = controller.q.cx_start.shape[0] n_root = controller.model.nb_root n_joints = nx - n_root diff --git a/bioptim/limits/phase_transition.py b/bioptim/limits/phase_transition.py index 2ffe4ce87..81ebf8bcd 100644 --- a/bioptim/limits/phase_transition.py +++ b/bioptim/limits/phase_transition.py @@ -57,9 +57,11 @@ def __init__( transition = PhaseTransitionFcn.CUSTOM super(PhaseTransition, self).__init__( PhaseTransitionFcn, - nodes_phase=(-1, 0) - if transition in [transition.CYCLIC, transition.COVARIANCE_CYCLIC] - else (phase_pre_idx, phase_pre_idx + 1), + nodes_phase=( + (-1, 0) + if transition in [transition.CYCLIC, transition.COVARIANCE_CYCLIC] + else (phase_pre_idx, phase_pre_idx + 1) + ), nodes=(Node.END, Node.START), multinode_penalty=transition, custom_function=custom_function, diff --git a/bioptim/misc/__version__.py b/bioptim/misc/__version__.py index e485cbe61..664e09419 100644 --- a/bioptim/misc/__version__.py +++ b/bioptim/misc/__version__.py @@ -1,4 +1,5 @@ """ The current version of bioptim """ + __version__ = "3.2.0" diff --git a/bioptim/models/biorbd/biorbd_model.py b/bioptim/models/biorbd/biorbd_model.py index cc97810c1..123bdc304 100644 --- a/bioptim/models/biorbd/biorbd_model.py +++ b/bioptim/models/biorbd/biorbd_model.py @@ -12,7 +12,7 @@ from ...limits.path_conditions import Bounds from ...misc.utils import check_version from ...misc.mapping import BiMapping, BiMappingList -from ..utils import _q_mapping, _qdot_mapping, _qddot_mapping, bounds_from_ranges +from ..utils import _var_mapping, bounds_from_ranges check_version(biorbd, "1.10.0", "1.11.0") @@ -95,11 +95,20 @@ def nb_root(self) -> int: def segments(self) -> tuple[biorbd.Segment]: return self.model.segments() - def homogeneous_matrices_in_global(self, q, segment_id, inverse=False) -> tuple: - # Todo: one of the last ouput of BiorbdModel which is not a MX but a biorbd object - rt_matrix = self.model.globalJCS(GeneralizedCoordinates(q), segment_id) + def biorbd_homogeneous_matrices_in_global(self, q, segment_idx, inverse=False) -> tuple: + """ + Returns a biorbd object containing the roto-translation matrix of the segment in the global reference frame. + This is useful if you want to interact with biorbd directly later on. + """ + rt_matrix = self.model.globalJCS(GeneralizedCoordinates(q), segment_idx) return rt_matrix.transpose() if inverse else rt_matrix + def homogeneous_matrices_in_global(self, q, segment_idx, inverse=False) -> MX: + """ + Returns the roto-translation matrix of the segment in the global reference frame. + """ + return self.biorbd_homogeneous_matrices_in_global(q, segment_idx, inverse).to_mx() + def homogeneous_matrices_in_child(self, segment_id) -> MX: return self.model.localJCS(segment_id).to_mx() @@ -149,10 +158,35 @@ def reshape_qdot(self, q, qdot, k_stab=1) -> MX: ).to_mx() def segment_angular_velocity(self, q, qdot, idx) -> MX: + """ + Returns the angular velocity of the segment in the global reference frame. + """ q_biorbd = GeneralizedCoordinates(q) qdot_biorbd = GeneralizedVelocity(qdot) return self.model.segmentAngularVelocity(q_biorbd, qdot_biorbd, idx, True).to_mx() + def segment_orientation(self, q, idx) -> MX: + """ + Returns the angular position of the segment in the global reference frame. 
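+
+        A rough usage sketch (model path and segment index are illustrative): the orientation is extracted as XYZ
+        Euler angles from the rotation part of the segment's homogeneous matrix in the global frame, e.g.
+
+            model = BiorbdModel("models/trunk_and_2arm.bioMod")
+            angles = model.segment_orientation(q, idx=1)  # MX with the three Euler angles (x, y, z)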
+ """ + q_biorbd = GeneralizedCoordinates(q) + rotation_matrix = self.homogeneous_matrices_in_global(q_biorbd, idx)[:3, :3] + segment_orientation = biorbd.Rotation.toEulerAngles( + biorbd.Rotation( + rotation_matrix[0, 0], + rotation_matrix[0, 1], + rotation_matrix[0, 2], + rotation_matrix[1, 0], + rotation_matrix[1, 1], + rotation_matrix[1, 2], + rotation_matrix[2, 0], + rotation_matrix[2, 1], + rotation_matrix[2, 2], + ), + "xyz", + ).to_mx() + return segment_orientation + @property def name_dof(self) -> tuple[str, ...]: return tuple(s.to_string() for s in self.model.nameDof()) @@ -368,7 +402,7 @@ def marker_velocities(self, q, qdot, reference_index=None) -> list[MX]: else: out = [] - homogeneous_matrix_transposed = self.homogeneous_matrices_in_global( + homogeneous_matrix_transposed = self.biorbd_homogeneous_matrices_in_global( GeneralizedCoordinates(q), reference_index, inverse=True, @@ -393,7 +427,7 @@ def marker_accelerations(self, q, qdot, qddot, reference_index=None) -> list[MX] else: out = [] - homogeneous_matrix_transposed = self.homogeneous_matrices_in_global( + homogeneous_matrix_transposed = self.biorbd_homogeneous_matrices_in_global( GeneralizedCoordinates(q), reference_index, inverse=True, @@ -505,31 +539,40 @@ def ranges_from_model(self, variable: str): qdot_ranges = [] qddot_ranges = [] - for segment in self.segments: + for i_segment, segment in enumerate(self.segments): if variable == "q": q_ranges += [q_range for q_range in segment.QRanges()] + elif variable == "q_roots": + if segment.parent().to_string().lower() == "root": + q_ranges += [q_range for q_range in segment.QRanges()] + elif variable == "q_joints": + if segment.parent().to_string().lower() != "root": + q_ranges += [q_range for q_range in segment.QRanges()] elif variable == "qdot": qdot_ranges += [qdot_range for qdot_range in segment.QDotRanges()] + elif variable == "qdot_roots": + if segment.parent().to_string().lower() == "root": + qdot_ranges += [qdot_range for qdot_range in segment.QDotRanges()] + elif variable == "qdot_joints": + if segment.parent().to_string().lower() != "root": + qdot_ranges += [qdot_range for qdot_range in segment.QDotRanges()] elif variable == "qddot": qddot_ranges += [qddot_range for qddot_range in segment.QDDotRanges()] + elif variable == "qddot_joints": + if segment.parent().to_string().lower() != "root": + qddot_ranges += [qddot_range for qddot_range in segment.QDDotRanges()] - if variable == "q": + if variable == "q" or variable == "q_roots" or variable == "q_joints": return q_ranges - elif variable == "qdot": + elif variable == "qdot" or variable == "qdot_roots" or variable == "qdot_joints": return qdot_ranges - elif variable == "qddot": + elif variable == "qddot" or variable == "qddot_joints": return qddot_ranges else: raise RuntimeError("Wrong variable name") - def _q_mapping(self, mapping: BiMapping = None) -> dict: - return _q_mapping(self, mapping) - - def _qdot_mapping(self, mapping: BiMapping = None) -> dict: - return _qdot_mapping(self, mapping) - - def _qddot_mapping(self, mapping: BiMapping = None) -> dict: - return _qddot_mapping(self, mapping) + def _var_mapping(self, key: str, range_for_mapping: int | list | tuple | range, mapping: BiMapping = None) -> dict: + return _var_mapping(key, range_for_mapping, mapping) def bounds_from_ranges(self, variables: str | list[str, ...], mapping: BiMapping | BiMappingList = None) -> Bounds: return bounds_from_ranges(self, variables, mapping) @@ -557,7 +600,10 @@ def animate( check_version(bioviz, "2.0.0", "2.4.0") - states = 
solution["q"] + if "q_roots" in solution and "q_joints" in solution: + states = np.vstack((solution["q_roots"], solution["q_joints"])) + else: + states = solution["q"] if not isinstance(states, (list, tuple)): states = [states] @@ -577,7 +623,14 @@ def animate( biorbd_model: BiorbdModel = nlp.model all_bioviz.append(bioviz.Viz(biorbd_model.path, **kwargs)) - all_bioviz[-1].load_movement(ocp.nlp[idx_phase].variable_mappings["q"].to_second.map(data)) + + if "q_roots" in solution and "q_joints" in solution: + # TODO: Fix the mapping for this case + raise NotImplementedError("Mapping is not implemented for this case") + q = data + else: + q = ocp.nlp[idx_phase].variable_mappings["q"].to_second.map(data) + all_bioviz[-1].load_movement(q) if tracked_markers[idx_phase] is not None: all_bioviz[-1].load_experimental_markers(tracked_markers[idx_phase]) diff --git a/bioptim/models/biorbd/multi_biorbd_model.py b/bioptim/models/biorbd/multi_biorbd_model.py index b54d81ddc..a7dffc03d 100644 --- a/bioptim/models/biorbd/multi_biorbd_model.py +++ b/bioptim/models/biorbd/multi_biorbd_model.py @@ -10,8 +10,9 @@ from ...misc.utils import check_version from ...limits.path_conditions import Bounds from ...misc.mapping import BiMapping, BiMappingList +from ..utils import _var_mapping, bounds_from_ranges from ...optimization.solution.solution_data import SolutionMerge -from ..utils import _q_mapping, _qdot_mapping, _qddot_mapping, bounds_from_ranges +from ..utils import bounds_from_ranges from .biorbd_model import BiorbdModel @@ -269,11 +270,14 @@ def segments(self) -> tuple[biorbd.Segment, ...]: out += model.segments return out - def homogeneous_matrices_in_global(self, q, segment_id, inverse=False) -> biorbd.RotoTrans: - local_segment_id, model_id = self.local_variable_id("segment", segment_id) + def biorbd_homogeneous_matrices_in_global(self, q, segment_idx, inverse=False) -> biorbd.RotoTrans: + local_segment_id, model_id = self.local_variable_id("segment", segment_idx) q_model = q[self.variable_index("q", model_id)] return self.models[model_id].homogeneous_matrices_in_global(q_model, local_segment_id, inverse) + def homogeneous_matrices_in_global(self, q, segment_idx, inverse=False) -> MX: + return self.biorbd_homogeneous_matrices_in_global(q, segment_idx, inverse).to_mx() + def homogeneous_matrices_in_child(self, segment_id) -> MX: local_id, model_id = self.local_variable_id("segment", segment_id) return self.models[model_id].homogeneous_matrices_in_child(local_id) @@ -724,15 +728,8 @@ def ranges_from_model(self, variable: str): def bounds_from_ranges(self, variables: str | list[str, ...], mapping: BiMapping | BiMappingList = None) -> Bounds: return bounds_from_ranges(self, variables, mapping) - def _q_mapping(self, mapping: BiMapping = None) -> dict: - return _q_mapping(self, mapping) - - # - def _qdot_mapping(self, mapping: BiMapping = None) -> dict: - return _qdot_mapping(self, mapping) - - def _qddot_mapping(self, mapping: BiMapping = None) -> dict: - return _qddot_mapping(self, mapping) + def _var_mapping(self, key: str, range_for_mapping: int | list | tuple | range, mapping: BiMapping = None) -> dict: + return _var_mapping(key, range_for_mapping, mapping) def lagrangian(self): raise NotImplementedError("lagrangian is not implemented yet for MultiBiorbdModel") diff --git a/bioptim/models/biorbd/stochastic_biorbd_model.py b/bioptim/models/biorbd/stochastic_biorbd_model.py index 98d58ae87..0212de065 100644 --- a/bioptim/models/biorbd/stochastic_biorbd_model.py +++ 
b/bioptim/models/biorbd/stochastic_biorbd_model.py @@ -4,7 +4,28 @@ import numpy as np from ...misc.mapping import BiMappingList -from .biorbd_model import BiorbdModel +from bioptim import BiorbdModel, DynamicsFunctions + + +def _compute_torques_from_noise_and_feedback_default( + nlp, time, states, controls, parameters, algebraic_states, sensory_noise, motor_noise +): + tau_nominal = DynamicsFunctions.get(nlp.controls["tau"], controls) + + ref = DynamicsFunctions.get(nlp.algebraic_states["ref"], algebraic_states) + k = DynamicsFunctions.get(nlp.algebraic_states["k"], algebraic_states) + from bioptim import StochasticBioModel + + k_matrix = StochasticBioModel.reshape_to_matrix(k, nlp.model.matrix_shape_k) + + sensory_input = nlp.model.sensory_reference(time, states, controls, parameters, algebraic_states, nlp) + tau_fb = k_matrix @ ((sensory_input - ref) + sensory_noise) + + tau_motor_noise = motor_noise + + tau = tau_nominal + tau_fb + tau_motor_noise + + return tau class StochasticBiorbdModel(BiorbdModel): @@ -16,11 +37,13 @@ def __init__( self, bio_model: str | BiorbdModel, n_references: int, + n_feedbacks: int, n_noised_states: int, n_noised_controls: int, sensory_noise_magnitude: np.ndarray | DM, motor_noise_magnitude: np.ndarray | DM, sensory_reference: Callable, + compute_torques_from_noise_and_feedback: Callable = _compute_torques_from_noise_and_feedback_default, motor_noise_mapping: BiMappingList = BiMappingList(), n_collocation_points: int = 1, **kwargs, @@ -30,11 +53,14 @@ def __init__( self.motor_noise_magnitude = motor_noise_magnitude self.sensory_noise_magnitude = sensory_noise_magnitude + self.compute_torques_from_noise_and_feedback = compute_torques_from_noise_and_feedback + self.sensory_reference = sensory_reference self.motor_noise_mapping = motor_noise_mapping self.n_references = n_references + self.n_feedbacks = n_feedbacks self.n_noised_states = n_noised_states self.n_noise = motor_noise_magnitude.shape[0] + sensory_noise_magnitude.shape[0] self.n_noised_controls = n_noised_controls @@ -49,8 +75,3 @@ def __init__( self.matrix_shape_cov = (self.n_noised_states, self.n_noised_states) self.matrix_shape_cov_cholesky = (self.n_noised_states, self.n_noised_states) self.matrix_shape_m = (self.n_noised_states, self.n_noised_states * self.n_collocation_points) - - def compute_torques_from_noise_and_feedback(self, sensory_noise_mx, k_matrix, sensory_input, ref): - """Compute the torques from the sensory feedback""" - mapped_sensory_feedback_torque = k_matrix @ ((sensory_input - ref) + sensory_noise_mx) - return mapped_sensory_feedback_torque diff --git a/bioptim/models/biorbd/variational_biorbd_model.py b/bioptim/models/biorbd/variational_biorbd_model.py index 59696f50c..72e78d0ae 100644 --- a/bioptim/models/biorbd/variational_biorbd_model.py +++ b/bioptim/models/biorbd/variational_biorbd_model.py @@ -1,6 +1,7 @@ """ Biorbd model for holonomic constraints and variational integrator. """ + import biorbd_casadi as biorbd from casadi import SX, MX, vertcat, jacobian, transpose diff --git a/bioptim/models/holonomic_constraints.py b/bioptim/models/holonomic_constraints.py index 13c4b8d8f..248a97e3e 100644 --- a/bioptim/models/holonomic_constraints.py +++ b/bioptim/models/holonomic_constraints.py @@ -1,6 +1,7 @@ """ This class contains different holonomic constraint function. 
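# The feedback law is now injected at construction time; any callable with the same
# signature as _compute_torques_from_noise_and_feedback_default above can replace it.
# A sketch of a proportional-only variant (dropping the additive motor noise is an
# assumption made purely for illustration, not a recommendation):
def feedback_without_motor_noise(nlp, time, states, controls, parameters, algebraic_states, sensory_noise, motor_noise):
    from bioptim import DynamicsFunctions, StochasticBioModel

    tau_nominal = DynamicsFunctions.get(nlp.controls["tau"], controls)
    ref = DynamicsFunctions.get(nlp.algebraic_states["ref"], algebraic_states)
    k = DynamicsFunctions.get(nlp.algebraic_states["k"], algebraic_states)
    k_matrix = StochasticBioModel.reshape_to_matrix(k, nlp.model.matrix_shape_k)
    sensory_input = nlp.model.sensory_reference(time, states, controls, parameters, algebraic_states, nlp)
    return tau_nominal + k_matrix @ ((sensory_input - ref) + sensory_noise)

# It would then be passed at model construction, e.g.
# StochasticBiorbdModel(..., compute_torques_from_noise_and_feedback=feedback_without_motor_noise)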
""" + from typing import Any, Callable from casadi import MX, Function, jacobian, vertcat @@ -61,8 +62,8 @@ def superimpose_markers( # if local frame is provided, the markers are expressed in the same local frame if local_frame_index is not None: jcs_t = biorbd_model.homogeneous_matrices_in_global(q_sym, local_frame_index, inverse=True) - marker_1_sym = (jcs_t.to_mx() @ vertcat(marker_1_sym, 1))[:3] - marker_2_sym = (jcs_t.to_mx() @ vertcat(marker_2_sym, 1))[:3] + marker_1_sym = (jcs_t @ vertcat(marker_1_sym, 1))[:3] + marker_2_sym = (jcs_t @ vertcat(marker_2_sym, 1))[:3] # the constraint is the distance between the two markers, set to zero constraint = (marker_1_sym - marker_2_sym)[index] diff --git a/bioptim/models/protocols/biomodel.py b/bioptim/models/protocols/biomodel.py index d4c45f519..13b457ac2 100644 --- a/bioptim/models/protocols/biomodel.py +++ b/bioptim/models/protocols/biomodel.py @@ -85,15 +85,6 @@ def segments(self) -> tuple: """Get all segments""" return () - def homogeneous_matrices_in_global(self, q, segment_id, inverse=False) -> tuple: - """ - Get the homogeneous matrices of all segments in the world frame, - such as: P_R0 = T_R0_R1 * P_R1 - with P_R0 the position of any point P in the world frame, - T_R0_R1 the homogeneous matrix that transform any point in R1 frame to R0. - P_R1 the position of any point P in the segment R1 frame. - """ - def homogeneous_matrices_in_child(self, segment_id) -> MX: """ Get the homogeneous matrices of one segment in its parent frame, diff --git a/bioptim/models/protocols/stochastic_biomodel.py b/bioptim/models/protocols/stochastic_biomodel.py index a729baa53..82bebb4c3 100644 --- a/bioptim/models/protocols/stochastic_biomodel.py +++ b/bioptim/models/protocols/stochastic_biomodel.py @@ -1,5 +1,4 @@ -from typing import Callable, Union -from casadi import MX, SX +from typing import Callable import numpy as np from ..protocols.biomodel import BioModel @@ -27,14 +26,23 @@ class StochasticBioModel(BioModel): def stochastic_dynamics(self, q, qdot, tau, ref, k, with_noise=True): """The stochastic dynamics that should be applied to the model""" - def compute_torques_from_noise_and_feedback(self, sensory_noise_mx, k_matrix, sensory_input, ref): - """Compute the torques from the sensory feedback""" + def compute_torques_from_noise_and_feedback( + self, nlp, time, states, controls, parameters, algebraic_states, sensory_noise, motor_noise + ): + """Compute the torques from the noises, feedbacks and feedforwards""" + + def sensory_reference(self, time, states, controls, parameters, algebraic_states, nlp): + """Compute the sensory reference""" @staticmethod def reshape_to_matrix(var, shape): """ Restore the matrix form of the variables """ + + if var.shape[0] != shape[0] * shape[1]: + raise RuntimeError(f"Cannot reshape: the variable shape is {var.shape} and the expected shape is {shape}") + shape_0, shape_1 = shape if isinstance(var, np.ndarray): matrix = np.zeros((shape_0, shape_1)) @@ -50,9 +58,20 @@ def reshape_to_cholesky_matrix(var, shape): """ Restore the lower diagonal matrix form of the variables vector """ - shape_0, _ = shape matrix = type(var).zeros(shape_0, shape_0) + + i = 0 + for s0 in range(shape_0): + for s1 in range(s0 + 1): + matrix[s1, s0] = var[i] + i += 1 + + if var.shape[0] != i: + raise RuntimeError( + f"Cannot reshape: the variable shape is {var.shape} and the expected shape is the number of element in a triangular matrix of size {shape_0}, which means {i} elements" + ) + i = 0 for s0 in range(shape_0): for s1 in range(s0 + 1): 
diff --git a/bioptim/models/utils.py b/bioptim/models/utils.py index d1b8fef1a..696e06aff 100644 --- a/bioptim/models/utils.py +++ b/bioptim/models/utils.py @@ -4,59 +4,53 @@ def _dof_mapping(key, model, mapping: BiMapping = None) -> dict: if key == "q": - return _q_mapping(model, mapping) + if model.nb_quaternions > 0 and mapping is not None: + if "q" in mapping and "qdot" not in mapping: + raise RuntimeError( + "It is not possible to provide a q_mapping but not a qdot_mapping if the model has quaternions" + ) + return _var_mapping(key, range(model.nb_q), mapping) + elif key == "q_joints": + if model.nb_quaternions > 0 and mapping is not None: + if "q_joints" in mapping and "qdot_joints" not in mapping: + raise RuntimeError( + "It is not possible to provide a q_joints_mapping but not a qdot_joints_mapping if the model has quaternions" + ) + return _var_mapping(key, range(model.nb_q - model.nb_root), mapping) + elif key == "q_roots": + return _var_mapping(key, range(model.nb_root), mapping) elif key == "qdot": - return _qdot_mapping(model, mapping) + if model.nb_quaternions > 0 and mapping is not None: + if "qdot" in mapping and "q" not in mapping: + raise RuntimeError( + "It is not possible to provide a qdot_mapping but not a q_mapping if the model has quaternions" + ) + return _var_mapping(key, range(model.nb_qdot), mapping) + elif key == "qdot_joints": + if model.nb_quaternions > 0 and mapping is not None: + if "qdot_joints" in mapping and "q_joints" not in mapping: + raise RuntimeError( + "It is not possible to provide a qdot_joints_mapping but not a q_joints_mapping if the model has quaternions" + ) + return _var_mapping(key, range(model.nb_qdot - model.nb_root), mapping) + elif key == "qdot_roots": + return _var_mapping(key, range(model.nb_root), mapping) elif key == "qddot": - return _qddot_mapping(model, mapping) + return _var_mapping(key, range(model.nb_qdot), mapping) + elif key == "qddot_joints": + return _var_mapping(key, range(model.nb_qdot - model.nb_root), mapping) else: raise NotImplementedError("Wrong dof mapping") -def _q_mapping(model, mapping: BiMapping = None) -> dict: +def _var_mapping(key, range_for_mapping, mapping: BiMapping = None) -> dict: """ - This function returns a standard mapping for the q states if None - and checks if the model has quaternions + This function returns a standard mapping for the variable key if None. 
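# With no user-provided mapping, _var_mapping simply returns an identity BiMapping over
# the range that _dof_mapping passes for the requested key. A small sketch (a model with
# nb_q == 3 is assumed):
from bioptim.models.utils import _var_mapping

mapping = _var_mapping("q", range(3))
# mapping is {"q": BiMapping(range(3), range(3))}, i.e. every dof maps onto itself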
""" if mapping is None: mapping = {} - if model.nb_quaternions > 0: - if "q" in mapping and "qdot" not in mapping: - raise RuntimeError( - "It is not possible to provide a q_mapping but not a qdot_mapping if the model have quaternion" - ) - elif "q" not in mapping and "qdot" in mapping: - raise RuntimeError( - "It is not possible to provide a qdot_mapping but not a q_mapping if the model have quaternion" - ) - if "q" not in mapping: - mapping["q"] = BiMapping(range(model.nb_q), range(model.nb_q)) - return mapping - - -def _qdot_mapping(model, mapping: BiMapping = None) -> dict: - """ - This function returns a standard mapping for the qdot states if None - and checks if the model has quaternions - """ - if mapping is None: - mapping = {} - if "qdot" not in mapping: - mapping["qdot"] = BiMapping(range(model.nb_qdot), range(model.nb_qdot)) - - return mapping - - -def _qddot_mapping(model, mapping: BiMapping = None) -> dict: - """ - This function returns a standard mapping for the qddot states if None - and checks if the model has quaternions - """ - if mapping is None: - mapping = {} - if "qddot" not in mapping: - mapping["qddot"] = BiMapping(range(model.nb_qddot), range(model.nb_qddot)) - + if key not in mapping: + mapping[key] = BiMapping(range_for_mapping, range_for_mapping) return mapping diff --git a/bioptim/optimization/non_linear_program.py b/bioptim/optimization/non_linear_program.py index 0076ccb56..d18269ee7 100644 --- a/bioptim/optimization/non_linear_program.py +++ b/bioptim/optimization/non_linear_program.py @@ -1,7 +1,7 @@ from typing import Callable, Any import casadi -from casadi import SX, MX, Function, horzcat +from casadi import SX, MX, Function, horzcat, vertcat from .optimization_variable import OptimizationVariable, OptimizationVariableContainer from ..dynamics.ode_solver import OdeSolver @@ -10,6 +10,7 @@ from ..misc.options import OptionList from ..misc.mapping import NodeMapping from ..dynamics.dynamics_evaluation import DynamicsEvaluation +from ..dynamics.dynamics_functions import DynamicsFunctions from ..models.protocols.biomodel import BioModel from ..models.protocols.holonomic_biomodel import HolonomicBioModel from ..models.protocols.variational_biomodel import VariationalBioModel @@ -499,3 +500,66 @@ def to_casadi_func(name, symbolic_expression: MX | SX | Callable, *all_param, ex ) return func.expand() if expand else func + + def node_time(self, node_idx: int): + """ + Gives the time for a specific index + + Parameters + ---------- + node_idx: int + Index of the node + + Returns + ------- + The time for a specific index + """ + if node_idx < 0 or node_idx > self.ns: + return ValueError(f"node_index out of range [0:{self.ns}]") + return self.tf / self.ns * node_idx + + def get_var_from_states_or_controls( + self, key: str, states: MX.sym, controls: MX.sym, algebraic_states: MX.sym = None + ) -> MX: + """ + This function returns the requested variable from the states, controls, or algebraic_states. + If the variable is present in more than one type of variables, it returns the following priority: + 1) states + 2) controls + 3) algebraic_states + If the variable is split in its roots and joints components, it returns the concatenation of for the states, + and only the joints for the controls. + Please note that this function is not meant to be used by the user directly, but should be an internal function. 
+ + Parameters + ---------- + key: str + The name of the variable to return + states: MX.sym + The states + controls: MX.sym + The controls + algebraic_states: MX.sym + The algebraic_states + """ + if key in self.states: + out_nlp, out_var = (self.states[key], states) + out = DynamicsFunctions.get(out_nlp, out_var) + elif f"{key}_roots" in self.states and f"{key}_joints" in self.states: + out_roots_nlp, out_roots_var = (self.states[f"{key}_roots"], states) + out_roots = DynamicsFunctions.get(out_roots_nlp, out_roots_var) + out_joints_nlp, out_joints_var = (self.states[f"{key}_joints"], states) + out_joints = DynamicsFunctions.get(out_joints_nlp, out_joints_var) + out = vertcat(out_roots, out_joints) + elif key in self.controls: + out_nlp, out_var = (self.controls[key], controls) + out = DynamicsFunctions.get(out_nlp, out_var) + elif f"{key}_joints" in self.controls: + out_joints_nlp, out_joints_var = (self.controls[f"{key}_joints"], controls) + out = DynamicsFunctions.get(out_joints_nlp, out_joints_var) + elif key in self.algebraic_states: + out_nlp, out_var = (self.algebraic_states[key], algebraic_states) + out = DynamicsFunctions.get(out_nlp, out_var) + else: + raise RuntimeError(f"{key} not found in states or controls") + return out diff --git a/bioptim/optimization/optimal_control_program.py b/bioptim/optimization/optimal_control_program.py index a634f8f05..baf11115a 100644 --- a/bioptim/optimization/optimal_control_program.py +++ b/bioptim/optimization/optimal_control_program.py @@ -1010,12 +1010,20 @@ def update_bounds( raise RuntimeError("x_bounds should be built from a BoundsList") origin_phase = 0 if len(x_bounds) == 1 else i for key in x_bounds[origin_phase].keys(): + if key not in self.nlp[i].states.keys() + ["None"]: + raise ValueError( + f"{key} is not a state variable, please check for typos in the declaration of x_bounds" + ) self.nlp[i].x_bounds.add(key, x_bounds[origin_phase][key], phase=0) if u_bounds is not None: if not isinstance(u_bounds, BoundsList): raise RuntimeError("u_bounds should be built from a BoundsList") for key in u_bounds.keys(): + if key not in self.nlp[i].controls.keys() + ["None"]: + raise ValueError( + f"{key} is not a control variable, please check for typos in the declaration of u_bounds" + ) origin_phase = 0 if len(u_bounds) == 1 else i self.nlp[i].u_bounds.add(key, u_bounds[origin_phase][key], phase=0) @@ -1023,6 +1031,10 @@ def update_bounds( if not isinstance(a_bounds, BoundsList): raise RuntimeError("a_bounds should be built from a BoundsList") for key in a_bounds.keys(): + if key not in self.nlp[i].algebraic_states.keys() + ["None"]: + raise ValueError( + f"{key} is not an algebraic variable, please check for typos in the declaration of a_bounds" + ) origin_phase = 0 if len(a_bounds) == 1 else i self.nlp[i].a_bounds.add(key, a_bounds[origin_phase][key], phase=0) @@ -1030,6 +1042,10 @@ def update_bounds( if not isinstance(parameter_bounds, BoundsList): raise RuntimeError("parameter_bounds should be built from a BoundsList") for key in parameter_bounds.keys(): + if key not in self.parameters.keys() + ["None"]: + raise ValueError( + f"{key} is not a parameter variable, please check for typos in the declaration of parameter_bounds" + ) self.parameter_bounds.add(key, parameter_bounds[key], phase=0) for nlp in self.nlp: @@ -1068,6 +1084,10 @@ def update_initial_guess( raise RuntimeError("x_init should be built from a InitialGuessList") origin_phase = 0 if len(x_init) == 1 else i for key in x_init[origin_phase].keys(): + if key not in 
self.nlp[i].states.keys() + ["None"]: + raise ValueError( + f"{key} is not a state variable, please check for typos in the declaration of x_init" + ) if ( not self.nlp[i].ode_solver.is_direct_collocation and x_init[origin_phase].type == InterpolationType.ALL_POINTS @@ -1080,6 +1100,10 @@ def update_initial_guess( raise RuntimeError("u_init should be built from a InitialGuessList") origin_phase = 0 if len(u_init) == 1 else i for key in u_init.keys(): + if key not in self.nlp[i].controls.keys() + ["None"]: + raise ValueError( + f"{key} is not a control variable, please check for typos in the declaration of u_init" + ) if ( not self.nlp[i].ode_solver.is_direct_collocation and x_init[origin_phase].type == InterpolationType.ALL_POINTS @@ -1092,12 +1116,20 @@ def update_initial_guess( raise RuntimeError("a_init should be built from a InitialGuessList") origin_phase = 0 if len(a_init) == 1 else i for key in a_init[origin_phase].keys(): + if key not in self.nlp[i].algebraic_states.keys() + ["None"]: + raise ValueError( + f"{key} is not an algebraic variable, please check for typos in the declaration of a_init" + ) self.nlp[i].a_init.add(key, a_init[origin_phase][key], phase=0) if parameter_init is not None: if not isinstance(parameter_init, InitialGuessList): raise RuntimeError("parameter_init should be built from a InitialGuessList") for key in parameter_init.keys(): + if key not in self.parameters.keys() + ["None"]: + raise ValueError( + f"{key} is not a parameter variable, please check for typos in the declaration of parameter_init" + ) self.parameter_init.add(key, parameter_init[key], phase=0) def add_plot(self, fig_name: str, update_function: Callable, phase: int = -1, **parameters: Any): diff --git a/bioptim/optimization/optimization_variable.py b/bioptim/optimization/optimization_variable.py index 538d19d39..7427cd384 100644 --- a/bioptim/optimization/optimization_variable.py +++ b/bioptim/optimization/optimization_variable.py @@ -136,6 +136,14 @@ def cx_end(self): ) return self.parent_list.cx_end[self.index, :] + @property + def cx_intermediates_list(self): + """ + The cx of all elements together (starting point) + """ + + return [collocation_point[self.index, :] for collocation_point in self.parent_list.cx_intermediates_list] + class OptimizationVariableList: """ diff --git a/bioptim/optimization/optimization_vector.py b/bioptim/optimization/optimization_vector.py index 07a234b70..084ed3c5a 100644 --- a/bioptim/optimization/optimization_vector.py +++ b/bioptim/optimization/optimization_vector.py @@ -263,7 +263,6 @@ def init_vector(ocp): init = _dispatch_state_initial_guess( nlp, nlp.states, nlp.x_init, nlp.x_scaling, lambda n: nlp.n_states_decision_steps(n) ) - v_init = np.concatenate((v_init, init)) # For controls diff --git a/bioptim/optimization/problem_type.py b/bioptim/optimization/problem_type.py index 4cc66cdf7..dae38fe4e 100644 --- a/bioptim/optimization/problem_type.py +++ b/bioptim/optimization/problem_type.py @@ -1,3 +1,6 @@ +import numpy as np + + class SocpType: """ Selection of the type of optimization problem to be solved. 
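# The key validation added to update_bounds/update_initial_guess above turns a silent
# typo into an explicit error. A sketch (the bounds shape and the pre-built torque-driven
# ocp are assumptions):
import numpy as np
from bioptim import BoundsList

x_bounds = BoundsList()
x_bounds.add("qdott", min_bound=np.zeros((4, 1)), max_bound=np.zeros((4, 1)))  # typo for "qdot"
# ocp.update_bounds(x_bounds=x_bounds) would now raise
# ValueError: qdott is not a state variable, please check for typos in the declaration of x_bounds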
@@ -43,6 +46,30 @@ class COLLOCATION: The method to use during the collocation integration """ - def __init__(self, polynomial_degree: int = 4, method: str = "legendre"): + def __init__( + self, + polynomial_degree: int = 4, + method: str = "legendre", + auto_initialization: bool = False, + initial_cov: np.ndarray = None, + ): + """ + Parameters + ---------- + polynomial_degree: int + The order of the polynomial to use during the collocation integration + method: str + The method to use during the collocation integration (Legendre or Radau) + auto_initialization: bool + If True, the initial guess of the states and controls are automatically initialized + initial_cov: np.ndarray + The initial covariance matrix of the states and controls + """ self.polynomial_degree = polynomial_degree self.method = method + self.auto_initialization = auto_initialization + if auto_initialization and initial_cov is None: + raise RuntimeError( + "To initialize automatically the values of the stochastic variables, you need to provide the value of the covariance matrix at the first node (initial_cov)." + ) + self.initial_cov = initial_cov diff --git a/bioptim/optimization/solution/solution.py b/bioptim/optimization/solution/solution.py index 4606deb89..3e15eafa7 100644 --- a/bioptim/optimization/solution/solution.py +++ b/bioptim/optimization/solution/solution.py @@ -738,9 +738,9 @@ def _states_for_phase_integration( s = PenaltyHelpers.states( penalty, 0, - lambda p, n, sn: decision_algebraic_states[p][n][:, sn] - if n < len(decision_algebraic_states[p]) - else np.ndarray((0, 1)), + lambda p, n, sn: ( + decision_algebraic_states[p][n][:, sn] if n < len(decision_algebraic_states[p]) else np.ndarray((0, 1)) + ), ) dx = penalty.function[-1](t0, dt, x, u, params, s) @@ -1043,23 +1043,23 @@ def _get_penalty_cost(self, nlp, penalty): x = PenaltyHelpers.states( penalty, idx, - lambda p_idx, n_idx, sn_idx: merged_x[p_idx][n_idx][:, sn_idx] - if n_idx < len(merged_x[p_idx]) - else np.array(()), + lambda p_idx, n_idx, sn_idx: ( + merged_x[p_idx][n_idx][:, sn_idx] if n_idx < len(merged_x[p_idx]) else np.array(()) + ), ) u = PenaltyHelpers.controls( penalty, idx, - lambda p_idx, n_idx, sn_idx: merged_u[p_idx][n_idx][:, sn_idx] - if n_idx < len(merged_u[p_idx]) - else np.array(()), + lambda p_idx, n_idx, sn_idx: ( + merged_u[p_idx][n_idx][:, sn_idx] if n_idx < len(merged_u[p_idx]) else np.array(()) + ), ) a = PenaltyHelpers.states( penalty, idx, - lambda p_idx, n_idx, sn_idx: merged_a[p_idx][n_idx][:, sn_idx] - if n_idx < len(merged_a[p_idx]) - else np.array(()), + lambda p_idx, n_idx, sn_idx: ( + merged_a[p_idx][n_idx][:, sn_idx] if n_idx < len(merged_a[p_idx]) else np.array(()) + ), ) weight = PenaltyHelpers.weight(penalty) target = PenaltyHelpers.target(penalty, idx) @@ -1170,7 +1170,7 @@ def print_penalty_list(nlp, penalties, print_only_weighted): if print_only_weighted: print(f"{penalty.type}: {val_weighted}") else: - print(f"{penalty.type}: {val_weighted} (non weighted {val: .2f})") + print(f"{penalty.type}: {val_weighted} (non weighted {val})") return running_total diff --git a/bioptim/optimization/stochastic_optimal_control_program.py b/bioptim/optimization/stochastic_optimal_control_program.py index a03823a05..9edef225d 100644 --- a/bioptim/optimization/stochastic_optimal_control_program.py +++ b/bioptim/optimization/stochastic_optimal_control_program.py @@ -1,8 +1,11 @@ from typing import Callable import sys +import numpy as np +from casadi import DM_eye, vertcat, Function import pickle +from .optimization_vector 
import OptimizationVectorHelper from .non_linear_program import NonLinearProgram as NLP from ..dynamics.configure_problem import DynamicsList, Dynamics from ..dynamics.ode_solver import OdeSolver @@ -17,7 +20,10 @@ from ..limits.multinode_constraint import MultinodeConstraintList, MultinodeConstraintFcn from ..limits.multinode_objective import MultinodeObjectiveList from ..limits.objective_functions import ObjectiveList, Objective, ParameterObjectiveList +from ..limits.constraints import ConstraintFunction from ..limits.path_conditions import BoundsList +from ..limits.path_conditions import InitialGuessList, InitialGuess +from ..limits.penalty_controller import PenaltyController from ..limits.path_conditions import InitialGuessList from ..misc.enums import PhaseDynamics, InterpolationType from ..misc.__version__ import __version__ @@ -295,6 +301,243 @@ def _prepare_stochastic_dynamics_collocation(self, constraints, phase_transition if len(self.nlp) > 1 and i_phase < len(self.nlp) - 1: phase_transition.add(PhaseTransitionFcn.COVARIANCE_CONTINUOUS, phase_pre_idx=i_phase) + def _auto_initialize(self, x_init, u_init, parameter_init, a_init): + def replace_initial_guess(key, n_var, var_init, a_init, i_phase): + if n_var != 0: + if key in a_init: + a_init[key] = InitialGuess(var_init, interpolation=InterpolationType.EACH_FRAME, phase=i_phase) + else: + a_init.add(key, initial_guess=var_init, interpolation=InterpolationType.EACH_FRAME, phase=i_phase) + + def get_ref_init(time_vector, x_guess, u_guess, p_guess, nlp): + casadi_func = Function( + "sensory_reference", + [nlp.dt_mx, nlp.time_mx, nlp.states.mx, nlp.controls.mx, nlp.parameters.mx], + [ + nlp.model.sensory_reference( + time=nlp.time_mx, + states=nlp.states.mx, + controls=nlp.controls.mx, + parameters=nlp.parameters.mx, + algebraic_states=None, # Sensory reference should not depend on stochastic variables + nlp=nlp, + ) + ], + ) + + x_guess = x_guess[:, 0 :: (self.problem_type.polynomial_degree + 2)] + for i in range(nlp.ns + 1): + ref_init_this_time = casadi_func( + time_vector[-1] / nlp.ns, + time_vector[i], + x_guess[:, i], + u_guess[:, i], + p_guess, + ) + ref_init = ref_init_this_time if i == 0 else np.hstack((ref_init, ref_init_this_time)) + return ref_init + + def get_m_init(time_vector, x_guess, u_guess, p_guess, fake_algebraic_states, nlp, variable_sizes, Fdz, Gdz): + m_init = np.zeros((variable_sizes["n_m"], nlp.ns + 1)) + for i in range(nlp.ns): + index_this_time = [ + i * (self.problem_type.polynomial_degree + 2) + j + for j in range(self.problem_type.polynomial_degree + 2) + ] + df_dz = Fdz( + vertcat(time_vector[i], time_vector[i + 1] - time_vector[i]), + x_guess[:, index_this_time[0]], + x_guess[:, index_this_time[1:]], + u_guess[:, i], + p_guess, + fake_algebraic_states[:, i], + ) + dg_dz = Gdz( + vertcat(time_vector[i], time_vector[i + 1] - time_vector[i]), + x_guess[:, index_this_time[0]], + x_guess[:, index_this_time[1:]], + u_guess[:, i], + p_guess, + fake_algebraic_states[:, i], + ) + + m_this_time = df_dz @ np.linalg.inv(dg_dz) + m_init[:, i] = np.reshape(StochasticBioModel.reshape_to_vector(m_this_time), (-1,)) + + m_init[:, -1] = m_init[ + :, -2 + ] # Wrong, but necessary since we do not have the collocation points at the last node + return m_init + + def get_cov_init( + time_vector, + x_guess, + u_guess, + p_guess, + fake_algebraic_states, + nlp, + variable_sizes, + m_init, + Gdx, + Gdw, + initial_covariance, + ): + sigma_w_dm = vertcat(nlp.model.motor_noise_magnitude, nlp.model.sensory_noise_magnitude) * DM_eye( 
+ vertcat(nlp.model.motor_noise_magnitude, nlp.model.sensory_noise_magnitude).shape[0] + ) + + cov_init = np.zeros((variable_sizes["n_cov"], nlp.ns + 1)) + cov_init[:, 0] = np.reshape(StochasticBioModel.reshape_to_vector(initial_covariance), (-1,)) + for i in range(nlp.ns): + index_this_time = [ + i * (self.problem_type.polynomial_degree + 2) + j + for j in range(self.problem_type.polynomial_degree + 2) + ] + dg_dx = Gdx( + vertcat(time_vector[i], time_vector[i + 1] - time_vector[i]), + x_guess[:, index_this_time[0]], + x_guess[:, index_this_time[1:]], + u_guess[:, i], + p_guess, + fake_algebraic_states[:, i], + ) + dg_dw = Gdw( + vertcat(time_vector[i], time_vector[i + 1] - time_vector[i]), + x_guess[:, index_this_time[0]], + x_guess[:, index_this_time[1:]], + u_guess[:, i], + p_guess, + fake_algebraic_states[:, i], + ) + + m_matrix = StochasticBioModel.reshape_to_matrix(m_init[:, i], nlp.model.matrix_shape_m) + cov_matrix = StochasticBioModel.reshape_to_matrix(cov_init[:, i], nlp.model.matrix_shape_cov) + cov_this_time = m_matrix @ (dg_dx @ cov_matrix @ dg_dx.T + dg_dw @ sigma_w_dm @ dg_dw.T) @ m_matrix.T + cov_init[:, i + 1] = np.reshape(StochasticBioModel.reshape_to_vector(cov_this_time), (-1)) + return cov_init + + if not isinstance(self.phase_time, list): + phase_time = [self.phase_time] + else: + phase_time = self.phase_time + + if x_init.type != InterpolationType.ALL_POINTS: + raise RuntimeError( + "To initialize automatically the stochastic variables, you need to provide an x_init of type InterpolationType.ALL_POINTS" + ) + if u_init.type != InterpolationType.EACH_FRAME: + raise RuntimeError( + "To initialize automatically the stochastic variables, you need to provide an u_init of type InterpolationType.EACH_FRAME" + ) + + # concatenate parameters into a single vector + p_guess = np.zeros((0, 1)) + for key in self.parameters.keys(): + p_guess = np.concatenate((p_guess, parameter_init[key].init), axis=0) + + for i_phase, nlp in enumerate(self.nlp): + time_vector = np.linspace(0, phase_time[i_phase], nlp.ns + 1) + n_ref = nlp.model.n_references + n_k = nlp.model.matrix_shape_k[0] * nlp.model.matrix_shape_k[1] + n_m = nlp.model.matrix_shape_m[0] * nlp.model.matrix_shape_m[1] + n_cov = nlp.model.matrix_shape_cov[0] * nlp.model.matrix_shape_cov[1] + n_stochastic = n_ref + n_k + n_m + n_cov + variable_sizes = { + "n_ref": n_ref, + "n_k": n_k, + "n_m": n_m, + "n_cov": n_cov, + "n_stochastic": n_stochastic, + } + + # concatenate x_init into a single matrix + x_guess = np.zeros((0, (self.problem_type.polynomial_degree + 2) * nlp.ns + 1)) + for key in x_init[i_phase].keys(): + x_guess = np.concatenate((x_guess, x_init[i_phase][key].init), axis=0) + + # concatenate u_init into a single matrix + if nlp.control_type == ControlType.CONSTANT: + u_guess = np.zeros((0, nlp.ns)) + elif ( + nlp.control_type == ControlType.LINEAR_CONTINUOUS + or nlp.control_type == ControlType.CONSTANT_WITH_LAST_NODE + ): + u_guess = np.zeros((0, nlp.ns + 1)) + elif nlp.control_type == ControlType.NONE: + u_guess = np.zeros((0, 0)) + else: + raise RuntimeError( + "The automatic initialization of stochastic variables is not implemented yet for nlp with control_type other than ControlType.CONSTANT, ControlType.CONSTANT_WITH_LAST_NODE, ControlType.LINEAR_CONTINUOUS or ControlType.NONE." 
+ ) + for key in u_init[i_phase].keys(): + u_guess = np.concatenate((u_guess, u_init[i_phase][key].init), axis=0) + if u_guess.shape[1] == nlp.ns: + u_guess = np.concatenate((u_guess, u_init[i_phase][key].init[:, -1].reshape(-1, 1)), axis=1) + + k_init = np.ones((n_k, nlp.ns + 1)) * 0.01 + replace_initial_guess("k", n_k, k_init, a_init, i_phase) + + ref_init = get_ref_init(time_vector, x_guess, u_guess, p_guess, nlp) + replace_initial_guess("ref", n_ref, ref_init, a_init, i_phase) + + # Define the casadi functions needed to initialize m and cov + penalty = Constraint(ConstraintFcn.STOCHASTIC_HELPER_MATRIX_COLLOCATION) + fake_algebraic_states = np.zeros((variable_sizes["n_stochastic"], nlp.ns + 1)) + fake_algebraic_states[: variable_sizes["n_k"], :] = k_init + fake_algebraic_states[variable_sizes["n_k"] : variable_sizes["n_k"] + variable_sizes["n_ref"], :] = ref_init + penalty_controller = PenaltyController( + ocp=self, + nlp=nlp, + t=time_vector, + x=x_guess, + u=u_guess, + x_scaled=[], + u_scaled=[], + p=p_guess, + a=fake_algebraic_states, + a_scaled=[], + node_index=0, + ) + _, _, Gdx, Gdz, Gdw, Fdz = ConstraintFunction.Functions.collocation_jacobians(penalty, penalty_controller) + + m_init = get_m_init( + time_vector, x_guess, u_guess, p_guess, fake_algebraic_states, nlp, variable_sizes, Fdz, Gdz + ) + replace_initial_guess("m", n_m, m_init, a_init, i_phase) + + if i_phase == 0: + initial_covariance = self.problem_type.initial_cov + else: + initial_covariance = cov_init[:, -1] + cov_init = get_cov_init( + time_vector, + x_guess, + u_guess, + p_guess, + fake_algebraic_states, + nlp, + variable_sizes, + m_init, + Gdx, + Gdw, + initial_covariance, + ) + replace_initial_guess("cov", n_cov, cov_init, a_init, i_phase) + + def _prepare_bounds_and_init( + self, x_bounds, u_bounds, parameter_bounds, a_bounds, x_init, u_init, parameter_init, a_init + ): + self.parameter_bounds = BoundsList() + self.parameter_init = InitialGuessList() + + if isinstance(self.problem_type, SocpType.COLLOCATION) and self.problem_type.auto_initialization == True: + self._auto_initialize(x_init, u_init, parameter_init, a_init) + + self.update_bounds(x_bounds, u_bounds, parameter_bounds, a_bounds) + self.update_initial_guess(x_init, u_init, parameter_init, a_init) + # Define the actual NLP problem + OptimizationVectorHelper.declare_ocp_shooting_points(self) + @staticmethod def load(file_path: str) -> list: """ diff --git a/bioptim/optimization/variational_optimal_control_program.py b/bioptim/optimization/variational_optimal_control_program.py index 5c34ed995..25d4ac5ef 100644 --- a/bioptim/optimization/variational_optimal_control_program.py +++ b/bioptim/optimization/variational_optimal_control_program.py @@ -1,6 +1,7 @@ """ Optimal control program with the variational integrator for the dynamics. 
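# The auto-initialization path added above is requested through the problem type alone.
# A sketch of the corresponding declaration (the 8-state size and the covariance
# magnitude are assumptions):
import numpy as np
from bioptim import SocpType

problem_type = SocpType.COLLOCATION(
    polynomial_degree=3,
    method="legendre",
    auto_initialization=True,      # triggers _auto_initialize() before solving
    initial_cov=np.eye(8) * 1e-4,  # covariance at the first node, mandatory in this mode
)
# The x_init given to the program must then use InterpolationType.ALL_POINTS and u_init
# InterpolationType.EACH_FRAME, as enforced in _auto_initialize above.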
""" + import numpy as np from casadi import MX, Function, vertcat diff --git a/tests/shard1/test__global_plots.py b/tests/shard1/test__global_plots.py index 6465428cc..a5dd26c01 100644 --- a/tests/shard1/test__global_plots.py +++ b/tests/shard1/test__global_plots.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import io import sys import os @@ -260,14 +261,14 @@ def override_penalty(pen: list[PenaltyOption]): "\n" "---- COST FUNCTION VALUES ----\n" "PHASE 0\n" - "Lagrange.MINIMIZE_CONTROL: 120.0 (non weighted 60.00)\n" + "Lagrange.MINIMIZE_CONTROL: 120.0 (non weighted 60.0)\n" "\n" "PHASE 1\n" - "MultinodeObjectiveFcn.CUSTOM: 6.0 (non weighted 3.00)\n" - "Lagrange.MINIMIZE_CONTROL: 180.0 (non weighted 90.00)\n" + "MultinodeObjectiveFcn.CUSTOM: 6.0 (non weighted 3.0)\n" + "Lagrange.MINIMIZE_CONTROL: 180.0 (non weighted 90.0)\n" "\n" "PHASE 2\n" - "Lagrange.MINIMIZE_CONTROL: 120.0 (non weighted 60.00)\n" + "Lagrange.MINIMIZE_CONTROL: 120.0 (non weighted 60.0)\n" "\n" "Sum cost functions: 426.0\n" "------------------------------\n" diff --git a/tests/shard1/test__run_examples.py b/tests/shard1/test__run_examples.py index 14762b006..6b4a9f917 100644 --- a/tests/shard1/test__run_examples.py +++ b/tests/shard1/test__run_examples.py @@ -1,6 +1,7 @@ """ Test for file IO """ + from sys import platform diff --git a/tests/shard1/test_acados_interface.py b/tests/shard1/test_acados_interface.py index 73103ee7a..d8c3158b1 100644 --- a/tests/shard1/test_acados_interface.py +++ b/tests/shard1/test_acados_interface.py @@ -3,6 +3,7 @@ It tests the results of an optimal control problem with acados regarding the proper functioning of : - the handling of mayer and lagrange obj """ + import os import shutil import pytest @@ -736,10 +737,6 @@ def test_acados_bounds_not_implemented(failing): n_cycles = 3 window_len = 5 window_duration = 0.2 - x_init = InitialGuessList() - x_init["final"] = np.zeros((nq * 2, 1)) - u_init = InitialGuessList() - u_init["final"] = np.zeros((ntau, 1)) if failing == "u_bounds": x_bounds = BoundsList() x_bounds.add("q", min_bound=np.zeros((nq, 1)), max_bound=np.zeros((nq, 1))) @@ -769,8 +766,6 @@ def test_acados_bounds_not_implemented(failing): Dynamics(DynamicsFcn.TORQUE_DRIVEN, expand_dynamics=True), window_len, window_duration, - x_init=x_init, - u_init=u_init, x_bounds=x_bounds, u_bounds=u_bounds, n_threads=4, diff --git a/tests/shard1/test_biorbd_multi_model.py b/tests/shard1/test_biorbd_multi_model.py index f5c0563fc..0ffd2bf36 100644 --- a/tests/shard1/test_biorbd_multi_model.py +++ b/tests/shard1/test_biorbd_multi_model.py @@ -106,7 +106,7 @@ def test_biorbd_model(): TestUtils.assert_equal( # one of the last ouput of BiorbdModel which is not a MX but a biorbd object - models.homogeneous_matrices_in_global(np.array([1, 2, 3]), 0, 0).to_mx(), + models.biorbd_homogeneous_matrices_in_global(np.array([1, 2, 3]), 0, 0), np.array( [ [1.0, 0.0, 0.0, 0.0], @@ -371,10 +371,10 @@ def test_biorbd_model(): np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), ) - q_mapping = models._q_mapping(variable_mappings) - qdot_mapping = models._q_mapping(variable_mappings) - qddot_mapping = models._q_mapping(variable_mappings) - tau_mapping = models._q_mapping(variable_mappings) + q_mapping = models._var_mapping("q", None, variable_mappings) + qdot_mapping = models._var_mapping("qdot", None, variable_mappings) + qddot_mapping = models._var_mapping("qddot", None, variable_mappings) + tau_mapping = models._var_mapping("tau", None, variable_mappings) np.testing.assert_equal(q_mapping["q"].to_first.map_idx, [0, 1, 2, 
3, 4, 5]) np.testing.assert_equal(qdot_mapping["qdot"].to_first.map_idx, [0, 1, 2, 3, 4, 5]) diff --git a/tests/shard1/test_dynamics.py b/tests/shard1/test_dynamics.py index 368c471ce..9f98694d3 100644 --- a/tests/shard1/test_dynamics.py +++ b/tests/shard1/test_dynamics.py @@ -59,7 +59,7 @@ def test_torque_driven(with_contact, with_external_force, cx, rigidbody_dynamics nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() external_forces = ( [ @@ -300,7 +300,7 @@ def test_torque_driven_implicit(with_contact, cx, phase_dynamics): nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT @@ -374,7 +374,7 @@ def test_torque_driven_soft_contacts_dynamics(with_contact, cx, implicit_contact nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT @@ -448,7 +448,7 @@ def test_torque_derivative_driven(with_contact, with_external_force, cx, phase_d nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT @@ -680,7 +680,7 @@ def test_torque_derivative_driven_implicit(with_contact, cx, phase_dynamics): nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT @@ -786,7 +786,7 @@ def test_torque_derivative_driven_soft_contacts_dynamics(with_contact, cx, impli nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT @@ -972,7 +972,7 @@ def test_torque_activation_driven(with_contact, with_external_force, cx, phase_d nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() external_forces = ( [ @@ -1175,7 +1175,7 @@ def test_torque_activation_driven_with_residual_torque( nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() external_forces = ( [ @@ -1362,6 +1362,186 @@ def test_torque_activation_driven_with_residual_torque( ) +@pytest.mark.parametrize("phase_dynamics", [PhaseDynamics.SHARED_DURING_THE_PHASE, PhaseDynamics.ONE_PER_NODE]) +@pytest.mark.parametrize("cx", [MX, SX]) +@pytest.mark.parametrize("with_external_force", [False, True]) +@pytest.mark.parametrize("with_contact", [False, True]) +def test_torque_driven(with_contact, with_external_force, cx, phase_dynamics): + # Prepare the program + nlp = 
NonLinearProgram(phase_dynamics=phase_dynamics) + nlp.model = BiorbdModel( + TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod" + ) + nlp.ns = 5 + nlp.cx = cx + nlp.time_mx = MX.sym("time", 1, 1) + nlp.dt_mx = MX.sym("dt", 1, 1) + nlp.initialize(cx) + + nlp.x_bounds = np.zeros((nlp.model.nb_q * 3, 1)) + nlp.u_bounds = np.zeros((nlp.model.nb_q, 1)) + nlp.x_scaling = VariableScalingList() + nlp.xdot_scaling = VariableScalingList() + nlp.u_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() + + external_forces = ( + [ + [ + [ + "Seg0", + np.array( + [ + 0.374540118847362, + 0.950714306409916, + 0.731993941811405, + 0.598658484197037, + 0.156018640442437, + 0.155994520336203, + ] + ), + ] + ], + [ + [ + "Seg0", + np.array( + [ + 0.058083612168199, + 0.866176145774935, + 0.601115011743209, + 0.708072577796045, + 0.020584494295802, + 0.969909852161994, + ] + ), + ] + ], + [ + [ + "Seg0", + np.array( + [ + 0.832442640800422, + 0.212339110678276, + 0.181824967207101, + 0.183404509853434, + 0.304242242959538, + 0.524756431632238, + ] + ), + ] + ], + [ + [ + "Seg0", + np.array( + [ + 0.431945018642116, + 0.291229140198042, + 0.611852894722379, + 0.139493860652042, + 0.292144648535218, + 0.366361843293692, + ] + ), + ] + ], + [ + [ + "Seg0", + np.array( + [ + 0.456069984217036, + 0.785175961393014, + 0.19967378215836, + 0.514234438413612, + 0.592414568862042, + 0.046450412719998, + ] + ), + ] + ], + ] + if with_external_force + else None + ) + + ocp = OptimalControlProgram(nlp) + nlp.control_type = ControlType.CONSTANT + NonLinearProgram.add( + ocp, + "dynamics_type", + Dynamics( + DynamicsFcn.TORQUE_DRIVEN_FREE_FLOATING_BASE, + with_contact=with_contact, + expand_dynamics=True, + phase_dynamics=phase_dynamics, + external_forces=external_forces, + ), + False, + ) + phase_index = [i for i in range(ocp.n_phases)] + NonLinearProgram.add(ocp, "phase_idx", phase_index, False) + use_states_from_phase_idx = [i for i in range(ocp.n_phases)] + use_states_dot_from_phase_idx = [i for i in range(ocp.n_phases)] + use_controls_from_phase_idx = [i for i in range(ocp.n_phases)] + NonLinearProgram.add(ocp, "use_states_from_phase_idx", use_states_from_phase_idx, False) + NonLinearProgram.add(ocp, "use_states_dot_from_phase_idx", use_states_dot_from_phase_idx, False) + NonLinearProgram.add(ocp, "use_controls_from_phase_idx", use_controls_from_phase_idx, False) + + np.random.seed(42) + if with_external_force: + np.random.rand(nlp.ns, 6) # just not to change the values of the next random values + + # Prepare the dynamics + if phase_dynamics == PhaseDynamics.SHARED_DURING_THE_PHASE and with_external_force: + with pytest.raises( + RuntimeError, + match="Phase 0 has external_forces but the phase_dynamics is PhaseDynamics.SHARED_DURING_THE_PHASE.Please set phase_dynamics=PhaseDynamics.ONE_PER_NODE", + ): + ConfigureProblem.initialize(ocp, nlp) + return + else: + ConfigureProblem.initialize(ocp, nlp) + + # Test the results + states = np.random.rand(nlp.states.shape, nlp.ns) + controls = np.random.rand(nlp.controls.shape, nlp.ns) + params = np.random.rand(nlp.parameters.shape, nlp.ns) + algebraic_states = np.random.rand(nlp.algebraic_states.shape, nlp.ns) + time = np.random.rand(2, 1) + x_out = np.array(nlp.dynamics_func[0](time, states, controls, params, algebraic_states)) + + if with_contact: + contact_out = np.array(nlp.contact_forces_func(time, states, controls, params, algebraic_states)) + if with_external_force: + np.testing.assert_almost_equal( + x_out[:, 
0], + [0.96958463, 0.92187424, 0.38867729, 0.54269608, -1.71642952, -0.28661718, 3.47711038, -2.61110605], + ) + np.testing.assert_almost_equal(contact_out[:, 0], [-12.59366904, 128.27098855, 2.35829492]) + + else: + np.testing.assert_almost_equal( + x_out[:, 0], + [0.61185289, 0.78517596, 0.60754485, 0.80839735, -0.26487805, -0.19004763, 0.53746739, -0.12272266], + ) + np.testing.assert_almost_equal(contact_out[:, 0], [-2.30360748, 127.09143179, 5.05814294]) + + else: + if with_external_force: + np.testing.assert_almost_equal( + x_out[:, 0], + [0.96958463, 0.92187424, 0.38867729, 0.54269608, -0.21234549, -10.23941443, 2.07066831, 31.68033189], + ) + else: + np.testing.assert_almost_equal( + x_out[:, 0], + [0.61185289, 0.78517596, 0.60754485, 0.80839735, 0.04791036, -9.96778948, -0.01986505, 4.39786051], + ) + + @pytest.mark.parametrize("phase_dynamics", [PhaseDynamics.SHARED_DURING_THE_PHASE, PhaseDynamics.ONE_PER_NODE]) @pytest.mark.parametrize("cx", [MX, SX]) @pytest.mark.parametrize("with_external_force", [False, True]) @@ -1386,7 +1566,7 @@ def test_muscle_driven( nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() nlp.phase_idx = 0 external_forces = ( @@ -1979,7 +2159,7 @@ def test_joints_acceleration_driven(cx, rigid_body_dynamics, phase_dynamics): nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT @@ -2063,7 +2243,7 @@ def configure(ocp, nlp, with_contact=None): nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT diff --git a/tests/shard1/test_global_align.py b/tests/shard1/test_global_align.py index ec4e00bd8..a3c599930 100644 --- a/tests/shard1/test_global_align.py +++ b/tests/shard1/test_global_align.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import pytest import os diff --git a/tests/shard1/test_global_mhe.py b/tests/shard1/test_global_mhe.py index 31166a161..2966cf489 100644 --- a/tests/shard1/test_global_mhe.py +++ b/tests/shard1/test_global_mhe.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import numpy as np import pytest diff --git a/tests/shard1/test_prepare_all_examples.py b/tests/shard1/test_prepare_all_examples.py index df86e611c..2a98eda10 100644 --- a/tests/shard1/test_prepare_all_examples.py +++ b/tests/shard1/test_prepare_all_examples.py @@ -1378,13 +1378,13 @@ def test__torque_driven_ocp__track_markers_with_torque_actuators(): ) -def test__torque_driven_ocp__trampo_quaternions(): - from bioptim.examples.torque_driven_ocp import trampo_quaternions as ocp_module +def test__torque_driven_ocp__example_quaternions(): + from bioptim.examples.torque_driven_ocp import example_quaternions as ocp_module bioptim_folder = os.path.dirname(ocp_module.__file__) ocp_module.prepare_ocp( - biorbd_model_path=bioptim_folder + "/models/TruncAnd2Arm_Quaternion.bioMod", + biorbd_model_path=bioptim_folder + "/models/trunk_and_2arm_quaternion.bioMod", n_shooting=5, final_time=0.25, expand_dynamics=False, diff --git a/tests/shard2/test_cost_function_integration.py 
b/tests/shard2/test_cost_function_integration.py index 4f3246f98..9a1734c9c 100644 --- a/tests/shard2/test_cost_function_integration.py +++ b/tests/shard2/test_cost_function_integration.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import sys import io diff --git a/tests/shard2/test_global_inverse_optimal_control.py b/tests/shard2/test_global_inverse_optimal_control.py index 783f056e6..01e8cd1e6 100644 --- a/tests/shard2/test_global_inverse_optimal_control.py +++ b/tests/shard2/test_global_inverse_optimal_control.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os from bioptim import PhaseDynamics, SolutionMerge diff --git a/tests/shard2/test_global_minimize_marker_velocity.py b/tests/shard2/test_global_minimize_marker_velocity.py index 2a473f1db..356a23806 100644 --- a/tests/shard2/test_global_minimize_marker_velocity.py +++ b/tests/shard2/test_global_minimize_marker_velocity.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import pytest import numpy as np from bioptim import ( diff --git a/tests/shard2/test_global_muscle_driven_ocp.py b/tests/shard2/test_global_muscle_driven_ocp.py index 0dd3643e1..3599e8571 100644 --- a/tests/shard2/test_global_muscle_driven_ocp.py +++ b/tests/shard2/test_global_muscle_driven_ocp.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import pytest diff --git a/tests/shard2/test_global_muscle_tracking_0_False.py b/tests/shard2/test_global_muscle_tracking_0_False.py index 5c5d7996f..5af9242ff 100644 --- a/tests/shard2/test_global_muscle_tracking_0_False.py +++ b/tests/shard2/test_global_muscle_tracking_0_False.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import pytest import platform diff --git a/tests/shard2/test_global_muscle_tracking_0_True.py b/tests/shard2/test_global_muscle_tracking_0_True.py index 9418b40b8..c2b5d047f 100644 --- a/tests/shard2/test_global_muscle_tracking_0_True.py +++ b/tests/shard2/test_global_muscle_tracking_0_True.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import pytest import platform diff --git a/tests/shard2/test_global_muscle_tracking_1.py b/tests/shard2/test_global_muscle_tracking_1.py index 7d33745ea..d85156fa3 100644 --- a/tests/shard2/test_global_muscle_tracking_1.py +++ b/tests/shard2/test_global_muscle_tracking_1.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import pytest import platform diff --git a/tests/shard2/test_global_muscle_tracking_2.py b/tests/shard2/test_global_muscle_tracking_2.py index 079287c15..b4358544c 100644 --- a/tests/shard2/test_global_muscle_tracking_2.py +++ b/tests/shard2/test_global_muscle_tracking_2.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import pytest diff --git a/tests/shard2/test_global_nmpc_final.py b/tests/shard2/test_global_nmpc_final.py index 30a5303c3..444ad1e60 100644 --- a/tests/shard2/test_global_nmpc_final.py +++ b/tests/shard2/test_global_nmpc_final.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import platform diff --git a/tests/shard2/test_global_optimal_time.py b/tests/shard2/test_global_optimal_time.py index 29e134bb4..2415c37f8 100644 --- a/tests/shard2/test_global_optimal_time.py +++ b/tests/shard2/test_global_optimal_time.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import platform import pytest diff --git a/tests/shard2/test_global_optimal_time_mayer_min.py b/tests/shard2/test_global_optimal_time_mayer_min.py index eb56d5171..b09550f83 100644 --- a/tests/shard2/test_global_optimal_time_mayer_min.py +++ b/tests/shard2/test_global_optimal_time_mayer_min.py @@ -1,6 +1,7 @@ """ Test for file IO 
""" + import os import platform import pytest diff --git a/tests/shard3/test_global_getting_started.py b/tests/shard3/test_global_getting_started.py index e01c17062..baf949941 100644 --- a/tests/shard3/test_global_getting_started.py +++ b/tests/shard3/test_global_getting_started.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import pickle import re diff --git a/tests/shard3/test_global_symmetrical_torque_driven_ocp.py b/tests/shard3/test_global_symmetrical_torque_driven_ocp.py index 35e089000..860485495 100644 --- a/tests/shard3/test_global_symmetrical_torque_driven_ocp.py +++ b/tests/shard3/test_global_symmetrical_torque_driven_ocp.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import pytest diff --git a/tests/shard3/test_global_torque_driven_ocp.py b/tests/shard3/test_global_torque_driven_ocp.py index 315363f0c..2e968a9ca 100644 --- a/tests/shard3/test_global_torque_driven_ocp.py +++ b/tests/shard3/test_global_torque_driven_ocp.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import pytest import numpy as np @@ -437,7 +438,7 @@ def test_trampo_quaternions(phase_dynamics): bioptim_folder = os.path.dirname(ocp_module.__file__) # Define the problem - model_path = bioptim_folder + "/models/TruncAnd2Arm_Quaternion.bioMod" + model_path = bioptim_folder + "/models/trunk_and_2arm_quaternion.bioMod" final_time = 0.25 n_shooting = 5 @@ -700,21 +701,20 @@ def test_example_multi_biorbd_model(phase_dynamics): ) sol = ocp.solve() - # The subsequent values fail, but I suspect there was a mistake in the original code. To be investigated # # Check objective function value # f = np.array(sol.cost) # np.testing.assert_equal(f.shape, (1, 1)) # np.testing.assert_almost_equal(f[0, 0], 10.697019532108447) - # + # # Check constraints # g = np.array(sol.constraints) # np.testing.assert_equal(g.shape, (240, 1)) # np.testing.assert_almost_equal(g, np.zeros((240, 1)), decimal=6) - # + # # Check some of the results # states = sol.decision_states(to_merge=SolutionMerge.NODES) # controls = sol.decision_controls(to_merge=SolutionMerge.NODES) - # + # # initial and final position # np.testing.assert_almost_equal( # states["q"][:, 0], np.array([-3.14159265, 0.0, 0.0, -3.14159265, 0.0, 0.0]), decimal=6 diff --git a/tests/shard3/test_global_torque_driven_with_contact_ocp.py b/tests/shard3/test_global_torque_driven_with_contact_ocp.py index 3bfdf39fb..561e7553d 100644 --- a/tests/shard3/test_global_torque_driven_with_contact_ocp.py +++ b/tests/shard3/test_global_torque_driven_with_contact_ocp.py @@ -5,6 +5,7 @@ - the contact_forces_inequality constraint - the non_slipping constraint """ + import os import pytest diff --git a/tests/shard3/test_graph.py b/tests/shard3/test_graph.py index 7500e0d9d..8f5db547b 100644 --- a/tests/shard3/test_graph.py +++ b/tests/shard3/test_graph.py @@ -49,7 +49,7 @@ def custom_func_track_markers(controller: PenaltyController, first_marker: str, # noinspection PyTypeChecker model: BiorbdModel = controller.model - markers = controller.mx_to_cx("markers", model.model.markers, controller.states["q"]) + markers = controller.mx_to_cx("markers", model.model.markers, controller.q) return markers[:, marker_1_idx] - markers[:, marker_0_idx] diff --git a/tests/shard3/test_initial_condition.py b/tests/shard3/test_initial_condition.py index 5dbad67af..4881e7ecf 100644 --- a/tests/shard3/test_initial_condition.py +++ b/tests/shard3/test_initial_condition.py @@ -306,8 +306,6 @@ def test_initial_guess_error_messages(phase_dynamics): bioptim_folder = os.path.dirname(ocp_module.__file__) 
biorbd_model_path = bioptim_folder + "/models/pendulum.bioMod" - bio_model = BiorbdModel(biorbd_model_path) - n_q = bio_model.nb_q # Add objective functions objective_functions = Objective(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau") @@ -317,6 +315,7 @@ def test_initial_guess_error_messages(phase_dynamics): # check the error messages with pytest.raises(RuntimeError, match="x_init should be built from a InitialGuessList"): + bio_model = BiorbdModel(biorbd_model_path) OptimalControlProgram( bio_model, dynamics, @@ -325,7 +324,9 @@ def test_initial_guess_error_messages(phase_dynamics): x_init=1, objective_functions=objective_functions, ) + with pytest.raises(RuntimeError, match="u_init should be built from a InitialGuessList"): + bio_model = BiorbdModel(biorbd_model_path) OptimalControlProgram( bio_model, dynamics, @@ -334,3 +335,34 @@ def test_initial_guess_error_messages(phase_dynamics): u_init=1, objective_functions=objective_functions, ) + + with pytest.raises( + ValueError, match="bad_key is not a state variable, please check for typos in the declaration of x_init" + ): + x_init = InitialGuessList() + x_init.add("bad_key", [1, 2]) + bio_model = BiorbdModel(biorbd_model_path) + OptimalControlProgram( + bio_model, + dynamics, + n_shooting=5, + phase_time=1, + x_init=x_init, + objective_functions=objective_functions, + ) + + del bio_model # This is to fix a memory bug + with pytest.raises( + ValueError, match="bad_key is not a control variable, please check for typos in the declaration of u_init" + ): + u_init = InitialGuessList() + u_init.add("bad_key", [1, 2]) + bio_model = BiorbdModel(biorbd_model_path) + OptimalControlProgram( + bio_model, + dynamics, + n_shooting=5, + phase_time=1, + u_init=u_init, + objective_functions=objective_functions, + ) diff --git a/tests/shard3/test_ligaments.py b/tests/shard3/test_ligaments.py index 74f31be2a..56d888f77 100644 --- a/tests/shard3/test_ligaments.py +++ b/tests/shard3/test_ligaments.py @@ -48,7 +48,7 @@ def test_torque_driven_with_ligament(with_ligament, cx, phase_dynamics): nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() nlp.x_bounds = np.zeros((nlp.model.nb_q * 3, 1)) nlp.u_bounds = np.zeros((nlp.model.nb_q, 1)) @@ -110,7 +110,7 @@ def test_torque_derivative_driven_with_ligament(with_ligament, cx, phase_dynamic nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() nlp.x_bounds = np.zeros((nlp.model.nb_q * 3, 1)) nlp.u_bounds = np.zeros((nlp.model.nb_q, 1)) @@ -174,7 +174,7 @@ def test_torque_activation_driven_with_ligament(with_ligament, cx, phase_dynamic nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() nlp.x_bounds = np.zeros((nlp.model.nb_q * 2, 1)) nlp.u_bounds = np.zeros((nlp.model.nb_q, 1)) ocp = OptimalControlProgram(nlp) @@ -236,7 +236,7 @@ def test_muscle_driven_with_ligament(with_ligament, cx, phase_dynamics): nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() nlp.x_bounds = np.zeros((nlp.model.nb_q * 2 + nlp.model.nb_muscles, 1)) nlp.u_bounds = 
np.zeros((nlp.model.nb_muscles, 1)) diff --git a/tests/shard3/test_muscle_driven_ocp_implicit.py b/tests/shard3/test_muscle_driven_ocp_implicit.py index 63a821b8b..f675c2495 100644 --- a/tests/shard3/test_muscle_driven_ocp_implicit.py +++ b/tests/shard3/test_muscle_driven_ocp_implicit.py @@ -1,6 +1,7 @@ """ Test for file IO """ + import os import pytest diff --git a/tests/shard3/test_passive_torque.py b/tests/shard3/test_passive_torque.py index 99ab477ba..9769e5d55 100644 --- a/tests/shard3/test_passive_torque.py +++ b/tests/shard3/test_passive_torque.py @@ -49,7 +49,7 @@ def test_torque_driven_with_passive_torque(with_passive_torque, cx, rigidbody_dy nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() nlp.x_bounds = np.zeros((nlp.model.nb_q * 3, 1)) nlp.u_bounds = np.zeros((nlp.model.nb_q, 1)) @@ -134,7 +134,7 @@ def test_torque_derivative_driven_with_passive_torque(with_passive_torque, cx, p nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() nlp.x_bounds = np.zeros((nlp.model.nb_q * 3, 1)) nlp.u_bounds = np.zeros((nlp.model.nb_q, 1)) @@ -346,7 +346,7 @@ def test_muscle_driven_with_passive_torque(with_passive_torque, rigidbody_dynami nlp.x_scaling = VariableScalingList() nlp.xdot_scaling = VariableScalingList() nlp.u_scaling = VariableScalingList() - nlp.s_scaling = VariableScalingList() + nlp.a_scaling = VariableScalingList() nlp.x_bounds = np.zeros((nlp.model.nb_q * 2 + nlp.model.nb_muscles, 1)) nlp.u_bounds = np.zeros((nlp.model.nb_muscles, 1)) diff --git a/tests/shard4/test_penalty.py b/tests/shard4/test_penalty.py index 5ab753000..bad2f18c5 100644 --- a/tests/shard4/test_penalty.py +++ b/tests/shard4/test_penalty.py @@ -1186,7 +1186,7 @@ def test_penalty_constraint_total_time(value, phase_dynamics): @pytest.mark.parametrize("value", [0.1, -10]) def test_penalty_custom(penalty_origin, value, phase_dynamics): def custom(controller: PenaltyController, mult): - my_values = controller.states["q"].cx_start * mult + my_values = controller.q.cx_start * mult return my_values ocp = prepare_test_ocp(phase_dynamics=phase_dynamics) @@ -1268,7 +1268,7 @@ def custom_with_mult(ocp, nlp, t, x, u, p, mult): @pytest.mark.parametrize("value", [0.1, -10]) def test_penalty_custom_with_bounds(value, phase_dynamics): def custom_with_bounds(controller: PenaltyController): - return -10, controller.states["q"].cx_start, 10 + return -10, controller.q.cx_start, 10 ocp = prepare_test_ocp(phase_dynamics=phase_dynamics) t = [0] @@ -1290,7 +1290,7 @@ def custom_with_bounds(controller: PenaltyController): @pytest.mark.parametrize("value", [0.1, -10]) def test_penalty_custom_with_bounds_failing_min_bound(value, phase_dynamics): def custom_with_bounds(controller: PenaltyController): - return -10, controller.states["q"].cx_start, 10 + return -10, controller.q.cx_start, 10 ocp = prepare_test_ocp(phase_dynamics=phase_dynamics) t = [0] @@ -1313,7 +1313,7 @@ def custom_with_bounds(controller: PenaltyController): @pytest.mark.parametrize("value", [0.1, -10]) def test_penalty_custom_with_bounds_failing_max_bound(value, phase_dynamics): def custom_with_bounds(controller: PenaltyController): - return -10, controller.states["q"].cx_start, 10 + return -10, controller.q.cx_start, 10 ocp = prepare_test_ocp(phase_dynamics=phase_dynamics) t = [0] 
diff --git a/tests/shard4/test_update_bounds_and_init.py b/tests/shard4/test_update_bounds_and_init.py index e243fcda9..3c31e0779 100644 --- a/tests/shard4/test_update_bounds_and_init.py +++ b/tests/shard4/test_update_bounds_and_init.py @@ -92,6 +92,34 @@ def test_double_update_bounds_and_init(phase_dynamics): ocp.update_bounds(x_init, u_init) with pytest.raises(RuntimeError, match="u_bounds should be built from a BoundsList"): ocp.update_bounds(None, u_init) + with pytest.raises( + ValueError, match="bad_key is not a state variable, please check for typos in the declaration of x_bounds" + ): + x_bounds = BoundsList() + x_bounds.add("bad_key", [1, 2]) + ocp.update_bounds(x_bounds, u_bounds) + with pytest.raises( + ValueError, match="bad_key is not a control variable, please check for typos in the declaration of u_bounds" + ): + x_bounds = BoundsList() + x_bounds["q"] = -np.ones((nq, 1)), np.ones((nq, 1)) + u_bounds = BoundsList() + u_bounds.add("bad_key", [1, 2]) + ocp.update_bounds(x_bounds, u_bounds) + with pytest.raises( + ValueError, match="bad_key is not a state variable, please check for typos in the declaration of x_init" + ): + x_init = InitialGuessList() + x_init.add("bad_key", [1, 2]) + ocp.update_initial_guess(x_init, u_init) + with pytest.raises( + ValueError, match="bad_key is not a control variable, please check for typos in the declaration of u_init" + ): + x_init = InitialGuessList() + x_init["q"] = 0.5 * np.ones((nq, 1)) + u_init = InitialGuessList() + u_init.add("bad_key", [1, 2]) + ocp.update_initial_guess(x_init, u_init) @pytest.mark.parametrize("phase_dynamics", [PhaseDynamics.SHARED_DURING_THE_PHASE, PhaseDynamics.ONE_PER_NODE]) diff --git a/tests/shard5/test_global_stochastic_collocation.py b/tests/shard5/test_global_stochastic_collocation.py index ad206ae8c..22af2e7aa 100644 --- a/tests/shard5/test_global_stochastic_collocation.py +++ b/tests/shard5/test_global_stochastic_collocation.py @@ -3,7 +3,7 @@ import numpy as np from casadi import DM, vertcat -from bioptim import Solver, SocpType, SolutionMerge +from bioptim import Solver, SocpType, SolutionMerge, PenaltyHelpers @pytest.mark.parametrize("use_sx", [False, True]) @@ -45,7 +45,7 @@ def test_arm_reaching_torque_driven_collocations(use_sx: bool): # Check objective function value f = np.array(sol.cost) np.testing.assert_equal(f.shape, (1, 1)) - np.testing.assert_almost_equal(f[0, 0], 426.8457209111154) + np.testing.assert_almost_equal(f[0, 0], 426.84572091057413) # Check constraints g = np.array(sol.constraints) @@ -71,6 +71,343 @@ def test_arm_reaching_torque_driven_collocations(use_sx: bool): np.testing.assert_almost_equal(ref[:, 0], np.array([2.81907786e-02, 2.84412560e-01, 0, 0])) + np.testing.assert_almost_equal( + m[:10, 0], + np.array( + [ + 1.00000000e00, + 0.0, + 0.0, + 0.0, + 0.0, + 1.00000000e00, + 0.0, + 0.0, + 0.0, + 0.0, + ] + ), + ) + + # TODO: cov is still too sensitive to be properly tested this way. 
We probably need to test it otherwise + np.testing.assert_almost_equal( + cov[:, -1], + np.array( + [ + -0.89353026, + -0.66059229, + -0.39031482, + -0.31941486, + -0.66059229, + -0.44897437, + -0.12298423, + -0.30298653, + -0.39031482, + -0.12298423, + -0.36377371, + 0.05702737, + -0.31941486, + -0.30298653, + 0.05702737, + -0.24391646, + ] + ), + decimal=1, + ) + + # Test the automatic initialization of the stochastic variables + socp = ocp_module.prepare_socp( + biorbd_model_path=bioptim_folder + "/models/LeuvenArmModel.bioMod", + final_time=final_time, + n_shooting=n_shooting, + polynomial_degree=3, + hand_final_position=hand_final_position, + motor_noise_magnitude=motor_noise_magnitude, + sensory_noise_magnitude=sensory_noise_magnitude, + q_opt=q, + qdot_opt=qdot, + tau_opt=tau, + ) + + # Solver parameters + solver = Solver.IPOPT(show_online_optim=False) + solver.set_nlp_scaling_method("none") + solver.set_maximum_iterations(0) + solver.set_bound_frac(1e-8) + solver.set_bound_push(1e-8) + + sol_socp = socp.solve(solver) + + # Check some of the results + states = sol_socp.decision_states(to_merge=SolutionMerge.NODES) + q_sol, qdot_sol = states["q"], states["qdot"] + states_sol = np.zeros((4, 5, 5)) + for i in range(4): + states_sol[:, :, i] = sol_socp.decision_states(to_merge=SolutionMerge.KEYS)[i] + states_sol[:, 0, 4] = np.reshape(sol_socp.decision_states(to_merge=SolutionMerge.KEYS)[4], (4,)) + + controls = sol_socp.decision_controls(to_merge=SolutionMerge.NODES) + tau_sol = controls["tau"] + controls_sol = sol_socp.decision_controls(to_merge=SolutionMerge.ALL) + + algebraic_states = sol_socp.decision_algebraic_states(to_merge=SolutionMerge.NODES) + k_sol, ref_sol, m_sol, cov_sol = ( + algebraic_states["k"], + algebraic_states["ref"], + algebraic_states["m"], + algebraic_states["cov"], + ) + algebraic_sol = sol_socp.decision_algebraic_states(to_merge=SolutionMerge.ALL) + + duration = sol_socp.decision_time()[-1] + dt = duration / n_shooting + p_sol = vertcat(ocp.nlp[0].model.motor_noise_magnitude, ocp.nlp[0].model.sensory_noise_magnitude) + polynomial_degree = socp.nlp[0].ode_solver.polynomial_degree + + # Constraint values + x_opt = vertcat(q_sol, qdot_sol) + x_sol = np.zeros((x_opt.shape[0], polynomial_degree + 2, socp.n_shooting)) + for i_node in range(socp.n_shooting): + x_sol[:, :, i_node] = x_opt[:, i_node * (polynomial_degree + 2) : (i_node + 1) * (polynomial_degree + 2)] + a_sol = vertcat(k_sol, ref_sol, m_sol, cov_sol) + + x_multi_thread = np.zeros((socp.nlp[0].states.shape * (polynomial_degree + 3), socp.nlp[0].ns)) + for i_node in range(socp.nlp[0].ns): + for i_state in range(socp.nlp[0].states.shape): + x_multi_thread[i_state, i_node] = x_sol[i_state, 0, i_node] + for i_coll in range(1, polynomial_degree + 3): + x_multi_thread[i_coll * socp.nlp[0].states.shape + i_state, i_node] = x_sol[i_state, i_coll - 1, i_node] + + # Initial posture + penalty = socp.nlp[0].g[0] + x = PenaltyHelpers.states( + penalty, + 0, + lambda p_idx, n_idx, sn_idx: states_sol[:, sn_idx, n_idx], + ) + u = PenaltyHelpers.controls( + penalty, + 0, + lambda p_idx, n_idx, sn_idx: controls_sol[:, n_idx], + ) + a = PenaltyHelpers.states( + penalty, + 0, + lambda p_idx, n_idx, sn_idx: algebraic_sol[:, n_idx], + ) + shoulder_pos_initial = 0.349065850398866 + elbow_pos_initial = 2.245867726451909 + constraint_value = penalty.function[0]( + duration, + dt, + x, + u, + p_sol, + a, + ) + np.testing.assert_almost_equal(constraint_value[0], shoulder_pos_initial, decimal=6) + 
np.testing.assert_almost_equal(constraint_value[1], elbow_pos_initial, decimal=6) + + # Initial and final velocities + penalty = socp.nlp[0].g[1] + x = PenaltyHelpers.states( + penalty, + 0, + lambda p_idx, n_idx, sn_idx: states_sol[:, sn_idx, n_idx], + ) + u = PenaltyHelpers.controls( + penalty, + 0, + lambda p_idx, n_idx, sn_idx: controls_sol[:, n_idx], + ) + a = PenaltyHelpers.states( + penalty, + 0, + lambda p_idx, n_idx, sn_idx: algebraic_sol[:, n_idx], + ) + constraint_value = penalty.function[0]( + duration, + dt, + x, + u, + p_sol, + a, + ) + np.testing.assert_almost_equal(constraint_value[0], 0, decimal=6) + np.testing.assert_almost_equal(constraint_value[1], 0, decimal=6) + + penalty = socp.nlp[0].g[2] + x = states_sol[:, 0, -1] + u = controls_sol[:, -1] + a = algebraic_sol[:, -1] + constraint_value = penalty.function[-1]( + duration, + dt, + x, + u, + p_sol, + a, + ) + np.testing.assert_almost_equal(constraint_value[0], 0, decimal=6) + np.testing.assert_almost_equal(constraint_value[1], 0, decimal=6) + + # Hand final marker position + penalty = socp.nlp[0].g[4] + x = states_sol[:, 0, -1] + u = controls_sol[:, -1] + a = algebraic_sol[:, -1] + constraint_value = penalty.function[-1]( + duration, + dt, + x, + u, + p_sol, + a, + ) + np.testing.assert_almost_equal(constraint_value[0], hand_final_position[0], decimal=6) + np.testing.assert_almost_equal(constraint_value[1], hand_final_position[1], decimal=6) + + # Reference equals mean sensory input + penalty = socp.nlp[0].g[7] + for i_node in range(socp.n_shooting): + x = PenaltyHelpers.states( + penalty, + 0, + lambda p_idx, n_idx, sn_idx: states_sol[:, sn_idx, n_idx], + ) + u = PenaltyHelpers.controls( + penalty, + 0, + lambda p_idx, n_idx, sn_idx: controls_sol[:, n_idx], + ) + a = PenaltyHelpers.states( + penalty, + 0, + lambda p_idx, n_idx, sn_idx: algebraic_sol[:, n_idx], + ) + constraint_value = penalty.function[i_node]( + duration, + dt, + x, + u, + p_sol, + a, + ) + np.testing.assert_almost_equal(constraint_value, np.zeros(constraint_value.shape), decimal=6) + + # Constraint on M -------------------------------------------------------------------- + penalty = socp.nlp[0].g[8] + for i_node in range(socp.n_shooting): + x = PenaltyHelpers.states( + penalty, + i_node, + lambda p_idx, n_idx, sn_idx: states_sol[:, sn_idx, n_idx], + ) + u = PenaltyHelpers.controls( + penalty, + i_node, + lambda p_idx, n_idx, sn_idx: controls_sol[:, n_idx], + ) + a = PenaltyHelpers.states( + penalty, + i_node, + lambda p_idx, n_idx, sn_idx: algebraic_sol[:, n_idx], + ) + constraint_value = penalty.function[i_node]( + duration, + dt, + x, + u, + p_sol, + a, + ) + np.testing.assert_almost_equal(constraint_value, np.zeros(constraint_value.shape), decimal=6) + + # Covariance continuity -------------------------------------------------------------------- + penalty = socp.nlp[0].g[9] + for i_node in range(socp.n_shooting): + x = PenaltyHelpers.states( + penalty, + i_node, + lambda p_idx, n_idx, sn_idx: states_sol[:, sn_idx, n_idx], + ) + u = PenaltyHelpers.controls( + penalty, + i_node, + lambda p_idx, n_idx, sn_idx: controls_sol[:, n_idx], + ) + a = PenaltyHelpers.states( + penalty, + i_node, + lambda p_idx, n_idx, sn_idx: algebraic_sol[:, n_idx], + ) + + constraint_value = penalty.function[0]( + duration, + dt, + x, + u, + p_sol, + a, + ) + np.testing.assert_almost_equal(constraint_value, np.zeros(constraint_value.shape), decimal=6) + + # States continuity -------------------------------------------------------------------- + penalty = 
socp.nlp[0].g_internal[0] + for i_node in range(socp.n_shooting): + x = PenaltyHelpers.states( + socp.nlp[0].g[9], + i_node, + lambda p_idx, n_idx, sn_idx: states_sol[:, sn_idx, n_idx], + ) + u = PenaltyHelpers.controls( + socp.nlp[0].g[9], + i_node, + lambda p_idx, n_idx, sn_idx: controls_sol[:, n_idx], + ) + a = PenaltyHelpers.states( + socp.nlp[0].g[9], + i_node, + lambda p_idx, n_idx, sn_idx: algebraic_sol[:, n_idx], + ) + constraint_value = penalty.function[0]( + duration, + dt, + x, + u, + p_sol, + a, + ) + np.testing.assert_almost_equal(constraint_value, np.zeros(constraint_value.shape), decimal=6) + + # First collocation state is equal to the states at node + penalty = socp.nlp[0].g_internal[1] + for i_node in range(socp.n_shooting): + x = PenaltyHelpers.states( + penalty, + i_node, + lambda p_idx, n_idx, sn_idx: states_sol[:, sn_idx, n_idx], + ) + u = PenaltyHelpers.controls( + penalty, + i_node, + lambda p_idx, n_idx, sn_idx: controls_sol[:, n_idx], + ) + a = PenaltyHelpers.states( + penalty, + i_node, + lambda p_idx, n_idx, sn_idx: algebraic_sol[:, n_idx], + ) + constraint_value = penalty.function[i_node]( + duration, + dt, + x, + u, + p_sol, + a, + ) + np.testing.assert_almost_equal(constraint_value, np.zeros(constraint_value.shape), decimal=6) + @pytest.mark.parametrize("use_sx", [False, True]) def test_obstacle_avoidance_direct_collocation(use_sx: bool): diff --git a/tests/shard6/test_global_stochastic_except_collocation.py b/tests/shard6/test_global_stochastic_except_collocation.py index b7af77acf..345409a45 100644 --- a/tests/shard6/test_global_stochastic_except_collocation.py +++ b/tests/shard6/test_global_stochastic_except_collocation.py @@ -4,7 +4,7 @@ import platform import numpy as np -from casadi import DM, vertcat, Function +from casadi import DM, vertcat from bioptim import Solver, SolutionMerge from bioptim.examples.stochastic_optimal_control.arm_reaching_torque_driven_implicit import ExampleType @@ -256,118 +256,6 @@ def test_arm_reaching_muscle_driven(use_sx): ), ) - # np.testing.assert_almost_equal( - # cov[:, -2], - # np.array( - # [ - # 0.00033791, - # 0.00039624, - # 0.00070543, - # 0.00124988, - # 0.00021535, - # 0.00029579, - # 0.00024912, - # 0.00028454, - # 0.00025029, - # 0.00030357, - # 0.00039624, - # 0.00061519, - # 0.00019818, - # 0.00228786, - # 0.00029938, - # 0.00042956, - # 0.00038645, - # 0.00039457, - # 0.00036173, - # 0.00042616, - # 0.00070543, - # 0.00019818, - # 0.00482193, - # -0.00067968, - # 0.00027328, - # 0.00027578, - # 0.00012372, - # 0.00035437, - # 0.00024831, - # 0.00035016, - # 0.00124988, - # 0.00228786, - # -0.00067968, - # 0.01031238, - # 0.00110132, - # 0.00158725, - # 0.00147344, - # 0.00143574, - # 0.00134504, - # 0.00155263, - # 0.00021535, - # 0.00029938, - # 0.00027328, - # 0.00110132, - # 0.00015521, - # 0.00021834, - # 0.00019183, - # 0.00020435, - # 0.00018451, - # 0.00021946, - # 0.00029579, - # 0.00042956, - # 0.00027578, - # 0.00158725, - # 0.00021834, - # 0.00031178, - # 0.00027831, - # 0.00028783, - # 0.00026257, - # 0.00031046, - # 0.00024912, - # 0.00038645, - # 0.00012372, - # 0.00147344, - # 0.00019183, - # 0.00027831, - # 0.00025442, - # 0.00025227, - # 0.00023393, - # 0.00027342, - # 0.00028454, - # 0.00039457, - # 0.00035437, - # 0.00143574, - # 0.00020435, - # 0.00028783, - # 0.00025227, - # 0.00026958, - # 0.00024298, - # 0.00028959, - # 0.00025029, - # 0.00036173, - # 0.00024831, - # 0.00134504, - # 0.00018451, - # 0.00026257, - # 0.00023393, - # 0.00024298, - # 0.00022139, - # 0.00026183, - # 
0.00030357, - # 0.00042616, - # 0.00035016, - # 0.00155263, - # 0.00021946, - # 0.00031046, - # 0.00027342, - # 0.00028959, - # 0.00026183, - # 0.00031148, - # ] - # ), - # ) - - # simulate - # TestUtils.simulate(sol) # TODO: charbie -> fix this - # for now, it does not match because the integration is done in the multinode_constraint - @pytest.mark.parametrize("use_sx", [True, False]) def test_arm_reaching_torque_driven_explicit(use_sx): @@ -529,50 +417,6 @@ def test_arm_reaching_torque_driven_explicit(use_sx): ), ) - # np.testing.assert_almost_equal( - # cov[:, -2], - # np.array( - # [ - # 3.04928811e-02, - # -4.37121214e-02, - # 1.14814524e-01, - # -1.66441847e-01, - # -5.31760888e-04, - # -5.31760888e-04, - # -4.37121214e-02, - # 1.21941013e-01, - # -1.65522823e-01, - # 4.54983180e-01, - # 1.77217039e-03, - # 1.77217039e-03, - # 1.14814524e-01, - # -1.65522823e-01, - # 6.31786758e-01, - # -8.93221670e-01, - # -2.17528809e-03, - # -2.17528809e-03, - # -1.66441847e-01, - # 4.54983180e-01, - # -8.93221670e-01, - # 2.42721714e00, - # 7.04045031e-03, - # 7.04045031e-03, - # -5.31760888e-04, - # 1.77217039e-03, - # -2.17528809e-03, - # 7.04045031e-03, - # 2.73513461e-05, - # 2.67634623e-05, - # -5.31760888e-04, - # 1.77217039e-03, - # -2.17528809e-03, - # 7.04045031e-03, - # 2.67634623e-05, - # 2.73513461e-05, - # ] - # ), - # ) - @pytest.mark.parametrize("with_cholesky", [True, False]) @pytest.mark.parametrize("with_scaling", [True, False]) @@ -582,6 +426,8 @@ def test_arm_reaching_torque_driven_implicit(with_cholesky, with_scaling, use_sx if with_cholesky and not with_scaling: return + if not with_cholesky and not with_scaling and not use_sx: + return final_time = 0.8 n_shooting = 4 @@ -645,28 +491,28 @@ def test_arm_reaching_torque_driven_implicit(with_cholesky, with_scaling, use_sx ) if not with_scaling: # Check objective function value - np.testing.assert_almost_equal(f[0, 0], 62.61240153041182) + np.testing.assert_almost_equal(f[0, 0], 62.83236488441526) # detailed cost values - np.testing.assert_almost_equal(sol.detailed_cost[0]["cost_value_weighted"], 62.41253120355753) - np.testing.assert_almost_equal(sol.detailed_cost[1]["cost_value_weighted"], 0.19987032685429304) + np.testing.assert_almost_equal(sol.detailed_cost[0]["cost_value_weighted"], 62.48137304816964) + np.testing.assert_almost_equal(sol.detailed_cost[1]["cost_value_weighted"], 0.3509918362456358) np.testing.assert_almost_equal( f[0, 0], sum(sol.detailed_cost[i]["cost_value_weighted"] for i in range(len(sol.detailed_cost))) ) # initial and final position np.testing.assert_almost_equal(q[:, 0], np.array([0.34906585, 2.24586773])) - np.testing.assert_almost_equal(q[:, -1], np.array([0.92560992, 1.29037324])) + np.testing.assert_almost_equal(q[:, -1], np.array([0.9256103, 1.29037205])) np.testing.assert_almost_equal(qdot[:, 0], np.array([0, 0])) np.testing.assert_almost_equal(qdot[:, -1], np.array([0, 0])) - np.testing.assert_almost_equal(tau[:, 0], np.array([0.42126532, -0.30424813])) - np.testing.assert_almost_equal(tau[:, -2], np.array([-0.3933987, 0.36251051])) + np.testing.assert_almost_equal(tau[:, 0], np.array([0.41942813, -0.29886019])) + np.testing.assert_almost_equal(tau[:, -2], np.array([-0.39449672, 0.36921743])) np.testing.assert_almost_equal( k[:, 0], np.array( - [-0.0770916, 0.24594264, 0.14356716, -0.37903073, 0.02556642, -0.33600195, -0.09768757, 0.21875505] + [-0.0790218, 0.27128222, 0.15890697, -0.49504993, 0.0463122, -0.3824336, -0.08291488, 0.1947862] ), ) np.testing.assert_almost_equal(ref[:, 0], 
np.array([2.81907786e-02, 2.84412560e-01, 0, 0])) @@ -674,22 +520,22 @@ def test_arm_reaching_torque_driven_implicit(with_cholesky, with_scaling, use_sx m[:, 0], np.array( [ - 1.11117933e00, - -9.89790360e-05, - -6.36575900e-03, - 9.15679149e-03, - 3.42798480e-04, - 1.11078623e00, - -3.11148477e-02, - 2.94613721e-02, - -1.22442829e-02, - -3.62937369e-04, - 1.10217942e00, - 3.23426016e-02, - -7.49239001e-05, - -1.20983199e-02, - 6.87328702e-03, - 1.08858017e00, + 1.11119915e00, + -1.44056147e-04, + -7.92387508e-03, + 1.29653660e-02, + 4.03350767e-04, + 1.11072947e00, + -3.63028044e-02, + 3.43506625e-02, + -1.22287288e-02, + -4.03446951e-04, + 1.10058636e00, + 3.63086964e-02, + -5.38166455e-05, + -1.21464871e-02, + 4.84391756e-03, + 1.09318297e00, ] ), ) @@ -698,22 +544,22 @@ def test_arm_reaching_torque_driven_implicit(with_cholesky, with_scaling, use_sx cov[:, -2], np.array( [ - 6.52095128e-05, - 8.58550134e-05, - -3.43580868e-05, - 3.04961560e-04, - 8.58550134e-05, - 7.51879842e-05, - 3.90995716e-05, - 8.48441793e-05, - -3.43580868e-05, - 3.90995716e-05, - -4.29201762e-04, - 1.31706534e-03, - 3.04961560e-04, - 8.48441793e-05, - 1.31706534e-03, - -3.45141868e-03, + -2.44361371e-05, + 5.03988822e-05, + -2.53889736e-04, + 5.54490654e-04, + 5.03988822e-05, + -1.05730935e-04, + 4.57675296e-04, + -1.03369318e-03, + -2.53889736e-04, + 4.57675296e-04, + -4.38051559e-03, + 9.28976812e-03, + 5.54490654e-04, + -1.03369318e-03, + 9.28976812e-03, + -2.03118675e-02, ] ), ) @@ -723,21 +569,21 @@ def test_arm_reaching_torque_driven_implicit(with_cholesky, with_scaling, use_sx np.array( [ 1.00000000e00, - -8.11373860e-26, - -1.67781542e-01, - 3.32131421e-01, - 8.67187637e-27, + 2.83470503e-29, + -2.73371469e-01, + 5.61967286e-01, + -5.54924440e-28, 1.00000000e00, - -7.76947657e-01, - 1.89631798e00, + -1.06373905e00, + 2.76850288e00, -1.00000000e-01, - -9.63358420e-25, - 6.71919094e-01, - 8.86491256e-01, - -4.98418026e-25, + 2.69216079e-27, + 5.97588981e-01, + 1.05005559e00, + -1.54142332e-27, -1.00000000e-01, - 4.44699545e-02, - 8.69525177e-01, + -1.20328930e-02, + 9.99955126e-01, ] ), ) @@ -746,30 +592,30 @@ def test_arm_reaching_torque_driven_implicit(with_cholesky, with_scaling, use_sx c[:, 2], np.array( [ - -3.27251991e-21, - -1.90706757e-20, - -1.35504801e00, - 1.12904779e00, - -8.42798952e-21, - 3.05675532e-20, - 1.12904779e00, - -3.79816715e00, - 8.03040833e-21, - 2.53102785e-19, - 3.01182416e-02, - -9.93007432e-02, - -6.05887964e-20, - 1.18604326e-20, - 6.81924202e-01, - -1.82105153e00, - -4.59863930e-23, - 6.62189569e-24, - 3.54626037e-01, - -5.62181724e-01, - 8.68527931e-24, - 2.81260929e-23, - -2.34792769e-01, - 5.93239215e-01, + -4.51226127e-30, + 8.33773592e-30, + -1.35562569e00, + 1.12573123e00, + 1.73767029e-29, + 1.26613716e-28, + 1.12573123e00, + -3.79219059e00, + -2.46703922e-29, + -2.77757617e-28, + 8.44560218e-02, + -2.14525153e-01, + -2.12049507e-28, + -7.55987586e-28, + 7.46209951e-01, + -2.20583953e00, + -4.98343155e-25, + -4.67005692e-27, + 4.43752625e-01, + -7.50216036e-01, + -1.13596005e-27, + 2.87218037e-27, + -2.08200045e-01, + 5.33395037e-01, ] ), ) @@ -935,7 +781,7 @@ def test_arm_reaching_torque_driven_implicit(with_cholesky, with_scaling, use_sx np.testing.assert_almost_equal( k[:, 0], np.array( - [0.31301854, 0.12182861, 0.19203473, 0.14751018, -0.32685328, -0.08325657, 0.14068481, -0.27395387] + [0.38339153, 0.16410165, 0.24810509, 0.42872769, -0.35368849, -0.10938936, 0.14249199, -0.25350259] ), ) np.testing.assert_almost_equal(ref[:, 0], np.array([2.81907786e-02, 
2.84412560e-01, 0, 0])) @@ -943,22 +789,22 @@ def test_arm_reaching_torque_driven_implicit(with_cholesky, with_scaling, use_sx m[:, 0], np.array( [ - 1.11108266e00, - 4.17488777e-06, - 2.56083176e-03, - -3.75739664e-04, - 1.81096926e-04, - 1.11136793e00, - -1.62987234e-02, - -2.31135787e-02, - -1.24633259e-02, - -5.13628363e-05, - 1.12169933e00, - 4.62265629e-03, - -3.42511466e-06, - -1.24196246e-02, - 3.08259114e-04, - 1.11776621e00, + 1.11109420e00, + -2.00975244e-05, + 1.52182976e-03, + 1.80877721e-03, + 1.76457230e-04, + 1.11160762e00, + -1.58811508e-02, + -4.46861689e-02, + -1.24668133e-02, + -6.45898260e-05, + 1.12201319e00, + 5.81308511e-03, + -7.64514277e-06, + -1.24164397e-02, + 6.88061131e-04, + 1.11747957e00, ] ), ) @@ -967,16 +813,16 @@ def test_arm_reaching_torque_driven_implicit(with_cholesky, with_scaling, use_sx cov[:, -2], np.array( [ - -1.07718751e-02, - -1.99371716e-03, - -5.91375606e-05, - 7.26583106e-03, - 1.90429781e-03, - 1.99709199e-03, - 1.06545354e-02, - 9.52116195e-03, - 1.98084162e-03, - 8.98559860e-04, + -8.67400623e-03, + 5.77329567e-05, + -1.30885973e-03, + 1.12501586e-02, + 4.64929473e-03, + 2.32462786e-03, + 4.92631923e-03, + 3.97615552e-03, + 6.52664876e-03, + -6.66843408e-04, ] ), ) @@ -986,21 +832,21 @@ def test_arm_reaching_torque_driven_implicit(with_cholesky, with_scaling, use_sx np.array( [ 1.00000000e00, - -9.20399499e-14, - -1.61741543e-02, - -3.51768147e-02, - -6.60561379e-15, + -8.35886644e-14, + -2.60671845e-02, + -6.51469362e-02, + -1.10663430e-14, 1.00000000e00, - -2.23124489e-01, - 3.93635442e-01, + -3.68704938e-01, + 7.95318548e-01, -1.00000000e-01, - -1.09518183e-14, - 1.13040430e00, - 6.10450178e-02, - -2.95093488e-14, + -1.65656031e-14, + 1.13051096e00, + 7.33175161e-02, + -1.61686165e-14, -1.00000000e-01, - 5.94097331e-02, - 9.92723714e-01, + 6.34380653e-02, + 9.83897210e-01, ] ), ) @@ -1009,30 +855,30 @@ def test_arm_reaching_torque_driven_implicit(with_cholesky, with_scaling, use_sx c[:, 2], np.array( [ - -2.70484535e-14, - 2.01986142e-14, - -1.35499970e00, - 1.12950727e00, - 6.51869829e-14, - -5.06432738e-14, - 1.12950727e00, - -3.79903120e00, - -6.67237108e-14, - 5.18030287e-14, - -1.31113529e-02, - 6.16616592e-02, - -1.09243748e-13, - 8.40241717e-14, - -4.16479142e-02, - 9.08025965e-02, - 1.60067573e-12, - -2.05218113e-13, - 1.89372425e-02, - 8.54507632e-03, - -2.53289376e-12, - 1.87539717e-12, - 1.30216832e-02, - -2.80276598e-02, + -1.12917038e-15, + 4.53652494e-15, + -1.35499971e00, + 1.12950724e00, + 1.05525107e-14, + -2.09358023e-14, + 1.12950724e00, + -3.79903115e00, + -2.39923923e-14, + 4.58953582e-14, + -2.78756182e-02, + 1.26240135e-01, + -3.27866630e-14, + 7.03268708e-14, + -9.99367009e-02, + 2.09729743e-01, + -3.14543642e-12, + 3.70435383e-12, + 1.91480322e-02, + 8.03625184e-03, + -1.12721323e-12, + 2.00365744e-12, + 1.01115604e-03, + 2.88702060e-03, ] ), ) diff --git a/tests/shard6/test_time_dependent_problems.py b/tests/shard6/test_time_dependent_problems.py index fe4e40c6b..684d2dda0 100644 --- a/tests/shard6/test_time_dependent_problems.py +++ b/tests/shard6/test_time_dependent_problems.py @@ -5,6 +5,7 @@ Note that the final node is not tracked. """ + import os import pytest import platform