Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adjust import statements to match convention #43

Merged
merged 9 commits into from
Jan 12, 2025
55 changes: 26 additions & 29 deletions napytau/core/chi.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,22 +2,19 @@
from napytau.core.polynomials import (
evaluate_differentiated_polynomial_at_measuring_distances,
) # noqa E501
from numpy import ndarray
from numpy import sum
from numpy import mean
from numpy import power
import numpy as np
from scipy import optimize
from scipy.optimize import OptimizeResult
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this does not match the import convention

from typing import Tuple


def chi_squared_fixed_t(
doppler_shifted_intensities: ndarray,
unshifted_intensities: ndarray,
delta_doppler_shifted_intensities: ndarray,
delta_unshifted_intensities: ndarray,
coefficients: ndarray,
distances: ndarray,
doppler_shifted_intensities: np.ndarray,
unshifted_intensities: np.ndarray,
delta_doppler_shifted_intensities: np.ndarray,
delta_unshifted_intensities: np.ndarray,
coefficients: np.ndarray,
distances: np.ndarray,
t_hyp: float,
weight_factor: float,
) -> float:
Expand Down Expand Up @@ -47,14 +44,14 @@ def chi_squared_fixed_t(
"""

# Compute the difference between Doppler-shifted intensities and polynomial model
shifted_intensity_difference: ndarray = (
shifted_intensity_difference: np.ndarray = (
doppler_shifted_intensities
- evaluate_polynomial_at_measuring_distances(distances, coefficients)
) / delta_doppler_shifted_intensities

# Compute the difference between unshifted intensities and
# scaled derivative of the polynomial model
unshifted_intensity_difference: ndarray = (
unshifted_intensity_difference: np.ndarray = (
unshifted_intensities
- (
t_hyp
Expand All @@ -65,24 +62,24 @@ def chi_squared_fixed_t(
) / delta_unshifted_intensities

# combine the weighted sum of squared differences
result: float = sum(
(power(shifted_intensity_difference, 2))
+ (weight_factor * (power(unshifted_intensity_difference, 2)))
result: float = np.sum(
(np.power(shifted_intensity_difference, 2))
+ (weight_factor * (np.power(unshifted_intensity_difference, 2)))
)

return result


def optimize_coefficients(
doppler_shifted_intensities: ndarray,
unshifted_intensities: ndarray,
delta_doppler_shifted_intensities: ndarray,
delta_unshifted_intensities: ndarray,
initial_coefficients: ndarray,
distances: ndarray,
doppler_shifted_intensities: np.ndarray,
unshifted_intensities: np.ndarray,
delta_doppler_shifted_intensities: np.ndarray,
delta_unshifted_intensities: np.ndarray,
initial_coefficients: np.ndarray,
distances: np.ndarray,
t_hyp: float,
weight_factor: float,
) -> Tuple[ndarray, float]:
) -> Tuple[np.ndarray, float]:
"""
Optimizes the polynomial coefficients to minimize the chi-squared function.

Expand Down Expand Up @@ -132,12 +129,12 @@ def optimize_coefficients(


def optimize_t_hyp(
doppler_shifted_intensities: ndarray,
unshifted_intensities: ndarray,
delta_doppler_shifted_intensities: ndarray,
delta_unshifted_intensities: ndarray,
initial_coefficients: ndarray,
distances: ndarray,
doppler_shifted_intensities: np.ndarray,
unshifted_intensities: np.ndarray,
delta_doppler_shifted_intensities: np.ndarray,
delta_unshifted_intensities: np.ndarray,
initial_coefficients: np.ndarray,
distances: np.ndarray,
t_hyp_range: Tuple[float, float],
weight_factor: float,
) -> float:
Expand Down Expand Up @@ -181,7 +178,7 @@ def optimize_t_hyp(
chi_squared_t_hyp,
        # Initial guess for t_hyp. Starting with the mean reduces likelihood of
# biasing the optimization process toward one boundary.
x0=mean(t_hyp_range),
x0=np.mean(t_hyp_range),
bounds=[(t_hyp_range[0], t_hyp_range[1])],
)

Expand Down
73 changes: 37 additions & 36 deletions napytau/core/delta_tau.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,12 @@
evaluate_differentiated_polynomial_at_measuring_distances,
) # noqa E501
from napytau.core.polynomials import evaluate_polynomial_at_measuring_distances
from numpy import array
from numpy import ndarray
from numpy import zeros
from numpy import diag
from numpy import power
from numpy import linalg
import numpy as np


def calculate_jacobian_matrix(distances: ndarray, coefficients: ndarray) -> ndarray:
def calculate_jacobian_matrix(
distances: np.ndarray, coefficients: np.ndarray
) -> np.ndarray:
"""
    Calculates the Jacobian matrix for a set of polynomial coefficients taking
different distances into account.
Expand All @@ -26,20 +23,20 @@ def calculate_jacobian_matrix(distances: ndarray, coefficients: ndarray) -> ndar
"""

# initializes the jacobian matrix
jacobian_matrix: ndarray = zeros((len(distances), len(coefficients)))
jacobian_matrix: np.ndarray = np.zeros((len(distances), len(coefficients)))

epsilon: float = 1e-8 # small disturbance value

# Loop over each coefficient and calculate the partial derivative
for i in range(len(coefficients)):
perturbed_coefficients: ndarray = array(coefficients, dtype=float)
perturbed_coefficients: np.ndarray = np.array(coefficients, dtype=float)
perturbed_coefficients[i] += epsilon # slightly disturb the current coefficient

# Compute the disturbed and original polynomial values at the given distances
perturbed_function: ndarray = evaluate_polynomial_at_measuring_distances(
perturbed_function: np.ndarray = evaluate_polynomial_at_measuring_distances(
distances, perturbed_coefficients
)
original_function: ndarray = evaluate_polynomial_at_measuring_distances(
original_function: np.ndarray = evaluate_polynomial_at_measuring_distances(
distances, coefficients
)

Expand All @@ -53,8 +50,10 @@ def calculate_jacobian_matrix(distances: ndarray, coefficients: ndarray) -> ndar


def calculate_covariance_matrix(
delta_shifted_intensities: ndarray, distances: ndarray, coefficients: ndarray
) -> ndarray:
delta_shifted_intensities: np.ndarray,
distances: np.ndarray,
coefficients: np.ndarray,
) -> np.ndarray:
"""
Computes the covariance matrix for the polynomial coefficients using the
jacobian matrix and a weight matrix derived from the shifted intensities' errors.
Expand All @@ -67,26 +66,26 @@ def calculate_covariance_matrix(
ndarray: The computed covariance matrix for the polynomial coefficients.
"""

jacobian_matrix: ndarray = calculate_jacobian_matrix(distances, coefficients)
jacobian_matrix: np.ndarray = calculate_jacobian_matrix(distances, coefficients)

# Construct the weight matrix from the inverse squared errors
weight_matrix: ndarray = diag(1 / power(delta_shifted_intensities, 2))
weight_matrix: np.ndarray = np.diag(1 / np.power(delta_shifted_intensities, 2))

fit_matrix: ndarray = jacobian_matrix.T @ weight_matrix @ jacobian_matrix
fit_matrix: np.ndarray = jacobian_matrix.T @ weight_matrix @ jacobian_matrix

covariance_matrix: ndarray = linalg.inv(fit_matrix)
covariance_matrix: np.ndarray = np.linalg.inv(fit_matrix)

return covariance_matrix


def calculate_error_propagation_terms(
unshifted_intensities: ndarray,
delta_shifted_intensities: ndarray,
delta_unshifted_intensities: ndarray,
distances: ndarray,
coefficients: ndarray,
unshifted_intensities: np.ndarray,
delta_shifted_intensities: np.ndarray,
delta_unshifted_intensities: np.ndarray,
distances: np.ndarray,
coefficients: np.ndarray,
taufactor: float,
) -> ndarray:
) -> np.ndarray:
"""
    Creates the error propagation term for the polynomial coefficients,
    combining direct errors, polynomial uncertainties, and mixed covariance terms.
Expand All @@ -109,16 +108,16 @@ def calculate_error_propagation_terms(
)
)

gaussian_error_from_unshifted_intensity: ndarray = power(
gaussian_error_from_unshifted_intensity: np.ndarray = np.power(
delta_unshifted_intensities, 2
) / power(
) / np.power(
calculated_differentiated_polynomial_sum_at_measuring_distances,
2,
)

# Initialize the polynomial uncertainty term for second term
delta_p_j_i_squared: ndarray = zeros(len(distances))
covariance_matrix: ndarray = calculate_covariance_matrix(
delta_p_j_i_squared: np.ndarray = np.zeros(len(distances))
covariance_matrix: np.ndarray = calculate_covariance_matrix(
delta_shifted_intensities, distances, coefficients
)

Expand All @@ -127,25 +126,27 @@ def calculate_error_propagation_terms(
for l in range(len(coefficients)): # noqa E741
delta_p_j_i_squared = (
delta_p_j_i_squared
+ power(distances, k) * power(distances, l) * covariance_matrix[k, l]
+ np.power(distances, k)
* np.power(distances, l)
* covariance_matrix[k, l]
)

gaussian_error_from_polynomial_uncertainties: ndarray = (
power(unshifted_intensities, 2)
/ power(
gaussian_error_from_polynomial_uncertainties: np.ndarray = (
np.power(unshifted_intensities, 2)
/ np.power(
calculated_differentiated_polynomial_sum_at_measuring_distances,
4,
)
) * power(delta_p_j_i_squared, 2)
) * np.power(delta_p_j_i_squared, 2)

error_from_covariance: ndarray = (
error_from_covariance: np.ndarray = (
unshifted_intensities * taufactor * delta_p_j_i_squared
) / power(calculated_differentiated_polynomial_sum_at_measuring_distances, 3)
) / np.power(calculated_differentiated_polynomial_sum_at_measuring_distances, 3)

interim_result: ndarray = (
interim_result: np.ndarray = (
gaussian_error_from_unshifted_intensity
+ gaussian_error_from_polynomial_uncertainties
)
errors: ndarray = interim_result + error_from_covariance
errors: np.ndarray = interim_result + error_from_covariance
# Return the sum of all three contributions
return errors
20 changes: 9 additions & 11 deletions napytau/core/polynomials.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,13 @@
from numpy import ndarray
from numpy import power
from numpy import zeros_like
import numpy as np

from napytau.core.errors.polynomial_coefficient_error import (
PolynomialCoefficientError,
)


def evaluate_polynomial_at_measuring_distances(
distances: ndarray, coefficients: ndarray
) -> ndarray:
distances: np.ndarray, coefficients: np.ndarray
) -> np.ndarray:
"""
Computes the sum of a polynomial evaluated at given distance points.

Expand All @@ -29,16 +27,16 @@ def evaluate_polynomial_at_measuring_distances(
)

# Evaluate the polynomial sum at the given time points
sum_at_measuring_distances: ndarray = zeros_like(distances, dtype=float)
sum_at_measuring_distances: np.ndarray = np.zeros_like(distances, dtype=float)
for exponent, coefficient in enumerate(coefficients):
sum_at_measuring_distances += coefficient * power(distances, exponent)
sum_at_measuring_distances += coefficient * np.power(distances, exponent)

return sum_at_measuring_distances


def evaluate_differentiated_polynomial_at_measuring_distances(
distances: ndarray, coefficients: ndarray
) -> ndarray:
distances: np.ndarray, coefficients: np.ndarray
) -> np.ndarray:
"""
Computes the sum of the derivative of a polynomial evaluated
at given distance points.
Expand All @@ -59,13 +57,13 @@ def evaluate_differentiated_polynomial_at_measuring_distances(
"An empty array of coefficients can not be evaluated."
)

sum_of_derivative_at_measuring_distances: ndarray = zeros_like(
sum_of_derivative_at_measuring_distances: np.ndarray = np.zeros_like(
distances, dtype=float
)
for exponent, coefficient in enumerate(coefficients):
if exponent > 0:
sum_of_derivative_at_measuring_distances += (
exponent * coefficient * power(distances, (exponent - 1))
exponent * coefficient * np.power(distances, (exponent - 1))
)

return sum_of_derivative_at_measuring_distances
21 changes: 11 additions & 10 deletions napytau/core/tau.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,21 +3,22 @@
from napytau.core.polynomials import (
evaluate_differentiated_polynomial_at_measuring_distances,
) # noqa E501
from numpy import ndarray

import numpy as np
from typing import Tuple, Optional


def calculate_tau_i_values(
doppler_shifted_intensities: ndarray,
unshifted_intensities: ndarray,
delta_doppler_shifted_intensities: ndarray,
delta_unshifted_intensities: ndarray,
initial_coefficients: ndarray,
distances: ndarray,
doppler_shifted_intensities: np.ndarray,
unshifted_intensities: np.ndarray,
delta_doppler_shifted_intensities: np.ndarray,
delta_unshifted_intensities: np.ndarray,
initial_coefficients: np.ndarray,
distances: np.ndarray,
t_hyp_range: Tuple[float, float],
weight_factor: float,
custom_t_hyp_estimate: Optional[float],
) -> ndarray:
) -> np.ndarray:
"""
Calculates the decay times (tau_i) based on the provided
intensities and time points.
Expand Down Expand Up @@ -61,7 +62,7 @@ def calculate_tau_i_values(
)

# optimize the polynomial coefficients with the optimized t_hyp
optimized_coefficients: ndarray = (
optimized_coefficients: np.ndarray = (
optimize_coefficients(
doppler_shifted_intensities,
unshifted_intensities,
Expand All @@ -75,7 +76,7 @@ def calculate_tau_i_values(
)[0]

# calculate decay times using the optimized coefficients
tau_i_values: ndarray = (
tau_i_values: np.ndarray = (
unshifted_intensities
/ evaluate_differentiated_polynomial_at_measuring_distances(
distances, optimized_coefficients
Expand Down
13 changes: 5 additions & 8 deletions napytau/core/tau_final.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,9 @@
from numpy import ndarray
from numpy import power
from numpy import sum
from numpy import sqrt
import numpy as np
from typing import Tuple


def calculate_tau_final(
tau_i_values: ndarray, delta_tau_i_values: ndarray
tau_i_values: np.ndarray, delta_tau_i_values: np.ndarray
) -> Tuple[float, float]:
"""
Computes the final decay time (tau_final) and its associated uncertainty
Expand All @@ -24,12 +21,12 @@ def calculate_tau_final(
if len(tau_i_values) == 0:
return -1, -1

weights: ndarray = 1 / power(delta_tau_i_values, 2)
weights: np.ndarray = 1 / np.power(delta_tau_i_values, 2)

# Calculate the weighted mean of tau_i
weighted_mean: float = sum(weights * tau_i_values) / sum(weights)
weighted_mean: float = np.sum(weights * tau_i_values) / np.sum(weights)

# Calculate the uncertainty of the weighted mean
uncertainty: float = sqrt(1 / sum(weights))
uncertainty: float = np.sqrt(1 / np.sum(weights))

return weighted_mean, uncertainty
Loading
Loading