diff --git a/jwst/extract_1d/soss_extract/atoca.py b/jwst/extract_1d/soss_extract/atoca.py
index 36fca3b4d3..c3b69f2c89 100644
--- a/jwst/extract_1d/soss_extract/atoca.py
+++ b/jwst/extract_1d/soss_extract/atoca.py
@@ -579,7 +579,6 @@ def get_detector_model(self, data, error):
     def tikho_mat(self):
         """
         Return the Tikhonov matrix.
-        Generate it with `atoca_utils.get_tikho_matrix` method if not defined yet.
         """
         if self._tikho_mat is not None:
             return self._tikho_mat
@@ -900,9 +899,9 @@ def _get_lo_hi(self, grid, wave_p, wave_m, mask):
         grid : array[float]
             Wave_grid to check.
         wave_p : array[float]
-            TODO: add here
+            Wavelengths on the higher side of each pixel.
         wave_m : array[float]
-            TODO: add here
+            Wavelengths on the lower side of each pixel.
 
         Returns
         -------
@@ -985,7 +984,7 @@ def get_w(self, i_order):
         ma = mask_ord[~self.mask]
 
         # Get lo hi
-        lo, hi = self._get_lo_hi(wave_grid, wave_p, wave_m, ma)  # Get indexes
+        lo, hi = self._get_lo_hi(wave_grid, wave_p, wave_m, ma)  # Get indices
 
         # Number of used pixels
         n_i = len(lo)
diff --git a/jwst/extract_1d/soss_extract/atoca_utils.py b/jwst/extract_1d/soss_extract/atoca_utils.py
index d04953c78b..a99577646c 100644
--- a/jwst/extract_1d/soss_extract/atoca_utils.py
+++ b/jwst/extract_1d/soss_extract/atoca_utils.py
@@ -64,8 +64,6 @@ def arange_2d(starts, stops):
 
 def sparse_k(val, k, n_k):
     """
-    TODO: ensure test coverage. Probably sufficient to have test for compute_weights
-    in atoca.py
     Transform a 2D array `val` to a sparse matrix.
 
     Parameters
@@ -380,11 +378,11 @@ def grid_from_map(wave_map, trace_profile, wave_range=None, n_os=1):
         Minimum and maximum boundary of the grid to generate, in microns.
         Wave_range must include some wavelengths of wave_map.
         Note wave_range is exclusive, in the sense that wave_range[0] and wave_range[1]
-        will not be between min(output) and mad(output). Instead, min(output) will be
+        will not be between min(output) and max(output). Instead, min(output) will be
         the smallest value in the extrapolated grid that is greater than wave_range[0]
         and max(output) will be the largest value that is less than wave_range[1].
     n_os : int
-        Oversampling of the grid compare to the pixel sampling.
+        Oversampling of the grid compared to the pixel sampling.
 
     Returns
     -------
@@ -470,9 +468,6 @@ def _trim_grids(all_grids, grid_range):
 def make_combined_adaptive_grid(all_grids, all_estimates, grid_range,
                                 max_iter=10, rtol=10e-6, max_total_size=1000000):
     """
-    TODO: can this be a class? e.g., class AdaptiveGrid?
-    q: why are there multiple grids passed in here in the first place
-
     Return an irregular oversampled grid needed to reach a
     given precision when integrating over each intervals of `grid`.
     The grid is built by subdividing iteratively each intervals that
@@ -1085,8 +1080,6 @@ def _get_wings(fct, grid, h_len, i_a, i_b):
 
 def _trpz_weight(grid, length, shape, i_a, i_b):
     """
-    TODO: add to some integration class?
-
     Compute weights due to trapezoidal integration
 
     Parameters
@@ -1466,7 +1459,7 @@ def _get_interp_idx_array(idx, relative_range, max_length):
     return np.arange(*abs_range, 1)
 
 
-def _minimize_on_grid(factors, val_to_minimize, interpolate=True, interp_index=[-2,4]):
+def _minimize_on_grid(factors, val_to_minimize, interpolate=True, interp_index=None):
     """
     Find minimum of a grid using akima spline interpolation to get a finer estimate
 
     Parameters
     ----------
@@ -1486,6 +1479,8 @@ def _minimize_on_grid(factors, val_to_minimize, interpolate=True, interp_index=[
     min_fac : float
         The factor with minimized error/curvature.
""" + if interp_index is None: + interp_index = [-2, 4] # Only keep finite values idx_finite = np.isfinite(val_to_minimize) @@ -1528,7 +1523,7 @@ def _minimize_on_grid(factors, val_to_minimize, interpolate=True, interp_index=[ return min_fac -def _find_intersect(factors, y_val, thresh, interpolate=True, search_range=[0,3]): +def _find_intersect(factors, y_val, thresh, interpolate=True, search_range=None): """ Find the root of y_val - thresh (so the intersection between thresh and y_val) Parameters ---------- @@ -1551,6 +1546,8 @@ def _find_intersect(factors, y_val, thresh, interpolate=True, search_range=[0,3] Factor corresponding to the best approximation of the intersection point. """ + if search_range is None: + search_range = [0, 3] # Only keep finite values idx_finite = np.isfinite(y_val) @@ -1821,9 +1818,6 @@ def try_solve_two_methods(matrix, result): class Tikhonov: """ - TODO: can we avoid all of this by using scipy.optimize.least_squares - like this? https://stackoverflow.com/questions/62768131/how-to-add-tikhonov-regularization-in-scipy-optimize-least-squares - Tikhonov regularization to solve the ill-posed problem A.x = b, where A is accidentally singular or close to singularity. Tikhonov regularization adds a regularization term in the equation and aim to minimize the diff --git a/jwst/extract_1d/soss_extract/soss_extract.py b/jwst/extract_1d/soss_extract/soss_extract.py index 34fb487cca..5bdfac74d4 100644 --- a/jwst/extract_1d/soss_extract/soss_extract.py +++ b/jwst/extract_1d/soss_extract/soss_extract.py @@ -92,7 +92,6 @@ def get_ref_file_args(ref_files): # The spectral kernels. speckernel_ref = ref_files['speckernel'] - # ovs = speckernel_ref.meta.spectral_oversampling n_pix = 2 * speckernel_ref.meta.halfwidth + 1 # Take the centroid of each trace as a grid to project the WebbKernel @@ -237,10 +236,10 @@ def _get_native_grid_from_trace(ref_files, spectral_order): Returns ------- - wave : - Grid of the pixels boundaries at the native sampling (1d array) - col : - The column number of the pixel + wave : + Grid of the pixels boundaries at the native sampling (1d array) + col : + The column number of the pixel """ # From wavelength solution @@ -389,8 +388,6 @@ def _populate_tikho_attr(spec, tiktests, idx, sp_ord): def _f_to_spec(f_order, grid_order, ref_file_args, pixel_grid, mask, sp_ord): """ - TODO: would it be better to pass the engine directly in here somehow? - Bin the flux to the pixel grid and build a SpecModel. Parameters @@ -631,7 +628,7 @@ def _model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, log_guess = np.log10(guess_factor) factors = np.logspace(log_guess - 4, log_guess + 4, 10) all_tests = engine.get_tikho_tests(factors, scidata_bkg, scierr) - tikfac= engine.best_tikho_factor(all_tests, fit_mode='all') + tikfac = engine.best_tikho_factor(all_tests, fit_mode='all') # Refine across 4 orders of magnitude. tikfac = np.log10(tikfac) @@ -854,7 +851,7 @@ def _model_single_order(data_order, err_order, ref_file_args, mask_fit, The last spectrum in the list of SpecModels lacks the "chi2", "chi2_soft_l1", "chi2_cauchy", and "reg" attributes, as these are only calculated for the intermediate models. The last spectrum is not necessarily identical to any of the spectra in the list, as it is reconstructed according to - mask_rebuild instead of fit respecting mask_fit. + mask_rebuild instead of fit respecting mask_fit; that is, bad pixels are included. # TODO are all of these behaviors for the last spec in the list the desired ones? 
""" diff --git a/jwst/extract_1d/soss_extract/tests/conftest.py b/jwst/extract_1d/soss_extract/tests/conftest.py index e339bfa4ec..3e48416145 100644 --- a/jwst/extract_1d/soss_extract/tests/conftest.py +++ b/jwst/extract_1d/soss_extract/tests/conftest.py @@ -19,7 +19,8 @@ - Randomly-selected bad pixels in the data - Wave grid of size ~100 with varying resolution - Triangle function throughput for each spectral order -- Kernel set to unity (for now) +- Kernel is also a triangle function peaking at the center, or else unity for certain tests +- (partial) Mock of the Pastasoss reference model """ PWCPOS = 245.85932900002442