From 32e350a80adc7af79cfb159680bb766ea097823b Mon Sep 17 00:00:00 2001 From: cxduser Date: Mon, 27 Mar 2023 13:49:32 -0500 Subject: [PATCH 01/13] Additional documentation for multipeak phasing --- docs/source/config_mp.rst | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/source/config_mp.rst b/docs/source/config_mp.rst index 9cf86c6..7a3f3fd 100644 --- a/docs/source/config_mp.rst +++ b/docs/source/config_mp.rst @@ -1,5 +1,5 @@ ======== -cofig_mp +config_mp ======== - scan: | mandatory, string type encapsulating scans or ranges of scans containing data for each peak. The scans/scan ranges should be arranged in ascending order. @@ -9,35 +9,35 @@ cofig_mp scan = "898-913,919-934,940-955,961-976" - orientations: -| mandatory, a list of lists, each inner list defining orientation of a peak. +| mandatory, a list of lists, each inner list defining the orientation of a peak. | example: :: orientations = [[-1, -1, 1], [0, 0, 2], [1, 1, 1], [2, 0, 0]] - hkl_in: -| mandatory, list with Miller indices. +| mandatory, list with Miller indices representing the in-plane lattice vector. | example: :: hkl_in = [3.031127677370978, 12.31353345906843, 8.75104158816168] - hkl_out: -| mandatory, list with Miller indices. +| mandatory, list with Miller indices representing the out-of-plane lattice vector. | example: :: hkl_out = [9.77805918769193, -8.402719849515048, 8.436553021703112] - twin_plane: -| mandatory, list with Miller indices of the twin plane. +| mandatory, list with Miller indices of the twin plane. If there is no twin plane, this should be the same as sample_axis. | example: :: twin_plane = [1, -1, 1] - sample_axis: -| mandatory, axis of the sample +| mandatory, axis of the sample. The data will be rotated so that the twin plane vector in the lattice frame (hkl) corresponds to this vector in the array frame (xyz). | example: :: @@ -51,21 +51,21 @@ cofig_mp final_size = 180 - mp_max_weight: -| mandatory, a number between 0 and 1.0 specifying the limit of weight assigned to any peak +| optional, a number between 0 and 1.0 specifying the initial coupling weight assigned to all peaks. If not provided, it defaults to 1.0. | example: :: mp_max_weight = 1.0 - mp_taper: -| mandatory, +| mandatory, fraction of the total iterations at which to begin tapering the coupling weight. The coupling weight will decrease for all peaks starting at mp_taper times the total number of iterations. | example: :: mp_taper = 0.6 - lattice_size: -| mandatory, +| mandatory, lattice parameter of the reconstructing crystal. This is used to define the reciprocal lattice vectors, which are required for projecting to each peak.
| example: :: From c61905b6e8d5a4125008d887df32d7c1d2142cfe Mon Sep 17 00:00:00 2001 From: Barbara Frosik Date: Thu, 8 Jun 2023 16:17:33 -0500 Subject: [PATCH 02/13] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 5983f10..e67bc0c 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ author = 'Barbara Frosik, Ross Harder', author_email = 'bfrosik@anl.gov', url='https://github.com/advancedPhotonSource/cohere', - version='4.0-beta', + version='4.0-beta1', packages=setuptools.find_packages(), install_requires=['numpy', 'scikit-learn', From c3574c5e5e6bf7bd855151b0e330f125fdd84e0a Mon Sep 17 00:00:00 2001 From: Barbara Frosik Date: Thu, 8 Jun 2023 16:18:39 -0500 Subject: [PATCH 03/13] Update meta.yaml --- meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meta.yaml b/meta.yaml index c3b90bd..d674713 100644 --- a/meta.yaml +++ b/meta.yaml @@ -1,6 +1,6 @@ package: name: cohere_core - version: "4.0" + version: "4.0-beta1" source: path: . From 4217f2cd4166190de34e9b9829c51dec98519717 Mon Sep 17 00:00:00 2001 From: cxduser Date: Wed, 21 Jun 2023 13:54:17 -0500 Subject: [PATCH 04/13] Refactored get_centered to reduce number of numpy function calls. --- cohere_core/utilities/utils.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/cohere_core/utilities/utils.py b/cohere_core/utilities/utils.py index e8437a0..e09136c 100644 --- a/cohere_core/utilities/utils.py +++ b/cohere_core/utilities/utils.py @@ -229,14 +229,8 @@ def get_centered(arr, center_shift): ndarray centered array """ - max_coordinates = list(np.unravel_index(np.argmax(arr), arr.shape)) - max_coordinates = np.add(max_coordinates, center_shift) - shape = arr.shape - centered = arr - for i in range(len(max_coordinates)): - centered = np.roll(centered, int(shape[i] / 2) - max_coordinates[i], i) - - return centered + shift = (np.array(arr.shape)/2) - np.unravel_index(np.argmax(arr), arr.shape) + np.array(center_shift) + return np.roll(arr, shift.astype(int), tuple(range(arr.ndim))), shift.astype(int) def get_zero_padded_centered(arr, new_shape): From c0bbc2d4d18c45544d177dafb534b93bd48d6f2a Mon Sep 17 00:00:00 2001 From: cxduser Date: Wed, 21 Jun 2023 13:54:40 -0500 Subject: [PATCH 05/13] Added a bunch of functions to cplib.py --- cohere_core/lib/cplib.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/cohere_core/lib/cplib.py b/cohere_core/lib/cplib.py index 3cf0632..6ada4da 100644 --- a/cohere_core/lib/cplib.py +++ b/cohere_core/lib/cplib.py @@ -181,3 +181,27 @@ def cos(arr): def linspace(start, stop, num): return cp.linspace(start, stop, num) + + def gradient(arr, dx=1): + return cp.gradient(arr, dx) + + def argmin(arr, axis=None): + return cp.argmin(arr, axis) + + def take_along_axis(a, indices, axis): + return cp.take_along_axis(a, indices, axis) + + def moveaxis(arr, source, dest): + return cp.moveaxis(arr, source, dest) + + def lstsq(A, B): + return cp.linalg.lstsq(A, B, rcond=None) + + def zeros(shape): + return cp.zeros(shape) + + def indices(dims): + return cp.indices(dims) + + def concatenate(tup, axis=0): + return cp.concatenate(tup, axis) From 357c3c4db78c8a086926837219359dd02e021b59 Mon Sep 17 00:00:00 2001 From: cxduser Date: Wed, 21 Jun 2023 13:57:24 -0500 Subject: [PATCH 06/13] Get strain from final reconstruction. This commit also contains commented code that would implement the partial fourier constraint with free-varying pixels according to a preset mask. 
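The strain calculation introduced by this commit reduces, at each voxel, to a small linear least-squares solve: the object's phase projected onto Bragg peak g is phi_g = g . u, so the measured phase gradients of all peaks jointly constrain the displacement Jacobian J = du/dx through grad(phi_g) = g^T J. A minimal NumPy sketch of that per-voxel solve follows; the peak list and Jacobian are made-up illustration values, and the 2*pi/lattice scaling of the g-vectors is omitted:

```python
import numpy as np

# One row per measured Bragg peak; in cohere these are the peak hkl's
# scaled by 2*pi / lattice_size (scaling omitted for clarity).
G = np.array([[-1, -1, 1], [0, 0, 2], [1, 1, 1], [2, 0, 0]], dtype=float)

# Hypothetical displacement Jacobian J[m, k] = du_m / dx_k at one voxel.
J_true = 1e-3 * np.random.default_rng(0).normal(size=(3, 3))

# Each peak's phase obeys phi_g = g . u, so its gradient is one row of G @ J.
grad_phi = G @ J_true

# Recover J: exact when G has rank 3, a least-squares fit when the
# system is overdetermined or the gradients are noisy.
J_fit, *_ = np.linalg.lstsq(G, grad_phi, rcond=None)
assert np.allclose(J_fit, J_true)
```

The strain tensor is the symmetric part of J, which is why `save_res` in the diff below stores s_xy = (J_xy + J_yx) / 2 and its cyclic companions.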
--- cohere_core/controller/phasing.py | 96 +++++++++++++++++++++++-- cohere_core/data/standard_preprocess.py | 20 ++++-- 2 files changed, 104 insertions(+), 12 deletions(-) diff --git a/cohere_core/controller/phasing.py b/cohere_core/controller/phasing.py index 203ff9c..2284fa2 100644 --- a/cohere_core/controller/phasing.py +++ b/cohere_core/controller/phasing.py @@ -19,8 +19,10 @@ from math import pi import random import importlib +import tqdm import numpy as np +from matplotlib import pyplot as plt import cohere_core.utilities.dvc_utils as dvut import cohere_core.utilities.utils as ut @@ -613,6 +615,10 @@ class Peak: def __init__(self, dir_ornt): (self.dir, self.orientation) = dir_ornt + self.g_vec = None + self.gdotg = None + self.mask = None + self.data = None def set_data(self, G_0): import tifffile as tf @@ -620,10 +626,18 @@ def set_data(self, G_0): self.g_vec = devlib.array([0, * self.orientation]) * G_0 self.gdotg = devlib.array(devlib.dot(self.g_vec, self.g_vec)) - fn = self.dir + '/phasing_data/data.tif' - data_np = tf.imread(fn.replace(os.sep, '/')) + datafile = self.dir + '/phasing_data/data.tif' + data_np = tf.imread(datafile.replace(os.sep, '/')) data = devlib.from_numpy(data_np) + try: + maskfile = self.dir + "/phasing_data/mask.tif" + mask_np = tf.imread(maskfile.replace(os.sep, '/')).astype("?") + mask = devlib.from_numpy(mask_np) + self.mask = devlib.fftshift(mask) + except FileNotFoundError: + pass + # in the formatted data the max is in the center, we want it in the corner, so do fft shift self.data = devlib.fftshift(devlib.absolute(data)) @@ -664,7 +678,6 @@ def __init__(self, params, peak_dir_orient): self.peak_objs = [Peak(dir_ornt) for dir_ornt in peak_dir_orient] - def init_dev(self, device_id): self.dev = device_id if device_id != -1: @@ -708,10 +721,20 @@ def init(self, img_dir=None, gen=None): def save_res(self, save_dir): from array import array - - self.shared_image = self.shared_image * self.support_obj.get_support()[:, :, :, None] + J = self.calc_jacobian() + s_xx = J[:,:,:,0,0][:,:,:,None] + s_yy = J[:,:,:,1,1][:,:,:,None] + s_zz = J[:,:,:,2,2][:,:,:,None] + s_xy = ((J[:,:,:,0,1] + J[:,:,:,1,0]) / 2)[:,:,:,None] + s_yz = ((J[:,:,:,1,2] + J[:,:,:,2,1]) / 2)[:,:,:,None] + s_zx = ((J[:,:,:,2,0] + J[:,:,:,0,2]) / 2)[:,:,:,None] + + sup = self.support_obj.get_support()[:, :, :, None] + self.shared_image = self.shared_image * sup if not os.path.exists(save_dir): os.makedirs(save_dir) + final_rec = devlib.concatenate((self.shared_image, s_xx, s_yy, s_zz, s_xy, s_yz, s_zx, sup), axis=-1) + devlib.save(save_dir + "/reconstruction", final_rec) devlib.save(save_dir + "/image", self.shared_image) devlib.save(save_dir + "/shared_density", self.shared_image[:, :, :, 0]) devlib.save(save_dir + "/shared_u1", self.shared_image[:, :, :, 1]) @@ -732,7 +755,59 @@ def save_res(self, save_dir): return 0 + def get_phase_gradient(self, peak): + """Calculate the phase gradient for a given peak""" + # Project onto the peak + self.iter_data = peak.data + self.to_working_image(peak) + + # Iterate a few times to settle + for _ in range(25): + self.to_reciprocal_space() + self.modulus() + self.to_direct_space() + self.er() + + # Calculate the continuous phase gradient using Hofmann's method + # (https://doi.org/10.1103/PhysRevMaterials.4.013801) + gradient = [] + voxel_size = self.params["ds_voxel_size"] + dx0, dy0, dz0 = devlib.gradient(devlib.angle(self.ds_image), voxel_size) + dx1, dy1, dz1 = devlib.gradient(devlib.angle(self.ds_image * devlib.exp(1j*pi/2)), voxel_size) + dx2, dy2, 
dz2 = devlib.gradient(devlib.angle(self.ds_image * devlib.exp(-1j*pi/2)), voxel_size) + for stack in ([dx0, dx1, dx2], [dy0, dy1, dy2], [dz0, dz1, dz2]): + stack = devlib.array(stack) + index = devlib.argmin(stack**2, axis=0) + gradient.append(devlib.take_along_axis(stack, index[None, :, :, :], axis=0).squeeze(axis=0)) + + return gradient + + def calc_jacobian(self): + grad_phi = devlib.array([self.get_phase_gradient(peak) for peak in self.peak_objs]) + G = devlib.array([peak.g_vec[1:] for peak in self.peak_objs]) + grad_phi = devlib.moveaxis(devlib.moveaxis(grad_phi, 0, -1), 0, -1) + cond = devlib.where(self.support_obj.get_support(), True, False) + final_shape = (grad_phi.shape[0], grad_phi.shape[1], grad_phi.shape[2], 3, 3) + ind = devlib.moveaxis(devlib.indices(cond.shape), 0, -1).reshape((-1, 3))[cond.ravel()].get() + print("Calculating strain...") + jacobian = devlib.zeros(final_shape) + for i, j, k in tqdm.tqdm(ind): + jacobian[i, j, k] = devlib.lstsq(G, grad_phi[i, j, k])[0] + return jacobian + def switch_peaks(self): + # if self.iter > 0.95*self.iter_no: + # img = devlib.to_numpy(devlib.absolute(devlib.fftshift(self.rs_amplitudes)))[90] + # mask = devlib.to_numpy(devlib.fftshift(self.peak_objs[self.pk].mask))[90] + # im2 = np.where(mask, img, np.nan) + # plt.subplot(221, xticks=[], yticks=[], title="Enforced") + # plt.imshow(np.log(im2 + 1), cmap="plasma") + # plt.subplot(222, xticks=[], yticks=[], title="Fwd model") + # plt.imshow(np.log(img + 1), cmap="plasma") + # plt.subplot2grid((2, 2), (1, 0), colspan=2, yscale="log", title="Horizontal lineout") + # plt.plot(img[91]) + # plt.tight_layout() + # plt.show() self.to_shared_image() self.pk = random.choice([x for x in range(self.num_peaks) if x not in (self.pk,)]) self.iter_data = self.peak_objs[self.pk].data @@ -747,12 +822,21 @@ def to_shared_image(self): devlib.absolute(self.ds_image)[:, :, :, None] * self.rho_hat self.shared_image = self.shared_image + beta * (new_image - old_image) - def to_working_image(self, peak_obj): phi = devlib.dot(self.shared_image, peak_obj.g_vec) rho = self.shared_image[:, :, :, 0] self.ds_image = rho * devlib.exp(1j*phi) + # def modulus(self): + # ratio = self.get_ratio(self.iter_data, devlib.absolute(self.rs_amplitudes)) + # error = get_norm(devlib.where((self.rs_amplitudes != 0), (devlib.absolute(self.rs_amplitudes) - self.iter_data), + # 0)) / get_norm(self.iter_data) + # self.errs.append(error) + # if self.iter < 0.3*self.iter_no: + # self.rs_amplitudes *= ratio + # else: + # self.rs_amplitudes[self.peak_objs[self.pk].mask] *= ratio[self.peak_objs[self.pk].mask] + # def progress_trigger(self): ornt = self.peak_objs[self.pk].orientation print(f'| iter {self.iter:>4} ' diff --git a/cohere_core/data/standard_preprocess.py b/cohere_core/data/standard_preprocess.py index 597022a..cc5a9b8 100644 --- a/cohere_core/data/standard_preprocess.py +++ b/cohere_core/data/standard_preprocess.py @@ -23,7 +23,7 @@ ] -def prep(datafile, **kwargs): +def prep(prep_dir, **kwargs): """ This function formats data for reconstruction and saves it in data.tif file. The preparation consists of the following steps: - removing the alien: aliens are areas that are effect of interference. The area is manually set in a configuration file after inspecting the data. It could be also a mask file of the same dimensions that data. Another option is AutoAlien1 algorithm that automatically removes the aliens. 
@@ -75,14 +75,18 @@ def prep(datafile, **kwargs): # the error message is printed in verifier return - datafile = datafile.replace(os.sep, '/') + prep_dir = prep_dir.replace(os.sep, '/') # The data has been transposed when saved in tif format for the ImageJ to show the right orientation - data = ut.read_tif(datafile) + data = ut.read_tif(f"{prep_dir}/prep_data.tif") + try: + mask = ut.read_tif(f"{prep_dir}/mask.tif") + except FileNotFoundError: + mask = None if 'data_dir' in kwargs: data_dir = kwargs['data_dir'].replace(os.sep, '/') else: - data_dir, filename = os.path.split(datafile) + data_dir = prep_dir if 'alien_alg' in kwargs: data = at.remove_aliens(data, kwargs, data_dir) @@ -121,9 +125,13 @@ def prep(datafile, **kwargs): if 'center_shift' in kwargs: center_shift = kwargs['center_shift'] - prep_data = ut.get_centered(prep_data, center_shift) + prep_data, shift = ut.get_centered(prep_data, center_shift) else: - prep_data = ut.get_centered(prep_data, [0, 0, 0]) + prep_data, shift = ut.get_centered(prep_data, [0, 0, 0]) + if mask is not None: + mask = np.roll(mask, shift, tuple(range(mask.ndim))) + ut.save_tif(mask, data_dir + '/mask.tif') + if 'binning' in kwargs: binsizes = kwargs['binning'] From 30f814893a1bcbd203a782ecca60635ce67db105 Mon Sep 17 00:00:00 2001 From: cxduser Date: Tue, 27 Jun 2023 13:20:49 -0500 Subject: [PATCH 07/13] Added several new methods to the cohlib signature: - diff - gradient - argmin - take_along_axis - moveaxis - lstsq - zeros - indices - concatenate These methods have been implemented in cplib and nplib, but will raise NotImplementedError if called from torchlib or aflib. --- cohere_core/lib/aflib.py | 28 +++++++++++++++++ cohere_core/lib/cohlib.py | 62 ++++++++++++++++++++++++++++++++++--- cohere_core/lib/cplib.py | 12 ++----- cohere_core/lib/nplib.py | 28 +++++++++++++++++ cohere_core/lib/torchlib.py | 28 +++++++++++++++++ 5 files changed, 145 insertions(+), 13 deletions(-) diff --git a/cohere_core/lib/aflib.py b/cohere_core/lib/aflib.py index d76c962..143abd1 100644 --- a/cohere_core/lib/aflib.py +++ b/cohere_core/lib/aflib.py @@ -333,3 +333,31 @@ def gaussian_filter(arr, sigma, **kwargs): correction = arr_sum / af.sum(convag) convag *= correction return convag + + def diff(arr, axis=None, prepend=0): + raise NotImplementedError + + def gradient(arr, dx=1): + raise NotImplementedError + + def argmin(arr, axis=None): + raise NotImplementedError + + def take_along_axis(a, indices, axis): + raise NotImplementedError + + def moveaxis(arr, source, dest): + raise NotImplementedError + + def lstsq(A, B): + raise NotImplementedError + + def zeros(shape): + raise NotImplementedError + + def indices(dims): + raise NotImplementedError + + def concatenate(tup, axis=0): + raise NotImplementedError + diff --git a/cohere_core/lib/cohlib.py b/cohere_core/lib/cohlib.py index 0e2ae8d..be3221a 100644 --- a/cohere_core/lib/cohlib.py +++ b/cohere_core/lib/cohlib.py @@ -98,13 +98,31 @@ def __subclasshook__(cls, subclass): hasattr(subclass, 'conj') and callable(subclass.conj) and hasattr(subclass, 'array_equal') and - callable(subclass.array_equal) or + callable(subclass.array_equal) and hasattr(subclass, 'cos') and - callable(subclass.cos) or + callable(subclass.cos) and hasattr(subclass, 'linspace') and - callable(subclass.linspace) or + callable(subclass.linspace) and hasattr(subclass, 'clip') and - callable(subclass.clip) or + callable(subclass.clip) and + hasattr(subclass, 'gradient') and + callable(subclass.gradient) and + hasattr(subclass, 'argmin') and + 
callable(subclass.argmin) and + hasattr(subclass, 'take_along_axis') and + callable(subclass.take_along_axis) and + hasattr(subclass, 'moveaxis') and + callable(subclass.moveaxis) and + hasattr(subclass, 'lstsq') and + callable(subclass.lstsq) and + hasattr(subclass, 'zeros') and + callable(subclass.zeros) and + hasattr(subclass, 'indices') and + callable(subclass.indices) and + hasattr(subclass, 'diff') and + callable(subclass.diff) and + hasattr(subclass, 'concatenate') and + callable(subclass.concatenate) or NotImplemented) @abc.abstractmethod @@ -308,6 +326,42 @@ def cos(arr): def linspace(start, stop, num): pass + @abc.abstractmethod + def diff(arr, axis=None, prepend=0): + pass + @abc.abstractmethod def clip(arr, min, max=None): pass + + @abc.abstractmethod + def gradient(arr, dx=1): + pass + + @abc.abstractmethod + def argmin(arr, axis=None): + pass + + @abc.abstractmethod + def take_along_axis(a, indices, axis): + pass + + @abc.abstractmethod + def moveaxis(arr, source, dest): + pass + + @abc.abstractmethod + def lstsq(A, B): + pass + + @abc.abstractmethod + def zeros(shape): + pass + + @abc.abstractmethod + def indices(dims): + pass + + @abc.abstractmethod + def concatenate(tup, axis=0): + pass diff --git a/cohere_core/lib/cplib.py b/cohere_core/lib/cplib.py index 7c2f586..834b7e7 100644 --- a/cohere_core/lib/cplib.py +++ b/cohere_core/lib/cplib.py @@ -165,15 +165,6 @@ def exp(arr): def conj(arr): return cp.conj(arr) - def cos(arr): - return cp.cos(arr) - - def linspace(start, stop, steps): - return cp.linspace(start, stop, steps) - - def diff(arr, axis=None, prepend=0): - return cp.diff(arr, axis=axis, prepend=prepend) - def array_equal(arr1, arr2): return cp.array_equal(arr1, arr2) @@ -186,6 +177,9 @@ def linspace(start, stop, num): def clip(arr, min, max=None): return cp.clip(arr, min, max) + def diff(arr, axis=None, prepend=0): + return cp.diff(arr, axis=axis, prepend=prepend) + def gradient(arr, dx=1): return cp.gradient(arr, dx) diff --git a/cohere_core/lib/nplib.py b/cohere_core/lib/nplib.py index 4c5c6f9..9dba43c 100644 --- a/cohere_core/lib/nplib.py +++ b/cohere_core/lib/nplib.py @@ -186,6 +186,34 @@ def linspace(start, stop, num): def clip(arr, min, max=None): return np.clip(arr, min, max) + def diff(arr, axis=None, prepend=0): + return np.diff(arr, axis=axis, prepend=prepend) + + def gradient(arr, dx=1): + return np.gradient(arr, dx) + + def argmin(arr, axis=None): + return np.argmin(arr, axis) + + def take_along_axis(a, indices, axis): + return np.take_along_axis(a, indices, axis) + + def moveaxis(arr, source, dest): + return np.moveaxis(arr, source, dest) + + def lstsq(A, B): + return np.linalg.lstsq(A, B, rcond=None) + + def zeros(shape): + return np.zeros(shape) + + def indices(dims): + return np.indices(dims) + + def concatenate(tup, axis=0): + return np.concatenate(tup, axis) + + # a11 = np.array([0.1, 0.2, 0.3, 1.0, 1.2, 1.3]) # a12 = np.array([10.1, 10.2, 10.3, 11.0]) # print(np.convolve(a11,a12, mode='same')) diff --git a/cohere_core/lib/torchlib.py b/cohere_core/lib/torchlib.py index 0ec7420..0641a8d 100644 --- a/cohere_core/lib/torchlib.py +++ b/cohere_core/lib/torchlib.py @@ -254,6 +254,34 @@ def linspace(start, stop, num): def clip(arr, min, max=None): return torch.clip(arr, min, max) + def diff(arr, axis=None, prepend=0): + raise NotImplementedError + + def gradient(arr, dx=1): + raise NotImplementedError + + def argmin(arr, axis=None): + raise NotImplementedError + + def take_along_axis(a, indices, axis): + raise NotImplementedError + + def moveaxis(arr, 
source, dest): + raise NotImplementedError + + def lstsq(A, B): + raise NotImplementedError + + def zeros(shape): + raise NotImplementedError + + def indices(dims): + raise NotImplementedError + + def concatenate(tup, axis=0): + raise NotImplementedError + + # a1 = torch.Tensor([0.1, 0.2, 0.3, 1.0, 1.2, 1.3]) # a2 = torch.Tensor([10.1, 10.2, 10.3, 11.0]) # conv = torchlib.fftconvolve(a1,a2) From e58d0a65a88ff25315a4ed4678ee33812016d153 Mon Sep 17 00:00:00 2001 From: cxduser Date: Tue, 27 Jun 2023 13:33:13 -0500 Subject: [PATCH 08/13] Removed some experimental code that accidentally got included in this version. --- cohere_core/controller/phasing.py | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/cohere_core/controller/phasing.py b/cohere_core/controller/phasing.py index c61922a..13b4b9a 100644 --- a/cohere_core/controller/phasing.py +++ b/cohere_core/controller/phasing.py @@ -627,18 +627,6 @@ def calc_jacobian(self): return jacobian def switch_peaks(self): - # if self.iter > 0.95*self.iter_no: - # img = devlib.to_numpy(devlib.absolute(devlib.fftshift(self.rs_amplitudes)))[90] - # mask = devlib.to_numpy(devlib.fftshift(self.peak_objs[self.pk].mask))[90] - # im2 = np.where(mask, img, np.nan) - # plt.subplot(221, xticks=[], yticks=[], title="Enforced") - # plt.imshow(np.log(im2 + 1), cmap="plasma") - # plt.subplot(222, xticks=[], yticks=[], title="Fwd model") - # plt.imshow(np.log(img + 1), cmap="plasma") - # plt.subplot2grid((2, 2), (1, 0), colspan=2, yscale="log", title="Horizontal lineout") - # plt.plot(img[91]) - # plt.tight_layout() - # plt.show() self.to_shared_image() self.pk = random.choice([x for x in range(self.num_peaks) if x not in (self.pk,)]) self.iter_data = self.peak_objs[self.pk].data @@ -658,16 +646,6 @@ def to_working_image(self, peak_obj): rho = self.shared_image[:, :, :, 0] self.ds_image = rho * devlib.exp(1j*phi) - # def modulus(self): - # ratio = self.get_ratio(self.iter_data, devlib.absolute(self.rs_amplitudes)) - # error = get_norm(devlib.where((self.rs_amplitudes != 0), (devlib.absolute(self.rs_amplitudes) - self.iter_data), - # 0)) / get_norm(self.iter_data) - # self.errs.append(error) - # if self.iter < 0.3*self.iter_no: - # self.rs_amplitudes *= ratio - # else: - # self.rs_amplitudes[self.peak_objs[self.pk].mask] *= ratio[self.peak_objs[self.pk].mask] - # def progress_trigger(self): ornt = self.peak_objs[self.pk].orientation print(f'| iter {self.iter:>4} ' From 2f7334570c008ef6660dd45bdfd818b0c500525f Mon Sep 17 00:00:00 2001 From: bfrosik Date: Wed, 28 Jun 2023 17:11:48 -0500 Subject: [PATCH 09/13] assigned release tag --- meta.yaml | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/meta.yaml b/meta.yaml index a37a6c6..dce1353 100644 --- a/meta.yaml +++ b/meta.yaml @@ -1,6 +1,6 @@ package: name: cohere_core - version: "4.0-dev" + version: "4.0.0" source: path: . 
diff --git a/setup.py b/setup.py index afd013c..441fdcc 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ author = 'Barbara Frosik, Ross Harder', author_email = 'bfrosik@anl.gov', url='https://github.com/advancedPhotonSource/cohere', - version='4.0-dev', + version='4.0.0', packages=setuptools.find_packages(), install_requires=['numpy', 'scikit-learn', From c26231ecd2aa5945cae847028547990314c713df Mon Sep 17 00:00:00 2001 From: bfrosik Date: Tue, 18 Jul 2023 15:56:20 -0500 Subject: [PATCH 10/13] fixed doc for developers --- docs/source/for_developers.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/for_developers.rst b/docs/source/for_developers.rst index b3b7bbe..663896e 100755 --- a/docs/source/for_developers.rst +++ b/docs/source/for_developers.rst @@ -9,7 +9,7 @@ The best practice is to create conda environment allocated for the development. :: - conda create -n python=3.x + conda create -n python=3.x -c conda-forge conda activate | Clone the latest cohere repository from GitHub. This will include the cohere-ui directory with all of the cohere-ui content, such running scripts and example. @@ -27,7 +27,7 @@ The best practice is to create conda environment allocated for the development. git checkout -b pip install -e . -| Go to cohere-ui directory and repeat the environment steps, then run setup.py and install_pkgs.sh/install_pkgs.bat scripts. The setup.py script modifies paths from relative to absolute in the provided example configuration. The install_pkgs script installs python packages xrayutilities, mayavi, and pyqt that are required to run the cohere-ui. During the installation user must interact with dialog to agree to the steps when installing the packages. +| Go to cohere-ui directory and checkout the Dev branch and create your own branch, then run setup.py and install_pkgs.sh/install_pkgs.bat scripts. The setup.py script modifies paths from relative to absolute in the provided example configuration. The install_pkgs script installs python packages xrayutilities, mayavi, and pyqt that are required to run the cohere-ui. During the installation user must interact with dialog to agree to the steps when installing the packages. :: @@ -35,8 +35,8 @@ The best practice is to create conda environment allocated for the development. git checkout Dev git checkout -b python setup.py - sh cohere-ui/install_pkgs.sh # for Linux and OS_X - cohere-ui/install_pkgs.bat # for Windows + sh install_pkgs.sh # for Linux and OS_X + install_pkgs.bat # for Windows | If planning to use GPUs, install the packages/libraries that you wish to use. From f8275ae05f3b25ffa34fdd5d2596968eab14b537 Mon Sep 17 00:00:00 2001 From: bfrosik Date: Mon, 28 Aug 2023 15:27:12 -0500 Subject: [PATCH 11/13] fixed crop issue in ui --- cohere-ui | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cohere-ui b/cohere-ui index fa62bae..0e6ca85 160000 --- a/cohere-ui +++ b/cohere-ui @@ -1 +1 @@ -Subproject commit fa62bae430c93c89a738672949f04a17728831e0 +Subproject commit 0e6ca85b624023c329df1e141b57f9e342ba3cee From 7e41374e7aabb171e6c124559bfbe1069b03a537 Mon Sep 17 00:00:00 2001 From: cxduser Date: Wed, 24 Jan 2024 12:34:42 -0600 Subject: [PATCH 12/13] Added the following functions to cplib.py: amin(), affine_transform(), pad(), histogram2d(), calc_nmi(), calc_ehd(). These have only been implemented in cplib, other libraries are only implemented as stubs. 
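Both error metrics compare a measured diffraction pattern with the modulus of the current reconstruction through their joint histogram. Below is a plain NumPy/SciPy rendering of the same formulas used in the cplib versions that follow; the synthetic test arrays are illustrative only, and the real implementations operate on CuPy arrays:

```python
import numpy as np
from scipy import stats

def calc_nmi(hgram):
    # Normalized mutual information, (H(X) + H(Y)) / H(X, Y):
    # 2.0 for identical images, approaching 1.0 for independent ones.
    h0 = stats.entropy(np.sum(hgram, axis=0))
    h1 = stats.entropy(np.sum(hgram, axis=1))
    h01 = stats.entropy(np.reshape(hgram, -1))
    return (h0 + h1) / h01

def calc_ehd(hgram):
    # Expected |x - y| over the joint distribution, with both axes
    # scaled to [0, 1]: 0.0 for identical images, growing as they disagree.
    n = hgram.shape[0] * 1j
    x, y = np.mgrid[0:1:n, 0:1:n]
    return np.sum(hgram * np.abs(x - y)) / np.sum(hgram)

rng = np.random.default_rng(0)
meas = rng.random(10_000)
hgram_same = np.histogram2d(meas, meas, bins=100)[0]
hgram_indep = np.histogram2d(meas, rng.random(10_000), bins=100)[0]
assert calc_nmi(hgram_same) > calc_nmi(hgram_indep)
assert calc_ehd(hgram_same) < calc_ehd(hgram_indep)
```

PATCH 13 applies these in both linear and log-binned form to score the reconstruction against an excluded control peak.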
--- cohere_core/lib/aflib.py | 17 +++++++++++++++++ cohere_core/lib/cohlib.py | 24 ++++++++++++++++++++++++ cohere_core/lib/cplib.py | 29 +++++++++++++++++++++++++++++ cohere_core/lib/nplib.py | 17 +++++++++++++++++ cohere_core/lib/torchlib.py | 17 +++++++++++++++++ 5 files changed, 104 insertions(+) diff --git a/cohere_core/lib/aflib.py b/cohere_core/lib/aflib.py index 143abd1..cafcee5 100644 --- a/cohere_core/lib/aflib.py +++ b/cohere_core/lib/aflib.py @@ -361,3 +361,20 @@ def indices(dims): def concatenate(tup, axis=0): raise NotImplementedError + def amin(arr): + raise NotImplementedError + + def affine_transform(arr, matrix, order=3, offset=0): + raise NotImplementedError + + def pad(arr, padding): + raise NotImplementedError + + def histogram2d(meas, rec, n_bins=100, log=False): + raise NotImplementedError + + def calc_nmi(hgram): + raise NotImplementedError + + def calc_ehd(hgram): + raise NotImplementedError \ No newline at end of file diff --git a/cohere_core/lib/cohlib.py b/cohere_core/lib/cohlib.py index be3221a..33f40a2 100644 --- a/cohere_core/lib/cohlib.py +++ b/cohere_core/lib/cohlib.py @@ -365,3 +365,27 @@ def indices(dims): @abc.abstractmethod def concatenate(tup, axis=0): pass + + @abc.abstractmethod + def amin(arr): + pass + + @abc.abstractmethod + def affine_transform(arr, matrix, order=3, offset=0): + pass + + @abc.abstractmethod + def pad(arr, padding): + pass + + @abc.abstractmethod + def histogram2d(meas, rec, n_bins=100, log=False): + pass + + @abc.abstractmethod + def calc_nmi(hgram): + pass + + @abc.abstractmethod + def calc_ehd(hgram): + pass \ No newline at end of file diff --git a/cohere_core/lib/cplib.py b/cohere_core/lib/cplib.py index 834b7e7..c063e76 100644 --- a/cohere_core/lib/cplib.py +++ b/cohere_core/lib/cplib.py @@ -2,6 +2,7 @@ import cupy as cp import numpy as np import cupyx.scipy.ndimage +import cupyx.scipy.stats as stats class cplib(cohlib): def array(obj): @@ -203,3 +204,31 @@ def indices(dims): def concatenate(tup, axis=0): return cp.concatenate(tup, axis) + + def amin(arr): + return cp.amin(arr) + + def affine_transform(arr, matrix, order=3, offset=0): + return cupyx.scipy.ndimage.affine_transform(arr, matrix, order=order, offset=offset, prefilter=True) + + def pad(arr, padding): + return cp.pad(arr, padding) + + def histogram2d(meas, rec, n_bins=100, log=False): + norm = cp.max(meas) / cp.max(rec) + if log: + bins = cp.logspace(cp.log10(1), cp.log10(cp.max(meas)), n_bins+1) + else: + bins = n_bins + return cp.histogram2d(cp.ravel(meas), cp.ravel(norm*rec), bins)[0] + + def calc_nmi(hgram): + h0 = stats.entropy(np.sum(hgram, axis=0)) + h1 = stats.entropy(np.sum(hgram, axis=1)) + h01 = stats.entropy(np.reshape(hgram, -1)) + return (h0 + h1) / h01 + + def calc_ehd(hgram): + n = hgram.shape[0] * 1j + x, y = cp.mgrid[0:1:n, 0:1:n] + return cp.sum(hgram * cp.abs(x - y)) / cp.sum(hgram) diff --git a/cohere_core/lib/nplib.py b/cohere_core/lib/nplib.py index 9dba43c..d114927 100644 --- a/cohere_core/lib/nplib.py +++ b/cohere_core/lib/nplib.py @@ -213,6 +213,23 @@ def indices(dims): def concatenate(tup, axis=0): return np.concatenate(tup, axis) + def amin(arr): + return np.amin(arr) + + def affine_transform(arr, matrix, order=3, offset=0): + raise NotImplementedError + + def pad(arr, padding): + raise NotImplementedError + + def histogram2d(meas, rec, n_bins=100, log=False): + raise NotImplementedError + + def calc_nmi(hgram): + raise NotImplementedError + + def calc_ehd(hgram): + raise NotImplementedError # a11 = np.array([0.1, 0.2, 0.3, 1.0, 1.2, 
1.3]) # a12 = np.array([10.1, 10.2, 10.3, 11.0]) diff --git a/cohere_core/lib/torchlib.py b/cohere_core/lib/torchlib.py index 0641a8d..b1b4571 100644 --- a/cohere_core/lib/torchlib.py +++ b/cohere_core/lib/torchlib.py @@ -281,6 +281,23 @@ def indices(dims): def concatenate(tup, axis=0): raise NotImplementedError + def amin(arr): + raise NotImplementedError + + def affine_transform(arr, matrix, order=3, offset=0): + raise NotImplementedError + + def pad(arr, padding): + raise NotImplementedError + + def histogram2d(meas, rec, n_bins=100, log=False): + raise NotImplementedError + + def calc_nmi(hgram): + raise NotImplementedError + + def calc_ehd(hgram): + raise NotImplementedError # a1 = torch.Tensor([0.1, 0.2, 0.3, 1.0, 1.2, 1.3]) # a2 = torch.Tensor([10.1, 10.2, 10.3, 11.0]) From 3f3cde4f9d89d7c95671eea3a1a3200389c1e06c Mon Sep 17 00:00:00 2001 From: cxduser Date: Wed, 24 Jan 2024 12:52:05 -0600 Subject: [PATCH 13/13] Significant changes that should have been committed earlier: - Resampling is now included in the phasing process. - Added several new error metrics, including normalized mutual information (NMI) and expected histogram deviation (EHD) - Apply the support constraint to the full object during ER iterations (to prevent buildup of HIO feedback) - Fixed incorrect normalization when projecting to each Bragg peak. - Added a "control_peak" option to exclude one peak from the phasing process to use for unbiased error calculations. - Added a "calc_strain" option which can skip the slow strain calculation. - Added a "fast_resample" option to toggle whether the original or resampled data is used. --- cohere_core/controller/op_flow.py | 4 + cohere_core/controller/phasing.py | 295 +++++++++++++----- .../controller/reconstruction_coupled.py | 19 +- 3 files changed, 226 insertions(+), 92 deletions(-) diff --git a/cohere_core/controller/op_flow.py b/cohere_core/controller/op_flow.py index 34e4926..9d35310 100644 --- a/cohere_core/controller/op_flow.py +++ b/cohere_core/controller/op_flow.py @@ -197,6 +197,10 @@ def get_flow_arr(params, flow_items_list, curr_gen=None, first_run=False): if 'progress_trigger' in params: flow_arr[i] = trigger_row(params['progress_trigger'], iter_no) flow_arr[i][-1] = 1 + elif flow_item == "switch_resampling": + if 'switch_resamp_trigger' in params: + flow_arr[i] = trigger_row(params['switch_resamp_trigger'], iter_no) + flow_arr[i][-1] = 1 elif flow_item == 'switch_peaks': if 'switch_peak_trigger' in params: flow_arr[i] = trigger_row(params['switch_peak_trigger'], iter_no) diff --git a/cohere_core/controller/phasing.py b/cohere_core/controller/phasing.py index 13b4b9a..1fc34e4 100644 --- a/cohere_core/controller/phasing.py +++ b/cohere_core/controller/phasing.py @@ -14,6 +14,7 @@ """ +from pathlib import Path import time import os from math import pi @@ -23,6 +24,7 @@ import numpy as np from matplotlib import pyplot as plt +import tifffile as tf import cohere_core.utilities.dvc_utils as dvut import cohere_core.utilities.utils as ut @@ -74,6 +76,10 @@ def get_support(self): def flip(self): self.support = devlib.flip(self.support) + def make_binary(self): + self.support = devlib.absolute(self.support) + self.support = devlib.where(self.support >= 0.9 * devlib.amax(self.support), 1, 0).astype("?") + class Rec: """ @@ -133,6 +139,7 @@ def __init__(self, params, data_file): self.ds_image = None self.need_save_data = False self.saved_data = None + self.er_iter = False # Indicates whether the last iteration done was ER, used in CoupledRec def init_dev(self, device_id): 
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" @@ -206,7 +213,6 @@ def fast_ga(self, worker_qin, worker_qout): worker_qout.put(ret) - def init(self, dir=None, gen=None): def create_feat_objects(params, feats): if 'shrink_wrap_trigger' in params: @@ -288,7 +294,6 @@ def breed(self): self.params['ga_sw_gauss_sigmas'][self.gen]) return 0 - def iterate(self): self.iter = -1 start_t = time.time() @@ -310,7 +315,6 @@ def iterate(self): return 0 - def save_res(self, save_dir, only_image=False): from array import array @@ -339,7 +343,6 @@ def save_res(self, save_dir, only_image=False): return 0 - def get_metric(self, metric_type): return dvut.all_metrics(self.ds_image, self.errs) # return dvut.get_metric(self.ds_image, self.errs, metric_type) @@ -395,9 +398,11 @@ def to_direct_space(self): self.ds_image_raw = devlib.fft(self.rs_amplitudes) def er(self): + self.er_iter = True self.ds_image = self.ds_image_raw * self.support_obj.get_support() def hio(self): + self.er_iter = False combined_image = self.ds_image - self.ds_image_raw * self.params['hio_beta'] support = self.support_obj.get_support() self.ds_image = devlib.where((support > 0), self.ds_image_raw, combined_image) @@ -439,38 +444,89 @@ def get_ratio(self, divident, divisor): return ratio +def resample(data, matrix, size=None, plot=False): + s = data.shape + corners = [ + [0, 0, 0], + [s[0], 0, 0], [0, s[1], 0], [0, 0, s[2]], + [0, s[1], s[2]], [s[0], 0, s[2]], [s[0], s[1], 0], + [s[0], s[1], s[2]] + ] + new_corners = [np.linalg.inv(matrix)@c for c in corners] + new_shape = np.ceil(np.max(new_corners, axis=0) - np.min(new_corners, axis=0)).astype(int) + pad = np.max((new_shape - s) // 2) + data = devlib.pad(data, np.max([pad, 0])) + mid_shape = np.array(data.shape) + + offset = (mid_shape / 2) - matrix @ (mid_shape / 2) + + # The phasing data as saved is a magnitude, which is not smooth everywhere (there are non-differentiable points + # where the complex amplitude crosses zero). However, the intensity (magnitude squared) is a smooth function. + # Thus, in order to allow a high-order spline, we take the square root of the interpolated square of the image. + data = devlib.affine_transform(devlib.square(data), devlib.array(matrix), order=1, offset=offset) + data = devlib.sqrt(devlib.clip(data, 0)) + if plot: + arr = devlib.to_numpy(data) + plt.imshow(arr[np.argmax(np.sum(arr, axis=(1, 2)))]) + plt.title(np.linalg.det(matrix)) + plt.show() + + if size is not None: + data = pad_to_cube(data, size) + return data + + +def pad_to_cube(data, size): + shp = np.array([size, size, size]) // 2 + # Pad the array to the largest dimensions + shp1 = np.array(data.shape) // 2 + pad = shp - shp1 + pad[pad < 0] = 0 + data = devlib.pad(data, [(pad[0], pad[0]), (pad[1], pad[1]), (pad[2], pad[2])]) + # Crop the array to the final dimensions + shp1 = np.array(data.shape) // 2 + start, end = shp1 - shp, shp1 + shp + data = data[start[0]:end[0], start[1]:end[1], start[2]:end[2]] + return data + + class Peak: """ Holds parameters related to a peak. 
""" - def __init__(self, dir_ornt): - (self.dir, self.orientation) = dir_ornt - self.g_vec = None - self.gdotg = None - self.mask = None - self.data = None + def __init__(self, peak_dir): + geometry = ut.read_config(f"{peak_dir}/geometry") + self.hkl = geometry["peak_hkl"] + self.norm = np.linalg.norm(self.hkl) + print(f"Loading peak {self.hkl}") - def set_data(self, G_0): - import tifffile as tf - - self.g_vec = devlib.array([0, * self.orientation]) * G_0 + # Geometry parameters + self.g_vec = devlib.array([0, * self.hkl]) * 2*pi/geometry["lattice"] self.gdotg = devlib.array(devlib.dot(self.g_vec, self.g_vec)) - - datafile = self.dir + '/phasing_data/data.tif' - data_np = tf.imread(datafile.replace(os.sep, '/')) - data = devlib.from_numpy(data_np) - - try: - maskfile = self.dir + "/phasing_data/mask.tif" - mask_np = tf.imread(maskfile.replace(os.sep, '/')).astype("?") - mask = devlib.from_numpy(mask_np) - self.mask = devlib.fftshift(mask) - except FileNotFoundError: - pass - - # in the formatted data the max is in the center, we want it in the corner, so do fft shift - self.data = devlib.fftshift(devlib.absolute(data)) + self.size = geometry["final_size"] + self.rs_lab_to_det = np.array(geometry["rmatrix"]) / geometry["rs_voxel_size"] + self.rs_det_to_lab = np.linalg.inv(self.rs_lab_to_det) + self.ds_lab_to_det = self.rs_det_to_lab.T + self.ds_det_to_lab = self.rs_lab_to_det.T + + datafile = (Path(peak_dir) / "phasing_data/data.tif").as_posix() + self.full_data = devlib.absolute(devlib.from_numpy(tf.imread(datafile))) + self.full_data = devlib.moveaxis(self.full_data, 0, 2) + self.full_data = devlib.moveaxis(self.full_data, 0, 1) + self.full_size = np.max(self.full_data.shape) + self.full_data = pad_to_cube(self.full_data, self.full_size) + self.data = pad_to_cube(self.full_data, self.size) + + # Resample the diffraction patterns into the lab reference frame. 
+ self.res_data = resample(self.full_data, self.rs_det_to_lab, self.size) + tf.imwrite((Path(peak_dir) / "phasing_data/data_resampled.tif").as_posix(), devlib.to_numpy(self.res_data)) + # self.res_mask = resample(devlib.array(np.ones(self.full_data.shape)), self.det_to_lab, self.size) + + # Do the fftshift on all the arrays + self.full_data = devlib.fftshift(self.full_data) + self.data = devlib.fftshift(self.data) + self.res_data = devlib.fftshift(self.res_data) class CoupledRec(Rec): @@ -495,19 +551,21 @@ class CoupledRec(Rec): __author__ = "Nick Porter" __all__ = [] - def __init__(self, params, peak_dir_orient): + def __init__(self, params, peak_dirs): super().__init__(params, None) - self.iter_functions = self.iter_functions + [self.switch_peaks] + self.iter_functions = self.iter_functions + [self.switch_resampling, self.switch_peaks] - if "switch_peak_trigger" not in params: - params["switch_peak_trigger"] = [0, 10] - if "mp_max_weight" not in params: - params["mp_max_weight"] = 0.9 - if "mp_taper" not in params: - params["mp_taper"] = 0.75 + if "switch_peak_trigger" not in self.params: + self.params["switch_peak_trigger"] = [0, 5] + if "mp_max_weight" not in self.params: + self.params["mp_max_weight"] = 0.9 + if "mp_taper" not in self.params: + self.params["mp_taper"] = 0.75 + if "calc_strain" not in self.params: + self.params["calc_strain"] = True - self.peak_objs = [Peak(dir_ornt) for dir_ornt in peak_dir_orient] + self.peak_dirs = peak_dirs def init_dev(self, device_id): self.dev = device_id @@ -519,13 +577,30 @@ def init_dev(self, device_id): print('may need to restart GUI') return -1 - G_0 = 2*pi/self.params["lattice_size"] - for or_obj in self.peak_objs: - or_obj.set_data(G_0) + # Allows for a control peak to be left out of the reconstruction for comparison + all_peaks = [Peak(d) for d in self.peak_dirs] + if "control_peak" in self.params: + control_peak = self.params["control_peak"] + self.peak_objs = [p for p in all_peaks if p.hkl != control_peak] + try: + self.ctrl_peak = next(p for p in all_peaks if p.hkl == control_peak) + self.ctrl_error = [] + except StopIteration: + self.ctrl_peak = None + self.ctrl_error = None + else: + self.peak_objs = all_peaks + self.ctrl_peak = None + self.ctrl_error = None + + # Fast resampling means using the resampled data to reconstruct the object in a single common basis + # When this is set to False, the reconstruction will need to be resampled each time the peak is switched + self.fast_resample = True + self.lab_frame = True self.num_peaks = len(self.peak_objs) self.pk = 0 # index in list of current peak being reconstructed - self.data = self.peak_objs[self.pk].data + self.data = self.peak_objs[self.pk].res_data self.iter_data = self.data self.dims = self.data.shape @@ -552,19 +627,25 @@ def init(self, img_dir=None, gen=None): def save_res(self, save_dir): from array import array - J = self.calc_jacobian() - s_xx = J[:,:,:,0,0][:,:,:,None] - s_yy = J[:,:,:,1,1][:,:,:,None] - s_zz = J[:,:,:,2,2][:,:,:,None] - s_xy = ((J[:,:,:,0,1] + J[:,:,:,1,0]) / 2)[:,:,:,None] - s_yz = ((J[:,:,:,1,2] + J[:,:,:,2,1]) / 2)[:,:,:,None] - s_zx = ((J[:,:,:,2,0] + J[:,:,:,0,2]) / 2)[:,:,:,None] sup = self.support_obj.get_support()[:, :, :, None] self.shared_image = self.shared_image * sup if not os.path.exists(save_dir): os.makedirs(save_dir) - final_rec = devlib.concatenate((self.shared_image, s_xx, s_yy, s_zz, s_xy, s_yz, s_zx, sup), axis=-1) + + if self.params["calc_strain"]: + J = self.calc_jacobian() + s_xx = J[:,:,:,0,0][:,:,:,None] + s_yy = 
J[:,:,:,1,1][:,:,:,None] + s_zz = J[:,:,:,2,2][:,:,:,None] + s_xy = ((J[:,:,:,0,1] + J[:,:,:,1,0]) / 2)[:,:,:,None] + s_yz = ((J[:,:,:,1,2] + J[:,:,:,2,1]) / 2)[:,:,:,None] + s_zx = ((J[:,:,:,2,0] + J[:,:,:,0,2]) / 2)[:,:,:,None] + final_rec = devlib.concatenate((self.shared_image, s_xx, s_yy, s_zz, s_xy, s_yz, s_zx, sup), axis=-1) + else: + s_00 = devlib.zeros(sup.shape) + final_rec = devlib.concatenate((self.shared_image, s_00, s_00, s_00, s_00, s_00, s_00, sup), axis=-1) + devlib.save(save_dir + "/reconstruction", final_rec) devlib.save(save_dir + "/image", self.shared_image) devlib.save(save_dir + "/shared_density", self.shared_image[:, :, :, 0]) @@ -584,13 +665,21 @@ def save_res(self, save_dir): with open(save_dir + "/metrics.txt", "w+") as f: f.write(str(metric)) + if self.ctrl_error is not None: + ctrl_error = np.array(self.ctrl_error) + np.save(save_dir + '/ctrl_error', ctrl_error) + np.savetxt(save_dir + '/ctrl_error.txt', ctrl_error) return 0 - def get_phase_gradient(self, peak): + def get_phase_gradient(self): """Calculate the phase gradient for a given peak""" # Project onto the peak - self.iter_data = peak.data - self.to_working_image(peak) + if self.fast_resample: + self.iter_data = self.peak_objs[self.pk].res_data + else: + self.iter_data = self.peak_objs[self.pk].data + + self.to_working_image() # Iterate a few times to settle for _ in range(25): @@ -614,45 +703,98 @@ def get_phase_gradient(self, peak): return gradient def calc_jacobian(self): - grad_phi = devlib.array([self.get_phase_gradient(peak) for peak in self.peak_objs]) + grad_phi = [] + for i, peak in enumerate(self.peak_objs): + self.prev_pk = self.pk + self.pk = i + grad_phi.append(self.get_phase_gradient()) + grad_phi = devlib.array(grad_phi) G = devlib.array([peak.g_vec[1:] for peak in self.peak_objs]) grad_phi = devlib.moveaxis(devlib.moveaxis(grad_phi, 0, -1), 0, -1) cond = devlib.where(self.support_obj.get_support(), True, False) final_shape = (grad_phi.shape[0], grad_phi.shape[1], grad_phi.shape[2], 3, 3) ind = devlib.moveaxis(devlib.indices(cond.shape), 0, -1).reshape((-1, 3))[cond.ravel()].get() - print("Calculating strain...") jacobian = devlib.zeros(final_shape) + print("Calculating strain...") for i, j, k in tqdm.tqdm(ind): jacobian[i, j, k] = devlib.lstsq(G, grad_phi[i, j, k])[0] return jacobian def switch_peaks(self): self.to_shared_image() + self.get_control_error() self.pk = random.choice([x for x in range(self.num_peaks) if x not in (self.pk,)]) - self.iter_data = self.peak_objs[self.pk].data - self.to_working_image(self.peak_objs[self.pk]) + if self.fast_resample: + self.iter_data = self.peak_objs[self.pk].res_data + else: + self.iter_data = self.peak_objs[self.pk].data + self.to_working_image() def to_shared_image(self): + if not self.fast_resample: + if self.lab_frame: + self.lab_frame = False + else: + self.shift_to_center() + self.ds_image = resample(self.ds_image, self.peak_objs[self.pk].ds_det_to_lab, self.dims[0]) + self.support_obj.support = resample(self.support_obj.support, self.peak_objs[self.pk].ds_det_to_lab, + self.dims[0], plot=True) + self.support_obj.make_binary() + beta = self.proj_weight[self.iter] - curr_peak = self.peak_objs[self.pk] - old_image = (devlib.dot(self.shared_image, curr_peak.g_vec) / curr_peak.gdotg)[:, :, :, None] * \ - curr_peak.g_vec + devlib.dot(self.shared_image, self.rho_hat)[:, :, :, None] * self.rho_hat - new_image = (devlib.angle(self.ds_image) / curr_peak.gdotg)[:, :, :, None] * curr_peak.g_vec + \ - devlib.absolute(self.ds_image)[:, :, :, None] * 
self.rho_hat + pk = self.peak_objs[self.pk] + old_image = (devlib.dot(self.shared_image, pk.g_vec) / pk.gdotg)[:, :, :, None] * pk.g_vec + \ + devlib.dot(self.shared_image, self.rho_hat)[:, :, :, None] * self.rho_hat + new_image = (devlib.angle(self.ds_image) / pk.gdotg)[:, :, :, None] * pk.g_vec + \ + devlib.absolute(self.ds_image)[:, :, :, None] * self.rho_hat * pk.norm self.shared_image = self.shared_image + beta * (new_image - old_image) - - def to_working_image(self, peak_obj): - phi = devlib.dot(self.shared_image, peak_obj.g_vec) - rho = self.shared_image[:, :, :, 0] - self.ds_image = rho * devlib.exp(1j*phi) + if self.er_iter: + self.shared_image = self.shared_image * self.support_obj.support[:, :, :, None] + + def to_working_image(self): + phi = devlib.dot(self.shared_image, self.peak_objs[self.pk].g_vec) + rho = self.shared_image[:, :, :, 0] / self.peak_objs[self.pk].norm + self.ds_image = rho * devlib.exp(1j * phi) + if not self.fast_resample: + self.ds_image = resample(self.ds_image, self.peak_objs[self.pk].ds_lab_to_det, self.dims[0]) + self.support_obj.support = resample(self.support_obj.support, self.peak_objs[self.pk].ds_lab_to_det, + self.dims[0]) + self.support_obj.make_binary() + + def get_control_error(self): + if self.ctrl_peak is None: + return + phi = devlib.dot(self.shared_image, self.peak_objs[self.pk].g_vec) + rho = self.shared_image[:, :, :, 0] / self.peak_objs[self.pk].norm + img = devlib.absolute(devlib.ifft(rho * devlib.exp(1j * phi))) + lin_hist = devlib.histogram2d(self.ctrl_peak.res_data, img, log=False) + nmi = devlib.calc_nmi(lin_hist).get() + ehd = devlib.calc_ehd(lin_hist).get() + log_hist = devlib.histogram2d(self.ctrl_peak.res_data, img, log=True) + lnmi = devlib.calc_nmi(log_hist).get() + lehd = devlib.calc_ehd(log_hist).get() + self.ctrl_error.append([nmi, lnmi, ehd, lehd]) def progress_trigger(self): - ornt = self.peak_objs[self.pk].orientation - print(f'| iter {self.iter:>4} ' - f'| [{ornt[0]:>2}, {ornt[1]:>2}, {ornt[2]:>2}] ' - f'| err {self.errs[-1]:0.6f} ' - f'| max {self.shared_image[:, :, :, 0].max():0.5g}' - ) + ornt = self.peak_objs[self.pk].hkl + prg = f'| iter {self.iter:>4} ' \ + f'| [{ornt[0]:>2}, {ornt[1]:>2}, {ornt[2]:>2}] ' \ + f'| err {self.errs[-1]:0.6f} ' \ + f'| max {self.shared_image[:, :, :, 0].max():0.5f} ' + if self.ctrl_error is not None: + prg += f"| NMI {self.ctrl_error[-1][0]:0.6f} " + prg += f"| LNMI {self.ctrl_error[-1][1]:0.6f} " + prg += f"| EHD {self.ctrl_error[-1][2]:0.6f} " + prg += f"| LEHD {self.ctrl_error[-1][3]:0.6f} " + print(prg) + + # def shrink_wrap_trigger(self): + # if self.proj_weight[self.iter] > 0.9: + # args = (self.ds_image,) + # self.support_obj.support = self.shrink_wrap_obj.apply_trigger(*args) + # + def switch_resampling(self): + self.fast_resample = False def get_density(self): return self.shared_image[:, :, :, 0] @@ -665,10 +807,13 @@ def flip(self): self.shared_image[:, :, :, 1:] *= -1 self.support_obj.flip() - def shift_to_center(self, ind, cutoff=None): + def shift_to_center(self): + arr = devlib.to_numpy(self.support_obj.get_support()) + ind = [np.argmax(np.sum(arr, axis=axs)) for axs in ((1, 2), (0, 2), (0, 1))] shift_dist = -devlib.array(ind) + (self.dims[0]//2) self.shared_image = devlib.shift(self.shared_image, shift_dist, axis=(0, 1, 2)) - self.support_obj.support = devlib.shift(self.support_obj.support, shift_dist) + self.ds_image = devlib.shift(self.ds_image, shift_dist, axis=(0, 1, 2)) + self.support_obj.support = devlib.shift(self.support_obj.support, shift_dist, axis=(0, 1, 2)) def 
reconstruction(datafile, **kwargs): diff --git a/cohere_core/controller/reconstruction_coupled.py b/cohere_core/controller/reconstruction_coupled.py index 5da8994..e234614 100644 --- a/cohere_core/controller/reconstruction_coupled.py +++ b/cohere_core/controller/reconstruction_coupled.py @@ -37,23 +37,8 @@ def set_lib(pkg, ndim=None): def rec_process(lib, pars, peak_dirs, dev, continue_dir): set_lib(lib) - # It is assumed that the calling script uses peak_dirs containing - # peak orientation. It is parsed here, and passed to the CoupledRec constractor as list of - # touples (, ) - peak_dir_orient = [] - packed_orientations = [(str(o[0]) + str(o[1]) + str(o[2])) for o in pars['orientations']] - # find the directory that matches the packed orientation - for dir in peak_dirs: - found = False - i = 0 - while i < len(packed_orientations) and not found: - if dir.endswith(packed_orientations[i]): - found = True - peak_dir_orient.append((dir, pars['orientations'][i])) - i += 1 - print(peak_dir_orient) - - worker = calc.CoupledRec(pars, peak_dir_orient) + + worker = calc.CoupledRec(pars, peak_dirs) if worker.init_dev(dev[0]) < 0: return worker.init(continue_dir)
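A note on the resampling that this last patch builds the phasing around: `resample()` maps each detector-frame dataset into a common lab frame with an affine transform. Because the saved magnitude is not differentiable where the complex amplitude crosses zero, the code interpolates the intensity (magnitude squared) and takes the square root afterward, as its inline comment explains, and it offsets the transform so the array center is a fixed point. A standalone SciPy sketch of that idea, with an arbitrary example transform and array rather than cohere's geometry:

```python
import numpy as np
from scipy import ndimage

def resample_magnitude(mag, matrix, order=1):
    # Interpolate the intensity, which stays smooth through zero crossings
    # of the complex amplitude, then return to a magnitude.
    center = np.array(mag.shape) / 2
    offset = center - matrix @ center   # keep the array center fixed
    intensity = ndimage.affine_transform(mag**2, matrix, offset=offset, order=order)
    return np.sqrt(np.clip(intensity, 0, None))

mag = np.zeros((32, 32, 32))
mag[12:20, 12:20, 12:20] = 1.0
# det(matrix) < 1 samples the input on a finer grid, magnifying the block
# 2x about the center, the way rs_det_to_lab re-grids each peak's data.
zoomed = resample_magnitude(mag, 0.5 * np.eye(3))
```

Keeping the center fixed matters because, when `fast_resample` is switched off, `to_shared_image()` and `to_working_image()` round-trip the object between the lab and detector frames every time the active peak changes.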