diff --git a/.github/workflows/autodoc_tutorials.yml b/.github/workflows/autodoc_tutorials.yml
index 42a01701..37859295 100644
--- a/.github/workflows/autodoc_tutorials.yml
+++ b/.github/workflows/autodoc_tutorials.yml
@@ -40,11 +40,6 @@ jobs:
sudo wget https://github.com/jgm/pandoc/releases/download/3.1.8/pandoc-3.1.8-1-amd64.deb
sudo dpkg -i pandoc-3.1.8-1-amd64.deb
- # Remove whitespaces in filenames of tutorial notebooks only during workflow
- - name: remove whitespaces in tutorial filenames
- run: |
- find $GITHUB_WORKSPACE/tutorial -type f -exec bash -c 'newname="${1// /}"; newname="${newname//-/_}"; mv "$1" "$newname"' bash {} \;
-
# Execute and convert notebooks
- name: execute notebooks
run: |
diff --git a/docs/index.rst b/docs/index.rst
index 1a877e52..396e7c27 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -21,10 +21,9 @@ Single-Event DataFrame (SED) documentation
:numbered:
:caption: Examples
- tutorial/1_Binningfakedata
- tutorial/2_ConversionPipelineforexampletime_resolvedARPESdata
- tutorial/3_MetadatacollectionandexporttoNeXus
- tutorial/Flashenergycalibration
+ tutorial/1_binning_fake_data
+ tutorial/2_conversion_pipeline_for_example_time-resolved_ARPES_data
+ tutorial/3_metadata_collection_and_export_to_NeXus
.. toctree::
:maxdepth: 2
diff --git a/docs/tutorial/1_Binningfakedata.rst b/docs/tutorial/1_Binningfakedata.rst
deleted file mode 100644
index f4fa9f42..00000000
--- a/docs/tutorial/1_Binningfakedata.rst
+++ /dev/null
@@ -1,302 +0,0 @@
-Binning demonstration on locally generated fake data
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In this example, we generate a table with random data simulating a
-single event dataset. We showcase the binning method, first on a simple
-single table using the bin_partition method and then in the distributed
-mehthod bin_dataframe, using daks dataframes. The first method is never
-really called directly, as it is simply the function called by the
-bin_dataframe on each partition of the dask dataframe.
-
-.. code:: ipython3
-
- import sys
-
- import dask
- import numpy as np
- import pandas as pd
- import dask.dataframe
-
- import matplotlib.pyplot as plt
-
- sys.path.append("../")
- from sed.binning import bin_partition, bin_dataframe
-
-Generate Fake Data
-------------------
-
-.. code:: ipython3
-
- n_pts = 100000
- cols = ["posx", "posy", "energy"]
- df = pd.DataFrame(np.random.randn(n_pts, len(cols)), columns=cols)
- df
-
-
-
-
-.. raw:: html
-
-
-
-
-
-
- |
- posx |
- posy |
- energy |
-
-
-
-
- 0 |
- 0.920564 |
- -1.068583 |
- 1.011868 |
-
-
- 1 |
- 1.101308 |
- -1.133177 |
- 2.264009 |
-
-
- 2 |
- -2.175991 |
- 0.469750 |
- -0.366066 |
-
-
- 3 |
- -1.414038 |
- 1.505585 |
- -1.168827 |
-
-
- 4 |
- -1.239659 |
- -0.401433 |
- 0.055166 |
-
-
- ... |
- ... |
- ... |
- ... |
-
-
- 99995 |
- -0.386748 |
- 0.796456 |
- 1.208073 |
-
-
- 99996 |
- -1.012029 |
- 0.886339 |
- -0.616620 |
-
-
- 99997 |
- 1.486870 |
- -0.143890 |
- -1.772194 |
-
-
- 99998 |
- -1.169187 |
- 2.560502 |
- -0.039270 |
-
-
- 99999 |
- -0.813120 |
- 1.152984 |
- 0.006634 |
-
-
-
-
-    100000 rows × 3 columns
-
-
-
-
-Define the binning range
-------------------------
-
-.. code:: ipython3
-
- binAxes = ["posx", "posy", "energy"]
- nBins = [120, 120, 120]
- binRanges = [(-2, 2), (-2, 2), (-2, 2)]
- coords = {ax: np.linspace(r[0], r[1], n) for ax, r, n in zip(binAxes, binRanges, nBins)}
-
-Compute the binning along the pandas dataframe
-----------------------------------------------
-
-.. code:: ipython3
-
- %%time
- res = bin_partition(
- part=df,
- bins=nBins,
- axes=binAxes,
- ranges=binRanges,
- hist_mode="numba",
- )
-
-
-.. parsed-literal::
-
- CPU times: user 1.13 s, sys: 18.5 ms, total: 1.14 s
- Wall time: 1.19 s
-
-
-.. code:: ipython3
-
- fig, axs = plt.subplots(1, 3, figsize=(8, 2.5), constrained_layout=True)
- for i in range(3):
- axs[i].imshow(res.sum(i))
-
-
-
-.. image:: 1_Binningfakedata_files/1_Binningfakedata_8_0.png
-
-
-Transform to dask dataframe
----------------------------
-
-.. code:: ipython3
-
- ddf = dask.dataframe.from_pandas(df, npartitions=50)
- ddf
-
-
-
-
-.. raw:: html
-
- Dask DataFrame Structure:
-
-
-
-
-
- |
- posx |
- posy |
- energy |
-
-
- npartitions=50 |
- |
- |
- |
-
-
-
-
- 0 |
- float64 |
- float64 |
- float64 |
-
-
- 2000 |
- ... |
- ... |
- ... |
-
-
- ... |
- ... |
- ... |
- ... |
-
-
- 98000 |
- ... |
- ... |
- ... |
-
-
- 99999 |
- ... |
- ... |
- ... |
-
-
-
-
- Dask Name: from_pandas, 1 graph layer
-
-
-
-compute distributed binning on the partitioned dask dataframe
--------------------------------------------------------------
-
-In this example, the small dataset does not give significant improvement
-over the pandas implementation, at least using this number of
-partitions. A single partition would be faster (you can try…) but we use
-multiple for demonstration purpouses.
-
-.. code:: ipython3
-
- %%time
- res = bin_dataframe(
- df=ddf,
- bins=nBins,
- axes=binAxes,
- ranges=binRanges,
- hist_mode="numba",
- )
-
-
-
-.. parsed-literal::
-
- 0%| | 0/50 [00:00, ?it/s]
-
-
-.. parsed-literal::
-
- CPU times: user 504 ms, sys: 280 ms, total: 784 ms
- Wall time: 699 ms
-
-
-.. code:: ipython3
-
- fig, axs = plt.subplots(1, 3, figsize=(8, 2.5), constrained_layout=True)
- for dim, ax in zip(binAxes, axs):
- res.sum(dim).plot(ax=ax)
-
-
-
-.. image:: 1_Binningfakedata_files/1_Binningfakedata_13_0.png
-
-
diff --git a/docs/tutorial/1_Binningfakedata_files/1_Binningfakedata_13_0.png b/docs/tutorial/1_Binningfakedata_files/1_Binningfakedata_13_0.png
deleted file mode 100644
index 64fe9522..00000000
Binary files a/docs/tutorial/1_Binningfakedata_files/1_Binningfakedata_13_0.png and /dev/null differ
diff --git a/docs/tutorial/1_Binningfakedata_files/1_Binningfakedata_8_0.png b/docs/tutorial/1_Binningfakedata_files/1_Binningfakedata_8_0.png
deleted file mode 100644
index 31c4020b..00000000
Binary files a/docs/tutorial/1_Binningfakedata_files/1_Binningfakedata_8_0.png and /dev/null differ
diff --git a/docs/tutorial/2_ConversionPipelineforexampletime_resolvedARPESdata.rst b/docs/tutorial/2_ConversionPipelineforexampletime_resolvedARPESdata.rst
deleted file mode 100644
index 2df9f6b0..00000000
--- a/docs/tutorial/2_ConversionPipelineforexampletime_resolvedARPESdata.rst
+++ /dev/null
@@ -1,393 +0,0 @@
-Demonstration of the conversion pipeline using time-resolved ARPES data stored on Zenodo
-========================================================================================
-
-In this example, we pull some time-resolved ARPES data from Zenodo, and
-load it into the sed package using functions of the mpes package. Then,
-we run a conversion pipeline on it, containing steps for visualizing the
-channels, correcting image distortions, calibrating the momentum space,
-correcting for energy distortions and calibrating the energy axis.
-Finally, the data are binned in calibrated axes. For performance
-reasons, best store the data on a locally attached storage (no network
-drive). This can also be achieved transparently using the included
-MirrorUtil class.
-
-.. code:: ipython3
-
- %load_ext autoreload
- %autoreload 2
- import numpy as np
- import matplotlib.pyplot as plt
- import os
-
- import sed
-
- %matplotlib widget
-
-Load Data
----------
-
-.. code:: ipython3
-
- data_path = '.' # Put in Path to a storage of at least 20 Gbyte free space.
- if not os.path.exists(data_path + "/WSe2.zip"):
- os.system(f"curl -L --output {data_path}/WSe2.zip https://zenodo.org/record/6369728/files/WSe2.zip")
- if not os.path.isdir(data_path + "/Scan049_1") or not os.path.isdir(data_path + "/energycal_2019_01_08/"):
- os.system(f"unzip -d {data_path} -o {data_path}/WSe2.zip")
-
-.. code:: ipython3
-
- # The Scan directory
- fdir = data_path + '/Scan049_1'
- # create sed processor using the config file:
- sp = sed.SedProcessor(folder=fdir, config="../sed/config/mpes_example_config.yaml")
-
-.. code:: ipython3
-
- # Plot of the count rate through the scan
- rate, secs = sp.loader.get_count_rate(range(100))
- plt.plot(secs, rate)
-
-.. code:: ipython3
-
- # The time elapsed in the scan
- sp.loader.get_elapsed_time()
-
-.. code:: ipython3
-
- # Apply jittering to X, Y, t, ADC columns.
- # Columns are defined in the config, or can be provided as list.
- sp.add_jitter()
-
-.. code:: ipython3
-
- # Inspect data in dataframe Columns:
- # axes = ['X', 'Y', 't', 'ADC']
- # bins = [100, 100, 100, 100]
- # ranges = [(0, 1800), (0, 1800), (130000, 140000), (0, 9000)]
- # sp.viewEventHistogram(dfpid=1, axes=axes, bins=bins, ranges=ranges)
- sp.view_event_histogram(dfpid=2)
-
-Distortion correction and Momentum Calibration workflow
--------------------------------------------------------
-
-Distortion correction
-~~~~~~~~~~~~~~~~~~~~~
-
-1. step:
-^^^^^^^^
-
-Bin and load part of the dataframe in detector coordinates, and choose
-energy plane where high-symmetry points can well be identified. Either
-use the interactive tool, or pre-select the range:
-
-.. code:: ipython3
-
- #sp.bin_and_load_momentum_calibration(df_partitions=20, plane=170)
- sp.bin_and_load_momentum_calibration(df_partitions=100, plane=33, width=10, apply=True)
-
-2. Step:
-^^^^^^^^
-
-Next, we select a number of features corresponding to the rotational
-symmetry of the material, plus the center. These can either be
-auto-detected (for well-isolated points), or provided as a list (these
-can be read-off the graph in the cell above). These are then symmetrized
-according to the rotational symmetry, and a spline-warping correction
-for the x/y coordinates is calculated, which corrects for any geometric
-distortions from the perfect n-fold rotational symmetry.
-
-.. code:: ipython3
-
- #features = np.array([[203.2, 341.96], [299.16, 345.32], [350.25, 243.70], [304.38, 149.88], [199.52, 152.48], [154.28, 242.27], [248.29, 248.62]])
- #sp.define_features(features=features, rotation_symmetry=6, include_center=True, apply=True)
- # Manual selection: Use a GUI tool to select peaks:
- #sp.define_features(rotation_symmetry=6, include_center=True)
- #sp.generate_splinewarp(rotation_symmetry=6, include_center=True, fwhm=10, sigma=12, sigma_radius=4)
- # Autodetect: Uses the DAOStarFinder routine to locate maxima.
- # Parameters are:
- # fwhm: Full-width at half maximum of peaks.
- # sigma: Number of standard deviations above the mean value of the image peaks must have.
- # sigma_radius: number of standard deviations around a peak that peaks are fitted
- sp.define_features(rotation_symmetry=6, auto_detect=True, include_center=True, fwhm=10, sigma=12, sigma_radius=4, apply=True)
-
-3. Step:
-^^^^^^^^
-
-Generate nonlinear correction using splinewarp algorithm. If no
-landmarks have been defined in previous step, default parameters from
-the config are used
-
-.. code:: ipython3
-
- # Option whether a central point shall be fixed in the determiantion fo the correction
- sp.generate_splinewarp(include_center=True)
-
-Optional (Step 3a):
-^^^^^^^^^^^^^^^^^^^
-
-Save distortion correction parameters to configuration file in current
-data folder:
-
-.. code:: ipython3
-
- # Save generated distortion correction parameters for later reuse
- sp.save_splinewarp()
-
-4. Step:
-^^^^^^^^
-
-To adjust scaling, position and orientation of the corrected momentum
-space image, you can apply further affine transformations to the
-distortion correction field. Here, first a postential scaling is
-applied, next a translation, and finally a rotation around the center of
-the image (defined via the config). One can either use an interactive
-tool, or provide the adjusted values and apply them directly.
-
-.. code:: ipython3
-
- #sp.pose_adjustment(xtrans=14, ytrans=18, angle=2)
- sp.pose_adjustment(xtrans=8, ytrans=7, angle=-4, apply=True)
-
-5. Step:
-^^^^^^^^
-
-Finally, the momentum correction is applied to the dataframe, and
-corresponding meta data are stored
-
-.. code:: ipython3
-
- sp.apply_momentum_correction()
-
-Momentum calibration workflow
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-1. Step:
-^^^^^^^^
-
-First, the momentum scaling needs to be calibtrated. Either, one can
-provide the coordinates of one point outside the center, and provide its
-distane to the Brillouin zone center (which is assumed to be located in
-the center of the image), one can specify two points on the image and
-their distance (where the 2nd point marks the BZ center),or one can
-provide absolute k-coordinates of two distinct momentum points.
-
-If no points are provided, an interactive tool is created. Here, left
-mouse click selectes the off-center point (brillouin_zone_cetnered=True)
-or toggle-selects the off-center and center point.
-
-.. code:: ipython3
-
- k_distance = 4/3*np.pi/3.28
- #sp.calibrate_momentum_axes(k_distance = k_distance)
- point_a = [308, 345]
- sp.calibrate_momentum_axes(point_a=point_a, k_distance = k_distance, apply=True)
- #point_b = [247, 249]
- #sp.calibrate_momentum_axes(point_a=point_a, point_b = point_b, k_coord_a = [.5, 1.1], k_coord_b = [1.3, 0], equiscale=False
-
-Optional (Step 1a):
-'''''''''''''''''''
-
-Save momentum calibration parameters to configuration file in current
-data folder:
-
-.. code:: ipython3
-
- # Save generated momentum calibration parameters for later reuse
- sp.save_momentum_calibration()
-
-2. Step:
-^^^^^^^^
-
-Now, the distortion correction and momentum calibration needs to be
-applied to the dataframe.
-
-.. code:: ipython3
-
- sp.apply_momentum_calibration()
-
-Energy Correction (optional)
-----------------------------
-
-The purpose of the energy correction is to correct for any
-momentum-dependent distortion of the energy axis, e.g. from geometric
-effects in the flight tube, or from space charge
-
-1st step:
-^^^^^^^^^
-
-Here, one can select the functional form to be used, and adjust its
-parameters. The binned data used for the momentum calibration is plotted
-around the Fermi energy (defined by tof_fermi), and the correction
-function is plotted ontop. Possible correction functions are: “sperical”
-(parameter: diameter), “Lorentzian” (parameter: gamma), “Gaussian”
-(parameter: sigma), and “Lorentzian_asymmetric” (parameters: gamma,
-amplitude2, gamma2).
-
-One can either use an interactive alignment tool, or provide parameters
-directly.
-
-.. code:: ipython3
-
- #sp.adjust_energy_correction(amplitude=2.5, center=(730, 730), gamma=920, tof_fermi = 66200)
- sp.adjust_energy_correction(amplitude=2.5, center=(730, 730), gamma=920, tof_fermi = 66200, apply=True)
-
-Optional (Step 1a):
-'''''''''''''''''''
-
-Save energy correction parameters to configuration file in current data
-folder:
-
-.. code:: ipython3
-
- # Save generated energy correction parameters for later reuse
- sp.save_energy_correction()
-
-2. Step
-^^^^^^^
-
-After adjustment, the energy correction is directly applied to the TOF
-axis.
-
-.. code:: ipython3
-
- sp.apply_energy_correction()
-
-3. Energy calibration
----------------------
-
-For calibrating the energy axis, a set of data taken at different bias
-voltages around the value where the measurement was taken is required.
-
-1. Step:
-^^^^^^^^
-
-In a first step, the data are loaded, binned along the TOF dimension,
-and normalized. The used bias voltages can be either provided, or read
-from attributes in the source files if present.
-
-.. code:: ipython3
-
- # Load energy calibration EDCs
- energycalfolder = data_path + "/energycal_2019_01_08/"
- scans = np.arange(1,12)
- voltages = np.arange(12,23,1)
- files = [energycalfolder + r'Scan' + str(num).zfill(3) + '_' + str(num+11) + '.h5' for num in scans]
- sp.load_bias_series(files, normalize=True, biases=voltages, ranges=[(64000, 75000)])
-
-2. Step:
-^^^^^^^^
-
-Next, the same peak or feature needs to be selected in each curve. For
-this, one needs to define “ranges” for each curve, within which the peak
-of interest is located. One can either provide these ranges manually, or
-provide one range for a “reference” curve, and infer the ranges for the
-other curves using a dynamic time warping algorithm.
-
-.. code:: ipython3
-
- # Option 1 = specify the ranges containing a common feature (e.g an equivalent peak) for all bias scans
- # rg = [(129031.03103103103, 129621.62162162163), (129541.54154154155, 130142.14214214214), (130062.06206206206, 130662.66266266267), (130612.61261261262, 131213.21321321322), (131203.20320320321, 131803.8038038038), (131793.7937937938, 132384.38438438438), (132434.43443443443, 133045.04504504506), (133105.10510510512, 133715.71571571572), (133805.8058058058, 134436.43643643643), (134546.54654654654, 135197.1971971972)]
- # sp.find_bias_peaks(ranges=rg, infer_others=False)
- # Option 2 = specify the range for one curve and infer the others
- # This will open an interactive tool to select the correct ranges for the curves.
- # IMPORTANT: Don't choose the range too narrow about a peak, and choose a refid
- # somewhere in the middle or towards larger biases!
- rg = (66100, 67000)
- sp.find_bias_peaks(ranges=rg, ref_id=5, infer_others=True, apply=True)
-
-3. Step:
-^^^^^^^^
-
-Next, the detected peak positions and bias voltages are used to
-determine the calibration function. This can be either done by fitting
-the functional form d\ :sup:`2/(t-t0)`\ 2 via lmfit (“lmfit”), or using
-a polynomial approxiamtion (“lstsq” or “lsqr”). Here, one can also
-define a reference id, and a reference energy. Those define the absolute
-energy position of the feature used for calibration in the “reference”
-trace, at the bias voltage where the final measurement has been
-performed. The energy scale can be either “kientic” (decreasing energy
-with increasing TOF), or “binding” (increasing energy with increasing
-TOF).
-
-After calculating the calibration, all traces corrected with the
-calibration are plotted ontop of each other, the calibration function
-together with the extracted features is plotted.
-
-.. code:: ipython3
-
- # use the refid of the bias that the measurement was taken at
- # Eref can be used to set the absolute energy (kinetic energy, E-EF) of the feature used for energy calibration (if known)
- refid=4
- Eref=-0.5
- # the lmfit method uses a fit of (d/(t-t0))**2 to determine the energy calibration
- sp.calibrate_energy_axis(ref_energy=Eref, ref_id=refid, energy_scale="kinetic", method="lmfit")
-
-Optional (Step 3a):
-'''''''''''''''''''
-
-Save energy calibration parameters to configuration file in current data
-folder:
-
-.. code:: ipython3
-
- # Save generated energy calibration parameters for later reuse
- sp.save_energy_calibration()
-
-4. Step:
-^^^^^^^^
-
-Finally, the the energy axis is added to the dataframe.
-
-.. code:: ipython3
-
- sp.append_energy_axis()
-
-4. Delay calibration:
----------------------
-
-The delay axis is calculated from the ADC input column based on the
-provided delay range. ALternatively, the delay scan range can also be
-extracted from attributes inside a source file, if present.
-
-.. code:: ipython3
-
- #from pathlib import Path
- #datafile = "file.h5"
- #print(datafile)
- #sp.calibrate_delay_axis(datafile=datafile)
- delay_range = (-500, 1500)
- sp.calibrate_delay_axis(delay_range=delay_range, preview=True)
-
-5. Visualization of calibrated histograms
------------------------------------------
-
-With all calibrated axes present in the dataframe, we can visualize the
-corresponding histograms, and determine the respective binning ranges
-
-.. code:: ipython3
-
- axes = ['kx', 'ky', 'energy', 'delay']
- ranges = [[-3, 3], [-3, 3], [-6, 2], [-600, 1600]]
- sp.view_event_histogram(dfpid=1, axes=axes, ranges=ranges)
-
-Define the binning ranges and compute calibrated data volume
-------------------------------------------------------------
-
-.. code:: ipython3
-
- axes = ['kx', 'ky', 'energy', 'delay']
- bins = [100, 100, 200, 50]
- ranges = [[-2, 2], [-2, 2], [-4, 2], [-600, 1600]]
- res = sp.compute(bins=bins, axes=axes, ranges=ranges)
-
-Some visualization:
--------------------
-
-.. code:: ipython3
-
- fig, axs = plt.subplots(4, 1, figsize=(6, 18), constrained_layout=True)
- res.loc[{'energy':slice(-.1, 0)}].sum(axis=(2,3)).T.plot(ax=axs[0])
- res.loc[{'kx':slice(-.8, -.5)}].sum(axis=(0,3)).T.plot(ax=axs[1])
- res.loc[{'ky':slice(-.2, .2)}].sum(axis=(1,3)).T.plot(ax=axs[2])
- res.loc[{'kx':slice(-.8, -.5), 'energy':slice(.5, 2)}].sum(axis=(0,1)).plot(ax=axs[3])
-
diff --git a/docs/tutorial/3_MetadatacollectionandexporttoNeXus.rst b/docs/tutorial/3_MetadatacollectionandexporttoNeXus.rst
deleted file mode 100644
index 8f373ecd..00000000
--- a/docs/tutorial/3_MetadatacollectionandexporttoNeXus.rst
+++ /dev/null
@@ -1,399 +0,0 @@
-Binning with metadata generation, and storing into a NeXus file
-===============================================================
-
-In this example, we show how to bin the same data used for example 3,
-but using the values for correction/calibration parameters generated in
-the example notebook 3, which are locally saved in the file
-sed_config.yaml. These data and the corresponding (machine and
-processing) metadata are then stored to a NeXus file following the
-NXmpes NeXus standard
-(https://fairmat-experimental.github.io/nexus-fairmat-proposal/9636feecb79bb32b828b1a9804269573256d7696/classes/contributed_definitions/NXmpes.html#nxmpes)
-using the ‘dataconverter’ of the pynxtools package
-(https://github.com/FAIRmat-NFDI/pynxtools).
-
-.. code:: ipython3
-
- %load_ext autoreload
- %autoreload 2
- import numpy as np
- import matplotlib.pyplot as plt
- import os
-
- import sed
-
- %matplotlib widget
-
-Load Data
----------
-
-.. code:: ipython3
-
- data_path = '.' # Put in Path to a storage of at least 20 Gbyte free space.
- if not os.path.exists(data_path + "/WSe2.zip"):
- os.system(f"curl -L --output {data_path}/WSe2.zip https://zenodo.org/record/6369728/files/WSe2.zip")
- if not os.path.isdir(data_path + "/Scan049_1") or not os.path.isdir(data_path + "/energycal_2019_01_08/"):
- os.system(f"unzip -d {data_path} -o {data_path}/WSe2.zip")
-
-.. code:: ipython3
-
- metadata = {}
- # manual Meta data. These should ideally come from an Electronic Lab Notebook.
- #General
- metadata['experiment_summary'] = 'WSe2 XUV NIR pump probe data.'
- metadata['entry_title'] = 'Valence Band Dynamics - 800 nm linear s-polarized pump, 0.6 mJ/cm2 absorbed fluence'
- metadata['experiment_title'] = 'Valence band dynamics of 2H-WSe2'
-
- #User
- # Fill general parameters of NXuser
- # TODO: discuss how to deal with multiple users?
- metadata['user0'] = {}
- metadata['user0']['name'] = 'Julian Maklar'
- metadata['user0']['role'] = 'Principal Investigator'
- metadata['user0']['affiliation'] = 'Fritz Haber Institute of the Max Planck Society'
- metadata['user0']['address'] = 'Faradayweg 4-6, 14195 Berlin'
- metadata['user0']['email'] = 'maklar@fhi-berlin.mpg.de'
-
- #NXinstrument
- metadata['instrument'] = {}
- #analyzer
- metadata['instrument']['analyzer']={}
- metadata['instrument']['analyzer']['slow_axes'] = "delay" # the scanned axes
- metadata['instrument']['analyzer']['spatial_resolution'] = 10.
- metadata['instrument']['analyzer']['energy_resolution'] = 110.
- metadata['instrument']['analyzer']['momentum_resolution'] = 0.08
- metadata['instrument']['analyzer']['working_distance'] = 4.
- metadata['instrument']['analyzer']['lens_mode'] = "6kV_kmodem4.0_30VTOF.sav"
-
- #probe beam
- metadata['instrument']['beam']={}
- metadata['instrument']['beam']['probe']={}
- metadata['instrument']['beam']['probe']['incident_energy'] = 21.7
- metadata['instrument']['beam']['probe']['incident_energy_spread'] = 0.11
- metadata['instrument']['beam']['probe']['pulse_duration'] = 20.
- metadata['instrument']['beam']['probe']['frequency'] = 500.
- metadata['instrument']['beam']['probe']['incident_polarization'] = [1, 1, 0, 0] # p pol Stokes vector
- metadata['instrument']['beam']['probe']['extent'] = [80., 80.]
- #pump beam
- metadata['instrument']['beam']['pump']={}
- metadata['instrument']['beam']['pump']['incident_energy'] = 1.55
- metadata['instrument']['beam']['pump']['incident_energy_spread'] = 0.08
- metadata['instrument']['beam']['pump']['pulse_duration'] = 35.
- metadata['instrument']['beam']['pump']['frequency'] = 500.
- metadata['instrument']['beam']['pump']['incident_polarization'] = [1, -1, 0, 0] # s pol Stokes vector
- metadata['instrument']['beam']['pump']['incident_wavelength'] = 800.
- metadata['instrument']['beam']['pump']['average_power'] = 300.
- metadata['instrument']['beam']['pump']['pulse_energy'] = metadata['instrument']['beam']['pump']['average_power']/metadata['instrument']['beam']['pump']['frequency']#µJ
- metadata['instrument']['beam']['pump']['extent'] = [230., 265.]
- metadata['instrument']['beam']['pump']['fluence'] = 0.15
-
- #sample
- metadata['sample']={}
- metadata['sample']['preparation_date'] = '2019-01-13T10:00:00+00:00'
- metadata['sample']['preparation_description'] = 'Cleaved'
- metadata['sample']['sample_history'] = 'Cleaved'
- metadata['sample']['chemical_formula'] = 'WSe2'
- metadata['sample']['description'] = 'Sample'
- metadata['sample']['name'] = 'WSe2 Single Crystal'
-
- metadata['file'] = {}
- metadata['file']["trARPES:Carving:TEMP_RBV"] = 300.
- metadata['file']["trARPES:XGS600:PressureAC:P_RD"] = 5.e-11
- metadata['file']["KTOF:Lens:Extr:I"] = -0.12877
- metadata['file']["KTOF:Lens:UDLD:V"] = 399.99905
- metadata['file']["KTOF:Lens:Sample:V"] = 17.19976
- metadata['file']["KTOF:Apertures:m1.RBV"] = 3.729931
- metadata['file']["KTOF:Apertures:m2.RBV"] = -5.200078
- metadata['file']["KTOF:Apertures:m3.RBV"] = -11.000425
-
- # Sample motor positions
- metadata['file']['trARPES:Carving:TRX.RBV'] = 7.1900000000000004
- metadata['file']['trARPES:Carving:TRY.RBV'] = -6.1700200225439552
- metadata['file']['trARPES:Carving:TRZ.RBV'] = 33.4501953125
- metadata['file']['trARPES:Carving:THT.RBV'] = 423.30500940561586
- metadata['file']['trARPES:Carving:PHI.RBV'] = 0.99931647456264949
- metadata['file']['trARPES:Carving:OMG.RBV'] = 11.002500171914066
-
-.. code:: ipython3
-
- # The Scan directory
- fdir = data_path + '/Scan049_1'
- # create sed processor using the config file, and collect the meta data from the files:
- sp = sed.SedProcessor(folder=fdir, config="../sed/config/mpes_example_config.yaml", metadata=metadata, collect_metadata=True)
-
-
-.. parsed-literal::
-
- Configuration loaded from: [/home/runner/work/sed/sed/sed/config/mpes_example_config.yaml]
- Folder config loaded from: [/home/runner/work/sed/sed/tutorial/sed_config.yaml]
- Default config loaded from: [/home/runner/work/sed/sed/sed/config/default.yaml]
- Gathering metadata from different locations
- Collecting time stamps...
- Collecting file metadata...
- Collecting data from the EPICS archive...
-
-
-.. code:: ipython3
-
- # Apply jittering to X, Y, t, ADC columns.
- sp.add_jitter()
-
-.. code:: ipython3
-
- # Calculate machine-coordinate data for pose adjustment
- sp.bin_and_load_momentum_calibration(df_partitions=10, plane=33, width=10, apply=True)
-
-
-
-.. parsed-literal::
-
- 0%| | 0/10 [00:00, ?it/s]
-
-
-
-.. parsed-literal::
-
- interactive(children=(IntSlider(value=33, description='plane', max=290), IntSlider(value=10, description='widt…
-
-
-
-.. parsed-literal::
-
- Button(description='apply', style=ButtonStyle())
-
-
-
-.. raw:: html
-
-
-
-
- Figure
-
-
-
-
-
-
-.. code:: ipython3
-
- # Adjust pose alignment, using stored distortion correction
- sp.pose_adjustment(xtrans=8, ytrans=7, angle=-4, apply=True, use_correction=True)
-
-
-.. parsed-literal::
-
- Calulated thin spline correction based on the following landmarks:
- pouter: [[203.2 341.96]
- [299.16 345.32]
- [350.25 243.7 ]
- [304.38 149.88]
- [199.52 152.48]
- [154.28 242.27]]
- pcent: (248.29, 248.62)
-
-
-
-.. parsed-literal::
-
- interactive(children=(FloatSlider(value=1.0, description='scale', max=1.2, min=0.8, step=0.01), FloatSlider(va…
-
-
-
-.. parsed-literal::
-
- Button(description='apply', style=ButtonStyle())
-
-
-
-.. raw:: html
-
-
-
-
- Figure
-
-
-
-
-
-
-
-.. parsed-literal::
-
- Output()
-
-
-
-.. raw:: html
-
-
-
-
- Figure
-
-
-
-
-
-
-.. code:: ipython3
-
- # Apply stored momentum correction
- sp.apply_momentum_correction()
-
-
-.. parsed-literal::
-
- Adding corrected X/Y columns to dataframe:
- Calculating inverse deformation field, this might take a moment...
- Dask DataFrame Structure:
- X Y t ADC Xm Ym
- npartitions=100
- float64 float64 float64 float64 float64 float64
- ... ... ... ... ... ...
- ... ... ... ... ... ... ...
- ... ... ... ... ... ...
- ... ... ... ... ... ...
- Dask Name: apply_dfield, 206 graph layers
-
-
-.. code:: ipython3
-
- # Apply stored config momentum calibration
- sp.apply_momentum_calibration()
-
-
-.. parsed-literal::
-
- Adding kx/ky columns to dataframe:
- Dask DataFrame Structure:
- X Y t ADC Xm Ym kx ky
- npartitions=100
- float64 float64 float64 float64 float64 float64 float64 float64
- ... ... ... ... ... ... ... ...
- ... ... ... ... ... ... ... ... ...
- ... ... ... ... ... ... ... ...
- ... ... ... ... ... ... ... ...
- Dask Name: assign, 216 graph layers
-
-
-.. code:: ipython3
-
- # Apply stored config energy correction
- sp.apply_energy_correction()
-
-
-.. parsed-literal::
-
- Applying energy correction to dataframe...
- Dask DataFrame Structure:
- X Y t ADC Xm Ym kx ky tm
- npartitions=100
- float64 float64 float64 float64 float64 float64 float64 float64 float64
- ... ... ... ... ... ... ... ... ...
- ... ... ... ... ... ... ... ... ... ...
- ... ... ... ... ... ... ... ... ...
- ... ... ... ... ... ... ... ... ...
- Dask Name: assign, 230 graph layers
-
-
-.. code:: ipython3
-
- # Apply stored config energy calibration
- sp.append_energy_axis()
-
-
-.. parsed-literal::
-
- Adding energy column to dataframe:
- Dask DataFrame Structure:
- X Y t ADC Xm Ym kx ky tm energy
- npartitions=100
- float64 float64 float64 float64 float64 float64 float64 float64 float64 float64
- ... ... ... ... ... ... ... ... ... ...
- ... ... ... ... ... ... ... ... ... ... ...
- ... ... ... ... ... ... ... ... ... ...
- ... ... ... ... ... ... ... ... ... ...
- Dask Name: assign, 240 graph layers
-
-
-.. code:: ipython3
-
- # Apply delay calibration
- delay_range = (-500, 1500)
- sp.calibrate_delay_axis(delay_range=delay_range, preview=True)
-
-
-.. parsed-literal::
-
- Adding delay column to dataframe:
- X Y t ADC Xm \
- 0 -0.321605 -0.321605 -0.321605 -0.321605 NaN
- 1 365.140758 1002.140758 70101.140758 6317.140758 353.006737
- 2 761.331995 818.331995 75615.331995 6316.331995 790.815006
- 3 691.620712 970.620712 66454.620712 6316.620712 712.539689
- 4 671.418493 712.418493 73026.418493 6317.418493 696.025422
- 5 299.308212 1164.308212 68459.308212 6316.308212 280.497870
- 6 571.333208 665.333208 73903.333208 6316.333208 587.731353
- 7 821.940026 544.940026 72631.940026 6317.940026 844.999420
- 8 817.568542 415.568542 72421.568542 6316.568542 833.916568
- 9 1005.501300 666.501300 72801.501300 6316.501300 1035.284109
-
- Ym kx ky tm energy delay
- 0 NaN NaN NaN -48.551243 -25.060283 -660.446111
- 1 1032.666031 -1.285379 0.819763 70084.131419 -9.288423 1472.030636
- 2 837.290997 0.070666 0.214618 75614.443517 -16.576951 1471.757635
- 3 981.587499 -0.171780 0.661555 66448.951009 -0.844295 1471.855093
- 4 740.443504 -0.222930 -0.085352 73026.037877 -13.731331 1472.124386
- 5 1185.406635 -1.509965 1.292855 68432.794682 -5.972168 1471.749608
- 6 701.577832 -0.558355 -0.205733 73900.433665 -14.783450 1471.758045
- 7 585.663806 0.238494 -0.564759 72627.785886 -13.216652 1472.300431
- 8 465.755453 0.204167 -0.936157 72411.898402 -12.927652 1471.837483
- 9 706.093044 0.827872 -0.191748 72794.037473 -13.434359 1471.814785
-
-
-Compute final data volume
--------------------------
-
-.. code:: ipython3
-
- axes = ['kx', 'ky', 'energy', 'delay']
- bins = [100, 100, 200, 50]
- ranges = [[-2, 2], [-2, 2], [-4, 2], [-600, 1600]]
- res = sp.compute(bins=bins, axes=axes, ranges=ranges)
-
-
-
-.. parsed-literal::
-
- 0%| | 0/100 [00:00, ?it/s]
-
-
-.. code:: ipython3
-
- # save to NXmpes NeXus (including standardized metadata)
- sp.save(data_path + "/binned.nxs")
-
-
-.. parsed-literal::
-
- Using mpes reader to convert the given files:
- • ../sed/config/NXmpes_config.json
- The output file generated: ./binned.nxs
-
-
-.. code:: ipython3
-
- # Visualization (requires JupyterLab)
- from jupyterlab_h5web import H5Web
- H5Web(data_path + "/binned.nxs")
-
-
-
-
-.. parsed-literal::
-
-
-
-
-
diff --git a/docs/tutorial/3_MetadatacollectionandexporttoNeXus_files/3_MetadatacollectionandexporttoNeXus_7_3.png b/docs/tutorial/3_MetadatacollectionandexporttoNeXus_files/3_MetadatacollectionandexporttoNeXus_7_3.png
deleted file mode 100644
index 439adf93..00000000
Binary files a/docs/tutorial/3_MetadatacollectionandexporttoNeXus_files/3_MetadatacollectionandexporttoNeXus_7_3.png and /dev/null differ
diff --git a/docs/tutorial/3_MetadatacollectionandexporttoNeXus_files/3_MetadatacollectionandexporttoNeXus_8_3.png b/docs/tutorial/3_MetadatacollectionandexporttoNeXus_files/3_MetadatacollectionandexporttoNeXus_8_3.png
deleted file mode 100644
index a156ffda..00000000
Binary files a/docs/tutorial/3_MetadatacollectionandexporttoNeXus_files/3_MetadatacollectionandexporttoNeXus_8_3.png and /dev/null differ
diff --git a/docs/tutorial/3_MetadatacollectionandexporttoNeXus_files/3_MetadatacollectionandexporttoNeXus_8_5.png b/docs/tutorial/3_MetadatacollectionandexporttoNeXus_files/3_MetadatacollectionandexporttoNeXus_8_5.png
deleted file mode 100644
index 0cddfaaa..00000000
Binary files a/docs/tutorial/3_MetadatacollectionandexporttoNeXus_files/3_MetadatacollectionandexporttoNeXus_8_5.png and /dev/null differ
diff --git a/docs/tutorial/Flashenergycalibration.rst b/docs/tutorial/Flashenergycalibration.rst
deleted file mode 100644
index 6a5988e0..00000000
--- a/docs/tutorial/Flashenergycalibration.rst
+++ /dev/null
@@ -1,86 +0,0 @@
-.. code:: ipython3
-
- %load_ext autoreload
- %autoreload 2
-
- from sed import SedProcessor
- import sed
- import numpy as np
-
- # %matplotlib inline
- %matplotlib widget
- import matplotlib.pyplot as plt
-
-Try to calibrate energy
-=======================
-
-Spin-integrated branch, E_TOF=10eV
-----------------------------------
-
-single scan, move sample bias manually every 2000 pulses.
-
-.. code:: ipython3
-
- sp = SedProcessor(runs=[44638], config="config_flash_energy_calib.yaml", system_config={})
-
-.. code:: ipython3
-
- sp.add_jitter()
-
-.. code:: ipython3
-
- axes = ['sampleBias', 'dldTime']
- bins = [6, 500]
- ranges = [[0,6], [40000, 55000]]
- res = sp.compute(bins=bins, axes=axes, ranges=ranges)
-
-.. code:: ipython3
-
- sp.load_bias_series(binned_data=res)
-
-.. code:: ipython3
-
- ranges=(44500, 46000)
- ref_id=3
- sp.find_bias_peaks(ranges=ranges, ref_id=ref_id)
-
-.. code:: ipython3
-
- ref_id=3
- ref_energy=-.3
- sp.calibrate_energy_axis(ref_id=ref_id, ref_energy=ref_energy, method="lstsq", order=3)
-
-.. code:: ipython3
-
- ref_id=3
- ref_energy=-.3
- sp.calibrate_energy_axis(ref_id=ref_id, ref_energy=ref_energy, method="lmfit")
-
-.. code:: ipython3
-
- sp.append_energy_axis(preview=True)
-
-.. code:: ipython3
-
- axes = ['sampleBias', 'energy']
- bins = [6, 1000]
- ranges = [[0,6], [-5, 5]]
- res = sp.compute(bins=bins, axes=axes, ranges=ranges)
-
-.. code:: ipython3
-
- plt.figure()
- res[3,:].plot()
-
-.. code:: ipython3
-
- axes = ['sampleBias', 'energy', 'dldPosX']
- bins = [6, 100, 480]
- ranges = [[0,6], [-2, 1], [420,900]]
- res = sp.compute(bins=bins, axes=axes, ranges=ranges)
-
-.. code:: ipython3
-
- plt.figure()
- res[3, :, :].plot()
-
diff --git a/tutorial/1 - Binning fake data.ipynb b/tutorial/1_binning_fake_data.ipynb
similarity index 100%
rename from tutorial/1 - Binning fake data.ipynb
rename to tutorial/1_binning_fake_data.ipynb
diff --git a/tutorial/2 - Conversion Pipeline for example time-resolved ARPES data.ipynb b/tutorial/2_conversion_pipeline_for_example_time-resolved_ARPES_data.ipynb
similarity index 100%
rename from tutorial/2 - Conversion Pipeline for example time-resolved ARPES data.ipynb
rename to tutorial/2_conversion_pipeline_for_example_time-resolved_ARPES_data.ipynb
diff --git a/tutorial/3 - Metadata collection and export to NeXus.ipynb b/tutorial/3_metadata_collection_and_export_to_NeXus.ipynb
similarity index 100%
rename from tutorial/3 - Metadata collection and export to NeXus.ipynb
rename to tutorial/3_metadata_collection_and_export_to_NeXus.ipynb
diff --git a/tutorial/Flash energy calibration.ipynb b/tutorial/Flash energy calibration.ipynb
deleted file mode 100755
index dceacdbb..00000000
--- a/tutorial/Flash energy calibration.ipynb
+++ /dev/null
@@ -1,206 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "39b2e62a",
- "metadata": {},
- "outputs": [],
- "source": [
- "%load_ext autoreload\n",
- "%autoreload 2\n",
- "\n",
- "from sed import SedProcessor\n",
- "import sed\n",
- "import numpy as np\n",
- "\n",
- "# %matplotlib inline\n",
- "%matplotlib widget\n",
- "import matplotlib.pyplot as plt"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "4d78d236",
- "metadata": {},
- "source": [
- "# Try to calibrate energy"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "a62f084f",
- "metadata": {},
- "source": [
- "## Spin-integrated branch, E_TOF=10eV\n",
- "single scan, move sample bias manually every 2000 pulses."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "7dabbe92",
- "metadata": {},
- "outputs": [],
- "source": [
- "sp = SedProcessor(runs=[44638], config=\"config_flash_energy_calib.yaml\", system_config={})"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "248a41a7",
- "metadata": {},
- "outputs": [],
- "source": [
- "sp.add_jitter()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "2b867e40",
- "metadata": {},
- "outputs": [],
- "source": [
- "axes = ['sampleBias', 'dldTime']\n",
- "bins = [6, 500]\n",
- "ranges = [[0,6], [40000, 55000]]\n",
- "res = sp.compute(bins=bins, axes=axes, ranges=ranges)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "62081458",
- "metadata": {},
- "outputs": [],
- "source": [
- "sp.load_bias_series(binned_data=res)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "424af94e",
- "metadata": {},
- "outputs": [],
- "source": [
- "ranges=(44500, 46000)\n",
- "ref_id=3\n",
- "sp.find_bias_peaks(ranges=ranges, ref_id=ref_id)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "034eff42",
- "metadata": {},
- "outputs": [],
- "source": [
- "ref_id=3\n",
- "ref_energy=-.3\n",
- "sp.calibrate_energy_axis(ref_id=ref_id, ref_energy=ref_energy, method=\"lstsq\", order=3)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "bbbfe992",
- "metadata": {},
- "outputs": [],
- "source": [
- "ref_id=3\n",
- "ref_energy=-.3\n",
- "sp.calibrate_energy_axis(ref_id=ref_id, ref_energy=ref_energy, method=\"lmfit\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "e14d6cef",
- "metadata": {},
- "outputs": [],
- "source": [
- "sp.append_energy_axis(preview=True)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "59c83544",
- "metadata": {},
- "outputs": [],
- "source": [
- "axes = ['sampleBias', 'energy']\n",
- "bins = [6, 1000]\n",
- "ranges = [[0,6], [-5, 5]]\n",
- "res = sp.compute(bins=bins, axes=axes, ranges=ranges)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "addba4cb",
- "metadata": {},
- "outputs": [],
- "source": [
- "plt.figure()\n",
- "res[3,:].plot()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "1676ec57",
- "metadata": {},
- "outputs": [],
- "source": [
- "axes = ['sampleBias', 'energy', 'dldPosX']\n",
- "bins = [6, 100, 480]\n",
- "ranges = [[0,6], [-2, 1], [420,900]]\n",
- "res = sp.compute(bins=bins, axes=axes, ranges=ranges)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "ad199c40",
- "metadata": {},
- "outputs": [],
- "source": [
- "plt.figure()\n",
- "res[3, :, :].plot()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "3a4ae88c",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": ".pyenv",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8.12"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/tutorial/config_flash_energy_calib.yaml b/tutorial/config_flash_energy_calib.yaml
deleted file mode 100755
index 675aee1a..00000000
--- a/tutorial/config_flash_energy_calib.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-core:
- loader: flash
- beamtime_id: 11013410
- year: 2023
- beamline: pg2
- instrument: hextof
- paths:
- data_raw_dir: "."
- data_parquet_dir: "./parquet"
-
-dataframe:
- ubid_offset: 5
- daq: fl1user3
- channels:
-
- timeStamp:
- format: per_train
- group_name: "/uncategorised/FLASH.DIAG/TIMINGINFO/TIME1.BUNCH_FIRST_INDEX.1/"
-
- pulseId:
- format: per_electron
- group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/"
- slice: 2
- dldPosX:
- format: per_electron
- group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/"
- slice: 1
- dldPosY:
- format: per_electron
- group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/"
- slice: 0
- dldTime:
- format: per_electron
- group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/"
- slice: 3
- dldAux:
- format: per_pulse
- group_name : "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/"
- slice: 4
- dldAuxChannels:
- sampleBias: 0
- tofVoltage: 1
- extractorVoltage: 2
- extractorCurrent: 3
- cryoTemperature: 4
- sampleTemperature: 5
- crystalVoltage: 6
- dldTimeBinSize: 15
-
-
- # ADC containing the pulser sign (1: value approx. 35000, 0: 33000)
- pulserSignAdc:
- format: per_pulse
- group_name: "/FL1/Experiment/PG/SIS8300 100MHz ADC/CH6/TD/"
- #slice: 0
-
- monochromatorPhotonEnergy:
- format: per_train
- group_name: "/FL1/Beamlines/PG/Monochromator/monochromator photon energy/"
-
-
- # The GMDs can not be read yet...
- gmdBda:
- format: per_train
- group_name: "/FL1/Photon Diagnostic/GMD/Average energy/energy BDA/"
- # slice: ":"
-
- #gmdTunnel:
- # format: per_pulse
- # group_name: "/FL1/Photon Diagnostic/GMD/Pulse resolved energy/energy tunnel/"
- # slice: ":"
-
- # Here we use the DBC2 BAM as the "normal" one is broken.
- bam:
- format: per_pulse
- group_name: "/uncategorised/FLASH.SDIAG/BAM.DAQ/FL0.DBC2.ARRIVAL_TIME.ABSOLUTE.SA1.COMP/"
-
- delayStage:
- format: per_train
- group_name: "/zraw/FLASH.SYNC/LASER.LOCK.EXP/F1.PG.OSC/FMC0.MD22.1.ENCODER_POSITION.RD/dGroup/"
-
- tof_column: dldTime
- bias_column: sampleBias
- tof_binning: 3
-
- stream_name_prefixes:
- pbd: "GMD_DATA_gmd_data"
- pbd2: "FL2PhotDiag_pbd2_gmd_data"
- fl1user1: "FLASH1_USER1_stream_2"
- fl1user2: "FLASH1_USER2_stream_2"
- fl1user3: "FLASH1_USER3_stream_2"
- fl2user1: "FLASH2_USER1_stream_2"
- fl2user2: "FLASH2_USER2_stream_2"
- beamtime_dir:
- pg2: "/asap3/flash/gpfs/pg2/"
- hextof: "/asap3/fs-flash-o/gpfs/hextof/"
- wespe: "/asap3/fs-flash-o/gpfs/wespe/"
-
-nexus:
- reader: "mpes"
- definition: "NXmpes"
- input_files: ["/home/kutnyakd/__beamtimes/Spin_2023/NXmpes_config_HEXTOF_light.json"]
diff --git a/tutorial/sed_config.yaml b/tutorial/sed_config.yaml
index 8565c8fb..5577b42e 100644
--- a/tutorial/sed_config.yaml
+++ b/tutorial/sed_config.yaml
@@ -25,18 +25,20 @@ momentum:
y_center: 256.0
correction:
feature_points:
- - - 203.11575556771575
- - 343.1023874450215
- - - 299.9643115931048
- - 346.2942034781325
- - - 351.05271790029917
- - 244.87949469676045
- - - 305.76331680416877
- - 150.31266296600884
- - - 199.64692385066613
- - 152.8942716287488
- - - 153.0
- - 243.0
- - - 249.32627242026467
- - 249.34641745326562
+ - - 202.99667164649654
+ - 342.9841737181237
+ - - 299.87095669185146
+ - 346.1951264748602
+ - - 350.95080745426304
+ - 244.7908230308385
+ - - 305.6268110815786
+ - 150.20132111991873
+ - - 199.5398499983996
+ - 152.77801048162016
+ - - 153.40923361300395
+ - 243.06399842230255
+ - - 249.232157094759
+ - 249.2577242394875
+ include_center: true
rotation_symmetry: 6
+ use_center: true