Skip to content

Commit

Permalink
Change dark rate monitoring of the software trigger to seconds (#1535)
Browse files Browse the repository at this point in the history
* Add new protocol for run starts

* Change saving of raw data based on time

* Fix typo

* Convert seconds into ns

* Only concatenate if needed to prevent second copy of data in memory.

* Improve comment

* Fix tests and implement comments

* Set time for no trigger to 10 s

---------

Co-authored-by: Dacheng Xu <[email protected]>
  • Loading branch information
WenzDaniel and dachengx authored Jan 23, 2025
1 parent 8fa804e commit bf6e753
Show file tree
Hide file tree
Showing 2 changed files with 68 additions and 10 deletions.
18 changes: 18 additions & 0 deletions straxen/config/protocols.py
Original file line number Diff line number Diff line change
Expand Up @@ -311,3 +311,21 @@ def open_jax_model(model_path: str, **kwargs):
serialized_jax_object = file_obj.read()
# Deserialize the JAX object and return its callable function
return export.deserialize(serialized_jax_object).call


@URLConfig.register("runstart")
def get_run_start(run_id):
    """Return the start time of a given run as UTC unix time in ns.

    :param run_id: Run number; anything castable to ``int``.
    :return: Run start as whole nanoseconds since the unix epoch (``np.int64``).
    :raises ValueError: If the run (or its start time) cannot be found
        in the run database.
    """
    import pytz

    rundb = utilix.xent_collection()
    doc = rundb.find_one(
        {"number": int(run_id)},
        projection={
            "start": 1,
        },
    )
    # find_one returns None when no document matches; fail loudly instead
    # of raising an opaque TypeError on the subscript below.
    if doc is None or doc.get("start") is None:
        raise ValueError(f"Cannot find start time for run {run_id} in the run database.")
    start_time = doc["start"]
    # The stored datetime is naive but represents UTC — attach the tzinfo
    # before converting to a unix timestamp (float seconds).
    start_time_unix = start_time.replace(tzinfo=pytz.utc).timestamp()
    # Truncate to whole seconds, then convert to nanoseconds.
    start_time_unix = np.int64(start_time_unix) * 10**9
    return start_time_unix
60 changes: 50 additions & 10 deletions straxen/plugins/raw_records_coin_nv/nveto_recorder.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,16 +79,23 @@ class nVETORecorder(strax.Plugin):
help="Crash if any of the pulses in raw_records overlap with others in the same channel",
)

keep_n_chunks_for_monitoring = straxen.URLConfig(
default=5,
keep_n_seconds_for_monitoring = straxen.URLConfig(
default=10,
track=False,
infer_type=False,
help=(
"How many chunks at a begining of a run should be "
"kept to monitor the detector performance."
"Number of seconds which should be stored without applying the software trigger."
"Use -1 if all records should be kept without applying the software trigger."
),
)

run_start = straxen.URLConfig(
default="runstart://plugin.run_id?",
track=False,
infer_type=False,
help="Returns run start in utc unix time in ns.",
)

def setup(self):
self.baseline_samples = self.baseline_software_trigger_samples_nv
self.hit_thresholds = self.software_trigger_hit_threshold
Expand All @@ -111,15 +118,20 @@ def infer_dtype(self):
return {k: v for k, v in zip(self.provides, dtypes)}

def compute(self, raw_records_nv, start, end, chunk_i):

if not len(raw_records_nv):
rr = raw_records_nv
lrs = np.zeros(0, dtype=self.dtype["lone_raw_record_statistics_nv"])
return {
"raw_records_coin_nv": rr,
"lone_raw_record_statistics_nv": lrs,
}

if self.check_raw_record_overlaps_nv:
straxen.check_overlaps(raw_records_nv, n_channels=3000)

# Cover the case if we do not want to have any coincidence
# Keep all raw data for the very first 5 chunks of data for
# monitoring purposes.
_keep_all_raw_records = (self.coincidence_level_recorder_nv <= 1) or (
chunk_i < self.keep_n_chunks_for_monitoring
)
# Cover the case if we do not want to have any coincidence:
_keep_all_raw_records = self.keep_n_seconds_for_monitoring == -1
if _keep_all_raw_records:
rr = raw_records_nv
lrs = np.zeros(0, dtype=self.dtype["lone_raw_record_statistics_nv"])
Expand All @@ -128,6 +140,30 @@ def compute(self, raw_records_nv, start, end, chunk_i):
"lone_raw_record_statistics_nv": lrs,
}

# Keep all raw data for the very first n seconds of a run.
# This case is tricky since it can also be just a
# subset of a chunk and in this case we need to make sure to also keep
# all fragments of a pulse beyond the first n seconds boundary.
# For performance check very first fragment if in applicable time range:
_need_save_for_monitoring = raw_records_nv[0]["time"] < (
self.run_start + self.keep_n_seconds_for_monitoring * straxen.units.s
)
raw_records_to_keep_without_trigger = np.zeros(0, dtype=raw_records_nv.dtype)

if _need_save_for_monitoring:
len_data = len(raw_records_nv[0]["data"])
# Now compute all pulse starts to make sure that all fragments of a pulse are saved:
pulse_starts = raw_records_nv["time"] - (
raw_records_nv["record_i"] * len_data * raw_records_nv["dt"]
)
_pulse_is_in_first_n_seconds = pulse_starts < (
self.run_start + self.keep_n_seconds_for_monitoring * straxen.units.s
)

# Now divide the data:
raw_records_to_keep_without_trigger = raw_records_nv[_pulse_is_in_first_n_seconds]
raw_records_nv = raw_records_nv[~_pulse_is_in_first_n_seconds]

# Search for hits to define coincidence intervals:
temp_records = strax.raw_to_records(raw_records_nv)
temp_records = strax.sort_by_time(temp_records)
Expand Down Expand Up @@ -191,6 +227,10 @@ def compute(self, raw_records_nv, start, end, chunk_i):
lrs["time"] = start
lrs["endtime"] = end

# Now combine results of with and without software trigger:
if _need_save_for_monitoring:
rr = np.concatenate([raw_records_to_keep_without_trigger, rr])

return {
"raw_records_coin_nv": rr,
"lone_raw_record_statistics_nv": lrs,
Expand Down

0 comments on commit bf6e753

Please sign in to comment.