diff --git a/src/ilabs_streamsync/example_script.py b/src/ilabs_streamsync/example_script.py
new file mode 100644
index 0000000..710b7ed
--- /dev/null
+++ b/src/ilabs_streamsync/example_script.py
@@ -0,0 +1,34 @@
+import mne
+
+from ilabs_streamsync import StreamSync, extract_audio_from_video
+
+# load an MNE raw file
+raw = None
+cam1 = None
+flux1 = None
+my_events = []
+
+
+subjects = ["146a", "222b"]
+
+for subj in subjects:
+    # construct the filename/path
+    # load the Raw
+    # figure out where video files are & load them
+    audio1 = extract_audio_from_video(cam1)
+
+    ss = StreamSync(raw, "STIM001")
+    ss.add_stream(audio1)
+    ss.add_camera_events(my_events)
+    ss.add_stream(flux1)
+    result = ss.do_syncing()
+    fig = ss.plot_sync()
+    annot = ss.add_camera_events(my_events)
+    raw.set_annotations(annot)
+    fig.savefig(...)
+    if result < 0.7:
+        write_log_msg(f"subj {subj} had bad pulse syncing, aborting")
+        continue
+
+    # apply maxfilter
+    # do ICA
diff --git a/src/ilabs_streamsync/streamsync.py b/src/ilabs_streamsync/streamsync.py
index ea99b0c..c8a8552 100644
--- a/src/ilabs_streamsync/streamsync.py
+++ b/src/ilabs_streamsync/streamsync.py
@@ -1,3 +1,47 @@
 class StreamSync:
-    def __init__(self):
+    """Synchronize two data streams.
+
+    Inputs: `mne.io.Raw` files, audio files (TODO which formats?),
+    and additional camera events.
+
+    Outputs: `mne.Annotations` object created from the camera events and
+    time-warped to the timescale of the `Raw`.
+    """
+
+    def __init__(self, reference_object, pulse_channel):
+        self.ref_stream = reference_object.get_chan(pulse_channel)
+        self.sfreq = reference_object.info["sfreq"]  # Hz
+        self.streams = []
+
+    def add_stream(self, stream, channel=None, events=None):
+        """Add a new ``Raw`` or video stream, optionally with events.
+
+        stream : Raw | wav
+            An audio or FIF stream.
+        channel : str | int | None
+            Which channel of `stream` contains the sync pulse sequence.
+        events : array-like | None
+            Events associated with the stream. TODO: should they be integer sample
+            numbers? Timestamps? Do we support both?
+        """
+        pulses = self._extract_pulse_sequence_from_stream(stream, channel=channel)
+        self.streams.append(pulses)
+
+    def _extract_pulse_sequence_from_stream(self, stream, channel):
+        # TODO triage based on input type (e.g., if it's a Raw, pull out a stim chan,
+        # if it's audio, just add it as-is)
         pass
+
+    def do_syncing(self):
+        """Synchronize all streams with the reference stream."""
+        # TODO (waves hands) do the hard part.
+        # TODO spit out a report of correlation/association between all pairs of streams
+        pass
+
+    def plot_sync(self):
+        pass
+
+
+def extract_audio_from_video(path_to_video, channel):
+    """Path can be a regex or glob to allow batch processing."""
+    pass