diff --git a/doc/_includes/data_formats.rst b/doc/_includes/data_formats.rst
index 641810c6b63..63dbfcdc98b 100644
--- a/doc/_includes/data_formats.rst
+++ b/doc/_includes/data_formats.rst
@@ -75,6 +75,8 @@ EEG :ref:`Persyst ` .lay :func:`mn
 NIRS         :ref:`NIRx `                                 directory :func:`mne.io.read_raw_nirx`
 NIRS         :ref:`BOXY `                                 directory :func:`mne.io.read_raw_boxy`
+
+EYETRACK     SR eyelink ASCII files                       .asc      :func:`mne.io.read_raw_eyelink`
 ============ ============================================ ========= ===================================
 
 More details are provided in the tutorials in the :ref:`tut-data-formats`
diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index c74f97f3eca..2d731c83364 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -40,6 +40,7 @@ Enhancements
 - Add automatic projection of sEEG contact onto the inflated surface for :meth:`mne.viz.Brain.add_sensors` (:gh:`11436` by `Alex Rockhill`_)
 - Allow an image with intracranial electrode contacts (e.g. computed tomography) to be used without the freesurfer recon-all surfaces to locate contacts so that it doesn't have to be downsampled to freesurfer dimensions (for microelectrodes) and show an example :ref:`ex-ieeg-micro` with :func:`mne.transforms.apply_volume_registration_points` added to aid this transform (:gh:`11567` by `Alex Rockhill`_)
 - Use new :meth:`dipy.workflows.align.DiffeomorphicMap.transform_points` to transform a montage of intracranial contacts more efficiently (:gh:`11572` by `Alex Rockhill`_)
+- Add support for eyetracking data using :func:`mne.io.read_raw_eyelink` (:gh:`11152` by `Dominik Welke`_ and `Scott Huberty`_)
 
 Bugs
 ~~~~
diff --git a/doc/conf.py b/doc/conf.py
index cd11edf8789..1b4f7ad2ed0 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -267,7 +267,7 @@
     # Undocumented (on purpose)
     'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi',
     'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY',
-    'RawPersyst', 'RawNihon', 'RawNedf', 'RawHitachi', 'RawFIL',
+    'RawPersyst', 'RawNihon', 'RawNedf', 'RawHitachi', 'RawFIL', 'RawEyelink',
     # sklearn subclasses
     'mapping', 'to', 'any',
     # unlinkable
@@ -1231,6 +1231,7 @@ def reset_warnings(gallery_conf, fname):
     f'{tu}/{si}/plot_creating_data_structures.html': f'{tu}/{si}/10_array_objs.html',  # noqa E501
     f'{tu}/{si}/plot_point_spread.html': f'{tu}/{si}/70_point_spread.html',
     f'{tu}/{si}/plot_dics.html': f'{tu}/{si}/80_dics.html',
+    f'{tu}/{tf}/plot_eyetracking.html': f'{tu}/preprocessing/90_eyetracking_data.html',  # noqa E501
     f'{ex}/{co}/mne_inverse_label_connectivity.html': f'{mne_conn}/{ex}/mne_inverse_label_connectivity.html',  # noqa E501
     f'{ex}/{co}/cwt_sensor_connectivity.html': f'{mne_conn}/{ex}/cwt_sensor_connectivity.html',  # noqa E501
     f'{ex}/{co}/mixed_source_space_connectivity.html': f'{mne_conn}/{ex}/mixed_source_space_connectivity.html',  # noqa E501
diff --git a/doc/datasets.rst b/doc/datasets.rst
index 8f8e98d4d82..c3d94c49006 100644
--- a/doc/datasets.rst
+++ b/doc/datasets.rst
@@ -44,4 +44,5 @@ Datasets
    refmeg_noise.data_path
    ssvep.data_path
    erp_core.data_path
-   epilepsy_ecog.data_path
\ No newline at end of file
+   epilepsy_ecog.data_path
+   eyelink.data_path
\ No newline at end of file
diff --git a/doc/overview/datasets_index.rst b/doc/overview/datasets_index.rst
index 23827d978d7..b2d0715e8e9 100644
--- a/doc/overview/datasets_index.rst
+++ b/doc/overview/datasets_index.rst
@@ -475,6 +475,19 @@ standard.
 
 * :ref:`tut-ssvep`
 
+EYELINK
+=======
+:func:`mne.datasets.eyelink.data_path`
+
+A small example dataset in SR Research's proprietary .asc format.
+One participant fixated on the screen while short light flashes appeared.
+Monocular recording of gaze position and pupil size, 1000 Hz sampling
+frequency.
+
+.. topic:: Examples
+
+    * :ref:`tut-eyetrack`
+
 References
 ==========
diff --git a/doc/preprocessing.rst b/doc/preprocessing.rst
index 98403661a6d..c92167a04fe 100644
--- a/doc/preprocessing.rst
+++ b/doc/preprocessing.rst
@@ -141,6 +141,19 @@ Projections:
    make_montage_volume
    warp_montage
 
+:py:mod:`mne.preprocessing.eyetracking`:
+
+.. currentmodule:: mne.preprocessing.eyetracking
+
+.. automodule:: mne.preprocessing.eyetracking
+   :no-members:
+   :no-inherited-members:
+
+.. autosummary::
+   :toctree: generated/
+
+   set_channel_types_eyetrack
+
 EEG referencing:
 
 .. currentmodule:: mne
diff --git a/doc/reading_raw_data.rst b/doc/reading_raw_data.rst
index ad04c0ca91a..c9316ffa9b0 100644
--- a/doc/reading_raw_data.rst
+++ b/doc/reading_raw_data.rst
@@ -20,6 +20,7 @@ Reading raw data
    read_raw_ctf
    read_raw_curry
    read_raw_edf
+   read_raw_eyelink
    read_raw_bdf
    read_raw_gdf
    read_raw_kit
diff --git a/mne/channels/channels.py b/mne/channels/channels.py
index 7b2afb2ffbb..c3c86d20a34 100644
--- a/mne/channels/channels.py
+++ b/mne/channels/channels.py
@@ -203,7 +203,8 @@ def equalize_channels(instances, copy=True, verbose=None):
                  FIFF.FIFF_UNIT_MOL: 'M',
                  FIFF.FIFF_UNIT_NONE: 'NA',
                  FIFF.FIFF_UNIT_CEL: 'C',
-                 FIFF.FIFF_UNIT_S: 'S'}
+                 FIFF.FIFF_UNIT_S: 'S',
+                 FIFF.FIFF_UNIT_PX: 'px'}
 
 
 def _check_set(ch, projs, ch_type):
@@ -331,7 +332,8 @@ def set_channel_types(self, mapping, verbose=None):
             ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, dbs, stim, syst,
             ecog, hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude,
-            fnirs_fd_phase, fnirs_od, temperature, gsr
+            fnirs_fd_phase, fnirs_od, eyetrack_pos, eyetrack_pupil,
+            temperature, gsr
 
         .. versionadded:: 0.9.0
         """
@@ -379,6 +381,10 @@ def set_channel_types(self, mapping, verbose=None):
                 coil_type = FIFF.FIFFV_COIL_FNIRS_FD_PHASE
             elif ch_type == 'fnirs_od':
                 coil_type = FIFF.FIFFV_COIL_FNIRS_OD
+            elif ch_type == 'eyetrack_pos':
+                coil_type = FIFF.FIFFV_COIL_EYETRACK_POS
+            elif ch_type == 'eyetrack_pupil':
+                coil_type = FIFF.FIFFV_COIL_EYETRACK_PUPIL
             else:
                 coil_type = FIFF.FIFFV_COIL_NONE
             self.info['chs'][c_ind]['coil_type'] = coil_type
@@ -595,7 +601,7 @@ def pick_types(self, meg=False, eeg=False, stim=False, eog=False,
                    resp=False, chpi=False, exci=False, ias=False, syst=False,
                    seeg=False, dipole=False, gof=False, bio=False, ecog=False,
                    fnirs=False, csd=False, dbs=False,
-                   temperature=False, gsr=False,
+                   temperature=False, gsr=False, eyetrack=False,
                    include=(), exclude='bads', selection=None, verbose=None):
         """Pick some channels by type and names.
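The new ``eyetrack`` selector accepts ``True`` or a collection of subtypes ('eyegaze', 'pupil'). A minimal usage sketch of the method added above (the .asc path is hypothetical):

    import mne

    raw = mne.io.read_raw_eyelink('recording.asc')  # hypothetical path
    raw.pick_types(eyetrack=True)  # keep gaze + pupil channels
    # or keep only the pupil-size channels:
    # raw.pick_types(eyetrack=['pupil'])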
@@ -621,9 +627,9 @@ def pick_types(self, meg=False, eeg=False, stim=False, eog=False,
             self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
             ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
             ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,
-            ecog=ecog, fnirs=fnirs, csd=csd, dbs=dbs, include=include,
-            exclude=exclude, selection=selection, temperature=temperature,
-            gsr=gsr)
+            ecog=ecog, fnirs=fnirs, csd=csd, dbs=dbs, temperature=temperature,
+            gsr=gsr, eyetrack=eyetrack, include=include, exclude=exclude,
+            selection=selection)
 
         self._pick_drop_channels(idx)
diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py
index 96219aaf621..ec24f450fd0 100644
--- a/mne/datasets/__init__.py
+++ b/mne/datasets/__init__.py
@@ -26,6 +26,7 @@
 from . import ssvep
 from . import erp_core
 from . import epilepsy_ecog
+from . import eyelink
 from . import ucl_opm_auditory
 from ._fetch import fetch_dataset
 from .utils import (_download_all_example_data, fetch_hcp_mmp_parcellation,
@@ -42,5 +43,5 @@
     'sleep_physionet', 'somato', 'spm_face', 'ssvep', 'testing',
     'visual_92_categories', 'limo', 'erp_core', 'epilepsy_ecog',
     'fetch_dataset', 'fetch_phantom', 'has_dataset', 'refmeg_noise',
-    'fnirs_motor'
+    'fnirs_motor', 'eyelink'
 ]
diff --git a/mne/datasets/config.py b/mne/datasets/config.py
index c9431a9837e..dc851e9bd2f 100644
--- a/mne/datasets/config.py
+++ b/mne/datasets/config.py
@@ -87,7 +87,7 @@
 # respective repos, and make a new release of the dataset on GitHub. Then
 # update the checksum in the MNE_DATASETS dict below, and change version
 # here: ↓↓↓↓↓ ↓↓↓
-RELEASES = dict(testing='0.142', misc='0.24')
+RELEASES = dict(testing='0.144', misc='0.26')
 TESTING_VERSIONED = f'mne-testing-data-{RELEASES["testing"]}'
 MISC_VERSIONED = f'mne-misc-data-{RELEASES["misc"]}'
@@ -111,7 +111,7 @@
 # Testing and misc are at the top as they're updated most often
 MNE_DATASETS['testing'] = dict(
     archive_name=f'{TESTING_VERSIONED}.tar.gz',
-    hash='md5:44b857ddb34aefd752e4f5b19d625dee',
+    hash='md5:fb546f44dba3310945225ed8fdab4a91',
     url=('https://codeload.github.com/mne-tools/mne-testing-data/'
          f'tar.gz/{RELEASES["testing"]}'),
     # In case we ever have to resort to osf.io again...
@@ -123,7 +123,7 @@
 )
 MNE_DATASETS['misc'] = dict(
     archive_name=f'{MISC_VERSIONED}.tar.gz',  # 'mne-misc-data',
-    hash='md5:eb017a919939511932bd683f26f97490',
+    hash='md5:868b484fadd73b1d1a3535b7194a0d03',
     url=('https://codeload.github.com/mne-tools/mne-misc-data/tar.gz/'
         f'{RELEASES["misc"]}'),
     folder_name='MNE-misc-data',
@@ -335,3 +335,12 @@
     folder_name='foo',
     config_key='MNE_DATASETS_FAKE_PATH'
 )
+
+# eyelink dataset
+MNE_DATASETS['eyelink'] = dict(
+    archive_name='eyelink_example_data.zip',
+    hash='md5:081950c05f35267458d9c751e178f161',
+    url=('https://osf.io/r5ndq/download?version=1'),
+    folder_name='eyelink-example-data',
+    config_key='MNE_DATASETS_EYELINK_PATH'
+)
diff --git a/mne/datasets/eyelink/__init__.py b/mne/datasets/eyelink/__init__.py
new file mode 100644
index 00000000000..85931aba72d
--- /dev/null
+++ b/mne/datasets/eyelink/__init__.py
@@ -0,0 +1,3 @@
+"""Eyelink test dataset."""
+
+from .eyelink import data_path, get_version
diff --git a/mne/datasets/eyelink/eyelink.py b/mne/datasets/eyelink/eyelink.py
new file mode 100644
index 00000000000..a08e338ab33
--- /dev/null
+++ b/mne/datasets/eyelink/eyelink.py
@@ -0,0 +1,26 @@
+# Authors: Dominik Welke
+# License: BSD Style.
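For orientation, the fetcher defined in this new file behaves like MNE's other dataset fetchers; a minimal sketch (the download goes to the location given by the user's MNE config):

    import mne

    path = mne.datasets.eyelink.data_path()  # downloads on first call
    print(mne.datasets.eyelink.get_version())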
+
+from ...utils import verbose
+from ..utils import (_data_path_doc, _get_version, _version_doc,
+                     _download_mne_dataset)
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True,
+              download=True, *, verbose=None):  # noqa: D103
+    return _download_mne_dataset(
+        name='eyelink', processor='unzip', path=path,
+        force_update=force_update, update_path=update_path,
+        download=download)
+
+
+data_path.__doc__ = _data_path_doc.format(name='eyelink',
+                                          conf='MNE_DATASETS_EYELINK_PATH')
+
+
+def get_version():  # noqa: D103
+    return _get_version('eyelink')
+
+
+get_version.__doc__ = _version_doc.format(name='eyelink')
diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py
index e03d179cfc6..50a894bfd7b 100644
--- a/mne/datasets/utils.py
+++ b/mne/datasets/utils.py
@@ -304,7 +304,7 @@ def _download_all_example_data(verbose=True):
                     kiloword, phantom_4dbti, sleep_physionet, limo,
                     fnirs_motor, refmeg_noise, fetch_infant_template,
                     fetch_fsaverage, ssvep, erp_core, epilepsy_ecog,
-                    fetch_phantom, ucl_opm_auditory)
+                    fetch_phantom, eyelink, ucl_opm_auditory)
     sample_path = sample.data_path()
     testing.data_path()
     misc.data_path()
@@ -327,6 +327,7 @@ def _download_all_example_data(verbose=True):
     brainstorm.bst_resting.data_path(accept=True)
     phantom_path = brainstorm.bst_phantom_elekta.data_path(accept=True)
     fetch_phantom('otaniemi', subjects_dir=phantom_path)
+    eyelink.data_path()
     brainstorm.bst_phantom_ctf.data_path(accept=True)
     eegbci.load_data(1, [6, 10, 14], update_path=True)
     for subj in range(4):
diff --git a/mne/defaults.py b/mne/defaults.py
index 362eba0d67f..16b3b843406 100644
--- a/mne/defaults.py
+++ b/mne/defaults.py
@@ -13,26 +13,29 @@
               dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b',
               fnirs_cw_amplitude='k', fnirs_fd_ac_amplitude='k',
               fnirs_fd_phase='k', fnirs_od='k', csd='k', whitened='k',
-              gsr='#666633', temperature='#663333'),
+              gsr='#666633', temperature='#663333',
+              eyegaze='k', pupil='k'),
    si_units=dict(mag='T', grad='T/m', eeg='V', eog='V', ecg='V', emg='V',
                  misc='AU', seeg='V', dbs='V', dipole='Am', gof='GOF',
                  bio='V', ecog='V', hbo='M', hbr='M', ref_meg='T',
                  fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V',
                  fnirs_fd_phase='rad', fnirs_od='V', csd='V/m²',
-                 whitened='Z', gsr='S', temperature='C'),
+                 whitened='Z', gsr='S', temperature='C',
+                 eyegaze='AU', pupil='AU'),
    units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', emg='µV',
               misc='AU', seeg='mV', dbs='µV', dipole='nAm', gof='GOF',
               bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT',
               fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V',
               fnirs_fd_phase='rad', fnirs_od='V', csd='mV/m²',
-              whitened='Z', gsr='S', temperature='C'),
+              whitened='Z', gsr='S', temperature='C',
+              eyegaze='AU', pupil='AU'),
    # scalings for the units
    scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6,
                  misc=1.0, seeg=1e3, dbs=1e6, ecog=1e6, dipole=1e9, gof=1.0,
                  bio=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15,
                  fnirs_cw_amplitude=1.0, fnirs_fd_ac_amplitude=1.0,
                  fnirs_fd_phase=1., fnirs_od=1.0, csd=1e3, whitened=1.,
-                 gsr=1., temperature=1.),
+                 gsr=1., temperature=1., eyegaze=1., pupil=1.),
    # rough guess for a good plot
    scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6,
                           ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto',
@@ -42,13 +45,15 @@
                           fnirs_fd_ac_amplitude=2e-2, fnirs_fd_phase=2e-1,
                           fnirs_od=2e-2, csd=200e-4,
                           dipole=1e-7, gof=1e2,
-                          gsr=1., temperature=0.1),
+                          gsr=1., temperature=0.1,
+                          eyegaze=3e-1, pupil=1e3),
    scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5,  # ~100x scalings
                           seeg=1e1, dbs=1e4, ecog=1e4, hbo=1e4, hbr=1e4),
    ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.),
              misc=(-5., 5.), seeg=(-20., 20.), dbs=(-200., 200.),
              dipole=(-100., 100.), gof=(0., 1.), bio=(-500., 500.),
-             ecog=(-200., 200.), hbo=(0, 20), hbr=(0, 20), csd=(-50., 50.)),
+             ecog=(-200., 200.), hbo=(0, 20), hbr=(0, 20), csd=(-50., 50.),
+             eyegaze=(0., 5000.), pupil=(0., 5000.)),
    titles=dict(mag='Magnetometers', grad='Gradiometers', eeg='EEG', eog='EOG',
                ecg='ECG', emg='EMG', misc='misc', seeg='sEEG', dbs='DBS',
                bio='BIO', dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin',
@@ -60,6 +65,8 @@
                gof='Goodness of fit', csd='Current source density',
                stim='Stimulus', gsr='Galvanic skin response',
                temperature='Temperature',
+               eyegaze='Eye-tracking (Gaze position)',
+               pupil='Eye-tracking (Pupil size)',
                ),
    mask_params=dict(marker='o',
                     markerfacecolor='w',
diff --git a/mne/io/__init__.py b/mne/io/__init__.py
index 6ed6b898566..0abb704873b 100644
--- a/mne/io/__init__.py
+++ b/mne/io/__init__.py
@@ -60,6 +60,7 @@
                         read_evoked_fieldtrip)
 from .nihon import read_raw_nihon
 from ._read_raw import read_raw
+from .eyelink import read_raw_eyelink
 
 # for backward compatibility
diff --git a/mne/io/constants.py b/mne/io/constants.py
index 1159c85283d..f2847644f07 100644
--- a/mne/io/constants.py
+++ b/mne/io/constants.py
@@ -204,6 +204,8 @@
 FIFF.FIFFV_FNIRS_CH       = 1100  # Functional near-infrared spectroscopy
 FIFF.FIFFV_TEMPERATURE_CH = 1200  # Temperature
 FIFF.FIFFV_GALVANIC_CH    = 1300  # Galvanic skin response
+FIFF.FIFFV_EYETRACK_CH    = 1400  # Eye-tracking
+
 _ch_kind_named = {key: key for key in (
     FIFF.FIFFV_BIO_CH,
     FIFF.FIFFV_MEG_CH,
@@ -227,6 +229,7 @@
     FIFF.FIFFV_FNIRS_CH,
     FIFF.FIFFV_GALVANIC_CH,
     FIFF.FIFFV_TEMPERATURE_CH,
+    FIFF.FIFFV_EYETRACK_CH
 )}
 #
@@ -854,6 +857,8 @@
 FIFF.FIFF_UNIT_AM    = 202  # Am
 FIFF.FIFF_UNIT_AM_M2 = 203  # Am/m^2
 FIFF.FIFF_UNIT_AM_M3 = 204  # Am/m^3
+
+FIFF.FIFF_UNIT_PX    = 210  # Pixel
 _ch_unit_named = {key: key for key in(
     FIFF.FIFF_UNIT_NONE, FIFF.FIFF_UNIT_UNITLESS, FIFF.FIFF_UNIT_M,
     FIFF.FIFF_UNIT_KG, FIFF.FIFF_UNIT_SEC, FIFF.FIFF_UNIT_A, FIFF.FIFF_UNIT_K,
@@ -865,6 +870,7 @@
     FIFF.FIFF_UNIT_CEL, FIFF.FIFF_UNIT_LM, FIFF.FIFF_UNIT_LX,
     FIFF.FIFF_UNIT_V_M2, FIFF.FIFF_UNIT_T_M, FIFF.FIFF_UNIT_AM,
     FIFF.FIFF_UNIT_AM_M2, FIFF.FIFF_UNIT_AM_M3,
+    FIFF.FIFF_UNIT_PX,
 )}
 #
 # Multipliers
@@ -916,6 +922,11 @@
 FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE = 304  # fNIRS frequency domain AC amplitude
 FIFF.FIFFV_COIL_FNIRS_FD_PHASE        = 305  # fNIRS frequency domain phase
 FIFF.FIFFV_COIL_FNIRS_RAW = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE  # old alias
+FIFF.FIFFV_COIL_FNIRS_TD_GATED_AMPLITUDE = 306  # fNIRS time-domain gated amplitude
+FIFF.FIFFV_COIL_FNIRS_TD_MOMENTS_AMPLITUDE = 307  # fNIRS time-domain moments amplitude
+
+FIFF.FIFFV_COIL_EYETRACK_POS   = 400  # Eye-tracking gaze position
+FIFF.FIFFV_COIL_EYETRACK_PUPIL = 401  # Eye-tracking pupil size
 
 FIFF.FIFFV_COIL_MCG_42 = 1000  # For testing the MCG software
 
@@ -1002,7 +1013,9 @@
     FIFF.FIFFV_COIL_DIPOLE, FIFF.FIFFV_COIL_FNIRS_HBO, FIFF.FIFFV_COIL_FNIRS_HBR,
     FIFF.FIFFV_COIL_FNIRS_RAW, FIFF.FIFFV_COIL_FNIRS_OD,
     FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE,
-    FIFF.FIFFV_COIL_FNIRS_FD_PHASE, FIFF.FIFFV_COIL_MCG_42,
+    FIFF.FIFFV_COIL_FNIRS_FD_PHASE, FIFF.FIFFV_COIL_FNIRS_TD_GATED_AMPLITUDE,
+    FIFF.FIFFV_COIL_FNIRS_TD_MOMENTS_AMPLITUDE, FIFF.FIFFV_COIL_MCG_42,
+    FIFF.FIFFV_COIL_EYETRACK_POS, FIFF.FIFFV_COIL_EYETRACK_PUPIL,
     FIFF.FIFFV_COIL_POINT_MAGNETOMETER, FIFF.FIFFV_COIL_AXIAL_GRAD_5CM,
     FIFF.FIFFV_COIL_VV_PLANAR_W, FIFF.FIFFV_COIL_VV_PLANAR_T1,
     FIFF.FIFFV_COIL_VV_PLANAR_T2, FIFF.FIFFV_COIL_VV_PLANAR_T3,
diff --git a/mne/io/eyelink/__init__.py b/mne/io/eyelink/__init__.py
new file mode 100644
index 00000000000..77ee7ebc9ef
--- /dev/null
+++ b/mne/io/eyelink/__init__.py
@@ -0,0 +1,7 @@
+"""Module for loading Eye-Tracker data."""
+
+# Author: Dominik Welke
+#
+# License: BSD-3-Clause
+
+from .eyelink import read_raw_eyelink
diff --git a/mne/io/eyelink/eyelink.py b/mne/io/eyelink/eyelink.py
new file mode 100644
index 00000000000..a85796d5b77
--- /dev/null
+++ b/mne/io/eyelink/eyelink.py
@@ -0,0 +1,882 @@
+# Authors: Dominik Welke
+#          Scott Huberty
+#          Christian O'Reilly
+#
+# License: BSD-3-Clause
+
+from datetime import datetime, timezone, timedelta
+from pathlib import Path
+
+import numpy as np
+from ..constants import FIFF
+from ..base import BaseRaw
+from ..meas_info import create_info
+from ...annotations import Annotations
+from ...utils import logger, verbose, fill_doc, _check_pandas_installed
+
+EYELINK_COLS = {'timestamp': ('time',),
+                'pos': {'left': ('xpos_left', 'ypos_left', 'pupil_left'),
+                        'right': ('xpos_right', 'ypos_right', 'pupil_right')},
+                'velocity': {'left': ('xvel_left', 'yvel_left'),
+                             'right': ('xvel_right', 'yvel_right')},
+                'resolution': ('xres', 'yres'),
+                'input': ('DIN',),
+                'flags': ('flags',),
+                'remote': ('x_head', 'y_head',
+                           'distance'),
+                'remote_flags': ('head_flags',),
+                'block_num': ('block',),
+                'eye_event': ('eye', 'time', 'end_time', 'duration'),
+                'fixation': ('fix_avg_x', 'fix_avg_y',
+                             'fix_avg_pupil_size'),
+                'saccade': ('sacc_start_x', 'sacc_start_y',
+                            'sacc_end_x', 'sacc_end_y',
+                            'sacc_visual_angle', 'peak_velocity')}
+
+
+def _isfloat(token):
+    """Boolean test for whether a string can be cast to float.
+
+    Parameters
+    ----------
+    token : str
+        Single element from the tokens list.
+    """
+    if isinstance(token, str):
+        try:
+            float(token)
+            return True
+        except ValueError:
+            return False
+    else:
+        raise ValueError('input should be a string,'
+                         f' but {token} is of type {type(token)}')
+
+
+def _convert_types(tokens):
+    """Convert the type of each token in the list.
+
+    The tokens input is a list of string elements.
+    Posix timestamp strings can be integers, eye gaze position and
+    pupil size can be floats. The flags token ("...") remains a string.
+    Missing eye/head-target data (indicated by '.' or 'MISSING_DATA')
+    are replaced by np.nan.
+
+    Parameters
+    ----------
+    tokens : list
+        List of string elements.
+
+    Returns
+    -------
+    Tokens list with elements of various types.
+    """
+    return [int(token) if token.isdigit()  # execute this before _isfloat()
+            else float(token) if _isfloat(token)
+            else np.nan if token in ('.', 'MISSING_DATA')
+            else token  # remains as string
+            for token in tokens]
+
+
+def _parse_line(line):
+    """Parse a tab-delimited string from an Eyelink ASCII file.
+
+    Takes a tab-delimited string from an Eyelink file,
+    splits it into a list of tokens, and converts the type
+    of each token in the list.
+    """
+    if len(line):
+        tokens = line.split()
+        return _convert_types(tokens)
+    else:
+        raise ValueError('line is empty, nothing to parse')
+
+
+def _is_sys_msg(line):
+    """Flag lines from an Eyelink ASCII file that contain a known system message.
+
+    Some lines in Eyelink files are system outputs usually
+    only meant for Eyelink's DataViewer application to read.
+    These shouldn't need to be parsed.
+
+    Parameters
+    ----------
+    line : str
+        Single line from an Eyelink .asc file.
+
+    Returns
+    -------
+    bool :
+        True if any of the following strings that are
+        known to indicate a system message are in the line.
+
+    Notes
+    -----
+    Examples of Eyelink system messages:
+    - ;Sess:22Aug22;Tria:1;Tri2:False;ESNT:182BFE4C2F4;
+    - ;NTPT:182BFE55C96;SMSG:__NTP_CLOCK_SYNC__;DIFF:-1;
+    - !V APLAYSTART 0 1 library/audio
+    - !MODE RECORD CR 500 2 1 R
+    """
+    return any(['!V' in line,
+                '!MODE' in line,
+                ';' in line])
+
+
+def _get_sfreq(rec_info):
+    """Get the sampling frequency from an Eyelink ASCII file.
+
+    Parameters
+    ----------
+    rec_info : list
+        The first list in self._event_lines['SAMPLES'].
+        The sfreq occurs after RATE: i.e. [..., RATE, 1000, ...].
+
+    Returns
+    -------
+    sfreq : int | float
+    """
+    for i, token in enumerate(rec_info):
+        if token == 'RATE':
+            # sfreq is the first token after RATE
+            return rec_info[i + 1]
+
+
+def _sort_by_time(df, col='time'):
+    df.sort_values(col, ascending=True, inplace=True)
+    df.reset_index(drop=True, inplace=True)
+
+
+def _convert_times(df, first_samp, col='time'):
+    """Set the initial time to 0 and convert from ms to seconds in place.
+
+    Parameters
+    ----------
+    df : pandas.DataFrame
+        One of the dataframes in the self.dataframes dict.
+    first_samp : int
+        Timestamp of the first sample of the recording. This should
+        be the first sample of the first recording block.
+    col : str (default 'time')
+        Column name to sort the pandas.DataFrame by.
+
+    Notes
+    -----
+    Each sample in an Eyelink file has a posix timestamp string.
+    Subtracts the "first" sample's timestamp from each timestamp.
+    The "first" sample is inferred to be the first sample of
+    the first recording block, i.e. the first "START" line.
+    """
+    _sort_by_time(df, col)
+    for col in df.columns:
+        if col.endswith('time'):  # 'time' and 'end_time' cols
+            df[col] -= first_samp
+            df[col] /= 1000
+        if col in ['duration', 'offset']:
+            df[col] /= 1000
+
+
+def _fill_times(df, sfreq, time_col='time'):
+    """Fill missing timestamps if there are multiple recording blocks.
+
+    Parameters
+    ----------
+    df : pandas.DataFrame
+        Dataframe of the eyetracking data samples, BEFORE
+        _convert_times() is applied to the dataframe.
+    sfreq : int | float
+        Sampling frequency of the data.
+    time_col : str (default 'time')
+        Name of the column with the timestamps (e.g. 9511881, 9511882, ...).
+
+    Returns
+    -------
+    %(df_return)s
+
+    Notes
+    -----
+    After _parse_recording_blocks, files with multiple recording blocks will
+    have missing timestamps for the duration of the period between the blocks.
+    This would cause the ocular annotations (i.e. blinks) to not line up with
+    the signal.
+    """
+    pd = _check_pandas_installed()
+
+    first, last = df[time_col].iloc[[0, -1]]
+    step = 1000 / sfreq
+    df[time_col] = df[time_col].astype(float)
+    new_times = pd.DataFrame(np.arange(first, last + step / 2, step),
+                             columns=[time_col])
+    return pd.merge_asof(new_times, df, on=time_col, direction='nearest',
+                         tolerance=step / 10)
+
+
+def _find_overlaps(df, max_time=0.05):
+    """Merge left/right eye events with onset/offset diffs less than max_time.
+
+    Parameters
+    ----------
+    df : pandas.DataFrame
+        Pandas DataFrame with ocular events (fixations, saccades, blinks).
+    max_time : float (default 0.05)
+        Time in seconds.
+        Defaults to .05 (50 ms).
+
+    Returns
+    -------
+    DataFrame : %(df_return)s
+        :class:`pandas.DataFrame` specifying overlapped eye events, if any.
+
+    Notes
+    -----
+    The idea is to cumulatively sum the boolean values for rows with onset and
+    offset differences (against the previous row) that are greater than the
+    max_time. If onset and offset diffs are less than max_time then no_overlap
+    will become False. Alternatively, if either the onset or offset diff is
+    greater than max_time, no_overlap becomes True. Cumulatively summing over
+    these boolean values will leave rows with no_overlap == False unchanged
+    and hence with the same group number.
+    """
+    pd = _check_pandas_installed()
+
+    df = df.copy()
+    df["overlap_start"] = df.sort_values("time")["time"]\
+        .diff()\
+        .lt(max_time)
+
+    df["overlap_end"] = (df["end_time"]
+                         .diff().abs()
+                         .lt(max_time))
+
+    df["no_overlap"] = ~(df["overlap_end"]
+                         & df["overlap_start"])
+    df["group"] = df["no_overlap"].cumsum()
+
+    # now use groupby on 'group'. If one left and one right eye in group
+    # the new start/end times are the mean of the two eyes
+    ovrlp = pd.concat([pd.DataFrame(g[1].drop(columns="eye").mean()).T
+                       if (len(g[1]) == 2) and (len(g[1].eye.unique()) == 2)
+                       else g[1]  # not an overlap, return group unchanged
+                       for g in df.groupby("group")]
+                      )
+    # overlapped events get a "both" value in the "eye" col
+    if "eye" in ovrlp.columns:
+        ovrlp["eye"] = ovrlp["eye"].fillna("both")
+    else:
+        ovrlp["eye"] = "both"
+    tmp_cols = ["overlap_start", "overlap_end", "no_overlap", "group"]
+    return ovrlp.drop(columns=tmp_cols).reset_index(drop=True)
+
+
+@fill_doc
+def read_raw_eyelink(fname, preload=False, verbose=None,
+                     create_annotations=True, apply_offsets=False,
+                     find_overlaps=False, overlap_threshold=0.05,
+                     gap_description='bad_rec_gap'):
+    """Reader for an Eyelink .asc file.
+
+    Parameters
+    ----------
+    fname : str
+        Path to the eyelink file (.asc).
+    %(preload)s
+    %(verbose)s
+    create_annotations : bool | list (default True)
+        Whether to create mne.Annotations from ocular events
+        (blinks, fixations, saccades) and experiment messages. If a list, must
+        contain one or more of ['fixations', 'saccades', 'blinks', 'messages'].
+        If True, creates mne.Annotations for both ocular events and experiment
+        messages.
+    apply_offsets : bool (default False)
+        Adjusts the onset time of the mne.Annotations created from Eyelink
+        experiment messages, if offset values exist in
+        self.dataframes['messages'].
+    find_overlaps : bool (default False)
+        Combine left and right eye :class:`mne.Annotations` (blinks, fixations,
+        saccades) if their start times and their stop times are both not
+        separated by more than overlap_threshold.
+    overlap_threshold : float (default 0.05)
+        Time in seconds. Threshold of allowable time-gap between the start and
+        stop times of the left and right eyes. If the gap is larger than the
+        threshold, the :class:`mne.Annotations` will be kept separate
+        (i.e. "blink_L", "blink_R"). If the gap is smaller than the threshold,
+        the :class:`mne.Annotations` will be merged (i.e. "blink_both").
+    gap_description : str (default 'bad_rec_gap')
+        If there are multiple recording blocks in the file, the description of
+        the annotation that will span across the gap period between the
+        blocks. Uses 'bad_rec_gap' by default so that these time periods will
+        be considered bad by MNE and excluded from operations like epoching.
+
+    Returns
+    -------
+    raw : instance of RawEyelink
+        A Raw object containing eyetracker data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
+    """
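A minimal end-to-end sketch of this reader (the path is hypothetical; the ``eyegaze`` scaling follows the hint logged by ``_validate_data`` for pixel-coordinate data):

    import mne

    raw = mne.io.read_raw_eyelink('recording.asc', create_annotations=True)
    print(raw.annotations)  # blinks, fixations, saccades, messages
    raw.plot(scalings=dict(eyegaze=1e3))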
+    extension = Path(fname).suffix
+    if extension != '.asc':
+        raise ValueError('This reader can only read eyelink .asc files.'
+                         f' Got extension {extension} instead. Consult the'
+                         ' Eyelink manual for converting Eyelink data format'
+                         ' (.edf) files to .asc format.')
+
+    return RawEyelink(fname, preload=preload, verbose=verbose,
+                      create_annotations=create_annotations,
+                      apply_offsets=apply_offsets,
+                      find_overlaps=find_overlaps,
+                      overlap_threshold=overlap_threshold,
+                      gap_desc=gap_description)
+
+
+@fill_doc
+class RawEyelink(BaseRaw):
+    """Raw object from an Eyelink ASCII (.asc) file.
+
+    Parameters
+    ----------
+    fname : str
+        Path to the data file (.asc).
+    create_annotations : bool | list (default True)
+        Whether to create mne.Annotations from ocular events
+        (blinks, fixations, saccades) and experiment messages. If a list, must
+        contain one or more of ['fixations', 'saccades', 'blinks', 'messages'].
+        If True, creates mne.Annotations for both ocular events and experiment
+        messages.
+    apply_offsets : bool (default False)
+        Adjusts the onset time of the mne.Annotations created from Eyelink
+        experiment messages, if offset values exist in
+        raw.dataframes['messages'].
+    find_overlaps : bool (default False)
+        Combine left and right eye :class:`mne.Annotations` (blinks, fixations,
+        saccades) if their start times and their stop times are both not
+        separated by more than overlap_threshold.
+    overlap_threshold : float (default 0.05)
+        Time in seconds. Threshold of allowable time-gap between the start and
+        stop times of the left and right eyes. If the gap is larger than the
+        threshold, the :class:`mne.Annotations` will be kept separate
+        (i.e. "blink_L", "blink_R"). If the gap is smaller than the threshold,
+        the :class:`mne.Annotations` will be merged (i.e. "blink_both").
+    gap_desc : str (default 'bad_rec_gap')
+        If there are multiple recording blocks in the file, the description of
+        the annotation that will span across the gap period between the
+        blocks. Uses 'bad_rec_gap' by default so that these time periods will
+        be considered bad by MNE and excluded from operations like epoching.
+    %(preload)s
+    %(verbose)s
+
+    Attributes
+    ----------
+    fname : pathlib.Path
+        Eyelink filename.
+    dataframes : dict
+        Dictionary of pandas DataFrames. One for eyetracking samples,
+        and one for each type of eyelink event (blinks, messages, etc.).
+    _sample_lines : list
+        List of lists, each list is one sample containing eyetracking
+        X/Y and pupil channel data (+ other channels, if they exist).
+    _event_lines : dict
+        Each key contains a list of lists, for an event-type that occurred
+        during the recording period. Events can vary, from ocular events
+        (blinks, saccades, fixations), to messages from the stimulus
+        presentation software, or info from a response controller.
+    _system_lines : list
+        List of tab-delimited strings. Each string is a system message
+        that in most cases isn't needed. System messages occur for
+        Eyelink's DataViewer application.
+    _tracking_mode : str
+        Whether a single eye was tracked ('monocular'), or both
+        ('binocular').
+    _gap_desc : str
+        The description used for annotations returned by _make_gap_annots.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
+    """
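Given the ``dataframes`` attribute documented above, event-level data can be inspected directly after reading; a sketch (path hypothetical, column names per EYELINK_COLS):

    import mne

    raw = mne.io.read_raw_eyelink('recording.asc')
    raw.dataframes['samples'].head()       # per-sample gaze/pupil values
    raw.dataframes['blinks']['duration']   # blink durations in seconds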
+ """ + + @verbose + def __init__(self, fname, preload=False, verbose=None, + create_annotations=True, + apply_offsets=False, find_overlaps=False, + overlap_threshold=0.05, + gap_desc='bad_rec_gap'): + + logger.info('Loading {}'.format(fname)) + + self.fname = Path(fname) + self._sample_lines = None + self._event_lines = None + self._system_lines = None + self._tracking_mode = None # assigned in self._infer_col_names + self._meas_date = None + self._rec_info = None + self._gap_desc = gap_desc + self.dataframes = {} + + self._get_recording_datetime() # sets self._meas_date + self._parse_recording_blocks() # sets sample, event, & system lines + + sfreq = _get_sfreq(self._event_lines['SAMPLES'][0]) + col_names, ch_names = self._infer_col_names() + self._create_dataframes(col_names, sfreq, find_overlaps=find_overlaps, + threshold=overlap_threshold) + info = self._create_info(ch_names, sfreq) + eye_ch_data = self.dataframes['samples'][ch_names] + eye_ch_data = eye_ch_data.to_numpy().T + + # create mne object + super(RawEyelink, self).__init__(info, preload=eye_ch_data, + filenames=[self.fname], + verbose=verbose) + # set meas_date + self.set_meas_date(self._meas_date) + + # Make Annotations + gap_annots = None + if len(self.dataframes['recording_blocks']) > 1: + gap_annots = self._make_gap_annots() + eye_annots = None + if create_annotations: + eye_annots = self._make_eyelink_annots(self.dataframes, + create_annotations, + apply_offsets) + if gap_annots and eye_annots: # set both + self.set_annotations(gap_annots + eye_annots) + elif gap_annots: + self.set_annotations(gap_annots) + elif eye_annots: + self.set_annotations(eye_annots) + else: + logger.info('Not creating any annotations') + + def _parse_recording_blocks(self): + """Parse Eyelink ASCII file. + + Eyelink samples occur within START and END blocks. + samples lines start with a posix-like string, + and contain eyetracking sample info. Event Lines + start with an upper case string and contain info + about occular events (i.e. blink/saccade), or experiment + messages sent by the stimulus presentation software. + """ + with self.fname.open() as file: + block_num = 1 + self._sample_lines = [] + self._event_lines = {'START': [], 'END': [], 'SAMPLES': [], + 'EVENTS': [], 'ESACC': [], 'EBLINK': [], + 'EFIX': [], 'MSG': [], 'INPUT': [], + 'BUTTON': [], 'PUPIL': []} + self._system_lines = [] + + is_recording_block = False + for line in file: + if line.startswith('START'): # start of recording block + is_recording_block = True + if is_recording_block: + if _is_sys_msg(line): + self._system_lines.append(line) + continue # system messages don't need to be parsed. + tokens = _parse_line(line) + tokens.append(block_num) # add current block number + if isinstance(tokens[0], (int, float)): # Samples + self._sample_lines.append(tokens) + elif tokens[0] in self._event_lines.keys(): + event_key, event_info = tokens[0], tokens[1:] + self._event_lines[event_key].append(event_info) + if tokens[0] == 'END': # end of recording block + is_recording_block = False + block_num += 1 + if not self._event_lines['START']: + raise ValueError('Could not determine the start of the' + ' recording. 
+                                 ' recording. When converting to ASCII, START'
+                                 ' events should not be suppressed.')
+            if not self._sample_lines:  # no samples parsed
+                raise ValueError(f"Couldn't find any samples in {self.fname}")
+            self._validate_data()
+
+    def _validate_data(self):
+        """Check the incoming data for some known problems that can occur."""
+        self._rec_info = self._event_lines['SAMPLES'][0]
+        pupil_info = self._event_lines['PUPIL'][0]
+        n_blocks = len(self._event_lines['START'])
+        sfreq = int(_get_sfreq(self._rec_info))
+        first_samp = self._event_lines['START'][0][0]
+        if ('LEFT' in self._rec_info) and ('RIGHT' in self._rec_info):
+            self._tracking_mode = 'binocular'
+        else:
+            self._tracking_mode = 'monocular'
+        # Detect the datatypes that are in file.
+        if 'GAZE' in self._rec_info:
+            logger.info('Pixel coordinate data detected.')
+            logger.warning('Pass `scalings=dict(eyegaze=1e3)` when using plot'
+                           ' method to make traces more legible.')
+        elif 'HREF' in self._rec_info:
+            logger.info('Head-referenced eye angle data detected.')
+        elif 'PUPIL' in self._rec_info:
+            logger.warning('Raw eyegaze coordinates detected. Analyze with'
+                           ' caution.')
+        if 'AREA' in pupil_info:
+            logger.info('Pupil-size area reported.')
+        elif 'DIAMETER' in pupil_info:
+            logger.info('Pupil-size diameter reported.')
+        # Check sampling frequency.
+        if sfreq == 2000 and isinstance(first_samp, int):
+            raise ValueError(f'The sampling rate is {sfreq}Hz but the'
+                             ' timestamps were not output as float values.'
+                             ' Check the settings in the EDF2ASC application.')
+        elif sfreq != 2000 and isinstance(first_samp, float):
+            raise ValueError('For recordings with a sampling rate less than'
+                             ' 2000Hz, timestamps should not be output to the'
+                             ' ASCII file as float values. Check the'
+                             ' settings in the EDF2ASC application. Got a'
+                             f' sampling rate of {sfreq}Hz.')
+        # If more than 1 recording period, make sure sfreq didn't change.
+        if n_blocks > 1:
+            err_msg = 'The sampling frequency changed during the recording.'\
+                      ' This file cannot be read into MNE.'
+            for block_info in self._event_lines['SAMPLES'][1:]:
+                block_sfreq = int(_get_sfreq(block_info))
+                if block_sfreq != sfreq:
+                    raise ValueError(err_msg +
+                                     f' Got both {sfreq} and {block_sfreq} Hz.'
+                                     )
+        if self._tracking_mode == 'monocular':
+            assert self._rec_info[1] in ['LEFT', 'RIGHT']
+            eye = self._rec_info[1]
+            blocks_list = self._event_lines['SAMPLES']
+            eye_per_block = [block_info[1] for block_info in blocks_list]
+            if not all([this_eye == eye for this_eye in eye_per_block]):
+                logger.warning('The eye being tracked changed during the'
+                               ' recording. The channel names will reflect'
+                               ' the eye that was tracked at the start of'
+                               ' the recording.')
+
+    def _get_recording_datetime(self):
+        """Create a datetime object from the datetime in the ASCII file."""
+        # create a timezone object for UTC
+        tz = timezone(timedelta(hours=0))
+        in_header = False
+        with self.fname.open() as file:
+            for line in file:
+                # header lines are at top of file and start with **
+                if line.startswith('**'):
+                    in_header = True
+                if in_header:
+                    if line.startswith('** DATE:'):
+                        dt_str = line.replace('** DATE:', '').strip()
+                        fmt = "%a %b %d %H:%M:%S %Y"
+                        try:
+                            # Eyelink measdate timestamps are timezone naive.
+                            # Force datetime to be in UTC.
+                            # Even though dt is probably in local time zone.
+                            dt_naive = datetime.strptime(dt_str, fmt)
+                            dt_aware = dt_naive.replace(tzinfo=tz)
+                            self._meas_date = dt_aware
+                        except Exception:
+                            msg = ('Extraction of measurement date failed.'
+                                   ' Please report this as a GitHub issue.'
+                                   ' The date is being set to None.')
+                            logger.warning(msg)
+                        break
+
+    def _href_to_radian(self, opposite, f=15_000):
+        """Convert HREF eyegaze samples to radians.
+
+        Parameters
+        ----------
+        opposite : int
+            The x or y coordinate in an HREF gaze sample.
+        f : int (default 15_000)
+            Distance of the plane from the eye.
+
+        Returns
+        -------
+        The x or y coordinate in radians.
+
+        Notes
+        -----
+        See section 4.4.2.2 in the Eyelink 1000 Plus User Manual
+        (version 1.0.19) for a detailed description of HREF data.
+        """
+        return np.arcsin(opposite / f)
+
+    def _infer_col_names(self):
+        """Build column and channel names for data from Eyelink ASCII file.
+
+        Returns the expected column names for the sample lines and event
+        lines, to be passed into pd.DataFrame. Sample and event lines in
+        eyelink files have a fixed order of columns, but the columns that
+        are present can vary. The order that col_names is built below should
+        NOT change.
+        """
+        col_names = {}
+        # initiate the column names for the sample lines
+        col_names['sample'] = list(EYELINK_COLS['timestamp'])
+
+        # and for the eye message lines
+        col_names['blink'] = list(EYELINK_COLS['eye_event'])
+        col_names['fixation'] = list(EYELINK_COLS['eye_event'] +
+                                     EYELINK_COLS['fixation'])
+        col_names['saccade'] = list(EYELINK_COLS['eye_event'] +
+                                    EYELINK_COLS['saccade'])
+
+        # Recording was either binocular or monocular
+        # If monocular, find out which eye was tracked and append to ch_name
+        if self._tracking_mode == 'monocular':
+            assert self._rec_info[1] in ['LEFT', 'RIGHT']
+            eye = self._rec_info[1].lower()
+            ch_names = list(EYELINK_COLS['pos'][eye])
+        elif self._tracking_mode == 'binocular':
+            ch_names = list(EYELINK_COLS['pos']['left'] +
+                            EYELINK_COLS['pos']['right'])
+        col_names['sample'].extend(ch_names)
+
+        # The order of these if statements should not be changed.
+        if 'VEL' in self._rec_info:  # If velocity data are reported
+            if self._tracking_mode == 'monocular':
+                ch_names.extend(EYELINK_COLS['velocity'][eye])
+                col_names['sample'].extend(EYELINK_COLS['velocity'][eye])
+            elif self._tracking_mode == 'binocular':
+                ch_names.extend(EYELINK_COLS['velocity']['left'] +
+                                EYELINK_COLS['velocity']['right'])
+                col_names['sample'].extend(EYELINK_COLS['velocity']['left'] +
+                                           EYELINK_COLS['velocity']['right'])
+        # if resolution data are reported
+        if 'RES' in self._rec_info:
+            ch_names.extend(EYELINK_COLS['resolution'])
+            col_names['sample'].extend(EYELINK_COLS['resolution'])
+            col_names['fixation'].extend(EYELINK_COLS['resolution'])
+            col_names['saccade'].extend(EYELINK_COLS['resolution'])
+        # if digital input port values are reported
+        if 'INPUT' in self._rec_info:
+            ch_names.extend(EYELINK_COLS['input'])
+            col_names['sample'].extend(EYELINK_COLS['input'])
+
+        # add flags column
+        col_names['sample'].extend(EYELINK_COLS['flags'])
+
+        # if head target info was reported, add its cols after flags col.
+        if 'HTARGET' in self._rec_info:
+            ch_names.extend(EYELINK_COLS['remote'])
+            col_names['sample'].extend(EYELINK_COLS['remote'] +
+                                       EYELINK_COLS['remote_flags'])
+
+        # finally add a column for recording block number
+        # FYI this column does not exist in the asc file,
+        # but it is added during _parse_recording_blocks
+        for col in col_names.values():
+            col.extend(EYELINK_COLS['block_num'])
+
+        return col_names, ch_names
+
+    def _create_dataframes(self, col_names, sfreq, find_overlaps=False,
+                           threshold=0.05):
+        """Create pandas.DataFrame for Eyelink samples and events.
+
+        Creates a pandas DataFrame for self._sample_lines and for each
+        non-empty key in self._event_lines.
+        """
+        pd = _check_pandas_installed()
+
+        # First sample should be the first line of the first recording block
+        first_samp = self._event_lines['START'][0][0]
+
+        # dataframe for samples
+        self.dataframes['samples'] = pd.DataFrame(self._sample_lines,
+                                                  columns=col_names['sample'])
+        if 'HREF' in self._rec_info:
+            pos_names = (EYELINK_COLS['pos']['left'][:-1] +
+                         EYELINK_COLS['pos']['right'][:-1])
+            for col in self.dataframes['samples'].columns:
+                if col not in pos_names:  # 'xpos_left' ... 'ypos_right'
+                    continue
+                series = self._href_to_radian(self.dataframes['samples'][col])
+                self.dataframes['samples'][col] = series
+
+        n_block = len(self._event_lines['START'])
+        if n_block > 1:
+            logger.info(f'There are {n_block} recording blocks in this'
+                        ' file. Times between blocks will be annotated with'
+                        f' {self._gap_desc}.')
+            # if there is more than 1 recording block we must account for
+            # the missing timestamps and samples between the blocks
+            self.dataframes['samples'] = _fill_times(self.dataframes
+                                                     ['samples'],
+                                                     sfreq=sfreq)
+        _convert_times(self.dataframes['samples'], first_samp)
+
+        # dataframe for each type of ocular event
+        for event, columns, label in zip(['EFIX', 'ESACC', 'EBLINK'],
+                                         [col_names['fixation'],
+                                          col_names['saccade'],
+                                          col_names['blink']],
+                                         ['fixations',
+                                          'saccades',
+                                          'blinks']
+                                         ):
+            if self._event_lines[event]:  # an empty list returns False
+                self.dataframes[label] = pd.DataFrame(self._event_lines[event],
+                                                      columns=columns)
+                _convert_times(self.dataframes[label], first_samp)
+
+                if find_overlaps is True:
+                    if self._tracking_mode == 'monocular':
+                        raise ValueError('find_overlaps is only valid with'
+                                         ' binocular recordings, this file is'
+                                         f' {self._tracking_mode}')
+                    df = _find_overlaps(self.dataframes[label],
+                                        max_time=threshold)
+                    self.dataframes[label] = df
+
+            else:
+                logger.info(f'No {label} were found in this file.'
+                            f' Not returning any info on {label}.')
+
+        # make dataframe for experiment messages
+        if self._event_lines['MSG']:
+            msgs = []
+            for tokens in self._event_lines['MSG']:
+                timestamp = tokens[0]
+                block = tokens[-1]
+                # if offset token exists, it will be the 1st index
+                # and is an int or float
+                if isinstance(tokens[1], (int, float)):
+                    offset = tokens[1]
+                    msg = ' '.join(str(x) for x in tokens[2:-1])
+                else:
+                    # there is no offset token
+                    offset = np.nan
+                    msg = ' '.join(str(x) for x in tokens[1:-1])
+                msgs.append([timestamp, offset, msg, block])
+
+            cols = ['time', 'offset', 'event_msg', 'block']
+            self.dataframes['messages'] = (pd.DataFrame(msgs,
+                                                        columns=cols))
+            _convert_times(self.dataframes['messages'], first_samp)
+
+        # make dataframe for recording block start, end times
+        assert (len(self._event_lines['START'])
+                == len(self._event_lines['END'])
+                )
+        blocks = [[bgn[0], end[0], bgn[-1]]  # start, end, block_num
+                  for bgn, end in zip(self._event_lines['START'],
+                                      self._event_lines['END'])
+                  ]
+        cols = ['time', 'end_time', 'block']
+        self.dataframes['recording_blocks'] = pd.DataFrame(blocks,
+                                                           columns=cols)
+        _convert_times(self.dataframes['recording_blocks'], first_samp)
+
+        # make dataframe for digital input port
+        if self._event_lines['INPUT']:
+            cols = ['time', 'DIN', 'block']
+            self.dataframes['DINS'] = pd.DataFrame(self._event_lines['INPUT'],
+                                                   columns=cols)
+            _convert_times(self.dataframes['DINS'], first_samp)
+
+        # TODO: Make dataframes for other eyelink events (Buttons)
+
+    def _create_info(self, ch_names, sfreq):
+        """Create info object for RawEyelink."""
+        # assign channel type from ch_name
+        pos_names = (EYELINK_COLS['pos']['left'][:-1] +
+                     EYELINK_COLS['pos']['right'][:-1])
+        pupil_names = (EYELINK_COLS['pos']['left'][-1],
+                       EYELINK_COLS['pos']['right'][-1])
+        ch_types = ['eyegaze' if ch in pos_names
+                    else 'pupil' if ch in pupil_names
+                    else 'stim' if ch == 'DIN'
+                    else 'misc'
+                    for ch in ch_names]
+        info = create_info(ch_names,
+                           sfreq,
+                           ch_types)
+        # set correct loc for eyepos and pupil channels
+        for ch_dict in info['chs']:
+            # loc index 3 can indicate left or right eye
+            if ch_dict['ch_name'].endswith('left'):  # [x,y,pupil]_left
+                ch_dict['loc'][3] = -1  # left eye
+            elif ch_dict['ch_name'].endswith('right'):  # [x,y,pupil]_right
+                ch_dict['loc'][3] = 1  # right eye
+            else:
+                logger.debug(f"leaving index 3 of loc array as"
+                             f" {ch_dict['loc'][3]} for {ch_dict['ch_name']}")
+            # loc index 4 can indicate x/y coord
+            if ch_dict['ch_name'].startswith('x'):
+                ch_dict['loc'][4] = -1  # x-coord
+            elif ch_dict['ch_name'].startswith('y'):
+                ch_dict['loc'][4] = 1  # y-coord
+            else:
+                logger.debug(f"leaving index 4 of loc array as"
+                             f" {ch_dict['loc'][4]} for {ch_dict['ch_name']}")
+            if 'HREF' in self._rec_info:
+                if ch_dict['ch_name'].startswith(('xpos', 'ypos')):
+                    ch_dict['unit'] = FIFF.FIFF_UNIT_RAD
+        return info
+
+    def _make_gap_annots(self, key='recording_blocks'):
+        """Create Annotations for gap periods between recording blocks."""
+        df = self.dataframes[key]
+        gap_desc = self._gap_desc
+        onsets = df['end_time'].iloc[:-1]
+        diffs = df['time'].shift(-1) - df['end_time']
+        durations = diffs.iloc[:-1]
+        descriptions = [gap_desc] * len(onsets)
+        return Annotations(onset=onsets,
+                           duration=durations,
+                           description=descriptions)
+
+    def _make_eyelink_annots(self, df_dict, create_annots, apply_offsets):
+        """Create Annotations for each df in self.dataframes."""
+        valid_descs = ['blinks', 'saccades', 'fixations', 'messages']
+        msg = ("create_annotations must be True or a"
list containing one or" + f" more of {valid_descs}.") + wrong_type = (msg + f' Got a {type(create_annots)} instead.') + if create_annots is True: + descs = valid_descs + else: + assert isinstance(create_annots, list), wrong_type + for desc in create_annots: + assert desc in valid_descs, msg + f" Got '{desc}' instead" + descs = create_annots + + annots = None + for key, df in df_dict.items(): + eye_annot_cond = ((key in ['blinks', 'fixations', 'saccades']) + and (key in descs)) + if eye_annot_cond: + onsets = df['time'] + durations = df['duration'] + # Create annotations for both eyes + descriptions = f'{key[:-1]}_' + df['eye'] # i.e "blink_r" + this_annot = Annotations(onset=onsets, + duration=durations, + description=descriptions) + elif (key in ['messages']) and (key in descs): + if apply_offsets: + if df['offset'].isnull().all(): + logger.warning('There are no offsets for the messages' + f' in {self.fname}. Not applying any' + ' offset') + # If df['offset] is all NaNs, time is not changed + onsets = df['time'] + df['offset'].fillna(0) + else: + onsets = df['time'] + durations = [0] * onsets + descriptions = df['event_msg'] + this_annot = Annotations(onset=onsets, + duration=durations, + description=descriptions) + else: + continue # TODO make df and annotations for Buttons + if not annots: + annots = this_annot + elif annots: + annots += this_annot + if not annots: + logger.warning(f'Annotations for {descs} were requested but' + ' none could be made.') + return + return annots diff --git a/mne/io/eyelink/tests/__init__.py b/mne/io/eyelink/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/mne/io/eyelink/tests/test_eyelink.py b/mne/io/eyelink/tests/test_eyelink.py new file mode 100644 index 00000000000..0aa5e4d4e0b --- /dev/null +++ b/mne/io/eyelink/tests/test_eyelink.py @@ -0,0 +1,147 @@ +import pytest + +import numpy as np + +from mne.datasets.testing import data_path, requires_testing_data +from mne.io import read_raw_eyelink +from mne.io.constants import FIFF +from mne.io.pick import _DATA_CH_TYPES_SPLIT +from mne.utils import _check_pandas_installed, requires_pandas + +testing_path = data_path(download=False) +fname = testing_path / 'eyetrack' / 'test_eyelink.asc' +fname_href = testing_path / 'eyetrack' / 'test_eyelink_HREF.asc' + + +def test_eyetrack_not_data_ch(): + """Eyetrack channels are not data channels.""" + msg = 'eyetrack channels are not data channels. Refer to MNE definition'\ + ' of data channels in the glossary section of the documentation.' 
+    assert 'eyegaze' not in _DATA_CH_TYPES_SPLIT, msg
+    assert 'pupil' not in _DATA_CH_TYPES_SPLIT, msg
+
+
+@requires_testing_data
+@requires_pandas
+@pytest.mark.parametrize('fname, create_annotations, find_overlaps',
+                         [(fname, False, False),
+                          (fname, True, False),
+                          (fname, True, True),
+                          (fname, ['fixations', 'saccades', 'blinks'], True)])
+def test_eyelink(fname, create_annotations, find_overlaps):
+    """Test reading eyelink asc files."""
+    raw = read_raw_eyelink(fname, create_annotations=create_annotations,
+                           find_overlaps=find_overlaps)
+
+    # First, tests that shouldn't change based on function arguments
+    assert raw.info['sfreq'] == 500  # True for this file
+    assert raw.info['meas_date'].month == 3
+    assert raw.info['meas_date'].day == 10
+    assert raw.info['meas_date'].year == 2022
+
+    assert len(raw.info['ch_names']) == 6
+    assert raw.info['chs'][0]['kind'] == FIFF.FIFFV_EYETRACK_CH
+    assert raw.info['chs'][0]['coil_type'] == FIFF.FIFFV_COIL_EYETRACK_POS
+    assert raw.info['chs'][2]['coil_type'] == FIFF.FIFFV_COIL_EYETRACK_PUPIL
+
+    # x_left
+    assert all(raw.info['chs'][0]['loc'][3:5] == [-1, -1])
+    # pupil_left
+    assert raw.info['chs'][2]['loc'][3] == -1
+    assert np.isnan(raw.info['chs'][2]['loc'][4])
+    # y_right
+    assert all(raw.info['chs'][4]['loc'][3:5] == [1, 1])
+    assert 'RawEyelink' in repr(raw)
+
+    # Test some annotation values for accuracy.
+    if create_annotations is True and find_overlaps:
+        orig = raw.info['meas_date']
+        df = raw.annotations.to_data_frame()
+        # Convert annot onset datetimes to seconds, relative to orig_time
+        df['time_in_sec'] = df['onset'].apply(lambda x: x.timestamp()
+                                              - orig.timestamp())
+        # There is a blink in this data at 8.9 seconds
+        cond = (df['time_in_sec'] > 8.899) & (df['time_in_sec'] < 8.95)
+        assert df[cond]['description'].values[0].startswith('blink')
+    if find_overlaps is True:
+        df = raw.annotations.to_data_frame()
+        # these should both be True so long as _find_overlaps is not
+        # majorly refactored.
+        assert 'blink_L' in df['description'].unique()
+        assert 'blink_both' in df['description'].unique()
+    if isinstance(create_annotations, list) and find_overlaps:
+        # the last pytest parametrize condition should hit this
+        df = raw.annotations.to_data_frame()
+        # Rows 0, 1, 2 should be 'fixation_both', 'saccade_both', 'blink_both'
+        for i, label in zip([0, 1, 2], ['fixation', 'saccade', 'blink']):
+            assert df['description'].iloc[i] == f'{label}_both'
+
+
+@requires_testing_data
+@requires_pandas
+@pytest.mark.parametrize('fname_href',
+                         [(fname_href)])
+def test_radian(fname_href):
+    """Test converting HREF position data to radians."""
+    raw = read_raw_eyelink(fname_href, create_annotations=['blinks'])
+    # Test channel types
+    assert raw.get_channel_types() == ['eyegaze', 'eyegaze', 'pupil']
+
+    # Test that eyegaze channels have a radian unit
+    assert raw.info['chs'][0]['unit'] == FIFF.FIFF_UNIT_RAD
+    assert raw.info['chs'][1]['unit'] == FIFF.FIFF_UNIT_RAD
+
+    # Data in radians should range between -1 and 1
+    # Test first channel (xpos_right)
+    assert raw.get_data()[0].min() > -1
+    assert raw.get_data()[0].max() < 1
+
+
+@requires_testing_data
+@requires_pandas
+@pytest.mark.parametrize('fname', [(fname)])
+def test_fill_times(fname):
+    """Test use of pd.merge_asof in _fill_times.
+
+    We are merging on floating point values. pd.merge_asof is used so that
+    any differences in floating point precision between
+    df['samples']['times'] and the times generated with np.arange don't
+    result in the time columns not merging correctly - i.e.
+    1560687.0 and 1560687.000001 should merge.
+    """
+    from ..eyelink import _fill_times
+
+    raw = read_raw_eyelink(fname, create_annotations=False)
+    sfreq = raw.info['sfreq']
+    # just take first 1000 points for testing
+    df = raw.dataframes['samples'].iloc[:1000].reset_index(drop=True)
+    # even during blinks, pupil val is 0, so there should be no nans
+    # in this column
+    assert not df['pupil_left'].isna().sum()
+    nan_count = df['pupil_left'].isna().sum()  # i.e. 0
+    df_merged = _fill_times(df, sfreq)
+    # If times don't merge correctly, there will be additional rows in
+    # df_merged with all nan values
+    assert df_merged['pupil_left'].isna().sum() == nan_count  # i.e. 0
+
+
+@requires_pandas
+def test_find_overlaps():
+    """Test finding overlapping ocular events between the left and right eyes.
+
+    In the simulated blink df below, the first two rows
+    will be considered an overlap because the diff() of both the 'time' and
+    'end_time' values is < .05 (50 ms). The 3rd and 4th rows will not be
+    considered an overlap because the diff() of the 'time' values is > .05
+    (4.20 - 4.14 = .06). The 5th and 6th rows will not be considered an
+    overlap because they are both left eye events.
+    """
+    from ..eyelink import _find_overlaps
+    pd = _check_pandas_installed()
+    blink_df = pd.DataFrame({'eye': ['L', 'R', 'L', 'R', 'L', 'L'],
+                             'time': [.01, .04, 4.14, 4.20, 6.50, 6.504],
+                             'end_time': [.05, .08, 4.18, 4.22, 6.60, 6.604]})
+    overlap_df = _find_overlaps(blink_df)
+    assert len(overlap_df['eye'].unique()) == 3  # ['both', 'L', 'R']
+    assert len(overlap_df) == 5  # ['both', 'L', 'R', 'L', 'L']
+    assert overlap_df['eye'].iloc[0] == 'both'
diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py
index b5e8f844c62..f8c9eba13cc 100644
--- a/mne/io/meas_info.py
+++ b/mne/io/meas_info.py
@@ -2431,7 +2431,8 @@ def create_info(ch_names, sfreq, ch_types='misc', verbose=None):
         :term:`data channel `. Currently supported fields are 'ecg',
         'bio', 'stim', 'eog', 'misc', 'seeg', 'dbs', 'ecog', 'mag', 'eeg',
         'ref_meg', 'grad', 'emg', 'hbr',
-        or 'hbo'. If str, then all channels are assumed to be of the same type.
+        'eyegaze', 'pupil' or 'hbo'.
+        If str, then all channels are assumed to be of the same type.
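A sketch of creating an eye-tracking ``Info`` from scratch with the types registered in this diff (per ``mne/defaults.py`` and ``mne/io/pick.py``, the concrete type names are 'eyegaze' and 'pupil'; channel names below are arbitrary):

    import numpy as np
    import mne

    info = mne.create_info(['xpos', 'ypos', 'pupil'], sfreq=1000.,
                           ch_types=['eyegaze', 'eyegaze', 'pupil'])
    raw = mne.io.RawArray(np.zeros((3, 1000)), info)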
     %(verbose)s
 
     Returns
diff --git a/mne/io/pick.py b/mne/io/pick.py
index 87511710143..d71971155d1 100644
--- a/mne/io/pick.py
+++ b/mne/io/pick.py
@@ -99,6 +99,10 @@ def get_channel_type_constants(include_defaults=False):
                          unit=FIFF.FIFF_UNIT_CEL),
         gsr=dict(kind=FIFF.FIFFV_GALVANIC_CH,
                  unit=FIFF.FIFF_UNIT_S),
+        eyegaze=dict(kind=FIFF.FIFFV_EYETRACK_CH,
+                     coil_type=FIFF.FIFFV_COIL_EYETRACK_POS),
+        pupil=dict(kind=FIFF.FIFFV_EYETRACK_CH,
+                   coil_type=FIFF.FIFFV_COIL_EYETRACK_PUPIL)
     )
     if include_defaults:
         coil_none = dict(coil_type=FIFF.FIFFV_COIL_NONE)
@@ -115,6 +119,8 @@ def get_channel_type_constants(include_defaults=False):
             emg=coil_none,
             bio=coil_none,
             fnirs_od=unit_none,
+            pupil=unit_none,
+            eyegaze=dict(unit=FIFF.FIFF_UNIT_PX),
         )
         for key, value in defaults.items():
             base[key].update(value)
@@ -153,6 +159,7 @@ def get_channel_type_constants(include_defaults=False):
     FIFF.FIFFV_FNIRS_CH: 'fnirs',
     FIFF.FIFFV_TEMPERATURE_CH: 'temperature',
     FIFF.FIFFV_GALVANIC_CH: 'gsr',
+    FIFF.FIFFV_EYETRACK_CH: 'eyetrack',
 }
 # How to reduce our categories in channel_type (originally)
 _second_rules = {
@@ -172,7 +179,10 @@ def get_channel_type_constants(include_defaults=False):
             FIFF.FIFFV_COIL_EEG_BIPOLAR: 'eeg',
             FIFF.FIFFV_COIL_NONE: 'eeg',  # MNE-C backward compat
             FIFF.FIFFV_COIL_EEG_CSD: 'csd',
-        })
+        }),
+    'eyetrack': ('coil_type', {FIFF.FIFFV_COIL_EYETRACK_POS: 'eyegaze',
+                               FIFF.FIFFV_COIL_EYETRACK_PUPIL: 'pupil'
+                               })
 }
 
 
@@ -194,7 +204,7 @@ def get_channel_type_constants(include_defaults=False):
         {'grad', 'mag', 'eeg', 'csd', 'stim', 'eog', 'emg', 'ecg',
          'ref_meg', 'resp', 'exci', 'ias', 'syst', 'misc', 'seeg', 'dbs',
          'bio', 'chpi', 'dipole', 'gof', 'ecog', 'hbo', 'hbr',
-         'temperature', 'gsr'}
+         'temperature', 'gsr', 'eyetrack'}
     """
     # This is faster than the original _channel_type_old now in test_pick.py
     # because it uses (at most!) two dict lookups plus one conditional
@@ -350,6 +360,21 @@
         return False
 
 
+def _triage_eyetrack_pick(ch, eyetrack):
+    """Triage an eyetrack pick type."""
+    if eyetrack is False:
+        return False
+    elif eyetrack is True:
+        return True
+    elif ch['coil_type'] == FIFF.FIFFV_COIL_EYETRACK_PUPIL and \
+            'pupil' in eyetrack:
+        return True
+    elif ch['coil_type'] == FIFF.FIFFV_COIL_EYETRACK_POS and \
+            'eyegaze' in eyetrack:
+        return True
+    return False
+
+
 def _check_meg_type(meg, allow_auto=False):
     """Ensure a valid meg type."""
     if isinstance(meg, str):
@@ -380,7 +405,7 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False,
                emg=False, ref_meg='auto', misc=False, resp=False, chpi=False,
                exci=False, ias=False, syst=False, seeg=False, dipole=False,
                gof=False, bio=False, ecog=False, fnirs=False, csd=False,
-               dbs=False, temperature=False, gsr=False,
-               include=(), exclude='bads', selection=None):
+               dbs=False, temperature=False, gsr=False, eyetrack=False,
+               include=(), exclude='bads', selection=None):
     """Pick channels by type and names.
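Per ``_triage_eyetrack_pick`` above, ``eyetrack`` may also be a collection naming only some subtypes. A self-contained sketch of the functional interface:

    import mne

    info = mne.create_info(['xpos_left', 'ypos_left', 'pupil_left'], 500.,
                           ['eyegaze', 'eyegaze', 'pupil'])
    picks = mne.pick_types(info, eyetrack=['pupil'])  # -> array([2])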
Parameters @@ -412,14 +437,16 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, temperature, gsr): if not isinstance(param, bool): w = ('Parameters for all channel types (with the exception of ' - '"meg", "ref_meg" and "fnirs") must be of type bool, not {}.') + '"meg", "ref_meg", "fnirs", and "eyetrack") must be of type ' + 'bool, not {}.') raise ValueError(w.format(type(param))) param_dict = dict(eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg, misc=misc, resp=resp, chpi=chpi, exci=exci, ias=ias, syst=syst, seeg=seeg, dbs=dbs, dipole=dipole, gof=gof, bio=bio, ecog=ecog, csd=csd, - temperature=temperature, gsr=gsr) + temperature=temperature, gsr=gsr, eyetrack=eyetrack) + # avoid triage if possible if isinstance(meg, bool): for key in ('grad', 'mag'): @@ -433,12 +460,14 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, try: pick[k] = param_dict[ch_type] except KeyError: # not so simple - assert ch_type in ( - 'grad', 'mag', 'ref_meg') + _FNIRS_CH_TYPES_SPLIT + assert ch_type in ('grad', 'mag', 'ref_meg') + \ + _FNIRS_CH_TYPES_SPLIT + _EYETRACK_CH_TYPES_SPLIT if ch_type in ('grad', 'mag'): pick[k] = _triage_meg_pick(info['chs'][k], meg) elif ch_type == 'ref_meg': pick[k] = _triage_meg_pick(info['chs'][k], ref_meg) + elif ch_type in ('eyegaze', 'pupil'): + pick[k] = _triage_eyetrack_pick(info['chs'][k], eyetrack) else: # ch_type in ('hbo', 'hbr') pick[k] = _triage_fnirs_pick(info['chs'][k], fnirs, warned) @@ -730,10 +759,11 @@ def channel_indices_by_type(info, picks=None): channel indices. """ idx_by_type = {key: list() for key in _PICK_TYPES_KEYS if - key not in ('meg', 'fnirs')} + key not in ('meg', 'fnirs', 'eyetrack')} idx_by_type.update(mag=list(), grad=list(), hbo=list(), hbr=list(), fnirs_cw_amplitude=list(), fnirs_fd_ac_amplitude=list(), - fnirs_fd_phase=list(), fnirs_od=list()) + fnirs_fd_phase=list(), fnirs_od=list(), + eyegaze=list(), pupil=list()) picks = _picks_to_idx(info, picks, none='all', exclude=(), allow_empty=True) for k in picks: @@ -823,8 +853,10 @@ def _contains_ch_type(info, ch_type): meg_extras = list(_MEG_CH_TYPES_SPLIT) fnirs_extras = list(_FNIRS_CH_TYPES_SPLIT) + et_extras = list(_EYETRACK_CH_TYPES_SPLIT) valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS - if key != 'meg'] + meg_extras + fnirs_extras) + if key != 'meg'] + + meg_extras + fnirs_extras + et_extras) _check_option('ch_type', ch_type, valid_channel_types) if info is None: raise ValueError('Cannot check for channels of type "%s" because info ' @@ -925,22 +957,26 @@ def _check_excludes_includes(chs, info=None, allow_bads=False): meg=True, eeg=True, csd=True, stim=False, eog=False, ecg=False, emg=False, misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True, - dbs=True, temperature=False, gsr=False) + dbs=True, temperature=False, gsr=False, eyetrack=True) _PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg']) _MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2') _FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') +_EYETRACK_CH_TYPES_SPLIT = ('eyegaze', 'pupil') _DATA_CH_TYPES_ORDER_DEFAULT = ( 'mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'resp', 'emg', 'ref_meg', 'misc', 'stim', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', 'ecog', 'dbs', 'temperature', 'gsr', 'gof', 'dipole', -) + _FNIRS_CH_TYPES_SPLIT + ('whitened',) +) + _FNIRS_CH_TYPES_SPLIT + _EYETRACK_CH_TYPES_SPLIT + 
('whitened',) + # Valid data types, ordered for consistency, used in viz/evoked. _VALID_CHANNEL_TYPES = ( 'eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'resp', 'emg', 'dipole', 'gof', - 'bio', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT + ('misc', 'csd') + 'bio', 'ecog', 'dbs' +) + _FNIRS_CH_TYPES_SPLIT + _EYETRACK_CH_TYPES_SPLIT + ('misc', 'csd') _DATA_CH_TYPES_SPLIT = ( - 'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT + 'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', 'dbs' +) + _FNIRS_CH_TYPES_SPLIT # Electrode types (e.g., can be average-referenced together or separately) _ELECTRODE_CH_TYPES = ('eeg', 'ecog', 'seeg', 'dbs') diff --git a/mne/io/tests/test_constants.py b/mne/io/tests/test_constants.py index 1f0cd473992..2f05d73b19a 100644 --- a/mne/io/tests/test_constants.py +++ b/mne/io/tests/test_constants.py @@ -21,7 +21,7 @@ # https://github.com/mne-tools/fiff-constants/commits/master REPO = 'mne-tools' -COMMIT = '6d9ca9ce7fb44c63d429c2986a953500743dfb22' +COMMIT = 'e27f68cbf74dbfc5193ad429cc77900a59475181' # These are oddities that we won't address: iod_dups = (355, 359) # these are in both MEGIN and MNE files @@ -55,6 +55,10 @@ 303, # fNIRS optical density 304, # fNIRS frequency domain AC amplitude 305, # fNIRS frequency domain phase + 306, # fNIRS time domain gated amplitude + 307, # fNIRS time domain moments amplitude + 400, # Eye-tracking gaze position + 401, # Eye-tracking pupil size 1000, # For testing the MCG software 2001, # Generic axial gradiometer 3011, # VV prototype wirewound planar sensor diff --git a/mne/preprocessing/__init__.py b/mne/preprocessing/__init__.py index 6bb8111efaa..b9c308dddaa 100644 --- a/mne/preprocessing/__init__.py +++ b/mne/preprocessing/__init__.py @@ -33,3 +33,4 @@ from .interpolate import equalize_bads, interpolate_bridged_electrodes from . import ieeg from ._css import cortical_signal_suppression +from . import eyetracking diff --git a/mne/preprocessing/eyetracking/__init__.py b/mne/preprocessing/eyetracking/__init__.py new file mode 100644 index 00000000000..7c7f5f42765 --- /dev/null +++ b/mne/preprocessing/eyetracking/__init__.py @@ -0,0 +1,7 @@ +"""Eye tracking specific preprocessing functions.""" + +# Authors: Dominik Welke +# +# License: BSD-3-Clause + +from .eyetracking import set_channel_types_eyetrack diff --git a/mne/preprocessing/eyetracking/eyetracking.py b/mne/preprocessing/eyetracking/eyetracking.py new file mode 100644 index 00000000000..346a130564f --- /dev/null +++ b/mne/preprocessing/eyetracking/eyetracking.py @@ -0,0 +1,146 @@ +# Authors: Dominik Welke +# +# License: BSD-3-Clause + + +import numpy as np + +from ...io.constants import FIFF + + +# specific function to set eyetrack channels +def set_channel_types_eyetrack(inst, mapping): + """Define sensor type for eyetrack channels. + + This function can set all eye tracking specific information: + channel type, unit, eye (and x/y component; only for gaze channels) + + Supported channel types: + ``'eyegaze'`` and ``'pupil'`` + + Supported units: + ``'au'``, ``'px'``, ``'deg'``, ``'rad'`` (for eyegaze) + ``'au'``, ``'mm'``, ``'m'`` (for pupil) + + Parameters + ---------- + inst : instance of Raw, Epochs, or Evoked + The data instance. + mapping : dict + A dictionary mapping a channel to a list/tuple including + channel type, unit, eye, [and x/y component] (all as str), e.g., + ``{'l_x': ('eyegaze', 'deg', 'left', 'x')}`` or + ``{'r_pupil': ('pupil', 'au', 'right')}``. 
+
+    Returns
+    -------
+    inst : instance of Raw | Epochs | Evoked
+        The instance, modified in place.
+
+    Notes
+    -----
+    Using ``inst.set_channel_types()`` to set a channel to ``'eyegaze'`` or
+    ``'pupil'`` works as well, but it cannot correctly set the unit, eye,
+    and x/y component.
+
+    Data will be stored in SI units:
+    if your data comes in ``deg`` (visual angle) it will be converted to
+    ``rad``; if it is in ``mm`` it will be converted to ``m``.
+    """
+    ch_names = inst.info['ch_names']
+
+    # allowed
+    valid_types = ['eyegaze', 'pupil']  # ch_type
+    valid_units = {'px': ['px', 'pixel'],
+                   'rad': ['rad', 'radian', 'radians'],
+                   'deg': ['deg', 'degree', 'degrees'],
+                   'm': ['m', 'meter', 'meters'],
+                   'mm': ['mm', 'millimeter', 'millimeters'],
+                   'au': [None, 'none', 'au', 'arbitrary']}
+    valid_units['all'] = [item for sublist in valid_units.values()
+                          for item in sublist]
+    valid_eye = {'l': ['left', 'l'],
+                 'r': ['right', 'r']}
+    valid_eye['all'] = [item for sublist in valid_eye.values()
+                        for item in sublist]
+    valid_xy = {'x': ['x', 'h', 'horizontal'],
+                'y': ['y', 'v', 'vertical']}
+    valid_xy['all'] = [item for sublist in valid_xy.values()
+                       for item in sublist]
+
+    # loop over channels
+    for ch_name, ch_desc in mapping.items():
+        if ch_name not in ch_names:
+            raise ValueError("This channel name (%s) doesn't exist in "
+                             "info." % ch_name)
+        c_ind = ch_names.index(ch_name)
+
+        # set ch_type and unit
+        ch_type = ch_desc[0].lower()
+        if ch_type not in valid_types:
+            raise ValueError(
+                "ch_type must be one of {}. "
+                "Got '{}' instead.".format(valid_types, ch_type))
+        if ch_type == 'eyegaze':
+            coil_type = FIFF.FIFFV_COIL_EYETRACK_POS
+        elif ch_type == 'pupil':
+            coil_type = FIFF.FIFFV_COIL_EYETRACK_PUPIL
+        inst.info['chs'][c_ind]['coil_type'] = coil_type
+        inst.info['chs'][c_ind]['kind'] = FIFF.FIFFV_EYETRACK_CH
+
+        ch_unit = None if (ch_desc[1] is None) else ch_desc[1].lower()
+        if ch_unit not in valid_units['all']:
+            raise ValueError(
+                "unit must be one of {}. Got '{}' instead.".format(
+                    valid_units['all'], ch_unit))
+        if ch_unit in valid_units['px']:
+            unit_new = FIFF.FIFF_UNIT_PX
+        elif ch_unit in valid_units['rad']:
+            unit_new = FIFF.FIFF_UNIT_RAD
+        elif ch_unit in valid_units['deg']:  # convert deg to rad (SI)
+            inst = inst.apply_function(_convert_deg_to_rad, picks=ch_name)
+            unit_new = FIFF.FIFF_UNIT_RAD
+        elif ch_unit in valid_units['m']:
+            unit_new = FIFF.FIFF_UNIT_M
+        elif ch_unit in valid_units['mm']:  # convert mm to m (SI)
+            inst = inst.apply_function(_convert_mm_to_m, picks=ch_name)
+            unit_new = FIFF.FIFF_UNIT_M
+        elif ch_unit in valid_units['au']:
+            unit_new = FIFF.FIFF_UNIT_NONE
+        inst.info['chs'][c_ind]['unit'] = unit_new
+
+        # set eye (and x/y-component)
+        loc = np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
+                        np.nan, np.nan, np.nan, np.nan, np.nan, np.nan])
+
+        ch_eye = ch_desc[2].lower()
+        if ch_eye not in valid_eye['all']:
+            raise ValueError(
+                "eye must be one of {}. Got '{}' instead.".format(
+                    valid_eye['all'], ch_eye))
+        if ch_eye in valid_eye['l']:
+            loc[3] = -1
+        elif ch_eye in valid_eye['r']:
+            loc[3] = 1
+
+        if ch_type == 'eyegaze':
+            ch_xy = ch_desc[3].lower()
+            if ch_xy not in valid_xy['all']:
+                raise ValueError(
+                    "x/y must be one of {}. Got '{}' instead.".format(
+                        valid_xy['all'], ch_xy))
+            if ch_xy in valid_xy['x']:
+                loc[4] = -1
+            elif ch_xy in valid_xy['y']:
+                loc[4] = 1
+
+        inst.info['chs'][c_ind]['loc'] = loc
+
+    return inst
+
+
+def _convert_mm_to_m(array):
+    return array * .001
+
+
+def _convert_deg_to_rad(array):
+    return array * np.pi / 180.
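A minimal usage sketch of the new helper (the channel names and data here are
hypothetical, and ``pick_types(..., eyetrack=...)`` refers to the new argument
introduced in this changeset):

    import numpy as np
    import mne

    # two hypothetical eyetracking channels: x gaze position (px) and pupil (AU)
    info = mne.create_info(['xpos_left', 'pupil_left'], sfreq=1000.,
                           ch_types='misc')
    raw = mne.io.RawArray(np.zeros((2, 1000)), info)
    mne.preprocessing.eyetracking.set_channel_types_eyetrack(
        raw, mapping={'xpos_left': ('eyegaze', 'px', 'left', 'x'),
                      'pupil_left': ('pupil', 'au', 'left')})
    picks = mne.pick_types(raw.info, eyetrack='pupil')  # picks only the pupil channel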
diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py
index e144eac0566..63b657f2132 100644
--- a/mne/preprocessing/tests/test_ica.py
+++ b/mne/preprocessing/tests/test_ica.py
@@ -1227,22 +1227,24 @@ def test_bad_channels(method, allow_ref_meg):
                      allow_ref_meg=allow_ref_meg)
     for inst in [raw, epochs]:
         for ch in chs_bad:
+            picks_dict = {('eyetrack' if ch in ('eyegaze', 'pupil')
+                           else str(ch)): True}
             if allow_ref_meg:
                 # Test case for only bad channels
                 picks_bad1 = pick_types(inst.info, meg=False,
                                         ref_meg=False,
-                                        **{str(ch): True})
+                                        **picks_dict)
                 # Test case for good and bad channels
                 picks_bad2 = pick_types(inst.info, meg=True,
                                         ref_meg=True,
-                                        **{str(ch): True})
+                                        **picks_dict)
             else:
                 # Test case for only bad channels
                 picks_bad1 = pick_types(inst.info, meg=False,
-                                        **{str(ch): True})
+                                        **picks_dict)
                 # Test case for good and bad channels
                 picks_bad2 = pick_types(inst.info, meg=True,
-                                        **{str(ch): True})
+                                        **picks_dict)
             with pytest.raises(ValueError, match='Invalid channel type'):
                 ica.fit(inst, picks=picks_bad1)
diff --git a/mne/simulation/tests/test_raw.py b/mne/simulation/tests/test_raw.py
index 31169a403bf..49fdc82f881 100644
--- a/mne/simulation/tests/test_raw.py
+++ b/mne/simulation/tests/test_raw.py
@@ -325,6 +325,7 @@ def test_degenerate(raw_data):
 @pytest.mark.slowtest
 def test_simulate_raw_bem(raw_data):
     """Test simulation of raw data with BEM."""
+    pytest.importorskip('nibabel')
     raw, src_ss, stc, trans, sphere = raw_data
     src = setup_source_space('sample', 'oct1', subjects_dir=subjects_dir)
     for s in src:
diff --git a/mne/utils/docs.py b/mne/utils/docs.py
index 226afda56cc..96398425ad4 100644
--- a/mne/utils/docs.py
+++ b/mne/utils/docs.py
@@ -2661,6 +2661,11 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75):
     Temperature channels.
 gsr : bool
     Galvanic skin response channels.
+eyetrack : bool | str
+    Eyetracking channels. If True, include all eyetracking channels. If False
+    (default), include none. If str, it can be 'eyegaze' (to include
+    eye position channels) or 'pupil' (to include pupil-size
+    channels).
 include : list of str
     List of additional channels to include. If empty do not include any.
diff --git a/mne/viz/_mpl_figure.py b/mne/viz/_mpl_figure.py
index 7238d4490a6..ab5f4e76c67 100644
--- a/mne/viz/_mpl_figure.py
+++ b/mne/viz/_mpl_figure.py
@@ -52,7 +52,8 @@
 from ..fixes import _close_event
 from ..annotations import _sync_onset
 from ..io.pick import (_DATA_CH_TYPES_ORDER_DEFAULT, _DATA_CH_TYPES_SPLIT,
-                       _FNIRS_CH_TYPES_SPLIT, _VALID_CHANNEL_TYPES)
+                       _FNIRS_CH_TYPES_SPLIT, _EYETRACK_CH_TYPES_SPLIT,
+                       _VALID_CHANNEL_TYPES)
 from ..utils import Bunch, _click_ch_name, logger
 from . import plot_sensors
 from ._figure import BrowserBase
@@ -2191,6 +2192,8 @@ def _split_picks_by_type(inst, picks, units, scalings, titles):
                 pick_kwargs['meg'] = ch_type
             elif ch_type in _FNIRS_CH_TYPES_SPLIT:
                 pick_kwargs['fnirs'] = ch_type
+            elif ch_type in _EYETRACK_CH_TYPES_SPLIT:
+                pick_kwargs['eyetrack'] = ch_type
             else:
                 pick_kwargs[ch_type] = True
             these_picks = pick_types(inst.info, **pick_kwargs)
diff --git a/mne/viz/tests/test_raw.py b/mne/viz/tests/test_raw.py
index 21ed14dd7fa..859db3ce646 100644
--- a/mne/viz/tests/test_raw.py
+++ b/mne/viz/tests/test_raw.py
@@ -961,6 +961,7 @@ def test_plotting_order_consistency():
     pick_data_set = set(_PICK_TYPES_DATA_DICT)
     pick_data_set.remove('meg')
     pick_data_set.remove('fnirs')
+    pick_data_set.remove('eyetrack')
     missing = pick_data_set.difference(set(_DATA_CH_TYPES_ORDER_DEFAULT))
     assert missing == set()
diff --git a/tools/circleci_download.sh b/tools/circleci_download.sh
index a8eef47fd42..421f6f63ec1 100755
--- a/tools/circleci_download.sh
+++ b/tools/circleci_download.sh
@@ -107,6 +107,9 @@ else
         if [[ $(cat $FNAME | grep -x ".*datasets.*erp_core.*" | wc -l) -gt 0 ]]; then
           python -c "import mne; print(mne.datasets.erp_core.data_path(update_path=True))";
         fi;
+        if [[ $(cat $FNAME | grep -x ".*datasets.*eyelink.*" | wc -l) -gt 0 ]]; then
+          python -c "import mne; print(mne.datasets.eyelink.data_path(update_path=True))";
+        fi;
         if [[ $(cat $FNAME | grep -x ".*datasets.*ucl_opm_auditory.*" | wc -l) -gt 0 ]]; then
           python -c "import mne; print(mne.datasets.ucl_opm_auditory.data_path(update_path=True))";
         fi;
diff --git a/tutorials/io/70_reading_eyetracking_data.py b/tutorials/io/70_reading_eyetracking_data.py
new file mode 100644
index 00000000000..f84477255b6
--- /dev/null
+++ b/tutorials/io/70_reading_eyetracking_data.py
@@ -0,0 +1,172 @@
+# -*- coding: utf-8 -*-
+r"""
+.. _tut-importing-eyetracking-data:
+
+=======================================
+Importing Data from Eyetracking Devices
+=======================================
+
+Eyetracking devices record a person's point of gaze, usually in relation to a
+screen. Typically, gaze position (also referred to as eye or pupil position)
+and pupil size are recorded as separate channels. This section describes how
+to read data from supported eyetracking manufacturers.
+
+MNE-Python provides functions for reading eyetracking data. When possible,
+MNE-Python will internally convert and store eyetracking data in SI units
+(for example, radians for position data and meters for pupil size).
+
+.. note:: If you have eye tracking data in a format that MNE does not support
+          yet, you can try reading it using other tools and create an MNE
+          object from a numpy array. Then you can use
+          :func:`mne.preprocessing.eyetracking.set_channel_types_eyetrack`
+          to assign the correct eyetrack channel types.
+
+.. seealso:: Some MNE functions may not be available for eyetracking and other
+             physiological data, because MNE does not consider them to be data
+             channels. See the :doc:`glossary ` for more
+             information.
+
+.. _import-eyelink_asc:

+SR Research (Eyelink) (.asc)
+============================
+
+.. note:: MNE-Python currently only supports reading Eyelink eyetracking data
+          stored in the ASCII (.asc) format.
+
+Eyelink recordings are stored in the Eyelink Data Format (EDF; .edf), a binary
+format that is relatively complex to support. To make the data in EDF files
+accessible, Eyelink provides the application EDF2ASC, which converts EDF
+files to a plain text ASCII format (.asc).
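+(A typical invocation is just ``edf2asc recording.edf``, which writes
+``recording.asc`` next to the input file; available options vary by version,
+so treat this as a sketch rather than exact usage.)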
+These files can be imported into MNE using :func:`mne.io.read_raw_eyelink`.
+
+.. note:: The Eyelink Data Format (EDF) should not be confused
+          with the European Data Format, the common EEG data format that also
+          uses the .edf extension.
+
+Supported measurement types from Eyelink files include eye position, pupil
+size, saccadic velocity, resolution, and head position (for recordings
+collected in remote mode). Eyelink files often report ocular events (blinks,
+saccades, and fixations); MNE will store these events as `mne.Annotations`.
+For more information on the various measurement types that can be present in
+Eyelink files, read below.
+
+Eye Position Data
+-----------------
+
+Eyelink samples can report eye position data in pixels, units of visual
+degrees, or as raw pupil coordinates. Samples are written as (x, y) coordinate
+pairs (or two pairs for binocular data). The type of position data present in
+an ASCII file will be detected automatically by MNE. The three types of
+position data are explained below.
+
+Gaze
+^^^^
+Gaze position data report the estimated (x, y) pixel coordinates of the
+participant's gaze on the stimulus screen, compensating for head position
+changes and distance from the screen. This datatype may be preferable if you
+are interested in knowing where on the stimulus screen the participant was
+looking. The default (0, 0) location for Eyelink systems is at the top left
+of the screen.
+
+This may be best demonstrated with an example. In the file plotted below,
+eyetracking data was recorded while the participant read text on a display.
+In this file, as the participant read each line from left to right, the
+x-coordinate increased. When the participant moved their gaze down to read a
+new line, the y-coordinate *increased*, which is why the ``ypos_right`` channel
+in the plot below increases over time (for example, at about 4 seconds and
+at about 8 seconds).
+"""
+
+# %%
+from mne.io import read_raw_eyelink
+from mne.datasets import misc
+
+# %%
+fpath = misc.data_path() / 'eyetracking' / 'eyelink'
+raw = read_raw_eyelink(fpath / 'px_textpage_ws.asc',
+                       create_annotations=['blinks'])
+custom_scalings = dict(eyegaze=1e3)
+raw.pick_types(eyetrack=True).plot(scalings=custom_scalings)
+
+
+# %%
+# .. important:: The (0, 0) pixel coordinates are at the top-left of the
+#                trackable area of the screen. Gaze towards lower areas of
+#                the screen will yield a relatively higher y-coordinate.
+#
+# Note that we passed a custom `dict` to the ``scalings`` argument of
+# `mne.io.Raw.plot`. This is because MNE's default plot scalings for eye
+# position data are calibrated for HREF data, which are stored in radians
+# (read below).
+
+
+# %%
+# Head-Referenced Eye Angle (HREF)
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#
+# HREF position data measure eye rotation angles relative to the head. They
+# do not take into account changes in subject head position and angle, or
+# distance from the stimulus screen. This datatype might be preferable for
+# analyses focused on eye movement velocities and amplitudes, or for
+# simultaneous EEG/MEG and eyetracking recordings where eye position data are
+# used to identify EOG artifacts.
+#
+# HREF coordinates are stored in the ASCII file as integer values, with 260 or
+# more units per visual degree; however, MNE will convert and store these
+# coordinates in radians.
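+#
+# As a rough sketch of that conversion (not a call you need to make yourself;
+# the values below are hypothetical, 260 being only the documented minimum
+# resolution)::
+#
+#     import numpy as np
+#     units_per_deg = 260.0  # hypothetical HREF units per visual degree
+#     href_sample = 780      # hypothetical raw integer value from the file
+#     eye_angle_rad = np.deg2rad(href_sample / units_per_deg)  # 3 deg in rad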
+# The (0, 0) point of HREF data is arbitrary, as the relationship between the
+# screen position and the coordinates changes as the subject's head moves.
+#
+# Below is the same text reading recording that we plotted above, except a new
+# ASCII file was generated, this time using HREF eye position data.
+
+
+# %%
+fpath = misc.data_path() / 'eyetracking' / 'eyelink'
+raw = read_raw_eyelink(fpath / 'HREF_textpage_ws.asc',
+                       create_annotations=['blinks'])
+raw.pick_types(eyetrack=True).plot()
+
+# %%
+# Pupil Position
+# ^^^^^^^^^^^^^^
+#
+# Pupil position data contain (x, y) coordinate pairs from the eye camera.
+# They have not been converted to pixels (gaze) or eye angles (HREF). Most use
+# cases do not require this data type, and caution should be taken when
+# analyzing raw pupil positions. Note that when plotting data from a
+# ``Raw`` object containing raw pupil position data, the plot scalings
+# will likely be incorrect. You can pass custom scalings into the ``scalings``
+# parameter of `mne.io.Raw.plot` so that the signals are legible when plotting.
+
+# %%
+# .. warning:: If a calibration was not performed prior to data collection, the
+#              EyeLink system cannot convert raw pupil position data to pixels
+#              (gaze) or eye angle (HREF).
+
+# %%
+# Pupil Size Data
+# ---------------
+# Pupil size is measured by the EyeLink system at up to 500 samples per second.
+# It may be reported as pupil *area* or pupil *diameter* (i.e., the diameter
+# of a circle/ellipse model fit to the pupil area).
+# Which of these datatypes you get is determined by your recording and/or
+# EDF2ASC settings. Pupil size data are not calibrated and are reported in
+# arbitrary units. Typical pupil *area* data range between 800 and 2000 units,
+# with a precision of 1 unit, while pupil *diameter* data range between
+# 1800 and 3000 units.
+#
+# Velocity, resolution, and head position data
+# --------------------------------------------
+# Eyelink files can produce data on saccadic velocity, resolution, and head
+# position for each sample in the file. MNE will read in these data if they
+# are present in the file, but will label their channel types as ``'misc'``.
+#
+# .. warning:: Eyelink's EDF2ASC API allows for modification of the data
+#              and format that is converted to ASCII. However, MNE-Python
+#              assumes a specific structure, which the default parameters of
+#              EDF2ASC follow. ASCII files should be tab-delimited, and both
+#              Samples and Events should be output. If the data were recorded
+#              at 2000 Hz, timestamps should be floating point numbers. Manual
+#              modification of ASCII conversion via EDF2ASC is not recommended.
diff --git a/tutorials/preprocessing/90_eyetracking_data.py b/tutorials/preprocessing/90_eyetracking_data.py
new file mode 100644
index 00000000000..8c8a2e3b755
--- /dev/null
+++ b/tutorials/preprocessing/90_eyetracking_data.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+"""
+.. _tut-eyetrack:
+
+===========================================
+Working with eye tracker data in MNE-Python
+===========================================
+
+In this tutorial we will load some eye tracker data and plot the average
+pupil response to light flashes (i.e., the pupillary light reflex).
+
+"""  # noqa: E501
+# Authors: Dominik Welke
+#          Scott Huberty
+#
+# License: BSD-3-Clause
+
+# %%
+# Data loading
+# ------------
+#
+# First we will load an eye tracker recording from SR Research's proprietary
+# ``'.asc'`` file format.
+#
+# By default, Eyelink files will output ocular events (blinks,
+# saccades, and fixations) and experiment messages. MNE will store these
+# events as `mne.Annotations`. If we are only interested in certain event
+# types from the Eyelink file, we can select these using the
+# ``create_annotations`` argument of `mne.io.read_raw_eyelink`. Here, we will
+# only create annotations for blinks and experiment messages.
+#
+# The info structure tells us we loaded a monocular recording with 2
+# ``'eyegaze'`` channels (X/Y), 1 ``'pupil'`` channel, and 1 ``'stim'``
+# channel.
+
+from mne import Epochs, find_events
+from mne.io import read_raw_eyelink
+from mne.datasets.eyelink import data_path
+
+eyelink_fname = data_path() / 'mono_multi-block_multi-DINS.asc'
+
+raw = read_raw_eyelink(eyelink_fname,
+                       create_annotations=['blinks', 'messages'])
+raw.crop(tmin=0, tmax=146)
+
+# %%
+# Get stimulus events from DIN channel
+# ------------------------------------
+#
+# Eyelink eye trackers have a DIN port that can be used to feed in stimulus
+# or response timings. :func:`mne.io.read_raw_eyelink` loads this data as a
+# ``'stim'`` channel. Alternatively, the onset of stimulus events could be
+# sent to the eyetracker as ``messages``; these can be read in as
+# `mne.Annotations`.
+#
+# In the example data, the DIN channel contains the onset of light flashes on
+# the screen. We now extract these events to visualize the pupil response.
+
+events = find_events(raw, 'DIN',
+                     shortest_event=1,
+                     min_duration=.02,
+                     uint_cast=True)
+event_dict = {'flash': 3}
+
+
+# %%
+# Plot raw data
+# -------------
+#
+# As the following plot shows, we now have a raw object with the eye tracker
+# data, eyeblink annotations, and stimulus events (from the DIN channel).
+#
+# The plot also shows us that there is some noise in the data (not always
+# categorized as blinks). Also, notice that we have passed a custom `dict`
+# into the ``scalings`` argument of ``raw.plot``. This is necessary to make
+# the eyegaze channel traces legible when plotting, since the file contains
+# pixel position data (as opposed to eye angles, which are reported in
+# radians).
+
+raw.plot(events=events, event_id={'Flash': 3}, event_color='g',
+         start=25, duration=45, scalings=dict(eyegaze=1e3))
+
+
+# %%
+# Plot average pupil response
+# ---------------------------
+#
+# We now visualize the pupillary light reflex. To do so, we select only the
+# pupil channel and plot the evoked response to the light flashes.
+#
+# As we see, there is a prominent decrease in pupil size following the
+# stimulation. The noise starting about 2.5 s after stimulus onset stems from
+# eyeblinks and artifacts in some of the 16 trials.
+
+epochs = Epochs(raw, events, tmin=-0.3, tmax=5,
+                event_id=event_dict, preload=True)
+epochs.pick_types(eyetrack='pupil')
+epochs.average().plot()
+
+# %%
+# It is important to note that pupil size data are reported by Eyelink (and
+# stored internally by MNE) in arbitrary units (AU). While it can often be
+# preferable to convert pupil size data to millimeters, this requires
+# information that is not always present in the file. MNE does not currently
+# provide methods to convert pupil size data.
+# See :ref:`tut-importing-eyetracking-data` for more information on pupil
+# size data.
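+
+# %%
+# As a rough illustration only (not an MNE API): if you separately measured an
+# artificial pupil of known diameter on your own setup, you could derive a
+# scaling factor yourself and apply it manually. The factor below is made up
+# for demonstration purposes.
+
+au_per_mm = 1200.0  # hypothetical: AU reported for a 1 mm artificial pupil
+pupil_mm = epochs.copy().apply_function(lambda arr: arr / au_per_mm,
+                                        picks='pupil')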