diff --git a/doc/style_guide.rst b/doc/style_guide.rst index 725e12e65..0cbd5ba07 100644 --- a/doc/style_guide.rst +++ b/doc/style_guide.rst @@ -42,6 +42,10 @@ throughout Elephant. .. code-block:: python + def pair_of_signals_example(spiketrain_i, spiketrain_j): + # Add '_i' and '_j' suffixes to a pair of signals, spiketrains or any + # other variables that come in pairs. + def perfect_naming_of_parameters(spiketrains, spiketrain, reference_spiketrain, target_spiketrain, signal, signals, max_iterations, min_threshold, n_bins, n_surrogates, bin_size, max_size, @@ -119,9 +123,8 @@ throughout Elephant. Therefore, do not use terms as `n` or `n_surr`, that are not immediately understood. bin_size : pq.Quantity or int - Separate the words by underscore. Do not use `binsize`. Old functions - which use `binsize` will be gradually refactored to conform to the new - convention. + Separate the words by underscore. Do not use `binsize`. Old functions + which use `binsize` are deprecated. max_size : float Another example showing that words should be separated by underscores. This intersects with the naming convention for a maximum value. diff --git a/doc/tutorials/asset.ipynb b/doc/tutorials/asset.ipynb index 62b1d21ae..c9ed941fc 100644 --- a/doc/tutorials/asset.ipynb +++ b/doc/tutorials/asset.ipynb @@ -185,7 +185,7 @@ ], "source": [ "# 2.1.1) create ASSET analysis object\n", - "# hint: try different binsizes, e.g. binsize=2.5, 3.5, 4.0 ms\n", + "# hint: try different bin sizes, e.g. bin_size=2.5, 3.5, 4.0 ms\n", "asset_obj = asset.ASSET(spiketrains, bin_size=3*pq.ms)\n", "\n", "# 2.1.2) compute the intersection matrix\n", diff --git a/doc/tutorials/parallel.ipynb b/doc/tutorials/parallel.ipynb index d58e182da..2ab344d91 100644 --- a/doc/tutorials/parallel.ipynb +++ b/doc/tutorials/parallel.ipynb @@ -256,7 +256,7 @@ "source": [ "### Example 2. Custom functions and positional argument\n", "\n", - "Sometimes you might want to iterate over the second (or third, etc.) argument of a function. To do this, you need to create a custom function that passes its first input argument into the right position of the original function. Below is an example of how to compute time histograms of spiketrains with different `binsize` values (the second argument)." + "Sometimes you might want to iterate over the second (or third, etc.) argument of a function. To do this, you need to create a custom function that passes its first input argument into the right position of the original function. Below is an example of how to compute time histograms of spiketrains with different `bin_size` values (the second argument)."
] }, { @@ -269,9 +269,9 @@ "spiketrains = [homogeneous_poisson_process(rate, t_stop=10*pq.s) for _ in range(8)]\n", "\n", "# step 2: define your custom function\n", - "def my_custom_function(binsize):\n", + "def my_custom_function(bin_size):\n", " # specify all custom key-word options here\n", - " return time_histogram(spiketrains, binsize, output='counts')" + " return time_histogram(spiketrains, bin_size, output='counts')" ] }, { @@ -282,9 +282,9 @@ }, "outputs": [], "source": [ - "binsize_list = np.linspace(0.1, 1, num=8) * pq.s\n", + "bin_size_list = np.linspace(0.1, 1, num=8) * pq.s\n", "\n", - "time_hist = ProcessPoolExecutor().execute(my_custom_function, binsize_list)" + "time_hist = ProcessPoolExecutor().execute(my_custom_function, bin_size_list)" ] }, { @@ -334,7 +334,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "`time_hist` contains 8 AnalogSignals - one AnalogSignal per `binsize` from `binsize_list`.\n", + "`time_hist` contains 8 AnalogSignals - one AnalogSignal per `bin_size` from `bin_size_list`.\n", "\n", "### Benchmark\n", "\n", @@ -351,7 +351,7 @@ "warnings.filterwarnings(\"ignore\")\n", "\n", "# initialize the iteration list\n", - "binsize_list = np.linspace(0.1, 1, 100) * pq.s" + "bin_size_list = np.linspace(0.1, 1, 100) * pq.s" ] }, { @@ -369,7 +369,7 @@ ], "source": [ "# sequential processing\n", - "%timeit [time_histogram(spiketrains, binsize) for binsize in binsize_list]" + "%timeit [time_histogram(spiketrains, bin_size) for bin_size in bin_size_list]" ] }, { @@ -387,7 +387,7 @@ ], "source": [ "# with parallelization\n", - "%timeit ProcessPoolExecutor(max_workers=4).execute(my_custom_function, binsize_list)" + "%timeit ProcessPoolExecutor(max_workers=4).execute(my_custom_function, bin_size_list)" ] } ], diff --git a/doc/tutorials/unitary_event_analysis.ipynb b/doc/tutorials/unitary_event_analysis.ipynb index d433d38a3..6f290000f 100644 --- a/doc/tutorials/unitary_event_analysis.ipynb +++ b/doc/tutorials/unitary_event_analysis.ipynb @@ -97,7 +97,7 @@ }, "outputs": [], "source": [ - "def plot_UE(data,Js_dict,Js_sig,binsize,winsize,winstep, pat,N,t_winpos,**kwargs):\n", + "def plot_UE(data,Js_dict,Js_sig,bin_size,winsize,winstep, pat,N,t_winpos,**kwargs):\n", " \"\"\"\n", " Examples:\n", " ---------\n", @@ -150,9 +150,9 @@ " if len(x)>0:\n", " xx = []\n", " for j in sig_idx_win:\n", - " xx =np.append(xx,x[np.where((x*binsize>=t_winpos[j]) &(x*binsize=t_winpos[j]) &(x*bin_size>> import numpy as np >>> import elephant.cell_assembly_detection as cad >>> np.random.seed(30) ->>> # Generate correlated data and bin it with a binsize of 10ms +>>> # Generate correlated data and bin it with a bin_size of 10ms >>> sts = elephant.spike_train_generation.cpp( >>> rate=15*pq.Hz, A=[0]+[0.95]+[0]*4+[0.05], t_stop=10*pq.s) ->>> binsize = 10*pq.ms ->>> spM = conv.BinnedSpikeTrain(sts, binsize=binsize) +>>> bin_size = 10*pq.ms +>>> spM = conv.BinnedSpikeTrain(sts, bin_size=bin_size) >>> # Call of the method ->>> patterns = cad.cell_assembly_detection(spM=spM, maxlag=2)[0] +>>> patterns = cad.cell_assembly_detection(spM=spM, max_lag=2)[0] >>> # Plotting >>> plt.figure() >>> for neu in patterns['neurons']: >>> if neu == 0: >>> plt.plot( ->>> patterns['times']*binsize, [neu]*len(patterns['times']), +>>> patterns['times']*bin_size, [neu]*len(patterns['times']), >>> 'ro', label='pattern') >>> else: >>> plt.plot( ->>> patterns['times']*binsize, [neu] * len(patterns['times']), +>>> patterns['times']*bin_size, [neu] * len(patterns['times']), >>> 'ro') >>> # Raster plot of the data 
>>> for st_idx, st in enumerate(sts): @@ -70,27 +70,34 @@ from __future__ import division, print_function, unicode_literals -import numpy as np import copy import math -import elephant.conversion as conv -from scipy.stats import f import time +import numpy as np +from scipy.stats import f + +import elephant.conversion as conv +from elephant.utils import deprecated_alias -def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, - min_occ=1, size_chunks=100, max_spikes=np.inf, - significance_pruning=True, subgroup_pruning=True, - same_config_cut=False, bool_times_format=False, - verbose=False): + +@deprecated_alias(data='binned_spiketrain', maxlag='max_lag', + min_occ='min_occurrences', + same_config_cut='same_configuration_pruning') +def cell_assembly_detection(binned_spiketrain, max_lag, reference_lag=2, + alpha=0.05, min_occurrences=1, size_chunks=100, + max_spikes=np.inf, significance_pruning=True, + subgroup_pruning=True, + same_configuration_pruning=False, + bool_times_format=False, verbose=False): """ The function performs the CAD analysis for the binned (discretized) spike trains given in input. The method looks for candidate significant patterns with lags (number of bins between successive spikes - in the pattern) going from `-maxlag` to `maxlag` (second parameter of the + in the pattern) going from `-max_lag` to `max_lag` (second parameter of the function). Thus, between two successive spikes in the pattern there can - be at most `maxlag`*`binsize` units of time. + be at most `max_lag`*`bin_size` units of time. The method agglomerates pairs of units (or a unit and a preexisting assembly), tests their significance by a statistical test @@ -107,12 +114,12 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, Parameters ---------- - data : elephant.conversion.BinnedSpikeTrain + binned_spiketrain : elephant.conversion.BinnedSpikeTrain Binned spike trains containing data to be analyzed. - maxlag : int - Maximal lag to be tested. For a binning dimension of binsize the + max_lag : int + Maximal lag to be tested. For a binning dimension of bin_size the method will test all pairs configurations with a time - shift between '-maxlag' and 'maxlag'. + shift between '-max_lag' and 'max_lag'. reference_lag : int, optional Reference lag (in bins) for the non-stationarity correction in the statistical test. @@ -120,7 +127,7 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, alpha : float, optional Significance level for the statistical test. Default: 0.05. - min_occ : int, optional + min_occurrences : int, optional Minimal number of occurrences required for an assembly (all assemblies, even if significant, with fewer occurrences than min_occurrences are discarded). @@ -142,7 +149,7 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, If True, the method performs subgroup pruning among the detected assemblies. Default: True. - same_config_cut : bool, optional + same_configuration_pruning : bool, optional If True, performs pruning (not present in the original code and more efficient), not testing assemblies already formed if they appear in the very same configuration. @@ -190,7 +197,8 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, Raises ------ TypeError - If `data` is not an `elephant.conv.BinnedSpikeTrain` object. + If `binned_spiketrain` is not an instance of + `elephant.conv.BinnedSpikeTrain`. ValueError If the parameters are out of bounds. 
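The hunks above import `deprecated_alias` from `elephant.utils`, which is not part of this diff, and use it to map the old keyword names (`data`, `maxlag`, `min_occ`, `same_config_cut`) onto the new ones. As a rough orientation only, a keyword-renaming decorator of this kind can be sketched as below; the behaviour shown (remap old keyword arguments and emit a `DeprecationWarning`) is an assumption, and the actual `elephant.utils` implementation may differ.

    import functools
    import warnings

    def deprecated_alias(**aliases):
        # 'aliases' maps old keyword names to new ones, e.g. maxlag='max_lag'.
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                for old_name, new_name in aliases.items():
                    if old_name in kwargs:
                        warnings.warn("'{}' is deprecated; use '{}'".format(
                            old_name, new_name), DeprecationWarning)
                        kwargs[new_name] = kwargs.pop(old_name)
                return func(*args, **kwargs)
            return wrapper
        return decorator

With a shim of this kind, a legacy call such as `cell_assembly_detection(spM, maxlag=2)` keeps working but warns the caller to switch to `max_lag`.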
@@ -213,31 +221,31 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, ... >>> np.random.seed(30) ... - >>> # Generate correlated data and bin it with a binsize of 10ms + >>> # Generate correlated data and bin it with a bin_size of 10ms >>> sts = elephant.spike_train_generation.cpp( >>> rate=15*pq.Hz, A=[0]+[0.95]+[0]*4+[0.05], t_stop=10*pq.s) - >>> binsize = 10*pq.ms - >>> spM = conv.BinnedSpikeTrain(sts, binsize=binsize) + >>> bin_size = 10*pq.ms + >>> spM = conv.BinnedSpikeTrain(sts, bin_size=bin_size) ... >>> # Call of the method - >>> patterns = cad.cell_assembly_detection(spM=spM, maxlag=2)[0] + >>> patterns = cad.cell_assembly_detection(spM=spM, max_lag=2)[0] """ initial_time = time.time() # check parameter input and raise errors if necessary - _raise_errors(data=data, - maxlag=maxlag, + _raise_errors(binned_spiketrain=binned_spiketrain, + max_lag=max_lag, alpha=alpha, - min_occ=min_occ, + min_occurrences=min_occurrences, size_chunks=size_chunks, max_spikes=max_spikes) # transform the binned spiketrain into array - data = data.to_array() + binned_spiketrain = binned_spiketrain.to_array() # zero order - n_neurons = len(data) + n_neurons = len(binned_spiketrain) # initialize empty assembly @@ -254,15 +262,15 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, assembly_in[w1]['neurons'] = [w1] assembly_in[w1]['lags'] = [] assembly_in[w1]['pvalue'] = [] - assembly_in[w1]['times'] = data[w1] - assembly_in[w1]['signature'] = [[1, sum(data[w1])]] + assembly_in[w1]['times'] = binned_spiketrain[w1] + assembly_in[w1]['signature'] = [[1, sum(binned_spiketrain[w1])]] # first order = test over pairs # denominator of the Bonferroni correction # divide alpha by the number of tests performed in the first # pairwise testing loop - number_test_performed = n_neurons * (n_neurons - 1) * (2 * maxlag + 1) + number_test_performed = n_neurons * (n_neurons - 1) * (2 * max_lag + 1) alpha = alpha * 2 / float(number_test_performed) if verbose: print('actual significance_level', alpha) @@ -284,20 +292,21 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, # for loop for the pairwise testing for w1 in range(n_neurons - 1): for w2 in range(w1 + 1, n_neurons): - spiketrain2 = data[w2] + spiketrain2 = binned_spiketrain[w2] n2 = w2 assembly_flag = 0 # call of the function that does the pairwise testing - call_tp = _test_pair(ensemble=assembly_in[w1], - spiketrain2=spiketrain2, - n2=n2, - maxlag=maxlag, - size_chunks=size_chunks, - reference_lag=reference_lag, - existing_patterns=existing_patterns, - same_config_cut=same_config_cut) - if same_config_cut: + call_tp = _test_pair( + ensemble=assembly_in[w1], + spiketrain2=spiketrain2, + n2=n2, + max_lag=max_lag, + size_chunks=size_chunks, + reference_lag=reference_lag, + existing_patterns=existing_patterns, + same_configuration_pruning=same_configuration_pruning) + if same_configuration_pruning: assem_tp = call_tp[0] else: assem_tp = call_tp @@ -305,13 +314,13 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, # if the assembly given in output is significant and the number # of occurrences is higher than the minimum requested number if assem_tp['pvalue'][-1] < alpha and \ - assem_tp['signature'][-1][1] > min_occ: + assem_tp['signature'][-1][1] > min_occurrences: # save the assembly in the output assembly.append(assem_tp) sign_pairs_matrix[w1][w2] = 1 assembly_flag = 1 # flag : it is indeed an assembly # put the item_candidate into the existing_patterns list - if same_config_cut: + 
if same_configuration_pruning: item_candidate = call_tp[1] if not existing_patterns: existing_patterns = [item_candidate] @@ -372,25 +381,26 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, if w2_to_test: # bonferroni correction only for the tests actually performed - alpha = alpha / float(len(w2_to_test) * n_as * (2 * maxlag + 1)) + alpha = alpha / float(len(w2_to_test) * n_as * (2 * max_lag + 1)) # testing for the element in w2_to_test for ww2 in range(len(w2_to_test)): w2 = w2_to_test[ww2] - spiketrain2 = data[w2] + spiketrain2 = binned_spiketrain[w2] assembly_flag = 0 pop_flag = max(assembly_flag, 0) # testing for the assembly and the new neuron - call_tp = _test_pair(ensemble=assembly[w1], - spiketrain2=spiketrain2, - n2=w2, - maxlag=maxlag, - size_chunks=size_chunks, - reference_lag=reference_lag, - existing_patterns=existing_patterns, - same_config_cut=same_config_cut) - if same_config_cut: + call_tp = _test_pair( + ensemble=assembly[w1], + spiketrain2=spiketrain2, + n2=w2, + max_lag=max_lag, + size_chunks=size_chunks, + reference_lag=reference_lag, + existing_patterns=existing_patterns, + same_configuration_pruning=same_configuration_pruning) + if same_configuration_pruning: assem_tp = call_tp[0] else: assem_tp = call_tp @@ -399,7 +409,7 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, # the number of occurrences is sufficient and # the length of the assembly is less than the input limit if assem_tp['pvalue'][-1] < alpha and \ - assem_tp['signature'][-1][1] > min_occ and \ + assem_tp['signature'][-1][1] > min_occurrences and \ assem_tp['signature'][-1][0] <= max_spikes: # the assembly is saved in the output list of # assemblies @@ -418,7 +428,7 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, assembly, n_filtered_assemblies = \ _significance_pruning_step( pre_pruning_assembly=assembly) - if same_config_cut: + if same_configuration_pruning: item_candidate = call_tp[1] existing_patterns.append(item_candidate) if assembly_flag: @@ -470,7 +480,7 @@ def cell_assembly_detection(data, maxlag, reference_lag=2, alpha=0.05, return assembly -def _chunking(binned_pair, size_chunks, maxlag, best_lag): +def _chunking(binned_pair, size_chunks, max_lag, best_lag): """ Chunking the object binned_pair into parts with the same bin length @@ -480,8 +490,8 @@ def _chunking(binned_pair, size_chunks, maxlag, best_lag): vector of the binned spike trains for the pair being analyzed size_chunks : int size of chunks desired - maxlag : int - max number of lags for the binsize chosen + max_lag : int + max number of lags for the bin_size chosen best_lag : int lag with the higher number of coincidences @@ -496,10 +506,10 @@ def _chunking(binned_pair, size_chunks, maxlag, best_lag): length = len(binned_pair[0], ) # number of chunks - n_chunks = math.ceil((length - maxlag) / size_chunks) + n_chunks = math.ceil((length - max_lag) / size_chunks) # new chunk size, this is to have all chunks of roughly the same size - size_chunks = math.floor((length - maxlag) / n_chunks) + size_chunks = math.floor((length - max_lag) / n_chunks) n_chunks = np.int(n_chunks) size_chunks = np.int(size_chunks) @@ -508,21 +518,21 @@ def _chunking(binned_pair, size_chunks, maxlag, best_lag): # cut the time series according to best_lag - binned_pair_cut = np.array([np.zeros(length - maxlag, dtype=np.int), - np.zeros(length - maxlag, dtype=np.int)]) + binned_pair_cut = np.array([np.zeros(length - max_lag, dtype=np.int), + np.zeros(length - max_lag, dtype=np.int)]) # 
choose which entries to consider according to the best lag chosen if best_lag == 0: - binned_pair_cut[0] = binned_pair[0][0:length - maxlag] - binned_pair_cut[1] = binned_pair[1][0:length - maxlag] + binned_pair_cut[0] = binned_pair[0][0:length - max_lag] + binned_pair_cut[1] = binned_pair[1][0:length - max_lag] elif best_lag > 0: - binned_pair_cut[0] = binned_pair[0][0:length - maxlag] + binned_pair_cut[0] = binned_pair[0][0:length - max_lag] binned_pair_cut[1] = binned_pair[1][ - best_lag:length - maxlag + best_lag] + best_lag:length - max_lag + best_lag] else: binned_pair_cut[0] = binned_pair[0][ - -best_lag:length - maxlag - best_lag] - binned_pair_cut[1] = binned_pair[1][0:length - maxlag] + -best_lag:length - max_lag - best_lag] + binned_pair_cut[1] = binned_pair[1][0:length - max_lag] # put the cut data into the chunked object for iii in range(n_chunks - 1): @@ -540,7 +550,7 @@ def _chunking(binned_pair, size_chunks, maxlag, best_lag): return chunked, n_chunks -def _assert_same_pattern(item_candidate, existing_patterns, maxlag): +def _assert_same_pattern(item_candidate, existing_patterns, max_lag): """ Tests if a particular pattern has already been tested and retrieved as significant. @@ -552,7 +562,7 @@ def _assert_same_pattern(item_candidate, existing_patterns, maxlag): in the second there are the correspondent lags existing_patterns: list list of the already significant patterns - maxlag: int + max_lag: int maximum lag to be tested Returns @@ -562,16 +572,16 @@ def _assert_same_pattern(item_candidate, existing_patterns, maxlag): """ # unique representation of pattern in term of lags, maxlag and neurons # participating - item_candidate = sorted(item_candidate[0] * 2 * maxlag + - item_candidate[1] + maxlag) + item_candidate = sorted(item_candidate[0] * 2 * max_lag + + item_candidate[1] + max_lag) if item_candidate in existing_patterns: return True else: return False -def _test_pair(ensemble, spiketrain2, n2, maxlag, size_chunks, reference_lag, - existing_patterns, same_config_cut): +def _test_pair(ensemble, spiketrain2, n2, max_lag, size_chunks, reference_lag, + existing_patterns, same_configuration_pruning): """ Tests if two spike trains have repetitive patterns occurring more frequently than chance. @@ -585,7 +595,7 @@ def _test_pair(ensemble, spiketrain2, n2, maxlag, size_chunks, reference_lag, (candidate to be a new assembly member) n2 : int new unit tested - maxlag : int + max_lag : int maximum lag to be tested size_chunks : int size (in bins) of chunks in which the spike trains is divided @@ -595,7 +605,7 @@ def _test_pair(ensemble, spiketrain2, n2, maxlag, size_chunks, reference_lag, lag of reference; if zero or negative reference lag=-l existing_patterns: list list of the already significant patterns - same_config_cut: bool + same_configuration_pruning: bool if True (not present in the original code and more efficient), does not test assemblies already formed if they appear in the very same configuration @@ -628,7 +638,7 @@ def _test_pair(ensemble, spiketrain2, n2, maxlag, size_chunks, reference_lag, # list with the binned spike trains of the two neurons binned_pair = [ensemble['times'], spiketrain2] - # For large binsizes, the binned spike counts may potentially fluctuate + # For large bin_sizes, the binned spike counts may potentially fluctuate # around a high mean level and never fall below some minimum count # considerably larger than zero for the whole time series. 
# Entries up to this minimum count would contribute @@ -662,23 +672,25 @@ def _test_pair(ensemble, spiketrain2, n2, maxlag, size_chunks, reference_lag, # we select the one corresponding to the highest count # structure with the coincidence counts for each lag - fwd_coinc_count = np.array([0 for _ in range(maxlag + 1)]) - bwd_coinc_count = np.array([0 for _ in range(maxlag + 1)]) + fwd_coinc_count = np.array([0 for _ in range(max_lag + 1)]) + bwd_coinc_count = np.array([0 for _ in range(max_lag + 1)]) - for l in range(maxlag + 1): + for lag in range(max_lag + 1): time_fwd_cc = np.array([binned_pair[0][ - 0:len(binned_pair[0]) - maxlag], + 0:len(binned_pair[0]) - max_lag], binned_pair[1][ - l:len(binned_pair[1]) - maxlag + l]]) + lag:len(binned_pair[1]) - max_lag + lag]]) time_bwd_cc = np.array([binned_pair[0][ - l:len(binned_pair[0]) - maxlag + l], + lag:len(binned_pair[0]) - max_lag + lag], binned_pair[1][ - 0:len(binned_pair[1]) - maxlag]]) + 0:len(binned_pair[1]) - max_lag]]) # taking the minimum, place by place for the coincidences - fwd_coinc_count[l] = np.sum(np.minimum(time_fwd_cc[0], time_fwd_cc[1])) - bwd_coinc_count[l] = np.sum(np.minimum(time_bwd_cc[0], time_bwd_cc[1])) + fwd_coinc_count[lag] = np.sum(np.minimum(time_fwd_cc[0], + time_fwd_cc[1])) + bwd_coinc_count[lag] = np.sum(np.minimum(time_bwd_cc[0], + time_bwd_cc[1])) # choice of the best lag, taking into account the reference lag if reference_lag <= 0: @@ -698,7 +710,7 @@ def _test_pair(ensemble, spiketrain2, n2, maxlag, size_chunks, reference_lag, # reverse the ctAB_ object and not take into account the first entry bwd_coinc_count_rev = bwd_coinc_count[1:len(bwd_coinc_count)][::-1] hab_l = np.append(bwd_coinc_count_rev, fwd_coinc_count) - lags = range(-maxlag, maxlag + 1) + lags = range(-max_lag, max_lag + 1) max_coinc_count = np.amax(hab_l) best_lag = lags[np.argmax(hab_l)] if best_lag < 0: @@ -728,26 +740,25 @@ def _test_pair(ensemble, spiketrain2, n2, maxlag, size_chunks, reference_lag, lags_candidate = list(lags_candidate) item_candidate = [[pattern_candidate], [lags_candidate]] - if same_config_cut: + if same_configuration_pruning: if _assert_same_pattern(item_candidate=item_candidate, existing_patterns=existing_patterns, - maxlag=maxlag): - en_neurons = copy.copy(ensemble['neurons']) - en_neurons.append(n2) - en_lags = copy.copy(ensemble['lags']) - en_lags.append(np.inf) - en_pvalue = copy.copy(ensemble['pvalue']) - en_pvalue.append(1) - en_n_occ = copy.copy(ensemble['signature']) - en_n_occ.append([0, 0]) - item_candidate = [] - assembly = {'neurons': en_neurons, - 'lags': en_lags, - 'pvalue': en_pvalue, - 'times': [], - 'signature': en_n_occ} - return assembly, item_candidate - + max_lag=max_lag): + en_neurons = copy.copy(ensemble['neurons']) + en_neurons.append(n2) + en_lags = copy.copy(ensemble['lags']) + en_lags.append(np.inf) + en_pvalue = copy.copy(ensemble['pvalue']) + en_pvalue.append(1) + en_n_occ = copy.copy(ensemble['signature']) + en_n_occ.append([0, 0]) + item_candidate = [] + assembly = {'neurons': en_neurons, + 'lags': en_lags, + 'pvalue': en_pvalue, + 'times': [], + 'signature': en_n_occ} + return assembly, item_candidate else: # I go on with the testing @@ -770,7 +781,7 @@ def _test_pair(ensemble, spiketrain2, n2, maxlag, size_chunks, reference_lag, 'pvalue': en_pvalue, 'times': [], 'signature': en_n_occ} - if same_config_cut: + if same_configuration_pruning: item_candidate = [] return assembly, item_candidate else: @@ -857,7 +868,7 @@ def _test_pair(ensemble, spiketrain2, n2, maxlag, size_chunks, 
reference_lag, chunked, nch = _chunking(binned_pair=binned_pair, size_chunks=size_chunks, - maxlag=maxlag, + max_lag=max_lag, best_lag=best_lag) marginal_counts = np.zeros((nch, maxrate, 2), dtype=np.int) @@ -902,7 +913,7 @@ def _test_pair(ensemble, spiketrain2, n2, maxlag, size_chunks, reference_lag, # calculation of variance for each chunk - n = ntp - maxlag # used in the calculation of the p-value + n = ntp - max_lag # used in the calculation of the p-value var_x = [np.zeros((2, 2)) for _ in range(nch)] var_tot = 0 cov_abab = [0 for _ in range(nch)] @@ -1012,7 +1023,7 @@ def _test_pair(ensemble, spiketrain2, n2, maxlag, size_chunks, reference_lag, 'pvalue': en_pvalue, 'times': activation_series, 'signature': en_n_occ} - if same_config_cut: + if same_configuration_pruning: return assembly, item_candidate else: return assembly @@ -1123,21 +1134,22 @@ def _subgroup_pruning_step(pre_pruning_assembly): return assembly -def _raise_errors(data, maxlag, alpha, min_occ, size_chunks, max_spikes): +def _raise_errors(binned_spiketrain, max_lag, alpha, min_occurrences, + size_chunks, max_spikes): """ Returns errors if the parameters given in input are not correct. Parameters ---------- - data : BinnedSpikeTrain object + binned_spiketrain : BinnedSpikeTrain object binned spike trains containing data to be analysed - maxlag: int - maximal lag to be tested. For a binning dimension of binsize the + max_lag: int + maximal lag to be tested. For a binning dimension of bin_size the method will test all pairs configurations with a time - shift between -maxlag and maxlag + shift between -max_lag and max_lag alpha : float alpha level. - min_occ : int + min_occurrences : int minimal number of occurrences required for an assembly (all assemblies, even if significant, with fewer occurrences than min_occurrences are discarded). @@ -1164,17 +1176,17 @@ def _raise_errors(data, maxlag, alpha, min_occ, size_chunks, max_spikes): """ - if not isinstance(data, conv.BinnedSpikeTrain): + if not isinstance(binned_spiketrain, conv.BinnedSpikeTrain): raise TypeError( 'data must be in BinnedSpikeTrain format') - if maxlag < 2: - raise ValueError('maxlag value cant be less than 2') + if max_lag < 2: + raise ValueError('max_lag value cant be less than 2') if alpha < 0 or alpha > 1: raise ValueError('significance level has to be in interval [0,1]') - if min_occ < 1: + if min_occurrences < 1: raise ValueError('minimal number of occurrences for an assembly ' 'must be at least 1') @@ -1184,7 +1196,7 @@ def _raise_errors(data, maxlag, alpha, min_occ, size_chunks, max_spikes): if max_spikes < 2: raise ValueError('maximal assembly order must be less than 2') - if data.matrix_columns - maxlag < 100: + if binned_spiketrain.matrix_columns - max_lag < 100: raise ValueError('The time series is too short, consider ' 'taking a longer portion of spike train ' 'or diminish the bin size to be tested') diff --git a/elephant/change_point_detection.py b/elephant/change_point_detection.py index 28bca4370..727db91be 100644 --- a/elephant/change_point_detection.py +++ b/elephant/change_point_detection.py @@ -7,10 +7,10 @@ case of non-stationarity, the output is a list of detected Change Points (CPs). Essentially, a det of two-sided window of width `h` (`_filter(t, h, spk)`) slides over the spike train within the time `[h, t_final-h]`. This generates a -`_filter_process(dt, h, spk)` that assigns at each time `t` the difference -between a spike lying in the right and left window. 
If at any time `t` this -difference is large 'enough' is assumed the presence of a rate Change Point in -a neighborhood of `t`. A threshold `test_quantile` for the maximum of +`_filter_process(time_step, h, spk)` that assigns at each time `t` the +difference between a spike lying in the right and left window. If at any time +`t` this difference is large 'enough' is assumed the presence of a rate Change +Point in a neighborhood of `t`. A threshold `test_quantile` for the maximum of the filter_process (max difference of spike count between the left and right window) is derived based on asymptotic considerations. The procedure is repeated for an arbitrary set of windows, with different size `h`. @@ -30,13 +30,14 @@ >>> alpha = 5.0 >>> num_surrogates = 10000 >>> change_points = multiple_filter_test(window_size, st, t_fin, alpha, - ... num_surrogates, dt = 0.5*pq.s) + ... num_surrogates, time_step = 0.5*pq.s) References ---------- -Messer, M., Kirchner, M., Schiemann, J., Roeper, J., Neininger, R., & Schneider, -G. (2014). A multiple filter test for the detection of rate changes in renewal -processes with varying variance. The Annals of Applied Statistics, 8(4),2027-2067. +Messer, M., Kirchner, M., Schiemann, J., Roeper, J., Neininger, R., & +Schneider, G. (2014). A multiple filter test for the detection of rate changes +in renewal processes with varying variance. The Annals of Applied Statistics, +8(4),2027-2067. Original code ------------- @@ -50,42 +51,45 @@ import numpy as np import quantities as pq +from elephant.utils import deprecated_alias + +@deprecated_alias(dt='time_step') def multiple_filter_test(window_sizes, spiketrain, t_final, alpha, n_surrogates, test_quantile=None, test_param=None, - dt=None): + time_step=None): """ Detects change points. This function returns the detected change points, that correspond to the maxima of the `_filter_processes`. These are the processes generated by - sliding the windows of step `dt`; at each step the difference between spike - on the right and left window is calculated. + sliding the windows of step `time_step`; at each step the difference + between spike on the right and left window is calculated. Parameters ---------- - window_sizes : list of quantity objects - list that contains windows sizes - spiketrain : neo.SpikeTrain, numpy array or list - spiketrain objects to analyze - t_final : quantity - final time of the spike train which is to be analysed - alpha : float - alpha-quantile in range [0, 100] for the set of maxima of the limit - processes - n_surrogates : integer - numbers of simulated limit processes - test_quantile : float - threshold for the maxima of the filter derivative processes, if any - of these maxima is larger than this value, it is assumed the - presence of a cp at the time corresponding to that maximum - dt : quantity - resolution, time step at which the windows are slided - test_param : np.array of shape (3, num of window), - first row: list of `h`, second and third rows: empirical means and - variances of the limit process correspodning to `h`. This will be - used to normalize the `filter_process` in order to give to the - every maximum the same impact on the global statistic. 
+ window_sizes : list of quantity objects + list that contains windows sizes + spiketrain : neo.SpikeTrain, numpy array or list + spiketrain objects to analyze + t_final : quantity + final time of the spike train which is to be analysed + alpha : float + alpha-quantile in range [0, 100] for the set of maxima of the limit + processes + n_surrogates : integer + numbers of simulated limit processes + test_quantile : float + threshold for the maxima of the filter derivative processes, if any + of these maxima is larger than this value, it is assumed the + presence of a cp at the time corresponding to that maximum + time_step : quantity + resolution, time step at which the windows are slided + test_param : np.array of shape (3, num of window), + first row: list of `h`, second and third rows: empirical means and + variances of the limit process correspodning to `h`. This will be + used to normalize the `filter_process` in order to give to the + every maximum the same impact on the global statistic. Returns ------- @@ -99,13 +103,13 @@ def multiple_filter_test(window_sizes, spiketrain, t_final, alpha, if (test_quantile is None) and (test_param is None): test_quantile, test_param = empirical_parameters(window_sizes, t_final, alpha, n_surrogates, - dt) + time_step) elif test_quantile is None: test_quantile = empirical_parameters(window_sizes, t_final, alpha, - n_surrogates, dt)[0] + n_surrogates, time_step)[0] elif test_param is None: test_param = empirical_parameters(window_sizes, t_final, alpha, - n_surrogates, dt)[1] + n_surrogates, time_step)[1] spk = spiketrain @@ -113,8 +117,8 @@ def multiple_filter_test(window_sizes, spiketrain, t_final, alpha, cps = [] for i, h in enumerate(window_sizes): - # automatic setting of dt - dt_temp = h / 20 if dt is None else dt + # automatic setting of time_step + dt_temp = h / 20 if time_step is None else time_step # filter_process for window of size h t, differences = _filter_process(dt_temp, h, spk, t_final, test_param) time_index = np.arange(len(differences)) @@ -124,7 +128,6 @@ def multiple_filter_test(window_sizes, spiketrain, t_final, alpha, cp_index = np.argmax(differences) # from index to time cp = cp_index * dt_temp + h - #print("detected point {0}".format(cp), "with filter {0}".format(h)) # before repeating the procedure, the h-neighbourgs of detected CP # are discarded, because rate changes into it are alrady explained mask_fore = time_index > cp_index - int((h / dt_temp).simplified) @@ -151,23 +154,24 @@ def multiple_filter_test(window_sizes, spiketrain, t_final, alpha, return cps -def _brownian_motion(t_in, t_fin, x_in, dt): +def _brownian_motion(t_in, t_fin, x_in, time_step): """ Generate a Brownian Motion. 
Parameters ---------- - t_in : quantities, - initial time - t_fin : quantities, - final time - x_in : float, - initial point of the process: _brownian_motio(0) = x_in - dt : quantities, - resolution, time step at which brownian increments are summed + t_in : quantities, + initial time + t_fin : quantities, + final time + x_in : float, + initial point of the process: _brownian_motio(0) = x_in + time_step : quantities, + resolution, time step at which brownian increments are summed Returns ------- - Brownian motion on [t_in, t_fin], with resolution dt and initial state x_in + Brownian motion on [t_in, t_fin], with resolution time_step and initial + state x_in """ u = 1 * pq.s @@ -180,7 +184,7 @@ def _brownian_motion(t_in, t_fin, x_in, dt): except ValueError: raise ValueError("t_fin must be a time quantity") try: - dt_sec = dt.rescale(u).magnitude + dt_sec = time_step.rescale(u).magnitude except ValueError: raise ValueError("dt must be a time quantity") @@ -190,7 +194,7 @@ def _brownian_motion(t_in, t_fin, x_in, dt): return s + x_in -def _limit_processes(window_sizes, t_final, dt): +def _limit_processes(window_sizes, t_final, time_step): """ Generate the limit processes (depending only on t_final and h), one for each window size `h` in H. The distribution of maxima of these processes @@ -202,14 +206,14 @@ def _limit_processes(window_sizes, t_final, dt): set of windows' size t_final : quantity object end of limit process - dt : quantity object + time_step : quantity object resolution, time step at which the windows are slided Returns ------- limit_processes : list of numpy array each entries contains the limit processes for each h, - evaluated in [h,T-h] with steps dt + evaluated in [h,T-h] with steps time_step """ limit_processes = [] @@ -220,11 +224,11 @@ def _limit_processes(window_sizes, t_final, dt): except ValueError: raise ValueError("window_sizes must be a list of times") try: - dt_sec = dt.rescale(u).magnitude + dt_sec = time_step.rescale(u).magnitude except ValueError: - raise ValueError("dt must be a time quantity") + raise ValueError("time_step must be a time quantity") - w = _brownian_motion(0 * u, t_final, 0, dt) + w = _brownian_motion(0 * u, t_final, 0, time_step) for h in window_sizes_sec: # BM on [h,T-h], shifted in time t-->t+h @@ -241,13 +245,15 @@ def _limit_processes(window_sizes, t_final, dt): return limit_processes -def empirical_parameters(window_sizes, t_final, alpha, n_surrogates, dt=None): +@deprecated_alias(dt='time_step') +def empirical_parameters(window_sizes, t_final, alpha, n_surrogates, + time_step=None): """ This function generates the threshold and the null parameters. - The`_filter_process_h` has been proved to converge (for t_fin, h-->infinity) - to a continuous functional of a Brownaian motion ('limit_process'). - Using a MonteCarlo technique, maxima of these limit_processes are - collected. + The`_filter_process_h` has been proved to converge (for t_fin, + h-->infinity) to a continuous functional of a Brownaian motion + ('limit_process'). Using a MonteCarlo technique, maxima of + these limit_processes are collected. The threshold is defined as the alpha quantile of this set of maxima. 
Namely: @@ -256,29 +262,29 @@ def empirical_parameters(window_sizes, t_final, alpha, n_surrogates, dt=None): Parameters ---------- - window_sizes : list of quantity objects - set of windows' size - t_final : quantity object - final time of the spike - alpha : float - alpha-quantile in range [0, 100] - n_surrogates : integer - numbers of simulated limit processes - dt : quantity object - resolution, time step at which the windows are slided + window_sizes : list of quantity objects + set of windows' size + t_final : quantity object + final time of the spike + alpha : float + alpha-quantile in range [0, 100] + n_surrogates : integer + numbers of simulated limit processes + time_step : quantity object + resolution, time step at which the windows are slided Returns ------- - test_quantile : float - threshold for the maxima of the filter derivative processes, if any - of these maxima is larger than this value, it is assumed the - presence of a cp at the time corresponding to that maximum - - test_param : np.array 3 * num of window, - first row: list of `h`, second and third rows: empirical means and - variances of the limit process correspodning to `h`. This will be - used to normalize the `filter_process` in order to give to the every - maximum the same impact on the global statistic. + test_quantile : float + threshold for the maxima of the filter derivative processes, if any + of these maxima is larger than this value, it is assumed the + presence of a cp at the time corresponding to that maximum + + test_param : np.array 3 * num of window, + first row: list of `h`, second and third rows: empirical means and + variances of the limit process correspodning to `h`. This will be + used to normalize the `filter_process` in order to give to the every + maximum the same impact on the global statistic. """ # try: @@ -298,8 +304,8 @@ def empirical_parameters(window_sizes, t_final, alpha, n_surrogates, dt=None): raise ValueError("t_final must be a time quantity") if not isinstance(n_surrogates, int): raise TypeError("n_surrogates must be an integer") - if not (isinstance(dt, pq.Quantity) or (dt is None)): - raise ValueError("dt must be a time quantity") + if not (isinstance(time_step, pq.Quantity) or (time_step is None)): + raise ValueError("time_step must be a time quantity") if t_final <= 0: raise ValueError("t_final needs to be strictly positive") @@ -309,11 +315,11 @@ def empirical_parameters(window_sizes, t_final, alpha, n_surrogates, dt=None): raise ValueError("window size needs to be strictly positive") if np.max(window_sizes) >= t_final / 2: raise ValueError("window size too large") - if dt is not None: + if time_step is not None: for h in window_sizes: - if int(h.rescale('us')) % int(dt.rescale('us')) != 0: + if int(h.rescale('us')) % int(time_step.rescale('us')) != 0: raise ValueError( - "Every window size h must be a multiple of dt") + "Every window size h must be a multiple of time_step") # Generate a matrix M*: n X m where n = n_surrogates is the number of # simulated limit processes and m is the number of chosen window sizes. 
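The hunks in this file only rename `dt` to `time_step`; the statistical machinery is unchanged. As background for how `empirical_parameters` obtains its threshold, the simplified, standalone sketch below illustrates the idea described in the docstring: collect maxima of simulated Brownian-motion-based processes and cut this set at a quantile. It deliberately ignores the window-dependent normalisation and the exact limit-process construction used by Elephant, so treat it as an illustration, not the implementation.

    import numpy as np

    def brownian_motion(t_final, time_step, x_init=0.0):
        # Discretised standard Brownian motion on [0, t_final]:
        # cumulative sum of independent N(0, time_step) increments.
        n_steps = int(t_final / time_step)
        increments = np.random.normal(0.0, np.sqrt(time_step), size=n_steps)
        return x_init + np.concatenate(([0.0], np.cumsum(increments)))

    # Take the maximum of each surrogate process and use an upper percentile
    # of these maxima as the detection threshold (alpha = 5 -> 95th percentile).
    n_surrogates = 1000
    maxima = [brownian_motion(t_final=10.0, time_step=0.01).max()
              for _ in range(n_surrogates)]
    test_quantile = np.percentile(maxima, 100 - 5)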
@@ -323,7 +329,7 @@ def empirical_parameters(window_sizes, t_final, alpha, n_surrogates, dt=None): for i in range(n_surrogates): # mh_star = [] - simu = _limit_processes(window_sizes, t_final, dt) + simu = _limit_processes(window_sizes, t_final, time_step) # for i, h in enumerate(window_sizes_mag): # # max over time of the limit process generated with window h # m_h = np.max(simu[i]) @@ -351,41 +357,41 @@ def empirical_parameters(window_sizes, t_final, alpha, n_surrogates, dt=None): return test_quantile, test_param -def _filter(t, h, spk): +def _filter(t_center, window, spiketrain): """ - This function calculates the difference of spike counts in the left and right - side of a window of size h centered in t and normalized by its variance. - The variance of this count can be expressed as a combination of mean and var - of the I.S.I. lying inside the window. + This function calculates the difference of spike counts in the left and + right side of a window of size h centered in t and normalized by its + variance. The variance of this count can be expressed as a combination of + mean and var of the I.S.I. lying inside the window. Parameters ---------- - h : quantity - window's size - t : quantity - time on which the window is centered - spk : list, numpy array or SpikeTrain - spike train to analyze + t_center : quantity + time on which the window is centered + window : quantity + window's size + spiketrain : list, numpy array or SpikeTrain + spike train to analyze Returns ------- - difference : float, - difference of spike count normalized by its variance + difference : float, + difference of spike count normalized by its variance """ u = 1 * pq.s try: - t_sec = t.rescale(u).magnitude + t_sec = t_center.rescale(u).magnitude except AttributeError: raise ValueError("t must be a quantities object") # tm = t_sec.magnitude try: - h_sec = h.rescale(u).magnitude + h_sec = window.rescale(u).magnitude except AttributeError: raise ValueError("h must be a time quantity") # hm = h_sec.magnitude try: - spk_sec = spk.rescale(u).magnitude + spk_sec = spiketrain.rescale(u).magnitude except AttributeError: raise ValueError( "spiketrain must be a list (array) of times or a neo spiketrain") @@ -431,11 +437,11 @@ def _filter(t, h, spk): return difference -def _filter_process(dt, h, spk, t_final, test_param): +def _filter_process(time_step, h, spk, t_final, test_param): """ Given a spike train `spk` and a window size `h`, this function generates the `filter derivative process` by evaluating the function `_filter` - in steps of `dt`. + in steps of `time_step`. 
Parameters ---------- @@ -445,7 +451,7 @@ def _filter_process(dt, h, spk, t_final, test_param): time on which the window is centered spk : list, array or SpikeTrain spike train to analyze - dt : quantity object, time step at which the windows are slided + time_step : quantity object, time step at which the windows are slided resolution test_param : matrix, the means of the first row list of `h`, the second row Empirical and the third row variances of @@ -471,9 +477,9 @@ def _filter_process(dt, h, spk, t_final, test_param): except AttributeError: raise ValueError("t_final must be a time quanity") try: - dt_sec = dt.rescale(u).magnitude + dt_sec = time_step.rescale(u).magnitude except AttributeError: - raise ValueError("dt must be a time quantity") + raise ValueError("time_step must be a time quantity") # domain of the process time_domain = np.arange(h_sec, t_final_sec - h_sec, dt_sec) filter_trajectrory = [] diff --git a/elephant/conversion.py b/elephant/conversion.py index 33524ab07..cfc1f14a7 100644 --- a/elephant/conversion.py +++ b/elephant/conversion.py @@ -20,7 +20,7 @@ import quantities as pq import scipy.sparse as sps -from elephant.utils import is_binary +from elephant.utils import is_binary, deprecated_alias def binarize(spiketrain, sampling_rate=None, t_start=None, t_stop=None, @@ -184,18 +184,18 @@ def _detect_rounding_errors(values, tolerance): return 1 - (values % 1) <= tolerance -def _calc_tstart(num_bins, binsize, t_stop): +def _calc_tstart(n_bins, bin_size, t_stop): """ Calculates the start point from given parameters. Calculates the start point `t_start` from the three parameters - `num_bins`, `binsize`, `t_stop`. + `n_bins`, `bin_size`, `t_stop`. Parameters ---------- - num_bins : int + n_bins : int Number of bins - binsize : pq.Quantity + bin_size : pq.Quantity Size of Bins t_stop : pq.Quantity Stop time @@ -205,22 +205,22 @@ def _calc_tstart(num_bins, binsize, t_stop): t_start : pq.Quantity Starting point calculated from given parameters. """ - if num_bins is not None and binsize is not None and t_stop is not None: - return t_stop.rescale(binsize.units) - num_bins * binsize + if n_bins is not None and bin_size is not None and t_stop is not None: + return t_stop.rescale(bin_size.units) - n_bins * bin_size -def _calc_tstop(num_bins, binsize, t_start): +def _calc_tstop(n_bins, bin_size, t_start): """ Calculates the stop point from given parameters. Calculates the stop point `t_stop` from the three parameters - `num_bins`, `binsize`, `t_start`. + `n_bins`, `bin_size`, `t_start`. Parameters ---------- - num_bins : int + n_bins : int Number of bins - binsize : pq.Quantity + bin_size : pq.Quantity Size of bins t_start : pq.Quantity Start time @@ -230,20 +230,20 @@ def _calc_tstop(num_bins, binsize, t_start): t_stop : pq.Quantity Stopping point calculated from given parameters. """ - if num_bins is not None and binsize is not None and t_start is not None: - return t_start.rescale(binsize.units) + num_bins * binsize + if n_bins is not None and bin_size is not None and t_start is not None: + return t_start.rescale(bin_size.units) + n_bins * bin_size -def _calc_num_bins(binsize, t_start, t_stop, tolerance): +def _calc_number_of_bins(bin_size, t_start, t_stop, tolerance): """ Calculates the number of bins from given parameters. - Calculates the number of bins `num_bins` from the three parameters - `binsize`, `t_start`, `t_stop`. + Calculates the number of bins `n_bins` from the three parameters + `bin_size`, `t_start`, `t_stop`. 
Parameters ---------- - binsize : pq.Quantity + bin_size : pq.Quantity Size of Bins t_start : pq.Quantity Start time @@ -251,11 +251,11 @@ def _calc_num_bins(binsize, t_start, t_stop, tolerance): Stop time tolerance : float tolerance for detection of rounding errors before casting - the resulting num_bins to integer + the resulting num. of bins to integer Returns ------- - num_bins : int + n_bins : int Number of bins calculated from given parameters. Raises @@ -264,31 +264,31 @@ def _calc_num_bins(binsize, t_start, t_stop, tolerance): When `t_stop` is smaller than `t_start`". """ - if binsize is not None and t_start is not None and t_stop is not None: + if bin_size is not None and t_start is not None and t_stop is not None: if t_stop < t_start: raise ValueError("t_stop (%s) is smaller than t_start (%s)" % (t_stop, t_start)) - num_bins = ((t_stop - t_start).rescale( - binsize.units) / binsize.magnitude).item() - if _detect_rounding_errors(num_bins, tolerance): + n_bins = ((t_stop - t_start).rescale( + bin_size.units) / bin_size.magnitude).item() + if _detect_rounding_errors(n_bins, tolerance): warnings.warn('Correcting a rounding error in the calculation ' - 'of num_bins by increasing num_bins by 1. ' + 'of n_bins by increasing n_bins by 1. ' 'You can set tolerance=None to disable this ' 'behaviour.') - num_bins += 1 - return int(num_bins) + n_bins += 1 + return int(n_bins) -def _calc_binsize(num_bins, t_start, t_stop): +def _calc_bin_size(n_bins, t_start, t_stop): """ Calculates the stop point from given parameters. - Calculates the size of bins `binsize` from the three parameters - `num_bins`, `t_start` and `t_stop`. + Calculates the size of bins `bin_size` from the three parameters + `n_bins`, `t_start` and `t_stop`. Parameters ---------- - num_bins : int + n_bins : int Number of bins t_start : pq.Quantity Start time @@ -297,7 +297,7 @@ def _calc_binsize(num_bins, t_start, t_stop): Returns ------- - binsize : pq.Quantity + bin_size : pq.Quantity Size of bins calculated from given parameters. Raises @@ -306,11 +306,11 @@ def _calc_binsize(num_bins, t_start, t_stop): When `t_stop` is smaller than `t_start`. """ - if num_bins is not None and t_start is not None and t_stop is not None: + if n_bins is not None and t_start is not None and t_stop is not None: if t_stop < t_start: raise ValueError("t_stop (%s) is smaller than t_start (%s)" % (t_stop, t_start)) - return (t_stop - t_start) / num_bins + return (t_stop - t_start) / n_bins def _get_start_stop_from_input(spiketrains): @@ -382,10 +382,10 @@ class BinnedSpikeTrain(object): ---------- spiketrains : neo.SpikeTrain or list of neo.SpikeTrain or np.ndarray Spike train(s) to be binned. - binsize : pq.Quantity, optional + bin_size : pq.Quantity, optional Width of a time bin. Default: None - num_bins : int, optional + n_bins : int, optional Number of bins of the binned spike train. Default: None t_start : pq.Quantity, optional @@ -406,10 +406,10 @@ class BinnedSpikeTrain(object): TypeError If `spiketrains` is an np.ndarray with dimensionality different than NxM or - if type of `num_bins` is not an `int` or `num_bins` < 0. + if type of `n_bins` is not an `int` or `n_bins` < 0. ValueError - When number of bins calculated from `t_start`, `t_stop` and `binsize` - differs from provided `num_bins` or + When number of bins calculated from `t_start`, `t_stop` and `bin_size` + differs from provided `n_bins` or if `t_stop` of any spike train is smaller than any `t_start` or if any spike train does not cover the full [`t_start`, t_stop`] range. 
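Because this file renames the central `BinnedSpikeTrain` keywords, a short usage reminder with the new spellings may help; the values are taken from the doctests further down in this diff, and the old `binsize`/`num_bins` keywords remain usable through the `deprecated_alias` shim added to `__init__` below.

    import neo
    import quantities as pq
    import elephant.conversion as conv

    st = neo.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s,
                        t_stop=10.0 * pq.s)
    # New keyword spellings: bin_size / n_bins (previously binsize / num_bins).
    bst = conv.BinnedSpikeTrain(st, n_bins=10, bin_size=1 * pq.s,
                                t_start=0 * pq.s)
    print(bst.to_array())   # [[2 1 0 1 1 1 1 0 0 0]]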
@@ -429,13 +429,13 @@ class BinnedSpikeTrain(object): ----- There are four minimal configurations of the optional parameters which have to be provided, otherwise a `ValueError` will be raised: - * `t_start`, `num_bins`, `binsize` - * `t_start`, `num_bins`, `t_stop` + * `t_start`, `n_bins`, `bin_size` + * `t_start`, `n_bins`, `t_stop` * `t_start`, `bin_size`, `t_stop` - * `t_stop`, `num_bins`, `binsize` + * `t_stop`, `n_bins`, `bin_size` If `spiketrains` is a `neo.SpikeTrain` or a list thereof, it is enough to - explicitly provide only one parameter: `num_bins` or `binsize`. The + explicitly provide only one parameter: `n_bins` or `bin_size`. The `t_start` and `t_stop` will be calculated from given `spiketrains` (max `t_start` and min `t_stop` of `neo.SpikeTrain`s). Missing parameter will be calculated automatically. @@ -445,7 +445,8 @@ """ - def __init__(self, spiketrains, binsize=None, num_bins=None, t_start=None, + @deprecated_alias(binsize='bin_size', num_bins='n_bins') + def __init__(self, spiketrains, bin_size=None, n_bins=None, t_start=None, t_stop=None, tolerance=1e-8): """ Defines a BinnedSpikeTrain class @@ -463,12 +464,12 @@ def __init__(self, spiketrains, binsize=None, num_bins=None, t_start=None, spiketrains = [spiketrains] # Link to input - self.lst_input = spiketrains + self.input_spiketrains = spiketrains # Set given parameters self.t_start = t_start self.t_stop = t_stop - self.num_bins = num_bins - self.binsize = binsize + self.n_bins = n_bins + self.bin_size = bin_size self.tolerance = tolerance # Empty matrix for storage, time points matrix self._mat_u = None @@ -476,11 +477,11 @@ def __init__(self, spiketrains, binsize=None, num_bins=None, t_start=None, self._sparse_mat_u = None # Check all parameter, set also missing values if self.is_binned: - self.num_bins = np.shape(spiketrains)[1] + self.n_bins = np.shape(spiketrains)[1] self._calc_start_stop(spiketrains) self._check_init_params( - self.binsize, self.num_bins, self.t_start, self.t_stop) - self._check_consistency(spiketrains, self.binsize, self.num_bins, + self.bin_size, self.n_bins, self.t_start, self.t_stop) + self._check_consistency(spiketrains, self.bin_size, self.n_bins, self.t_start, self.t_stop) # Now create sparse matrix self._convert_to_binned(spiketrains) @@ -501,25 +502,42 @@ def matrix_rows(self): def matrix_columns(self): return self._sparse_mat_u.shape[1] + @property + def binsize(self): + warnings.warn("'.binsize' is deprecated; use '.bin_size'", + DeprecationWarning) + return self.bin_size + + @property + def lst_input(self): + warnings.warn("'.lst_input' is deprecated; use '.input_spiketrains'", + DeprecationWarning) + return self.input_spiketrains + + @property + def num_bins(self): + warnings.warn("'.num_bins' is deprecated; use '.n_bins'", + DeprecationWarning) + return self.n_bins + # ========================================================================= # There are four cases the given parameters must fulfill, or a `ValueError` # will be raised: - # t_start, num_bins, binsize - # t_start, num_bins, t_stop + # t_start, n_bins, bin_size + # t_start, n_bins, t_stop # t_start, bin_size, t_stop - # t_stop, num_bins, binsize + # t_stop, n_bins, bin_size # ========================================================================= - def _check_init_params(self, binsize, num_bins, t_start, t_stop): + def _check_init_params(self, bin_size, n_bins, t_start, t_stop): """ Checks given parameters. Calculates also missing parameter.
Parameters ---------- - binsize : pq.Quantity + bin_size : pq.Quantity Size of bins - num_bins : int + n_bins : int Number of bins t_start: pq.Quantity Start time for the binned spike train @@ -529,25 +547,25 @@ def _check_init_params(self, binsize, num_bins, t_start, t_stop): Raises ------ TypeError - If type of `num_bins` is not an `int`. + If type of `n_bins` is not an `int`. ValueError When `t_stop` is smaller than `t_start`. """ - # Check if num_bins is an integer (special case) - if num_bins is not None: - if not np.issubdtype(type(num_bins), np.integer): - raise TypeError("num_bins is not an integer!") + # Check if n_bins is an integer (special case) + if n_bins is not None: + if not np.issubdtype(type(n_bins), np.integer): + raise TypeError("'n_bins' is not an integer!") # Check if all parameters can be calculated, otherwise raise ValueError if t_start is None: - self.t_start = _calc_tstart(num_bins, binsize, t_stop) + self.t_start = _calc_tstart(n_bins, bin_size, t_stop) elif t_stop is None: - self.t_stop = _calc_tstop(num_bins, binsize, t_start) - elif num_bins is None: - self.num_bins = _calc_num_bins(binsize, t_start, t_stop, - self.tolerance) - elif binsize is None: - self.binsize = _calc_binsize(num_bins, t_start, t_stop) + self.t_stop = _calc_tstop(n_bins, bin_size, t_start) + elif n_bins is None: + self.n_bins = _calc_number_of_bins(bin_size, t_start, t_stop, + self.tolerance) + elif bin_size is None: + self.bin_size = _calc_bin_size(n_bins, t_start, t_stop) def _calc_start_stop(self, spiketrains): """ @@ -588,10 +606,10 @@ def _count_params(self): """ return sum(x is not None for x in - [self.t_start, self.t_stop, self.binsize, - self.num_bins]) >= 3 + [self.t_start, self.t_stop, self.bin_size, + self.n_bins]) >= 3 - def _check_consistency(self, spiketrains, binsize, num_bins, t_start, + def _check_consistency(self, spiketrains, bin_size, n_bins, t_start, t_stop): """ Checks the given parameters for consistency @@ -601,7 +619,7 @@ def _check_consistency(self, spiketrains, binsize, num_bins, t_start, AttributeError If there is an insufficient number of parameters. TypeError - If `num_bins` is not an `int` or is <0. + If `n_bins` is not an `int` or is <0. ValueError If an inconsistency regarding the parameters appears, e.g. `t_start` > `t_stop`. @@ -611,12 +629,12 @@ def _check_consistency(self, spiketrains, binsize, num_bins, t_start, raise AttributeError("Too few parameters given. Please provide " "at least one of the parameter which are " "None.\n" - "t_start: %s, t_stop: %s, binsize: %s, " - "num_bins: %s" % ( + "t_start: %s, t_stop: %s, bin_size: %s, " + "n_bins: %s" % ( self.t_start, self.t_stop, - self.binsize, - self.num_bins)) + self.bin_size, + self.n_bins)) if self.is_spiketrain: t_starts = [elem.t_start for elem in spiketrains] t_stops = [elem.t_stop for elem in spiketrains] @@ -633,47 +651,47 @@ def _check_consistency(self, spiketrains, binsize, num_bins, t_start, raise ValueError( 'too many / too large time bins. 
Some spike trains are ' 'not defined in the ending time') + # account for rounding errors in the reference num_bins - num_bins_test = (( + n_bins_test = (( (t_stop - t_start).rescale( - binsize.units) / binsize).magnitude) - if _detect_rounding_errors(num_bins_test, tolerance=self.tolerance): - num_bins_test += 1 - num_bins_test = int(num_bins_test) - if num_bins != num_bins_test: + bin_size.units) / bin_size).magnitude) + if _detect_rounding_errors(n_bins_test, tolerance=self.tolerance): + n_bins_test += 1 + n_bins_test = int(n_bins_test) + if n_bins != n_bins_test: raise ValueError( "Inconsistent arguments t_start (%s), " % t_start + - "t_stop (%s), binsize (%s) " % (t_stop, binsize) + - "and num_bins (%d)" % num_bins) - if num_bins - int(num_bins) != 0 or num_bins < 0: + "t_stop (%s), bin_size (%s) " % (t_stop, bin_size) + + "and n_bins (%d)" % n_bins) + if n_bins - int(n_bins) != 0 or n_bins < 0: raise TypeError( - "Number of bins (num_bins) is not an integer or < 0: " + str( - num_bins)) + "Number of bins ({}) is not an integer or < 0".format(n_bins)) @property def bin_edges(self): """ - Returns all time edges as a quantity array with :attr:`num_bins` bins. + Returns all time edges as a quantity array with :attr:`n_bins` bins. The borders of all time steps between :attr:`t_start` and - :attr:`t_stop` with a step :attr:`binsize`. It is crucial for many + :attr:`t_stop` with a step :attr:`bin_size`. It is crucial for many analyses that all bins have the same size, so if - :attr:`t_stop` - :attr:`t_start` is not divisible by :attr:`binsize`, + :attr:`t_stop` - :attr:`t_start` is not divisible by :attr:`bin_size`, there will be some leftover time at the end (see https://github.com/NeuralEnsemble/elephant/issues/255). - The length of the returned array should match :attr:`num_bins`. + The length of the returned array should match :attr:`n_bins`. Returns ------- bin_edges : pq.Quantity All edges in interval [:attr:`t_start`, :attr:`t_stop`] with - :attr:`num_bins` bins are returned as a quantity array. + :attr:`n_bins` bins are returned as a quantity array. """ - t_start = self.t_start.rescale(self.binsize.units).magnitude - bin_edges = np.linspace(t_start, t_start + self.num_bins * - self.binsize.magnitude, - num=self.num_bins + 1, endpoint=True) - return pq.Quantity(bin_edges, units=self.binsize.units) + t_start = self.t_start.rescale(self.bin_size.units).magnitude + bin_edges = np.linspace(t_start, t_start + self.n_bins * + self.bin_size.magnitude, + num=self.n_bins + 1, endpoint=True) + return pq.Quantity(bin_edges, units=self.bin_size.units) @property def bin_centers(self): @@ -689,7 +707,7 @@ def bin_centers(self): All center edges in interval (:attr:`start`, :attr:`stop`). """ - return self.bin_edges[:-1] + self.binsize / 2 + return self.bin_edges[:-1] + self.bin_size / 2 def to_sparse_array(self): """ @@ -770,7 +788,7 @@ def spike_indices(self): >>> import quantities as pq >>> st = n.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, ... t_stop=10.0 * pq.s) - >>> x = conv.BinnedSpikeTrain(st, num_bins=10, binsize=1 * pq.s, + >>> x = conv.BinnedSpikeTrain(st, n_bins=10, bin_size=1 * pq.s, ... t_start=0 * pq.s) >>> print(x.spike_indices) [[0, 0, 1, 3, 4, 5, 6]] @@ -802,7 +820,7 @@ def is_binary(self): True for binary input, False otherwise. 
""" - return is_binary(self.lst_input) + return is_binary(self.input_spiketrains) def to_bool_array(self): """ @@ -831,7 +849,7 @@ def to_bool_array(self): >>> import quantities as pq >>> a = n.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, ... t_stop=10.0 * pq.s) - >>> x = conv.BinnedSpikeTrain(a, num_bins=10, binsize=1 * pq.s, + >>> x = conv.BinnedSpikeTrain(a, n_bins=10, bin_size=1 * pq.s, ... t_start=0 * pq.s) >>> print(x.to_bool_array()) [[ True True False True True True True False False False]] @@ -862,7 +880,7 @@ def to_array(self, store_array=False): >>> import quantities as pq >>> a = n.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, ... t_stop=10.0 * pq.s) - >>> x = conv.BinnedSpikeTrain(a, num_bins=10, binsize=1 * pq.s, + >>> x = conv.BinnedSpikeTrain(a, n_bins=10, bin_size=1 * pq.s, ... t_start=0 * pq.s) >>> print(x.to_array()) [[2 1 0 1 1 1 1 0 0 0]] @@ -957,8 +975,8 @@ def _convert_to_binned(self, spiketrains): counts = [] for idx, st in enumerate(spiketrains): - times = (st.times - self.t_start).rescale(self.binsize.units) - scale = np.array((times / self.binsize).magnitude) + times = (st.times - self.t_start).rescale(self.bin_size.units) + scale = np.array((times / self.bin_size).magnitude) # shift spikes that are very close # to the right edge into the next bin @@ -974,19 +992,19 @@ def _convert_to_binned(self, spiketrains): scale = scale.astype(int) - la = np.logical_and(times >= 0 * self.binsize.units, + la = np.logical_and(times >= 0 * self.bin_size.units, times <= (self.t_stop - self.t_start).rescale( - self.binsize.units)) + self.bin_size.units)) filled_tmp = scale[la] - filled_tmp = filled_tmp[filled_tmp < self.num_bins] + filled_tmp = filled_tmp[filled_tmp < self.n_bins] f, c = np.unique(filled_tmp, return_counts=True) column_ids.extend(f) counts.extend(c) row_ids.extend([idx] * len(f)) csr_matrix = sps.csr_matrix((counts, (row_ids, column_ids)), shape=(len(spiketrains), - self.num_bins), + self.n_bins), dtype=int) self._sparse_mat_u = csr_matrix diff --git a/elephant/cubic.py b/elephant/cubic.py index 6e896900d..159874351 100644 --- a/elephant/cubic.py +++ b/elephant/cubic.py @@ -7,12 +7,13 @@ steps: 1) compute the population histogram (PSTH) with the desired bin size - >>> binsize = 5 * pq.ms - >>> pop_count = elephant.statistics.time_histogram(sts, binsize) + >>> bin_size = 5 * pq.ms + >>> pop_count = elephant.statistics.time_histogram(sts, bin_size) 2) apply CuBIC to the population count >>> alpha = 0.05 # significance level of the tests used - >>> xi, p_val, k = cubic(data, ximax=100, alpha=0.05, errorval=4.): + >>> xi, p_val, k = cubic(data, max_iterations=100, alpha=0.05, + ... errorval=4.): :copyright: Copyright 2016 by the Elephant team, see `doc/authors.rst`. :license: BSD, see LICENSE.txt for details. @@ -26,39 +27,42 @@ import math import warnings +from elephant.utils import deprecated_alias + # Based on matlab code by Benjamin Staude # Adaptation to python by Pietro Quaglio and Emiliano Torre -def cubic(data, ximax=100, alpha=0.05): - """ +@deprecated_alias(data='histogram', ximax='max_iterations') +def cubic(histogram, max_iterations=100, alpha=0.05): + r""" Performs the CuBIC analysis [1]_ on a population histogram, calculated from a population of spiking neurons. 
-    The null hypothesis :math:`H_0: k_3(data)<=k^*_{3,\\xi}` is iteratively
-    tested with increasing correlation order :math:`\\xi` (correspondent to
-    variable xi) until it is possible to accept, with a significance level alpha,
-    that :math:`\\hat{\\xi}` (corresponding to variable xi_hat) is the minimum
-    order of correlation necessary to explain the third cumulant
+    The null hypothesis :math:`H_0: k_3(data)<=k^*_{3,\xi}` is iteratively
+    tested with increasing correlation order :math:`\xi` until it is possible
+    to accept, with a significance level `alpha`, that :math:`\hat{\xi}` is
+    the minimum order of correlation necessary to explain the third cumulant
     :math:`k_3(data)`.
-    :math:`k^*_{3,\\xi}` is the maximized third cumulant, supposing a Compund
+    :math:`k^*_{3,\xi}` is the maximized third cumulant, supposing a Compound
     Poisson Process (CPP) model for correlated spike trains (see [1]_)
-    with maximum order of correlation equal to :math:`\\xi`.
+    with maximum order of correlation equal to :math:`\xi`.
     Parameters
     ----------
-    data : neo.AnalogSignal
+    histogram : neo.AnalogSignal
         The population histogram (count of spikes per time bin) of the entire
         population of neurons.
-    ximax : int, optional
-        The maximum number of iteration of the hypothesis test:
-        if it is not possible to compute the :math:`\\hat{\\xi}` before
-        `ximax` iteration, the CuBIC procedure is aborted.
+    max_iterations : int, optional
+        The maximum number of iterations of the hypothesis test. Corresponds
+        to the :math:`\hat{\xi_{\text{max}}}` in [1]_. If it is not possible
+        to compute the :math:`\hat{\xi}` within `max_iterations` iterations,
+        the CuBIC procedure is aborted.
         Default: 100.
     alpha : float, optional
-        The significance level of the hypothesis tests perfomed.
+        The significance level of the hypothesis tests performed.
         Default: 0.05.
     Returns
@@ -68,13 +72,13 @@ def cubic(data, ximax=100, alpha=0.05):
         explain the value of the third cumulant calculated from the
         population.
     p : list
         The ordered list of all the p-values of the hypothesis tests that have
-        been performed. If the maximum number of iteration `ximax` is reached,
-        the last p-value is set to -4.
+        been performed. If the maximum number of iterations, `max_iterations`,
+        is reached, the last p-value is set to -4.
     kappa : list
         The list of the first three cumulants of the data.
     test_aborted : bool
-        Wheter the test was aborted because reached the maximum number of
-        iteration `ximax`.
+        Whether the test was aborted because it reached the maximum number of
+        iterations, `max_iterations`.
     References
     ----------
@@ -86,20 +90,19 @@ def cubic(data, ximax=100, alpha=0.05):
         raise ValueError(
             'the significance level alpha (= %s) has to be in [0,1]' % alpha)
-    if not isinstance(ximax, int) or ximax < 0:
-        raise ValueError(
-            'The maximum number of iterations ximax(= %i) has to be a positive'
-            % alpha + ' integer')
+    if not isinstance(max_iterations, int) or max_iterations < 0:
+        raise ValueError("'max_iterations' ({}) has to be a positive integer"
+                         .format(max_iterations))
     # dict of all possible rate functions
     try:
-        data = data.magnitude
+        histogram = histogram.magnitude
     except AttributeError:
         pass
-    L = len(data)
+    L = len(histogram)
     # compute first three cumulants
-    kappa = _kstat(data)
+    kappa = _kstat(histogram)
     xi_hat = 1
     xi = 1
     pval = 0.
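The renamed signatures in this patch rely on a `deprecated_alias` decorator imported from `elephant.utils`, whose implementation is not shown in this diff. The sketch below is only an illustration of how such a keyword-remapping decorator could work (the function `cubic_like` and the exact warning text are hypothetical, not the actual Elephant helper): it re-maps the old keyword names to the new ones and emits a `DeprecationWarning`.

import functools
import warnings


def deprecated_alias(**aliases):
    """Remap deprecated keyword names (old=new) to their new counterparts."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for old_name, new_name in aliases.items():
                if old_name in kwargs:
                    if new_name in kwargs:
                        # reject ambiguous calls that pass both spellings
                        raise TypeError(
                            "{}() got values for both '{}' and '{}'".format(
                                func.__name__, old_name, new_name))
                    warnings.warn("'{}' is deprecated; use '{}'".format(
                        old_name, new_name), DeprecationWarning)
                    kwargs[new_name] = kwargs.pop(old_name)
            return func(*args, **kwargs)
        return wrapper
    return decorator


# usage pattern mirroring this patch (illustrative function name)
@deprecated_alias(ximax='max_iterations')
def cubic_like(histogram, max_iterations=100, alpha=0.05):
    return max_iterations


cubic_like([1, 2, 3], ximax=50)  # warns and forwards max_iterations=50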
@@ -109,8 +112,9 @@ def cubic(data, ximax=100, alpha=0.05): # compute xi_hat iteratively while pval < alpha: xi_hat = xi - if xi > ximax: - warnings.warn('Test aborted, xihat= %i > ximax= %i' % (xi, ximax)) + if xi > max_iterations: + warnings.warn('Test aborted, xihat= %i > ximax= %i' % ( + xi, max_iterations)) test_aborted = True break diff --git a/elephant/current_source_density.py b/elephant/current_source_density.py index 38e799c8d..a8061a8b3 100644 --- a/elephant/current_source_density.py +++ b/elephant/current_source_density.py @@ -38,14 +38,14 @@ from __future__ import division, print_function, unicode_literals import neo -import quantities as pq import numpy as np -from scipy import io +import quantities as pq from scipy.integrate import simps +import elephant.current_source_density_src.utility_functions as utils from elephant.current_source_density_src import KCSD from elephant.current_source_density_src import icsd -import elephant.current_source_density_src.utility_functions as utils +from elephant.utils import deprecated_alias utils.patch_quantities() @@ -59,23 +59,25 @@ py_iCSD_toolbox = ['StandardCSD'] + icsd_methods -def estimate_csd(lfp, coords=None, method=None, +@deprecated_alias(coords='coordinates') +def estimate_csd(lfp, coordinates=None, method=None, process_estimate=True, **kwargs): """ - Fuction call to compute the current source density (CSD) from extracellular - potential recordings(local-field potentials - LFP) using laminar electrodes - or multi-contact electrodes with 2D or 3D geometries. + Function call to compute the current source density (CSD) from + extracellular potential recordings(local-field potentials - LFP) using + laminar electrodes or multi-contact electrodes with 2D or 3D geometries. Parameters ---------- lfp : neo.AnalogSignal positions of electrodes can be added as neo.RecordingChannel coordinate or sent externally as a func argument (See coords) - coords : [Optional] corresponding spatial coordinates of the electrodes + coordinates : [Optional] corresponding spatial coordinates of the + electrodes. 
Defaults to None - Otherwise looks for RecordingChannels coordinate + Otherwise looks for ChannelIndex coordinate method : string - Pick a method corresonding to the setup, in this implementation + Pick a method corresponding to the setup, in this implementation For Laminar probe style (1D), use 'KCSD1D' or 'StandardCSD', or 'DeltaiCSD' or 'StepiCSD' or 'SplineiCSD' For MEA probe style (2D), use 'KCSD2D', or 'MoIKCSD' @@ -110,25 +112,25 @@ def estimate_csd(lfp, coords=None, method=None, """ if not isinstance(lfp, neo.AnalogSignal): raise TypeError('Parameter `lfp` must be a neo.AnalogSignal object') - if coords is None: - coords = lfp.channel_index.coordinates + if coordinates is None: + coordinates = lfp.channel_index.coordinates else: scaled_coords = [] - for coord in coords: + for coord in coordinates: try: scaled_coords.append(coord.rescale(pq.mm)) except AttributeError: raise AttributeError('No units given for electrode spatial \ coordinates') - coords = scaled_coords + coordinates = scaled_coords if method is None: raise ValueError('Must specify a method of CSD implementation') - if len(coords) != lfp.shape[1]: + if len(coordinates) != lfp.shape[1]: raise ValueError('Number of signals and coords is not same') - for ii in coords: # CHECK for Dimensionality of electrodes + for ii in coordinates: # CHECK for Dimensionality of electrodes if len(ii) > 3: raise ValueError('Invalid number of coordinate positions') - dim = len(coords[0]) # TODO : Generic co-ordinates! + dim = len(coordinates[0]) # TODO : Generic co-ordinates! if dim == 1 and (method not in available_1d): raise ValueError('Invalid method, Available options are:', available_1d) @@ -145,7 +147,7 @@ def estimate_csd(lfp, coords=None, method=None, kernel_method = getattr(KCSD, method) # fetch the class 'KCSD1D' lambdas = kwargs.pop('lambdas', None) Rs = kwargs.pop('Rs', None) - k = kernel_method(np.array(coords), input_array.T, **kwargs) + k = kernel_method(np.array(coordinates), input_array.T, **kwargs) if process_estimate: k.cross_validate(lambdas, Rs) estm_csd = k.values() @@ -163,15 +165,15 @@ def estimate_csd(lfp, coords=None, method=None, z_coords=k.estm_z) elif method in py_iCSD_toolbox: - coords = np.array(coords) * coords[0].units + coordinates = np.array(coordinates) * coordinates[0].units if method in icsd_methods: try: - coords = coords.rescale(kwargs['diam'].units) + coordinates = coordinates.rescale(kwargs['diam'].units) except KeyError: # Then why specify as a default in icsd? 
- # All iCSD methods explicitly assume a source - # diameter in contrast to the stdCSD that - # implicitly assume infinite source radius + # All iCSD methods explicitly assume a source + # diameter in contrast to the stdCSD that + # implicitly assume infinite source radius raise ValueError("Parameter diam must be specified for iCSD \ methods: {}".format(", ".join(icsd_methods))) @@ -182,62 +184,72 @@ def estimate_csd(lfp, coords=None, method=None, specified".format(kwargs['f_type'])) lfp = neo.AnalogSignal(np.asarray(lfp).T, units=lfp.units, - sampling_rate=lfp.sampling_rate) + sampling_rate=lfp.sampling_rate) csd_method = getattr(icsd, method) # fetch class from icsd.py file csd_estimator = csd_method(lfp=lfp.magnitude * lfp.units, - coord_electrode=coords.flatten(), + coord_electrode=coordinates.flatten(), **kwargs) csd_pqarr = csd_estimator.get_csd() if process_estimate: csd_pqarr_filtered = csd_estimator.filter_csd(csd_pqarr) output = neo.AnalogSignal(csd_pqarr_filtered.T, - t_start=lfp.t_start, - sampling_rate=lfp.sampling_rate) + t_start=lfp.t_start, + sampling_rate=lfp.sampling_rate) else: output = neo.AnalogSignal(csd_pqarr.T, t_start=lfp.t_start, - sampling_rate=lfp.sampling_rate) - output.annotate(x_coords=coords) + sampling_rate=lfp.sampling_rate) + output.annotate(x_coords=coordinates) return output -def generate_lfp(csd_profile, ele_xx, ele_yy=None, ele_zz=None, - xlims=[0., 1.], ylims=[0., 1.], zlims=[0., 1.], res=50): - """Forward modelling for the getting the potentials for testing CSD - - Parameters - ---------- - csd_profile : fuction that computes True CSD profile - Available options are (see ./csd/utility_functions.py) - 1D : gauss_1d_dipole - 2D : large_source_2D and small_source_2D - 3D : gauss_3d_dipole - ele_xx : np.array - Positions of the x coordinates of the electrodes - ele_yy : np.array - Positions of the y coordinates of the electrodes - Defaults ot None, use in 2D or 3D cases only - ele_zz : np.array - Positions of the z coordinates of the electrodes - Defaults ot None, use in 3D case only - x_lims : [start, end] - The starting spatial coordinate and the ending for integration - Defaults to [0.,1.] - y_lims : [start, end] - The starting spatial coordinate and the ending for integration - Defaults to [0.,1.], use only in 2D and 3D case - z_lims : [start, end] - The starting spatial coordinate and the ending for integration - Defaults to [0.,1.], use only in 3D case - res : int - The resolution of the integration - Defaults to 50 - - Returns - ------- - LFP : neo.AnalogSignal object - The potentials created by the csd profile at the electrode positions - The electrode postions are attached as RecordingChannel's coordinate +@deprecated_alias(ele_xx='x_positions', ele_yy='y_positions', + ele_zz='z_positions', xlims='x_limits', ylims='y_limits', + zlims='z_limits', res='resolution') +def generate_lfp(csd_profile, x_positions, y_positions=None, z_positions=None, + x_limits=[0., 1.], y_limits=[0., 1.], z_limits=[0., 1.], + resolution=50): + """ + Forward modelling for getting the potentials for testing Current Source + Density (CSD). + + Parameters + ---------- + csd_profile : callable + A function that computes true CSD profile. 
+ Available options are (see ./csd/utility_functions.py) + 1D : gauss_1d_dipole + 2D : large_source_2D and small_source_2D + 3D : gauss_3d_dipole + x_positions : np.ndarray + Positions of the x coordinates of the electrodes + y_positions : np.ndarray, optional + Positions of the y coordinates of the electrodes + Defaults to None, use in 2D or 3D cases only + z_positions : np.ndarray, optional + Positions of the z coordinates of the electrodes + Defaults to None, use in 3D case only + x_limits : list, optional + A list of [start, end]. + The starting spatial coordinate and the ending for integration + Defaults to [0.,1.] + y_limits : list, optional + A list of [start, end]. + The starting spatial coordinate and the ending for integration + Defaults to [0.,1.], use only in 2D and 3D case + z_limits : list, optional + A list of [start, end]. + The starting spatial coordinate and the ending for integration + Defaults to [0.,1.], use only in 3D case + resolution : int, optional + The resolution of the integration + Defaults to 50 + + Returns + ------- + LFP : neo.AnalogSignal + The potentials created by the csd profile at the electrode positions. + The electrode positions are attached as RecordingChannel's coordinate. """ def integrate_1D(x0, csd_x, csd, h): m = np.sqrt((csd_x - x0)**2 + h**2) - abs(csd_x - x0) @@ -272,49 +284,53 @@ def integrate_3D(x, y, z, xlim, ylim, zlim, csd, xlin, ylin, zlin, F = simps(Iy, xlin) return F dim = 1 - if ele_zz is not None: + if z_positions is not None: dim = 3 - elif ele_yy is not None: + elif y_positions is not None: dim = 2 - x = np.linspace(xlims[0], xlims[1], res) + x = np.linspace(x_limits[0], x_limits[1], resolution) if dim >= 2: - y = np.linspace(ylims[0], ylims[1], res) + y = np.linspace(y_limits[0], y_limits[1], resolution) if dim == 3: - z = np.linspace(zlims[0], zlims[1], res) + z = np.linspace(z_limits[0], z_limits[1], resolution) sigma = 1.0 h = 50. - pots = np.zeros(len(ele_xx)) + pots = np.zeros(len(x_positions)) if dim == 1: - chrg_x = np.linspace(xlims[0], xlims[1], res) + chrg_x = np.linspace(x_limits[0], x_limits[1], resolution) csd = csd_profile(chrg_x) - for ii in range(len(ele_xx)): - pots[ii] = integrate_1D(ele_xx[ii], chrg_x, csd, h) + for ii in range(len(x_positions)): + pots[ii] = integrate_1D(x_positions[ii], chrg_x, csd, h) pots /= 2. 
* sigma # eq.: 26 from Potworowski et al - ele_pos = ele_xx + ele_pos = x_positions elif dim == 2: - chrg_x, chrg_y = np.mgrid[xlims[0]:xlims[1]:np.complex(0, res), - ylims[0]:ylims[1]:np.complex(0, res)] + chrg_x, chrg_y = np.mgrid[ + x_limits[0]:x_limits[1]:np.complex(0, resolution), + y_limits[0]:y_limits[1]:np.complex(0, resolution)] csd = csd_profile(chrg_x, chrg_y) - for ii in range(len(ele_xx)): - pots[ii] = integrate_2D(ele_xx[ii], ele_yy[ii], + for ii in range(len(x_positions)): + pots[ii] = integrate_2D(x_positions[ii], y_positions[ii], x, y, csd, h, chrg_x, chrg_y) pots /= 2 * np.pi * sigma - ele_pos = np.vstack((ele_xx, ele_yy)).T + ele_pos = np.vstack((x_positions, y_positions)).T elif dim == 3: - chrg_x, chrg_y, chrg_z = np.mgrid[xlims[0]:xlims[1]:np.complex(0, res), - ylims[0]:ylims[1]:np.complex(0, res), - zlims[0]:zlims[1]:np.complex(0, res)] + chrg_x, chrg_y, chrg_z = np.mgrid[ + x_limits[0]:x_limits[1]:np.complex(0, resolution), + y_limits[0]:y_limits[1]:np.complex(0, resolution), + z_limits[0]:z_limits[1]:np.complex(0, resolution) + ] csd = csd_profile(chrg_x, chrg_y, chrg_z) xlin = chrg_x[:, 0, 0] ylin = chrg_y[0, :, 0] zlin = chrg_z[0, 0, :] - for ii in range(len(ele_xx)): - pots[ii] = integrate_3D(ele_xx[ii], ele_yy[ii], ele_zz[ii], - xlims, ylims, zlims, csd, + for ii in range(len(x_positions)): + pots[ii] = integrate_3D(x_positions[ii], y_positions[ii], + z_positions[ii], + x_limits, y_limits, z_limits, csd, xlin, ylin, zlin, chrg_x, chrg_y, chrg_z) pots /= 4 * np.pi * sigma - ele_pos = np.vstack((ele_xx, ele_yy, ele_zz)).T + ele_pos = np.vstack((x_positions, y_positions, z_positions)).T pots = np.reshape(pots, (-1, 1)) * pq.mV ele_pos = ele_pos * pq.mm lfp = [] diff --git a/elephant/gpfa/gpfa.py b/elephant/gpfa/gpfa.py index d888522c7..05129fc34 100644 --- a/elephant/gpfa/gpfa.py +++ b/elephant/gpfa/gpfa.py @@ -53,8 +53,10 @@ import numpy as np import quantities as pq import sklearn +import warnings from elephant.gpfa import gpfa_core, gpfa_util +from elephant.utils import deprecated_alias class GPFA(sklearn.base.BaseEstimator): @@ -205,6 +207,7 @@ class GPFA(sklearn.base.BaseEstimator): ... returned_data=['xorth', 'xsm']) """ + @deprecated_alias(binsize='bin_size') def __init__(self, bin_size=20 * pq.ms, x_dim=3, min_var_frac=0.01, tau_init=100.0 * pq.ms, eps_init=1.0E-3, em_tol=1.0E-8, em_max_iters=500, freq_ll=5, verbose=False): @@ -229,6 +232,11 @@ def __init__(self, bin_size=20 * pq.ms, x_dim=3, min_var_frac=0.01, self.fit_info = dict() self.transform_info = dict() + @property + def binsize(self): + warnings.warn("'binsize' is deprecated; use 'bin_size'") + return self.bin_size + def fit(self, spiketrains): """ Fit the model with the given training data. diff --git a/elephant/gpfa/gpfa_util.py b/elephant/gpfa/gpfa_util.py index 55c6496f8..09eb89fa1 100644 --- a/elephant/gpfa/gpfa_util.py +++ b/elephant/gpfa/gpfa_util.py @@ -15,8 +15,10 @@ import scipy as sp from elephant.conversion import BinnedSpikeTrain +from elephant.utils import deprecated_alias +@deprecated_alias(binsize='bin_size') def get_seqs(data, bin_size, use_sqrt=True): """ Converts the data into a rec array using internally BinnedSpikeTrain. 
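As a usage sketch of the renamed GPFA interface above: the spike-train generation call and parameter values are illustrative only, and the legacy `binsize` keyword is assumed to be re-mapped (with a `DeprecationWarning`) by the `deprecated_alias` decorator sketched earlier.

import quantities as pq
from elephant.gpfa import GPFA
from elephant.spike_train_generation import homogeneous_poisson_process

# one trial of 10 parallel Poisson spike trains (illustrative data)
trial = [homogeneous_poisson_process(10 * pq.Hz, t_stop=2 * pq.s)
         for _ in range(10)]

gpfa = GPFA(bin_size=20 * pq.ms, x_dim=3)     # new keyword name
gpfa.fit([trial])                             # fit on a list of trials

gpfa_old = GPFA(binsize=20 * pq.ms, x_dim=3)  # old keyword emits DeprecationWarning
print(gpfa_old.binsize)                       # the read-only alias property also warns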
@@ -59,13 +61,13 @@ def get_seqs(data, bin_size, use_sqrt=True): seqs = [] for dat in data: sts = dat - binned_sts = BinnedSpikeTrain(sts, binsize=bin_size) + binned_spiketrain = BinnedSpikeTrain(sts, bin_size=bin_size) if use_sqrt: - binned = np.sqrt(binned_sts.to_array()) + binned = np.sqrt(binned_spiketrain.to_array()) else: - binned = binned_sts.to_array() + binned = binned_spiketrain.to_array() seqs.append( - (binned_sts.num_bins, binned)) + (binned_spiketrain.n_bins, binned)) seqs = np.array(seqs, dtype=[('T', np.int), ('y', 'O')]) # Remove trials that are shorter than one bin width diff --git a/elephant/kernels.py b/elephant/kernels.py index fb7ac8ade..586a15ffd 100644 --- a/elephant/kernels.py +++ b/elephant/kernels.py @@ -47,6 +47,8 @@ import scipy.special import scipy.stats +from elephant.utils import deprecated_alias + __all__ = [ 'RectangularKernel', 'TriangularKernel', 'EpanechnikovLikeKernel', 'GaussianKernel', 'LaplacianKernel', 'ExponentialKernel', 'AlphaKernel' @@ -119,15 +121,15 @@ def __repr__(self): return "{cls}(sigma={sigma}, invert={invert})".format( cls=self.__class__.__name__, sigma=self.sigma, invert=self.invert) - def __call__(self, t): + @deprecated_alias(t='times') + def __call__(self, times): """ - Evaluates the kernel at all points in the array `t`. + Evaluates the kernel at all points in the array `times`. Parameters ---------- - t : pq.Quantity - Vector with the interval on which the kernel is evaluated, - not necessarily a time interval. + times : pq.Quantity + A vector with time intervals on which the kernel is evaluated. Returns ------- @@ -137,21 +139,21 @@ def __call__(self, t): Raises ------ TypeError - If `t` is not `pq.Quantity`. + If `times` is not `pq.Quantity`. - If the dimensionality of `t` and :attr:`sigma` are different. + If the dimensionality of `times` and :attr:`sigma` are different. """ - self._check_time_input(t) - return self._evaluate(t) + self._check_time_input(times) + return self._evaluate(times) - def _evaluate(self, t): + def _evaluate(self, times): """ Evaluates the kernel Probability Density Function, PDF. Parameters ---------- - t : pq.Quantity + times : pq.Quantity Vector with the interval on which the kernel is evaluated, not necessarily a time interval. @@ -240,19 +242,20 @@ def _check_time_input(self, t): "Otherwise a normalization to 1 of the kernel " "cannot be performed.") - def cdf(self, t): + @deprecated_alias(t='time') + def cdf(self, time): r""" Cumulative Distribution Function, CDF. Parameters ---------- - t : pq.Quantity + time : pq.Quantity The input time scalar. Returns ------- float - CDF at `t`. + CDF at `time`. """ raise NotImplementedError @@ -270,12 +273,13 @@ def icdf(self, fraction): Returns ------- pq.Quantity - The time scalar `t` such that `CDF(t) = fraction`. + The time scalar `times` such that `CDF(t) = fraction`. """ raise NotImplementedError - def median_index(self, t): + @deprecated_alias(t='times') + def median_index(self, times): r""" Estimates the index of the Median of the kernel. @@ -294,7 +298,7 @@ def median_index(self, t): Parameters ---------- - t : pq.Quantity + times : pq.Quantity Vector with the interval on which the kernel is evaluated. 
Returns @@ -317,24 +321,24 @@ def median_index(self, t): Kernel.icdf : inverse cumulative distribution function """ - self._check_time_input(t) - if len(t) == 0: + self._check_time_input(times) + if len(times) == 0: raise ValueError("The input time array is empty.") - if len(t) <= 2: + if len(times) <= 2: # either left or right; choose left return 0 - is_sorted = (np.diff(t.magnitude) >= 0).all() + is_sorted = (np.diff(times.magnitude) >= 0).all() if not is_sorted: raise ValueError("The input time array must be sorted (in " "ascending order).") - cdf_mean = 0.5 * (self.cdf(t[0]) + self.cdf(t[-1])) + cdf_mean = 0.5 * (self.cdf(times[0]) + self.cdf(times[-1])) if cdf_mean == 0.: # any index of the kernel non-support is valid; choose median - return len(t) // 2 + return len(times) // 2 icdf = self.icdf(fraction=cdf_mean) - icdf = icdf.rescale(t.units).magnitude + icdf = icdf.rescale(times.units).magnitude # icdf is guaranteed to be in (t_start, t_end) interval - median_index = np.nonzero(t.magnitude >= icdf)[0][0] + median_index = np.nonzero(times.magnitude >= icdf)[0][0] return median_index def is_symmetric(self): @@ -412,19 +416,20 @@ def min_cutoff(self): min_cutoff = np.sqrt(3.0) return min_cutoff - def _evaluate(self, t): - t_units = t.units - t_abs = np.abs(t.magnitude) + def _evaluate(self, times): + t_units = times.units + t_abs = np.abs(times.magnitude) tau = math.sqrt(3) * self.sigma.rescale(t_units).magnitude kernel = (t_abs < tau) * 1 / (2 * tau) kernel = pq.Quantity(kernel, units=1 / t_units) return kernel - def cdf(self, t): - self._check_time_input(t) - tau = math.sqrt(3) * self.sigma.rescale(t.units).magnitude - t = np.clip(t.magnitude, a_min=-tau, a_max=tau) - cdf = (t + tau) / (2 * tau) + @deprecated_alias(t='time') + def cdf(self, time): + self._check_time_input(time) + tau = math.sqrt(3) * self.sigma.rescale(time.units).magnitude + time = np.clip(time.magnitude, a_min=-tau, a_max=tau) + cdf = (time + tau) / (2 * tau) return cdf def icdf(self, fraction): @@ -479,17 +484,18 @@ def min_cutoff(self): min_cutoff = np.sqrt(6.0) return min_cutoff - def _evaluate(self, t): - tau = math.sqrt(6) * self.sigma.rescale(t.units).magnitude - kernel = scipy.stats.triang.pdf(t.magnitude, c=0.5, loc=-tau, + def _evaluate(self, times): + tau = math.sqrt(6) * self.sigma.rescale(times.units).magnitude + kernel = scipy.stats.triang.pdf(times.magnitude, c=0.5, loc=-tau, scale=2 * tau) - kernel = pq.Quantity(kernel, units=1 / t.units) + kernel = pq.Quantity(kernel, units=1 / times.units) return kernel - def cdf(self, t): - self._check_time_input(t) - tau = math.sqrt(6) * self.sigma.rescale(t.units).magnitude - cdf = scipy.stats.triang.cdf(t.magnitude, c=0.5, loc=-tau, + @deprecated_alias(t='time') + def cdf(self, time): + self._check_time_input(time) + tau = math.sqrt(6) * self.sigma.rescale(time.units).magnitude + cdf = scipy.stats.triang.cdf(time.magnitude, c=0.5, loc=-tau, scale=2 * tau) return cdf @@ -554,17 +560,18 @@ def min_cutoff(self): min_cutoff = np.sqrt(5.0) return min_cutoff - def _evaluate(self, t): - tau = math.sqrt(5) * self.sigma.rescale(t.units).magnitude - t_div_tau = np.clip(t.magnitude / tau, a_min=-1, a_max=1) + def _evaluate(self, times): + tau = math.sqrt(5) * self.sigma.rescale(times.units).magnitude + t_div_tau = np.clip(times.magnitude / tau, a_min=-1, a_max=1) kernel = 3. / (4. 
* tau) * np.maximum(0., 1 - t_div_tau ** 2) - kernel = pq.Quantity(kernel, units=1 / t.units) + kernel = pq.Quantity(kernel, units=1 / times.units) return kernel - def cdf(self, t): - self._check_time_input(t) - tau = math.sqrt(5) * self.sigma.rescale(t.units).magnitude - t_div_tau = np.clip(t.magnitude / tau, a_min=-1, a_max=1) + @deprecated_alias(t='time') + def cdf(self, time): + self._check_time_input(time) + tau = math.sqrt(5) * self.sigma.rescale(time.units).magnitude + t_div_tau = np.clip(time.magnitude / tau, a_min=-1, a_max=1) cdf = 3. / 4 * (t_div_tau - t_div_tau ** 3 / 3.) + 0.5 return cdf @@ -658,16 +665,17 @@ def min_cutoff(self): min_cutoff = 3.0 return min_cutoff - def _evaluate(self, t): - sigma = self.sigma.rescale(t.units).magnitude - kernel = scipy.stats.norm.pdf(t.magnitude, loc=0, scale=sigma) - kernel = pq.Quantity(kernel, units=1 / t.units) + def _evaluate(self, times): + sigma = self.sigma.rescale(times.units).magnitude + kernel = scipy.stats.norm.pdf(times.magnitude, loc=0, scale=sigma) + kernel = pq.Quantity(kernel, units=1 / times.units) return kernel - def cdf(self, t): - self._check_time_input(t) - sigma = self.sigma.rescale(t.units).magnitude - cdf = scipy.stats.norm.cdf(t, loc=0, scale=sigma) + @deprecated_alias(t='time') + def cdf(self, time): + self._check_time_input(time) + sigma = self.sigma.rescale(time.units).magnitude + cdf = scipy.stats.norm.cdf(time, loc=0, scale=sigma) return cdf def icdf(self, fraction): @@ -719,16 +727,17 @@ def min_cutoff(self): min_cutoff = 3.0 return min_cutoff - def _evaluate(self, t): - tau = self.sigma.rescale(t.units).magnitude / math.sqrt(2) - kernel = scipy.stats.laplace.pdf(t.magnitude, loc=0, scale=tau) - kernel = pq.Quantity(kernel, units=1 / t.units) + def _evaluate(self, times): + tau = self.sigma.rescale(times.units).magnitude / math.sqrt(2) + kernel = scipy.stats.laplace.pdf(times.magnitude, loc=0, scale=tau) + kernel = pq.Quantity(kernel, units=1 / times.units) return kernel - def cdf(self, t): - self._check_time_input(t) - tau = self.sigma.rescale(t.units).magnitude / math.sqrt(2) - cdf = scipy.stats.laplace.cdf(t.magnitude, loc=0, scale=tau) + @deprecated_alias(t='time') + def cdf(self, time): + self._check_time_input(time) + tau = self.sigma.rescale(time.units).magnitude / math.sqrt(2) + cdf = scipy.stats.laplace.cdf(time.magnitude, loc=0, scale=tau) return cdf def icdf(self, fraction): @@ -785,23 +794,24 @@ def min_cutoff(self): min_cutoff = 3.0 return min_cutoff - def _evaluate(self, t): - tau = self.sigma.rescale(t.units).magnitude + def _evaluate(self, times): + tau = self.sigma.rescale(times.units).magnitude if self.invert: - t = -t - kernel = scipy.stats.expon.pdf(t.magnitude, loc=0, scale=tau) - kernel = pq.Quantity(kernel, units=1 / t.units) + times = -times + kernel = scipy.stats.expon.pdf(times.magnitude, loc=0, scale=tau) + kernel = pq.Quantity(kernel, units=1 / times.units) return kernel - def cdf(self, t): - self._check_time_input(t) - tau = self.sigma.rescale(t.units).magnitude - t = t.magnitude + @deprecated_alias(t='time') + def cdf(self, time): + self._check_time_input(time) + tau = self.sigma.rescale(time.units).magnitude + time = time.magnitude if self.invert: - t = np.minimum(t, 0) - return np.exp(t / tau) - t = np.maximum(t, 0) - return 1. - np.exp(-t / tau) + time = np.minimum(time, 0) + return np.exp(time / tau) + time = np.maximum(time, 0) + return 1. 
- np.exp(-time / tau) def icdf(self, fraction): self._check_fraction(fraction) @@ -855,20 +865,21 @@ def min_cutoff(self): min_cutoff = 3.0 return min_cutoff - def _evaluate(self, t): - t_units = t.units + def _evaluate(self, times): + t_units = times.units tau = self.sigma.rescale(t_units).magnitude / math.sqrt(2) - t = t.magnitude + times = times.magnitude if self.invert: - t = -t - kernel = (t >= 0) * 1 / tau ** 2 * t * np.exp(-t / tau) + times = -times + kernel = (times >= 0) * 1 / tau ** 2 * times * np.exp(-times / tau) kernel = pq.Quantity(kernel, units=1 / t_units) return kernel - def cdf(self, t): - self._check_time_input(t) - tau = self.sigma.rescale(t.units).magnitude / math.sqrt(2) - cdf = self._cdf_stripped(t.magnitude, tau) + @deprecated_alias(t='time') + def cdf(self, time): + self._check_time_input(time) + tau = self.sigma.rescale(time.units).magnitude / math.sqrt(2) + cdf = self._cdf_stripped(time.magnitude, tau) return cdf def _cdf_stripped(self, t, tau): diff --git a/elephant/neo_tools.py b/elephant/neo_tools.py index a8ff9aa77..0a29c8128 100644 --- a/elephant/neo_tools.py +++ b/elephant/neo_tools.py @@ -7,20 +7,23 @@ """ from __future__ import division, print_function, unicode_literals +import warnings from itertools import chain from neo.core.container import unique_objs +from elephant.utils import deprecated_alias -def extract_neo_attrs(obj, parents=True, child_first=True, - skip_array=False, skip_none=False): +@deprecated_alias(obj='neo_object') +def extract_neo_attributes(neo_object, parents=True, child_first=True, + skip_array=False, skip_none=False): """ Given a Neo object, return a dictionary of attributes and annotations. Parameters ---------- - obj : neo.BaseNeo + neo_object : neo.BaseNeo Object to get attributes and annotations. parents : bool, optional If True, also include attributes and annotations from parent Neo @@ -46,22 +49,24 @@ def extract_neo_attrs(obj, parents=True, child_first=True, the values are the corresponding annotation or attribute value. """ - attrs = obj.annotations.copy() - if not skip_array and hasattr(obj, "array_annotations"): + attrs = neo_object.annotations.copy() + if not skip_array and hasattr(neo_object, "array_annotations"): # Exclude labels and durations, and any other fields that should not # be a part of array_annotation. 
- required_keys = set(obj.array_annotations).difference(dir(obj)) + required_keys = set(neo_object.array_annotations).difference( + dir(neo_object)) for a in required_keys: if "array_annotations" not in attrs: attrs["array_annotations"] = {} - attrs["array_annotations"][a] = obj.array_annotations[a].copy() - for attr in obj._necessary_attrs + obj._recommended_attrs: + attrs["array_annotations"][a] = \ + neo_object.array_annotations[a].copy() + for attr in neo_object._necessary_attrs + neo_object._recommended_attrs: if skip_array and len(attr) >= 3 and attr[2]: continue attr = attr[0] - if attr == getattr(obj, '_quantity_attr', None): + if attr == getattr(neo_object, '_quantity_attr', None): continue - attrs[attr] = getattr(obj, attr, None) + attrs[attr] = getattr(neo_object, attr, None) if skip_none: for attr, value in attrs.copy().items(): @@ -71,13 +76,13 @@ def extract_neo_attrs(obj, parents=True, child_first=True, if not parents: return attrs - for parent in getattr(obj, 'parents', []): + for parent in getattr(neo_object, 'parents', []): if parent is None: continue - newattr = extract_neo_attrs(parent, parents=True, - child_first=child_first, - skip_array=skip_array, - skip_none=skip_none) + newattr = extract_neo_attributes(parent, parents=True, + child_first=child_first, + skip_array=skip_array, + skip_none=skip_none) if child_first: newattr.update(attrs) attrs = newattr @@ -87,7 +92,13 @@ def extract_neo_attrs(obj, parents=True, child_first=True, return attrs -def _get_all_objs(container, classname): +def extract_neo_attrs(*args, **kwargs): + warnings.warn("'extract_neo_attrs' function is deprecated; " + "use 'extract_neo_attributes'", DeprecationWarning) + return extract_neo_attributes(*args, **kwargs) + + +def _get_all_objs(container, class_name): """ Get all Neo objects of a given type from a container. @@ -101,7 +112,7 @@ def _get_all_objs(container, classname): ---------- container : list, tuple, iterable, dict, neo.Container The container for the Neo objects. - classname : str + class_name : str The name of the class, with proper capitalization (i.e., 'SpikeTrain', not 'Spiketrain' or 'spiketrain'). @@ -116,20 +127,20 @@ def _get_all_objs(container, classname): If can not handle containers of the type passed in `container`. """ - if container.__class__.__name__ == classname: + if container.__class__.__name__ == class_name: return [container] - classholder = classname.lower() + 's' + classholder = class_name.lower() + 's' if hasattr(container, classholder): vals = getattr(container, classholder) elif hasattr(container, 'list_children_by_class'): - vals = container.list_children_by_class(classname) + vals = container.list_children_by_class(class_name) elif hasattr(container, 'values') and not hasattr(container, 'ndim'): vals = container.values() elif hasattr(container, '__iter__') and not hasattr(container, 'ndim'): vals = container else: raise ValueError('Cannot handle object of type %s' % type(container)) - res = list(chain.from_iterable(_get_all_objs(obj, classname) + res = list(chain.from_iterable(_get_all_objs(obj, class_name) for obj in vals)) return unique_objs(res) @@ -147,8 +158,9 @@ def get_all_spiketrains(container): Parameters ---------- - container : list, tuple, iterable, dict, neo.Block, neo.Segment, neo.Unit, neo.ChannelIndex - The container for the spiketrains. + container : list, tuple, iterable, dict, neo.Block, neo.Segment, neo.Unit, + neo.ChannelIndex + The container for the spiketrains. 
Returns ------- diff --git a/elephant/pandas_bridge.py b/elephant/pandas_bridge.py index c18803752..8527994af 100644 --- a/elephant/pandas_bridge.py +++ b/elephant/pandas_bridge.py @@ -13,7 +13,7 @@ import warnings import quantities as pq -from elephant.neo_tools import (extract_neo_attrs, get_all_epochs, +from elephant.neo_tools import (extract_neo_attributes, get_all_epochs, get_all_events, get_all_spiketrains) @@ -95,8 +95,8 @@ def _extract_neo_attrs_safe(obj, parents=True, child_first=True): the values are the corresponding annotation or attribute value. """ - res = extract_neo_attrs(obj, skip_array=True, skip_none=True, - parents=parents, child_first=child_first) + res = extract_neo_attributes(obj, skip_array=True, skip_none=True, + parents=parents, child_first=child_first) for key, value in res.items(): res[key] = _convert_value_safe(value) key2 = _convert_value_safe(key) diff --git a/elephant/parallel/__init__.py b/elephant/parallel/__init__.py index 12026dc1f..1295aed68 100644 --- a/elephant/parallel/__init__.py +++ b/elephant/parallel/__init__.py @@ -17,7 +17,8 @@ Run tutorial interactively: .. image:: https://mybinder.org/badge.svg - :target: https://mybinder.org/v2/gh/NeuralEnsemble/elephant/master?filepath=doc/tutorials/parallel.ipynb + :target: https://mybinder.org/v2/gh/NeuralEnsemble/elephant/master + ?filepath=doc/tutorials/parallel.ipynb Available Executors diff --git a/elephant/parallel/parallel.py b/elephant/parallel/parallel.py index ba6c7d2bc..84acb91c3 100644 --- a/elephant/parallel/parallel.py +++ b/elephant/parallel/parallel.py @@ -40,8 +40,8 @@ def execute(self, handler, args_iterate, **kwargs): ------- results : list The result of applying the `handler` for each `arg` in the - `args_iterate`. The `i`-th item of the resulted list corresponds to - `args_iterate[i]` (the order is preserved). + `args_iterate`. The `i`-th item of the resulting list corresponds + to `args_iterate[i]` (the order is preserved). """ handler = self._update_handler(handler, **kwargs) results = [handler(arg) for arg in args_iterate] @@ -94,8 +94,8 @@ def execute(self, handler, args_iterate, **kwargs): ------- results : list The result of applying the `handler` for each `arg` in the - `args_iterate`. The `i`-th item of the resulted list corresponds to - `args_iterate[i]` (the order is preserved). + `args_iterate`. The `i`-th item of the resulting list corresponds + to `args_iterate[i]` (the order is preserved). """ handler = self._update_handler(handler, **kwargs) diff --git a/elephant/signal_processing.py b/elephant/signal_processing.py index 19c5d2517..0d5155f4c 100644 --- a/elephant/signal_processing.py +++ b/elephant/signal_processing.py @@ -14,6 +14,8 @@ import quantities as pq import scipy.signal +from elephant.utils import deprecated_alias + def zscore(signal, inplace=True): r""" @@ -151,8 +153,10 @@ def zscore(signal, inplace=True): return signal_ztransofrmed -def cross_correlation_function(signal, ch_pairs, env=False, nlags=None, - scaleopt='unbiased'): +@deprecated_alias(ch_pairs='channel_pairs', nlags='n_lags', + env='hilbert_envelope') +def cross_correlation_function(signal, channel_pairs, hilbert_envelope=False, + n_lags=None, scaleopt='unbiased'): r""" Computes unbiased estimator of the cross-correlation function. 
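For orientation, the renamed cross-correlation interface introduced here would be called as in the following sketch. The test signal is illustrative, and the old keyword names (`ch_pairs`, `nlags`) are assumed to be re-mapped, with a `DeprecationWarning`, by `deprecated_alias`.

import numpy as np
import quantities as pq
import neo
from elephant.signal_processing import cross_correlation_function

# two 10 Hz sine/cosine channels sampled at 1 kHz (illustrative data)
t = np.arange(0, 1, 0.001)
x = np.column_stack([np.sin(2 * np.pi * 10 * t),
                     np.cos(2 * np.pi * 10 * t)])
signal = neo.AnalogSignal(x, units='mV', sampling_rate=1000 * pq.Hz)

# new keyword names
rho = cross_correlation_function(signal, [0, 1], n_lags=150)
env = cross_correlation_function(signal, [0, 1], n_lags=150,
                                 hilbert_envelope=True)

# old keyword names still work for now, but emit a DeprecationWarning
rho_old = cross_correlation_function(signal, ch_pairs=[0, 1], nlags=150)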
@@ -165,9 +169,9 @@ def cross_correlation_function(signal, ch_pairs, env=False, nlags=None, where :math:`R'(\tau) = \left` in a pairwise manner, i.e.: - `signal[ch_pairs[0,0]]` vs `signal[ch_pairs[0,1]]`, + `signal[channel_pairs[0,0]]` vs `signal[channel_pairs[0,1]]`, - `signal[ch_pairs[1,0]]` vs `signal[ch_pairs[1,1]]`, + `signal[channel_pairs[1,0]]` vs `signal[channel_pairs[1,1]]`, and so on. @@ -180,17 +184,18 @@ def cross_correlation_function(signal, ch_pairs, env=False, nlags=None, ---------- signal : (nt, nch) neo.AnalogSignal Signal with `nt` number of samples that contains `nch` LFP channels. - ch_pairs : list or (n, 2) np.ndarray + channel_pairs : list or (n, 2) np.ndarray List with `n` channel pairs for which to compute cross-correlation. Each element of the list must contain 2 channel indices. If `np.ndarray`, the second axis must have dimension 2. - env : bool, optional - If True, returns the Hilbert envelope of cross-correlation function. + hilbert_envelope : bool, optional + If True, returns the Hilbert envelope of cross-correlation function + result. Default: False. - nlags : int, optional + n_lags : int, optional Defines the number of lags for cross-correlation function. If a `float` is passed, it will be rounded to the nearest integer. Number of - samples of output is `2*nlags+1`. + samples of output is `2*n_lags+1`. If None, the number of samples of the output is equal to the number of samples of the input signal (namely `nt`). Default: None. @@ -225,22 +230,24 @@ def cross_correlation_function(signal, ch_pairs, env=False, nlags=None, Returns ------- cross_corr : neo.AnalogSignal - Shape: `[2*nlags+1, n]` + Shape: `[2*n_lags+1, n]` Pairwise cross-correlation functions for channel pairs given by - `ch_pairs`. If `env` is True, the output is the Hilbert envelope of - the pairwise cross-correlation function. This is helpful to compute - the correlation length for oscillating cross-correlation functions. + `channel_pairs`. If `hilbert_envelope` is True, the output is the + Hilbert envelope of the pairwise cross-correlation function. This is + helpful to compute the correlation length for oscillating + cross-correlation functions. Raises ------ ValueError If input `signal` is not a `neo.AnalogSignal`. - If `ch_pairs` is not a list of channel pair indices with shape `(n,2)`. + If `channel_pairs` is not a list of channel pair indices with shape + `(n,2)`. - If `env` is not a boolean. + If `hilbert_envelope` is not a boolean. - If `nlags` is not a positive integer. + If `n_lags` is not a positive integer. If `scaleopt` is not one of the predefined above keywords. @@ -267,9 +274,9 @@ def cross_correlation_function(signal, ch_pairs, env=False, nlags=None, >>> # Generate neo.AnalogSignals from x and find cross-correlation >>> signal = neo.AnalogSignal(x, units='mV', t_start=0.*pq.ms, >>> sampling_rate=1/dt*pq.Hz, dtype=float) - >>> rho = cross_correlation_function(signal, [0,1], nlags=150) - >>> env = cross_correlation_function(signal, [0,1], nlags=150, - ... env=True) + >>> rho = cross_correlation_function(signal, [0,1], n_lags=150) + >>> env = cross_correlation_function(signal, [0,1], n_lags=150, + ... hilbert_envelope=True) ... 
>>> plt.plot(rho.times, rho) >>> plt.plot(env.times, env) # should be equal to one @@ -277,8 +284,8 @@ def cross_correlation_function(signal, ch_pairs, env=False, nlags=None, """ - # Make ch_pairs a 2D array - pairs = np.asarray(ch_pairs) + # Make channel_pairs a 2D array + pairs = np.asarray(channel_pairs) if pairs.ndim == 1: pairs = np.expand_dims(pairs, axis=0) @@ -286,13 +293,13 @@ def cross_correlation_function(signal, ch_pairs, env=False, nlags=None, if not isinstance(signal, neo.AnalogSignal): raise ValueError('Input signal must be of type neo.AnalogSignal') if pairs.shape[1] != 2: - raise ValueError('`ch_pairs` is not a list of channel pair indices. ' - 'Cannot define pairs for cross-correlation.') - if not isinstance(env, bool): - raise ValueError('`env` must be a boolean value') - if nlags is not None: - if not isinstance(nlags, int) or nlags <= 0: - raise ValueError('nlags must be a non-negative integer') + raise ValueError("'channel_pairs' is not a list of channel pair " + "indices. Cannot define pairs for cross-correlation.") + if not isinstance(hilbert_envelope, bool): + raise ValueError("'hilbert_envelope' must be a boolean value") + if n_lags is not None: + if not isinstance(n_lags, int) or n_lags <= 0: + raise ValueError('n_lags must be a non-negative integer') # z-score analog signal and store channel time series in different arrays # Cross-correlation will be calculated between xsig and ysig @@ -321,13 +328,13 @@ def cross_correlation_function(signal, ch_pairs, env=False, nlags=None, # Calculate envelope of cross-correlation function with Hilbert transform. # This is useful for transient oscillatory signals. - if env: + if hilbert_envelope: xcorr = np.abs(scipy.signal.hilbert(xcorr, axis=0)) # Cut off lags outside the desired range - if nlags is not None: + if n_lags is not None: tau0 = np.argwhere(tau == 0).item() - xcorr = xcorr[tau0 - nlags: tau0 + nlags + 1, :] + xcorr = xcorr[tau0 - n_lags: tau0 + n_lags + 1, :] # Return neo.AnalogSignal cross_corr = neo.AnalogSignal(xcorr, @@ -339,13 +346,16 @@ def cross_correlation_function(signal, ch_pairs, env=False, nlags=None, return cross_corr -def butter(signal, highpass_freq=None, lowpass_freq=None, order=4, - filter_function='filtfilt', fs=1.0, axis=-1): +@deprecated_alias(highpass_freq='highpass_frequency', + lowpass_freq='lowpass_frequency', + fs='sampling_frequency') +def butter(signal, highpass_frequency=None, lowpass_frequency=None, order=4, + filter_function='filtfilt', sampling_frequency=1.0, axis=-1): """ Butterworth filtering function for `neo.AnalogSignal`. - Filter type is determined according to how values of `highpass_freq` and - `lowpass_freq` are given (see "Parameters" section for details). + Filter type is determined according to how values of `highpass_frequency` + and `lowpass_frequency` are given (see "Parameters" section for details). Parameters ---------- @@ -353,23 +363,25 @@ def butter(signal, highpass_freq=None, lowpass_freq=None, order=4, Time series data to be filtered. If `pq.Quantity` or `np.ndarray`, the sampling frequency should be given through the keyword argument `fs`. - highpass_freq : pq.Quantity of float, optional + highpass_frequency : pq.Quantity of float, optional High-pass cut-off frequency. If `float`, the given value is taken as frequency in Hz. Default: None. - lowpass_freq : pq.Quantity or float, optional + lowpass_frequency : pq.Quantity or float, optional Low-pass cut-off frequency. If `float`, the given value is taken as frequency in Hz. 
- Filter type is determined depending on the values of `lowpass_freq` - and `highpass_freq`: + Filter type is determined depending on the values of + `lowpass_frequency` and `highpass_frequency`: - * `highpass_freq` only (`lowpass_freq` is None): highpass filter + * `highpass_frequency` only (`lowpass_frequency` is None): + highpass filter - * `lowpass_freq` only (`highpass_freq` is None): lowpass filter + * `lowpass_frequency` only (`highpass_frequency` is None): + lowpass filter - * `highpass_freq` < `lowpass_freq`: bandpass filter + * `highpass_frequency` < `lowpass_frequency`: bandpass filter - * `highpass_freq` > `lowpass_freq`: bandstop filter + * `highpass_frequency` > `lowpass_frequency`: bandstop filter Default: None. order : int, optional @@ -389,7 +401,7 @@ def butter(signal, highpass_freq=None, lowpass_freq=None, order=4, filtering, in particular higher order filters, use 'sosfiltfilt' (see [1]_). Default: 'filtfilt'. - fs : pq.Quantity or float, optional + sampling_frequency : pq.Quantity or float, optional The sampling frequency of the input time series. When given as `float`, its value is taken as frequency in Hz. When `signal` is given as `neo.AnalogSignal`, its attribute is used to specify the sampling @@ -411,7 +423,7 @@ def butter(signal, highpass_freq=None, lowpass_freq=None, order=4, If `filter_function` is not one of 'lfilter', 'filtfilt', or 'sosfiltfilt'. - If both `highpass_freq` and `lowpass_freq` are None. + If both `highpass_frequency` and `lowpass_frequency` are None. References ---------- @@ -426,30 +438,30 @@ def butter(signal, highpass_freq=None, lowpass_freq=None, order=4, available_filters=available_filters)) # design filter if hasattr(signal, 'sampling_rate'): - fs = signal.sampling_rate.rescale(pq.Hz).magnitude - if isinstance(highpass_freq, pq.quantity.Quantity): - highpass_freq = highpass_freq.rescale(pq.Hz).magnitude - if isinstance(lowpass_freq, pq.quantity.Quantity): - lowpass_freq = lowpass_freq.rescale(pq.Hz).magnitude - Fn = fs / 2. + sampling_frequency = signal.sampling_rate.rescale(pq.Hz).magnitude + if isinstance(highpass_frequency, pq.quantity.Quantity): + highpass_frequency = highpass_frequency.rescale(pq.Hz).magnitude + if isinstance(lowpass_frequency, pq.quantity.Quantity): + lowpass_frequency = lowpass_frequency.rescale(pq.Hz).magnitude + Fn = sampling_frequency / 2. 
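The filter-type selection just below chooses a lowpass, highpass, bandpass, or bandstop Butterworth filter depending on which cut-off frequencies are given. A short usage sketch (frequency values are arbitrary, and the legacy `highpass_freq`/`lowpass_freq`/`fs` keywords are assumed to be re-mapped by `deprecated_alias`):

import numpy as np
import quantities as pq
import neo
from elephant.signal_processing import butter

# 10 s of white noise in one channel, sampled at 1 kHz (illustrative data)
signal = neo.AnalogSignal(np.random.randn(10000, 1), units='mV',
                          sampling_rate=1000 * pq.Hz)

low = butter(signal, lowpass_frequency=100 * pq.Hz)    # lowpass
high = butter(signal, highpass_frequency=30 * pq.Hz)   # highpass
band = butter(signal, highpass_frequency=8 * pq.Hz,
              lowpass_frequency=12 * pq.Hz)            # bandpass
stop = butter(signal, highpass_frequency=60 * pq.Hz,
              lowpass_frequency=45 * pq.Hz)            # bandstop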
# filter type is determined according to the values of cut-off # frequencies - if lowpass_freq and highpass_freq: - if highpass_freq < lowpass_freq: - Wn = (highpass_freq / Fn, lowpass_freq / Fn) + if lowpass_frequency and highpass_frequency: + if highpass_frequency < lowpass_frequency: + Wn = (highpass_frequency / Fn, lowpass_frequency / Fn) btype = 'bandpass' else: - Wn = (lowpass_freq / Fn, highpass_freq / Fn) + Wn = (lowpass_frequency / Fn, highpass_frequency / Fn) btype = 'bandstop' - elif lowpass_freq: - Wn = lowpass_freq / Fn + elif lowpass_frequency: + Wn = lowpass_frequency / Fn btype = 'lowpass' - elif highpass_freq: - Wn = highpass_freq / Fn + elif highpass_frequency: + Wn = highpass_frequency / Fn btype = 'highpass' else: raise ValueError( - "Either highpass_freq or lowpass_freq must be given" + "Either highpass_frequency or lowpass_frequency must be given" ) if filter_function == 'sosfiltfilt': output = 'sos' @@ -488,7 +500,9 @@ def butter(signal, highpass_freq=None, lowpass_freq=None, order=4, return filtered_data -def wavelet_transform(signal, freq, nco=6.0, fs=1.0, zero_padding=True): +@deprecated_alias(nco='n_cycles', freq='frequency', fs='sampling_frequency') +def wavelet_transform(signal, frequency, n_cycles=6.0, sampling_frequency=1.0, + zero_padding=True): r""" Compute the wavelet transform of a given signal with Morlet mother wavelet. @@ -502,19 +516,20 @@ def wavelet_transform(signal, freq, nco=6.0, fs=1.0, zero_padding=True): `np.ndarray` or list is given, the time axis must be the last dimension. If `neo.AnalogSignal`, `Nt` is the number of time points and `Nch` is the number of channels. - freq : float or list of float + frequency : float or list of float Center frequency of the Morlet wavelet in Hz. Multiple center frequencies can be given as a list, in which case the function computes the wavelet transforms for all the given frequencies at once. - nco : float, optional + n_cycles : float, optional Size of the mother wavelet (approximate number of oscillation cycles - within a wavelet). A larger `nco` value leads to a higher frequency - resolution and a lower temporal resolution, and vice versa. Typically - used values are in a range of 3–8, but one should be cautious when - using a value smaller than ~ 6, in which case the admissibility of the - wavelet is not ensured (cf. [2]_). + within a wavelet). Corresponds to :math:`nco` in the paper [1]_. + A larger `n_cycles` value leads to a higher frequency resolution and a + lower temporal resolution, and vice versa. + Typically used values are in a range of 3–8, but one should be cautious + when using a value smaller than ~ 6, in which case the admissibility of + the wavelet is not ensured (cf. [2]_). Default: 6.0. - fs : float, optional + sampling_frequency : float, optional Sampling rate of the input data in Hz. When `signal` is given as a `neo.AnalogSignal`, the sampling frequency is taken from its attribute and this parameter is ignored. @@ -530,8 +545,8 @@ def wavelet_transform(signal, freq, nco=6.0, fs=1.0, zero_padding=True): Returns ------- signal_wt : np.ndarray - Wavelet transform of the input data. When `freq` was given as a list, - the way how the wavelet transforms for different frequencies are + Wavelet transform of the input data. 
When `frequency` was given as a + list, the way how the wavelet transforms for different frequencies are returned depends on the input type: * when the input was a `neo.AnalogSignal`, the returned array has @@ -551,15 +566,15 @@ def wavelet_transform(signal, freq, nco=6.0, fs=1.0, zero_padding=True): Raises ------ ValueError - If `freq` (or one of the values in `freq` when it is a list) is - greater than the half of `fs`. + If `frequency` (or one of the values in `frequency` when it is a list) + is greater than the half of `sampling_frequency`. - If `nco` is not positive. + If `n_cycles` is not positive. Notes ----- - `nco` is related to the wavelet number :math:`w` as - :math:`w \sim 2 \pi \frac{'nco'}{6}`, as defined in [1]_. + `n_cycles` is related to the wavelet number :math:`w` as + :math:`w \sim 2 \pi \frac{n_{\text{cycles}}}{6}`, as defined in [1]_. References ---------- @@ -571,10 +586,10 @@ def wavelet_transform(signal, freq, nco=6.0, fs=1.0, zero_padding=True): Turbulence," Annu Rev Fluid Mech, vol. 24, pp. 395–458, 1992. """ - def _morlet_wavelet_ft(freq, nco, fs, n): + def _morlet_wavelet_ft(freq, n_cycles, fs, n): # Generate the Fourier transform of Morlet wavelet as defined # in Le van Quyen et al. J Neurosci Meth 111:83-98 (2001). - sigma = nco / (6. * freq) + sigma = n_cycles / (6. * freq) freqs = np.fft.fftfreq(n, 1.0 / fs) heaviside = np.array(freqs > 0., dtype=np.float) ft_real = np.sqrt(2 * np.pi * freq) * sigma * np.exp( @@ -591,26 +606,27 @@ def _morlet_wavelet_ft(freq, nco, fs, n): # When the input is AnalogSignal, use its attribute to specify the # sampling frequency if hasattr(signal, 'sampling_rate'): - fs = signal.sampling_rate - if isinstance(fs, pq.quantity.Quantity): - fs = fs.rescale('Hz').magnitude + sampling_frequency = signal.sampling_rate + if isinstance(sampling_frequency, pq.quantity.Quantity): + sampling_frequency = sampling_frequency.rescale('Hz').magnitude - if isinstance(freq, (list, tuple, np.ndarray)): - freqs = np.asarray(freq) + if isinstance(frequency, (list, tuple, np.ndarray)): + freqs = np.asarray(frequency) else: - freqs = np.array([freq, ]) + freqs = np.array([frequency, ]) if isinstance(freqs[0], pq.quantity.Quantity): freqs = [f.rescale('Hz').magnitude for f in freqs] # check whether the given central frequencies are less than the # Nyquist frequency of the signal - if np.any(freqs >= fs / 2): - raise ValueError("`freq` must be less than the half of " + - "the sampling rate `fs` = {} Hz".format(fs)) + if np.any(freqs >= sampling_frequency / 2): + raise ValueError("'frequency' elements must be less than the half of " + "the 'sampling_frequency' ({}) Hz" + .format(sampling_frequency)) - # check if nco is positive - if nco <= 0: - raise ValueError("`nco` must be positive") + # check if n_cycles is positive + if n_cycles <= 0: + raise ValueError("`n_cycles` must be positive") n_orig = data.shape[-1] if zero_padding: @@ -621,7 +637,7 @@ def _morlet_wavelet_ft(freq, nco, fs, n): # generate Morlet wavelets (in the frequency domain) wavelet_fts = np.empty([len(freqs), n], dtype=np.complex) for i, f in enumerate(freqs): - wavelet_fts[i] = _morlet_wavelet_ft(f, nco, fs, n) + wavelet_fts[i] = _morlet_wavelet_ft(f, n_cycles, sampling_frequency, n) # perform wavelet transform by convoluting the signal with the wavelets if data.ndim == 1: @@ -633,18 +649,19 @@ def _morlet_wavelet_ft(freq, nco, fs, n): # reshape the result array according to the input if isinstance(signal, neo.AnalogSignal): signal_wt = np.rollaxis(signal_wt, -1) - if not 
isinstance(freq, (list, tuple, np.ndarray)): + if not isinstance(frequency, (list, tuple, np.ndarray)): signal_wt = signal_wt[..., 0] else: if signal.ndim == 1: signal_wt = signal_wt[0] - if not isinstance(freq, (list, tuple, np.ndarray)): + if not isinstance(frequency, (list, tuple, np.ndarray)): signal_wt = signal_wt[..., 0, :] return signal_wt -def hilbert(signal, N='nextpow'): +@deprecated_alias(N='padding') +def hilbert(signal, padding='nextpow'): """ Apply a Hilbert transform to a `neo.AnalogSignal` object in order to obtain its (complex) analytic signal. @@ -663,9 +680,11 @@ def hilbert(signal, N='nextpow'): ---------- signal : neo.AnalogSignal Signal(s) to transform. - N : int or {'none', 'nextpow'}, optional + padding : int, {'none', 'nextpow'}, or None, optional Defines whether the signal is zero-padded. - If 'none', no padding. + The `padding` argument corresponds to `N` in + `scipy.signal.hilbert(signal, N=padding)` function. + If 'none' or None, no padding. If 'nextpow', zero-pad to the next length that is a power of 2. If it is an `int`, directly specify the length to zero-pad to (indicates the number of Fourier components). @@ -681,12 +700,7 @@ def hilbert(signal, N='nextpow'): Raises ------ ValueError: - If `N` is not an integer or neither 'nextpow' nor 'none'. - - Notes - ----- - If `N` is an integer, this is passed as the parameter `N` of - `scipy.signal.hilbert` function. + If `padding` is not an integer or neither 'nextpow' nor 'none' (None). Examples -------- @@ -708,7 +722,7 @@ def hilbert(signal, N='nextpow'): ... t_start=0*pq.s, ... sampling_rate=1000*pq.Hz) ... - >>> analytic_signal = hilbert(a, N='nextpow') + >>> analytic_signal = hilbert(a, padding='nextpow') >>> angles = np.angle(analytic_signal) >>> amplitudes = np.abs(analytic_signal) >>> print(angles) @@ -726,10 +740,10 @@ def hilbert(signal, N='nextpow'): n_org = signal.shape[0] # Right-pad signal to desired length using the signal itself - if isinstance(N, int): + if isinstance(padding, int): # User defined padding - n = N - elif N == 'nextpow': + n = padding + elif padding == 'nextpow': # To speed up calculation of the Hilbert transform, make sure we change # the signal to be of a length that is a power of two. Failure to do so # results in computations of certain signal lengths to not finish (or @@ -746,11 +760,11 @@ def hilbert(signal, N='nextpow'): # For this reason, nextpow is the default setting for now. n = 2 ** (int(np.log2(n_org - 1)) + 1) - elif N == 'none': + elif padding == 'none' or padding is None: # No padding n = n_org else: - raise ValueError("'{}' is an unknown N.".format(N)) + raise ValueError("Invalid padding '{}'.".format(padding)) output = signal.duplicate_with_new_data( scipy.signal.hilbert(signal.magnitude, N=n, axis=0)[:n_org]) diff --git a/elephant/spade.py b/elephant/spade.py index 285575986..7b3fc3458 100644 --- a/elephant/spade.py +++ b/elephant/spade.py @@ -26,11 +26,11 @@ >>> spiketrains = elephant.spike_train_generation.cpp( ... rate=5*pq.Hz, A=[0]+[0.99]+[0]*9+[0.01], t_stop=10*pq.s) -Mining patterns with SPADE using a `binsize` of 1 ms and a window length of 1 +Mining patterns with SPADE using a `bin_size` of 1 ms and a window length of 1 bin (i.e., detecting only synchronous patterns). >>> patterns = spade( -... spiketrains=spiketrains, binsize=1*pq.ms, winlen=1, dither=5*pq.ms, +... spiketrains=spiketrains, bin_size=1*pq.ms, winlen=1, dither=5*pq.ms, ... min_spikes=10, n_surr=10, psr_param=[0,0,3], ... 
output_format='patterns')['patterns'][0] @@ -72,6 +72,7 @@ import elephant.conversion as conv import elephant.spike_train_surrogates as surr from elephant.spade_src import fast_fca +from elephant.utils import deprecated_alias warnings.simplefilter('once', UserWarning) @@ -90,7 +91,8 @@ HAVE_FIM = False -def spade(spiketrains, binsize, winlen, min_spikes=2, min_occ=2, +@deprecated_alias(binsize='bin_size') +def spade(spiketrains, bin_size, winlen, min_spikes=2, min_occ=2, max_spikes=None, max_occ=None, min_neu=1, approx_stab_pars=None, n_surr=0, dither=15 * pq.ms, spectrum='#', alpha=None, stat_corr='fdr_bh', surr_method='dither_spikes', @@ -98,7 +100,7 @@ def spade(spiketrains, binsize, winlen, min_spikes=2, min_occ=2, r""" Perform the SPADE [1-3] analysis for the parallel input `spiketrains`. They are discretized with a temporal resolution equal to - `binsize` in a sliding window of `winlen*binsize`. + `bin_size` in a sliding window of `winlen*bin_size`. First, spike patterns are mined from the `spiketrains` using a technique called frequent itemset mining (FIM) or formal concept analysis (FCA). In @@ -111,12 +113,12 @@ def spade(spiketrains, binsize, winlen, min_spikes=2, min_occ=2, ---------- spiketrains: list of neo.SpikeTrain List containing the parallel spike trains to analyze - binsize: pq.Quantity + bin_size: pq.Quantity The time precision used to discretize the spiketrains (binning). winlen: int The size (number of bins) of the sliding window used for the analysis. The maximal length of a pattern (delay between first and last spike) is - then given by winlen*binsize + then given by winlen*bin_size min_spikes: int, optional Minimum number of spikes of a sequence to be considered a pattern. Default: 2 @@ -269,9 +271,9 @@ def spade(spiketrains, binsize, winlen, min_spikes=2, min_occ=2, >>> from elephant.spade import spade >>> import quantities as pq - >>> binsize = 3 * pq.ms # time resolution to discretize the spiketrains + >>> bin_size = 3 * pq.ms # time resolution to discretize the spiketrains >>> winlen = 10 # maximal pattern length in bins (i.e., sliding window) - >>> result_spade = spade(spiketrains, binsize, winlen) + >>> result_spade = spade(spiketrains, bin_size, winlen) """ if HAVE_MPI: # pragma: no cover @@ -281,7 +283,7 @@ def spade(spiketrains, binsize, winlen, min_spikes=2, min_occ=2, rank = 0 compute_stability = _check_input( - spiketrains=spiketrains, binsize=binsize, winlen=winlen, + spiketrains=spiketrains, bin_size=bin_size, winlen=winlen, min_spikes=min_spikes, min_occ=min_occ, max_spikes=max_spikes, max_occ=max_occ, min_neu=min_neu, approx_stab_pars=approx_stab_pars, @@ -293,7 +295,7 @@ def spade(spiketrains, binsize, winlen, min_spikes=2, min_occ=2, if rank == 0 or compute_stability: # Mine the spiketrains for extraction of concepts concepts, rel_matrix = concepts_mining( - spiketrains, binsize, winlen, min_spikes=min_spikes, + spiketrains, bin_size, winlen, min_spikes=min_spikes, min_occ=min_occ, max_spikes=max_spikes, max_occ=max_occ, min_neu=min_neu, report='a') time_mining = time.time() - time_mining @@ -323,7 +325,7 @@ def spade(spiketrains, binsize, winlen, min_spikes=2, min_occ=2, # Compute pvalue spectrum time_pvalue_spectrum = time.time() pv_spec = pvalue_spectrum( - spiketrains, binsize, winlen, dither=dither, n_surr=n_surr, + spiketrains, bin_size, winlen, dither=dither, n_surr=n_surr, min_spikes=min_spikes, min_occ=min_occ, max_spikes=max_spikes, max_occ=max_occ, min_neu=min_neu, spectrum=spectrum, surr_method=surr_method) @@ -369,14 +371,14 @@ def 
spade(spiketrains, binsize, winlen, min_spikes=2, min_occ=2, else: # output_format == 'patterns': # Transforming concepts to dictionary containing pattern's infos output['patterns'] = concept_output_to_patterns( - concepts, winlen, binsize, pv_spec, spectrum, + concepts, winlen, bin_size, pv_spec, spectrum, spiketrains[0].t_start) return output def _check_input( - spiketrains, binsize, winlen, min_spikes=2, min_occ=2, + spiketrains, bin_size, winlen, min_spikes=2, min_occ=2, max_spikes=None, max_occ=None, min_neu=1, approx_stab_pars=None, n_surr=0, dither=15 * pq.ms, spectrum='#', alpha=None, stat_corr='fdr_bh', surr_method='dither_spikes', @@ -405,9 +407,9 @@ def _check_input( raise ValueError( 'All spiketrains must have the same t_start and t_stop') - # Check binsize - if not isinstance(binsize, pq.Quantity): - raise ValueError('binsize must be a pq.Quantity') + # Check bin_size + if not isinstance(bin_size, pq.Quantity): + raise ValueError('bin_size must be a pq.Quantity') # Check winlen if not isinstance(winlen, int): @@ -493,13 +495,14 @@ def _check_input( return compute_stability -def concepts_mining(spiketrains, binsize, winlen, min_spikes=2, min_occ=2, +@deprecated_alias(binsize='bin_size') +def concepts_mining(spiketrains, bin_size, winlen, min_spikes=2, min_occ=2, max_spikes=None, max_occ=None, min_neu=1, report='a'): """ Find pattern candidates extracting all the concepts of the context, formed - by the objects defined as all windows of length `winlen*binsize` slided + by the objects defined as all windows of length `winlen*bin_size` slided along the `spiketrains` and the attributes as the spikes occurring in each - of the window discretized at a time resolution equal to `binsize`. Hence, + of the window discretized at a time resolution equal to `bin_size`. Hence, the output are all the repeated sequences of spikes with maximal length `winlen`, which are not trivially explained by the same number of occurrences of a superset of spikes. @@ -508,12 +511,12 @@ def concepts_mining(spiketrains, binsize, winlen, min_spikes=2, min_occ=2, ---------- spiketrains: list of neo.SpikeTrain List containing the parallel spike trains to analyze - binsize: pq.Quantity + bin_size: pq.Quantity The time precision used to discretize the `spiketrains` (clipping). winlen: int The size (number of bins) of the sliding window used for the analysis. The maximal length of a pattern (delay between first and last spike) is - then given by `winlen*binsize` + then given by `winlen*bin_size` min_spikes: int, optional Minimum number of spikes of a sequence to be considered a pattern. 
Default: 2 @@ -593,7 +596,7 @@ def concepts_mining(spiketrains, binsize, winlen, min_spikes=2, min_occ=2, " 'a', '#' and '3d#,' got {} instead".format(report)) # Binning the spiketrains and clipping (binary matrix) binary_matrix = conv.BinnedSpikeTrain( - spiketrains, binsize).to_sparse_bool_array().tocoo() + spiketrains, bin_size).to_sparse_bool_array().tocoo() # Computing the context and the binary matrix encoding the relation between # objects (window positions) and attributes (spikes, # indexed with a number equal to neuron idx*winlen+bin idx) @@ -649,7 +652,7 @@ def _build_context(binary_matrix, winlen): binary_matrix : sparse.coo_matrix Binary matrix containing the binned spike trains winlen : int - Length of the binsize used to bin the spiketrains + Length of the bin_size used to bin the spiketrains Returns ------- @@ -687,12 +690,13 @@ def _build_context(binary_matrix, winlen): # all non-empty bins are starting positions for windows for idx, window_idx in enumerate(unique_cols): # find the end of the current window in unique_cols - end_of_window = np.searchsorted(unique_cols, window_idx+winlen) + end_of_window = np.searchsorted(unique_cols, window_idx + winlen) # loop over all non-empty bins in the current window for rel_idx, col in enumerate(unique_cols[idx:end_of_window]): # get all occurrences of the current col in binary_matrix.col - spike_indices_in_window = np.arange(unique_col_idx[idx+rel_idx], - unique_col_idx[idx+rel_idx+1]) + spike_indices_in_window = np.arange( + unique_col_idx[idx + rel_idx], + unique_col_idx[idx + rel_idx + 1]) # get the binary_matrix.row entries matching the current col # prepare the row of rel_matrix matching the current window # spikes are indexed as (neuron_id * winlen + bin_id) @@ -791,7 +795,7 @@ def _fpgrowth(transactions, min_c=2, min_z=2, max_z=None, winlen: int The size (number of bins) of the sliding window used for the analysis. The maximal length of a pattern (delay between first and - last spike) is then given by winlen*binsize + last spike) is then given by winlen*bin_size Default: 1 min_neu: int Minimum number of neurons in a sequence to considered a @@ -1042,7 +1046,7 @@ def _fast_fca(context, min_c=2, min_z=2, max_z=None, winlen: int The size (number of bins) of the sliding window used for the analysis. The maximal length of a pattern (delay between first and - last spike) is then given by winlen*binsize + last spike) is then given by winlen*bin_size Default: 1 min_neu: int Minimum number of neurons in a sequence to considered a @@ -1144,9 +1148,20 @@ def _fca_filter(concept, winlen, min_c, min_z, max_c, max_z, min_neu): return keep_concepts -def pvalue_spectrum(spiketrains, binsize, winlen, dither, n_surr, min_spikes=2, - min_occ=2, max_spikes=None, max_occ=None, min_neu=1, - spectrum='#', surr_method='dither_spikes'): +@deprecated_alias(binsize='bin_size') +def pvalue_spectrum( + spiketrains, + bin_size, + winlen, + dither, + n_surr, + min_spikes=2, + min_occ=2, + max_spikes=None, + max_occ=None, + min_neu=1, + spectrum='#', + surr_method='dither_spikes'): """ Compute the p-value spectrum of pattern signatures extracted from surrogates of parallel spike trains, under the null hypothesis of @@ -1162,12 +1177,12 @@ def pvalue_spectrum(spiketrains, binsize, winlen, dither, n_surr, min_spikes=2, ---------- spiketrains: list of neo.SpikeTrain List containing the parallel spike trains to analyze - binsize: pq.Quantity + bin_size: pq.Quantity The time precision used to discretize the `spiketrains` (binning). 
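A minimal sketch (illustrative only, not part of the patch) of how the renamed `bin_size` and `winlen` parameters combine throughout SPADE: the maximal temporal extent of a mined pattern is `winlen * bin_size`, so the two values are chosen together. The numbers are borrowed from the `spade()` docstring example above.

>>> import quantities as pq
>>> bin_size = 3 * pq.ms   # binning resolution passed to spade()
>>> winlen = 10            # sliding-window length in bins
>>> print(winlen * bin_size)  # maximal first-to-last-spike delay of a pattern
30.0 ms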
winlen: int The size (number of bins) of the sliding window used for the analysis. The maximal length of a pattern (delay between first and last spike) is - then given by `winlen*binsize` + then given by `winlen*bin_size` dither: pq.Quantity Amount of spike time dithering for creating the surrogates for filtering the pattern spectrum. A spike at time t is placed randomly @@ -1274,16 +1289,17 @@ def pvalue_spectrum(spiketrains, binsize, winlen, dither, n_surr, min_spikes=2, # prevent that spikes fall into the same bin, if the spike trains # are sparse (min(ISI)>bin size). surrs = [surr.dither_spikes( - spiketrain, dither=dither, n=1, refractory_period=binsize)[0] - for spiketrain in spiketrains] + spiketrain, dither=dither, n_surrogates=1, + refractory_period=bin_size)[0] + for spiketrain in spiketrains] else: surrs = [surr.surrogates( - spiketrain, n=1, surr_method=surr_method, + spiketrain, n_surrogates=1, method=surr_method, dt=dither)[0] for spiketrain in spiketrains] # Find all pattern signatures in the current surrogate data set surr_concepts = concepts_mining( - surrs, binsize, winlen, min_spikes=min_spikes, + surrs, bin_size, winlen, min_spikes=min_spikes, max_spikes=max_spikes, min_occ=min_occ, max_occ=max_occ, min_neu=min_neu, report=spectrum)[0] # The last entry of the signature is the number of times the @@ -1810,14 +1826,13 @@ def _select_random_subsets(element_1, n_subsets): subsets : list each element a subset of element_1 """ - subsets_indices = [set()] * (len(element_1)+1) + subsets_indices = [set()] * (len(element_1) + 1) subsets = [] while len(subsets) < n_subsets: - num_indices = np.random.binomial(n=len(element_1), p=1/2) - random_indices = np.random.choice( - len(element_1), size=num_indices, replace=False) - random_indices.sort() + num_indices = np.random.binomial(n=len(element_1), p=1 / 2) + random_indices = sorted(np.random.choice( + len(element_1), size=num_indices, replace=False)) random_tuple = tuple(random_indices) if random_tuple not in subsets_indices[num_indices]: @@ -1887,7 +1902,7 @@ def pattern_set_reduction(concepts, ns_signatures, winlen, spectrum, winlen: int The size (number of bins) of the sliding window used for the analysis. The maximal length of a pattern (delay between first and last spike) is - then given by `winlen*binsize`. + then given by `winlen*bin_size`. spectrum: {'#', '3d#'} Define the signature of the patterns. @@ -2143,7 +2158,8 @@ def _covered_spikes_criterion(occ_superset, return reject_superset, reject_subset -def concept_output_to_patterns(concepts, winlen, binsize, pv_spec=None, +@deprecated_alias(binsize='bin_size') +def concept_output_to_patterns(concepts, winlen, bin_size, pv_spec=None, spectrum='#', t_start=0 * pq.ms): """ Construction of dictionaries containing all the information about a pattern @@ -2156,7 +2172,7 @@ def concept_output_to_patterns(concepts, winlen, binsize, pv_spec=None, tuple of (spikes in the pattern, occurrences of the patterns) winlen: int Length (in bins) of the sliding window used for the analysis. - binsize: pq.Quantity + bin_size: pq.Quantity The time precision used to discretize the `spiketrains` (binning). pv_spec: None or tuple Contains a tuple of signatures and the corresponding p-value. 
If equal @@ -2209,7 +2225,7 @@ def concept_output_to_patterns(concepts, winlen, binsize, pv_spec=None, if spectrum == '#': pvalue_dict[(entry[0], entry[1])] = entry[-1] # Initializing list containing all the patterns - t_start = t_start.rescale(binsize.units) + t_start = t_start.rescale(bin_size.units) output = [] for concept in concepts: itemset, window_ids = concept[:2] @@ -2226,10 +2242,10 @@ def concept_output_to_patterns(concepts, winlen, binsize, pv_spec=None, bin_ids = bin_ids_unsort[order_bin_ids] # id of the neurons forming the pattern output_dict['neurons'] = list(itemset[order_bin_ids] // winlen) - # Lags (in binsizes units) of the pattern - output_dict['lags'] = bin_ids[1:] * binsize - # Times (in binsize units) in which the pattern occurs - output_dict['times'] = sorted(window_ids) * binsize + t_start + # Lags (in bin_sizes units) of the pattern + output_dict['lags'] = bin_ids[1:] * bin_size + # Times (in bin_size units) in which the pattern occurs + output_dict['times'] = sorted(window_ids) * bin_size + t_start # pattern dictionary appended to the output if spectrum == '#': diff --git a/elephant/spectral.py b/elephant/spectral.py index 38d2e4c7c..fe22f1b9c 100644 --- a/elephant/spectral.py +++ b/elephant/spectral.py @@ -10,14 +10,20 @@ from __future__ import division, print_function, unicode_literals import neo +import warnings import numpy as np import quantities as pq import scipy.signal +from elephant.utils import deprecated_alias -def welch_psd(signal, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, - fs=1.0, window='hanning', nfft=None, detrend='constant', - return_onesided=True, scaling='density', axis=-1): + +@deprecated_alias(num_seg='n_segments', len_seg='len_segment', + freq_res='frequency_resolution') +def welch_psd(signal, n_segments=8, len_segment=None, + frequency_resolution=None, overlap=0.5, fs=1.0, window='hanning', + nfft=None, detrend='constant', return_onesided=True, + scaling='density', axis=-1): """ Estimates power spectrum density (PSD) of a given `neo.AnalogSignal` using Welch's method. @@ -28,8 +34,8 @@ def welch_psd(signal, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, overlap can be specified by parameter `overlap` (default is 0.5, i.e. segments are overlapped by the half of their length). The number and the length of the segments are determined according - to the parameters `num_seg`, `len_seg` or `freq_res`. By default, the - data is cut into 8 segments; + to the parameters `n_segments`, `len_segment` or `frequency_resolution`. + By default, the data is cut into 8 segments; 2. Apply a window function to each segment. Hanning window is used by default. This can be changed by giving a window function or an @@ -46,16 +52,17 @@ def welch_psd(signal, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, `pq.Quantity` or `np.ndarray`, sampling frequency should be given through the keyword argument `fs`. Otherwise, the default value is used (`fs` = 1.0). - num_seg : int, optional + n_segments : int, optional Number of segments. The length of segments is adjusted so that overlapping segments cover the entire stretch of the given data. This - parameter is ignored if `len_seg` or `freq_res` is given. + parameter is ignored if `len_segment` or `frequency_resolution` is + given. Default: 8. - len_seg : int, optional - Length of segments. This parameter is ignored if `freq_res` is given. - If None, it will be determined from other parameters. + len_segment : int, optional + Length of segments. 
This parameter is ignored if `frequency_resolution` + is given. If None, it will be determined from other parameters. Default: None. - freq_res : pq.Quantity or float, optional + frequency_resolution : pq.Quantity or float, optional Desired frequency resolution of the obtained PSD estimate in terms of the interval between adjacent frequency bins. When given as a `float`, it is taken as frequency in Hz. @@ -116,22 +123,23 @@ def welch_psd(signal, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, Raises ------ ValueError - If `overlap` is not in the interval [0, 1). + If `overlap` is not in the interval `[0, 1)`. - If `freq_res` is not positive. + If `frequency_resolution` is not positive. - If `freq_res` is too high for the given data size. + If `frequency_resolution` is too high for the given data size. - If `freq_res` is None and `len_seg` is not a positive number. + If `frequency_resolution` is None and `len_segment` is not a positive + number. - If `freq_res` is None and `len_seg` is greater than the length of data - on `axis`. + If `frequency_resolution` is None and `len_segment` is greater than the + length of data at `axis`. - If both `freq_res` and `len_seg` are None and `num_seg` is not a - positive number. + If both `frequency_resolution` and `len_segment` are None and + `n_segments` is not a positive number. - If both `freq_res` and `len_seg` are None and `num_seg` is greater - than the length of data on `axis`. + If both `frequency_resolution` and `len_segment` are None and + `n_segments` is greater than the length of data at `axis`. Notes ----- @@ -142,16 +150,17 @@ def welch_psd(signal, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, `scaling`, and `axis` are directly passed to the `scipy.signal.welch` function. See the respective descriptions in the docstring of `scipy.signal.welch` for usage. - 3. When only `num_seg` is given, parameter `nperseg` of + 3. When only `n_segments` is given, parameter `nperseg` of `scipy.signal.welch` function is determined according to the expression - `signal.shape[axis]` / (`num_seg` - `overlap` * (`num_seg` - 1)) + `signal.shape[axis] / (n_segments - overlap * (n_segments - 1))` converted to integer. See Also -------- scipy.signal.welch + welch_cohere """ @@ -182,31 +191,36 @@ def welch_psd(signal, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, # determine the length of segments (i.e. 
*nperseg*) according to given # parameters - if freq_res is not None: - if freq_res <= 0: - raise ValueError("freq_res must be positive") - dF = freq_res.rescale('Hz').magnitude \ - if isinstance(freq_res, pq.quantity.Quantity) else freq_res + if frequency_resolution is not None: + if frequency_resolution <= 0: + raise ValueError("frequency_resolution must be positive") + if isinstance(frequency_resolution, pq.quantity.Quantity): + dF = frequency_resolution.rescale('Hz').magnitude + else: + dF = frequency_resolution nperseg = int(params['fs'] / dF) if nperseg > data.shape[axis]: - raise ValueError("freq_res is too high for the given data size") - elif len_seg is not None: - if len_seg <= 0: + raise ValueError("frequency_resolution is too high for the given " + "data size") + elif len_segment is not None: + if len_segment <= 0: raise ValueError("len_seg must be a positive number") - elif data.shape[axis] < len_seg: + elif data.shape[axis] < len_segment: raise ValueError("len_seg must be shorter than the data length") - nperseg = len_seg + nperseg = len_segment else: - if num_seg <= 0: - raise ValueError("num_seg must be a positive number") - elif data.shape[axis] < num_seg: - raise ValueError("num_seg must be smaller than the data length") - # when only *num_seg* is given, *nperseg* is determined by solving the - # following equation: - # num_seg * nperseg - (num_seg-1) * overlap * nperseg = data.shape[-1] - # ----------------- =============================== ^^^^^^^^^^^ - # summed segment lengths total overlap data length - nperseg = int(data.shape[axis] / (num_seg - overlap * (num_seg - 1))) + if n_segments <= 0: + raise ValueError("n_segments must be a positive number") + elif data.shape[axis] < n_segments: + raise ValueError("n_segments must be smaller than the data length") + # when only *n_segments* is given, *nperseg* is determined by solving + # the following equation: + # n_segments * nperseg - (n_segments-1) * overlap * nperseg = + # data.shape[-1] + # -------------------- =============================== ^^^^^^^^^^^ + # summed segment lengths total overlap data length + nperseg = int(data.shape[axis] / (n_segments - overlap * ( + n_segments - 1))) params['nperseg'] = nperseg params['noverlap'] = int(nperseg * overlap) @@ -223,9 +237,12 @@ def welch_psd(signal, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, return freqs, psd -def welch_cohere(x, y, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, - fs=1.0, window='hanning', nfft=None, detrend='constant', - scaling='density', axis=-1): +@deprecated_alias(x='signal_i', y='signal_j', num_seg='n_segments', + len_seg='len_segment', freq_res='frequency_resolution') +def welch_coherence(signal_i, signal_j, n_segments=8, len_segment=None, + frequency_resolution=None, overlap=0.5, fs=1.0, + window='hanning', nfft=None, detrend='constant', + scaling='density', axis=-1): r""" Estimates coherence between a given pair of analog signals. @@ -240,26 +257,27 @@ def welch_cohere(x, y, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, Parameters ---------- - x : neo.AnalogSignal or pq.Quantity or np.ndarray + signal_i : neo.AnalogSignal or pq.Quantity or np.ndarray First time series data of the pair between which coherence is computed. - y : neo.AnalogSignal or pq.Quantity or np.ndarray + signal_j : neo.AnalogSignal or pq.Quantity or np.ndarray Second time series data of the pair between which coherence is computed. - The shapes and the sampling frequencies of `x` and `y` must be - identical. 
When `x` and `y` are not `neo.AnalogSignal`, sampling - frequency should be specified through the keyword argument `fs`. - Otherwise, the default value is used (`fs` = 1.0). - num_seg : int, optional + The shapes and the sampling frequencies of `signal_i` and `signal_j` + must be identical. When `signal_i` and `signal_j` are not + `neo.AnalogSignal`, sampling frequency should be specified through the + keyword argument `fs`. Otherwise, the default value is used + (`fs` = 1.0). + n_segments : int, optional Number of segments. The length of segments is adjusted so that overlapping segments cover the entire stretch of the given data. This - parameter is ignored if `len_seg` or `freq_res` is given. + parameter is ignored if `len_seg` or `frequency_resolution` is given. Default: 8. - len_seg : int, optional - Length of segments. This parameter is ignored if `freq_res` is given. - If None, it is determined from other parameters. + len_segment : int, optional + Length of segments. This parameter is ignored if `frequency_resolution` + is given. If None, it is determined from other parameters. Default: None. - freq_res : pq.Quantity or float, optional + frequency_resolution : pq.Quantity or float, optional Desired frequency resolution of the obtained coherence estimate in terms of the interval between adjacent frequency bins. When given as a `float`, it is taken as frequency in Hz. @@ -303,82 +321,73 @@ def welch_cohere(x, y, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, freqs : pq.Quantity or np.ndarray Frequencies associated with the estimates of coherency and phase lag. `freqs` is always a vector irrespective of the shape of the input - data. If `x` and `y` are `neo.AnalogSignal` or `pq.Quantity`, a - `pq.Quantity` array is returned. Otherwise, a `np.ndarray` containing - frequency in Hz is returned. + data. If `signal_i` and `signal_j` are `neo.AnalogSignal` or + `pq.Quantity`, a `pq.Quantity` array is returned. Otherwise, a + `np.ndarray` containing frequency in Hz is returned. coherency : np.ndarray Estimate of coherency between the input time series. For each frequency, coherency takes a value between 0 and 1, with 0 or 1 representing no or perfect coherence, respectively. - When the input arrays `x` and `y` are multi-dimensional, `coherency` - is of the same shape as the inputs, and the frequency is indexed - depending on the type of the input. If the input is + When the input arrays `signal_i` and `signal_j` are multi-dimensional, + `coherency` is of the same shape as the inputs, and the frequency is + indexed depending on the type of the input. If the input is `neo.AnalogSignal`, the first axis indexes frequency. Otherwise, frequency is indexed by the last axis. phase_lag : pq.Quantity or np.ndarray Estimate of phase lag in radian between the input time series. For each frequency, phase lag takes a value between :math:`-\pi` and - :math:`\pi`, with positive values meaning phase precession of `x` - ahead of `y`, and vice versa. If `x` and `y` are `neo.AnalogSignal` or - `pq.Quantity`, a `pq.Quantity` array is returned. Otherwise, a - `np.ndarray` containing phase lag in radian is returned. - The axis for frequency index is determined in the same way as for - `coherency`. + :math:`\pi`, with positive values meaning phase precession of + `signal_i` ahead of `signal_j`, and vice versa. If `signal_i` and + `signal_j` are `neo.AnalogSignal` or `pq.Quantity`, a `pq.Quantity` + array is returned. Otherwise, a `np.ndarray` containing phase lag in + radian is returned. 
The axis for frequency index is determined in the + same way as for `coherency`. Raises ------ ValueError - If `overlap` is not in the interval [0, 1). - - If `freq_res` is not positive. - - If `freq_res` is too high for the given data size. - - If `freq_res` is None and `len_seg` is not a positive number. - - If `freq_res` is None and `len_seg` is greater than the length of data - on `axis`. - - If both `freq_res` and `len_seg` are None and `num_seg` is not a - positive number. - - If both `freq_res` and `len_seg` are None and `num_seg` is greater - than the length of data on `axis`. + Same as in :func:`welch_psd`. Notes ----- - 1. The parameters `window`, `nfft`, `detrend`, `scaling`, and `axis` are - directly passed to the helper function `_welch`. See the - respective descriptions in the docstring of `_welch` for usage. - 2. When only `num_seg` is given, parameter `nperseg` for `_welch` function - is determined according to the expression + 1. The computation steps used in this function are implemented in + `scipy.signal` module, and this function is a wrapper which provides + a proper set of parameters to `scipy.signal.welch` function. + 2. The parameters `window`, `nfft`, `detrend`, `return_onesided`, + `scaling`, and `axis` are directly passed to the `scipy.signal.welch` + function. See the respective descriptions in the docstring of + `scipy.signal.welch` for usage. + 3. When only `n_segments` is given, parameter `nperseg` of + `scipy.signal.welch` function is determined according to the expression - `x.shape[axis]` / (`num_seg` - `overlap` * (`num_seg` - 1)) + `signal.shape[axis] / (n_segments - overlap * (n_segments - 1))` converted to integer. See Also -------- - spectral._welch + welch_psd """ + # TODO: code duplication with welch_psd() + # initialize a parameter dict for scipy.signal.csd() params = {'window': window, 'nfft': nfft, 'detrend': detrend, 'scaling': scaling, 'axis': axis} # When the input is AnalogSignal, the axis for time index is rolled to # the last - xdata = np.asarray(x) - ydata = np.asarray(y) - if isinstance(x, neo.AnalogSignal): + xdata = np.asarray(signal_i) + ydata = np.asarray(signal_j) + if isinstance(signal_i, neo.AnalogSignal): xdata = np.rollaxis(xdata, 0, len(xdata.shape)) ydata = np.rollaxis(ydata, 0, len(ydata.shape)) # if the data is given as AnalogSignal, use its attribute to specify # the sampling frequency - if hasattr(x, 'sampling_rate'): - params['fs'] = x.sampling_rate.rescale('Hz').magnitude + if hasattr(signal_i, 'sampling_rate'): + params['fs'] = signal_i.sampling_rate.rescale('Hz').magnitude else: params['fs'] = fs @@ -389,31 +398,34 @@ def welch_cohere(x, y, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, # determine the length of segments (i.e. 
*nperseg*) according to given # parameters - if freq_res is not None: - if freq_res <= 0: - raise ValueError("freq_res must be positive") - dF = freq_res.rescale('Hz').magnitude \ - if isinstance(freq_res, pq.quantity.Quantity) else freq_res + if frequency_resolution is not None: + if isinstance(frequency_resolution, pq.quantity.Quantity): + dF = frequency_resolution.rescale('Hz').magnitude + else: + dF = frequency_resolution nperseg = int(params['fs'] / dF) if nperseg > xdata.shape[axis]: - raise ValueError("freq_res is too high for the given data size") - elif len_seg is not None: - if len_seg <= 0: + raise ValueError("frequency_resolution is too high for the given " + "data size") + elif len_segment is not None: + if len_segment <= 0: raise ValueError("len_seg must be a positive number") - elif xdata.shape[axis] < len_seg: + elif xdata.shape[axis] < len_segment: raise ValueError("len_seg must be shorter than the data length") - nperseg = len_seg + nperseg = len_segment else: - if num_seg <= 0: - raise ValueError("num_seg must be a positive number") - elif xdata.shape[axis] < num_seg: - raise ValueError("num_seg must be smaller than the data length") - # when only *num_seg* is given, *nperseg* is determined by solving the - # following equation: - # num_seg * nperseg - (num_seg-1) * overlap * nperseg = data.shape[-1] - # ----------------- =============================== ^^^^^^^^^^^ - # summed segment lengths total overlap data length - nperseg = int(xdata.shape[axis] / (num_seg - overlap * (num_seg - 1))) + if n_segments <= 0: + raise ValueError("n_segments must be a positive number") + elif xdata.shape[axis] < n_segments: + raise ValueError("n_segments must be smaller than the data length") + # when only *n_segments* is given, *nperseg* is determined by solving + # the following equation: + # n_segments * nperseg - (n_segments-1) * overlap * nperseg = + # data.shape[-1] + # ------------------- =============================== ^^^^^^^^^^^ + # summed segment lengths total overlap data length + nperseg = int(xdata.shape[axis] / (n_segments - overlap * ( + n_segments - 1))) params['nperseg'] = nperseg params['noverlap'] = int(nperseg * overlap) @@ -425,14 +437,20 @@ def welch_cohere(x, y, num_seg=8, len_seg=None, freq_res=None, overlap=0.5, phase_lag = np.angle(Pxy) # attach proper units to return values - if isinstance(x, pq.quantity.Quantity): + if isinstance(signal_i, pq.quantity.Quantity): freqs = freqs * pq.Hz phase_lag = phase_lag * pq.rad # When the input is AnalogSignal, the axis for frequency index is # rolled to the first to comply with the Neo convention about time axis - if isinstance(x, neo.AnalogSignal): + if isinstance(signal_i, neo.AnalogSignal): coherency = np.rollaxis(coherency, -1) phase_lag = np.rollaxis(phase_lag, -1) return freqs, coherency, phase_lag + + +def welch_cohere(*args, **kwargs): + warnings.warn("'welch_cohere' is deprecated; use 'welch_coherence'", + DeprecationWarning) + return welch_coherence(*args, **kwargs) diff --git a/elephant/spike_train_correlation.py b/elephant/spike_train_correlation.py index 84f26c18f..957240530 100644 --- a/elephant/spike_train_correlation.py +++ b/elephant/spike_train_correlation.py @@ -15,6 +15,8 @@ import scipy.signal from scipy import integrate +from elephant.utils import deprecated_alias + # The highest sparsity of the `BinnedSpikeTrain` matrix for which # memory-efficient (sparse) implementation of `covariance()` is faster than # with the corresponding numpy dense array.
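A hedged sketch of how the `deprecated_alias` decorator imported above is assumed to behave: an old keyword name is re-mapped to its new counterpart and a `DeprecationWarning` is emitted, so user code written against the old API keeps working. The decorated helper below is purely hypothetical and only illustrates the mechanism.

>>> from elephant.utils import deprecated_alias
>>> @deprecated_alias(binsize='bin_size')
... def bin_count_sketch(values, bin_size):
...     # hypothetical helper, not part of Elephant
...     return len(values) // bin_size
>>> bin_count_sketch([1, 2, 3, 4], bin_size=2)   # preferred new keyword
2
>>> bin_count_sketch([1, 2, 3, 4], binsize=2)    # old keyword: warns, then forwards
2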
@@ -29,7 +31,8 @@ class _CrossCorrHist(object): Parameters ---------- - binned_st1, binned_st2 : elephant.conversion.BinnedSpikeTrain + binned_spiketrain_i, binned_spiketrain_j : + elephant.conversion.BinnedSpikeTrain Binned spike trains to cross-correlate. The two spike trains must have the same `t_start` and `t_stop`. window : list or tuple @@ -37,13 +40,13 @@ class _CrossCorrHist(object): Refer to the docs of `cross_correlation_histogram()`. """ - def __init__(self, binned_st1, binned_st2, window): - self.binned_st1 = binned_st1 - self.binned_st2 = binned_st2 + def __init__(self, binned_spiketrain_i, binned_spiketrain_j, window): + self.binned_spiketrain_i = binned_spiketrain_i + self.binned_spiketrain_j = binned_spiketrain_j self.window = window @staticmethod - def get_valid_lags(binned_st1, binned_st2): + def get_valid_lags(binned_spiketrain_i, binned_spiketrain_j): """ Computes the lags at which the cross-correlation of the input spiketrains can be calculated with full @@ -51,7 +54,8 @@ def get_valid_lags(binned_st1, binned_st2): Parameters ---------- - binned_st1, binned_st2 : elephant.conversion.BinnedSpikeTrain + binned_spiketrain_i, binned_spiketrain_j : + elephant.conversion.BinnedSpikeTrain Binned spike trains to cross-correlate. The input spike trains can have any `t_start` and `t_stop`. @@ -62,22 +66,22 @@ def get_valid_lags(binned_st1, binned_st2): at full overlap (valid mode). """ - binsize = binned_st1.binsize + bin_size = binned_spiketrain_i.bin_size # see cross_correlation_histogram for the examples - if binned_st1.num_bins < binned_st2.num_bins: + if binned_spiketrain_i.n_bins < binned_spiketrain_j.n_bins: # ex. 1) lags range: [-2, 5] ms # ex. 2) lags range: [1, 2] ms - left_edge = (binned_st2.t_start - - binned_st1.t_start) / binsize - right_edge = (binned_st2.t_stop - - binned_st1.t_stop) / binsize + left_edge = (binned_spiketrain_j.t_start - + binned_spiketrain_i.t_start) / bin_size + right_edge = (binned_spiketrain_j.t_stop - + binned_spiketrain_i.t_stop) / bin_size else: # ex. 3) lags range: [-1, 3] ms - left_edge = (binned_st2.t_stop - - binned_st1.t_stop) / binsize - right_edge = (binned_st2.t_start - - binned_st1.t_start) / binsize + left_edge = (binned_spiketrain_j.t_stop - + binned_spiketrain_i.t_stop) / bin_size + right_edge = (binned_spiketrain_j.t_start - + binned_spiketrain_i.t_start) / bin_size right_edge = int(right_edge.simplified.magnitude) left_edge = int(left_edge.simplified.magnitude) lags = np.arange(left_edge, right_edge + 1, dtype=np.int32) @@ -91,13 +95,14 @@ def correlate_memory(self, cch_mode): Return ------- cross_corr : np.ndarray - Cross-correlation of `self.binned_st` and `self.binned_st2`. + Cross-correlation of `self.binned_spiketrain1` and + `self.binned_spiketrain2`. 
""" - binned_st1 = self.binned_st1 - binned_st2 = self.binned_st2 + binned_spiketrain1 = self.binned_spiketrain_i + binned_spiketrain2 = self.binned_spiketrain_j - st1_spmat = self.binned_st1._sparse_mat_u - st2_spmat = self.binned_st2._sparse_mat_u + st1_spmat = self.binned_spiketrain_i._sparse_mat_u + st2_spmat = self.binned_spiketrain_j._sparse_mat_u left_edge, right_edge = self.window # extract the nonzero column indices of 1-d matrices @@ -107,7 +112,7 @@ def correlate_memory(self, cch_mode): # 'valid' mode requires bins correction due to the shift in t_starts # 'full' and 'pad' modes don't need this correction if cch_mode == "valid": - if binned_st1.num_bins > binned_st2.num_bins: + if binned_spiketrain1.n_bins > binned_spiketrain2.n_bins: st2_bin_idx_unique += right_edge else: st2_bin_idx_unique += left_edge @@ -148,11 +153,12 @@ def correlate_speed(self, cch_mode): Returns ------- cross_corr : np.ndarray - Cross-correlation of `self.binned_st` and `self.binned_st2`. + Cross-correlation of `self.binned_spiketrain1` and + `self.binned_spiketrain2`. """ # Retrieve the array of the binned spike trains - st1_arr = self.binned_st1.to_array()[0] - st2_arr = self.binned_st2.to_array()[0] + st1_arr = self.binned_spiketrain_i.to_array()[0] + st2_arr = self.binned_spiketrain_j.to_array()[0] left_edge, right_edge = self.window if cch_mode == 'pad': # Zero padding to stay between left_edge and right_edge @@ -179,10 +185,11 @@ def border_correction(self, cross_corr): np.ndarray Cross-correlation array with the border correction applied. """ - min_num_bins = min(self.binned_st1.num_bins, self.binned_st2.num_bins) + min_num_bins = min(self.binned_spiketrain_i.n_bins, + self.binned_spiketrain_j.n_bins) left_edge, right_edge = self.window - valid_lags = _CrossCorrHist.get_valid_lags(self.binned_st1, - self.binned_st2) + valid_lags = _CrossCorrHist.get_valid_lags(self.binned_spiketrain_i, + self.binned_spiketrain_j) lags_to_compute = np.arange(left_edge, right_edge + 1) outer_subtraction = np.subtract.outer(lags_to_compute, valid_lags) min_distance_from_window = np.abs(outer_subtraction).min(axis=1) @@ -190,7 +197,7 @@ def border_correction(self, cross_corr): correction = float(min_num_bins) / n_values_fall_in_window return cross_corr * correction - def cross_corr_coef(self, cross_corr): + def cross_correlation_coefficient(self, cross_corr): """ Normalizes the CCH to obtain the cross-correlation coefficient function, ranging from -1 to 1. @@ -210,11 +217,12 @@ def cross_corr_coef(self, cross_corr): np.ndarray Normalized cross-correlation array in range `[-1, 1]`. 
""" - max_num_bins = max(self.binned_st1.num_bins, self.binned_st2.num_bins) - n_spikes1 = self.binned_st1.get_num_of_spikes() - n_spikes2 = self.binned_st2.get_num_of_spikes() - data1 = self.binned_st1._sparse_mat_u.data - data2 = self.binned_st2._sparse_mat_u.data + max_num_bins = max(self.binned_spiketrain_i.n_bins, + self.binned_spiketrain_j.n_bins) + n_spikes1 = self.binned_spiketrain_i.get_num_of_spikes() + n_spikes2 = self.binned_spiketrain_j.get_num_of_spikes() + data1 = self.binned_spiketrain_i._sparse_mat_u.data + data2 = self.binned_spiketrain_j._sparse_mat_u.data ii = data1.dot(data1) jj = data2.dot(data2) cov_mean = n_spikes1 * n_spikes2 / max_num_bins @@ -223,13 +231,13 @@ def cross_corr_coef(self, cross_corr): cross_corr_normalized = (cross_corr - cov_mean) / std_xy return cross_corr_normalized - def kernel_smoothing(self, cross_corr, kernel): + def kernel_smoothing(self, cross_corr_array, kernel): """ Performs 1-d convolution with the `kernel`. Parameters ---------- - cross_corr : np.ndarray + cross_corr_array : np.ndarray Cross-correlation array. The output of `self.correlate_speed()` or `self.correlate_memory()`. kernel : list @@ -250,10 +258,11 @@ def kernel_smoothing(self, cross_corr, kernel): kern_len_max)) kernel = np.divide(kernel, kernel.sum()) # Smooth the cross-correlation histogram with the kern - return np.convolve(cross_corr, kernel, mode='same') + return np.convolve(cross_corr_array, kernel, mode='same') -def covariance(binned_sts, binary=False, fast=True): +@deprecated_alias(binned_sts='binned_spiketrain') +def covariance(binned_spiketrain, binary=False, fast=True): r""" Calculate the NxN matrix of pairwise covariances between all combinations of N binned spike trains. @@ -278,7 +287,7 @@ def covariance(binned_sts, binary=False, fast=True): Parameters ---------- - binned_sts : (N, ) elephant.conversion.BinnedSpikeTrain + binned_spiketrain : (N, ) elephant.conversion.BinnedSpikeTrain A binned spike train containing the spike trains to be evaluated. binary : bool, optional If True, the spikes of a particular spike train falling in the same bin @@ -286,7 +295,7 @@ def covariance(binned_sts, binary=False, fast=True): False, the binned vectors :math:`b_i` contain the spike counts per bin. Default: False. fast : bool, optional - If `fast=True` and the sparsity of `binned_sts` is `> 0.1`, use + If `fast=True` and the sparsity of `binned_spiketrain` is `> 0.1`, use `np.cov()`. Otherwise, use memory efficient implementation. See Notes [2]. Default: True. @@ -295,26 +304,27 @@ def covariance(binned_sts, binary=False, fast=True): ------- C : (N, N) np.ndarray The square matrix of covariances. The element :math:`C[i,j]=C[j,i]` is - the covariance between `binned_sts[i]` and `binned_sts[j]`. + the covariance between `binned_spiketrain[i]` and + `binned_spiketrain[j]`. Raises ------ MemoryError - When using `fast=True` and `binned_sts` shape is large. + When using `fast=True` and `binned_spiketrain` shape is large. Warns -------- UserWarning - If at least one row in `binned_sts` is empty (has no spikes). + If at least one row in `binned_spiketrain` is empty (has no spikes). See Also -------- - corrcoef : Pearson correlation coefficient + correlation_coefficient : Pearson correlation coefficient Notes ----- 1. The spike trains in the binned structure are assumed to cover the - complete time span `[t_start, t_stop)` of `binned_sts`. + complete time span `[t_start, t_stop)` of `binned_spiketrain`. 2. Using `fast=True` might lead to `MemoryError`. 
If it's the case, switch to `fast=False`. @@ -330,23 +340,24 @@ def covariance(binned_sts, binary=False, fast=True): ... rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s) >>> st2 = homogeneous_poisson_process( ... rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s) - >>> cov_matrix = covariance(BinnedSpikeTrain([st1, st2], binsize=5*ms)) + >>> cov_matrix = covariance(BinnedSpikeTrain([st1, st2], bin_size=5*ms)) >>> print(cov_matrix[0, 1]) -0.001668334167083546 """ if binary: - binned_sts = binned_sts.binarize(copy=True) + binned_spiketrain = binned_spiketrain.binarize(copy=True) - if fast and binned_sts.sparsity > _SPARSITY_MEMORY_EFFICIENT_THR: - array = binned_sts.to_array() + if fast and binned_spiketrain.sparsity > _SPARSITY_MEMORY_EFFICIENT_THR: + array = binned_spiketrain.to_array() return np.cov(array) return _covariance_sparse( - binned_sts, corrcoef_norm=False) + binned_spiketrain, corrcoef_norm=False) -def corrcoef(binned_sts, binary=False, fast=True): +@deprecated_alias(binned_sts='binned_spiketrain') +def correlation_coefficient(binned_spiketrain, binary=False, fast=True): r""" Calculate the NxN matrix of pairwise Pearson's correlation coefficients between all combinations of N binned spike trains. @@ -374,7 +385,7 @@ def corrcoef(binned_sts, binary=False, fast=True): Parameters ---------- - binned_sts : (N, ) elephant.conversion.BinnedSpikeTrain + binned_spiketrain : (N, ) elephant.conversion.BinnedSpikeTrain A binned spike train containing the spike trains to be evaluated. binary : bool, optional If True, two spikes of a particular spike train falling in the same bin @@ -382,7 +393,7 @@ def corrcoef(binned_sts, binary=False, fast=True): False, the binned vectors :math:`b_i` contain the spike counts per bin. Default: False. fast : bool, optional - If `fast=True` and the sparsity of `binned_sts` is `> 0.1`, use + If `fast=True` and the sparsity of `binned_spiketrain` is `> 0.1`, use `np.corrcoef()`. Otherwise, use memory efficient implementation. See Notes[2] Default: True. @@ -392,18 +403,18 @@ def corrcoef(binned_sts, binary=False, fast=True): C : (N, N) np.ndarray The square matrix of correlation coefficients. The element :math:`C[i,j]=C[j,i]` is the Pearson's correlation coefficient between - `binned_sts[i]` and `binned_sts[j]`. If `binned_sts` contains only one - `neo.SpikeTrain`, C=1.0. + `binned_spiketrain[i]` and `binned_spiketrain[j]`. + If `binned_spiketrain` contains only one `neo.SpikeTrain`, C=1.0. Raises ------ MemoryError - When using `fast=True` and `binned_sts` shape is large. + When using `fast=True` and `binned_spiketrain` shape is large. Warns -------- UserWarning - If at least one row in `binned_sts` is empty (has no spikes). + If at least one row in `binned_spiketrain` is empty (has no spikes). See Also -------- @@ -412,7 +423,7 @@ def corrcoef(binned_sts, binary=False, fast=True): Notes ----- 1. The spike trains in the binned structure are assumed to cover the - complete time span `[t_start, t_stop)` of `binned_sts`. + complete time span `[t_start, t_stop)` of `binned_spiketrain`. 2. Using `fast=True` might lead to `MemoryError`. If it's the case, switch to `fast=False`. @@ -428,23 +439,30 @@ def corrcoef(binned_sts, binary=False, fast=True): ... rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s) >>> st2 = homogeneous_poisson_process( ... rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s) - >>> cc_matrix = corrcoef(BinnedSpikeTrain([st1, st2], binsize=5*ms)) + >>> cc_matrix = correlation_coefficient(BinnedSpikeTrain([st1, st2], + ... 
bin_size=5*ms)) >>> print(cc_matrix[0, 1]) 0.015477320222075359 """ if binary: - binned_sts = binned_sts.binarize(copy=True) + binned_spiketrain = binned_spiketrain.binarize(copy=True) - if fast and binned_sts.sparsity > _SPARSITY_MEMORY_EFFICIENT_THR: - array = binned_sts.to_array() + if fast and binned_spiketrain.sparsity > _SPARSITY_MEMORY_EFFICIENT_THR: + array = binned_spiketrain.to_array() return np.corrcoef(array) return _covariance_sparse( - binned_sts, corrcoef_norm=True) + binned_spiketrain, corrcoef_norm=True) + + +def corrcoef(*args, **kwargs): + warnings.warn("'corrcoef' is deprecated; use 'correlation_coefficient'", + DeprecationWarning) + return correlation_coefficient(*args, **kwargs) -def _covariance_sparse(binned_sts, corrcoef_norm): +def _covariance_sparse(binned_spiketrain, corrcoef_norm): r""" Memory efficient helper function for `covariance()` and `corrcoef()` that performs the complete calculation for either the covariance @@ -466,7 +484,7 @@ def _covariance_sparse(binned_sts, corrcoef_norm): Parameters ---------- - binned_sts : (N, ) elephant.conversion.BinnedSpikeTrain + binned_spiketrain : (N, ) elephant.conversion.BinnedSpikeTrain See `covariance()` or `corrcoef()`, respectively. corrcoef_norm : bool Use normalization factor for the correlation coefficient rather than @@ -475,21 +493,21 @@ def _covariance_sparse(binned_sts, corrcoef_norm): Warns -------- UserWarning - If at least one row in `binned_sts` is empty (has no spikes). + If at least one row in `binned_spiketrain` is empty (has no spikes). Returns ------- (N, N) np.ndarray Pearson correlation or covariance matrix. """ - spmat = binned_sts._sparse_mat_u - n_bins = binned_sts.num_bins + spmat = binned_spiketrain._sparse_mat_u + n_bins = binned_spiketrain.n_bins # Check for empty spike trains n_spikes_per_row = spmat.sum(axis=1) if n_spikes_per_row.min() == 0: warnings.warn( - 'Detected empty spike trains (rows) in the argument binned_sts.') + 'Detected empty spike trains (rows) in the binned_spiketrain.') res = spmat.dot(spmat.T) - n_spikes_per_row * n_spikes_per_row.T / n_bins res = np.asarray(res) @@ -503,16 +521,21 @@ def _covariance_sparse(binned_sts, corrcoef_norm): return res +@deprecated_alias(binned_st1='binned_spiketrain_i', + binned_st2='binned_spiketrain_j', + cross_corr_coef='cross_correlation_coefficient') def cross_correlation_histogram( - binned_st1, binned_st2, window='full', border_correction=False, - binary=False, kernel=None, method='speed', cross_corr_coef=False): + binned_spiketrain_i, binned_spiketrain_j, window='full', + border_correction=False, binary=False, kernel=None, method='speed', + cross_correlation_coefficient=False): """ Computes the cross-correlation histogram (CCH) between two binned spike - trains `binned_st1` and `binned_st2`. + trains `binned_spiketrain_i` and `binned_spiketrain_j`. Parameters ---------- - binned_st1, binned_st2 : elephant.conversion.BinnedSpikeTrain + binned_spiketrain_i, binned_spiketrain_j : + elephant.conversion.BinnedSpikeTrain Binned spike trains of lengths N and M to cross-correlate. The input spike trains can have any `t_start` and `t_stop`. window : {'valid', 'full'} or list of int, optional @@ -563,7 +586,7 @@ def cross_correlation_histogram( implementation to calculate the correlation based on sparse matrices, which is more memory efficient but slower than the "speed" option. Default: "speed". 
- cross_corr_coef : bool, optional + cross_correlation_coefficient : bool, optional If True, a normalization is applied to the CCH to obtain the cross-correlation coefficient function ranging from -1 to 1 according to Equation (5.10) in [1]_. See Notes. @@ -572,20 +595,21 @@ def cross_correlation_histogram( Returns ------- cch_result : neo.AnalogSignal - Containing the cross-correlation histogram between `binned_st1` and - `binned_st2`. + Containing the cross-correlation histogram between + `binned_spiketrain_i` and `binned_spiketrain_j`. Offset bins correspond to correlations at delays equivalent - to the differences between the spike times of `binned_st1` and those of - `binned_st2`: an entry at positive lag corresponds to a spike in - `binned_st2` following a spike in `binned_st1` bins to the right, and - an entry at negative lag corresponds to a spike in `binned_st1` - following a spike in `binned_st2`. + to the differences between the spike times of `binned_spiketrain_i` and + those of `binned_spiketrain_j`: an entry at positive lag corresponds to + a spike in `binned_spiketrain_j` following a spike in + `binned_spiketrain_i` bins to the right, and an entry at negative lag + corresponds to a spike in `binned_spiketrain_i` following a spike in + `binned_spiketrain_j`. To illustrate this definition, consider two spike trains with the same `t_start` and `t_stop`: - `binned_st1` ('reference neuron') : 0 0 0 0 1 0 0 0 0 0 0 - `binned_st2` ('target neuron') : 0 0 0 0 0 0 0 1 0 0 0 + `binned_spiketrain_i` ('reference neuron') : 0 0 0 0 1 0 0 0 0 0 0 + `binned_spiketrain_j` ('target neuron') : 0 0 0 0 0 0 0 1 0 0 0 Here, the CCH will have an entry of `1` at `lag=+3`. Consistent with the definition of `neo.AnalogSignals`, the time axis @@ -617,18 +641,18 @@ def cross_correlation_histogram( >>> import matplotlib.pyplot as plt >>> import quantities as pq - >>> binned_st1 = elephant.conversion.BinnedSpikeTrain( + >>> binned_spiketrain_i = elephant.conversion.BinnedSpikeTrain( ... elephant.spike_train_generation.homogeneous_poisson_process( ... 10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms), - ... binsize=5. * pq.ms) - >>> binned_st2 = elephant.conversion.BinnedSpikeTrain( + ... bin_size=5. * pq.ms) + >>> binned_spiketrain_j = elephant.conversion.BinnedSpikeTrain( ... elephant.spike_train_generation.homogeneous_poisson_process( ... 10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms), - ... binsize=5. * pq.ms) + ... bin_size=5. * pq.ms) >>> cc_hist = \ ... elephant.spike_train_correlation.cross_correlation_histogram( - ... binned_st1, binned_st2, window=[-30,30], + ... binned_spiketrain_i, binned_spiketrain_j, window=[-30,30], ... border_correction=False, ... 
binary=False, kernel=None, method='memory') @@ -644,29 +668,31 @@ def cross_correlation_histogram( # Check that the spike trains are binned with the same temporal # resolution - if binned_st1.matrix_rows != 1 or binned_st2.matrix_rows != 1: + if binned_spiketrain_i.matrix_rows != 1 or \ + binned_spiketrain_j.matrix_rows != 1: raise ValueError("Spike trains must be one dimensional") - if not np.isclose(binned_st1.binsize.simplified.item(), - binned_st2.binsize.simplified.item()): + if not np.isclose(binned_spiketrain_i.bin_size.simplified.item(), + binned_spiketrain_j.bin_size.simplified.item()): raise ValueError("Bin sizes must be equal") - binsize = binned_st1.binsize - left_edge_min = -binned_st1.num_bins + 1 - right_edge_max = binned_st2.num_bins - 1 + bin_size = binned_spiketrain_i.bin_size + left_edge_min = -binned_spiketrain_i.n_bins + 1 + right_edge_max = binned_spiketrain_j.n_bins - 1 - t_lags_shift = (binned_st2.t_start - binned_st1.t_start) / binsize + t_lags_shift = (binned_spiketrain_j.t_start - + binned_spiketrain_i.t_start) / bin_size t_lags_shift = t_lags_shift.simplified.item() if not np.isclose(t_lags_shift, round(t_lags_shift)): - # For example, if binsize=1 ms, binned_st1.t_start=0 ms, and - # binned_st2.t_start=0.5 ms then there is a global shift in the - # binning of the spike trains. + # For example, if bin_size=1 ms, binned_spiketrain_i.t_start=0 ms, and + # binned_spiketrain_j.t_start=0.5 ms then there is a global shift in + # the binning of the spike trains. raise ValueError( - "Binned spiketrains time shift is not multiple of binsize") + "Binned spiketrains time shift is not multiple of bin_size") t_lags_shift = int(round(t_lags_shift)) # In the examples below we fix st2 and "move" st1. # Zero-lag is equal to `max(st1.t_start, st2.t_start)`. 
- # Binned spiketrains (t_start and t_stop) with binsize=1ms: + # Binned spiketrains (t_start and t_stop) with bin_size=1ms: # 1) st1=[3, 8] ms, st2=[1, 13] ms # t_start_shift = -2 ms # zero-lag is at 3 ms @@ -704,17 +730,18 @@ def cross_correlation_histogram( right_edge + 1 + t_lags_shift, dtype=np.int32) cch_mode = window elif window == 'valid': - lags = _CrossCorrHist.get_valid_lags(binned_st1, binned_st2) + lags = _CrossCorrHist.get_valid_lags(binned_spiketrain_i, + binned_spiketrain_j) left_edge, right_edge = lags[(0, -1), ] cch_mode = window else: raise ValueError("Invalid window parameter") if binary: - binned_st1 = binned_st1.binarize(copy=True) - binned_st2 = binned_st2.binarize(copy=True) + binned_spiketrain_i = binned_spiketrain_i.binarize(copy=True) + binned_spiketrain_j = binned_spiketrain_j.binarize(copy=True) - cch_builder = _CrossCorrHist(binned_st1, binned_st2, + cch_builder = _CrossCorrHist(binned_spiketrain_i, binned_spiketrain_j, window=(left_edge, right_edge)) if method == 'memory': cross_corr = cch_builder.correlate_memory(cch_mode=cch_mode) @@ -730,15 +757,15 @@ def cross_correlation_histogram( cross_corr = cch_builder.border_correction(cross_corr) if kernel is not None: cross_corr = cch_builder.kernel_smoothing(cross_corr, kernel=kernel) - if cross_corr_coef: - cross_corr = cch_builder.cross_corr_coef(cross_corr) + if cross_correlation_coefficient: + cross_corr = cch_builder.cross_correlation_coefficient(cross_corr) # Transform the array count into an AnalogSignal cch_result = neo.AnalogSignal( signal=np.expand_dims(cross_corr, axis=1), units=pq.dimensionless, - t_start=(lags[0] - 0.5) * binned_st1.binsize, - sampling_period=binned_st1.binsize) + t_start=(lags[0] - 0.5) * binned_spiketrain_i.bin_size, + sampling_period=binned_spiketrain_i.bin_size) return cch_result, lags @@ -746,7 +773,8 @@ def cross_correlation_histogram( cch = cross_correlation_histogram -def spike_time_tiling_coefficient(spiketrain_1, spiketrain_2, dt=0.005 * pq.s): +@deprecated_alias(spiketrain_1='spiketrain_i', spiketrain_2='spiketrain_j') +def spike_time_tiling_coefficient(spiketrain_i, spiketrain_j, dt=0.005 * pq.s): """ Calculates the Spike Time Tiling Coefficient (STTC) as described in [1]_ following their implementation in C. @@ -774,18 +802,19 @@ def spike_time_tiling_coefficient(spiketrain_1, spiketrain_2, dt=0.005 * pq.s): This is a Python implementation compatible with the elephant library of the original code by C. Cutts written in C and avaiable at: - (https://github.com/CCutts/Detecting_pairwise_correlations_in_spike_trains/blob/master/spike_time_tiling_coefficient.c) + (https://github.com/CCutts/Detecting_pairwise_correlations_in_spike_trains/ + blob/master/spike_time_tiling_coefficient.c) Parameters ---------- - spiketrain_1, spiketrain_2: neo.SpikeTrain + spiketrain_i, spiketrain_j: neo.SpikeTrain Spike trains to cross-correlate. They must have the same `t_start` and `t_stop`. dt: pq.Quantity. The synchronicity window is used for both: the quantification of the proportion of total recording time that lies `[-dt, +dt]` of each spike - in each train and the proportion of spikes in `spiketrain_1` that lies - `[-dt, +dt]` of any spike in `spiketrain_2`. + in each train and the proportion of spikes in `spiketrain_i` that lies + `[-dt, +dt]` of any spike in `spiketrain_j`. 
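A short usage sketch for the renamed `spiketrain_i`/`spiketrain_j` arguments (spike times and `dt` are illustrative; both trains must share the same `t_start` and `t_stop`):

>>> import quantities as pq
>>> import neo
>>> from elephant.spike_train_correlation import spike_time_tiling_coefficient
>>> spiketrain_i = neo.SpikeTrain([0.10, 0.50, 1.20] * pq.s, t_stop=2 * pq.s)
>>> spiketrain_j = neo.SpikeTrain([0.11, 0.52, 1.90] * pq.s, t_stop=2 * pq.s)
>>> sttc_value = spike_time_tiling_coefficient(spiketrain_i, spiketrain_j,
...                                            dt=0.01 * pq.s)  # value in [-1, 1]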
Default : `0.005 * pq.s` Returns @@ -806,40 +835,40 @@ def spike_time_tiling_coefficient(spiketrain_1, spiketrain_2, dt=0.005 * pq.s): Alias: `sttc` """ - def run_P(spiketrain_1, spiketrain_2): + def run_P(spiketrain_i, spiketrain_j): """ Check every spike in train 1 to see if there's a spike in train 2 within dt """ - N2 = len(spiketrain_2) + N2 = len(spiketrain_j) - # Search spikes of spiketrain_1 in spiketrain_2 + # Search spikes of spiketrain_i in spiketrain_j # ind will contain index of - ind = np.searchsorted(spiketrain_2.times, spiketrain_1.times) + ind = np.searchsorted(spiketrain_j.times, spiketrain_i.times) # To prevent IndexErrors - # If a spike of spiketrain_1 is after the last spike of spiketrain_2, - # the index is N2, however spiketrain_2[N2] raises an IndexError. - # By shifting this index, the spike of spiketrain_1 will be compared - # to the last 2 spikes of spiketrain_2 (negligible overhead). + # If a spike of spiketrain_i is after the last spike of spiketrain_j, + # the index is N2, however spiketrain_j[N2] raises an IndexError. + # By shifting this index, the spike of spiketrain_i will be compared + # to the last 2 spikes of spiketrain_j (negligible overhead). # Note: Not necessary for index 0 that will be shifted to -1, - # because spiketrain_2[-1] is valid (additional negligible comparison) + # because spiketrain_j[-1] is valid (additional negligible comparison) ind[ind == N2] = N2 - 1 - # Compare to nearest spike in spiketrain_2 BEFORE spike in spiketrain_1 + # Compare to nearest spike in spiketrain_j BEFORE spike in spiketrain_i close_left = np.abs( - spiketrain_2.times[ind - 1] - spiketrain_1.times) <= dt - # Compare to nearest spike in spiketrain_2 AFTER (or simultaneous) - # spike in spiketrain_2 + spiketrain_j.times[ind - 1] - spiketrain_i.times) <= dt + # Compare to nearest spike in spiketrain_j AFTER (or simultaneous) + # spike in spiketrain_j close_right = np.abs( - spiketrain_2.times[ind] - spiketrain_1.times) <= dt + spiketrain_j.times[ind] - spiketrain_i.times) <= dt - # spiketrain_2 spikes that are in [-dt, dt] range of spiketrain_1 + # spiketrain_j spikes that are in [-dt, dt] range of spiketrain_i # spikes are counted only ONCE (as per original implementation) close = close_left + close_right - # Count how many spikes in spiketrain_1 have a "partner" in - # spiketrain_2 + # Count how many spikes in spiketrain_i have a "partner" in + # spiketrain_j return np.count_nonzero(close) def run_T(spiketrain): @@ -872,17 +901,17 @@ def run_T(spiketrain): T = time_A / (spiketrain.t_stop - spiketrain.t_start) return T.simplified.item() # enforce simplification, strip units - N1 = len(spiketrain_1) - N2 = len(spiketrain_2) + N1 = len(spiketrain_i) + N2 = len(spiketrain_j) if N1 == 0 or N2 == 0: index = np.nan else: - TA = run_T(spiketrain_1) - TB = run_T(spiketrain_2) - PA = run_P(spiketrain_1, spiketrain_2) + TA = run_T(spiketrain_i) + TB = run_T(spiketrain_j) + PA = run_P(spiketrain_i, spiketrain_j) PA = PA / N1 - PB = run_P(spiketrain_2, spiketrain_1) + PB = run_P(spiketrain_j, spiketrain_i) PB = PB / N2 # check if the P and T values are 1 to avoid division by zero # This only happens for TA = PB = 1 and/or TB = PA = 1, @@ -906,7 +935,8 @@ def run_T(spiketrain): sttc = spike_time_tiling_coefficient -def spike_train_timescale(binned_st, tau_max): +@deprecated_alias(binned_st='binned_spiketrain', tau_max='max_tau') +def spike_train_timescale(binned_spiketrain, max_tau): r""" Calculates the auto-correlation time of a binned spike train. 
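A hedged usage sketch for the renamed `binned_spiketrain` and `max_tau` arguments (parameter values are illustrative; `max_tau` must be a multiple of the bin size, as the code below enforces):

>>> import quantities as pq
>>> from elephant.conversion import BinnedSpikeTrain
>>> from elephant.spike_train_generation import homogeneous_poisson_process
>>> from elephant.spike_train_correlation import spike_train_timescale
>>> spiketrain = homogeneous_poisson_process(50 * pq.Hz, t_stop=10 * pq.s)
>>> bst = BinnedSpikeTrain(spiketrain, bin_size=1 * pq.ms)
>>> timescale = spike_train_timescale(bst, max_tau=100 * pq.ms)  # 100 bins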
Uses the definition of the auto-correlation time proposed in [[1]_, @@ -921,17 +951,19 @@ def spike_train_timescale(binned_st, tau_max): Parameters ---------- - binned_st : elephant.conversion.BinnedSpikeTrain + binned_spiketrain : elephant.conversion.BinnedSpikeTrain A binned spike train containing the spike train to be evaluated. - tau_max : pq.Quantity - Maximal integration time of the auto-correlation function. It needs to - be a multiple of the binsize of `binned_st`. + max_tau : pq.Quantity + Maximal integration time :math:`\tau_{max}` of the auto-correlation + function. It needs to be a multiple of the `bin_size` of + `binned_spiketrain`. Returns ------- timescale : pq.Quantity - The auto-correlation time of the binned spiketrain. If `binned_st` has - less than 2 spikes `np.nan` is returned. + The auto-correlation time of the binned spiketrain. If + `binned_spiketrain` has less than 2 spikes, a warning is raised and + `np.nan` is returned. Notes ----- @@ -941,9 +973,9 @@ def spike_train_timescale(binned_st, tau_max): necessary to introduce a cutoff for the numerical integration - this cutoff should be neither smaller than the true auto-correlation time nor much bigger. - * The bin size of `binned_st` is another critical parameter as it defines - the discretization of the integral :math:`d\tau`. If it is too big, the - numerical approximation of the integral is inaccurate. + * The bin size of `binned_spiketrain` is another critical parameter as it + defines the discretization of the integral :math:`d\tau`. If it is too + big, the numerical approximation of the integral is inaccurate. References ---------- @@ -951,29 +983,30 @@ def spike_train_timescale(binned_st, tau_max): Slow fluctuations in recurrent networks of spiking neurons. Physical Review E, 92(4), 040901. """ - if binned_st.get_num_of_spikes() < 2: + if binned_spiketrain.get_num_of_spikes() < 2: warnings.warn("Spike train contains less than 2 spikes! " "np.nan will be returned.") return np.nan - binsize = binned_st.binsize - if not (tau_max / binsize).simplified.units == pq.dimensionless: - raise ValueError("tau_max needs units of time") + bin_size = binned_spiketrain.bin_size + if not (max_tau / bin_size).simplified.units == pq.dimensionless: + raise ValueError("max_tau needs units of time") - # safe casting of tau_max/binsize to integer - tau_max_bins = int(np.round((tau_max / binsize).simplified.magnitude)) - if not np.isclose(tau_max.simplified.magnitude, - (tau_max_bins * binsize).simplified.magnitude): - raise ValueError("tau_max has to be a multiple of the binsize") + # safe casting of max_tau/bin_size to integer + max_tau_bins = int(np.round((max_tau / bin_size).simplified.magnitude)) + if not np.isclose(max_tau.simplified.magnitude, + (max_tau_bins * bin_size).simplified.magnitude): + raise ValueError("max_tau has to be a multiple of the bin_size") - cch_window = [-tau_max_bins, tau_max_bins] + cch_window = [-max_tau_bins, max_tau_bins] corrfct, bin_ids = cross_correlation_histogram( - binned_st, binned_st, window=cch_window, cross_corr_coef=True + binned_spiketrain, binned_spiketrain, window=cch_window, + cross_correlation_coefficient=True ) # Take only t > 0 values, in particular neglecting the delta peak. 
- corrfct_pos = corrfct.time_slice(binsize / 2, corrfct.t_stop).flatten() + corrfct_pos = corrfct.time_slice(bin_size / 2, corrfct.t_stop).flatten() # Calculate the timescale using trapezoidal integration integr = np.abs((corrfct_pos / corrfct_pos[0]).magnitude)**2 - timescale = 2 * integrate.trapz(integr, dx=binsize) + timescale = 2 * integrate.trapz(integr, dx=bin_size) return timescale diff --git a/elephant/spike_train_dissimilarity.py b/elephant/spike_train_dissimilarity.py index bb23bd242..194d8fe05 100644 --- a/elephant/spike_train_dissimilarity.py +++ b/elephant/spike_train_dissimilarity.py @@ -15,12 +15,16 @@ from __future__ import division, print_function, unicode_literals -import quantities as pq +import warnings + import numpy as np +import quantities as pq import scipy as sp -import elephant.kernels as kernels from neo.core import SpikeTrain +import elephant.kernels as kernels +from elephant.utils import deprecated_alias + def _create_matrix_from_indexed_function( shape, func, symmetric_2d=False, **func_params): @@ -35,8 +39,9 @@ def _create_matrix_from_indexed_function( return mat -def victor_purpura_dist( - trains, q=1.0 * pq.Hz, kernel=None, sort=True, algorithm='fast'): +@deprecated_alias(trains='spiketrains', q='cost_factor') +def victor_purpura_distance(spiketrains, cost_factor=1.0 * pq.Hz, kernel=None, + sort=True, algorithm='fast'): """ Calculates the Victor-Purpura's (VP) distance. It is often denoted as :math:`D^{\\text{spike}}[q]`. @@ -59,16 +64,16 @@ def victor_purpura_dist( Parameters ---------- - trains : Sequence of :class:`neo.core.SpikeTrain` objects of - which the distance will be calculated pairwise. - q: Quantity scalar - Cost factor for spike shifts as inverse time scalar. + spiketrains : list of neo.SpikeTrain + Spike trains to calculate pairwise distance. + cost_factor: pq.Quantity + A cost factor :math:`q` for spike shifts as inverse time scalar. Extreme values :math:`q=0` meaning no cost for any shift of spikes, or :math: `q=np.inf` meaning infinite cost for any spike shift and hence exclusion of spike shifts, are explicitly allowed. If `kernel` is not `None`, :math:`q` will be ignored. Default: 1.0 * pq.Hz - kernel: :class:`.kernels.Kernel` + kernel: kernels.Kernel Kernel to use in the calculation of the distance. If `kernel` is `None`, an unnormalized triangular kernel with standard deviation of :math:'2.0/(q * sqrt(6.0))' corresponding to a half width of @@ -98,37 +103,39 @@ def victor_purpura_dist( Examples -------- >>> import quantities as pq - >>> from elephant.spike_train_dissimilarity import victor_purpura_dist + >>> from elephant.spike_train_dissimilarity import victor_purpura_distance >>> q = 1.0 / (10.0 * pq.ms) >>> st_a = SpikeTrain([10, 20, 30], units='ms', t_stop= 1000.0) >>> st_b = SpikeTrain([12, 24, 30], units='ms', t_stop= 1000.0) - >>> vp_f = victor_purpura_dist([st_a, st_b], q)[0, 1] - >>> vp_i = victor_purpura_dist([st_a, st_b], q, + >>> vp_f = victor_purpura_distance([st_a, st_b], q)[0, 1] + >>> vp_i = victor_purpura_distance([st_a, st_b], q, ... 
algorithm='intuitive')[0, 1]
    """
-    for train in trains:
+    for train in spiketrains:
         if not (isinstance(train, (pq.quantity.Quantity, SpikeTrain)) and
                 train.dimensionality.simplified ==
                 pq.Quantity(1, "s").dimensionality.simplified):
             raise TypeError("Spike trains must have a time unit.")
-    if not (isinstance(q, pq.quantity.Quantity) and
-            q.dimensionality.simplified ==
+    if not (isinstance(cost_factor, pq.quantity.Quantity) and
+            cost_factor.dimensionality.simplified ==
             pq.Quantity(1, "Hz").dimensionality.simplified):
-        raise TypeError("q must be a rate quantity.")
+        raise TypeError("cost_factor must be a rate quantity.")

     if kernel is None:
-        if q == 0.0:
-            num_spikes = np.atleast_2d([st.size for st in trains])
+        if cost_factor == 0.0:
+            num_spikes = np.atleast_2d([st.size for st in spiketrains])
             return np.absolute(num_spikes.T - num_spikes)
-        elif q == np.inf:
-            num_spikes = np.atleast_2d([st.size for st in trains])
+        elif cost_factor == np.inf:
+            num_spikes = np.atleast_2d([st.size for st in spiketrains])
             return num_spikes.T + num_spikes
         else:
-            kernel = kernels.TriangularKernel(2.0 / (np.sqrt(6.0) * q))
+            kernel = kernels.TriangularKernel(
+                sigma=2.0 / (np.sqrt(6.0) * cost_factor))

     if sort:
-        trains = [np.sort(st.view(type=pq.Quantity)) for st in trains]
+        spiketrains = [np.sort(st.view(type=pq.Quantity))
+                       for st in spiketrains]

     def compute(i, j):
         if i == j:
@@ -136,19 +143,25 @@ def compute(i, j):
         else:
             if algorithm == 'fast':
                 return _victor_purpura_dist_for_st_pair_fast(
-                    trains[i], trains[j], kernel)
+                    spiketrains[i], spiketrains[j], kernel)
             elif algorithm == 'intuitive':
                 return _victor_purpura_dist_for_st_pair_intuitive(
-                    trains[i], trains[j], q)
+                    spiketrains[i], spiketrains[j], cost_factor)
             else:
                 raise NameError("algorithm must be either 'fast' "
                                 "or 'intuitive'.")
     return _create_matrix_from_indexed_function(
-        (len(trains), len(trains)), compute, kernel.is_symmetric())
+        (len(spiketrains), len(spiketrains)), compute, kernel.is_symmetric())


-def _victor_purpura_dist_for_st_pair_fast(train_a, train_b, kernel):
+def victor_purpura_dist(*args, **kwargs):
+    warnings.warn("'victor_purpura_dist' function is deprecated; "
+                  "use 'victor_purpura_distance'", DeprecationWarning)
+    return victor_purpura_distance(*args, **kwargs)
+
+
+def _victor_purpura_dist_for_st_pair_fast(spiketrain_a, spiketrain_b, kernel):
     """
     The algorithm used is based on the one given in

@@ -186,7 +199,7 @@
     Parameters
     ----------
-    train_a, train_b : :class:`neo.core.SpikeTrain` objects of
+    spiketrain_a, spiketrain_b : :class:`neo.core.SpikeTrain` objects of
         which the Victor-Purpura distance will be calculated pairwise.
     kernel: :class:`.kernels.Kernel`
         Kernel to use in the calculation of the distance.
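A minimal usage sketch of the renamed public entry point, mirroring the doctest above (assumes `elephant`, `neo` and `quantities` are installed); the old `trains=`/`q=` keywords are still accepted through `deprecated_alias`, which is expected to emit a `DeprecationWarning`::

    import quantities as pq
    from neo.core import SpikeTrain
    from elephant.spike_train_dissimilarity import victor_purpura_distance

    st_a = SpikeTrain([10, 20, 30], units='ms', t_stop=1000.0)
    st_b = SpikeTrain([12, 24, 30], units='ms', t_stop=1000.0)
    # new keyword name; 1/(10 ms) is a rate quantity, as required
    vp = victor_purpura_distance([st_a, st_b],
                                 cost_factor=1.0 / (10.0 * pq.ms))[0, 1]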
@@ -197,17 +210,17 @@ def _victor_purpura_dist_for_st_pair_fast(train_a, train_b, kernel): The Victor-Purpura distance of train_a and train_b """ - if train_a.size <= 0 or train_b.size <= 0: - return max(train_a.size, train_b.size) + if spiketrain_a.size <= 0 or spiketrain_b.size <= 0: + return max(spiketrain_a.size, spiketrain_b.size) - if train_a.size < train_b.size: - train_a, train_b = train_b, train_a + if spiketrain_a.size < spiketrain_b.size: + spiketrain_a, spiketrain_b = spiketrain_b, spiketrain_a - min_dim, max_dim = train_b.size, train_a.size + 1 + min_dim, max_dim = spiketrain_b.size, spiketrain_a.size + 1 cost = np.asfortranarray(np.tile(np.arange(float(max_dim)), (2, 1))) decreasing_sequence = np.asfortranarray(cost[:, ::-1]) - kern = kernel((np.atleast_2d(train_a).T.view(type=pq.Quantity) - - train_b.view(type=pq.Quantity))) + kern = kernel((np.atleast_2d(spiketrain_a).T.view(type=pq.Quantity) - + spiketrain_b.view(type=pq.Quantity))) as_fortran = np.asfortranarray( ((np.sqrt(6.0) * kernel.sigma) * kern).simplified) k = 1 - 2 * as_fortran @@ -215,8 +228,8 @@ def _victor_purpura_dist_for_st_pair_fast(train_a, train_b, kernel): for i in range(min_dim): # determine G[i, i] == accumulated_min[:, 0] accumulated_min = cost[:, :-i - 1] + k[i:, i] - accumulated_min[1, :train_b.size - i] = \ - cost[1, :train_b.size - i] + k[i, i:] + accumulated_min[1, :spiketrain_b.size - i] = \ + cost[1, :spiketrain_b.size - i] + k[i, i:] accumulated_min = np.minimum( accumulated_min, # shift cost[:, 1:max_dim - i]) # insert @@ -230,8 +243,8 @@ def _victor_purpura_dist_for_st_pair_fast(train_a, train_b, kernel): return cost[0, -min_dim - 1] -def _victor_purpura_dist_for_st_pair_intuitive( - train_a, train_b, q=1.0 * pq.Hz): +def _victor_purpura_dist_for_st_pair_intuitive(spiketrain_a, spiketrain_b, + cost_factor=1.0 * pq.Hz): """ Function to calculate the Victor-Purpura distance between two spike trains described in *J. D. Victor and K. P. Purpura, Nature and precision of @@ -254,9 +267,9 @@ def _victor_purpura_dist_for_st_pair_intuitive( Parameters ---------- - train_a, train_b : :class:`neo.core.SpikeTrain` objects of + spiketrain_a, spiketrain_b : :class:`neo.core.SpikeTrain` objects of which the Victor-Purpura distance will be calculated pairwise. - q : Quantity scalar of rate dimension + cost_factor : Quantity scalar of rate dimension The cost parameter. Default: 1.0 * pq.Hz @@ -265,8 +278,8 @@ def _victor_purpura_dist_for_st_pair_intuitive( float The Victor-Purpura distance of train_a and train_b """ - nspk_a = len(train_a) - nspk_b = len(train_b) + nspk_a = len(spiketrain_a) + nspk_b = len(spiketrain_b) scr = np.zeros((nspk_a+1, nspk_b+1)) scr[:, 0] = range(0, nspk_a+1) scr[0, :] = range(0, nspk_b+1) @@ -275,19 +288,22 @@ def _victor_purpura_dist_for_st_pair_intuitive( for i in range(1, nspk_a+1): for j in range(1, nspk_b+1): scr[i, j] = min(scr[i-1, j]+1, scr[i, j-1]+1) - scr[i, j] = min(scr[i, j], scr[i-1, j-1] + np.float64(( - q*abs(train_a[i-1]-train_b[j-1])).simplified)) + scr[i, j] = min(scr[i, j], scr[i-1, j-1] + + np.float64(( + cost_factor * abs( + spiketrain_a[i - 1] - + spiketrain_b[j - 1])).simplified)) return scr[nspk_a, nspk_b] -def van_rossum_dist(trains, tau=1.0 * pq.s, sort=True): +@deprecated_alias(trains='spiketrains', tau='time_constant') +def van_rossum_distance(spiketrains, time_constant=1.0 * pq.s, sort=True): """ Calculates the van Rossum distance. It is defined as Euclidean distance of the spike trains convolved with a causal decaying exponential smoothing filter. 
A detailed description can - be found in *Rossum, M. C. W. (2001). A novel spike distance. Neural - Computation, 13(4), 751-763.* This implementation is normalized to yield + be found in [1]_. This implementation is normalized to yield a distance of 1.0 for the distance between an empty spike train and a spike train with a single spike. Divide the result by sqrt(2.0) to get the normalization used in the cited paper. @@ -297,13 +313,14 @@ def van_rossum_dist(trains, tau=1.0 * pq.s, sort=True): Parameters ---------- - trains : Sequence of :class:`neo.core.SpikeTrain` objects of + spiketrains : Sequence of :class:`neo.core.SpikeTrain` objects of which the van Rossum distance will be calculated pairwise. - tau : Quantity scalar + time_constant : Quantity scalar Decay rate of the exponential function as time scalar. Controls for - which time scale the metric will be sensitive. This parameter will - be ignored if `kernel` is not `None`. May also be :const:`scipy.inf` - which will lead to only measuring differences in spike count. + which time scale the metric will be sensitive. Denoted as :math:`t_c` + in [1]_. This parameter will be ignored if `kernel` is not `None`. + May also be :const:`scipy.inf` which will lead to only measuring + differences in spike count. Default: 1.0 * pq.s sort : bool Spike trains with sorted spike times might be needed for the @@ -317,34 +334,40 @@ def van_rossum_dist(trains, tau=1.0 * pq.s, sort=True): 2-D Matrix containing the van Rossum distances for all pairs of spike trains. + References + ---------- + [1] Rossum, M. V. (2001). A novel spike distance. Neural computation, + 13(4), 751-763. + Examples -------- - >>> from elephant.spike_train_dissimilarity import van_rossum_dist + >>> from elephant.spike_train_dissimilarity import van_rossum_distance >>> tau = 10.0 * pq.ms >>> st_a = SpikeTrain([10, 20, 30], units='ms', t_stop= 1000.0) >>> st_b = SpikeTrain([12, 24, 30], units='ms', t_stop= 1000.0) - >>> vr = van_rossum_dist([st_a, st_b], tau)[0, 1] + >>> vr = van_rossum_distance([st_a, st_b], tau)[0, 1] """ - for train in trains: + for train in spiketrains: if not (isinstance(train, (pq.quantity.Quantity, SpikeTrain)) and train.dimensionality.simplified == pq.Quantity(1, "s").dimensionality.simplified): raise TypeError("Spike trains must have a time unit.") - if not (isinstance(tau, pq.quantity.Quantity) and - tau.dimensionality.simplified == + if not (isinstance(time_constant, pq.quantity.Quantity) and + time_constant.dimensionality.simplified == pq.Quantity(1, "s").dimensionality.simplified): raise TypeError("tau must be a time quantity.") - if tau == 0: - spike_counts = [st.size for st in trains] + if time_constant == 0: + spike_counts = [st.size for st in spiketrains] return np.sqrt(spike_counts + np.atleast_2d(spike_counts).T) - elif tau == np.inf: - spike_counts = [st.size for st in trains] + elif time_constant == np.inf: + spike_counts = [st.size for st in spiketrains] return np.absolute(spike_counts - np.atleast_2d(spike_counts).T) k_dist = _summed_dist_matrix( - [st.view(type=pq.Quantity) for st in trains], tau, not sort) + [st.view(type=pq.Quantity) + for st in spiketrains], time_constant, not sort) vr_dist = np.empty_like(k_dist) for i, j in np.ndindex(k_dist.shape): vr_dist[i, j] = ( @@ -352,6 +375,12 @@ def van_rossum_dist(trains, tau=1.0 * pq.s, sort=True): return sp.sqrt(vr_dist) +def van_rossum_dist(*args, **kwargs): + warnings.warn("'van_rossum_dist' function is deprecated; " + "use 'van_rossum_distance'", DeprecationWarning) + return 
van_rossum_distance(*args, **kwargs) + + def _summed_dist_matrix(spiketrains, tau, presorted=False): # The algorithm underlying this implementation is described in # Houghton, C., & Kreuz, T. (2012). On the efficient calculation of van diff --git a/elephant/spike_train_generation.py b/elephant/spike_train_generation.py index c8b613c93..ed7d50484 100644 --- a/elephant/spike_train_generation.py +++ b/elephant/spike_train_generation.py @@ -20,10 +20,12 @@ import quantities as pq from elephant.spike_train_surrogates import dither_spike_train +from elephant.utils import deprecated_alias +@deprecated_alias(extr_interval='interval') def spike_extraction(signal, threshold=0.0 * pq.mV, sign='above', - time_stamps=None, extr_interval=(-2 * pq.ms, 4 * pq.ms)): + time_stamps=None, interval=(-2 * pq.ms, 4 * pq.ms)): """ Return the peak times for all events that cross threshold and the waveforms. Usually used for extracting spikes from a membrane @@ -46,7 +48,7 @@ def spike_extraction(signal, threshold=0.0 * pq.mV, sign='above', function `peak_detection` is used to calculate the time_stamps from signal. Default: None. - extr_interval : tuple of pq.Quantity + interval : tuple of pq.Quantity Specifies the time interval around the `time_stamps` where the waveform is extracted. Default: (-2 * pq.ms, 4 * pq.ms). @@ -77,11 +79,11 @@ def spike_extraction(signal, threshold=0.0 * pq.mV, sign='above', sampling_rate=signal.sampling_rate) # Unpack the extraction interval from tuple or array - extr_left, extr_right = extr_interval + extr_left, extr_right = interval if extr_left > extr_right: - raise ValueError("extr_interval[0] must be < extr_interval[1]") + raise ValueError("interval[0] must be < interval[1]") - if any(np.diff(time_stamps) < extr_interval[1]): + if any(np.diff(time_stamps) < interval[1]): warnings.warn("Waveforms overlap.", UserWarning) data_left = (extr_left * signal.sampling_rate).simplified.magnitude @@ -103,7 +105,7 @@ def spike_extraction(signal, threshold=0.0 * pq.mV, sign='above', np.split(np.array(signal), borders.astype(int))[1::2]) * signal.units # len(np.shape(waveforms)) == 1 if waveforms do not have the same width. - # this can occur when extr_interval indexes beyond the signal. + # this can occur when extraction interval indexes beyond the signal. # Workaround: delete spikes shorter than the maximum length with if len(np.shape(waveforms)) == 1: max_len = (np.array([len(x) for x in waveforms])).max() @@ -114,7 +116,7 @@ def spike_extraction(signal, threshold=0.0 * pq.mV, sign='above', warnings.warn("Waveforms " + ("{:d}, " * len(to_delete)).format(*to_delete) + "exceeded signal and had to be deleted. " + - "Change extr_interval to keep.") + "Change 'interval' to keep.") waveforms = waveforms[:, np.newaxis, :] @@ -183,7 +185,9 @@ def threshold_detection(signal, threshold=0.0 * pq.mV, sign='above'): return result_st -def peak_detection(signal, threshold=0.0 * pq.mV, sign='above', format=None): +@deprecated_alias(format='as_array') +def peak_detection(signal, threshold=0.0 * pq.mV, sign='above', + as_array=False): """ Return the peak times for all events that cross threshold. Usually used for extracting spike times from a membrane potential. @@ -200,9 +204,16 @@ def peak_detection(signal, threshold=0.0 * pq.mV, sign='above', format=None): Determines whether to count threshold crossings that cross above or below the threshold. Default: 'above'. + as_array : bool, optional + If True, a NumPy array of the resulting peak times is returned instead + of a (default) `neo.SpikeTrain` object. 
+        Default: False.
     format : {None, 'raw'}, optional
+        .. deprecated:: 0.8.0
         Whether to return as SpikeTrain (None)
         or as a plain array of times ('raw').
+        Deprecated. Use `as_array=False` for None format and `as_array=True`
+        otherwise.
         Default: None.

     Returns
@@ -217,8 +228,10 @@ def peak_detection(signal, threshold=0.0 * pq.mV, sign='above', format=None):
     if sign not in ('above', 'below'):
         raise ValueError("sign should be 'above' or 'below'")

-    if format not in (None, 'raw'):
-        raise ValueError("Format argument must be None or 'raw'")
+    if as_array in (None, 'raw'):
+        warnings.warn("'format' is deprecated; use as_array=True",
+                      DeprecationWarning)
+        as_array = bool(as_array)

     if sign == 'above':
         cutout = np.where(signal > threshold)[0]
@@ -235,8 +248,7 @@
     # This avoids empty slices
     border_start = np.where(np.diff(cutout) > 1)[0]
     border_end = border_start + 1
-    borders = np.r_[0, border_start, border_end, len(cutout) - 1]
-    borders.sort()
+    borders = sorted(np.r_[0, border_start, border_end, len(cutout) - 1])
     true_borders = cutout[borders]
     right_borders = true_borders[1::2] + 1
     true_borders = np.sort(np.append(true_borders[0::2], right_borders))
@@ -261,13 +273,12 @@
     # bug in quantities.
     events_base = np.array(
         [event.magnitude for event in events])  # Workaround
-    if format is None:
-        result_st = neo.SpikeTrain(events_base, units=signal.times.units,
-                                   t_start=signal.t_start,
-                                   t_stop=signal.t_stop)
-    else:
-        # format == 'raw'
-        result_st = events_base
+
+    result_st = neo.SpikeTrain(events_base, units=signal.times.units,
+                               t_start=signal.t_start,
+                               t_stop=signal.t_stop)
+    if as_array:
+        result_st = result_st.magnitude

     return result_st

@@ -376,7 +387,8 @@ def homogeneous_poisson_process(rate, t_start=0.0 * pq.ms,
         raise ValueError("rate must be of type pq.Quantity")
     if not isinstance(refractory_period, pq.Quantity) and \
             refractory_period is not None:
-        raise ValueError("refr_period must be of type pq.Quantity or None")
+        raise ValueError("refractory_period must be of type pq.Quantity or "
+                         "None")

     rate = rate.simplified

@@ -457,7 +469,8 @@ def inhomogeneous_poisson_process(rate, as_array=False,
                          'rate at time t')
     if not isinstance(refractory_period, pq.Quantity) and \
             refractory_period is not None:
-        raise ValueError("refr_period must be of type pq.Quantity or None")
+        raise ValueError("refractory_period must be of type pq.Quantity or "
+                         "None")

     rate_max = np.max(rate)
     if refractory_period is not None:
@@ -625,6 +638,9 @@ def homogeneous_gamma_process(a, b, t_start=0.0 * pq.ms, t_stop=1000.0 * pq.ms,
     ...
5.0, 20*pq.Hz, 5000*pq.ms, 10000*pq.ms, as_array=True) """ + # note that the rate of the gamma distribution is called 'b' and not 'rate' + # to avoid false thoughts that 'rate' could be the mean firing rate, which + # equals to b / a if not (isinstance(t_start, pq.Quantity) and isinstance(t_stop, pq.Quantity)): raise ValueError("t_start and t_stop must be of type pq.pq.Quantity") @@ -701,10 +717,12 @@ def _n_poisson(rate, t_stop, t_start=0.0 * pq.ms, n=1): for rate in rates] +@deprecated_alias(rate_c='coincidence_rate', n='n_spiketrains', + return_coinc='return_coincidences') def single_interaction_process( - rate, rate_c, t_stop, n=2, jitter=0 * pq.ms, + rate, coincidence_rate, t_stop, n_spiketrains=2, jitter=0 * pq.ms, coincidences='deterministic', t_start=0 * pq.ms, min_delay=0 * pq.ms, - return_coinc=False): + return_coincidences=False): """ Generates a multidimensional Poisson SIP (single interaction process) plus independent Poisson processes @@ -722,20 +740,23 @@ def single_interaction_process( 0 and `t_stop`. rate : pq.Quantity Overall mean rate of the time series to be generated (coincidence - rate `rate_c` is subtracted to determine the background rate). Can be: + rate `coincidence_rate` is subtracted to determine the background + rate). Can be: * a float, representing the overall mean rate of each process. If - so, it must be higher than `rate_c`. + so, it must be higher than `coincidence_rate`. * an iterable of floats (one float per process), each float representing the overall mean rate of a process. If so, all the - entries must be larger than `rate_c`. - rate_c : pq.Quantity + entries must be larger than `coincidence_rate`. + coincidence_rate : pq.Quantity Coincidence rate (rate of coincidences for the n-dimensional SIP). - The SIP spike trains will have coincident events with rate `rate_c` - plus independent 'background' events with rate `rate-rate_c`. - n : int, optional - If `rate` is a single pq.Quantity value, `n` specifies the number of - SpikeTrains to be generated. If rate is an array, `n` is ignored and - the number of SpikeTrains is equal to `len(rate)`. + The SIP spike trains will have coincident events with rate + `coincidence_rate` plus independent 'background' events with rate + `rate-rate_coincidence`. + n_spiketrains : int, optional + If `rate` is a single pq.Quantity value, `n_spiketrains` specifies the + number of SpikeTrains to be generated. If rate is an array, + `n_spiketrains` is ignored and the number of SpikeTrains is equal to + `len(rate)`. Default: 2 jitter : pq.Quantity, optional Jitter for the coincident events. If `jitter == 0`, the events of all @@ -744,9 +765,9 @@ def single_interaction_process( Default: 0 * pq.ms coincidences : {'deterministic', 'stochastic'}, optional Whether the total number of injected coincidences must be determin- - istic (i.e. rate_c is the actual rate with which coincidences are - generated) or stochastic (i.e. rate_c is the mean rate of coincid- - ences): + istic (i.e. rate_coincidence is the actual rate with which coincidences + are generated) or stochastic (i.e. rate_coincidence is the mean rate of + coincidences): * 'deterministic': deterministic rate * 'stochastic': stochastic rate @@ -758,7 +779,7 @@ def single_interaction_process( min_delay : pq.Quantity, optional Minimum delay between consecutive coincidence times. 
Default: 0 * pq.ms - return_coinc : bool, optional + return_coincidences : bool, optional Whether to return the coincidence times for the SIP process Default: False @@ -779,16 +800,17 @@ def single_interaction_process( -------- >>> import quantities as pq >>> import elephant.spike_train_generation as stg - # TODO: check if rate_c=4 is correct. - >>> sip, coinc = stg.single_interaction_process(rate=20*pq.Hz, rate_c=4, - ... t_stop=1*pq.s, - ... n=10, return_coinc = True) + # TODO: check if rate_coincidence=4 is correct. + >>> sip, coinc = stg.single_interaction_process( + ... rate=20*pq.Hz, coincidence_rate=4, + ... t_stop=1*pq.s, n_spiketrains=10, return_coincidences = True) """ # Check if n is a positive integer - if not (isinstance(n, int) and n > 0): - raise ValueError('n (={}) must be a positive integer'.format(n)) + if not (isinstance(n_spiketrains, int) and n_spiketrains > 0): + raise ValueError( + 'n (={}) must be a positive integer'.format(n_spiketrains)) if coincidences not in ('deterministic', 'stochastic'): raise ValueError( "coincidences must be 'deterministic' or 'stochastic'") @@ -803,26 +825,29 @@ def single_interaction_process( if rate < 0 * pq.Hz: raise ValueError( 'rate (={}) must be non-negative.'.format(rate)) - rates_b = np.repeat(rate, n) + rates_b = np.repeat(rate, n_spiketrains) else: rates_b = rate.flatten() if not all(rates_b >= 0. * pq.Hz): raise ValueError('*rate* must have non-negative elements') - # Check: rate>=rate_c - if np.any(rates_b < rate_c): - raise ValueError('all elements of *rate* must be >= *rate_c*') + # Check: rate>=rate_coincidence + if np.any(rates_b < coincidence_rate): + raise ValueError( + 'all elements of *rate* must be >= *rate_coincidence*') - # Check min_delay < 1./rate_c - if not (rate_c == 0 * pq.Hz or min_delay < 1. / rate_c): + # Check min_delay < 1./rate_coincidence + if not (coincidence_rate == 0 * pq.Hz + or min_delay < 1. / coincidence_rate): raise ValueError( - "'*min_delay* (%s) must be lower than 1/*rate_c* (%s)." % - (str(min_delay), str((1. / rate_c).rescale(min_delay.units)))) + "'*min_delay* (%s) must be lower than 1/*rate_coincidence* (%s)." % + (str(min_delay), str((1. / coincidence_rate).rescale( + min_delay.units)))) # Generate the n Poisson processes there are the basis for the SIP # (coincidences still lacking) embedded_poisson_trains = _n_poisson( - rate=rates_b - rate_c, t_stop=t_stop, t_start=t_start) + rate=rates_b - coincidence_rate, t_stop=t_stop, t_start=t_start) # Convert the trains from neo SpikeTrain objects to simpler pq.Quantity # objects embedded_poisson_trains = [ @@ -833,7 +858,7 @@ def single_interaction_process( if coincidences == 'deterministic': # P. Bouss: we want the closest approximation to the average # coincidence count. 
- n_coincidences = (t_stop - t_start) * rate_c + n_coincidences = (t_stop - t_start) * coincidence_rate # Conversion to integer necessary for python 2 n_coincidences = int(round(n_coincidences.simplified.item())) while True: @@ -845,7 +870,7 @@ def single_interaction_process( else: # coincidences == 'stochastic' while True: coinc_times = homogeneous_poisson_process( - rate=rate_c, t_stop=t_stop, t_start=t_start) + rate=coincidence_rate, t_stop=t_stop, t_start=t_start) if len(coinc_times) < 2 or min(np.diff(coinc_times)) >= min_delay: break coinc_times = coinc_times.simplified @@ -884,7 +909,7 @@ def single_interaction_process( for t in embedded_coinc] # Return the processes in the specified output_format - if not return_coinc: + if not return_coincidences: output = sip_process else: output = sip_process, coinc_times @@ -1157,21 +1182,22 @@ def _cpp_het_stat(A, t_stop, rates, t_start=0. * pq.ms): in zip(compound_poisson_spiketrains, poisson_spiketrains)] +@deprecated_alias(A='amplitude_distribution') def compound_poisson_process( - rate, A, t_stop, shift=None, t_start=0 * pq.ms): + rate, amplitude_distribution, t_stop, shift=None, t_start=0 * pq.ms): """ - Generate a Compound Poisson Process (CPP; see _[1]) with a given amplitude - distribution A and stationary marginal rates r. + Generate a Compound Poisson Process (CPP; see _[1]) with a given + `amplitude_distribution` :math:`A` and stationary marginal rates `rate`. The CPP process is a model for parallel, correlated processes with Poisson - spiking statistics at pre-defined firing rates. It is composed of len(A)-1 - spike trains with a correlation structure determined by the amplitude - distribution A: A[j] is the probability that a spike occurs synchronously - in any j spike trains. + spiking statistics at pre-defined firing rates. It is composed of + `len(A)-1` spike trains with a correlation structure determined by the + amplitude distribution :math:`A`: A[j] is the probability that a spike + occurs synchronously in any `j` spike trains. The CPP is generated by creating a hidden mother Poisson process, and then - copying spikes of the mother process to j of the output spike trains with - probability A[j]. + copying spikes of the mother process to `j` of the output spike trains with + probability `A[j]`. Note that this function decorrelates the firing rate of each SpikeTrain from the probability for that SpikeTrain to participate in a synchronous @@ -1184,10 +1210,11 @@ def compound_poisson_process( - a single value, all spike trains will have same rate rate - an array of values (of length len(A)-1), each indicating the firing rate of one process in output - A : np.ndarray - CPP's amplitude distribution. `A[j]` represents the probability of - a synchronous event of size j among the generated spike trains. - The sum over all entries of A must be equal to one. + amplitude_distribution : np.ndarray + CPP's amplitude distribution :math:`A`. `A[j]` represents the + probability of a synchronous event of size `j` among the generated + spike trains. The sum over all entries of :math:`A` must be equal to + one. t_stop : pq.Quantity The end time of the output spike trains. shift : pq.Quantity, optional @@ -1196,45 +1223,49 @@ def compound_poisson_process( independently by a random amount in the interval `[-shift, +shift]`. Default: None t_start : pq.Quantity, optional - The t_start time of the output spike trains. + The `t_start` time of the output spike trains. 
Default: 0 pq.ms Returns ------- list of neo.SpikeTrain SpikeTrains with specified firing rates forming the CPP with amplitude - distribution A. + distribution :math:`A`. References ---------- .. [1] Staude, Rotter, Gruen (2010) J Comput Neurosci 29:327-350. """ - if not isinstance(A, np.ndarray): - A = np.array(A) + if not isinstance(amplitude_distribution, np.ndarray): + amplitude_distribution = np.array(amplitude_distribution) # Check A is a probability distribution (it sums to 1 and is positive) - if abs(sum(A) - 1) > np.finfo('float').eps: - raise ValueError( - 'A must be a probability vector,' - ' sum(A)= %f !=1' % (sum(A))) - if np.any(A < 0): + if abs(sum(amplitude_distribution) - 1) > np.finfo('float').eps: raise ValueError( - 'A must be a probability vector, each element must be >0') + "'amplitude_distribution' must be a probability vector: " + "sum(A) = {} != 1".format(sum(amplitude_distribution))) + if np.any(amplitude_distribution < 0): + raise ValueError("'amplitude_distribution' must be a probability " + "vector with positive entries") # Check that the rate is not an empty pq.Quantity if rate.ndim == 1 and len(rate) == 0: raise ValueError('Rate is an empty pq.Quantity array') # Return empty spike trains for specific parameters - if A[0] == 1 or np.sum(np.abs(rate.magnitude)) == 0: - return [neo.SpikeTrain([] * t_stop.units, t_stop=t_stop, - t_start=t_start)] * (len(A) - 1) + if amplitude_distribution[0] == 1 or np.sum(np.abs(rate.magnitude)) == 0: + return [neo.SpikeTrain([] * t_stop.units, + t_stop=t_stop, + t_start=t_start)] * ( + len(amplitude_distribution) - 1) # Homogeneous rates if rate.ndim == 0: compound_poisson_spiketrains = _cpp_hom_stat( - A=A, t_stop=t_stop, rate=rate, t_start=t_start) + A=amplitude_distribution, t_stop=t_stop, rate=rate, + t_start=t_start) # Heterogeneous rates else: compound_poisson_spiketrains = _cpp_het_stat( - A=A, t_stop=t_stop, rates=rate, t_start=t_start) + A=amplitude_distribution, t_stop=t_stop, rates=rate, + t_start=t_start) if shift is not None: # Dither the output spiketrains diff --git a/elephant/spike_train_surrogates.py b/elephant/spike_train_surrogates.py index e61728ad6..4ef4cbc53 100644 --- a/elephant/spike_train_surrogates.py +++ b/elephant/spike_train_surrogates.py @@ -46,7 +46,7 @@ from __future__ import division, print_function, unicode_literals import random -from functools import partial +import warnings import neo import numpy as np @@ -54,6 +54,7 @@ from scipy.ndimage import gaussian_filter from elephant.statistics import isi +from elephant.utils import deprecated_alias # List of all available surrogate methods SURR_METHODS = ['dither_spike_train', 'dither_spikes', 'jitter_spikes', @@ -61,7 +62,7 @@ 'dither_spikes_with_refractory_period'] -def _dither_spikes_with_refractory_period(spiketrain, dither, n, +def _dither_spikes_with_refractory_period(spiketrain, dither, n_surrogates, refractory_period): units = spiketrain.units t_start = spiketrain.t_start.rescale(units).magnitude @@ -75,7 +76,7 @@ def _dither_spikes_with_refractory_period(spiketrain, dither, n, initial=refractory_period) dithered_spiketrains = [] - for _ in range(n): + for _ in range(n_surrogates): dithered_st = np.copy(spiketrain.magnitude) random_ordered_ids = np.arange(len(spiketrain)) np.random.shuffle(random_ordered_ids) @@ -108,8 +109,9 @@ def _dither_spikes_with_refractory_period(spiketrain, dither, n, return dithered_spiketrains -def dither_spikes(spiketrain, dither, n=1, decimals=None, edges=True, - refractory_period=None): 
+@deprecated_alias(n='n_surrogates') +def dither_spikes(spiketrain, dither, n_surrogates=1, decimals=None, + edges=True, refractory_period=None): """ Generates surrogates of a spike train by spike dithering. @@ -128,7 +130,7 @@ def dither_spikes(spiketrain, dither, n=1, decimals=None, edges=True, dither : pq.Quantity Amount of dithering. A spike at time `t` is placed randomly within `]t-dither, t+dither[`. - n : int, optional + n_surrogates : int, optional Number of surrogates to be generated. Default: 1. decimals : int or None, optional @@ -169,7 +171,7 @@ def dither_spikes(spiketrain, dither, n=1, decimals=None, edges=True, >>> print(dither_spikes(st, dither = 20 * pq.ms)) # doctest: +SKIP [] - >>> print(dither_spikes(st, dither = 20 * pq.ms, n=2)) # doctest: +SKIP + >>> print(dither_spikes(st, dither = 20 * pq.ms, n_surrogates=2)) [, >> print(randomise_spikes(st)) # doctest: +SKIP [] - >>> print(randomise_spikes(st, n=2)) # doctest: +SKIP + >>> print(randomise_spikes(st, n_surrogates=2)) # doctest: +SKIP [, >> print(shuffle_isis(st)) # doctest: +SKIP [] - >>> print(shuffle_isis(st, n=2)) # doctest: +SKIP + >>> print(shuffle_isis(st, n_surrogates=2)) # doctest: +SKIP [, >> print(dither_spike_train(st, shift = 20*pq.ms)) # doctest: +SKIP [] - >>> print(dither_spike_train(st, shift = 20*pq.ms, n=2)) # doctest: +SKIP + >>> print(dither_spike_train(st, shift = 20*pq.ms, n_surrogates=2)) [, >> import neo ... >>> st = neo.SpikeTrain([80, 150, 320, 480] * pq.ms, t_stop=1 * pq.s) - >>> print(jitter_spikes(st, binsize=100 * pq.ms)) # doctest: +SKIP + >>> print(jitter_spikes(st, bin_size=100 * pq.ms)) # doctest: +SKIP [] - >>> print(jitter_spikes(st, binsize=100 * pq.ms, n=2)) # doctest: +SKIP + >>> print(jitter_spikes(st, bin_size=100 * pq.ms, n_surrogates=2)) [, ] - >>> print(jitter_spikes(st, binsize=100 * pq.ms)) # doctest: +SKIP + >>> print(jitter_spikes(st, bin_size=100 * pq.ms)) # doctest: +SKIP [] """ # Define standard time unit; all time Quantities are converted to # scalars after being rescaled to this unit, to use the power of numpy - std_unit = binsize.units + std_unit = bin_size.units # Compute bin edges for the jittering procedure # !: the last bin arrives until spiketrain.t_stop and might have - # size != binsize + # size != bin_size start_dl = spiketrain.t_start.rescale(std_unit).magnitude stop_dl = spiketrain.t_stop.rescale(std_unit).magnitude - bin_edges = start_dl + np.arange(start_dl, stop_dl, binsize.magnitude) + bin_edges = start_dl + np.arange(start_dl, stop_dl, bin_size.magnitude) bin_edges = np.hstack([bin_edges, stop_dl]) # Create n surrogates with spikes randomly placed in the interval (0,1) - surr_poiss01 = np.random.random_sample((n, len(spiketrain))) + surr_poiss01 = np.random.random_sample((n_surrogates, len(spiketrain))) # Compute the bin id of each spike bin_ids = np.array( (spiketrain.view(pq.Quantity) / - binsize).rescale(pq.dimensionless).magnitude, dtype=int) + bin_size).rescale(pq.dimensionless).magnitude, dtype=int) # Compute the size of each time bin (as a numpy array) bin_sizes_dl = np.diff(bin_edges) @@ -567,9 +575,9 @@ class JointISI(object): The Joint-ISI histogram is calculated for :math:`(ISI_i, ISI_{i+1})` from 0 to `truncation_limit`. Default: 100 * pq.ms. - num_bins : int, optional + n_bins : int, optional The size of the joint-ISI-distribution will be - `num_bins*num_bins/2`. + `n_bins*n_bins/2`. Default: 100. 
sigma : pq.Quantity, optional The standard deviation of the Gaussian kernel, with which @@ -596,7 +604,7 @@ class JointISI(object): period, which will be destroyed by the convolution with the 2d-Gaussian function. Default: True. - refr_period : pq.Quantity, optional + refractory_period : pq.Quantity, optional Defines the refractory period of the dithered `spiketrain` unless the smallest ISI of the `spiketrain` is lower than this value. Default: 4. * pq.ms. @@ -614,21 +622,22 @@ class JointISI(object): # Otherwise, the original spiketrain is copied N times. MIN_SPIKES = 3 + @deprecated_alias(num_bins='n_bins', refr_period='refractory_period') def __init__(self, spiketrain, dither=15. * pq.ms, truncation_limit=100. * pq.ms, - num_bins=100, + n_bins=100, sigma=2. * pq.ms, alternate=True, use_sqrt=False, method='fast', cutoff=True, - refr_period=4. * pq.ms + refractory_period=4. * pq.ms ): self.spiketrain = spiketrain self.truncation_limit = self.get_magnitude(truncation_limit) - self.num_bins = num_bins + self.n_bins = n_bins self.dither = self.get_magnitude(dither) @@ -640,11 +649,11 @@ def __init__(self, "but not '{}'".format(method)) self.method = method - refr_period = self.get_magnitude(refr_period) + refractory_period = self.get_magnitude(refractory_period) if not self.too_less_spikes: minimal_isi = np.min(self.isi) - refr_period = min(refr_period, minimal_isi) - self.refr_period = refr_period + refractory_period = min(refractory_period, minimal_isi) + self.refractory_period = refractory_period self.cutoff = cutoff self.use_sqrt = use_sqrt @@ -653,6 +662,18 @@ def __init__(self, self.max_change_index = self.isi_to_index(self.dither) self.max_change_isi = self.index_to_isi(self.max_change_index) + @property + def refr_period(self): + warnings.warn("'.refr_period' is deprecated; use '.refractory_period'", + DeprecationWarning) + return self.refractory_period + + @property + def num_bins(self): + warnings.warn("'.num_bins' is deprecated; use '.n_bins'", + DeprecationWarning) + return self.n_bins + def get_magnitude(self, quantity): """ Parameters @@ -693,7 +714,7 @@ def isi(self): @property def bin_width(self): - return self.truncation_limit / self.num_bins + return self.truncation_limit / self.n_bins def isi_to_index(self, inter_spike_interval): """ @@ -742,7 +763,7 @@ def joint_isi_histogram(self): isis = self.isi joint_isi_histogram = np.histogram2d( isis[:-1], isis[1:], - bins=[self.num_bins, self.num_bins], + bins=[self.n_bins, self.n_bins], range=[[0., self.truncation_limit], [0., self.truncation_limit]])[0] @@ -751,7 +772,7 @@ def joint_isi_histogram(self): if self.sigma: if self.cutoff: - start_index = self.isi_to_index(self.refr_period) + start_index = self.isi_to_index(self.refractory_period) joint_isi_histogram[ start_index:, start_index:] = gaussian_filter( joint_isi_histogram[start_index:, start_index:], @@ -828,10 +849,10 @@ def _determine_cumulative_functions(self): if self.method == 'fast': self._jisih_cumulatives = [] - for double_index in range(self.num_bins): + for double_index in range(self.n_bins): # Taking anti-diagonals of the original joint-ISI histogram diagonal = np.diagonal( - rotated_jisih, offset=-self.num_bins + double_index + 1) + rotated_jisih, offset=-self.n_bins + double_index + 1) jisih_cum = self.normalize_cumulative_distribution( np.cumsum(diagonal)) self._jisih_cumulatives.append(jisih_cum) @@ -842,10 +863,10 @@ def _determine_cumulative_functions(self): def _window_cumulatives(self, rotated_jisih): jisih_diag_cums = 
self._window_diagonal_cumulatives(rotated_jisih) jisih_cumulatives = np.zeros( - (self.num_bins, self.num_bins, + (self.n_bins, self.n_bins, 2 * self.max_change_index + 1)) - for curr_isi_id in range(self.num_bins): - for next_isi_id in range(self.num_bins - curr_isi_id): + for curr_isi_id in range(self.n_bins): + for next_isi_id in range(self.n_bins - curr_isi_id): double_index = next_isi_id + curr_isi_id cum_slice = jisih_diag_cums[ double_index, @@ -860,15 +881,15 @@ def _window_diagonal_cumulatives(self, rotated_jisih): # An element of the first axis is defined as the sum of indices # for previous and subsequent ISI. - jisih_diag_cums = np.zeros((self.num_bins, - self.num_bins + jisih_diag_cums = np.zeros((self.n_bins, + self.n_bins + 2 * self.max_change_index)) # double_index corresponds to the sum of the indices for the previous # and the subsequent ISI. - for double_index in range(self.num_bins): + for double_index in range(self.n_bins): cum_diag = np.cumsum(np.diagonal(rotated_jisih, - - self.num_bins + - self.n_bins + double_index + 1)) right_padding = jisih_diag_cums.shape[1] - \ @@ -913,7 +934,7 @@ def _get_dithering_step(self, curr_isi_id = dithered_isi_indices[i] next_isi_id = dithered_isi_indices[i + 1] double_index = curr_isi_id + next_isi_id - if double_index < self.num_bins: + if double_index < self.n_bins: if self.method == 'fast': cum_dist_func = self._jisih_cumulatives[ double_index] @@ -937,14 +958,15 @@ def _get_dithering_step(self, def _uniform_dither_not_jisi_movable_spikes(self, curr_isi, next_isi): - left_dither = min(curr_isi - self.refr_period, self.dither) - right_dither = min(next_isi - self.refr_period, self.dither) + left_dither = min(curr_isi - self.refractory_period, self.dither) + right_dither = min(next_isi - self.refractory_period, self.dither) step = random.random() * (right_dither + left_dither) - left_dither return step -def surrogates(spiketrain, n=1, surr_method='dither_spike_train', dt=None, - decimals=None, edges=True): +@deprecated_alias(n='n_surrogates', surr_method='method') +def surrogates(spiketrain, n_surrogates=1, method='dither_spike_train', + dt=None, decimals=None, edges=True): """ Generates surrogates of a `spiketrain` by a desired generation method. @@ -960,10 +982,10 @@ def surrogates(spiketrain, n=1, surr_method='dither_spike_train', dt=None, ---------- spiketrain : neo.SpikeTrain The spike train from which to generate the surrogates - n : int, optional + n_surrogates : int, optional Number of surrogates to be generated. Default: 1. - surr_method : str, optional + method : str, optional The method to use to generate surrogate spike trains. 
Can be one of: * 'dither_spike_train': see `surrogates.dither_spike_train` [dt needed] * 'dither_spikes': see `surrogates.dither_spikes` [dt needed] @@ -1009,22 +1031,23 @@ def surrogates(spiketrain, n=1, surr_method='dither_spike_train', dt=None, 'joint_isi_dithering': JointISI(spiketrain).dithering, } - if surr_method not in surrogate_types.keys(): + if method not in surrogate_types.keys(): raise ValueError("Specified surrogate method ('{}') " - "is not valid".format(surr_method)) - surr_method = surrogate_types[surr_method] + "is not valid".format(method)) + method = surrogate_types[method] # PYTHON2: replace with inspect.signature() - if dt is None and surr_method in (dither_spike_train, dither_spikes, - jitter_spikes): + if dt is None and method in (dither_spike_train, dither_spikes, + jitter_spikes): raise ValueError("{}() method requires 'dt' parameter to be " - "not None".format(surr_method.__name__)) - - if surr_method in (dither_spike_train, dither_spikes): - return surr_method(spiketrain, dt, n=n, decimals=decimals, edges=edges) - if surr_method in (randomise_spikes, shuffle_isis): - return surr_method(spiketrain, n=n, decimals=decimals) - if surr_method == jitter_spikes: - return surr_method(spiketrain, dt, n=n) - # surr_method == 'joint_isi_dithering': - return surr_method(n) + "not None".format(method.__name__)) + + if method in (dither_spike_train, dither_spikes): + return method(spiketrain, dt, n=n_surrogates, decimals=decimals, + edges=edges) + if method in (randomise_spikes, shuffle_isis): + return method(spiketrain, n=n_surrogates, decimals=decimals) + if method == jitter_spikes: + return method(spiketrain, dt, n=n_surrogates) + # method == 'joint_isi_dithering': + return method(n_surrogates) diff --git a/elephant/sta.py b/elephant/sta.py index 58f292efa..d6fcaa9be 100644 --- a/elephant/sta.py +++ b/elephant/sta.py @@ -202,7 +202,7 @@ def spike_field_coherence(signal, spiketrain, **kwargs): signal : neo AnalogSignal object 'signal' contains n analog signals. spiketrain : SpikeTrain or BinnedSpikeTrain - Single spike train to perform the analysis on. The binsize of the + Single spike train to perform the analysis on. The bin_size of the binned spike train must match the sampling_rate of signal. **kwargs: All kwargs are passed to `scipy.signal.coherence()`. 
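A minimal sketch of how the `bin_size` requirement plays out when calling `spike_field_coherence` (toy data; the signal values, spike times and the `nperseg` keyword forwarded to `scipy.signal.coherence` are illustrative only, and the return value is left unpacked because its exact structure is not shown in this excerpt)::

    import numpy as np
    import quantities as pq
    import neo
    from elephant.conversion import BinnedSpikeTrain
    from elephant.sta import spike_field_coherence

    # toy LFP-like signal: 10 s sampled at 1 kHz
    signal = neo.AnalogSignal(np.random.randn(10000, 1), units='mV',
                              sampling_rate=1 * pq.kHz, t_start=0 * pq.s)
    spiketrain = neo.SpikeTrain([0.1, 0.25, 0.6, 3.2] * pq.s,
                                t_start=0 * pq.s, t_stop=10 * pq.s)
    # the binned spike train must use the signal's sampling period as bin_size
    binned_st = BinnedSpikeTrain(spiketrain, bin_size=signal.sampling_period)
    coherence_result = spike_field_coherence(signal, binned_st, nperseg=256)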
@@ -277,7 +277,7 @@ def spike_field_coherence(signal, spiketrain, **kwargs): # bin spiketrain if necessary if isinstance(spiketrain, SpikeTrain): spiketrain = BinnedSpikeTrain( - spiketrain, binsize=signal.sampling_period) + spiketrain, bin_size=signal.sampling_period) # check the start and stop times of signal and spike trains if spiketrain.t_start < signal.t_start: @@ -288,18 +288,18 @@ def spike_field_coherence(signal, spiketrain, **kwargs): "The spiketrain stops later than the analog signal.") # check equal time resolution for both signals - if spiketrain.binsize != signal.sampling_period: + if spiketrain.bin_size != signal.sampling_period: raise ValueError( "The spiketrain and signal must have a " - "common sampling frequency / binsize") + "common sampling frequency / bin_size") # calculate how many bins to add on the left of the binned spike train delta_t = spiketrain.t_start - signal.t_start - if delta_t % spiketrain.binsize == 0: - left_edge = int((delta_t / spiketrain.binsize).magnitude) + if delta_t % spiketrain.bin_size == 0: + left_edge = int((delta_t / spiketrain.bin_size).magnitude) else: raise ValueError("Incompatible binning of spike train and LFP") - right_edge = int(left_edge + spiketrain.num_bins) + right_edge = int(left_edge + spiketrain.n_bins) # duplicate spike trains spiketrain_array = np.zeros((1, len_signals)) diff --git a/elephant/statistics.py b/elephant/statistics.py index cc24b0c8f..2334fae28 100644 --- a/elephant/statistics.py +++ b/elephant/statistics.py @@ -61,16 +61,19 @@ # do not import unicode_literals # (quantities rescale does not work with unicodes) +import warnings + +import neo import numpy as np import math import quantities as pq -import scipy.stats import scipy.signal -import neo +import scipy.stats from neo.core import SpikeTrain + import elephant.conversion as conv import elephant.kernels as kernels -import warnings +from elephant.utils import deprecated_alias from elephant.utils import is_time_quantity @@ -281,7 +284,8 @@ def __variation_check(v, with_nan): return None -def lv(v, with_nan=False): +@deprecated_alias(v='time_intervals') +def lv(time_intervals, with_nan=False): r""" Calculate the measure of local variation LV for a sequence of time intervals between events. @@ -300,7 +304,7 @@ def lv(v, with_nan=False): Parameters ---------- - v : pq.Quantity or np.ndarray or list + time_intervals : pq.Quantity or np.ndarray or list Vector of consecutive time intervals. with_nan : bool, optional If True, `lv` of a spike train with less than two spikes results in a @@ -337,16 +341,17 @@ def lv(v, with_nan=False): """ # convert to array, cast to float - v = np.asarray(v) - np_nan = __variation_check(v, with_nan) + time_intervals = np.asarray(time_intervals) + np_nan = __variation_check(time_intervals, with_nan) if np_nan is not None: return np_nan - # calculate LV and return result - return 3. * np.mean(np.power(np.diff(v) / (v[:-1] + v[1:]), 2)) + cv_i = np.diff(time_intervals) / (time_intervals[:-1] + time_intervals[1:]) + return 3. * np.mean(np.power(cv_i, 2)) -def cv2(v, with_nan=False): +@deprecated_alias(v='time_intervals') +def cv2(time_intervals, with_nan=False): r""" Calculate the measure of CV2 for a sequence of time intervals between events. @@ -366,7 +371,7 @@ def cv2(v, with_nan=False): Parameters ---------- - v : pq.Quantity or np.ndarray or list + time_intervals : pq.Quantity or np.ndarray or list Vector of consecutive time intervals. 
with_nan : bool, optional If True, `cv2` of a spike train with less than two spikes results in a @@ -404,13 +409,14 @@ def cv2(v, with_nan=False): """ # convert to array, cast to float - v = np.asarray(v) - np_nan = __variation_check(v, with_nan) + time_intervals = np.asarray(time_intervals) + np_nan = __variation_check(time_intervals, with_nan) if np_nan is not None: return np_nan # calculate CV2 and return result - return 2. * np.mean(np.absolute(np.diff(v)) / (v[:-1] + v[1:])) + cv_i = np.diff(time_intervals) / (time_intervals[:-1] + time_intervals[1:]) + return 2. * np.mean(np.abs(cv_i)) def instantaneous_rate(spiketrain, sampling_period, kernel='auto', @@ -567,8 +573,8 @@ def instantaneous_rate(spiketrain, sampling_period, kernel='auto', if kernel == 'auto': kernel_width_sigma = None if len(spiketrain) > 0: - kernel_width_sigma = sskernel( - spiketrain.magnitude, tin=None, bootstrap=False)['optw'] + kernel_width_sigma = optimal_kernel_bandwidth( + spiketrain.magnitude, times=None, bootstrap=False)['optw'] if kernel_width_sigma is None: raise ValueError( "Unable to calculate optimal kernel width for " @@ -668,7 +674,8 @@ def instantaneous_rate(spiketrain, sampling_period, kernel='auto', return rate -def time_histogram(spiketrains, binsize, t_start=None, t_stop=None, +@deprecated_alias(binsize='bin_size') +def time_histogram(spiketrains, bin_size, t_start=None, t_stop=None, output='counts', binary=False): """ Time Histogram of a list of `neo.SpikeTrain` objects. @@ -677,7 +684,7 @@ def time_histogram(spiketrains, binsize, t_start=None, t_stop=None, ---------- spiketrains : list of neo.SpikeTrain `neo.SpikeTrain`s with a common time axis (same `t_start` and `t_stop`) - binsize : pq.Quantity + bin_size : pq.Quantity Width of the histogram's time bins. t_start : pq.Quantity, optional Start time of the histogram. Only events in `spiketrains` falling @@ -714,7 +721,7 @@ def time_histogram(spiketrains, binsize, t_start=None, t_stop=None, neo.AnalogSignal A `neo.AnalogSignal` object containing the histogram values. `neo.AnalogSignal[j]` is the histogram computed between - `t_start + j * binsize` and `t_start + (j + 1) * binsize`. + `t_start + j * bin_size` and `t_start + (j + 1) * bin_size`. Raises ------ @@ -760,7 +767,7 @@ def time_histogram(spiketrains, binsize, t_start=None, t_stop=None, # Bin the spike trains and sum across columns bs = conv.BinnedSpikeTrain(sts_cut, t_start=t_start, t_stop=t_stop, - binsize=binsize) + bin_size=bin_size) if binary: bin_hist = bs.to_sparse_bool_array().sum(axis=0) @@ -777,22 +784,23 @@ def time_histogram(spiketrains, binsize, t_start=None, t_stop=None, bin_hist = bin_hist * 1. / len(spiketrains) * pq.dimensionless elif output == 'rate': # Divide by number of input spike trains and bin width - bin_hist = bin_hist * 1. / len(spiketrains) / binsize + bin_hist = bin_hist * 1. / len(spiketrains) / bin_size else: raise ValueError('Parameter output is not valid.') return neo.AnalogSignal(signal=np.expand_dims(bin_hist, axis=1), - sampling_period=binsize, units=bin_hist.units, + sampling_period=bin_size, units=bin_hist.units, t_start=t_start) -def complexity_pdf(spiketrains, binsize): +@deprecated_alias(binsize='bin_size') +def complexity_pdf(spiketrains, bin_size): """ Complexity Distribution of a list of `neo.SpikeTrain` objects. Probability density computed from the complexity histogram which is the histogram of the entries of the population histogram of clipped (binary) - spike trains computed with a bin width of `binsize`. 
+ spike trains computed with a bin width of `bin_size`. It provides for each complexity (== number of active neurons per bin) the number of occurrences. The normalization of that histogram to 1 is the probability density. @@ -803,7 +811,7 @@ def complexity_pdf(spiketrains, binsize): ---------- spiketrains : list of neo.SpikeTrain Spike trains with a common time axis (same `t_start` and `t_stop`) - binsize : pq.Quantity + bin_size : pq.Quantity Width of the histogram's time bins. Returns @@ -811,7 +819,7 @@ def complexity_pdf(spiketrains, binsize): complexity_distribution : neo.AnalogSignal A `neo.AnalogSignal` object containing the histogram values. `neo.AnalogSignal[j]` is the histogram computed between - `t_start + j * binsize` and `t_start + (j + 1) * binsize`. + `t_start + j * bin_size` and `t_start + (j + 1) * bin_size`. See also -------- @@ -827,7 +835,7 @@ def complexity_pdf(spiketrains, binsize): """ # Computing the population histogram with parameter binary=True to clip the # spike trains before summing - pophist = time_histogram(spiketrains, binsize, binary=True) + pophist = time_histogram(spiketrains, bin_size, binary=True) # Computing the histogram of the entries of pophist (=Complexity histogram) complexity_hist = np.histogram( @@ -938,7 +946,9 @@ def cost_function(x, N, w, dt): return C, yh -def sskernel(spiketimes, tin=None, w=None, bootstrap=False): +@deprecated_alias(tin='times', w='bandwidth') +def optimal_kernel_bandwidth(spiketimes, times=None, bandwidth=None, + bootstrap=False): """ Calculates optimal fixed kernel bandwidth, given as the standard deviation sigma. @@ -947,15 +957,15 @@ def sskernel(spiketimes, tin=None, w=None, bootstrap=False): ---------- spiketimes : np.ndarray Sequence of spike times (sorted to be ascending). - tin : np.ndarray, optional + times : np.ndarray, optional Time points at which the kernel bandwidth is to be estimated. If None, `spiketimes` is used. Default: None. - w : np.ndarray, optional + bandwidth : np.ndarray, optional Vector of kernel bandwidths (standard deviation sigma). If specified, optimal bandwidth is selected from this. - If None, `w` is obtained through a golden-section search on a log-exp - scale. + If None, `bandwidth` is obtained through a golden-section search on a + log-exp scale. Default: None. bootstrap : bool, optional If True, calculates the 95% confidence interval using Bootstrap. @@ -973,7 +983,7 @@ def sskernel(spiketimes, tin=None, w=None, bootstrap=False): 'w' : np.ndarray Kernel bandwidths examined (standard deviation sigma). 'C' : np.ndarray - Cost functions of `w`. + Cost functions of `bandwidth`. 'confb95' : tuple of np.ndarray Bootstrap 95% confidence interval: (lower level, upper level). If `bootstrap` is False, `confb95` is None. 
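A minimal sketch of calling the renamed bandwidth optimizer with its new keyword names (toy spike times; assumes `numpy` and `elephant` are installed). The `'optw'` entry is the optimal kernel standard deviation, which is how `instantaneous_rate` consumes the result::

    import numpy as np
    from elephant.statistics import optimal_kernel_bandwidth

    # toy spike times in seconds, sorted ascending
    spiketimes = np.sort(np.random.uniform(0., 10., size=200))
    result = optimal_kernel_bandwidth(spiketimes, times=None, bootstrap=False)
    sigma_opt = result['optw']  # optimal kernel standard deviation
    # the old entry point sskernel(spiketimes) still works but only forwards
    # the call and emits a DeprecationWarning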
@@ -992,38 +1002,38 @@ def sskernel(spiketimes, tin=None, w=None, bootstrap=False): """ - if tin is None: + if times is None: time = np.max(spiketimes) - np.min(spiketimes) isi = np.diff(spiketimes) isi = isi[isi > 0].copy() dt = np.min(isi) - tin = np.linspace(np.min(spiketimes), - np.max(spiketimes), - min(int(time / dt + 0.5), - 1000)) # The 1000 seems somewhat arbitrary - t = tin + times = np.linspace(np.min(spiketimes), + np.max(spiketimes), + min(int(time / dt + 0.5), + 1000)) # The 1000 seems somewhat arbitrary + t = times else: - time = np.max(tin) - np.min(tin) - spiketimes = spiketimes[(spiketimes >= np.min(tin)) & - (spiketimes <= np.max(tin))].copy() + time = np.max(times) - np.min(times) + spiketimes = spiketimes[(spiketimes >= np.min(times)) & + (spiketimes <= np.max(times))].copy() isi = np.diff(spiketimes) isi = isi[isi > 0].copy() dt = np.min(isi) - if dt > np.min(np.diff(tin)): - t = np.linspace(np.min(tin), np.max(tin), + if dt > np.min(np.diff(times)): + t = np.linspace(np.min(times), np.max(times), min(int(time / dt + 0.5), 1000)) else: - t = tin - dt = np.min(np.diff(tin)) + t = times + dt = np.min(np.diff(times)) yhist, bins = np.histogram(spiketimes, np.r_[t - dt / 2, t[-1] + dt / 2]) N = np.sum(yhist) yhist = yhist / (N * dt) # density optw = None y = None - if w is not None: - C = np.zeros(len(w)) + if bandwidth is not None: + C = np.zeros(len(bandwidth)) Cmin = np.inf - for k, w_ in enumerate(w): + for k, w_ in enumerate(bandwidth): C[k], yh = cost_function(yhist, N, w_, dt) if C[k] < Cmin: Cmin = C[k] @@ -1034,7 +1044,7 @@ def sskernel(spiketimes, tin=None, w=None, bootstrap=False): wmin = 2 * dt wmax = max(spiketimes) - min(spiketimes) imax = 20 # max iterations - w = np.zeros(imax) + bandwidth = np.zeros(imax) C = np.zeros(imax) tolerance = 1e-5 phi = 0.5 * (np.sqrt(5) + 1) # The Golden ratio @@ -1053,7 +1063,7 @@ def sskernel(spiketimes, tin=None, w=None, bootstrap=False): c1 = (phi - 1) * a + (2 - phi) * b f2 = f1 f1, y1 = cost_function(yhist, N, logexp(c1), dt) - w[k] = logexp(c1) + bandwidth[k] = logexp(c1) C[k] = f1 optw = logexp(c1) y = y1 / (np.sum(y1 * dt)) @@ -1063,7 +1073,7 @@ def sskernel(spiketimes, tin=None, w=None, bootstrap=False): c2 = (2 - phi) * a + (phi - 1) * b f1 = f2 f2, y2 = cost_function(yhist, N, logexp(c2), dt) - w[k] = logexp(c2) + bandwidth[k] = logexp(c2) C[k] = f2 optw = logexp(c2) y = y2 / np.sum(y2 * dt) @@ -1074,7 +1084,7 @@ def sskernel(spiketimes, tin=None, w=None, bootstrap=False): # If bootstrap is requested, and an optimal kernel was found if bootstrap and optw: nbs = 1000 - yb = np.zeros((nbs, len(tin))) + yb = np.zeros((nbs, len(times))) for ii in range(nbs): idx = np.floor(np.random.rand(N) * N).astype(int) xb = spiketimes[idx] @@ -1082,23 +1092,29 @@ def sskernel(spiketimes, tin=None, w=None, bootstrap=False): xb, np.r_[t - dt / 2, t[-1] + dt / 2]) / dt / N yb_buf = fftkernel(y_histb, optw / dt).real yb_buf = yb_buf / np.sum(yb_buf * dt) - yb[ii, :] = np.interp(tin, t, yb_buf) + yb[ii, :] = np.interp(times, t, yb_buf) ybsort = np.sort(yb, axis=0) y95b = ybsort[np.floor(0.05 * nbs).astype(int), :] y95u = ybsort[np.floor(0.95 * nbs).astype(int), :] confb95 = (y95b, y95u) # Only perform interpolation if y could be calculated if y is not None: - y = np.interp(tin, t, y) + y = np.interp(times, t, y) return {'y': y, - 't': tin, + 't': times, 'optw': optw, - 'w': w, + 'w': bandwidth, 'C': C, 'confb95': confb95, 'yb': yb} +def sskernel(*args, **kwargs): + warnings.warn("'sskernel' function is deprecated; " + "use 
'optimal_kernel_bandwidth'", DeprecationWarning) + return optimal_kernel_bandwidth(*args, **kwargs) + + def _check_consistency_of_spiketrains(spiketrains, t_start=None, t_stop=None): for st in spiketrains: diff --git a/elephant/test/make_spike_extraction_test_data.py b/elephant/test/make_spike_extraction_test_data.py index 84158bcde..c17a4c5a4 100644 --- a/elephant/test/make_spike_extraction_test_data.py +++ b/elephant/test/make_spike_extraction_test_data.py @@ -1,64 +1,65 @@ -def main(): # pragma: no cover - from brian2 import start_scope,mvolt,ms,NeuronGroup,StateMonitor,run - import matplotlib.pyplot as plt - import neo - import quantities as pq +def main(): # pragma: no cover + from brian2 import start_scope, mvolt, ms, NeuronGroup, StateMonitor, run + import matplotlib.pyplot as plt + import neo + import quantities as pq - start_scope() - - # Izhikevich neuron parameters. - a = 0.02/ms - b = 0.2/ms - c = -65*mvolt - d = 6*mvolt/ms - I = 4*mvolt/ms - - # Standard Izhikevich neuron equations. - eqs = ''' + start_scope() + + # Izhikevich neuron parameters. + a = 0.02 / ms + b = 0.2 / ms + c = -65 * mvolt + d = 6 * mvolt / ms + I = 4 * mvolt / ms + + # Standard Izhikevich neuron equations. + eqs = ''' dv/dt = 0.04*v**2/(ms*mvolt) + (5/ms)*v + 140*mvolt/ms - u + I : volt du/dt = a*((b*v) - u) : volt/second ''' - - reset = ''' + + reset = ''' v = c u += d ''' - - # Setup and run simulation. - G = NeuronGroup(1, eqs, threshold='v>30*mvolt', reset='v = -70*mvolt') - G.v = -65*mvolt - G.u = b*G.v - M = StateMonitor(G, 'v', record=True) - run(300*ms) - - # Store results in neo format. - vm = neo.core.AnalogSignal(M.v[0], units=pq.V, sampling_period=0.1*pq.ms) - - # Plot results. - plt.figure() - plt.plot(vm.times*1000,vm*1000) # Plot mV and ms instead of V and s. - plt.xlabel('Time (ms)') - plt.ylabel('mv') - - # Save results. - iom = neo.io.PyNNNumpyIO('spike_extraction_test_data') - block = neo.core.Block() - segment = neo.core.Segment() - segment.analogsignals.append(vm) - block.segments.append(segment) - iom.write(block) - - # Load results. - iom2 = neo.io.PyNNNumpyIO('spike_extraction_test_data.npz') - data = iom2.read() - vm = data[0].segments[0].analogsignals[0] - - # Plot results. - # The two figures should match. - plt.figure() - plt.plot(vm.times*1000,vm*1000) # Plot mV and ms instead of V and s. - plt.xlabel('Time (ms)') - plt.ylabel('mv') - + + # Setup and run simulation. + G = NeuronGroup(1, eqs, threshold='v>30*mvolt', reset='v = -70*mvolt') + G.v = -65 * mvolt + G.u = b * G.v + M = StateMonitor(G, 'v', record=True) + run(300 * ms) + + # Store results in neo format. + vm = neo.core.AnalogSignal(M.v[0], units=pq.V, sampling_period=0.1 * pq.ms) + + # Plot results. + plt.figure() + plt.plot(vm.times * 1000, vm * 1000) # Plot mV and ms instead of V and s. + plt.xlabel('Time (ms)') + plt.ylabel('mv') + + # Save results. + iom = neo.io.PyNNNumpyIO('spike_extraction_test_data') + block = neo.core.Block() + segment = neo.core.Segment() + segment.analogsignals.append(vm) + block.segments.append(segment) + iom.write(block) + + # Load results. + iom2 = neo.io.PyNNNumpyIO('spike_extraction_test_data.npz') + data = iom2.read() + vm = data[0].segments[0].analogsignals[0] + + # Plot results. + # The two figures should match. + plt.figure() + plt.plot(vm.times * 1000, vm * 1000) # Plot mV and ms instead of V and s. 
+ plt.xlabel('Time (ms)') + plt.ylabel('mv') + + if __name__ == '__main__': - main() + main() diff --git a/elephant/test/test_asset.py b/elephant/test/test_asset.py index 0fee8fe19..73fef2b5f 100644 --- a/elephant/test/test_asset.py +++ b/elephant/test/test_asset.py @@ -197,8 +197,8 @@ def test_intersection_matrix(self): bin_size = 1 * pq.ms asset_obj_same_t_start_stop = asset.ASSET( - [st1, st2], bin_size=bin_size, t_stop_x=5 * pq.ms, - t_stop_y=5 * pq.ms) + [st1, st2], bin_size=bin_size, t_stop_i=5 * pq.ms, + t_stop_j=5 * pq.ms) # Check that the routine works for correct input... # ...same t_start, t_stop on both time axes @@ -215,9 +215,9 @@ def test_intersection_matrix(self): assert_array_equal(imat_1_2, trueimat_1_2) # correct matrix # ...different t_start, t_stop on the two time axes asset_obj_different_t_start_stop = asset.ASSET( - [st1, st2], spiketrains_y=[st + 6 * pq.ms for st in [st1, st2]], - bin_size=bin_size, t_start_y=6 * pq.ms, t_stop_x=5 * pq.ms, - t_stop_y=11 * pq.ms) + [st1, st2], spiketrains_j=[st + 6 * pq.ms for st in [st1, st2]], + bin_size=bin_size, t_start_j=6 * pq.ms, t_stop_i=5 * pq.ms, + t_stop_j=11 * pq.ms) imat_1_2 = asset_obj_different_t_start_stop.intersection_matrix() assert_array_equal(asset_obj_different_t_start_stop.x_edges, np.arange(6) * pq.ms) # correct bins @@ -259,15 +259,15 @@ def test_intersection_matrix(self): # Check that errors are raised correctly... # ...for partially overlapping time intervals self.assertRaises(ValueError, asset.ASSET, - spiketrains=[st1, st2], bin_size=bin_size, - t_start_y=1 * pq.ms) + spiketrains_i=[st1, st2], bin_size=bin_size, + t_start_j=1 * pq.ms) # ...for different SpikeTrain's t_starts self.assertRaises(ValueError, asset.ASSET, - spiketrains=[st1, st3], bin_size=bin_size) + spiketrains_i=[st1, st3], bin_size=bin_size) # ...for different SpikeTrain's t_stops self.assertRaises(ValueError, asset.ASSET, - spiketrains=[st1, st2], bin_size=bin_size, - t_stop_x=5 * pq.ms) + spiketrains_i=[st1, st2], bin_size=bin_size, + t_stop_j=5 * pq.ms) def test_combinations_with_replacement(self): # Test that _combinations_with_replacement yields the same tuples @@ -317,7 +317,7 @@ def test_probability_matrix_symmetric(self): asset_obj = asset.ASSET(spiketrains, bin_size=self.bin_size) asset_obj_symmetric = asset.ASSET(spiketrains, - spiketrains_y=spiketrains_copy, + spiketrains_j=spiketrains_copy, bin_size=self.bin_size) imat = asset_obj.intersection_matrix() @@ -326,7 +326,7 @@ def test_probability_matrix_symmetric(self): imat_symm = asset_obj_symmetric.intersection_matrix() pmat_symm = asset_obj_symmetric.probability_matrix_analytical( - kernel_width=kernel_width) + kernel_width=kernel_width) assert_array_almost_equal(pmat, pmat_symm) assert_array_almost_equal(imat, imat_symm) @@ -379,10 +379,10 @@ def _get_rates(_spiketrains): # calculate probability matrix montecarlo pmat_montecarlo = asset_obj.probability_matrix_montecarlo( - n_surrogates=n_surr, - imat=imat, - surrogate_dt=surrogate_dt, - surrogate_method='dither_spikes') + n_surrogates=n_surr, + imat=imat, + surrogate_dt=surrogate_dt, + surrogate_method='dither_spikes') # test probability matrices assert_array_equal(np.where(pmat > alpha), indices_pmat) diff --git a/elephant/test/test_cell_assembly_detection.py b/elephant/test/test_cell_assembly_detection.py index 78ce5feee..0f31c8ca4 100644 --- a/elephant/test/test_cell_assembly_detection.py +++ b/elephant/test/test_cell_assembly_detection.py @@ -17,10 +17,10 @@ class CadTestCase(unittest.TestCase): def setUp(self): # 
Parameters - self.binsize = 1*pq.ms + self.bin_size = 1 * pq.ms self.alpha = 0.05 self.size_chunks = 100 - self.maxlag = 10 + self.max_lag = 10 self.reference_lag = 2 self.min_occ = 1 self.max_spikes = np.inf @@ -53,32 +53,32 @@ def setUp(self): np.random.seed(1) self.patt1_times = neo.SpikeTrain( np.random.uniform(0, 1 - max(self.lags1), self.n_occ1) * pq.s, - t_start=0*pq.s, t_stop=1*pq.s) + t_start=0 * pq.s, t_stop=1 * pq.s) self.patt2_times = neo.SpikeTrain( np.random.uniform(0, 1 - max(self.lags2), self.n_occ2) * pq.s, - t_start=0*pq.s, t_stop=1*pq.s) + t_start=0 * pq.s, t_stop=1 * pq.s) self.patt3_times = neo.SpikeTrain( np.random.uniform(0, 1 - max(self.lags3), self.n_occ3) * pq.s, - t_start=0*pq.s, t_stop=1*pq.s) + t_start=0 * pq.s, t_stop=1 * pq.s) # Patterns self.patt1 = [self.patt1_times] + [neo.SpikeTrain( - self.patt1_times+l * pq.s, t_start=self.t_start * pq.s, + self.patt1_times + l * pq.s, t_start=self.t_start * pq.s, t_stop=self.t_stop * pq.s) for l in self.lags1] self.patt2 = [self.patt2_times] + [neo.SpikeTrain( - self.patt2_times+l * pq.s, t_start=self.t_start * pq.s, + self.patt2_times + l * pq.s, t_start=self.t_start * pq.s, t_stop=self.t_stop * pq.s) for l in self.lags2] self.patt3 = [self.patt3_times] + [neo.SpikeTrain( - self.patt3_times+l * pq.s, t_start=self.t_start * pq.s, + self.patt3_times + l * pq.s, t_start=self.t_start * pq.s, t_stop=self.t_stop * pq.s) for l in self.lags3] # Binning spiketrains self.bin_patt1 = conv.BinnedSpikeTrain(self.patt1, - binsize=self.binsize) + bin_size=self.bin_size) # Data self.msip = self.patt1 + self.patt2 + self.patt3 - self.msip = conv.BinnedSpikeTrain(self.msip, binsize=self.binsize) + self.msip = conv.BinnedSpikeTrain(self.msip, bin_size=self.bin_size) # Expected results self.n_spk1 = len(self.lags1) + 1 @@ -92,11 +92,11 @@ def setUp(self): range(self.n_spk1 + self.n_spk2, self.n_spk1 + self.n_spk2 + self.n_spk3)] self.occ1 = np.unique(conv.BinnedSpikeTrain( - self.patt1_times, self.binsize).spike_indices[0]) + self.patt1_times, self.bin_size).spike_indices[0]) self.occ2 = np.unique(conv.BinnedSpikeTrain( - self.patt2_times, self.binsize).spike_indices[0]) + self.patt2_times, self.bin_size).spike_indices[0]) self.occ3 = np.unique(conv.BinnedSpikeTrain( - self.patt3_times, self.binsize).spike_indices[0]) + self.patt3_times, self.bin_size).spike_indices[0]) self.occ_msip = [list(self.occ1), list(self.occ2), list(self.occ3)] self.lags_msip = [self.output_lags1, self.output_lags2, @@ -105,8 +105,8 @@ def setUp(self): # test for single pattern injection input def test_cad_single_sip(self): # collecting cad output - output_single = cad.\ - cell_assembly_detection(data=self.bin_patt1, maxlag=self.maxlag) + output_single = cad.cell_assembly_detection( + binned_spiketrain=self.bin_patt1, max_lag=self.max_lag) # check neurons in the pattern assert_array_equal(sorted(output_single[0]['neurons']), self.elements1) @@ -120,8 +120,8 @@ def test_cad_single_sip(self): # test with multiple (3) patterns injected in the data def test_cad_msip(self): # collecting cad output - output_msip = cad.\ - cell_assembly_detection(data=self.msip, maxlag=self.maxlag) + output_msip = cad.cell_assembly_detection( + binned_spiketrain=self.msip, max_lag=self.max_lag) elements_msip = [] occ_msip = [] @@ -149,54 +149,65 @@ def test_cad_raise_error(self): # test error data input format self.assertRaises(TypeError, cad.cell_assembly_detection, data=[[1, 2, 3], [3, 4, 5]], - maxlag=self.maxlag) + maxlag=self.max_lag) # test error significance level 
self.assertRaises(ValueError, cad.cell_assembly_detection, data=conv.BinnedSpikeTrain( - [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s), - neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)], - binsize=self.binsize), - maxlag=self.maxlag, + [neo.SpikeTrain([1, 2, 3] * pq.s, + t_stop=5 * pq.s), + neo.SpikeTrain([3, 4, 5] * pq.s, + t_stop=5 * pq.s)], + bin_size=self.bin_size), + maxlag=self.max_lag, alpha=-3) # test error minimum number of occurrences self.assertRaises(ValueError, cad.cell_assembly_detection, data=conv.BinnedSpikeTrain( - [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s), - neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)], - binsize=self.binsize), - maxlag=self.maxlag, + [neo.SpikeTrain([1, 2, 3] * pq.s, + t_stop=5 * pq.s), + neo.SpikeTrain([3, 4, 5] * pq.s, + t_stop=5 * pq.s)], + bin_size=self.bin_size), + maxlag=self.max_lag, min_occ=-1) # test error minimum number of spikes in a pattern self.assertRaises(ValueError, cad.cell_assembly_detection, data=conv.BinnedSpikeTrain( - [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s), - neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)], - binsize=self.binsize), - maxlag=self.maxlag, + [neo.SpikeTrain([1, 2, 3] * pq.s, + t_stop=5 * pq.s), + neo.SpikeTrain([3, 4, 5] * pq.s, + t_stop=5 * pq.s)], + bin_size=self.bin_size), + maxlag=self.max_lag, max_spikes=1) # test error chunk size for variance computation self.assertRaises(ValueError, cad.cell_assembly_detection, data=conv.BinnedSpikeTrain( - [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s), - neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)], - binsize=self.binsize), - maxlag=self.maxlag, + [neo.SpikeTrain([1, 2, 3] * pq.s, + t_stop=5 * pq.s), + neo.SpikeTrain([3, 4, 5] * pq.s, + t_stop=5 * pq.s)], + bin_size=self.bin_size), + maxlag=self.max_lag, size_chunks=1) # test error maximum lag self.assertRaises(ValueError, cad.cell_assembly_detection, data=conv.BinnedSpikeTrain( - [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s), - neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)], - binsize=self.binsize), + [neo.SpikeTrain([1, 2, 3] * pq.s, + t_stop=5 * pq.s), + neo.SpikeTrain([3, 4, 5] * pq.s, + t_stop=5 * pq.s)], + bin_size=self.bin_size), maxlag=1) # test error minimum length spike train self.assertRaises(ValueError, cad.cell_assembly_detection, data=conv.BinnedSpikeTrain( - [neo.SpikeTrain([1, 2, 3]*pq.ms, t_stop=6*pq.ms), - neo.SpikeTrain([3, 4, 5]*pq.ms, - t_stop=6*pq.ms)], - binsize=1*pq.ms), - maxlag=self.maxlag) + [neo.SpikeTrain([1, 2, 3] * pq.ms, + t_stop=6 * pq.ms), + neo.SpikeTrain([3, 4, 5] * pq.ms, + t_stop=6 * pq.ms)], + bin_size=1 * pq.ms), + maxlag=self.max_lag) def suite(): diff --git a/elephant/test/test_change_point_detection.py b/elephant/test/test_change_point_detection.py index ecef67a4a..57e197cf5 100644 --- a/elephant/test/test_change_point_detection.py +++ b/elephant/test/test_change_point_detection.py @@ -6,9 +6,9 @@ import unittest import elephant.change_point_detection as mft from numpy.testing.utils import assert_array_almost_equal, assert_allclose - - -#np.random.seed(13) + + +# np.random.seed(13) class FilterTestCase(unittest.TestCase): def setUp(self): @@ -21,7 +21,7 @@ def setUp(self): mu_le = (0.1 + 0.15 + 0.05) / 3 sigma_ri = ((0.25 - 0.15) ** 2 + (0.05 - 0.15) ** 2) / 2 sigma_le = ((0.1 - 0.1) ** 2 + (0.15 - 0.1) ** 2 + ( - 0.05 - 0.1) ** 2) / 3 + 0.05 - 0.1) ** 2) / 3 self.targ_t08_h025 = 0 self.targ_t08_h05 = (3 - 4) / np.sqrt( (sigma_ri / mu_ri ** (3)) * 0.5 + (sigma_le / mu_le ** (3)) * 0.5) @@ -36,7 +36,7 @@ def test_filter_with_spiketrain_h05(self): 
self.assertRaises(ValueError, mft._filter, 0.8 * pq.s, 0.5, st) self.assertRaises(ValueError, mft._filter, 0.8 * pq.s, 0.5 * pq.s, self.test_array) - + # Window Small # def test_filter_with_spiketrain_h025(self): st = neo.SpikeTrain(self.test_array, units='s', t_stop=2.0) @@ -55,7 +55,7 @@ def test_filter_with_plain_array_h025(self): target = self.targ_t08_h025 res = mft._filter(0.8 * pq.s, 0.25 * pq.s, st * pq.s) assert_array_almost_equal(res, target, decimal=9) - + def test_isi_with_quantities_h05(self): st = pq.Quantity(self.test_array, units='s') target = self.targ_t08_h05 @@ -84,15 +84,15 @@ def test_filter_process_with_spiketrain_h05(self): res = mft._filter_process(0.5 * pq.s, 0.5 * pq.s, st, 2.01 * pq.s, np.array([[0.5], [1.7], [0.4]])) assert_array_almost_equal(res[1], target[1], decimal=3) - - self.assertRaises(ValueError, mft._filter_process, 0.5 , 0.5 * pq.s, - st, 2.01 * pq.s, np.array([[0.5], [1.7], [0.4]])) + + self.assertRaises(ValueError, mft._filter_process, 0.5, 0.5 * pq.s, + st, 2.01 * pq.s, np.array([[0.5], [1.7], [0.4]])) self.assertRaises(ValueError, mft._filter_process, 0.5 * pq.s, 0.5, - st, 2.01 * pq.s, np.array([[0.5], [1.7], [0.4]])) + st, 2.01 * pq.s, np.array([[0.5], [1.7], [0.4]])) self.assertRaises(ValueError, mft._filter_process, 0.5 * pq.s, 0.5 * pq.s, self.test_array, 2.01 * pq.s, np.array([[0.5], [1.7], [0.4]])) - + def test_filter_proces_with_quantities_h05(self): st = pq.Quantity(self.test_array, units='s') target = self.targ_h05 @@ -113,49 +113,68 @@ class MultipleFilterAlgorithmTestCase(unittest.TestCase): def setUp(self): self.test_array = [1.1, 1.2, 1.4, 1.6, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95] self.targ_h05_dt05 = [1.5 * pq.s] - - # to speed up the test, the following `test_param` and `test_quantile` + + # to speed up the test, the following `test_param` and `test_quantile` # paramters have been calculated offline using the function: - # empirical_parameters([10, 25, 50, 75, 100, 125, 150]*pq.s,700*pq.s,5, + # empirical_parameters([10, 25, 50, 75, 100, 125, 150]*pq.s,700*pq.s,5, # 10000) - # the user should do the same, if the metohd has to be applied to several - # spike trains of the same length `T` and with the same set of window. - self.test_param = np.array([[10., 25., 50., 75., 100., 125., 150.], - [3.167, 2.955, 2.721, 2.548, 2.412, 2.293, 2.180], - [0.150, 0.185, 0.224, 0.249, 0.269, 0.288, 0.301]]) + # the user should do the same, if the metohd has to be applied to + # several spike trains of the same length `T` and with the same set of + # window. 
+ self.test_param = np.array([[10., + 25., + 50., + 75., + 100., + 125., + 150.], + [3.167, + 2.955, + 2.721, + 2.548, + 2.412, + 2.293, + 2.180], + [0.150, + 0.185, + 0.224, + 0.249, + 0.269, + 0.288, + 0.301]]) self.test_quantile = 2.75 def test_MultipleFilterAlgorithm_with_spiketrain_h05(self): st = neo.SpikeTrain(self.test_array, units='s', t_stop=2.1) target = [self.targ_h05_dt05] res = mft.multiple_filter_test([0.5] * pq.s, st, 2.1 * pq.s, 5, 100, - dt=0.1 * pq.s) + time_step=0.1 * pq.s) assert_array_almost_equal(res, target, decimal=9) def test_MultipleFilterAlgorithm_with_quantities_h05(self): st = pq.Quantity(self.test_array, units='s') target = [self.targ_h05_dt05] res = mft.multiple_filter_test([0.5] * pq.s, st, 2.1 * pq.s, 5, 100, - dt=0.5 * pq.s) + time_step=0.5 * pq.s) assert_array_almost_equal(res, target, decimal=9) def test_MultipleFilterAlgorithm_with_plain_array_h05(self): st = self.test_array target = [self.targ_h05_dt05] res = mft.multiple_filter_test([0.5] * pq.s, st * pq.s, 2.1 * pq.s, 5, - 100, dt=0.5 * pq.s) + 100, time_step=0.5 * pq.s) self.assertNotIsInstance(res, pq.Quantity) assert_array_almost_equal(res, target, decimal=9) - + def test_MultipleFilterAlgorithm_with_longdata(self): - + def gamma_train(k, teta, tmax): x = np.random.gamma(k, teta, int(tmax * (k * teta) ** (-1) * 3)) s = np.cumsum(x) idx = np.where(s < tmax) s = s[idx] # gamma process return s - + def alternative_hypothesis(k1, teta1, c1, k2, teta2, c2, k3, teta3, c3, k4, teta4, T): s1 = gamma_train(k1, teta1, c1) @@ -169,22 +188,29 @@ def alternative_hypothesis(k1, teta1, c1, k2, teta2, c2, k3, teta3, c3, 2, 1 / 33., 200)[0] window_size = [10, 25, 50, 75, 100, 125, 150] * pq.s - self.target_points = [150, 180, 500] + self.target_points = [150, 180, 500] target = self.target_points - - result = mft.multiple_filter_test(window_size, st * pq.s, 700 * pq.s, 5, - 10000, test_quantile=self.test_quantile, test_param=self.test_param, - dt=1 * pq.s) + + result = mft.multiple_filter_test( + window_size, + st * pq.s, + 700 * pq.s, + 5, + 10000, + test_quantile=self.test_quantile, + test_param=self.test_param, + time_step=1 * pq.s) self.assertNotIsInstance(result, pq.Quantity) result_concatenated = [] for i in result: result_concatenated = np.hstack([result_concatenated, i]) - result_concatenated = np.sort(result_concatenated) + result_concatenated = np.sort(result_concatenated) assert_allclose(result_concatenated[:3], target[:3], rtol=0, atol=5) print('detected {0} cps: {1}'.format(len(result_concatenated), - result_concatenated)) - + result_concatenated)) + + if __name__ == '__main__': unittest.main() diff --git a/elephant/test/test_conversion.py b/elephant/test/test_conversion.py index 5ae4598ce..c3f6d237a 100644 --- a/elephant/test/test_conversion.py +++ b/elephant/test/test_conversion.py @@ -182,7 +182,7 @@ def test_bin_edges(self): st = neo.SpikeTrain(times=np.array([2.5]) * pq.s, t_start=0 * pq.s, t_stop=3 * pq.s) with self.assertWarns(UserWarning): - bst = cv.BinnedSpikeTrain(st, binsize=2 * pq.s, t_start=0 * pq.s, + bst = cv.BinnedSpikeTrain(st, bin_size=2 * pq.s, t_start=0 * pq.s, t_stop=3 * pq.s) assert_array_equal(bst.bin_edges, [0., 2.] 
* pq.s) assert_array_equal(bst.spike_indices, [[]]) # no binned spikes @@ -195,18 +195,18 @@ def setUp(self): [0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s) self.spiketrain_b = neo.SpikeTrain( [0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s) - self.binsize = 1 * pq.s + self.bin_size = 1 * pq.s self.tolerance = 1e-8 def test_get_num_of_spikes(self): spiketrains = [self.spiketrain_a, self.spiketrain_b] for spiketrain in spiketrains: - binned = cv.BinnedSpikeTrain(spiketrain, num_bins=10, - binsize=1 * pq.s, t_start=0 * pq.s) + binned = cv.BinnedSpikeTrain(spiketrain, n_bins=10, + bin_size=1 * pq.s, t_start=0 * pq.s) self.assertEqual(binned.get_num_of_spikes(), len(binned.spike_indices[0])) - binned_matrix = cv.BinnedSpikeTrain(spiketrains, num_bins=10, - binsize=1 * pq.s) + binned_matrix = cv.BinnedSpikeTrain(spiketrains, n_bins=10, + bin_size=1 * pq.s) n_spikes_per_row = binned_matrix.get_num_of_spikes(axis=1) n_spikes_per_row_from_indices = list(map(len, binned_matrix.spike_indices)) @@ -217,9 +217,9 @@ def test_get_num_of_spikes(self): def test_binned_spiketrain_sparse(self): a = neo.SpikeTrain([1.7, 1.8, 4.3] * pq.s, t_stop=10.0 * pq.s) b = neo.SpikeTrain([1.7, 1.8, 4.3] * pq.s, t_stop=10.0 * pq.s) - binsize = 1 * pq.s + bin_size = 1 * pq.s nbins = 10 - x = cv.BinnedSpikeTrain([a, b], num_bins=nbins, binsize=binsize, + x = cv.BinnedSpikeTrain([a, b], n_bins=nbins, bin_size=bin_size, t_start=0 * pq.s) x_sparse = [2, 1, 2, 1] s = x.to_sparse_array() @@ -228,10 +228,10 @@ def test_binned_spiketrain_sparse(self): def test_binned_spiketrain_shape(self): a = self.spiketrain_a - x = cv.BinnedSpikeTrain(a, num_bins=10, - binsize=self.binsize, + x = cv.BinnedSpikeTrain(a, n_bins=10, + bin_size=self.bin_size, t_start=0 * pq.s) - x_bool = cv.BinnedSpikeTrain(a, num_bins=10, binsize=self.binsize, + x_bool = cv.BinnedSpikeTrain(a, n_bins=10, bin_size=self.bin_size, t_start=0 * pq.s) self.assertTrue(x.to_array().shape == (1, 10)) self.assertTrue(x_bool.to_bool_array().shape == (1, 10)) @@ -242,9 +242,9 @@ def test_binned_spiketrain_shape_list(self): b = self.spiketrain_b c = [a, b] nbins = 5 - x = cv.BinnedSpikeTrain(c, num_bins=nbins, t_start=0 * pq.s, + x = cv.BinnedSpikeTrain(c, n_bins=nbins, t_start=0 * pq.s, t_stop=10.0 * pq.s) - x_bool = cv.BinnedSpikeTrain(c, num_bins=nbins, t_start=0 * pq.s, + x_bool = cv.BinnedSpikeTrain(c, n_bins=nbins, t_start=0 * pq.s, t_stop=10.0 * pq.s) self.assertTrue(x.to_array().shape == (2, 5)) self.assertTrue(x_bool.to_bool_array().shape == (2, 5)) @@ -253,9 +253,9 @@ def test_binned_spiketrain_neg_times(self): a = neo.SpikeTrain( [-6.5, 0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_start=-6.5 * pq.s, t_stop=10.0 * pq.s) - binsize = self.binsize + bin_size = self.bin_size nbins = 16 - x = cv.BinnedSpikeTrain(a, num_bins=nbins, binsize=binsize, + x = cv.BinnedSpikeTrain(a, n_bins=nbins, bin_size=bin_size, t_start=-6.5 * pq.s) y = [ np.array([1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0])] @@ -271,9 +271,9 @@ def test_binned_spiketrain_neg_times_list(self): t_start=-1 * pq.s, t_stop=8 * pq.s) c = [a, b] - binsize = self.binsize + bin_size = self.bin_size with self.assertWarns(UserWarning): - x_bool = cv.BinnedSpikeTrain(c, binsize=binsize) + x_bool = cv.BinnedSpikeTrain(c, bin_size=bin_size) y_bool = [[0, 1, 1, 0, 1, 1, 1, 1], [1, 0, 1, 1, 0, 1, 1, 0]] @@ -283,11 +283,11 @@ def test_binned_spiketrain_neg_times_list(self): # checking spike_indices(f) and matrix(m) for 1 spiketrain def test_binned_spiketrain_indices(self): a = 
self.spiketrain_a - binsize = self.binsize + bin_size = self.bin_size nbins = 10 - x = cv.BinnedSpikeTrain(a, num_bins=nbins, binsize=binsize, + x = cv.BinnedSpikeTrain(a, n_bins=nbins, bin_size=bin_size, t_start=0 * pq.s) - x_bool = cv.BinnedSpikeTrain(a, num_bins=nbins, binsize=binsize, + x_bool = cv.BinnedSpikeTrain(a, n_bins=nbins, bin_size=bin_size, t_start=0 * pq.s) y_matrix = [ np.array([2., 1., 0., 1., 1., 1., 1., 0., 0., 0.])] @@ -308,12 +308,12 @@ def test_binned_spiketrain_list(self): a = self.spiketrain_a b = self.spiketrain_b - binsize = self.binsize + bin_size = self.bin_size nbins = 10 c = [a, b] - x = cv.BinnedSpikeTrain(c, num_bins=nbins, binsize=binsize, + x = cv.BinnedSpikeTrain(c, n_bins=nbins, bin_size=bin_size, t_start=0 * pq.s) - x_bool = cv.BinnedSpikeTrain(c, num_bins=nbins, binsize=binsize, + x_bool = cv.BinnedSpikeTrain(c, n_bins=nbins, bin_size=bin_size, t_start=0 * pq.s) y_matrix = np.array( [[2, 1, 0, 1, 1, 1, 1, 0, 0, 0], @@ -332,12 +332,12 @@ def test_binned_spiketrain_list_t_stop(self): a = self.spiketrain_a b = self.spiketrain_b c = [a, b] - binsize = self.binsize + bin_size = self.bin_size nbins = 10 - x = cv.BinnedSpikeTrain(c, num_bins=nbins, binsize=binsize, + x = cv.BinnedSpikeTrain(c, n_bins=nbins, bin_size=bin_size, t_start=0 * pq.s, t_stop=None) - x_bool = cv.BinnedSpikeTrain(c, num_bins=nbins, binsize=binsize, + x_bool = cv.BinnedSpikeTrain(c, n_bins=nbins, bin_size=bin_size, t_start=0 * pq.s) self.assertTrue(x.t_stop == 10 * pq.s) self.assertTrue(x_bool.t_stop == 10 * pq.s) @@ -347,21 +347,21 @@ def test_binned_spiketrain_list_numbins(self): a = self.spiketrain_a b = self.spiketrain_b c = [a, b] - binsize = 1 * pq.s - x = cv.BinnedSpikeTrain(c, binsize=binsize, t_start=0 * pq.s, + bin_size = 1 * pq.s + x = cv.BinnedSpikeTrain(c, bin_size=bin_size, t_start=0 * pq.s, t_stop=10. * pq.s) - x_bool = cv.BinnedSpikeTrain(c, binsize=binsize, t_start=0 * pq.s, + x_bool = cv.BinnedSpikeTrain(c, bin_size=bin_size, t_start=0 * pq.s, t_stop=10. * pq.s) - self.assertTrue(x.num_bins == 10) - self.assertTrue(x_bool.num_bins == 10) + self.assertTrue(x.n_bins == 10) + self.assertTrue(x_bool.n_bins == 10) def test_binned_spiketrain_matrix(self): # Init a = self.spiketrain_a b = self.spiketrain_b - x_bool_a = cv.BinnedSpikeTrain(a, binsize=pq.s, t_start=0 * pq.s, + x_bool_a = cv.BinnedSpikeTrain(a, bin_size=pq.s, t_start=0 * pq.s, t_stop=10. * pq.s) - x_bool_b = cv.BinnedSpikeTrain(b, binsize=pq.s, t_start=0 * pq.s, + x_bool_b = cv.BinnedSpikeTrain(b, bin_size=pq.s, t_start=0 * pq.s, t_stop=10. * pq.s) # Assumed results @@ -382,9 +382,9 @@ def test_binned_spiketrain_matrix_storing(self): a = self.spiketrain_a b = self.spiketrain_b - x_bool = cv.BinnedSpikeTrain(a, binsize=pq.s, t_start=0 * pq.s, + x_bool = cv.BinnedSpikeTrain(a, bin_size=pq.s, t_start=0 * pq.s, t_stop=10. * pq.s) - x = cv.BinnedSpikeTrain(b, binsize=pq.s, t_start=0 * pq.s, + x = cv.BinnedSpikeTrain(b, bin_size=pq.s, t_start=0 * pq.s, t_stop=10. * pq.s) # Store Matrix in variable matrix_bool = x_bool.to_bool_array() @@ -408,7 +408,7 @@ def test_binned_spiketrain_matrix_storing(self): x_bool.to_sparse_bool_array().toarray())) # New class without calculating the matrix - x = cv.BinnedSpikeTrain(b, binsize=pq.s, t_start=0 * pq.s, + x = cv.BinnedSpikeTrain(b, bin_size=pq.s, t_start=0 * pq.s, t_stop=10. 
* pq.s) # No matrix calculated, should be None self.assertEqual(x._mat_u, None) @@ -418,7 +418,7 @@ def test_binned_spiketrain_matrix_storing(self): # Test matrix removing def test_binned_spiketrain_remove_matrix(self): a = self.spiketrain_a - x = cv.BinnedSpikeTrain(a, binsize=1 * pq.s, num_bins=10, + x = cv.BinnedSpikeTrain(a, bin_size=1 * pq.s, n_bins=10, t_stop=10. * pq.s) # Store x.to_array(store_array=True) @@ -430,18 +430,18 @@ def test_binned_spiketrain_remove_matrix(self): # Test if t_start is calculated correctly def test_binned_spiketrain_parameter_calc_tstart(self): a = self.spiketrain_a - x = cv.BinnedSpikeTrain(a, binsize=1 * pq.s, num_bins=10, + x = cv.BinnedSpikeTrain(a, bin_size=1 * pq.s, n_bins=10, t_stop=10. * pq.s) self.assertEqual(x.t_start, 0. * pq.s) self.assertEqual(x.t_stop, 10. * pq.s) - self.assertEqual(x.binsize, 1 * pq.s) - self.assertEqual(x.num_bins, 10) + self.assertEqual(x.bin_size, 1 * pq.s) + self.assertEqual(x.n_bins, 10) - # Test if error raises when type of num_bins is not an integer + # Test if error raises when type of n_bins is not an integer def test_binned_spiketrain_numbins_type_error(self): a = self.spiketrain_a - self.assertRaises(TypeError, cv.BinnedSpikeTrain, a, binsize=pq.s, - num_bins=1.4, t_start=0 * pq.s, + self.assertRaises(TypeError, cv.BinnedSpikeTrain, a, bin_size=pq.s, + n_bins=1.4, t_start=0 * pq.s, t_stop=10. * pq.s) # Test if error is raised when providing insufficient number of @@ -449,19 +449,26 @@ def test_binned_spiketrain_numbins_type_error(self): def test_binned_spiketrain_insufficient_arguments(self): a = self.spiketrain_a self.assertRaises(AttributeError, cv.BinnedSpikeTrain, a) - self.assertRaises(ValueError, cv.BinnedSpikeTrain, a, binsize=1 * pq.s, - t_start=0 * pq.s, t_stop=0 * pq.s) + self.assertRaises( + ValueError, + cv.BinnedSpikeTrain, + a, + bin_size=1 * pq.s, + t_start=0 * pq.s, + t_stop=0 * pq.s) def test_calc_attributes_error(self): - self.assertRaises(ValueError, cv._calc_num_bins, + self.assertRaises(ValueError, cv._calc_number_of_bins, 1, 1 * pq.s, 0 * pq.s, self.tolerance) - self.assertRaises(ValueError, cv._calc_binsize, + self.assertRaises(ValueError, cv._calc_bin_size, 1, 1 * pq.s, 0 * pq.s) def test_different_input_types(self): a = self.spiketrain_a q = [1, 2, 3] * pq.s - self.assertRaises(TypeError, cv.BinnedSpikeTrain, [a, q], binsize=pq.s) + self.assertRaises( + TypeError, cv.BinnedSpikeTrain, [ + a, q], bin_size=pq.s) def test_get_start_stop(self): a = self.spiketrain_a @@ -480,24 +487,29 @@ def test_consistency_errors(self): b = neo.SpikeTrain([-2, -1] * pq.s, t_start=-2 * pq.s, t_stop=-1 * pq.s) self.assertRaises(ValueError, cv.BinnedSpikeTrain, [a, b], t_start=5, - t_stop=0, binsize=pq.s, num_bins=10) + t_stop=0, bin_size=pq.s, n_bins=10) b = neo.SpikeTrain([-7, -8, -9] * pq.s, t_start=-9 * pq.s, t_stop=-7 * pq.s) self.assertRaises(ValueError, cv.BinnedSpikeTrain, b, t_start=0, - t_stop=10, binsize=pq.s, num_bins=10) + t_stop=10, bin_size=pq.s, n_bins=10) self.assertRaises(ValueError, cv.BinnedSpikeTrain, a, t_start=0 * pq.s, - t_stop=10 * pq.s, binsize=3 * pq.s, num_bins=10) + t_stop=10 * pq.s, bin_size=3 * pq.s, n_bins=10) b = neo.SpikeTrain([-4, -2, 0, 1] * pq.s, t_start=-4 * pq.s, t_stop=1 * pq.s) - self.assertRaises(TypeError, cv.BinnedSpikeTrain, b, binsize=-2 * pq.s, - t_start=-4 * pq.s, t_stop=0 * pq.s) + self.assertRaises( + TypeError, + cv.BinnedSpikeTrain, + b, + bin_size=-2 * pq.s, + t_start=-4 * pq.s, + t_stop=0 * pq.s) # Test edges def test_binned_spiketrain_bin_edges(self): a = 
self.spiketrain_a - x = cv.BinnedSpikeTrain(a, binsize=1 * pq.s, num_bins=10, + x = cv.BinnedSpikeTrain(a, bin_size=1 * pq.s, n_bins=10, t_stop=10. * pq.s) # Test all edges edges = [float(i) for i in range(11)] @@ -519,9 +531,9 @@ def test_binned_spiketrain_bin_edges(self): def test_binned_spiketrain_different_units(self): a = self.spiketrain_a b = a.rescale(pq.ms) - binsize = 1 * pq.s - xa = cv.BinnedSpikeTrain(a, binsize=binsize) - xb = cv.BinnedSpikeTrain(b, binsize=binsize.rescale(pq.ms)) + bin_size = 1 * pq.s + xa = cv.BinnedSpikeTrain(a, bin_size=bin_size) + xb = cv.BinnedSpikeTrain(b, bin_size=bin_size.rescale(pq.ms)) self.assertTrue( np.array_equal(xa.to_bool_array(), xb.to_bool_array())) self.assertTrue( @@ -529,7 +541,7 @@ def test_binned_spiketrain_different_units(self): xb.to_sparse_array().data)) self.assertTrue( np.array_equal(xa.bin_edges[:-1], - xb.bin_edges[:-1].rescale(binsize.units))) + xb.bin_edges[:-1].rescale(bin_size.units))) def test_binary_to_binned_matrix(self): a = [[1, 0, 0, 0], [0, 1, 1, 0]] @@ -538,24 +550,24 @@ def test_binary_to_binned_matrix(self): self.assertTrue(np.array_equal(a, x.to_bool_array())) self.assertTrue(np.array_equal(np.array(a), x.to_bool_array())) self.assertTrue(np.array_equal(a, x.to_bool_array())) - self.assertEqual(x.num_bins, 4) - self.assertEqual(x.binsize, 1.25 * pq.s) + self.assertEqual(x.n_bins, 4) + self.assertEqual(x.bin_size, 1.25 * pq.s) - x = cv.BinnedSpikeTrain(a, t_start=1 * pq.s, binsize=2 * pq.s) + x = cv.BinnedSpikeTrain(a, t_start=1 * pq.s, bin_size=2 * pq.s) self.assertTrue(np.array_equal(a, x.to_bool_array())) self.assertEqual(x.t_stop, 9 * pq.s) - x = cv.BinnedSpikeTrain(a, t_stop=9 * pq.s, binsize=2 * pq.s) + x = cv.BinnedSpikeTrain(a, t_stop=9 * pq.s, bin_size=2 * pq.s) self.assertEqual(x.t_start, 1 * pq.s) # Raise error self.assertRaises(ValueError, cv.BinnedSpikeTrain, a, - t_start=5 * pq.s, t_stop=0 * pq.s, binsize=pq.s, - num_bins=10) + t_start=5 * pq.s, t_stop=0 * pq.s, bin_size=pq.s, + n_bins=10) self.assertRaises(ValueError, cv.BinnedSpikeTrain, a, t_start=0 * pq.s, - t_stop=10 * pq.s, binsize=3 * pq.s, num_bins=10) + t_stop=10 * pq.s, bin_size=3 * pq.s, n_bins=10) self.assertRaises(ValueError, cv.BinnedSpikeTrain, a, - binsize=-2 * pq.s, t_start=-4 * pq.s, + bin_size=-2 * pq.s, t_start=-4 * pq.s, t_stop=0 * pq.s) # Check binary property @@ -563,21 +575,21 @@ def test_binary_to_binned_matrix(self): def test_binned_to_binned(self): a = self.spiketrain_a - x = cv.BinnedSpikeTrain(a, binsize=1 * pq.s).to_array() - y = cv.BinnedSpikeTrain(x, binsize=1 * pq.s, t_start=0 * pq.s) + x = cv.BinnedSpikeTrain(a, bin_size=1 * pq.s).to_array() + y = cv.BinnedSpikeTrain(x, bin_size=1 * pq.s, t_start=0 * pq.s) self.assertTrue(np.array_equal(x, y.to_array())) # test with a list - x = cv.BinnedSpikeTrain([[0, 1, 2, 3]], binsize=1 * pq.s, - t_stop=3*pq.s).to_array() - y = cv.BinnedSpikeTrain(x, binsize=1 * pq.s, t_start=0 * pq.s) + x = cv.BinnedSpikeTrain([[0, 1, 2, 3]], bin_size=1 * pq.s, + t_stop=3 * pq.s).to_array() + y = cv.BinnedSpikeTrain(x, bin_size=1 * pq.s, t_start=0 * pq.s) self.assertTrue(np.array_equal(x, y.to_array())) # test with a numpy array a = np.array([[0, 1, 2, 3], [1, 2, 2.5, 3]]) - x = cv.BinnedSpikeTrain(a, binsize=1 * pq.s, - t_stop=3*pq.s).to_array() - y = cv.BinnedSpikeTrain(x, binsize=1 * pq.s, t_start=0 * pq.s) + x = cv.BinnedSpikeTrain(a, bin_size=1 * pq.s, + t_stop=3 * pq.s).to_array() + y = cv.BinnedSpikeTrain(x, bin_size=1 * pq.s, t_start=0 * pq.s) self.assertTrue(np.array_equal(x, 
y.to_array())) # Check binary property @@ -588,21 +600,21 @@ def test_binned_to_binned(self): # produce a TypeError a = np.array([[0, 1, 2, 3], [1, 2, 3]]) self.assertRaises(TypeError, cv.BinnedSpikeTrain, a, t_start=0 * pq.s, - binsize=1 * pq.s) + bin_size=1 * pq.s) # Give no t_start or t_stop a = np.array([[0, 1, 2, 3], [1, 2, 3, 4]]) self.assertRaises(AttributeError, cv.BinnedSpikeTrain, a, - binsize=1 * pq.s) + bin_size=1 * pq.s) # Input format not supported a = np.array(([0, 1, 2], [0, 1, 2, 3, 4])) - self.assertRaises(TypeError, cv.BinnedSpikeTrain, a, binsize=1 * pq.s) + self.assertRaises(TypeError, cv.BinnedSpikeTrain, a, bin_size=1 * pq.s) def test_binnend_spiketrain_rescaling(self): train = neo.SpikeTrain(times=np.array([1.001, 1.002, 1.005]) * pq.s, t_start=1 * pq.s, t_stop=1.01 * pq.s) bst = cv.BinnedSpikeTrain(train, t_start=1 * pq.s, t_stop=1.01 * pq.s, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) target_edges = np.array([1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010], dtype=np.float) target_centers = np.array( @@ -614,7 +626,7 @@ def test_binnend_spiketrain_rescaling(self): self.assertTrue(bst.bin_edges.units == pq.ms) bst = cv.BinnedSpikeTrain(train, t_start=1 * pq.s, t_stop=1010 * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) self.assertTrue(np.allclose(bst.bin_edges.magnitude, target_edges)) self.assertTrue(np.allclose(bst.bin_centers.magnitude, target_centers)) self.assertTrue(bst.bin_centers.units == pq.ms) @@ -622,18 +634,18 @@ def test_binnend_spiketrain_rescaling(self): def test_binned_sparsity(self): train = neo.SpikeTrain(np.arange(10), t_stop=10 * pq.s, units=pq.s) - bst = cv.BinnedSpikeTrain(train, num_bins=100) + bst = cv.BinnedSpikeTrain(train, n_bins=100) self.assertAlmostEqual(bst.sparsity, 0.1) # Test fix for rounding errors @unittest.skipUnless(python_version_major == 3, "assertWarns requires 3.2") def test_binned_spiketrain_rounding(self): train = neo.SpikeTrain(times=np.arange(120000) / 30000. * pq.s, - t_start=0*pq.s, t_stop=4*pq.s) + t_start=0 * pq.s, t_stop=4 * pq.s) with self.assertWarns(UserWarning): bst = cv.BinnedSpikeTrain(train, - t_start=0*pq.s, t_stop=4*pq.s, - binsize=1./30000.*pq.s) + t_start=0 * pq.s, t_stop=4 * pq.s, + bin_size=1. / 30000. * pq.s) assert_array_equal(bst.to_array().nonzero()[1], np.arange(120000)) diff --git a/elephant/test/test_cubic.py b/elephant/test/test_cubic.py index ffe72d1ec..6907e6893 100644 --- a/elephant/test/test_cubic.py +++ b/elephant/test/test_cubic.py @@ -33,13 +33,14 @@ class CubicTestCase(unittest.TestCase): ---------- [1]Staude, Rotter, Gruen, (2009) J. Comp. 
Neurosci ''' + def setUp(self): n2 = 300 - n0 = 100000-n2 + n0 = 100000 - n2 self.xi = 10 self.data_signal = neo.AnalogSignal( numpy.array([self.xi] * n2 + [0] * n0).reshape(n0 + n2, 1) * - pq.dimensionless, sampling_period=1*pq.s) + pq.dimensionless, sampling_period=1 * pq.s) self.data_array = numpy.array([self.xi] * n2 + [0] * n0) self.alpha = 0.05 self.ximax = 10 @@ -114,7 +115,7 @@ def test_cubic_ximax(self): # Test exceeding ximax with self.assertWarns(UserWarning): xi_ximax, p_vals_ximax, k_ximax, test_aborted = cubic.cubic( - self.data_signal, alpha=1, ximax=self.ximax) + self.data_signal, alpha=1, max_iterations=self.ximax) self.assertEqual(test_aborted, True) self.assertEqual(xi_ximax - 1, self.ximax) @@ -126,7 +127,7 @@ def test_cubic_errors(self): # Empty signal self.assertRaises( ValueError, cubic.cubic, neo.AnalogSignal( - []*pq.dimensionless, sampling_period=10*pq.ms)) + [] * pq.dimensionless, sampling_period=10 * pq.ms)) dummy_data = numpy.tile([1, 2, 3], reps=3) # Multidimensional array @@ -144,8 +145,8 @@ def test_cubic_errors(self): # Checking case in which the second cumulant of the signal is smaller # than the first cumulant (analitycal constrain of the method) self.assertRaises(ValueError, cubic.cubic, neo.AnalogSignal( - numpy.array([1]*1000).reshape(1000, 1), units=pq.dimensionless, - sampling_period=10*pq.ms), alpha=self.alpha) + numpy.array([1] * 1000).reshape(1000, 1), units=pq.dimensionless, + sampling_period=10 * pq.ms), alpha=self.alpha) def suite(): diff --git a/elephant/test/test_gpfa.py b/elephant/test/test_gpfa.py index 8ec47b000..a9d68ee9a 100644 --- a/elephant/test/test_gpfa.py +++ b/elephant/test/test_gpfa.py @@ -111,7 +111,12 @@ def test_transform_testing_data(self): with self.assertRaises(ValueError): gpfa1.transform(self.data2) + @unittest.skipUnless(python_version_major == 3, + "sklearn py2 has a bug in cross_val_score") def test_cross_validation(self): + # If GPFA.__init__ is decorated, sklearn signature function parsing + # magic throws the error + # __init__() got an unexpected keyword argument 'args' lls = [] for x_dim in range(1, self.x_dim + 1): gpfa = GPFA(x_dim=x_dim, em_max_iters=self.n_iters) diff --git a/elephant/test/test_icsd.py b/elephant/test/test_icsd.py index 20d796be9..764fcf7a3 100644 --- a/elephant/test/test_icsd.py +++ b/elephant/test/test_icsd.py @@ -12,13 +12,13 @@ from elephant.current_source_density import icsd import unittest -#patch quantities with the SI unit Siemens if it does not exist +# patch quantities with the SI unit Siemens if it does not exist for symbol, prefix, definition, u_symbol in zip( ['siemens', 'S', 'mS', 'uS', 'nS', 'pS'], ['', '', 'milli', 'micro', 'nano', 'pico'], - [pq.A/pq.V, pq.A/pq.V, 'S', 'mS', 'uS', 'nS'], - [None, None, None, None, u'µS', None]): - if type(definition) is str: + [pq.A / pq.V, pq.A / pq.V, 'S', 'mS', 'uS', 'nS'], + [None, None, None, None, u'µS', None]): + if isinstance(definition, str): definition = lastdefinition / 1000 if not hasattr(pq, symbol): setattr(pq, symbol, pq.UnitQuantity( @@ -29,9 +29,9 @@ lastdefinition = definition -def potential_of_plane(z_j, z_i=0.*pq.m, - C_i=1*pq.A/pq.m**2, - sigma=0.3*pq.S/pq.m): +def potential_of_plane(z_j, z_i=0. * pq.m, + C_i=1 * pq.A / pq.m**2, + sigma=0.3 * pq.S / pq.m): ''' Return potential of infinite horizontal plane with constant current source density at a vertical offset z_j. 
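For context, a minimal sketch (not part of the patch) of the ground truth these iCSD tests are built on: the potential of an infinite, homogeneous current-source plane, as implemented in the hunk below, is phi_j = -C_i / (2 * sigma) * |z_j - z_i|, and the test LFP is the superposition of that expression over the planar sources before the estimator is asked to recover C_i. The values here mirror the defaults of `get_lfp_of_planes` further down; variable names follow this module:

    import numpy as np
    import quantities as pq

    z_j = np.arange(21) * 1E-4 * pq.m            # electrode contact depths
    z_i = np.array([8E-4, 10E-4, 12E-4]) * pq.m  # source-plane depths
    C_i = np.array([-.5, 1., -.5]) * pq.A / pq.m ** 2
    sigma = 0.3 * pq.S / pq.m                    # homogeneous conductivity

    # superpose the analytical plane potentials at every contact
    phi_j = np.zeros(z_j.size) * pq.V
    for z, C in zip(z_i, C_i):
        phi_j += (-C / (2 * sigma) * abs(z_j - z)).simplified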
@@ -57,17 +57,17 @@ def potential_of_plane(z_j, z_i=0.*pq.m, assert(z_j.units == z_i.units) except AssertionError as ae: print('units of z_j ({}) and z_i ({}) not equal'.format(z_j.units, - z_i.units)) + z_i.units)) raise ae - return -C_i/(2*sigma)*abs(z_j-z_i).simplified + return -C_i / (2 * sigma) * abs(z_j - z_i).simplified def potential_of_disk(z_j, - z_i=0.*pq.m, - C_i=1*pq.A/pq.m**2, - R_i=1E-3*pq.m, - sigma=0.3*pq.S/pq.m): + z_i=0. * pq.m, + C_i=1 * pq.A / pq.m**2, + R_i=1E-3 * pq.m, + sigma=0.3 * pq.S / pq.m): ''' Return potential of circular disk in horizontal plane with constant current source density at a vertical offset z_j. @@ -92,15 +92,16 @@ def potential_of_disk(z_j, z_j.units, z_i.units, R_i.units)) raise ae - return C_i/(2*sigma)*(np.sqrt((z_j-z_i)**2 + R_i**2) - abs(z_j-z_i)).simplified + return C_i / (2 * sigma) * ( + np.sqrt((z_j - z_i) ** 2 + R_i**2) - abs(z_j - z_i)).simplified def potential_of_cylinder(z_j, - z_i=0.*pq.m, - C_i=1*pq.A/pq.m**3, - R_i=1E-3*pq.m, - h_i=0.1*pq.m, - sigma=0.3*pq.S/pq.m, + z_i=0. * pq.m, + C_i=1 * pq.A / pq.m**3, + R_i=1E-3 * pq.m, + h_i=0.1 * pq.m, + sigma=0.3 * pq.S / pq.m, ): ''' Return potential of cylinder in horizontal plane with constant homogeneous @@ -131,48 +132,49 @@ def potential_of_cylinder(z_j, >>>from sympy import * >>>C_i, z_i, h, z_j, z_j, sigma, R = symbols('C_i z_i h z z_j sigma R') - >>>C_i*integrate(1/(2*sigma)*(sqrt((z-z_j)**2 + R**2) - abs(z-z_j)), (z, z_i-h/2, z_i+h/2)) + >>>C_i*integrate(1/(2*sigma)*(sqrt((z-z_j)**2 + R**2) - + ... abs(z-z_j)), (z, z_i-h/2, z_i+h/2)) ''' try: assert(z_j.units == z_i.units == R_i.units == h_i.units) except AssertionError as ae: - print('units of z_j ({}), z_i ({}), R_i ({}) and h ({}) not equal'.format( - z_j.units, z_i.units, R_i.units, h_i.units)) + print('units of z_j ({}), z_i ({}), R_i ({}) and h ({}) not equal' + .format(z_j.units, z_i.units, R_i.units, h_i.units)) raise ae - #speed up tests by stripping units + # speed up tests by stripping units _sigma = float(sigma) _R_i = float(R_i) _z_i = float(z_i) _z_j = float(z_j) - #evaluate integrand using quad + # evaluate integrand using quad def integrand(z): - return 1/(2*_sigma)*(np.sqrt((z-_z_j)**2 + _R_i**2) - abs(z-_z_j)) + return 1 / (2 * _sigma) * \ + (np.sqrt((z - _z_j)**2 + _R_i**2) - abs(z - _z_j)) - phi_j, abserr = C_i*si.quad(integrand, z_i-h_i/2, z_i+h_i/2) + phi_j, abserr = C_i * si.quad(integrand, z_i - h_i / 2, z_i + h_i / 2) return (phi_j * z_i.units**2 / sigma.units) - -def get_lfp_of_planes(z_j=np.arange(21)*1E-4*pq.m, - z_i=np.array([8E-4, 10E-4, 12E-4])*pq.m, - C_i=np.array([-.5, 1., -.5])*pq.A/pq.m**2, - sigma=0.3*pq.S/pq.m, +def get_lfp_of_planes(z_j=np.arange(21) * 1E-4 * pq.m, + z_i=np.array([8E-4, 10E-4, 12E-4]) * pq.m, + C_i=np.array([-.5, 1., -.5]) * pq.A / pq.m**2, + sigma=0.3 * pq.S / pq.m, plot=True): ''' Compute the lfp of spatially separated planes with given current source density ''' - phi_j = np.zeros(z_j.size)*pq.V + phi_j = np.zeros(z_j.size) * pq.V for i, (zi, Ci) in enumerate(zip(z_i, C_i)): for j, zj in enumerate(z_j): phi_j[j] += potential_of_plane(zj, zi, Ci, sigma) - #test plot + # test plot if plot: import matplotlib.pyplot as plt plt.figure() @@ -196,22 +198,22 @@ def get_lfp_of_planes(z_j=np.arange(21)*1E-4*pq.m, return phi_j, C_i -def get_lfp_of_disks(z_j=np.arange(21)*1E-4*pq.m, - z_i=np.array([8E-4, 10E-4, 12E-4])*pq.m, - C_i=np.array([-.5, 1., -.5])*pq.A/pq.m**2, - R_i = np.array([1, 1, 1])*1E-3*pq.m, - sigma=0.3*pq.S/pq.m, +def get_lfp_of_disks(z_j=np.arange(21) * 1E-4 * pq.m, + 
z_i=np.array([8E-4, 10E-4, 12E-4]) * pq.m, + C_i=np.array([-.5, 1., -.5]) * pq.A / pq.m**2, + R_i=np.array([1, 1, 1]) * 1E-3 * pq.m, + sigma=0.3 * pq.S / pq.m, plot=True): ''' Compute the lfp of spatially separated disks with a given current source density ''' - phi_j = np.zeros(z_j.size)*pq.V + phi_j = np.zeros(z_j.size) * pq.V for i, (zi, Ci, Ri) in enumerate(zip(z_i, C_i, R_i)): for j, zj in enumerate(z_j): phi_j[j] += potential_of_disk(zj, zi, Ci, Ri, sigma) - #test plot + # test plot if plot: import matplotlib.pyplot as plt plt.figure() @@ -235,30 +237,30 @@ def get_lfp_of_disks(z_j=np.arange(21)*1E-4*pq.m, return phi_j, C_i -def get_lfp_of_cylinders(z_j=np.arange(21)*1E-4*pq.m, - z_i=np.array([8E-4, 10E-4, 12E-4])*pq.m, - C_i=np.array([-.5, 1., -.5])*pq.A/pq.m**3, - R_i = np.array([1, 1, 1])*1E-3*pq.m, - h_i=np.array([1, 1, 1])*1E-4*pq.m, - sigma=0.3*pq.S/pq.m, +def get_lfp_of_cylinders(z_j=np.arange(21) * 1E-4 * pq.m, + z_i=np.array([8E-4, 10E-4, 12E-4]) * pq.m, + C_i=np.array([-.5, 1., -.5]) * pq.A / pq.m**3, + R_i=np.array([1, 1, 1]) * 1E-3 * pq.m, + h_i=np.array([1, 1, 1]) * 1E-4 * pq.m, + sigma=0.3 * pq.S / pq.m, plot=True): ''' Compute the lfp of spatially separated disks with a given current source density ''' - phi_j = np.zeros(z_j.size)*pq.V + phi_j = np.zeros(z_j.size) * pq.V for i, (zi, Ci, Ri, hi) in enumerate(zip(z_i, C_i, R_i, h_i)): for j, zj in enumerate(z_j): phi_j[j] += potential_of_cylinder(zj, zi, Ci, Ri, hi, sigma) - #test plot + # test plot if plot: import matplotlib.pyplot as plt plt.figure() plt.subplot(121) ax = plt.gca() ax.plot(np.zeros(z_j.size), z_j, 'r-o') - ax.barh(np.asarray(z_i-h_i/2), + ax.barh(np.asarray(z_i - h_i / 2), np.asarray(C_i), np.asarray(h_i), color='r') ax.set_ylim(z_j.min(), z_j.max()) @@ -276,10 +278,6 @@ def get_lfp_of_cylinders(z_j=np.arange(21)*1E-4*pq.m, return phi_j, C_i - - - - class TestICSD(unittest.TestCase): ''' Set of test functions for each CSD estimation method comparing @@ -288,32 +286,32 @@ class TestICSD(unittest.TestCase): def test_StandardCSD_00(self): '''test using standard SI units''' - #set some parameters for ground truth csd and csd estimates. + # set some parameters for ground truth csd and csd estimates. - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**2 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**2 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2 - #uniform conductivity - sigma = 0.3*pq.S/pq.m + # uniform conductivity + sigma = 0.3 * pq.S / pq.m - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot) std_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j, - 'sigma' : sigma, - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j, + 'coord_electrode': z_j, + 'sigma': sigma, + 'f_type': 'gaussian', + 'f_order': (3, 1), } std_csd = icsd.StandardCSD(**std_input) csd = std_csd.get_csd() @@ -321,35 +319,34 @@ def test_StandardCSD_00(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_StandardCSD_01(self): '''test using non-standard SI units 1''' - #set some parameters for ground truth csd and csd estimates. 
+ # set some parameters for ground truth csd and csd estimates. - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**2 - C_i[7:12:2] += np.array([-.5, 1., -.5])*1E3*pq.A/pq.m**2 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**2 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * 1E3 * pq.A / pq.m**2 - #uniform conductivity - sigma = 0.3*pq.S/pq.m + # uniform conductivity + sigma = 0.3 * pq.S / pq.m - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot) std_input = { - 'lfp' : phi_j*1E3*pq.mV/pq.V, - 'coord_electrode' : z_j, - 'sigma' : sigma, - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j * 1E3 * pq.mV / pq.V, + 'coord_electrode': z_j, + 'sigma': sigma, + 'f_type': 'gaussian', + 'f_order': (3, 1), } std_csd = icsd.StandardCSD(**std_input) csd = std_csd.get_csd() @@ -357,35 +354,34 @@ def test_StandardCSD_01(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_StandardCSD_02(self): '''test using non-standard SI units 2''' - #set some parameters for ground truth csd and csd estimates. + # set some parameters for ground truth csd and csd estimates. - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**2 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**2 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2 - #uniform conductivity - sigma = 0.3*pq.S/pq.m + # uniform conductivity + sigma = 0.3 * pq.S / pq.m - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot) std_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j*1E3*pq.mm/pq.m, - 'sigma' : sigma, - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j, + 'coord_electrode': z_j * 1E3 * pq.mm / pq.m, + 'sigma': sigma, + 'f_type': 'gaussian', + 'f_order': (3, 1), } std_csd = icsd.StandardCSD(**std_input) csd = std_csd.get_csd() @@ -393,35 +389,34 @@ def test_StandardCSD_02(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_StandardCSD_03(self): '''test using non-standard SI units 3''' - #set some parameters for ground truth csd and csd estimates. + # set some parameters for ground truth csd and csd estimates. 
- #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**2 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**2 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2 - #uniform conductivity - sigma = 0.3*pq.mS/pq.m + # uniform conductivity + sigma = 0.3 * pq.mS / pq.m - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot) std_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j, - 'sigma' : sigma*1E3*pq.mS/pq.S, - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j, + 'coord_electrode': z_j, + 'sigma': sigma * 1E3 * pq.mS / pq.S, + 'f_type': 'gaussian', + 'f_order': (3, 1), } std_csd = icsd.StandardCSD(**std_input) csd = std_csd.get_csd() @@ -429,43 +424,42 @@ def test_StandardCSD_03(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_DeltaiCSD_00(self): '''test using standard SI units''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**2 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**2 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma, plot) delta_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j, - 'diam' : R_i.mean()*2, # source diameter - 'sigma' : sigma, # extracellular conductivity - 'sigma_top' : sigma_top, # conductivity on top of cortex - 'f_type' : 'gaussian', # gaussian filter - 'f_order' : (3, 1), # 3-point filter, sigma = 1. + 'lfp': phi_j, + 'coord_electrode': z_j, + 'diam': R_i.mean() * 2, # source diameter + 'sigma': sigma, # extracellular conductivity + 'sigma_top': sigma_top, # conductivity on top of cortex + 'f_type': 'gaussian', # gaussian filter + 'f_order': (3, 1), # 3-point filter, sigma = 1. 
} delta_icsd = icsd.DeltaiCSD(**delta_input) @@ -474,43 +468,42 @@ def test_DeltaiCSD_00(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_DeltaiCSD_01(self): '''test using non-standard SI units 1''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**2 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**2 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma, plot) delta_input = { - 'lfp' : phi_j*1E3*pq.mV/pq.V, - 'coord_electrode' : z_j, - 'diam' : R_i.mean()*2, # source diameter - 'sigma' : sigma, # extracellular conductivity - 'sigma_top' : sigma_top, # conductivity on top of cortex - 'f_type' : 'gaussian', # gaussian filter - 'f_order' : (3, 1), # 3-point filter, sigma = 1. + 'lfp': phi_j * 1E3 * pq.mV / pq.V, + 'coord_electrode': z_j, + 'diam': R_i.mean() * 2, # source diameter + 'sigma': sigma, # extracellular conductivity + 'sigma_top': sigma_top, # conductivity on top of cortex + 'f_type': 'gaussian', # gaussian filter + 'f_order': (3, 1), # 3-point filter, sigma = 1. 
} delta_icsd = icsd.DeltaiCSD(**delta_input) @@ -519,43 +512,42 @@ def test_DeltaiCSD_01(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_DeltaiCSD_02(self): '''test using non-standard SI units 2''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**2 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**2 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma, plot) delta_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j*1E3*pq.mm/pq.m, - 'diam' : R_i.mean()*2*1E3*pq.mm/pq.m, # source diameter - 'sigma' : sigma, # extracellular conductivity - 'sigma_top' : sigma_top, # conductivity on top of cortex - 'f_type' : 'gaussian', # gaussian filter - 'f_order' : (3, 1), # 3-point filter, sigma = 1. + 'lfp': phi_j, + 'coord_electrode': z_j * 1E3 * pq.mm / pq.m, + 'diam': R_i.mean() * 2 * 1E3 * pq.mm / pq.m, # source diameter + 'sigma': sigma, # extracellular conductivity + 'sigma_top': sigma_top, # conductivity on top of cortex + 'f_type': 'gaussian', # gaussian filter + 'f_order': (3, 1), # 3-point filter, sigma = 1. 
} delta_icsd = icsd.DeltaiCSD(**delta_input) @@ -564,43 +556,43 @@ def test_DeltaiCSD_02(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_DeltaiCSD_03(self): '''test using non-standard SI units 3''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**2 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**2 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma, plot) delta_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j, - 'diam' : R_i.mean()*2, # source diameter - 'sigma' : sigma*1E3*pq.mS/pq.S, # extracellular conductivity - 'sigma_top' : sigma_top*1E3*pq.mS/pq.S, # conductivity on top of cortex - 'f_type' : 'gaussian', # gaussian filter - 'f_order' : (3, 1), # 3-point filter, sigma = 1. + 'lfp': phi_j, + 'coord_electrode': z_j, + 'diam': R_i.mean() * 2, # source diameter + 'sigma': sigma * 1E3 * pq.mS / pq.S, # extracellular conductivity + 'sigma_top': sigma_top * 1E3 * pq.mS / pq.S, # conductivity on + # top of cortex + 'f_type': 'gaussian', # gaussian filter + 'f_order': (3, 1), # 3-point filter, sigma = 1. 
} delta_icsd = icsd.DeltaiCSD(**delta_input) @@ -609,44 +601,43 @@ def test_DeltaiCSD_03(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_DeltaiCSD_04(self): '''test non-continous z_j array''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**2 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**2 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2 - #source radius (delta, step) - R_i = np.ones(z_j.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_j.size) * 1E-3 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma, plot) inds = np.delete(np.arange(21), 5) delta_input = { - 'lfp' : phi_j[inds], - 'coord_electrode' : z_j[inds], - 'diam' : R_i[inds]*2, # source diameter - 'sigma' : sigma, # extracellular conductivity - 'sigma_top' : sigma_top, # conductivity on top of cortex - 'f_type' : 'gaussian', # gaussian filter - 'f_order' : (3, 1), # 3-point filter, sigma = 1. + 'lfp': phi_j[inds], + 'coord_electrode': z_j[inds], + 'diam': R_i[inds] * 2, # source diameter + 'sigma': sigma, # extracellular conductivity + 'sigma_top': sigma_top, # conductivity on top of cortex + 'f_type': 'gaussian', # gaussian filter + 'f_order': (3, 1), # 3-point filter, sigma = 1. 
} delta_icsd = icsd.DeltaiCSD(**delta_input) @@ -655,49 +646,48 @@ def test_DeltaiCSD_04(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i[inds], csd) - def test_StepiCSD_units_00(self): '''test using standard SI units''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**3 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**3 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #source height (cylinder) - h_i = np.ones(z_i.size)*1E-4*pq.m + # source height (cylinder) + h_i = np.ones(z_i.size) * 1E-4 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i, sigma, plot) step_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j, - 'diam' : R_i.mean()*2, - 'sigma' : sigma, - 'sigma_top' : sigma, - 'h' : h_i, - 'tol' : 1E-12, # Tolerance in numerical integration - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j, + 'coord_electrode': z_j, + 'diam': R_i.mean() * 2, + 'sigma': sigma, + 'sigma_top': sigma, + 'h': h_i, + 'tol': 1E-12, # Tolerance in numerical integration + 'f_type': 'gaussian', + 'f_order': (3, 1), } step_icsd = icsd.StepiCSD(**step_input) csd = step_icsd.get_csd() @@ -705,49 +695,48 @@ def test_StepiCSD_units_00(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_StepiCSD_01(self): '''test using non-standard SI units 1''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**3 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**3 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #source height (cylinder) - h_i = np.ones(z_i.size)*1E-4*pq.m + # source height (cylinder) + h_i = np.ones(z_i.size) * 1E-4 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #flag for debug plots + # 
flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i, sigma, plot) step_input = { - 'lfp' : phi_j*1E3*pq.mV/pq.V, - 'coord_electrode' : z_j, - 'diam' : R_i.mean()*2, - 'sigma' : sigma, - 'sigma_top' : sigma, - 'h' : h_i, - 'tol' : 1E-12, # Tolerance in numerical integration - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j * 1E3 * pq.mV / pq.V, + 'coord_electrode': z_j, + 'diam': R_i.mean() * 2, + 'sigma': sigma, + 'sigma_top': sigma, + 'h': h_i, + 'tol': 1E-12, # Tolerance in numerical integration + 'f_type': 'gaussian', + 'f_order': (3, 1), } step_icsd = icsd.StepiCSD(**step_input) csd = step_icsd.get_csd() @@ -755,49 +744,48 @@ def test_StepiCSD_01(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_StepiCSD_02(self): '''test using non-standard SI units 2''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**3 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**3 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #source height (cylinder) - h_i = np.ones(z_i.size)*1E-4*pq.m + # source height (cylinder) + h_i = np.ones(z_i.size) * 1E-4 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i, sigma, plot) step_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j*1E3*pq.mm/pq.m, - 'diam' : R_i.mean()*2*1E3*pq.mm/pq.m, - 'sigma' : sigma, - 'sigma_top' : sigma, - 'h' : h_i*1E3*pq.mm/pq.m, - 'tol' : 1E-12, # Tolerance in numerical integration - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j, + 'coord_electrode': z_j * 1E3 * pq.mm / pq.m, + 'diam': R_i.mean() * 2 * 1E3 * pq.mm / pq.m, + 'sigma': sigma, + 'sigma_top': sigma, + 'h': h_i * 1E3 * pq.mm / pq.m, + 'tol': 1E-12, # Tolerance in numerical integration + 'f_type': 'gaussian', + 'f_order': (3, 1), } step_icsd = icsd.StepiCSD(**step_input) csd = step_icsd.get_csd() @@ -805,49 +793,48 @@ def test_StepiCSD_02(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_StepiCSD_03(self): '''test using non-standard SI units 3''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source 
coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**3 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**3 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #source height (cylinder) - h_i = np.ones(z_i.size)*1E-4*pq.m + # source height (cylinder) + h_i = np.ones(z_i.size) * 1E-4 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i, sigma, plot) step_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j, - 'diam' : R_i.mean()*2, - 'sigma' : sigma*1E3*pq.mS/pq.S, - 'sigma_top' : sigma*1E3*pq.mS/pq.S, - 'h' : h_i, - 'tol' : 1E-12, # Tolerance in numerical integration - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j, + 'coord_electrode': z_j, + 'diam': R_i.mean() * 2, + 'sigma': sigma * 1E3 * pq.mS / pq.S, + 'sigma_top': sigma * 1E3 * pq.mS / pq.S, + 'h': h_i, + 'tol': 1E-12, # Tolerance in numerical integration + 'f_type': 'gaussian', + 'f_order': (3, 1), } step_icsd = icsd.StepiCSD(**step_input) csd = step_icsd.get_csd() @@ -855,49 +842,48 @@ def test_StepiCSD_03(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd) - def test_StepiCSD_units_04(self): '''test non-continous z_j array''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**3 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**3 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #source height (cylinder) - h_i = np.ones(z_i.size)*1E-4*pq.m + # source height (cylinder) + h_i = np.ones(z_i.size) * 1E-4 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i, sigma, plot) inds = np.delete(np.arange(21), 5) step_input = { - 'lfp' : phi_j[inds], - 'coord_electrode' : z_j[inds], - 'diam' : R_i[inds]*2, - 'sigma' : sigma, - 'sigma_top' : sigma, - 'h' : h_i[inds], - 'tol' : 1E-12, # Tolerance in numerical integration - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j[inds], + 'coord_electrode': z_j[inds], + 'diam': R_i[inds] * 2, + 'sigma': sigma, + 'sigma_top': sigma, + 'h': 
h_i[inds], + 'tol': 1E-12, # Tolerance in numerical integration + 'f_type': 'gaussian', + 'f_order': (3, 1), } step_icsd = icsd.StepiCSD(**step_input) csd = step_icsd.get_csd() @@ -905,61 +891,61 @@ def test_StepiCSD_units_04(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i[inds], csd) - def test_SplineiCSD_00(self): '''test using standard SI units''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**3 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**3 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #source height (cylinder) - h_i = np.ones(z_i.size)*1E-4*pq.m + # source height (cylinder) + h_i = np.ones(z_i.size) * 1E-4 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #construct interpolators, spline method assume underlying source - #pattern generating LFPs that are cubic spline interpolates between - #contacts so we generate CSD data relying on the same assumption + # construct interpolators, spline method assume underlying source + # pattern generating LFPs that are cubic spline interpolates between + # contacts so we generate CSD data relying on the same assumption f_C = interp1d(z_i, C_i, kind='cubic') f_R = interp1d(z_i, R_i) num_steps = 201 - z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units - C_i_i = f_C(np.asarray(z_i_i))*C_i.units - R_i_i = f_R(z_i_i)*R_i.units + z_i_i = np.linspace(float(z_i[0]), float( + z_i[-1]), num_steps) * z_i.units + C_i_i = f_C(np.asarray(z_i_i)) * C_i.units + R_i_i = f_R(z_i_i) * R_i.units - h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min() + h_i_i = np.ones(z_i_i.size) * np.diff(z_i_i).min() - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i, sigma, plot) spline_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j, - 'diam' : R_i*2, - 'sigma' : sigma, - 'sigma_top' : sigma, - 'num_steps' : num_steps, - 'tol' : 1E-12, # Tolerance in numerical integration - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j, + 'coord_electrode': z_j, + 'diam': R_i * 2, + 'sigma': sigma, + 'sigma_top': sigma, + 'num_steps': num_steps, + 'tol': 1E-12, # Tolerance in numerical integration + 'f_type': 'gaussian', + 'f_order': (3, 1), } spline_icsd = icsd.SplineiCSD(**spline_input) csd = spline_icsd.get_csd() @@ -967,61 +953,61 @@ def test_SplineiCSD_00(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd, decimal=3) - def test_SplineiCSD_01(self): '''test using standard SI units, deep electrode coordinates''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter 
as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(10, 31)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(10, 31) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**3 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**3 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #source height (cylinder) - h_i = np.ones(z_i.size)*1E-4*pq.m + # source height (cylinder) + h_i = np.ones(z_i.size) * 1E-4 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #construct interpolators, spline method assume underlying source - #pattern generating LFPs that are cubic spline interpolates between - #contacts so we generate CSD data relying on the same assumption + # construct interpolators, spline method assume underlying source + # pattern generating LFPs that are cubic spline interpolates between + # contacts so we generate CSD data relying on the same assumption f_C = interp1d(z_i, C_i, kind='cubic') f_R = interp1d(z_i, R_i) num_steps = 201 - z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units - C_i_i = f_C(np.asarray(z_i_i))*C_i.units - R_i_i = f_R(z_i_i)*R_i.units + z_i_i = np.linspace(float(z_i[0]), float( + z_i[-1]), num_steps) * z_i.units + C_i_i = f_C(np.asarray(z_i_i)) * C_i.units + R_i_i = f_R(z_i_i) * R_i.units - h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min() + h_i_i = np.ones(z_i_i.size) * np.diff(z_i_i).min() - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i, sigma, plot) spline_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j, - 'diam' : R_i*2, - 'sigma' : sigma, - 'sigma_top' : sigma, - 'num_steps' : num_steps, - 'tol' : 1E-12, # Tolerance in numerical integration - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j, + 'coord_electrode': z_j, + 'diam': R_i * 2, + 'sigma': sigma, + 'sigma_top': sigma, + 'num_steps': num_steps, + 'tol': 1E-12, # Tolerance in numerical integration + 'f_type': 'gaussian', + 'f_order': (3, 1), } spline_icsd = icsd.SplineiCSD(**spline_input) csd = spline_icsd.get_csd() @@ -1029,61 +1015,61 @@ def test_SplineiCSD_01(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd, decimal=3) - def test_SplineiCSD_02(self): '''test using non-standard SI units''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**3 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3 + # current source density magnitude + C_i = 
np.zeros(z_i.size) * pq.A / pq.m**3 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #source height (cylinder) - h_i = np.ones(z_i.size)*1E-4*pq.m + # source height (cylinder) + h_i = np.ones(z_i.size) * 1E-4 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #construct interpolators, spline method assume underlying source - #pattern generating LFPs that are cubic spline interpolates between - #contacts so we generate CSD data relying on the same assumption + # construct interpolators, spline method assume underlying source + # pattern generating LFPs that are cubic spline interpolates between + # contacts so we generate CSD data relying on the same assumption f_C = interp1d(z_i, C_i, kind='cubic') f_R = interp1d(z_i, R_i) num_steps = 201 - z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units - C_i_i = f_C(np.asarray(z_i_i))*C_i.units - R_i_i = f_R(z_i_i)*R_i.units + z_i_i = np.linspace(float(z_i[0]), float( + z_i[-1]), num_steps) * z_i.units + C_i_i = f_C(np.asarray(z_i_i)) * C_i.units + R_i_i = f_R(z_i_i) * R_i.units - h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min() + h_i_i = np.ones(z_i_i.size) * np.diff(z_i_i).min() - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i, sigma, plot) spline_input = { - 'lfp' : phi_j*1E3*pq.mV/pq.V, - 'coord_electrode' : z_j, - 'diam' : R_i*2, - 'sigma' : sigma, - 'sigma_top' : sigma, - 'num_steps' : num_steps, - 'tol' : 1E-12, # Tolerance in numerical integration - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j * 1E3 * pq.mV / pq.V, + 'coord_electrode': z_j, + 'diam': R_i * 2, + 'sigma': sigma, + 'sigma_top': sigma, + 'num_steps': num_steps, + 'tol': 1E-12, # Tolerance in numerical integration + 'f_type': 'gaussian', + 'f_order': (3, 1), } spline_icsd = icsd.SplineiCSD(**spline_input) csd = spline_icsd.get_csd() @@ -1091,61 +1077,61 @@ def test_SplineiCSD_02(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd, decimal=3) - def test_SplineiCSD_03(self): '''test using standard SI units''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**3 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**3 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #source height (cylinder) - h_i = np.ones(z_i.size)*1E-4*pq.m + # source height (cylinder) + h_i = np.ones(z_i.size) * 1E-4 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use 
same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #construct interpolators, spline method assume underlying source - #pattern generating LFPs that are cubic spline interpolates between - #contacts so we generate CSD data relying on the same assumption + # construct interpolators, spline method assume underlying source + # pattern generating LFPs that are cubic spline interpolates between + # contacts so we generate CSD data relying on the same assumption f_C = interp1d(z_i, C_i, kind='cubic') f_R = interp1d(z_i, R_i) num_steps = 201 - z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units - C_i_i = f_C(np.asarray(z_i_i))*C_i.units - R_i_i = f_R(z_i_i)*R_i.units + z_i_i = np.linspace(float(z_i[0]), float( + z_i[-1]), num_steps) * z_i.units + C_i_i = f_C(np.asarray(z_i_i)) * C_i.units + R_i_i = f_R(z_i_i) * R_i.units - h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min() + h_i_i = np.ones(z_i_i.size) * np.diff(z_i_i).min() - #flag for debug plots + # flag for debug plots plot = False - #get LFP and CSD at contacts + # get LFP and CSD at contacts phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i, sigma, plot) spline_input = { - 'lfp' : phi_j, - 'coord_electrode' : z_j*1E3*pq.mm/pq.m, - 'diam' : R_i*2*1E3*pq.mm/pq.m, - 'sigma' : sigma, - 'sigma_top' : sigma, - 'num_steps' : num_steps, - 'tol' : 1E-12, # Tolerance in numerical integration - 'f_type' : 'gaussian', - 'f_order' : (3, 1), + 'lfp': phi_j, + 'coord_electrode': z_j * 1E3 * pq.mm / pq.m, + 'diam': R_i * 2 * 1E3 * pq.mm / pq.m, + 'sigma': sigma, + 'sigma_top': sigma, + 'num_steps': num_steps, + 'tol': 1E-12, # Tolerance in numerical integration + 'f_type': 'gaussian', + 'f_order': (3, 1), } spline_icsd = icsd.SplineiCSD(**spline_input) csd = spline_icsd.get_csd() @@ -1153,61 +1139,61 @@ def test_SplineiCSD_03(self): self.assertEqual(C_i.units, csd.units) nt.assert_array_almost_equal(C_i, csd, decimal=3) - def test_SplineiCSD_04(self): '''test using standard SI units''' - #set some parameters for ground truth csd and csd estimates., e.g., - #we will use same source diameter as in ground truth + # set some parameters for ground truth csd and csd estimates., e.g., + # we will use same source diameter as in ground truth - #contact point coordinates - z_j = np.arange(21)*1E-4*pq.m + # contact point coordinates + z_j = np.arange(21) * 1E-4 * pq.m - #source coordinates + # source coordinates z_i = z_j - #current source density magnitude - C_i = np.zeros(z_i.size)*pq.A/pq.m**3 - C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3 + # current source density magnitude + C_i = np.zeros(z_i.size) * pq.A / pq.m**3 + C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3 - #source radius (delta, step) - R_i = np.ones(z_i.size)*1E-3*pq.m + # source radius (delta, step) + R_i = np.ones(z_i.size) * 1E-3 * pq.m - #source height (cylinder) - h_i = np.ones(z_i.size)*1E-4*pq.m + # source height (cylinder) + h_i = np.ones(z_i.size) * 1E-4 * pq.m - #conductivity, use same conductivity for top layer (z_j < 0) - sigma = 0.3*pq.S/pq.m + # conductivity, use same conductivity for top layer (z_j < 0) + sigma = 0.3 * pq.S / pq.m sigma_top = sigma - #construct interpolators, spline method assume underlying source - #pattern generating LFPs that are cubic spline interpolates between - #contacts so we generate CSD data relying on the same assumption + # construct interpolators, spline method assume underlying source + # pattern generating LFPs that are cubic spline interpolates between + # contacts 
so we generate CSD data relying on the same assumption
        f_C = interp1d(z_i, C_i, kind='cubic')
        f_R = interp1d(z_i, R_i)
        num_steps = 201
-        z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units
-        C_i_i = f_C(np.asarray(z_i_i))*C_i.units
-        R_i_i = f_R(z_i_i)*R_i.units
+        z_i_i = np.linspace(float(z_i[0]), float(
+            z_i[-1]), num_steps) * z_i.units
+        C_i_i = f_C(np.asarray(z_i_i)) * C_i.units
+        R_i_i = f_R(z_i_i) * R_i.units

-        h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min()
+        h_i_i = np.ones(z_i_i.size) * np.diff(z_i_i).min()

-        #flag for debug plots
+        # flag for debug plots
        plot = False

-        #get LFP and CSD at contacts
+        # get LFP and CSD at contacts
        phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
                                          sigma, plot)

        spline_input = {
-            'lfp' : phi_j,
-            'coord_electrode' : z_j,
-            'diam' : R_i*2,
-            'sigma' : sigma*1E3*pq.mS/pq.S,
-            'sigma_top' : sigma*1E3*pq.mS/pq.S,
-            'num_steps' : num_steps,
-            'tol' : 1E-12,  # Tolerance in numerical integration
-            'f_type' : 'gaussian',
-            'f_order' : (3, 1),
+            'lfp': phi_j,
+            'coord_electrode': z_j,
+            'diam': R_i * 2,
+            'sigma': sigma * 1E3 * pq.mS / pq.S,
+            'sigma_top': sigma * 1E3 * pq.mS / pq.S,
+            'num_steps': num_steps,
+            'tol': 1E-12,  # Tolerance in numerical integration
+            'f_type': 'gaussian',
+            'f_order': (3, 1),
        }
        spline_icsd = icsd.SplineiCSD(**spline_input)
        csd = spline_icsd.get_csd()
@@ -1216,7 +1202,7 @@ def test_SplineiCSD_04(self):
        nt.assert_array_almost_equal(C_i, csd, decimal=3)


-#def suite(verbosity=2):
+# def suite(verbosity=2):
#    '''
#    Run unittests for the CSD toolbox
#
@@ -1232,7 +1218,7 @@ def test_SplineiCSD_04(self):
#
#
#
-#if __name__ == '__main__':
+# if __name__ == '__main__':
#    suite()


@@ -1240,6 +1226,7 @@ def suite():
    suite = unittest.makeSuite(TestICSD, 'test')
    return suite

+
if __name__ == "__main__":
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
diff --git a/elephant/test/test_kcsd.py b/elephant/test/test_kcsd.py
index 561b2fb4a..6a8527f30 100644
--- a/elephant/test/test_kcsd.py
+++ b/elephant/test/test_kcsd.py
@@ -31,7 +31,7 @@ def setUp(self):
        for ii in range(len(self.pots)):
            temp_signals.append(self.pots[ii])
        self.an_sigs = neo.AnalogSignal(np.array(temp_signals).T * pq.mV,
-                                       sampling_rate=1000 * pq.Hz)
+                                        sampling_rate=1000 * pq.Hz)
        chidx = neo.ChannelIndex(range(len(self.pots)))
        chidx.analogsignals.append(self.an_sigs)
        chidx.coordinates = self.ele_pos * pq.mm
@@ -71,7 +71,11 @@ def setUp(self):
                                            ylims=[0.05, 0.95])
        self.ele_pos = np.vstack((xx_ele, yy_ele)).T
        self.csd_profile = utils.large_source_2D
-        pots = CSD.generate_lfp(self.csd_profile, xx_ele, yy_ele, res=100)
+        pots = CSD.generate_lfp(
+            self.csd_profile,
+            xx_ele,
+            yy_ele,
+            resolution=100)
        self.pots = np.reshape(pots, (-1, 1))
        self.test_method = 'KCSD2D'
        self.test_params = {'gdx': 0.25, 'gdy': 0.25, 'R_init': 0.08,
@@ -81,14 +85,13 @@ def setUp(self):
        for ii in range(len(self.pots)):
            temp_signals.append(self.pots[ii])
        self.an_sigs = neo.AnalogSignal(np.array(temp_signals).T * pq.mV,
-
sampling_rate=1000 * pq.Hz) + sampling_rate=1000 * pq.Hz) chidx = neo.ChannelIndex(range(len(self.pots))) chidx.analogsignals.append(self.an_sigs) chidx.coordinates = self.ele_pos * pq.mm @@ -179,5 +182,6 @@ def test_valid_inputs(self): cv_params = {'InvalidCVArg': np.array((0.1, 0.25, 0.5))} self.assertRaises(TypeError, self.test_kcsd3d_estimate, cv_params) + if __name__ == '__main__': unittest.main() diff --git a/elephant/test/test_kernels.py b/elephant/test/test_kernels.py index 009ca7df5..2f7242ceb 100644 --- a/elephant/test/test_kernels.py +++ b/elephant/test/test_kernels.py @@ -148,10 +148,8 @@ def test_element_wise_only(self): for kern_cls in self.kernel_types: for invert in (False, True): kernel = kern_cls(sigma=1 * pq.s, invert=invert) - kernel_shuffled = kernel(t_shuffled) - kernel_shuffled.sort() - kernel_expected = kernel(t_array) - kernel_expected.sort() + kernel_shuffled = np.sort(kernel(t_shuffled)) + kernel_expected = np.sort(kernel(t_array)) assert_array_almost_equal(kernel_shuffled, kernel_expected) def test_kernel_pdf_range(self): diff --git a/elephant/test/test_neo_tools.py b/elephant/test/test_neo_tools.py index c08ca9c5b..ed554ebbc 100644 --- a/elephant/test/test_neo_tools.py +++ b/elephant/test/test_neo_tools.py @@ -317,16 +317,16 @@ def test__extract_neo_attrs__spiketrain_noarray(self): targ = get_fake_values('SpikeTrain', seed=0) targ = strip_iter_values(targ) - res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=True) - res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=True) - res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=False) - res01 = nt.extract_neo_attrs(obj, parents=True, skip_array=True) - res11 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=True) - res21 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=False) + res00 = nt.extract_neo_attributes(obj, parents=False, skip_array=True) + res10 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=True) + res20 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=False) + res01 = nt.extract_neo_attributes(obj, parents=True, skip_array=True) + res11 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=True) + res21 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=False) self.assertEqual(targ, res00) self.assertEqual(targ, res10) @@ -343,18 +343,18 @@ def test__extract_neo_attrs__spiketrain_noarray_skip_none(self): if value is None: del targ[key] - res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - skip_none=True) - res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=True, skip_none=True) - res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=False, skip_none=True) - res01 = nt.extract_neo_attrs(obj, parents=True, skip_array=True, - skip_none=True) - res11 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=True, skip_none=True) - res21 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=False, skip_none=True) + res00 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + skip_none=True) + res10 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=True, skip_none=True) + res20 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=False, skip_none=True) + res01 = nt.extract_neo_attributes(obj, parents=True, 
skip_array=True, + skip_none=True) + res11 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=True, skip_none=True) + res21 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=False, skip_none=True) self.assertEqual(targ, res00) self.assertEqual(targ, res10) @@ -368,16 +368,16 @@ def test__extract_neo_attrs__epoch_noarray(self): targ = get_fake_values('Epoch', seed=0) targ = strip_iter_values(targ) - res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=True) - res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=True) - res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=False) - res01 = nt.extract_neo_attrs(obj, parents=True, skip_array=True) - res11 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=True) - res21 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=False) + res00 = nt.extract_neo_attributes(obj, parents=False, skip_array=True) + res10 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=True) + res20 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=False) + res01 = nt.extract_neo_attributes(obj, parents=True, skip_array=True) + res11 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=True) + res21 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=False) self.assertEqual(targ, res00) self.assertEqual(targ, res10) @@ -391,16 +391,16 @@ def test__extract_neo_attrs__event_noarray(self): targ = get_fake_values('Event', seed=0) targ = strip_iter_values(targ) - res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=True) - res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=True) - res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=False) - res01 = nt.extract_neo_attrs(obj, parents=True, skip_array=True) - res11 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=True) - res21 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=False) + res00 = nt.extract_neo_attributes(obj, parents=False, skip_array=True) + res10 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=True) + res20 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=False) + res01 = nt.extract_neo_attributes(obj, parents=True, skip_array=True) + res11 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=True) + res21 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=False) self.assertEqual(targ, res00) self.assertEqual(targ, res10) @@ -414,22 +414,26 @@ def test__extract_neo_attrs__spiketrain_parents_empty_array(self): targ = get_fake_values('SpikeTrain', seed=0) del targ['times'] - res000 = nt.extract_neo_attrs(obj, parents=False) - res100 = nt.extract_neo_attrs(obj, parents=False, child_first=True) - res200 = nt.extract_neo_attrs(obj, parents=False, child_first=False) - res010 = nt.extract_neo_attrs(obj, parents=False, skip_array=False) - res110 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=True) - res210 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=False) - res001 = nt.extract_neo_attrs(obj, parents=True) - res101 = nt.extract_neo_attrs(obj, parents=True, child_first=True) - res201 = nt.extract_neo_attrs(obj, parents=True, child_first=False) - res011 = 
nt.extract_neo_attrs(obj, parents=True, skip_array=False) - res111 = nt.extract_neo_attrs(obj, parents=True, skip_array=False, - child_first=True) - res211 = nt.extract_neo_attrs(obj, parents=True, skip_array=False, - child_first=False) + res000 = nt.extract_neo_attributes(obj, parents=False) + res100 = nt.extract_neo_attributes( + obj, parents=False, child_first=True) + res200 = nt.extract_neo_attributes( + obj, parents=False, child_first=False) + res010 = nt.extract_neo_attributes( + obj, parents=False, skip_array=False) + res110 = nt.extract_neo_attributes( + obj, parents=False, skip_array=False, child_first=True) + res210 = nt.extract_neo_attributes( + obj, parents=False, skip_array=False, child_first=False) + res001 = nt.extract_neo_attributes(obj, parents=True) + res101 = nt.extract_neo_attributes(obj, parents=True, child_first=True) + res201 = nt.extract_neo_attributes( + obj, parents=True, child_first=False) + res011 = nt.extract_neo_attributes(obj, parents=True, skip_array=False) + res111 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=True) + res211 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=False) self.assert_dicts_equal(targ, res000) self.assert_dicts_equal(targ, res100) @@ -460,22 +464,26 @@ def test__extract_neo_attrs__epoch_parents_empty_array(self): obj = self._fix_neo_issue_749(obj, targ) del targ['times'] - res000 = nt.extract_neo_attrs(obj, parents=False) - res100 = nt.extract_neo_attrs(obj, parents=False, child_first=True) - res200 = nt.extract_neo_attrs(obj, parents=False, child_first=False) - res010 = nt.extract_neo_attrs(obj, parents=False, skip_array=False) - res110 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=True) - res210 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=False) - res001 = nt.extract_neo_attrs(obj, parents=True) - res101 = nt.extract_neo_attrs(obj, parents=True, child_first=True) - res201 = nt.extract_neo_attrs(obj, parents=True, child_first=False) - res011 = nt.extract_neo_attrs(obj, parents=True, skip_array=False) - res111 = nt.extract_neo_attrs(obj, parents=True, skip_array=False, - child_first=True) - res211 = nt.extract_neo_attrs(obj, parents=True, skip_array=False, - child_first=False) + res000 = nt.extract_neo_attributes(obj, parents=False) + res100 = nt.extract_neo_attributes( + obj, parents=False, child_first=True) + res200 = nt.extract_neo_attributes( + obj, parents=False, child_first=False) + res010 = nt.extract_neo_attributes( + obj, parents=False, skip_array=False) + res110 = nt.extract_neo_attributes( + obj, parents=False, skip_array=False, child_first=True) + res210 = nt.extract_neo_attributes( + obj, parents=False, skip_array=False, child_first=False) + res001 = nt.extract_neo_attributes(obj, parents=True) + res101 = nt.extract_neo_attributes(obj, parents=True, child_first=True) + res201 = nt.extract_neo_attributes( + obj, parents=True, child_first=False) + res011 = nt.extract_neo_attributes(obj, parents=True, skip_array=False) + res111 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=True) + res211 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=False) self.assert_dicts_equal(targ, res000) self.assert_dicts_equal(targ, res100) @@ -495,22 +503,26 @@ def test__extract_neo_attrs__event_parents_empty_array(self): targ = get_fake_values('Event', seed=0) del targ['times'] - res000 = nt.extract_neo_attrs(obj, parents=False) - res100 = 
nt.extract_neo_attrs(obj, parents=False, child_first=True) - res200 = nt.extract_neo_attrs(obj, parents=False, child_first=False) - res010 = nt.extract_neo_attrs(obj, parents=False, skip_array=False) - res110 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=True) - res210 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=False) - res001 = nt.extract_neo_attrs(obj, parents=True) - res101 = nt.extract_neo_attrs(obj, parents=True, child_first=True) - res201 = nt.extract_neo_attrs(obj, parents=True, child_first=False) - res011 = nt.extract_neo_attrs(obj, parents=True, skip_array=False) - res111 = nt.extract_neo_attrs(obj, parents=True, skip_array=False, - child_first=True) - res211 = nt.extract_neo_attrs(obj, parents=True, skip_array=False, - child_first=False) + res000 = nt.extract_neo_attributes(obj, parents=False) + res100 = nt.extract_neo_attributes( + obj, parents=False, child_first=True) + res200 = nt.extract_neo_attributes( + obj, parents=False, child_first=False) + res010 = nt.extract_neo_attributes( + obj, parents=False, skip_array=False) + res110 = nt.extract_neo_attributes( + obj, parents=False, skip_array=False, child_first=True) + res210 = nt.extract_neo_attributes( + obj, parents=False, skip_array=False, child_first=False) + res001 = nt.extract_neo_attributes(obj, parents=True) + res101 = nt.extract_neo_attributes(obj, parents=True, child_first=True) + res201 = nt.extract_neo_attributes( + obj, parents=True, child_first=False) + res011 = nt.extract_neo_attributes(obj, parents=True, skip_array=False) + res111 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=True) + res211 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=False) self.assert_dicts_equal(targ, res000) self.assert_dicts_equal(targ, res100) @@ -530,11 +542,11 @@ def test__extract_neo_attrs__spiketrain_noparents_noarray(self): targ = get_fake_values('SpikeTrain', seed=obj.annotations['seed']) targ = strip_iter_values(targ) - res0 = nt.extract_neo_attrs(obj, parents=False, skip_array=True) - res1 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=True) - res2 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=False) + res0 = nt.extract_neo_attributes(obj, parents=False, skip_array=True) + res1 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=True) + res2 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=False) del res0['i'] del res1['i'] @@ -552,11 +564,11 @@ def test__extract_neo_attrs__epoch_noparents_noarray(self): targ = get_fake_values('Epoch', seed=obj.annotations['seed']) targ = strip_iter_values(targ) - res0 = nt.extract_neo_attrs(obj, parents=False, skip_array=True) - res1 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=True) - res2 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=False) + res0 = nt.extract_neo_attributes(obj, parents=False, skip_array=True) + res1 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=True) + res2 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=False) del res0['i'] del res1['i'] @@ -574,11 +586,11 @@ def test__extract_neo_attrs__event_noparents_noarray(self): targ = get_fake_values('Event', seed=obj.annotations['seed']) targ = strip_iter_values(targ) - res0 = nt.extract_neo_attrs(obj, parents=False, skip_array=True) - res1 = nt.extract_neo_attrs(obj, 
parents=False, skip_array=True, - child_first=True) - res2 = nt.extract_neo_attrs(obj, parents=False, skip_array=True, - child_first=False) + res0 = nt.extract_neo_attributes(obj, parents=False, skip_array=True) + res1 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=True) + res2 = nt.extract_neo_attributes(obj, parents=False, skip_array=True, + child_first=False) del res0['i'] del res1['i'] @@ -596,14 +608,15 @@ def test__extract_neo_attrs__spiketrain_noparents_array(self): targ = get_fake_values('SpikeTrain', seed=obj.annotations['seed']) del targ['times'] - res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=False) - res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=True) - res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=False) - res01 = nt.extract_neo_attrs(obj, parents=False) - res11 = nt.extract_neo_attrs(obj, parents=False, child_first=True) - res21 = nt.extract_neo_attrs(obj, parents=False, child_first=False) + res00 = nt.extract_neo_attributes(obj, parents=False, skip_array=False) + res10 = nt.extract_neo_attributes(obj, parents=False, skip_array=False, + child_first=True) + res20 = nt.extract_neo_attributes(obj, parents=False, skip_array=False, + child_first=False) + res01 = nt.extract_neo_attributes(obj, parents=False) + res11 = nt.extract_neo_attributes(obj, parents=False, child_first=True) + res21 = nt.extract_neo_attributes( + obj, parents=False, child_first=False) del res00['i'] del res10['i'] @@ -633,14 +646,15 @@ def test__extract_neo_attrs__epoch_noparents_array(self): obj = self._fix_neo_issue_749(obj, targ) del targ['times'] - res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=False) - res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=True) - res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=False) - res01 = nt.extract_neo_attrs(obj, parents=False) - res11 = nt.extract_neo_attrs(obj, parents=False, child_first=True) - res21 = nt.extract_neo_attrs(obj, parents=False, child_first=False) + res00 = nt.extract_neo_attributes(obj, parents=False, skip_array=False) + res10 = nt.extract_neo_attributes(obj, parents=False, skip_array=False, + child_first=True) + res20 = nt.extract_neo_attributes(obj, parents=False, skip_array=False, + child_first=False) + res01 = nt.extract_neo_attributes(obj, parents=False) + res11 = nt.extract_neo_attributes(obj, parents=False, child_first=True) + res21 = nt.extract_neo_attributes( + obj, parents=False, child_first=False) del res00['i'] del res10['i'] @@ -667,14 +681,15 @@ def test__extract_neo_attrs__event_noparents_array(self): targ = get_fake_values('Event', seed=obj.annotations['seed']) del targ['times'] - res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=False) - res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=True) - res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=False, - child_first=False) - res01 = nt.extract_neo_attrs(obj, parents=False) - res11 = nt.extract_neo_attrs(obj, parents=False, child_first=True) - res21 = nt.extract_neo_attrs(obj, parents=False, child_first=False) + res00 = nt.extract_neo_attributes(obj, parents=False, skip_array=False) + res10 = nt.extract_neo_attributes(obj, parents=False, skip_array=False, + child_first=True) + res20 = nt.extract_neo_attributes(obj, parents=False, skip_array=False, + child_first=False) + res01 = nt.extract_neo_attributes(obj, parents=False) + res11 = 
nt.extract_neo_attributes(obj, parents=False, child_first=True) + res21 = nt.extract_neo_attributes( + obj, parents=False, child_first=False) del res00['i'] del res10['i'] @@ -712,15 +727,16 @@ def test__extract_neo_attrs__spiketrain_parents_childfirst_noarray(self): seed=obj.annotations['seed'])) targ = strip_iter_values(targ) - res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True) - res1 = nt.extract_neo_attrs(obj, parents=True, skip_array=True, - child_first=True) + res0 = nt.extract_neo_attributes(obj, parents=True, skip_array=True) + res1 = nt.extract_neo_attributes(obj, parents=True, skip_array=True, + child_first=True) del res0['i'] del res1['i'] del res0['j'] del res1['j'] - del res0['index'] # name clash between Block.index and ChannelIndex.index + # name clash between Block.index and ChannelIndex.index + del res0['index'] del res1['index'] self.assertEqual(targ, res0) @@ -736,15 +752,16 @@ def test__extract_neo_attrs__epoch_parents_childfirst_noarray(self): targ.update(get_fake_values('Epoch', seed=obj.annotations['seed'])) targ = strip_iter_values(targ) - res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True) - res1 = nt.extract_neo_attrs(obj, parents=True, skip_array=True, - child_first=True) + res0 = nt.extract_neo_attributes(obj, parents=True, skip_array=True) + res1 = nt.extract_neo_attributes(obj, parents=True, skip_array=True, + child_first=True) del res0['i'] del res1['i'] del res0['j'] del res1['j'] - del res0['index'] # name clash between Block.index and ChannelIndex.index + # name clash between Block.index and ChannelIndex.index + del res0['index'] del res1['index'] self.assertEqual(targ, res0) @@ -760,15 +777,16 @@ def test__extract_neo_attrs__event_parents_childfirst_noarray(self): targ.update(get_fake_values('Event', seed=obj.annotations['seed'])) targ = strip_iter_values(targ) - res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True) - res1 = nt.extract_neo_attrs(obj, parents=True, skip_array=True, - child_first=True) + res0 = nt.extract_neo_attributes(obj, parents=True, skip_array=True) + res1 = nt.extract_neo_attributes(obj, parents=True, skip_array=True, + child_first=True) del res0['i'] del res1['i'] del res0['j'] del res1['j'] - del res0['index'] # name clash between Block.index and ChannelIndex.index + # name clash between Block.index and ChannelIndex.index + del res0['index'] del res1['index'] self.assertEqual(targ, res0) @@ -789,12 +807,13 @@ def test__extract_neo_attrs__spiketrain_parents_parentfirst_noarray(self): targ.update(get_fake_values('Block', seed=blk.annotations['seed'])) targ = strip_iter_values(targ) - res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True, - child_first=False) + res0 = nt.extract_neo_attributes(obj, parents=True, skip_array=True, + child_first=False) del res0['i'] del res0['j'] - del res0['index'] # name clash between Block.index and ChannelIndex.index + # name clash between Block.index and ChannelIndex.index + del res0['index'] self.assertEqual(targ, res0) @@ -808,12 +827,13 @@ def test__extract_neo_attrs__epoch_parents_parentfirst_noarray(self): targ.update(get_fake_values('Block', seed=blk.annotations['seed'])) targ = strip_iter_values(targ) - res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True, - child_first=False) + res0 = nt.extract_neo_attributes(obj, parents=True, skip_array=True, + child_first=False) del res0['i'] del res0['j'] - del res0['index'] # name clash between Block.index and ChannelIndex.index + # name clash between Block.index and ChannelIndex.index + del 
res0['index'] self.assertEqual(targ, res0) @@ -827,12 +847,13 @@ def test__extract_neo_attrs__event_parents_parentfirst_noarray(self): targ.update(get_fake_values('Block', seed=blk.annotations['seed'])) targ = strip_iter_values(targ) - res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True, - child_first=False) + res0 = nt.extract_neo_attributes(obj, parents=True, skip_array=True, + child_first=False) del res0['i'] del res0['j'] - del res0['index'] # name clash between Block.index and ChannelIndex.index + # name clash between Block.index and ChannelIndex.index + del res0['index'] self.assertEqual(targ, res0) @@ -853,11 +874,11 @@ def test__extract_neo_attrs__spiketrain_parents_childfirst_array(self): seed=obj.annotations['seed'])) del targ['times'] - res00 = nt.extract_neo_attrs(obj, parents=True, skip_array=False) - res10 = nt.extract_neo_attrs(obj, parents=True, skip_array=False, - child_first=True) - res01 = nt.extract_neo_attrs(obj, parents=True) - res11 = nt.extract_neo_attrs(obj, parents=True, child_first=True) + res00 = nt.extract_neo_attributes(obj, parents=True, skip_array=False) + res10 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=True) + res01 = nt.extract_neo_attributes(obj, parents=True) + res11 = nt.extract_neo_attributes(obj, parents=True, child_first=True) del res00['i'] del res10['i'] @@ -885,11 +906,11 @@ def test__extract_neo_attrs__epoch_parents_childfirst_array(self): obj = self._fix_neo_issue_749(obj, targ) del targ['times'] - res00 = nt.extract_neo_attrs(obj, parents=True, skip_array=False) - res10 = nt.extract_neo_attrs(obj, parents=True, skip_array=False, - child_first=True) - res01 = nt.extract_neo_attrs(obj, parents=True) - res11 = nt.extract_neo_attrs(obj, parents=True, child_first=True) + res00 = nt.extract_neo_attributes(obj, parents=True, skip_array=False) + res10 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=True) + res01 = nt.extract_neo_attributes(obj, parents=True) + res11 = nt.extract_neo_attributes(obj, parents=True, child_first=True) del res00['i'] del res10['i'] @@ -915,11 +936,11 @@ def test__extract_neo_attrs__event_parents_childfirst_array(self): targ.update(get_fake_values('Event', seed=obj.annotations['seed'])) del targ['times'] - res00 = nt.extract_neo_attrs(obj, parents=True, skip_array=False) - res10 = nt.extract_neo_attrs(obj, parents=True, skip_array=False, - child_first=True) - res01 = nt.extract_neo_attrs(obj, parents=True) - res11 = nt.extract_neo_attrs(obj, parents=True, child_first=True) + res00 = nt.extract_neo_attributes(obj, parents=True, skip_array=False) + res10 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=True) + res01 = nt.extract_neo_attributes(obj, parents=True) + res11 = nt.extract_neo_attributes(obj, parents=True, child_first=True) del res00['i'] del res10['i'] @@ -952,9 +973,9 @@ def test__extract_neo_attrs__spiketrain_parents_parentfirst_array(self): del targ['index'] del targ['channel_names'] - res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=False, - child_first=False) - res1 = nt.extract_neo_attrs(obj, parents=True, child_first=False) + res0 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=False) + res1 = nt.extract_neo_attributes(obj, parents=True, child_first=False) del res0['i'] del res1['i'] @@ -980,9 +1001,9 @@ def test__extract_neo_attrs__epoch_parents_parentfirst_array(self): obj = self._fix_neo_issue_749(obj, targ) del targ['times'] - res0 = nt.extract_neo_attrs(obj, 
parents=True, skip_array=False, - child_first=False) - res1 = nt.extract_neo_attrs(obj, parents=True, child_first=False) + res0 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=False) + res1 = nt.extract_neo_attributes(obj, parents=True, child_first=False) del res0['i'] del res1['i'] @@ -1002,9 +1023,9 @@ def test__extract_neo_attrs__event_parents_parentfirst_array(self): targ.update(get_fake_values('Block', seed=blk.annotations['seed'])) del targ['times'] - res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=False, - child_first=False) - res1 = nt.extract_neo_attrs(obj, parents=True, child_first=False) + res0 = nt.extract_neo_attributes(obj, parents=True, skip_array=False, + child_first=False) + res1 = nt.extract_neo_attributes(obj, parents=True, child_first=False) del res0['i'] del res1['i'] diff --git a/elephant/test/test_pandas_bridge.py b/elephant/test/test_pandas_bridge.py index c098762dc..b66b632bc 100644 --- a/elephant/test/test_pandas_bridge.py +++ b/elephant/test/test_pandas_bridge.py @@ -1644,7 +1644,6 @@ def test__multi_events_to_dataframe__segment_default(self): np.array(targ.values, dtype=np.float), np.array(res0.values, dtype=np.float)) - assert_frame_equal(targ, res0) def test__multi_events_to_dataframe__block_noparents(self): diff --git a/elephant/test/test_phase_analysis.py b/elephant/test/test_phase_analysis.py index 8c8d851d1..8385e3cc7 100644 --- a/elephant/test/test_phase_analysis.py +++ b/elephant/test/test_phase_analysis.py @@ -141,7 +141,7 @@ def test_spike_earlier_than_hilbert(self): # This is a spike clearly outside the bounds st = SpikeTrain( [-50, 50], - units='s', t_start=-100*pq.s, t_stop=100*pq.s) + units='s', t_start=-100 * pq.s, t_stop=100 * pq.s) phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase( elephant.signal_processing.hilbert(self.anasig0), st, @@ -154,7 +154,7 @@ def test_spike_earlier_than_hilbert(self): # spike is to be considered. st = SpikeTrain( [0, 50], - units='s', t_start=-100*pq.s, t_stop=100*pq.s) + units='s', t_start=-100 * pq.s, t_stop=100 * pq.s) phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase( elephant.signal_processing.hilbert(self.anasig0), st, @@ -165,7 +165,7 @@ def test_spike_later_than_hilbert(self): # This is a spike clearly outside the bounds st = SpikeTrain( [1, 250], - units='s', t_start=-1*pq.s, t_stop=300*pq.s) + units='s', t_start=-1 * pq.s, t_stop=300 * pq.s) phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase( elephant.signal_processing.hilbert(self.anasig0), st, @@ -178,7 +178,7 @@ def test_spike_later_than_hilbert(self): # spike is not to be considered. 
st = SpikeTrain( [1, 100], - units='s', t_start=-1*pq.s, t_stop=200*pq.s) + units='s', t_start=-1 * pq.s, t_stop=200 * pq.s) phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase( elephant.signal_processing.hilbert(self.anasig0), st, @@ -192,9 +192,9 @@ def test_regression_269(self): # before the end of the signal cu = pq.CompoundUnit("1/30000.*s") st = SpikeTrain( - [30000., (self.anasig0.t_stop-1*pq.s).rescale(cu).magnitude], + [30000., (self.anasig0.t_stop - 1 * pq.s).rescale(cu).magnitude], units=pq.CompoundUnit("1/30000.*s"), - t_start=-1*pq.s, t_stop=300*pq.s) + t_start=-1 * pq.s, t_stop=300 * pq.s) phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase( elephant.signal_processing.hilbert(self.anasig0), st, diff --git a/elephant/test/test_signal_processing.py b/elephant/test/test_signal_processing.py index efe283b55..4c14ae253 100644 --- a/elephant/test/test_signal_processing.py +++ b/elephant/test/test_signal_processing.py @@ -62,48 +62,47 @@ def test_cross_correlation_nlags(self): signal[:, 0] = 0.2 * np.sin(2. * np.pi * self.freq * self.times) signal[:, 1] = 5.3 * np.cos(2. * np.pi * self.freq * self.times) # Convert signal to neo.AnalogSignal - signal = neo.AnalogSignal(signal, units='mV', t_start=0.*pq.ms, + signal = neo.AnalogSignal(signal, units='mV', t_start=0. * pq.ms, sampling_rate=self.sampling_rate, dtype=float) rho = elephant.signal_processing.cross_correlation_function( - signal, [0, 1], nlags=nlags) + signal, [0, 1], n_lags=nlags) # Test if vector of lags tau has correct length - assert len(rho.times) == 2*int(nlags)+1 + assert len(rho.times) == 2 * int(nlags) + 1 def test_cross_correlation_phi(self): ''' Sine with phase shift phi vs cosine ''' - phi = np.pi/6. + phi = np.pi / 6. signal = np.zeros((self.n_samples, 2)) signal[:, 0] = 0.2 * np.sin(2. * np.pi * self.freq * self.times + phi) signal[:, 1] = 5.3 * np.cos(2. * np.pi * self.freq * self.times) # Convert signal to neo.AnalogSignal - signal = neo.AnalogSignal(signal, units='mV', t_start=0.*pq.ms, + signal = neo.AnalogSignal(signal, units='mV', t_start=0. * pq.ms, sampling_rate=self.sampling_rate, dtype=float) rho = elephant.signal_processing.cross_correlation_function( signal, [0, 1]) # Cross-correlation of sine and cosine should be sine + phi - assert_array_almost_equal( - rho.magnitude[:, 0], np.sin(2.*np.pi*self.freq*rho.times+phi), - decimal=2) + assert_array_almost_equal(rho.magnitude[:, 0], np.sin( + 2. * np.pi * self.freq * rho.times + phi), decimal=2) def test_cross_correlation_envelope(self): ''' Envelope of sine vs cosine ''' # Sine with phase shift phi vs cosine for different frequencies - nlags = 800 # nlags need to be smaller than N/2 b/c border effects + nlags = 800 # nlags need to be smaller than N/2 b/c border effects signal = np.zeros((self.n_samples, 2)) signal[:, 0] = 0.2 * np.sin(2. * np.pi * self.freq * self.times) signal[:, 1] = 5.3 * np.cos(2. * np.pi * self.freq * self.times) # Convert signal to neo.AnalogSignal - signal = neo.AnalogSignal(signal, units='mV', t_start=0.*pq.ms, + signal = neo.AnalogSignal(signal, units='mV', t_start=0. 
* pq.ms, sampling_rate=self.sampling_rate, dtype=float) envelope = elephant.signal_processing.cross_correlation_function( - signal, [0, 1], nlags=nlags, env=True) + signal, [0, 1], n_lags=nlags, hilbert_envelope=True) # Envelope should be one for sinusoidal function assert_array_almost_equal(envelope, np.ones_like(envelope), decimal=2) @@ -499,7 +498,7 @@ def test_butter_input_types(self): # check input as NumPy ndarray filtered_noise_np = elephant.signal_processing.butter( - noise_np, 400.0, 100.0, fs=1000.0) + noise_np, 400.0, 100.0, sampling_frequency=1000.0) self.assertTrue(isinstance(filtered_noise_np, np.ndarray)) self.assertFalse(isinstance(filtered_noise_np, pq.quantity.Quantity)) self.assertFalse(isinstance(filtered_noise_np, neo.AnalogSignal)) @@ -507,7 +506,7 @@ def test_butter_input_types(self): # check input as Quantity array filtered_noise_pq = elephant.signal_processing.butter( - noise_pq, 400.0 * pq.Hz, 100.0 * pq.Hz, fs=1000.0) + noise_pq, 400.0 * pq.Hz, 100.0 * pq.Hz, sampling_frequency=1000.0) self.assertTrue(isinstance(filtered_noise_pq, pq.quantity.Quantity)) self.assertFalse(isinstance(filtered_noise_pq, neo.AnalogSignal)) self.assertEqual(filtered_noise_pq.shape, noise_pq.shape) @@ -528,9 +527,9 @@ def test_butter_input_types(self): def test_butter_axis(self): noise = np.random.normal(size=(4, 5000)) filtered_noise = elephant.signal_processing.butter( - noise, 250.0, fs=1000.0) + noise, 250.0, sampling_frequency=1000.0) filtered_noise_transposed = elephant.signal_processing.butter( - noise.T, 250.0, fs=1000.0, axis=0) + noise.T, 250.0, sampling_frequency=1000.0, axis=0) self.assertTrue(np.all(filtered_noise == filtered_noise_transposed.T)) def test_butter_multidim_input(self): @@ -540,7 +539,7 @@ def test_butter_multidim_input(self): noise_neo1d = neo.AnalogSignal( noise_pq[0], sampling_rate=1000.0 * pq.Hz) filtered_noise_pq = elephant.signal_processing.butter( - noise_pq, 250.0, fs=1000.0) + noise_pq, 250.0, sampling_frequency=1000.0) filtered_noise_neo = elephant.signal_processing.butter( noise_neo, 250.0) filtered_noise_neo1d = elephant.signal_processing.butter( @@ -617,17 +616,17 @@ def test_hilbert_output_shape(self): """ true_shape = np.shape(self.long_signals) output = elephant.signal_processing.hilbert( - self.long_signals, N='nextpow') + self.long_signals, padding='nextpow') self.assertEqual(np.shape(output), true_shape) self.assertEqual(output.units, pq.dimensionless) output = elephant.signal_processing.hilbert( - self.long_signals, N=16384) + self.long_signals, padding=16384) self.assertEqual(np.shape(output), true_shape) self.assertEqual(output.units, pq.dimensionless) def test_hilbert_array_annotations(self): output = elephant.signal_processing.hilbert(self.long_signals, - N='nextpow') + padding='nextpow') # Test if array_annotations are preserved self.assertSetEqual(set(output.array_annotations.keys()), {"my_list"}) assert_array_equal(output.array_annotations['my_list'], @@ -642,7 +641,7 @@ def test_hilbert_theoretical_long_signals(self): for padding in ['nextpow', 'none', 16384]: h = elephant.signal_processing.hilbert( - self.long_signals, N=padding) + self.long_signals, padding=padding) phase = np.angle(h.magnitude) amplitude = np.abs(h.magnitude) @@ -688,7 +687,7 @@ def test_hilbert_theoretical_one_period(self): for padding in ['nextpow', 'none', 512]: h = elephant.signal_processing.hilbert( - self.one_period, N=padding) + self.one_period, padding=padding) amplitude = np.abs(h.magnitude) phase = np.angle(h.magnitude) @@ -736,19 +735,31 @@ class 
WaveletTestCase(unittest.TestCase): def setUp(self): # generate a 10-sec test data of pure 50 Hz cosine wave self.fs = 1000.0 - self.times = np.arange(0, 10.0, 1/self.fs) + self.times = np.arange(0, 10.0, 1 / self.fs) self.test_freq1 = 50.0 self.test_freq2 = 60.0 - self.test_data1 = np.cos(2*np.pi*self.test_freq1*self.times) - self.test_data2 = np.sin(2*np.pi*self.test_freq2*self.times) + self.test_data1 = np.cos(2 * np.pi * self.test_freq1 * self.times) + self.test_data2 = np.sin(2 * np.pi * self.test_freq2 * self.times) self.test_data_arr = np.vstack([self.test_data1, self.test_data2]) self.test_data = neo.AnalogSignal( - self.test_data_arr.T*pq.mV, t_start=self.times[0]*pq.s, - t_stop=self.times[-1]*pq.s, sampling_period=(1/self.fs)*pq.s) + self.test_data_arr.T * pq.mV, t_start=self.times[0] * pq.s, + t_stop=self.times[-1] * pq.s, sampling_period=(1 / self.fs) * pq.s) self.true_phase1 = np.angle( - self.test_data1 + 1j*np.sin(2*np.pi*self.test_freq1*self.times)) + self.test_data1 + + 1j * + np.sin( + 2 * + np.pi * + self.test_freq1 * + self.times)) self.true_phase2 = np.angle( - self.test_data2 - 1j*np.cos(2*np.pi*self.test_freq2*self.times)) + self.test_data2 - + 1j * + np.cos( + 2 * + np.pi * + self.test_freq2 * + self.times)) self.wt_freqs = [10, 20, 30] def test_wavelet_errors(self): @@ -756,24 +767,27 @@ def test_wavelet_errors(self): Tests if errors are raised as expected. """ # too high center frequency - kwds = {'signal': self.test_data, 'freq': self.fs/2} + kwds = {'signal': self.test_data, 'freq': self.fs / 2} self.assertRaises( ValueError, elephant.signal_processing.wavelet_transform, **kwds) - kwds = {'signal': self.test_data_arr, 'freq': self.fs/2, 'fs': self.fs} + kwds = { + 'signal': self.test_data_arr, + 'freq': self.fs / 2, + 'fs': self.fs} self.assertRaises( ValueError, elephant.signal_processing.wavelet_transform, **kwds) # too high center frequency in a list - kwds = {'signal': self.test_data, 'freq': [self.fs/10, self.fs/2]} + kwds = {'signal': self.test_data, 'freq': [self.fs / 10, self.fs / 2]} self.assertRaises( ValueError, elephant.signal_processing.wavelet_transform, **kwds) kwds = {'signal': self.test_data_arr, - 'freq': [self.fs/10, self.fs/2], 'fs': self.fs} + 'freq': [self.fs / 10, self.fs / 2], 'fs': self.fs} self.assertRaises( ValueError, elephant.signal_processing.wavelet_transform, **kwds) # nco is not positive - kwds = {'signal': self.test_data, 'freq': self.fs/10, 'nco': 0} + kwds = {'signal': self.test_data, 'freq': self.fs / 10, 'nco': 0} self.assertRaises( ValueError, elephant.signal_processing.wavelet_transform, **kwds) @@ -786,13 +800,13 @@ def test_wavelet_io(self): # check the shape of the result array # --- case of single center frequency wt = elephant.signal_processing.wavelet_transform(self.test_data, - self.fs/10) + self.fs / 10) self.assertTrue(wt.ndim == self.test_data.ndim) self.assertTrue(wt.shape[0] == self.test_data.shape[0]) # time axis self.assertTrue(wt.shape[1] == self.test_data.shape[1]) # channel axis wt_arr = elephant.signal_processing.wavelet_transform( - self.test_data_arr, self.fs/10, fs=self.fs) + self.test_data_arr, self.fs / 10, sampling_frequency=self.fs) self.assertTrue(wt_arr.ndim == self.test_data.ndim) # channel axis self.assertTrue(wt_arr.shape[0] == self.test_data_arr.shape[0]) @@ -800,7 +814,7 @@ def test_wavelet_io(self): self.assertTrue(wt_arr.shape[1] == self.test_data_arr.shape[1]) wt_arr1d = elephant.signal_processing.wavelet_transform( - self.test_data1, self.fs/10, fs=self.fs) + self.test_data1, self.fs / 
10, sampling_frequency=self.fs) self.assertTrue(wt_arr1d.ndim == self.test_data1.ndim) # time axis self.assertTrue(wt_arr1d.shape[0] == self.test_data1.shape[0]) @@ -808,14 +822,14 @@ def test_wavelet_io(self): # --- case of multiple center frequencies wt = elephant.signal_processing.wavelet_transform( self.test_data, self.wt_freqs) - self.assertTrue(wt.ndim == self.test_data.ndim+1) + self.assertTrue(wt.ndim == self.test_data.ndim + 1) self.assertTrue(wt.shape[0] == self.test_data.shape[0]) # time axis self.assertTrue(wt.shape[1] == self.test_data.shape[1]) # channel axis self.assertTrue(wt.shape[2] == len(self.wt_freqs)) # frequency axis wt_arr = elephant.signal_processing.wavelet_transform( - self.test_data_arr, self.wt_freqs, fs=self.fs) - self.assertTrue(wt_arr.ndim == self.test_data_arr.ndim+1) + self.test_data_arr, self.wt_freqs, sampling_frequency=self.fs) + self.assertTrue(wt_arr.ndim == self.test_data_arr.ndim + 1) # channel axis self.assertTrue(wt_arr.shape[0] == self.test_data_arr.shape[0]) # frequency axis @@ -824,8 +838,8 @@ def test_wavelet_io(self): self.assertTrue(wt_arr.shape[2] == self.test_data_arr.shape[1]) wt_arr1d = elephant.signal_processing.wavelet_transform( - self.test_data1, self.wt_freqs, fs=self.fs) - self.assertTrue(wt_arr1d.ndim == self.test_data1.ndim+1) + self.test_data1, self.wt_freqs, sampling_frequency=self.fs) + self.assertTrue(wt_arr1d.ndim == self.test_data1.ndim + 1) # frequency axis self.assertTrue(wt_arr1d.shape[0] == len(self.wt_freqs)) # time axis @@ -856,7 +870,7 @@ def test_wavelet_amplitude(self): wt = elephant.signal_processing.wavelet_transform(self.test_data, self.test_freq1) # take a middle segment in order to avoid edge effects - amp = np.abs(wt[int(len(wt)/3):int(len(wt)//3*2), 0]) + amp = np.abs(wt[int(len(wt) / 3):int(len(wt) // 3 * 2), 0]) mean_amp = amp.mean() assert_array_almost_equal((amp - mean_amp) / mean_amp, np.zeros_like(amp), decimal=6) @@ -864,14 +878,15 @@ def test_wavelet_amplitude(self): # check that the amplitude of WT is (almost) zero when center frequency # is considerably different from signal frequency wt_low = elephant.signal_processing.wavelet_transform( - self.test_data, self.test_freq1/10) - amp_low = np.abs(wt_low[int(len(wt)/3):int(len(wt)//3*2), 0]) + self.test_data, self.test_freq1 / 10) + amp_low = np.abs(wt_low[int(len(wt) / 3):int(len(wt) // 3 * 2), 0]) assert_array_almost_equal(amp_low, np.zeros_like(amp), decimal=6) # check that zero padding hardly affect the result wt_padded = elephant.signal_processing.wavelet_transform( self.test_data, self.test_freq1, zero_padding=False) - amp_padded = np.abs(wt_padded[int(len(wt)/3):int(len(wt)//3*2), 0]) + amp_padded = np.abs( + wt_padded[int(len(wt) / 3):int(len(wt) // 3 * 2), 0]) assert_array_almost_equal(amp_padded, amp, decimal=9) def test_wavelet_phase(self): @@ -882,17 +897,22 @@ def test_wavelet_phase(self): # sinusoid wt = elephant.signal_processing.wavelet_transform(self.test_data, self.test_freq1) - phase = np.angle(wt[int(len(wt)/3):int(len(wt)//3*2), 0]) - true_phase = self.true_phase1[int(len(wt)/3):int(len(wt)//3*2)] - assert_array_almost_equal(np.exp(1j*phase), np.exp(1j*true_phase), + phase = np.angle(wt[int(len(wt) / 3):int(len(wt) // 3 * 2), 0]) + true_phase = self.true_phase1[int(len(wt) / 3):int(len(wt) // 3 * 2)] + assert_array_almost_equal(np.exp(1j * phase), np.exp(1j * true_phase), decimal=6) # check that zero padding hardly affect the result wt_padded = elephant.signal_processing.wavelet_transform( self.test_data, self.test_freq1, 
zero_padding=False) - phase_padded = np.angle(wt_padded[int(len(wt)/3):int(len(wt)//3*2), 0]) - assert_array_almost_equal(np.exp(1j*phase_padded), np.exp(1j*phase), - decimal=9) + phase_padded = np.angle( + wt_padded[int(len(wt) / 3):int(len(wt) // 3 * 2), 0]) + assert_array_almost_equal( + np.exp( + 1j * phase_padded), + np.exp( + 1j * phase), + decimal=9) class DerivativeTestCase(unittest.TestCase): @@ -901,16 +921,16 @@ def setUp(self): self.fs = 1000.0 self.tmin = 0.0 self.tmax = 10.0 - self.times = np.arange(self.tmin, self.tmax, 1/self.fs) - self.test_data1 = np.cos(2*np.pi*self.times) + self.times = np.arange(self.tmin, self.tmax, 1 / self.fs) + self.test_data1 = np.cos(2 * np.pi * self.times) self.test_data2 = np.vstack( - [np.cos(2*np.pi*self.times), np.sin(2*np.pi*self.times)]).T + [np.cos(2 * np.pi * self.times), np.sin(2 * np.pi * self.times)]).T self.test_signal1 = neo.AnalogSignal( - self.test_data1*pq.mV, t_start=self.times[0]*pq.s, - t_stop=self.times[-1]*pq.s, sampling_period=(1/self.fs)*pq.s) + self.test_data1 * pq.mV, t_start=self.times[0] * pq.s, + t_stop=self.times[-1] * pq.s, sampling_period=(1 / self.fs) * pq.s) self.test_signal2 = neo.AnalogSignal( - self.test_data2*pq.mV, t_start=self.times[0]*pq.s, - t_stop=self.times[-1]*pq.s, sampling_period=(1/self.fs)*pq.s) + self.test_data2 * pq.mV, t_start=self.times[0] * pq.s, + t_stop=self.times[-1] * pq.s, sampling_period=(1 / self.fs) * pq.s) def test_derivative_invalid_signal(self): '''Test derivative on non-AnalogSignal''' @@ -925,7 +945,7 @@ def test_derivative_units(self): self.assertTrue(isinstance(derivative, neo.AnalogSignal)) self.assertEqual( derivative.units, - self.test_signal1.units/self.test_signal1.times.units) + self.test_signal1.units / self.test_signal1.times.units) def test_derivative_times(self): '''Test derivative returns AnalogSignal with correct times''' @@ -936,11 +956,11 @@ def test_derivative_times(self): # test that sampling period is correct self.assertEqual( derivative.sampling_period, - 1/self.fs * self.test_signal1.times.units) + 1 / self.fs * self.test_signal1.times.units) # test that all times are correct target_times = self.times[:-1] * self.test_signal1.times.units \ - + derivative.sampling_period/2 + + derivative.sampling_period / 2 assert_array_almost_equal(derivative.times, target_times) # test that t_start and t_stop are correct @@ -961,12 +981,12 @@ def test_derivative_values(self): # single channel assert_array_almost_equal( derivative1.magnitude, - np.vstack([np.diff(self.test_data1)]).T / (1/self.fs)) + np.vstack([np.diff(self.test_data1)]).T / (1 / self.fs)) # multi channel assert_array_almost_equal(derivative2.magnitude, np.vstack([ np.diff(self.test_data2[:, 0]), - np.diff(self.test_data2[:, 1])]).T / (1/self.fs)) + np.diff(self.test_data2[:, 1])]).T / (1 / self.fs)) class RAUCTestCase(unittest.TestCase): @@ -975,16 +995,16 @@ def setUp(self): self.fs = 1000.0 self.tmin = 0.0 self.tmax = 10.0 - self.times = np.arange(self.tmin, self.tmax, 1/self.fs) - self.test_data1 = np.cos(2*np.pi*self.times) + self.times = np.arange(self.tmin, self.tmax, 1 / self.fs) + self.test_data1 = np.cos(2 * np.pi * self.times) self.test_data2 = np.vstack( - [np.cos(2*np.pi*self.times), np.sin(2*np.pi*self.times)]).T + [np.cos(2 * np.pi * self.times), np.sin(2 * np.pi * self.times)]).T self.test_signal1 = neo.AnalogSignal( - self.test_data1*pq.mV, t_start=self.times[0]*pq.s, - t_stop=self.times[-1]*pq.s, sampling_period=(1/self.fs)*pq.s) + self.test_data1 * pq.mV, t_start=self.times[0] * pq.s, + 
t_stop=self.times[-1] * pq.s, sampling_period=(1 / self.fs) * pq.s) self.test_signal2 = neo.AnalogSignal( - self.test_data2*pq.mV, t_start=self.times[0]*pq.s, - t_stop=self.times[-1]*pq.s, sampling_period=(1/self.fs)*pq.s) + self.test_data2 * pq.mV, t_start=self.times[0] * pq.s, + t_stop=self.times[-1] * pq.s, sampling_period=(1 / self.fs) * pq.s) def test_rauc_invalid_signal(self): '''Test rauc on non-AnalogSignal''' @@ -1013,20 +1033,20 @@ def test_rauc_units(self): self.assertTrue(isinstance(rauc, pq.Quantity)) self.assertEqual( rauc.units, - self.test_signal1.units*self.test_signal1.times.units) + self.test_signal1.units * self.test_signal1.times.units) # test that multi-bin result is AnalogSignal with correct units rauc_arr = elephant.signal_processing.rauc( - self.test_signal1, bin_duration=1*pq.s) + self.test_signal1, bin_duration=1 * pq.s) self.assertTrue(isinstance(rauc_arr, neo.AnalogSignal)) self.assertEqual( rauc_arr.units, - self.test_signal1.units*self.test_signal1.times.units) + self.test_signal1.units * self.test_signal1.times.units) def test_rauc_times_without_overextending_bin(self): '''Test rauc returns correct times when signal is binned evenly''' - bin_duration = 1*pq.s # results in all bin centers < original t_stop + bin_duration = 1 * pq.s # results in all bin centers < original t_stop rauc_arr = elephant.signal_processing.rauc( self.test_signal1, bin_duration=bin_duration) self.assertTrue(isinstance(rauc_arr, neo.AnalogSignal)) @@ -1038,7 +1058,7 @@ def test_rauc_times_without_overextending_bin(self): target_times = np.arange(self.tmin, self.tmax, bin_duration.magnitude) \ - * bin_duration.units + bin_duration/2 + * bin_duration.units + bin_duration / 2 assert_array_almost_equal(rauc_arr.times, target_times) # test that t_start and t_stop are correct @@ -1050,7 +1070,7 @@ def test_rauc_times_without_overextending_bin(self): def test_rauc_times_with_overextending_bin(self): '''Test rauc returns correct times when signal is NOT binned evenly''' - bin_duration = 0.99*pq.s # results in one bin center > original t_stop + bin_duration = 0.99 * pq.s # results in one bin center > original t_stop rauc_arr = elephant.signal_processing.rauc( self.test_signal1, bin_duration=bin_duration) self.assertTrue(isinstance(rauc_arr, neo.AnalogSignal)) @@ -1062,7 +1082,7 @@ def test_rauc_times_with_overextending_bin(self): target_times = np.arange(self.tmin, self.tmax, bin_duration.magnitude) \ - * bin_duration.units + bin_duration/2 + * bin_duration.units + bin_duration / 2 assert_array_almost_equal(rauc_arr.times, target_times) # test that t_start and t_stop are correct @@ -1093,9 +1113,9 @@ def test_rauc_values_one_bin(self): def test_rauc_values_multi_bin(self): '''Test rauc returns correct values when there are multiple bins''' rauc_arr1 = elephant.signal_processing.rauc( - self.test_signal1, bin_duration=0.99*pq.s) + self.test_signal1, bin_duration=0.99 * pq.s) rauc_arr2 = elephant.signal_processing.rauc( - self.test_signal2, bin_duration=0.99*pq.s) + self.test_signal2, bin_duration=0.99 * pq.s) self.assertTrue(isinstance(rauc_arr1, neo.AnalogSignal)) self.assertTrue(isinstance(rauc_arr2, neo.AnalogSignal)) @@ -1168,9 +1188,9 @@ def test_rauc_median_baseline(self): def test_rauc_arbitrary_baseline(self): '''Test rauc returns correct values when arbitrary baseline is given''' rauc1 = elephant.signal_processing.rauc( - self.test_signal1, baseline=0.123*pq.mV) + self.test_signal1, baseline=0.123 * pq.mV) rauc2 = elephant.signal_processing.rauc( - self.test_signal2, 
baseline=0.123*pq.mV) + self.test_signal2, baseline=0.123 * pq.mV) self.assertTrue(isinstance(rauc1, pq.Quantity)) self.assertTrue(isinstance(rauc2, pq.Quantity)) @@ -1187,9 +1207,9 @@ def test_rauc_arbitrary_baseline(self): def test_rauc_time_slice(self): '''Test rauc returns correct values when t_start, t_stop are given''' rauc1 = elephant.signal_processing.rauc( - self.test_signal1, t_start=0.123*pq.s, t_stop=0.456*pq.s) + self.test_signal1, t_start=0.123 * pq.s, t_stop=0.456 * pq.s) rauc2 = elephant.signal_processing.rauc( - self.test_signal2, t_start=0.123*pq.s, t_stop=0.456*pq.s) + self.test_signal2, t_start=0.123 * pq.s, t_stop=0.456 * pq.s) self.assertTrue(isinstance(rauc1, pq.Quantity)) self.assertTrue(isinstance(rauc2, pq.Quantity)) diff --git a/elephant/test/test_spade.py b/elephant/test/test_spade.py index c06c48f04..d1d7c8f3a 100644 --- a/elephant/test/test_spade.py +++ b/elephant/test/test_spade.py @@ -32,7 +32,7 @@ class SpadeTestCase(unittest.TestCase): def setUp(self): # Spade parameters - self.binsize = 1 * pq.ms + self.bin_size = 1 * pq.ms self.winlen = 10 self.n_subset = 10 self.n_surr = 10 @@ -48,8 +48,10 @@ def setUp(self): # CPP parameters self.n_neu = 100 self.amplitude = [0] * self.n_neu + [1] - self.cpp = stg.cpp(rate=3 * pq.Hz, A=self.amplitude, - t_stop=5 * pq.s) + self.cpp = stg.cpp( + rate=3 * pq.Hz, + amplitude_distribution=self.amplitude, + t_stop=5 * pq.s) # Number of patterns' occurrences self.n_occ1 = 10 self.n_occ2 = 12 @@ -107,11 +109,11 @@ def setUp(self): self.n_spk2 + self.n_spk3))] self.occ1 = np.unique(conv.BinnedSpikeTrain( - self.patt1_times, self.binsize).spike_indices[0]) + self.patt1_times, self.bin_size).spike_indices[0]) self.occ2 = np.unique(conv.BinnedSpikeTrain( - self.patt2_times, self.binsize).spike_indices[0]) + self.patt2_times, self.bin_size).spike_indices[0]) self.occ3 = np.unique(conv.BinnedSpikeTrain( - self.patt3_times, self.binsize).spike_indices[0]) + self.patt3_times, self.bin_size).spike_indices[0]) self.occ_msip = [ list(self.occ1), list(self.occ2), list(self.occ3)] self.lags_msip = [self.lags1, self.lags2, self.lags3] @@ -120,7 +122,7 @@ def setUp(self): # Testing cpp @unittest.skipUnless(HAVE_FIM, "Time consuming with pythonic FIM") def test_spade_cpp(self): - output_cpp = spade.spade(self.cpp, self.binsize, 1, + output_cpp = spade.spade(self.cpp, self.bin_size, 1, approx_stab_pars=dict( n_subsets=self.n_subset, stability_thresh=self.stability_thresh), @@ -142,18 +144,19 @@ def test_spade_cpp(self): # Testing spectrum cpp def test_spade_spectrum_cpp(self): # Computing Spectrum - spectrum_cpp = spade.concepts_mining(self.cpp, self.binsize, + spectrum_cpp = spade.concepts_mining(self.cpp, self.bin_size, 1, report='#')[0] # Check spectrum assert_array_equal( - spectrum_cpp, - [(len(self.cpp), - np.sum(conv.BinnedSpikeTrain(self.cpp[0], - self.binsize).to_bool_array()), 1)]) + spectrum_cpp, [ + (len( + self.cpp), np.sum( + conv.BinnedSpikeTrain( + self.cpp[0], self.bin_size).to_bool_array()), 1)]) # Testing with multiple patterns input def test_spade_msip(self): - output_msip = spade.spade(self.msip, self.binsize, self.winlen, + output_msip = spade.spade(self.msip, self.bin_size, self.winlen, approx_stab_pars=dict( n_subsets=self.n_subset, stability_thresh=self.stability_thresh), @@ -189,7 +192,7 @@ def test_parameters(self): # n_surr=0 and alpha=0.05 spawns expected UserWarning output_msip_min_spikes = spade.spade( self.msip, - self.binsize, + self.bin_size, self.winlen, min_spikes=self.min_spikes, 
approx_stab_pars=dict(n_subsets=self.n_subset), @@ -221,14 +224,18 @@ def test_parameters(self): assert_array_equal(-1, pvalue) # test min_occ parameter - output_msip_min_occ = spade.spade(self.msip, self.binsize, self.winlen, - min_occ=self.min_occ, - approx_stab_pars=dict( - n_subsets=self.n_subset), - n_surr=self.n_surr, alpha=self.alpha, - psr_param=self.psr_param, - stat_corr='no', - output_format='patterns')['patterns'] + output_msip_min_occ = spade.spade( + self.msip, + self.bin_size, + self.winlen, + min_occ=self.min_occ, + approx_stab_pars=dict( + n_subsets=self.n_subset), + n_surr=self.n_surr, + alpha=self.alpha, + psr_param=self.psr_param, + stat_corr='no', + output_format='patterns')['patterns'] # collect spade output occ_msip_min_occ = [] for out in output_msip_min_occ: @@ -241,7 +248,7 @@ def test_parameters(self): # test max_spikes parameter output_msip_max_spikes = spade.spade( self.msip, - self.binsize, + self.bin_size, self.winlen, max_spikes=self.max_spikes, approx_stab_pars=dict( @@ -269,14 +276,18 @@ def test_parameters(self): [True] * len(lags_msip_max_spikes)) # test max_occ parameter - output_msip_max_occ = spade.spade(self.msip, self.binsize, self.winlen, - max_occ=self.max_occ, - approx_stab_pars=dict( - n_subsets=self.n_subset), - n_surr=self.n_surr, alpha=self.alpha, - psr_param=self.psr_param, - stat_corr='no', - output_format='patterns')['patterns'] + output_msip_max_occ = spade.spade( + self.msip, + self.bin_size, + self.winlen, + max_occ=self.max_occ, + approx_stab_pars=dict( + n_subsets=self.n_subset), + n_surr=self.n_surr, + alpha=self.alpha, + psr_param=self.psr_param, + stat_corr='no', + output_format='patterns')['patterns'] # collect spade output occ_msip_max_occ = [] for out in output_msip_max_occ: @@ -292,7 +303,7 @@ def test_parameters(self): def test_fpgrowth_fca(self): print("fim.so is found.") binary_matrix = conv.BinnedSpikeTrain( - self.patt1, self.binsize).to_sparse_bool_array().tocoo() + self.patt1, self.bin_size).to_sparse_bool_array().tocoo() context, transactions, rel_matrix = spade._build_context( binary_matrix, self.winlen) # mining the data with python fast_fca @@ -311,7 +322,7 @@ def test_fpgrowth_fca(self): # Tests 3d spectrum # Testing with multiple patterns input def test_spade_msip_3d(self): - output_msip = spade.spade(self.msip, self.binsize, self.winlen, + output_msip = spade.spade(self.msip, self.bin_size, self.winlen, approx_stab_pars=dict( n_subsets=self.n_subset, stability_thresh=self.stability_thresh), @@ -342,7 +353,7 @@ def test_parameters_3d(self): # test min_spikes parameter output_msip_min_spikes = spade.spade( self.msip, - self.binsize, + self.bin_size, self.winlen, min_spikes=self.min_spikes, approx_stab_pars=dict( @@ -373,16 +384,19 @@ def test_parameters_3d(self): el) >= self.min_spikes]) # test min_occ parameter - output_msip_min_occ = spade.spade(self.msip, self.binsize, self.winlen, - min_occ=self.min_occ, - approx_stab_pars=dict( - n_subsets=self.n_subset), - n_surr=self.n_surr, - spectrum='3d#', - alpha=self.alpha, - psr_param=self.psr_param, - stat_corr='no', - output_format='patterns')['patterns'] + output_msip_min_occ = spade.spade( + self.msip, + self.bin_size, + self.winlen, + min_occ=self.min_occ, + approx_stab_pars=dict( + n_subsets=self.n_subset), + n_surr=self.n_surr, + spectrum='3d#', + alpha=self.alpha, + psr_param=self.psr_param, + stat_corr='no', + output_format='patterns')['patterns'] # collect spade output occ_msip_min_occ = [] for out in output_msip_min_occ: @@ -395,11 +409,11 @@ def 
test_parameters_3d(self): # Test computation spectrum def test_spectrum(self): # test 2d spectrum - spectrum = spade.concepts_mining(self.patt1, self.binsize, + spectrum = spade.concepts_mining(self.patt1, self.bin_size, self.winlen, report='#')[0] # test 3d spectrum assert_array_equal(spectrum, [[len(self.lags1) + 1, self.n_occ1, 1]]) - spectrum_3d = spade.concepts_mining(self.patt1, self.binsize, + spectrum_3d = spade.concepts_mining(self.patt1, self.bin_size, self.winlen, report='3d#')[0] assert_array_equal(spectrum_3d, [ [len(self.lags1) + 1, self.n_occ1, max(self.lags1), 1]]) @@ -557,7 +571,6 @@ def test_pattern_set_reduction(self): winlen=winlen, spectrum='#') self.assertEqual(concepts, [concept3, concept4]) - @unittest.skipUnless(HAVE_STATSMODELS, "'fdr_bh' stat corr requires statsmodels") def test_signature_significance_fdr_bh_corr(self): diff --git a/elephant/test/test_spectral.py b/elephant/test/test_spectral.py index 24f8ab3b5..801bd8246 100644 --- a/elephant/test/test_spectral.py +++ b/elephant/test/test_spectral.py @@ -20,7 +20,7 @@ class WelchPSDTestCase(unittest.TestCase): def test_welch_psd_errors(self): # generate a dummy data - data = n.AnalogSignal(np.zeros(5000), sampling_period=0.001*pq.s, + data = n.AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s, units='mV') # check for invalid parameter values @@ -38,7 +38,7 @@ def test_welch_psd_errors(self): self.assertRaises(ValueError, elephant.spectral.welch_psd, data, freq_res=-1) self.assertRaises(ValueError, elephant.spectral.welch_psd, data, - freq_res=data.sampling_rate/(data.shape[0]+1)) + freq_res=data.sampling_rate / (data.shape[0] + 1)) # - overlap self.assertRaises(ValueError, elephant.spectral.welch_psd, data, overlap=-1.0) @@ -51,34 +51,43 @@ def test_welch_psd_behavior(self): sampling_period = 0.001 signal_freq = 100.0 noise = np.random.normal(size=data_length) - signal = [np.sin(2*np.pi*signal_freq*t) - for t in np.arange(0, data_length*sampling_period, + signal = [np.sin(2 * np.pi * signal_freq * t) + for t in np.arange(0, data_length * sampling_period, sampling_period)] - data = n.AnalogSignal(np.array(signal+noise), - sampling_period=sampling_period*pq.s, - units='mV') + data = n.AnalogSignal(np.array(signal + noise), + sampling_period=sampling_period * pq.s, + units='mV') # consistency between different ways of specifying segment length - freqs1, psd1 = elephant.spectral.welch_psd(data, len_seg=data_length//5, overlap=0) - freqs2, psd2 = elephant.spectral.welch_psd(data, num_seg=5, overlap=0) - self.assertTrue((psd1==psd2).all() and (freqs1==freqs2).all()) + freqs1, psd1 = elephant.spectral.welch_psd( + data, len_segment=data_length // 5, overlap=0) + freqs2, psd2 = elephant.spectral.welch_psd( + data, n_segments=5, overlap=0) + self.assertTrue((psd1 == psd2).all() and (freqs1 == freqs2).all()) # frequency resolution and consistency with data freq_res = 1.0 * pq.Hz - freqs, psd = elephant.spectral.welch_psd(data, freq_res=freq_res) - self.assertAlmostEqual(freq_res, freqs[1]-freqs[0]) + freqs, psd = elephant.spectral.welch_psd( + data, frequency_resolution=freq_res) + self.assertAlmostEqual(freq_res, freqs[1] - freqs[0]) self.assertEqual(freqs[psd.argmax()], signal_freq) - freqs_np, psd_np = elephant.spectral.welch_psd(data.magnitude.flatten(), fs=1/sampling_period, freq_res=freq_res) - self.assertTrue((freqs==freqs_np).all() and (psd==psd_np).all()) + freqs_np, psd_np = elephant.spectral.welch_psd( + data.magnitude.flatten(), fs=1 / sampling_period, + frequency_resolution=freq_res) + 
self.assertTrue((freqs == freqs_np).all() and (psd == psd_np).all()) # check of scipy.signal.welch() parameters params = {'window': 'hamming', 'nfft': 1024, 'detrend': 'linear', 'return_onesided': False, 'scaling': 'spectrum'} for key, val in params.items(): - freqs, psd = elephant.spectral.welch_psd(data, len_seg=1000, overlap=0, **{key: val}) - freqs_spsig, psd_spsig = spsig.welch(np.rollaxis(data, 0, len(data.shape)), - fs=1/sampling_period, nperseg=1000, noverlap=0, **{key: val}) - self.assertTrue((freqs==freqs_spsig).all() and (psd==psd_spsig).all()) + freqs, psd = elephant.spectral.welch_psd( + data, len_segment=1000, overlap=0, **{key: val}) + freqs_spsig, psd_spsig = spsig.welch(np.rollaxis(data, 0, len( + data.shape)), fs=1 / sampling_period, nperseg=1000, + noverlap=0, **{key: val}) + self.assertTrue( + (freqs == freqs_spsig).all() and ( + psd == psd_spsig).all()) # - generate multidimensional data for check of parameter `axis` num_channel = 4 @@ -86,15 +95,15 @@ def test_welch_psd_behavior(self): data_multidim = np.random.normal(size=(num_channel, data_length)) freqs, psd = elephant.spectral.welch_psd(data_multidim) freqs_T, psd_T = elephant.spectral.welch_psd(data_multidim.T, axis=0) - self.assertTrue(np.all(freqs==freqs_T)) - self.assertTrue(np.all(psd==psd_T.T)) + self.assertTrue(np.all(freqs == freqs_T)) + self.assertTrue(np.all(psd == psd_T.T)) def test_welch_psd_input_types(self): # generate a test data sampling_period = 0.001 data = n.AnalogSignal(np.array(np.random.normal(size=5000)), - sampling_period=sampling_period*pq.s, - units='mV') + sampling_period=sampling_period * pq.s, + units='mV') # outputs from AnalogSignal input are of Quantity type (standard usage) freqs_neo, psd_neo = elephant.spectral.welch_psd(data) @@ -102,18 +111,24 @@ def test_welch_psd_input_types(self): self.assertTrue(isinstance(psd_neo, pq.quantity.Quantity)) # outputs from Quantity array input are of Quantity type - freqs_pq, psd_pq = elephant.spectral.welch_psd(data.magnitude.flatten()*data.units, fs=1/sampling_period) + freqs_pq, psd_pq = elephant.spectral.welch_psd( + data.magnitude.flatten() * data.units, fs=1 / sampling_period) self.assertTrue(isinstance(freqs_pq, pq.quantity.Quantity)) self.assertTrue(isinstance(psd_pq, pq.quantity.Quantity)) # outputs from Numpy ndarray input are NOT of Quantity type - freqs_np, psd_np = elephant.spectral.welch_psd(data.magnitude.flatten(), fs=1/sampling_period) + freqs_np, psd_np = elephant.spectral.welch_psd( + data.magnitude.flatten(), fs=1 / sampling_period) self.assertFalse(isinstance(freqs_np, pq.quantity.Quantity)) self.assertFalse(isinstance(psd_np, pq.quantity.Quantity)) # check if the results from different input types are identical - self.assertTrue((freqs_neo==freqs_pq).all() and (psd_neo==psd_pq).all()) - self.assertTrue((freqs_neo==freqs_np).all() and (psd_neo==psd_np).all()) + self.assertTrue( + (freqs_neo == freqs_pq).all() and ( + psd_neo == psd_pq).all()) + self.assertTrue( + (freqs_neo == freqs_np).all() and ( + psd_neo == psd_np).all()) def test_welch_psd_multidim_input(self): # generate multidimensional data @@ -126,51 +141,52 @@ def test_welch_psd_multidim_input(self): # conventional one, `data_np` needs to be transposed when its used to # define an AnalogSignal data_neo = n.AnalogSignal(data_np.T, - sampling_period=sampling_period*pq.s, - units='mV') + sampling_period=sampling_period * pq.s, + units='mV') data_neo_1dim = n.AnalogSignal(data_np[0], - sampling_period=sampling_period*pq.s, + sampling_period=sampling_period * pq.s, 
units='mV') # check if the results from different input types are identical freqs_np, psd_np = elephant.spectral.welch_psd(data_np, - fs=1/sampling_period) + fs=1 / sampling_period) freqs_neo, psd_neo = elephant.spectral.welch_psd(data_neo) - freqs_neo_1dim, psd_neo_1dim = elephant.spectral.welch_psd(data_neo_1dim) - self.assertTrue(np.all(freqs_np==freqs_neo)) - self.assertTrue(np.all(psd_np==psd_neo)) - self.assertTrue(np.all(psd_neo_1dim==psd_neo[0])) + freqs_neo_1dim, psd_neo_1dim = elephant.spectral.welch_psd( + data_neo_1dim) + self.assertTrue(np.all(freqs_np == freqs_neo)) + self.assertTrue(np.all(psd_np == psd_neo)) + self.assertTrue(np.all(psd_neo_1dim == psd_neo[0])) class WelchCohereTestCase(unittest.TestCase): def test_welch_cohere_errors(self): # generate a dummy data - x = n.AnalogSignal(np.zeros(5000), sampling_period=0.001*pq.s, - units='mV') - y = n.AnalogSignal(np.zeros(5000), sampling_period=0.001*pq.s, - units='mV') + x = n.AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s, + units='mV') + y = n.AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s, + units='mV') # check for invalid parameter values # - length of segments - self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y, - len_seg=0) - self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y, - len_seg=x.shape[0] * 2) + self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, + len_seg=0) + self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, + len_seg=x.shape[0] * 2) # - number of segments - self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y, - num_seg=0) - self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y, - num_seg=x.shape[0] * 2) + self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, + num_seg=0) + self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, + num_seg=x.shape[0] * 2) # - frequency resolution - self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y, - freq_res=-1) - self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y, - freq_res=x.sampling_rate/(x.shape[0]+1)) + self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, + freq_res=-1) + self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, + freq_res=x.sampling_rate / (x.shape[0] + 1)) # - overlap - self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y, - overlap=-1.0) - self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y, - overlap=1.1) + self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, + overlap=-1.0) + self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, + overlap=1.1) def test_welch_cohere_behavior(self): # generate data by adding white noise and a sinusoid @@ -179,38 +195,40 @@ def test_welch_cohere_behavior(self): signal_freq = 100.0 noise1 = np.random.normal(size=data_length) * 0.01 noise2 = np.random.normal(size=data_length) * 0.01 - signal1 = [np.cos(2*np.pi*signal_freq*t) - for t in np.arange(0, data_length*sampling_period, - sampling_period)] - signal2 = [np.sin(2*np.pi*signal_freq*t) - for t in np.arange(0, data_length*sampling_period, - sampling_period)] - x = n.AnalogSignal(np.array(signal1+noise1), units='mV', - sampling_period=sampling_period*pq.s) - y = n.AnalogSignal(np.array(signal2+noise2), units='mV', - sampling_period=sampling_period*pq.s) + signal1 = [np.cos(2 * np.pi * signal_freq * t) + for t in np.arange(0, data_length * sampling_period, + sampling_period)] + signal2 = 
[np.sin(2 * np.pi * signal_freq * t) + for t in np.arange(0, data_length * sampling_period, + sampling_period)] + x = n.AnalogSignal(np.array(signal1 + noise1), units='mV', + sampling_period=sampling_period * pq.s) + y = n.AnalogSignal(np.array(signal2 + noise2), units='mV', + sampling_period=sampling_period * pq.s) # consistency between different ways of specifying segment length - freqs1, coherency1, phase_lag1 = elephant.spectral.welch_cohere(x, y, - len_seg=data_length//5, overlap=0) - freqs2, coherency2, phase_lag2 = elephant.spectral.welch_cohere(x, y, - num_seg=5, overlap=0) - self.assertTrue((coherency1==coherency2).all() and - (phase_lag1==phase_lag2).all() and - (freqs1==freqs2).all()) + freqs1, coherency1, phase_lag1 = elephant.spectral.welch_coherence( + x, y, len_segment=data_length // 5, overlap=0) + freqs2, coherency2, phase_lag2 = elephant.spectral.welch_coherence( + x, y, n_segments=5, overlap=0) + self.assertTrue((coherency1 == coherency2).all() and + (phase_lag1 == phase_lag2).all() and + (freqs1 == freqs2).all()) # frequency resolution and consistency with data freq_res = 1.0 * pq.Hz - freqs, coherency, phase_lag = elephant.spectral.welch_cohere(x, y, - freq_res=freq_res) - self.assertAlmostEqual(freq_res, freqs[1]-freqs[0]) + freqs, coherency, phase_lag = elephant.spectral.welch_coherence( + x, y, frequency_resolution=freq_res) + self.assertAlmostEqual(freq_res, freqs[1] - freqs[0]) self.assertAlmostEqual(freqs[coherency.argmax()], signal_freq, - places=2) - self.assertAlmostEqual(phase_lag[coherency.argmax()], -np.pi/2, - places=2) + places=2) + self.assertAlmostEqual(phase_lag[coherency.argmax()], -np.pi / 2, + places=2) freqs_np, coherency_np, phase_lag_np =\ - elephant.spectral.welch_cohere(x.magnitude.flatten(), y.magnitude.flatten(), - fs=1/sampling_period, freq_res=freq_res) + elephant.spectral.welch_coherence(x.magnitude.flatten(), + y.magnitude.flatten(), + fs=1 / sampling_period, + frequency_resolution=freq_res) assert_array_equal(freqs.simplified.magnitude, freqs_np) assert_array_equal(coherency[:, 0], coherency_np) assert_array_equal(phase_lag[:, 0], phase_lag_np) @@ -221,9 +239,9 @@ def test_welch_cohere_behavior(self): x_multidim = np.random.normal(size=(num_channel, data_length)) y_multidim = np.random.normal(size=(num_channel, data_length)) freqs, coherency, phase_lag =\ - elephant.spectral.welch_cohere(x_multidim, y_multidim) - freqs_T, coherency_T, phase_lag_T =\ - elephant.spectral.welch_cohere(x_multidim.T, y_multidim.T, axis=0) + elephant.spectral.welch_coherence(x_multidim, y_multidim) + freqs_T, coherency_T, phase_lag_T = elephant.spectral.welch_coherence( + x_multidim.T, y_multidim.T, axis=0) assert_array_equal(freqs, freqs_T) assert_array_equal(coherency, coherency_T.T) assert_array_equal(phase_lag, phase_lag_T.T) @@ -232,40 +250,42 @@ def test_welch_cohere_input_types(self): # generate a test data sampling_period = 0.001 x = n.AnalogSignal(np.array(np.random.normal(size=5000)), - sampling_period=sampling_period*pq.s, - units='mV') + sampling_period=sampling_period * pq.s, + units='mV') y = n.AnalogSignal(np.array(np.random.normal(size=5000)), - sampling_period=sampling_period*pq.s, - units='mV') + sampling_period=sampling_period * pq.s, + units='mV') # outputs from AnalogSignal input are of Quantity type # (standard usage) freqs_neo, coherency_neo, phase_lag_neo =\ - elephant.spectral.welch_cohere(x, y) + elephant.spectral.welch_coherence(x, y) self.assertTrue(isinstance(freqs_neo, pq.quantity.Quantity)) 
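For reference, a minimal sketch of the renamed coherence estimator that the hunks in this file exercise: `welch_cohere` becomes `elephant.spectral.welch_coherence`, and the `len_seg`/`num_seg`/`freq_res` keywords become `len_segment`/`n_segments`/`frequency_resolution`. The function and keyword names follow the updated tests above; the two toy signals are illustrative only and not taken from the test suite.

import numpy as np
import quantities as pq
import neo
import elephant.spectral

# two 5-second sinusoids sampled at 1 kHz, 90 degrees apart
times = np.arange(0, 5, 0.001)
x = neo.AnalogSignal(np.sin(2 * np.pi * 10 * times), units='mV',
                     sampling_period=0.001 * pq.s)
y = neo.AnalogSignal(np.cos(2 * np.pi * 10 * times), units='mV',
                     sampling_period=0.001 * pq.s)

# same call pattern as in the updated tests: five non-overlapping segments
freqs, coherency, phase_lag = elephant.spectral.welch_coherence(
    x, y, n_segments=5, overlap=0)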
self.assertTrue(isinstance(phase_lag_neo, pq.quantity.Quantity)) # outputs from Quantity array input are of Quantity type - freqs_pq, coherency_pq, phase_lag_pq =\ - elephant.spectral.welch_cohere(x.magnitude.flatten()*x.units, - y.magnitude.flatten()*y.units, fs=1/sampling_period) + freqs_pq, coherency_pq, phase_lag_pq = elephant.spectral\ + .welch_coherence(x.magnitude.flatten() * x.units, + y.magnitude.flatten() * y.units, + fs=1 / sampling_period) self.assertTrue(isinstance(freqs_pq, pq.quantity.Quantity)) self.assertTrue(isinstance(phase_lag_pq, pq.quantity.Quantity)) # outputs from Numpy ndarray input are NOT of Quantity type - freqs_np, coherency_np, phase_lag_np =\ - elephant.spectral.welch_cohere(x.magnitude.flatten(), y.magnitude.flatten(), - fs=1/sampling_period) + freqs_np, coherency_np, phase_lag_np = elephant.spectral\ + .welch_coherence(x.magnitude.flatten(), + y.magnitude.flatten(), + fs=1 / sampling_period) self.assertFalse(isinstance(freqs_np, pq.quantity.Quantity)) self.assertFalse(isinstance(phase_lag_np, pq.quantity.Quantity)) # check if the results from different input types are identical - self.assertTrue((freqs_neo==freqs_pq).all() and - (coherency_neo[:, 0]==coherency_pq).all() and - (phase_lag_neo[:, 0]==phase_lag_pq).all()) - self.assertTrue((freqs_neo==freqs_np).all() and - (coherency_neo[:, 0]==coherency_np).all() and - (phase_lag_neo[:, 0]==phase_lag_np).all()) + self.assertTrue((freqs_neo == freqs_pq).all() and + (coherency_neo[:, 0] == coherency_pq).all() and + (phase_lag_neo[:, 0] == phase_lag_pq).all()) + self.assertTrue((freqs_neo == freqs_np).all() and + (coherency_neo[:, 0] == coherency_np).all() and + (phase_lag_neo[:, 0] == phase_lag_np).all()) def test_welch_cohere_multidim_input(self): # generate multidimensional data @@ -278,26 +298,28 @@ def test_welch_cohere_multidim_input(self): # convention in NumPy/SciPy, `data_np` needs to be transposed when its # used to define an AnalogSignal x_neo = n.AnalogSignal(x_np.T, units='mV', - sampling_period=sampling_period*pq.s) + sampling_period=sampling_period * pq.s) y_neo = n.AnalogSignal(y_np.T, units='mV', - sampling_period=sampling_period*pq.s) + sampling_period=sampling_period * pq.s) x_neo_1dim = n.AnalogSignal(x_np[0], units='mV', - sampling_period=sampling_period*pq.s) + sampling_period=sampling_period * pq.s) y_neo_1dim = n.AnalogSignal(y_np[0], units='mV', - sampling_period=sampling_period*pq.s) + sampling_period=sampling_period * pq.s) # check if the results from different input types are identical - freqs_np, coherency_np, phase_lag_np =\ - elephant.spectral.welch_cohere(x_np, y_np, fs=1/sampling_period) + freqs_np, coherency_np, phase_lag_np = elephant.spectral\ + .welch_coherence(x_np, y_np, fs=1 / sampling_period) freqs_neo, coherency_neo, phase_lag_neo =\ - elephant.spectral.welch_cohere(x_neo, y_neo) + elephant.spectral.welch_coherence(x_neo, y_neo) freqs_neo_1dim, coherency_neo_1dim, phase_lag_neo_1dim =\ - elephant.spectral.welch_cohere(x_neo_1dim, y_neo_1dim) - self.assertTrue(np.all(freqs_np==freqs_neo)) - self.assertTrue(np.all(coherency_np.T==coherency_neo)) - self.assertTrue(np.all(phase_lag_np.T==phase_lag_neo)) - self.assertTrue(np.all(coherency_neo_1dim[:, 0]==coherency_neo[:, 0])) - self.assertTrue(np.all(phase_lag_neo_1dim[:, 0]==phase_lag_neo[:, 0])) + elephant.spectral.welch_coherence(x_neo_1dim, y_neo_1dim) + self.assertTrue(np.all(freqs_np == freqs_neo)) + self.assertTrue(np.all(coherency_np.T == coherency_neo)) + self.assertTrue(np.all(phase_lag_np.T == phase_lag_neo)) + 
self.assertTrue( + np.all(coherency_neo_1dim[:, 0] == coherency_neo[:, 0])) + self.assertTrue( + np.all(phase_lag_neo_1dim[:, 0] == phase_lag_neo[:, 0])) def suite(): @@ -307,4 +329,4 @@ def suite(): if __name__ == "__main__": runner = unittest.TextTestRunner(verbosity=2) - runner.run(suite()) \ No newline at end of file + runner.run(suite()) diff --git a/elephant/test/test_spike_train_correlation.py b/elephant/test/test_spike_train_correlation.py index 55d3fae8f..14dfeb3f1 100644 --- a/elephant/test/test_spike_train_correlation.py +++ b/elephant/test/test_spike_train_correlation.py @@ -42,7 +42,7 @@ def setUp(self): # And binned counterparts self.binned_st = conv.BinnedSpikeTrain( [self.st_0, self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) def test_covariance_binned(self): ''' @@ -97,7 +97,7 @@ def test_covariance_binned_same_spiketrains(self): # Calculate correlation binned_st = conv.BinnedSpikeTrain( [self.st_0, self.st_0], t_start=0 * pq.ms, t_stop=50. * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) result = sc.covariance(binned_st, fast=False) # Check dimensions @@ -113,7 +113,7 @@ def test_covariance_binned_short_input(self): # Calculate correlation binned_st = conv.BinnedSpikeTrain( self.st_0, t_start=0 * pq.ms, t_stop=50. * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) result = sc.covariance(binned_st, binary=True, fast=False) # Check result unclipped against result calculated by numpy.corrcoef @@ -130,7 +130,7 @@ def test_covariance_binned_short_input(self): def test_covariance_fast_mode(self): np.random.seed(27) st = homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s) - binned_st = conv.BinnedSpikeTrain(st, num_bins=10) + binned_st = conv.BinnedSpikeTrain(st, n_bins=10) assert_array_almost_equal(sc.covariance(binned_st, fast=False), sc.covariance(binned_st, fast=True)) @@ -157,7 +157,7 @@ def setUp(self): # And binned counterparts self.binned_st = conv.BinnedSpikeTrain( [self.st_0, self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) def test_corrcoef_binned(self): ''' @@ -165,9 +165,9 @@ def test_corrcoef_binned(self): ''' # Calculate clipped and unclipped - res_clipped = sc.corrcoef( + res_clipped = sc.correlation_coefficient( self.binned_st, binary=True) - res_unclipped = sc.corrcoef( + res_unclipped = sc.correlation_coefficient( self.binned_st, binary=False) # Check dimensions @@ -218,15 +218,17 @@ def test_corrcoef_binned_same_spiketrains(self): # Calculate correlation binned_st = conv.BinnedSpikeTrain( [self.st_0, self.st_0], t_start=0 * pq.ms, t_stop=50. * pq.ms, - binsize=1 * pq.ms) - result = sc.corrcoef(binned_st, fast=False) + bin_size=1 * pq.ms) + result = sc.correlation_coefficient(binned_st, fast=False) target = np.ones((2, 2)) # Check dimensions self.assertEqual(len(result), 2) # Check result assert_array_almost_equal(result, target) - assert_array_almost_equal(result, sc.corrcoef(binned_st, fast=True)) + assert_array_almost_equal( + result, sc.correlation_coefficient( + binned_st, fast=True)) def test_corrcoef_binned_short_input(self): ''' @@ -235,14 +237,16 @@ def test_corrcoef_binned_short_input(self): # Calculate correlation binned_st = conv.BinnedSpikeTrain( self.st_0, t_start=0 * pq.ms, t_stop=50. * pq.ms, - binsize=1 * pq.ms) - result = sc.corrcoef(binned_st, fast=False) + bin_size=1 * pq.ms) + result = sc.correlation_coefficient(binned_st, fast=False) target = np.array(1.) 
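For reference, a minimal sketch of the two renames that the correlation tests in this file rely on: the `binsize` keyword of `BinnedSpikeTrain` becomes `bin_size`, and `corrcoef` becomes `correlation_coefficient`. The call signatures are taken from the updated tests above; the spike times below are made up for illustration.

import quantities as pq
import neo
import elephant.conversion as conv
import elephant.spike_train_correlation as sc

# two short spike trains on a common 0-50 ms interval
st_a = neo.SpikeTrain([1.3, 7.6, 15.9, 28.2, 30.9, 34.2] * pq.ms,
                      t_start=0 * pq.ms, t_stop=50. * pq.ms)
st_b = neo.SpikeTrain([1.0, 2.7, 18.8, 28.5, 35.0] * pq.ms,
                      t_start=0 * pq.ms, t_stop=50. * pq.ms)

binned = conv.BinnedSpikeTrain([st_a, st_b], t_start=0 * pq.ms,
                               t_stop=50. * pq.ms, bin_size=1 * pq.ms)
cc = sc.correlation_coefficient(binned, fast=False)  # 2x2 symmetric matrix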
# Check result and dimensionality of result self.assertEqual(result.ndim, 0) assert_array_almost_equal(result, target) - assert_array_almost_equal(result, sc.corrcoef(binned_st, fast=True)) + assert_array_almost_equal( + result, sc.correlation_coefficient( + binned_st, fast=True)) @unittest.skipUnless(python_version_major == 3, "assertWarns requires 3.2") def test_empty_spike_train(self): @@ -252,10 +256,10 @@ def test_empty_spike_train(self): ''' # st_2 is empty binned_12 = conv.BinnedSpikeTrain([self.st_1, self.st_2], - binsize=1 * pq.ms) + bin_size=1 * pq.ms) with self.assertWarns(UserWarning): - result = sc.corrcoef(binned_12, fast=False) + result = sc.correlation_coefficient(binned_12, fast=False) # test for NaNs in the output array target = np.zeros((2, 2)) * np.NaN @@ -265,9 +269,11 @@ def test_empty_spike_train(self): def test_corrcoef_fast_mode(self): np.random.seed(27) st = homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s) - binned_st = conv.BinnedSpikeTrain(st, num_bins=10) - assert_array_almost_equal(sc.corrcoef(binned_st, fast=False), - sc.corrcoef(binned_st, fast=True)) + binned_st = conv.BinnedSpikeTrain(st, n_bins=10) + assert_array_almost_equal( + sc.correlation_coefficient( + binned_st, fast=False), sc.correlation_coefficient( + binned_st, fast=True)) class CrossCorrelationHistogramTest(unittest.TestCase): @@ -289,27 +295,27 @@ def setUp(self): # And binned counterparts self.binned_st1 = conv.BinnedSpikeTrain( [self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) self.binned_st2 = conv.BinnedSpikeTrain( [self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) self.binned_sts = conv.BinnedSpikeTrain( [self.st_1, self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) # Binned sts to check errors raising - self.st_check_binsize = conv.BinnedSpikeTrain( + self.st_check_bin_size = conv.BinnedSpikeTrain( [self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms, - binsize=5 * pq.ms) + bin_size=5 * pq.ms) self.st_check_t_start = conv.BinnedSpikeTrain( [self.st_1], t_start=1 * pq.ms, t_stop=50. * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) self.st_check_t_stop = conv.BinnedSpikeTrain( [self.st_1], t_start=0 * pq.ms, t_stop=40. * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) self.st_check_dimension = conv.BinnedSpikeTrain( [self.st_1, self.st_2], t_start=0 * pq.ms, t_stop=50. 
* pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) def test_cross_correlation_histogram(self): ''' @@ -368,11 +374,11 @@ def test_cross_correlation_histogram(self): st2 = neo.SpikeTrain(self.st_2.magnitude, units='ms', t_start=t0 * pq.ms, t_stop=t1 * pq.ms) binned_sts = conv.BinnedSpikeTrain([st1, st2], - binsize=1 * pq.ms, + bin_size=1 * pq.ms, t_start=t0 * pq.ms, t_stop=t1 * pq.ms) # caluclate corrcoef - corrcoef = sc.corrcoef(binned_sts)[1, 0] + corrcoef = sc.correlation_coefficient(binned_sts)[1, 0] # expand t_stop to have two spike trains with same length as st1, # st2 @@ -384,15 +390,15 @@ def test_cross_correlation_histogram(self): t_stop=self.st_2.t_stop + np.abs(t) * pq.ms) binned_st1 = conv.BinnedSpikeTrain( st1, t_start=0 * pq.ms, t_stop=(50 + np.abs(t)) * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) binned_st2 = conv.BinnedSpikeTrain( st2, t_start=0 * pq.ms, t_stop=(50 + np.abs(t)) * pq.ms, - binsize=1 * pq.ms) + bin_size=1 * pq.ms) # calculate CCHcoef and take value at t=tau CCHcoef, _ = sc.cch(binned_st1, binned_st2, - cross_corr_coef=True) - left_edge = - binned_st1.num_bins + 1 - tau_bin = int(t / float(binned_st1.binsize.magnitude)) + cross_correlation_coefficient=True) + left_edge = - binned_st1.n_bins + 1 + tau_bin = int(t / float(binned_st1.bin_size.magnitude)) assert_array_almost_equal( corrcoef, CCHcoef[tau_bin - left_edge].magnitude) @@ -405,10 +411,10 @@ def test_cross_correlation_histogram(self): # Check the time axis and bin IDs of the resulting AnalogSignal assert_array_almost_equal( - (bin_ids_clipped - 0.5) * self.binned_st1.binsize, + (bin_ids_clipped - 0.5) * self.binned_st1.bin_size, cch_unclipped.times) assert_array_almost_equal( - (bin_ids_clipped - 0.5) * self.binned_st1.binsize, + (bin_ids_clipped - 0.5) * self.binned_st1.bin_size, cch_clipped.times) # Calculate CCH using Elephant (normal and binary version) with @@ -462,10 +468,10 @@ def test_cross_correlation_histogram(self): # Check the time axis and bin IDs of the resulting AnalogSignal assert_array_equal( - (bin_ids_clipped - 0.5) * self.binned_st1.binsize, + (bin_ids_clipped - 0.5) * self.binned_st1.bin_size, cch_unclipped.times) assert_array_equal( - (bin_ids_clipped - 0.5) * self.binned_st1.binsize, + (bin_ids_clipped - 0.5) * self.binned_st1.bin_size, cch_clipped.times) # Check for wrong window parameter setting @@ -479,11 +485,11 @@ def test_cross_correlation_histogram(self): def test_raising_error_wrong_inputs(self): '''Check that an exception is thrown if the two spike trains are not fullfilling the requirement of the function''' - # Check the binsizes are the same + # Check the bin_sizes are the same self.assertRaises( ValueError, sc.cross_correlation_histogram, self.binned_st1, - self.st_check_binsize) + self.st_check_bin_size) # Check input are one dimensional self.assertRaises( ValueError, sc.cross_correlation_histogram, @@ -503,11 +509,11 @@ def test_window(self): self.assertEqual(len(bin_ids), cch_win.shape[0]) assert_array_equal(bin_ids, np.arange(-30, 31, 1)) assert_array_equal( - (bin_ids - 0.5) * self.binned_st1.binsize, cch_win.times) + (bin_ids - 0.5) * self.binned_st1.bin_size, cch_win.times) assert_array_equal(bin_ids_mem, np.arange(-30, 31, 1)) assert_array_equal( - (bin_ids_mem - 0.5) * self.binned_st1.binsize, cch_win.times) + (bin_ids_mem - 0.5) * self.binned_st1.bin_size, cch_win.times) assert_array_equal(cch_win, cch_win_mem) cch_unclipped, _ = sc.cross_correlation_histogram( @@ -574,10 +580,10 @@ def test_border_correction(self): self.binned_st1, self.binned_st2, 
window='full', border_correction=True) - num_bins_outside_window = np.min(np.abs( + n_bins_outside_window = np.min(np.abs( np.subtract.outer(lags_full, valid_lags)), axis=1) - min_num_bins = min(self.binned_st1.num_bins, self.binned_st2.num_bins) + min_n_bins = min(self.binned_st1.n_bins, self.binned_st2.n_bins) border_correction = (cch_full_corrected / cch_full).magnitude.flatten() @@ -586,8 +592,8 @@ def test_border_correction(self): np.testing.assert_array_almost_equal( border_correction[mask], - (float(min_num_bins) - / (min_num_bins - num_bins_outside_window))[mask]) + (float(min_n_bins) + / (min_n_bins - n_bins_outside_window))[mask]) def test_kernel(self): '''Test if the smoothing kernel is correctly defined, and wheter it is @@ -627,9 +633,9 @@ def _run_sub_tests(self, st1, st2, lags_true): for window in ('valid', 'full'): for method in ('speed', 'memory'): with self.subTest(window=window, method=method): - binsize = 1 * pq.s - st1_binned = conv.BinnedSpikeTrain(st1, binsize=binsize) - st2_binned = conv.BinnedSpikeTrain(st2, binsize=binsize) + bin_size = 1 * pq.s + st1_binned = conv.BinnedSpikeTrain(st1, bin_size=bin_size) + st2_binned = conv.BinnedSpikeTrain(st2, bin_size=bin_size) left, right = lags_true[window][(0, -1), ] cch_window, lags_window = sc.cross_correlation_histogram( st1_binned, st2_binned, window=(left, right), @@ -689,14 +695,14 @@ def test_cross_correlation_histogram_valid_no_overlap(self): self._run_sub_tests(st1, st2, lags_true) def test_invalid_time_shift(self): - # time shift of 0.4 s is not multiple of binsize=1 s + # time shift of 0.4 s is not multiple of bin_size=1 s st1 = neo.SpikeTrain([2.5, 3.5] * pq.s, t_start=1 * pq.s, t_stop=7 * pq.s) st2 = neo.SpikeTrain([3.5, 5.5] * pq.s, t_start=1.4 * pq.s, t_stop=7.4 * pq.s) - binsize = 1 * pq.s - st1_binned = conv.BinnedSpikeTrain(st1, binsize=binsize) - st2_binned = conv.BinnedSpikeTrain(st2, binsize=binsize) + bin_size = 1 * pq.s + st1_binned = conv.BinnedSpikeTrain(st1, bin_size=bin_size) + st2_binned = conv.BinnedSpikeTrain(st2, bin_size=bin_size) self.assertRaises(ValueError, sc.cross_correlation_histogram, st1_binned, st2_binned) @@ -773,21 +779,22 @@ def test_timescale_calculation(self): ''' nu = 25 / pq.s T = 15 * pq.min - binsize = 1 * pq.ms + bin_size = 1 * pq.ms timescale = 1 / (4 * nu) + np.random.seed(35) timescale_num = [] for _ in range(10): spikes = homogeneous_gamma_process(2, 2 * nu, 0 * pq.ms, T) - spikes_bin = conv.BinnedSpikeTrain(spikes, binsize) + spikes_bin = conv.BinnedSpikeTrain(spikes, bin_size) timescale_i = sc.spike_train_timescale(spikes_bin, 10 * timescale) timescale_i.units = timescale.units timescale_num.append(timescale_i.magnitude) - target = np.allclose(timescale.magnitude, timescale_num, rtol=2e-1) - self.assertTrue(target) + assert_array_almost_equal(timescale.magnitude, timescale_num, + decimal=3) def test_timescale_errors(self): - spikes = neo.SpikeTrain([1, 5, 7, 8]*pq.ms, t_stop=10*pq.ms) + spikes = neo.SpikeTrain([1, 5, 7, 8] * pq.ms, t_stop=10 * pq.ms) binsize = 1 * pq.ms spikes_bin = conv.BinnedSpikeTrain(spikes, binsize) @@ -797,18 +804,18 @@ def test_timescale_errors(self): sc.spike_train_timescale, spikes_bin, tau_max) # Tau max that is not a multiple of the binsize - tau_max = 1.1*pq.ms + tau_max = 1.1 * pq.ms self.assertRaises(ValueError, sc.spike_train_timescale, spikes_bin, tau_max) @unittest.skipUnless(python_version_major == 3, "assertWarns requires python 3.2") def test_timescale_nan(self): - st0 = neo.SpikeTrain([]*pq.ms, t_stop=10*pq.ms) - st1 = 
neo.SpikeTrain([1]*pq.ms, t_stop=10*pq.ms) - st2 = neo.SpikeTrain([1, 5]*pq.ms, t_stop=10*pq.ms) - st3 = neo.SpikeTrain([1, 5, 6]*pq.ms, t_stop=10*pq.ms) - st4 = neo.SpikeTrain([1, 5, 6, 9]*pq.ms, t_stop=10*pq.ms) + st0 = neo.SpikeTrain([] * pq.ms, t_stop=10 * pq.ms) + st1 = neo.SpikeTrain([1] * pq.ms, t_stop=10 * pq.ms) + st2 = neo.SpikeTrain([1, 5] * pq.ms, t_stop=10 * pq.ms) + st3 = neo.SpikeTrain([1, 5, 6] * pq.ms, t_stop=10 * pq.ms) + st4 = neo.SpikeTrain([1, 5, 6, 9] * pq.ms, t_stop=10 * pq.ms) binsize = 1 * pq.ms tau_max = 1 * pq.ms diff --git a/elephant/test/test_spike_train_dissimilarity.py b/elephant/test/test_spike_train_dissimilarity.py index 3b04fb4ff..9b71d0f03 100644 --- a/elephant/test/test_spike_train_dissimilarity.py +++ b/elephant/test/test_spike_train_dissimilarity.py @@ -16,6 +16,7 @@ import elephant.spike_train_generation as stg import elephant.spike_train_dissimilarity as stds + class TimeScaleDependSpikeTrainDissimMeasures_TestCase(unittest.TestCase): def setUp(self): self.st00 = SpikeTrain([], units='ms', t_stop=1000.0) @@ -36,9 +37,9 @@ def setUp(self): self.st15 = SpikeTrain([0.01, 0.02, 0.03, 0.04, 0.05], units='s', t_stop=1000.0) self.st16 = SpikeTrain([12, 16, 28, 30, 42], units='ms', t_stop=1000.0) - self.st21 = stg.homogeneous_poisson_process(50*Hz, 0*ms, 1000*ms) - self.st22 = stg.homogeneous_poisson_process(40*Hz, 0*ms, 1000*ms) - self.st23 = stg.homogeneous_poisson_process(30*Hz, 0*ms, 1000*ms) + self.st21 = stg.homogeneous_poisson_process(50 * Hz, 0 * ms, 1000 * ms) + self.st22 = stg.homogeneous_poisson_process(40 * Hz, 0 * ms, 1000 * ms) + self.st23 = stg.homogeneous_poisson_process(30 * Hz, 0 * ms, 1000 * ms) self.rd_st_list = [self.st21, self.st22, self.st23] self.st31 = SpikeTrain([12.0], units='ms', t_stop=1000.0) self.st32 = SpikeTrain([12.0, 12.0], units='ms', t_stop=1000.0) @@ -67,136 +68,136 @@ def setUp(self): self.t = np.linspace(0, 200, 20000001) * ms def test_wrong_input(self): - self.assertRaises(TypeError, stds.victor_purpura_dist, + self.assertRaises(TypeError, stds.victor_purpura_distance, [self.array1, self.array2], self.q3) - self.assertRaises(TypeError, stds.victor_purpura_dist, + self.assertRaises(TypeError, stds.victor_purpura_distance, [self.qarray1, self.qarray2], self.q3) - self.assertRaises(TypeError, stds.victor_purpura_dist, + self.assertRaises(TypeError, stds.victor_purpura_distance, [self.qarray1, self.qarray2], 5.0 * ms) - self.assertRaises(TypeError, stds.victor_purpura_dist, + self.assertRaises(TypeError, stds.victor_purpura_distance, [self.array1, self.array2], self.q3, algorithm='intuitive') - self.assertRaises(TypeError, stds.victor_purpura_dist, + self.assertRaises(TypeError, stds.victor_purpura_distance, [self.qarray1, self.qarray2], self.q3, algorithm='intuitive') - self.assertRaises(TypeError, stds.victor_purpura_dist, + self.assertRaises(TypeError, stds.victor_purpura_distance, [self.qarray1, self.qarray2], 5.0 * ms, algorithm='intuitive') - self.assertRaises(TypeError, stds.van_rossum_dist, + self.assertRaises(TypeError, stds.van_rossum_distance, [self.array1, self.array2], self.tau3) - self.assertRaises(TypeError, stds.van_rossum_dist, + self.assertRaises(TypeError, stds.van_rossum_distance, [self.qarray1, self.qarray2], self.tau3) - self.assertRaises(TypeError, stds.van_rossum_dist, + self.assertRaises(TypeError, stds.van_rossum_distance, [self.qarray1, self.qarray2], 5.0 * Hz) - self.assertRaises(TypeError, stds.victor_purpura_dist, + self.assertRaises(TypeError, stds.victor_purpura_distance, [self.st11, 
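# A minimal usage sketch of the renamed calls in the
# test_spike_train_correlation.py hunks above: `corrcoef` becomes
# `correlation_coefficient`, `cross_corr_coef` becomes
# `cross_correlation_coefficient`, and `BinnedSpikeTrain` takes `bin_size`
# and `n_bins` instead of `binsize` and `num_bins`. Values are illustrative.
import quantities as pq
import elephant.conversion as conv
import elephant.spike_train_correlation as sc
from elephant.spike_train_generation import homogeneous_poisson_process

st_a = homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s)
st_b = homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s)
binned_pair = conv.BinnedSpikeTrain([st_a, st_b], bin_size=1 * pq.ms)

# Pairwise correlation matrix of the binned trains.
cc_matrix = sc.correlation_coefficient(binned_pair, fast=True)

# Cross-correlation histogram of two separately binned trains.
cch, lags = sc.cross_correlation_histogram(
    conv.BinnedSpikeTrain(st_a, bin_size=1 * pq.ms),
    conv.BinnedSpikeTrain(st_b, bin_size=1 * pq.ms),
    cross_correlation_coefficient=True)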
self.st13], self.tau2) - self.assertRaises(TypeError, stds.victor_purpura_dist, + self.assertRaises(TypeError, stds.victor_purpura_distance, [self.st11, self.st13], 5.0) - self.assertRaises(TypeError, stds.victor_purpura_dist, + self.assertRaises(TypeError, stds.victor_purpura_distance, [self.st11, self.st13], self.tau2, algorithm='intuitive') - self.assertRaises(TypeError, stds.victor_purpura_dist, + self.assertRaises(TypeError, stds.victor_purpura_distance, [self.st11, self.st13], 5.0, algorithm='intuitive') - self.assertRaises(TypeError, stds.van_rossum_dist, + self.assertRaises(TypeError, stds.van_rossum_distance, [self.st11, self.st13], self.q4) - self.assertRaises(TypeError, stds.van_rossum_dist, + self.assertRaises(TypeError, stds.van_rossum_distance, [self.st11, self.st13], 5.0) - self.assertRaises(NotImplementedError, stds.victor_purpura_dist, + self.assertRaises(NotImplementedError, stds.victor_purpura_distance, [self.st01, self.st02], self.q3, kernel=kernels.Kernel(2.0 / self.q3)) - self.assertRaises(NotImplementedError, stds.victor_purpura_dist, + self.assertRaises(NotImplementedError, stds.victor_purpura_distance, [self.st01, self.st02], self.q3, kernel=kernels.SymmetricKernel(2.0 / self.q3)) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st02], self.q1, - kernel=kernels.TriangularKernel( - 2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], - stds.victor_purpura_dist( - [self.st01, self.st02], self.q3, - kernel=kernels.TriangularKernel( - 2.0 / (np.sqrt(6.0) * self.q2)))[0, 1]) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st02], - kernel=kernels.TriangularKernel( - 2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], 1.0) - self.assertNotEqual(stds.victor_purpura_dist( - [self.st01, self.st02], - kernel=kernels.AlphaKernel( - 2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], 1.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st02], self.q1, + kernel=kernels.TriangularKernel( + 2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], + stds.victor_purpura_distance( + [self.st01, self.st02], self.q3, + kernel=kernels.TriangularKernel( + 2.0 / (np.sqrt(6.0) * self.q2)))[0, 1]) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st02], + kernel=kernels.TriangularKernel( + 2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], 1.0) + self.assertNotEqual(stds.victor_purpura_distance( + [self.st01, self.st02], + kernel=kernels.AlphaKernel( + 2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], 1.0) - self.assertRaises(NameError, stds.victor_purpura_dist, + self.assertRaises(NameError, stds.victor_purpura_distance, [self.st11, self.st13], self.q2, algorithm='slow') def test_victor_purpura_distance_fast(self): # Tests of distances of simplest spike trains: - self.assertEqual(stds.victor_purpura_dist( - [self.st00, self.st00], self.q2)[0, 1], 0.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st00, self.st01], self.q2)[0, 1], 1.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st00], self.q2)[0, 1], 1.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st01], self.q2)[0, 1], 0.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st00, self.st00], self.q2)[0, 1], 0.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st00, self.st01], self.q2)[0, 1], 1.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st00], self.q2)[0, 1], 1.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st01], self.q2)[0, 1], 0.0) # Tests of distances under elementary spike operations - 
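# A minimal usage sketch of the renamed dissimilarity functions exercised in
# the test_spike_train_dissimilarity.py hunks above and below:
# `victor_purpura_dist` becomes `victor_purpura_distance` and
# `van_rossum_dist` becomes `van_rossum_distance`. The spike trains and
# parameter values are illustrative only.
import quantities as pq
from neo import SpikeTrain
import elephant.spike_train_dissimilarity as stds

st_a = SpikeTrain([12, 16, 28, 30, 42], units='ms', t_stop=1000.0)
st_b = SpikeTrain([10, 20, 35], units='ms', t_stop=1000.0)

# Victor-Purpura distance: the cost factor has units of 1/time.
vp = stds.victor_purpura_distance([st_a, st_b], 1.0 * pq.Hz)
# van Rossum distance: the time constant is a time quantity.
vr = stds.van_rossum_distance([st_a, st_b], 10.0 * pq.ms)
# Both return a symmetric distance matrix; the pairwise value is at [0, 1].
vp_01, vr_01 = vp[0, 1], vr[0, 1]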
self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st02], self.q2)[0, 1], 1.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st03], self.q2)[0, 1], 1.9) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st04], self.q2)[0, 1], 2.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st05], self.q2)[0, 1], 2.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st00, self.st07], self.q2)[0, 1], 2.0) - self.assertAlmostEqual(stds.victor_purpura_dist( - [self.st07, self.st08], self.q4)[0, 1], 0.4) - self.assertAlmostEqual(stds.victor_purpura_dist( - [self.st07, self.st10], self.q3)[0, 1], 0.6 + 2) - self.assertEqual(stds.victor_purpura_dist( - [self.st11, self.st14], self.q2)[0, 1], 1) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st02], self.q2)[0, 1], 1.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st03], self.q2)[0, 1], 1.9) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st04], self.q2)[0, 1], 2.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st05], self.q2)[0, 1], 2.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st00, self.st07], self.q2)[0, 1], 2.0) + self.assertAlmostEqual(stds.victor_purpura_distance( + [self.st07, self.st08], self.q4)[0, 1], 0.4) + self.assertAlmostEqual(stds.victor_purpura_distance( + [self.st07, self.st10], self.q3)[0, 1], 0.6 + 2) + self.assertEqual(stds.victor_purpura_distance( + [self.st11, self.st14], self.q2)[0, 1], 1) # Tests on timescales - self.assertEqual(stds.victor_purpura_dist( - [self.st11, self.st14], self.q1)[0, 1], - stds.victor_purpura_dist( - [self.st11, self.st14], self.q5)[0, 1]) - self.assertEqual(stds.victor_purpura_dist( - [self.st07, self.st11], self.q0)[0, 1], 6.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st07, self.st11], self.q1)[0, 1], 6.0) - self.assertAlmostEqual(stds.victor_purpura_dist( - [self.st07, self.st11], self.q5)[0, 1], 2.0, 5) - self.assertEqual(stds.victor_purpura_dist( - [self.st07, self.st11], self.q6)[0, 1], 2.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st11, self.st14], self.q1)[0, 1], + stds.victor_purpura_distance( + [self.st11, self.st14], self.q5)[0, 1]) + self.assertEqual(stds.victor_purpura_distance( + [self.st07, self.st11], self.q0)[0, 1], 6.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st07, self.st11], self.q1)[0, 1], 6.0) + self.assertAlmostEqual(stds.victor_purpura_distance( + [self.st07, self.st11], self.q5)[0, 1], 2.0, 5) + self.assertEqual(stds.victor_purpura_distance( + [self.st07, self.st11], self.q6)[0, 1], 2.0) # Tests on unordered spiketrains - self.assertEqual(stds.victor_purpura_dist( - [self.st11, self.st13], self.q4)[0, 1], - stds.victor_purpura_dist( - [self.st12, self.st13], self.q4)[0, 1]) - self.assertNotEqual(stds.victor_purpura_dist( - [self.st11, self.st13], self.q4, - sort=False)[0, 1], - stds.victor_purpura_dist( - [self.st12, self.st13], self.q4, - sort=False)[0, 1]) + self.assertEqual(stds.victor_purpura_distance( + [self.st11, self.st13], self.q4)[0, 1], + stds.victor_purpura_distance( + [self.st12, self.st13], self.q4)[0, 1]) + self.assertNotEqual(stds.victor_purpura_distance( + [self.st11, self.st13], self.q4, + sort=False)[0, 1], + stds.victor_purpura_distance( + [self.st12, self.st13], self.q4, + sort=False)[0, 1]) # Tests on metric properties with random spiketrains # (explicit calculation of second metric axiom in particular case, # because from dist_matrix it is trivial) - 
dist_matrix = stds.victor_purpura_dist( - [self.st21, self.st22, self.st23], self.q3) + dist_matrix = stds.victor_purpura_distance( + [self.st21, self.st22, self.st23], self.q3) for i in range(3): for j in range(3): self.assertGreaterEqual(dist_matrix[i, j], 0) if dist_matrix[i, j] == 0: assert_array_equal(self.rd_st_list[i], self.rd_st_list[j]) - assert_array_equal(stds.victor_purpura_dist( - [self.st21, self.st22], self.q3), - stds.victor_purpura_dist( - [self.st22, self.st21], self.q3)) + assert_array_equal(stds.victor_purpura_distance( + [self.st21, self.st22], self.q3), + stds.victor_purpura_distance( + [self.st22, self.st21], self.q3)) self.assertLessEqual(dist_matrix[0, 1], dist_matrix[0, 2] + dist_matrix[1, 2]) self.assertLessEqual(dist_matrix[0, 2], @@ -205,119 +206,126 @@ def test_victor_purpura_distance_fast(self): dist_matrix[0, 1] + dist_matrix[0, 2]) # Tests on proper unit conversion self.assertAlmostEqual( - stds.victor_purpura_dist([self.st14, self.st16], self.q3)[0, 1], - stds.victor_purpura_dist([self.st15, self.st16], self.q3)[0, 1]) + stds.victor_purpura_distance([self.st14, self.st16], + self.q3)[0, 1], + stds.victor_purpura_distance([self.st15, self.st16], + self.q3)[0, 1]) self.assertAlmostEqual( - stds.victor_purpura_dist([self.st16, self.st14], self.q3)[0, 1], - stds.victor_purpura_dist([self.st16, self.st15], self.q3)[0, 1]) + stds.victor_purpura_distance([self.st16, self.st14], + self.q3)[0, 1], + stds.victor_purpura_distance([self.st16, self.st15], + self.q3)[0, 1]) self.assertAlmostEqual( - stds.victor_purpura_dist([self.st01, self.st05], self.q3)[0, 1], - stds.victor_purpura_dist([self.st01, self.st05], self.q7)[0, 1]) + stds.victor_purpura_distance([self.st01, self.st05], + self.q3)[0, 1], + stds.victor_purpura_distance([self.st01, self.st05], + self.q7)[0, 1]) # Tests on algorithmic behaviour for equal spike times + self.assertAlmostEqual(stds.victor_purpura_distance( + [self.st31, self.st34], self.q3)[0, 1], 0.8 + 1.0) self.assertAlmostEqual( - stds.victor_purpura_dist([self.st31, self.st34], self.q3)[0, 1], - 0.8 + 1.0) - self.assertAlmostEqual( - stds.victor_purpura_dist([self.st31, self.st34], self.q3)[0, 1], - stds.victor_purpura_dist([self.st32, self.st33], self.q3)[0, 1]) + stds.victor_purpura_distance([self.st31, self.st34], + self.q3)[0, 1], + stds.victor_purpura_distance([self.st32, self.st33], + self.q3)[0, 1]) self.assertAlmostEqual( - stds.victor_purpura_dist( - [self.st31, self.st33], self.q3)[0, 1] * 2.0, - stds.victor_purpura_dist( - [self.st32, self.st34], self.q3)[0, 1]) + stds.victor_purpura_distance( + [self.st31, self.st33], self.q3)[0, 1] * 2.0, + stds.victor_purpura_distance( + [self.st32, self.st34], self.q3)[0, 1]) # Tests on spike train list lengthes smaller than 2 - self.assertEqual(stds.victor_purpura_dist( - [self.st21], self.q3)[0, 0], 0) - self.assertEqual(len(stds.victor_purpura_dist([], self.q3)), 0) + self.assertEqual(stds.victor_purpura_distance( + [self.st21], self.q3)[0, 0], 0) + self.assertEqual(len(stds.victor_purpura_distance([], self.q3)), 0) def test_victor_purpura_distance_intuitive(self): # Tests of distances of simplest spike trains - self.assertEqual(stds.victor_purpura_dist( - [self.st00, self.st00], self.q2, - algorithm='intuitive')[0, 1], 0.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st00, self.st01], self.q2, - algorithm='intuitive')[0, 1], 1.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st00], self.q2, - algorithm='intuitive')[0, 1], 1.0) - 
self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st01], self.q2, - algorithm='intuitive')[0, 1], 0.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st00, self.st00], self.q2, + algorithm='intuitive')[0, 1], 0.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st00, self.st01], self.q2, + algorithm='intuitive')[0, 1], 1.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st00], self.q2, + algorithm='intuitive')[0, 1], 1.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st01], self.q2, + algorithm='intuitive')[0, 1], 0.0) # Tests of distances under elementary spike operations - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st02], self.q2, - algorithm='intuitive')[0, 1], 1.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st03], self.q2, - algorithm='intuitive')[0, 1], 1.9) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st04], self.q2, - algorithm='intuitive')[0, 1], 2.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st05], self.q2, - algorithm='intuitive')[0, 1], 2.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st00, self.st07], self.q2, - algorithm='intuitive')[0, 1], 2.0) - self.assertAlmostEqual(stds.victor_purpura_dist( - [self.st07, self.st08], self.q4, - algorithm='intuitive')[0, 1], 0.4) - self.assertAlmostEqual(stds.victor_purpura_dist( - [self.st07, self.st10], self.q3, - algorithm='intuitive')[0, 1], 2.6) - self.assertEqual(stds.victor_purpura_dist( - [self.st11, self.st14], self.q2, - algorithm='intuitive')[0, 1], 1) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st02], self.q2, + algorithm='intuitive')[0, 1], 1.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st03], self.q2, + algorithm='intuitive')[0, 1], 1.9) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st04], self.q2, + algorithm='intuitive')[0, 1], 2.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st05], self.q2, + algorithm='intuitive')[0, 1], 2.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st00, self.st07], self.q2, + algorithm='intuitive')[0, 1], 2.0) + self.assertAlmostEqual(stds.victor_purpura_distance( + [self.st07, self.st08], self.q4, + algorithm='intuitive')[0, 1], 0.4) + self.assertAlmostEqual(stds.victor_purpura_distance( + [self.st07, self.st10], self.q3, + algorithm='intuitive')[0, 1], 2.6) + self.assertEqual(stds.victor_purpura_distance( + [self.st11, self.st14], self.q2, + algorithm='intuitive')[0, 1], 1) # Tests on timescales - self.assertEqual(stds.victor_purpura_dist( - [self.st11, self.st14], self.q1, - algorithm='intuitive')[0, 1], - stds.victor_purpura_dist( - [self.st11, self.st14], self.q5, - algorithm='intuitive')[0, 1]) - self.assertEqual(stds.victor_purpura_dist( - [self.st07, self.st11], self.q0, - algorithm='intuitive')[0, 1], 6.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st07, self.st11], self.q1, - algorithm='intuitive')[0, 1], 6.0) - self.assertAlmostEqual(stds.victor_purpura_dist( - [self.st07, self.st11], self.q5, - algorithm='intuitive')[0, 1], 2.0, 5) - self.assertEqual(stds.victor_purpura_dist( - [self.st07, self.st11], self.q6, - algorithm='intuitive')[0, 1], 2.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st11, self.st14], self.q1, + algorithm='intuitive')[0, 1], + stds.victor_purpura_distance( + [self.st11, self.st14], self.q5, + algorithm='intuitive')[0, 1]) + self.assertEqual(stds.victor_purpura_distance( + 
[self.st07, self.st11], self.q0, + algorithm='intuitive')[0, 1], 6.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st07, self.st11], self.q1, + algorithm='intuitive')[0, 1], 6.0) + self.assertAlmostEqual(stds.victor_purpura_distance( + [self.st07, self.st11], self.q5, + algorithm='intuitive')[0, 1], 2.0, 5) + self.assertEqual(stds.victor_purpura_distance( + [self.st07, self.st11], self.q6, + algorithm='intuitive')[0, 1], 2.0) # Tests on unordered spiketrains - self.assertEqual(stds.victor_purpura_dist( - [self.st11, self.st13], self.q4, - algorithm='intuitive')[0, 1], - stds.victor_purpura_dist( - [self.st12, self.st13], self.q4, - algorithm='intuitive')[0, 1]) - self.assertNotEqual(stds.victor_purpura_dist( - [self.st11, self.st13], self.q4, - sort=False, algorithm='intuitive')[0, 1], - stds.victor_purpura_dist( - [self.st12, self.st13], self.q4, - sort=False, algorithm='intuitive')[0, 1]) + self.assertEqual(stds.victor_purpura_distance( + [self.st11, self.st13], self.q4, + algorithm='intuitive')[0, 1], + stds.victor_purpura_distance( + [self.st12, self.st13], self.q4, + algorithm='intuitive')[0, 1]) + self.assertNotEqual(stds.victor_purpura_distance( + [self.st11, self.st13], self.q4, + sort=False, algorithm='intuitive')[0, 1], + stds.victor_purpura_distance( + [self.st12, self.st13], self.q4, + sort=False, algorithm='intuitive')[0, 1]) # Tests on metric properties with random spiketrains # (explicit calculation of second metric axiom in particular case, # because from dist_matrix it is trivial) - dist_matrix = stds.victor_purpura_dist( - [self.st21, self.st22, self.st23], - self.q3, algorithm='intuitive') + dist_matrix = stds.victor_purpura_distance( + [self.st21, self.st22, self.st23], + self.q3, algorithm='intuitive') for i in range(3): for j in range(3): self.assertGreaterEqual(dist_matrix[i, j], 0) if dist_matrix[i, j] == 0: assert_array_equal(self.rd_st_list[i], self.rd_st_list[j]) - assert_array_equal(stds.victor_purpura_dist( - [self.st21, self.st22], self.q3, - algorithm='intuitive'), - stds.victor_purpura_dist( - [self.st22, self.st21], self.q3, - algorithm='intuitive')) + assert_array_equal(stds.victor_purpura_distance( + [self.st21, self.st22], self.q3, + algorithm='intuitive'), + stds.victor_purpura_distance( + [self.st22, self.st21], self.q3, + algorithm='intuitive')) self.assertLessEqual(dist_matrix[0, 1], dist_matrix[0, 2] + dist_matrix[1, 2]) self.assertLessEqual(dist_matrix[0, 2], @@ -325,155 +333,155 @@ def test_victor_purpura_distance_intuitive(self): self.assertLessEqual(dist_matrix[1, 2], dist_matrix[0, 1] + dist_matrix[0, 2]) # Tests on proper unit conversion - self.assertAlmostEqual(stds.victor_purpura_dist( - [self.st14, self.st16], self.q3, - algorithm='intuitive')[0, 1], - stds.victor_purpura_dist( - [self.st15, self.st16], self.q3, - algorithm='intuitive')[0, 1]) - self.assertAlmostEqual(stds.victor_purpura_dist( - [self.st16, self.st14], self.q3, - algorithm='intuitive')[0, 1], - stds.victor_purpura_dist( - [self.st16, self.st15], self.q3, - algorithm='intuitive')[0, 1]) - self.assertEqual(stds.victor_purpura_dist( - [self.st01, self.st05], self.q3, - algorithm='intuitive')[0, 1], - stds.victor_purpura_dist( - [self.st01, self.st05], self.q7, - algorithm='intuitive')[0, 1]) + self.assertAlmostEqual(stds.victor_purpura_distance( + [self.st14, self.st16], self.q3, + algorithm='intuitive')[0, 1], + stds.victor_purpura_distance( + [self.st15, self.st16], self.q3, + algorithm='intuitive')[0, 1]) + self.assertAlmostEqual(stds.victor_purpura_distance( + 
[self.st16, self.st14], self.q3, + algorithm='intuitive')[0, 1], + stds.victor_purpura_distance( + [self.st16, self.st15], self.q3, + algorithm='intuitive')[0, 1]) + self.assertEqual(stds.victor_purpura_distance( + [self.st01, self.st05], self.q3, + algorithm='intuitive')[0, 1], + stds.victor_purpura_distance( + [self.st01, self.st05], self.q7, + algorithm='intuitive')[0, 1]) # Tests on algorithmic behaviour for equal spike times - self.assertEqual(stds.victor_purpura_dist( - [self.st31, self.st34], self.q3, - algorithm='intuitive')[0, 1], - 0.8 + 1.0) - self.assertEqual(stds.victor_purpura_dist( - [self.st31, self.st34], self.q3, - algorithm='intuitive')[0, 1], - stds.victor_purpura_dist( - [self.st32, self.st33], self.q3, - algorithm='intuitive')[0, 1]) - self.assertEqual(stds.victor_purpura_dist( - [self.st31, self.st33], self.q3, - algorithm='intuitive')[0, 1] * 2.0, - stds.victor_purpura_dist( - [self.st32, self.st34], self.q3, - algorithm='intuitive')[0, 1]) + self.assertEqual(stds.victor_purpura_distance( + [self.st31, self.st34], self.q3, + algorithm='intuitive')[0, 1], + 0.8 + 1.0) + self.assertEqual(stds.victor_purpura_distance( + [self.st31, self.st34], self.q3, + algorithm='intuitive')[0, 1], + stds.victor_purpura_distance( + [self.st32, self.st33], self.q3, + algorithm='intuitive')[0, 1]) + self.assertEqual(stds.victor_purpura_distance( + [self.st31, self.st33], self.q3, + algorithm='intuitive')[0, 1] * 2.0, + stds.victor_purpura_distance( + [self.st32, self.st34], self.q3, + algorithm='intuitive')[0, 1]) # Tests on spike train list lengthes smaller than 2 - self.assertEqual(stds.victor_purpura_dist( - [self.st21], self.q3, - algorithm='intuitive')[0, 0], 0) - self.assertEqual(len(stds.victor_purpura_dist( + self.assertEqual(stds.victor_purpura_distance( + [self.st21], self.q3, + algorithm='intuitive')[0, 0], 0) + self.assertEqual(len(stds.victor_purpura_distance( [], self.q3, algorithm='intuitive')), 0) def test_victor_purpura_algorithm_comparison(self): assert_array_almost_equal( - stds.victor_purpura_dist([self.st21, self.st22, self.st23], - self.q3), - stds.victor_purpura_dist([self.st21, self.st22, self.st23], - self.q3, algorithm='intuitive')) + stds.victor_purpura_distance([self.st21, self.st22, self.st23], + self.q3), + stds.victor_purpura_distance([self.st21, self.st22, self.st23], + self.q3, algorithm='intuitive')) def test_van_rossum_distance(self): # Tests of distances of simplest spike trains - self.assertEqual(stds.van_rossum_dist( - [self.st00, self.st00], self.tau2)[0, 1], 0.0) - self.assertEqual(stds.van_rossum_dist( - [self.st00, self.st01], self.tau2)[0, 1], 1.0) - self.assertEqual(stds.van_rossum_dist( - [self.st01, self.st00], self.tau2)[0, 1], 1.0) - self.assertEqual(stds.van_rossum_dist( - [self.st01, self.st01], self.tau2)[0, 1], 0.0) + self.assertEqual(stds.van_rossum_distance( + [self.st00, self.st00], self.tau2)[0, 1], 0.0) + self.assertEqual(stds.van_rossum_distance( + [self.st00, self.st01], self.tau2)[0, 1], 1.0) + self.assertEqual(stds.van_rossum_distance( + [self.st01, self.st00], self.tau2)[0, 1], 1.0) + self.assertEqual(stds.van_rossum_distance( + [self.st01, self.st01], self.tau2)[0, 1], 0.0) # Tests of distances under elementary spike operations - self.assertAlmostEqual(stds.van_rossum_dist( - [self.st01, self.st02], self.tau2)[0, 1], - float(np.sqrt(2*(1.0-np.exp(-np.absolute( - ((self.st01[0]-self.st02[0]) / - self.tau2).simplified)))))) - self.assertAlmostEqual(stds.van_rossum_dist( - [self.st01, self.st05], self.tau2)[0, 1], - 
float(np.sqrt(2*(1.0-np.exp(-np.absolute( - ((self.st01[0]-self.st05[0]) / - self.tau2).simplified)))))) - self.assertAlmostEqual(stds.van_rossum_dist( - [self.st01, self.st05], self.tau2)[0, 1], - np.sqrt(2.0), 1) - self.assertAlmostEqual(stds.van_rossum_dist( - [self.st01, self.st06], self.tau2)[0, 1], - np.sqrt(2.0), 20) - self.assertAlmostEqual(stds.van_rossum_dist( - [self.st00, self.st07], self.tau1)[0, 1], - np.sqrt(0 + 2)) - self.assertAlmostEqual(stds.van_rossum_dist( - [self.st07, self.st08], self.tau4)[0, 1], - float(np.sqrt(2*(1.0-np.exp(-np.absolute( - ((self.st07[0]-self.st08[-1]) / - self.tau4).simplified)))))) + self.assertAlmostEqual(stds.van_rossum_distance( + [self.st01, self.st02], self.tau2)[0, 1], + float(np.sqrt(2 * (1.0 - np.exp(-np.absolute( + ((self.st01[0] - self.st02[0]) / + self.tau2).simplified)))))) + self.assertAlmostEqual(stds.van_rossum_distance( + [self.st01, self.st05], self.tau2)[0, 1], + float(np.sqrt(2 * (1.0 - np.exp(-np.absolute( + ((self.st01[0] - self.st05[0]) / + self.tau2).simplified)))))) + self.assertAlmostEqual(stds.van_rossum_distance( + [self.st01, self.st05], self.tau2)[0, 1], + np.sqrt(2.0), 1) + self.assertAlmostEqual(stds.van_rossum_distance( + [self.st01, self.st06], self.tau2)[0, 1], + np.sqrt(2.0), 20) + self.assertAlmostEqual(stds.van_rossum_distance( + [self.st00, self.st07], self.tau1)[0, 1], + np.sqrt(0 + 2)) + self.assertAlmostEqual(stds.van_rossum_distance( + [self.st07, self.st08], self.tau4)[0, 1], + float(np.sqrt(2 * (1.0 - np.exp(-np.absolute( + ((self.st07[0] - self.st08[-1]) / + self.tau4).simplified)))))) f_minus_g_squared = ( - (self.t > self.st08[0]) * np.exp( - -((self.t-self.st08[0])/self.tau3).simplified) + - (self.t > self.st08[1]) * np.exp( - -((self.t-self.st08[1])/self.tau3).simplified) - - (self.t > self.st09[0]) * np.exp( - -((self.t-self.st09[0])/self.tau3).simplified))**2 + (self.t > self.st08[0]) * np.exp( + -((self.t - self.st08[0]) / self.tau3).simplified) + + (self.t > self.st08[1]) * np.exp( + -((self.t - self.st08[1]) / self.tau3).simplified) - + (self.t > self.st09[0]) * np.exp( + -((self.t - self.st09[0]) / self.tau3).simplified))**2 distance = np.sqrt(2.0 * spint.cumtrapz( y=f_minus_g_squared, x=self.t.magnitude)[-1] / self.tau3.rescale(self.t.units).magnitude) - self.assertAlmostEqual(stds.van_rossum_dist( - [self.st08, self.st09], self.tau3)[0, 1], distance, 5) - self.assertAlmostEqual(stds.van_rossum_dist( - [self.st11, self.st14], self.tau2)[0, 1], 1) + self.assertAlmostEqual(stds.van_rossum_distance( + [self.st08, self.st09], self.tau3)[0, 1], distance, 5) + self.assertAlmostEqual(stds.van_rossum_distance( + [self.st11, self.st14], self.tau2)[0, 1], 1) # Tests on timescales self.assertAlmostEqual( - stds.van_rossum_dist([self.st11, self.st14], self.tau1)[0, 1], - stds.van_rossum_dist([self.st11, self.st14], self.tau5)[0, 1]) + stds.van_rossum_distance([self.st11, self.st14], self.tau1)[0, 1], + stds.van_rossum_distance([self.st11, self.st14], self.tau5)[0, 1]) self.assertAlmostEqual( - stds.van_rossum_dist([self.st07, self.st11], self.tau0)[0, 1], - np.sqrt(len(self.st07) + len(self.st11))) + stds.van_rossum_distance([self.st07, self.st11], self.tau0)[0, 1], + np.sqrt(len(self.st07) + len(self.st11))) self.assertAlmostEqual( - stds.van_rossum_dist([self.st07, self.st14], self.tau0)[0, 1], - np.sqrt(len(self.st07) + len(self.st14))) + stds.van_rossum_distance([self.st07, self.st14], self.tau0)[0, 1], + np.sqrt(len(self.st07) + len(self.st14))) self.assertAlmostEqual( - 
stds.van_rossum_dist([self.st07, self.st11], self.tau1)[0, 1], - np.sqrt(len(self.st07) + len(self.st11))) + stds.van_rossum_distance([self.st07, self.st11], self.tau1)[0, 1], + np.sqrt(len(self.st07) + len(self.st11))) self.assertAlmostEqual( - stds.van_rossum_dist([self.st07, self.st14], self.tau1)[0, 1], - np.sqrt(len(self.st07) + len(self.st14))) + stds.van_rossum_distance([self.st07, self.st14], self.tau1)[0, 1], + np.sqrt(len(self.st07) + len(self.st14))) self.assertAlmostEqual( - stds.van_rossum_dist([self.st07, self.st11], self.tau5)[0, 1], - np.absolute(len(self.st07) - len(self.st11))) + stds.van_rossum_distance([self.st07, self.st11], self.tau5)[0, 1], + np.absolute(len(self.st07) - len(self.st11))) self.assertAlmostEqual( - stds.van_rossum_dist([self.st07, self.st14], self.tau5)[0, 1], - np.absolute(len(self.st07) - len(self.st14))) + stds.van_rossum_distance([self.st07, self.st14], self.tau5)[0, 1], + np.absolute(len(self.st07) - len(self.st14))) self.assertAlmostEqual( - stds.van_rossum_dist([self.st07, self.st11], self.tau6)[0, 1], - np.absolute(len(self.st07) - len(self.st11))) + stds.van_rossum_distance([self.st07, self.st11], self.tau6)[0, 1], + np.absolute(len(self.st07) - len(self.st11))) self.assertAlmostEqual( - stds.van_rossum_dist([self.st07, self.st14], self.tau6)[0, 1], - np.absolute(len(self.st07) - len(self.st14))) + stds.van_rossum_distance([self.st07, self.st14], self.tau6)[0, 1], + np.absolute(len(self.st07) - len(self.st14))) # Tests on unordered spiketrains self.assertEqual( - stds.van_rossum_dist([self.st11, self.st13], self.tau4)[0, 1], - stds.van_rossum_dist([self.st12, self.st13], self.tau4)[0, 1]) + stds.van_rossum_distance([self.st11, self.st13], self.tau4)[0, 1], + stds.van_rossum_distance([self.st12, self.st13], self.tau4)[0, 1]) self.assertNotEqual( - stds.van_rossum_dist([self.st11, self.st13], + stds.van_rossum_distance([self.st11, self.st13], self.tau4, sort=False)[0, 1], - stds.van_rossum_dist([self.st12, self.st13], + stds.van_rossum_distance([self.st12, self.st13], self.tau4, sort=False)[0, 1]) - # Tests on metric properties with random spiketrains + # Tests on metric properties with random spiketrains # (explicit calculation of second metric axiom in particular case, # because from dist_matrix it is trivial) - dist_matrix = stds.van_rossum_dist( - [self.st21, self.st22, self.st23], self.tau3) + dist_matrix = stds.van_rossum_distance( + [self.st21, self.st22, self.st23], self.tau3) for i in range(3): for j in range(3): self.assertGreaterEqual(dist_matrix[i, j], 0) if dist_matrix[i, j] == 0: assert_array_equal(self.rd_st_list[i], self.rd_st_list[j]) assert_array_equal( - stds.van_rossum_dist([self.st21, self.st22], self.tau3), - stds.van_rossum_dist([self.st22, self.st21], self.tau3)) + stds.van_rossum_distance([self.st21, self.st22], self.tau3), + stds.van_rossum_distance([self.st22, self.st21], self.tau3)) self.assertLessEqual(dist_matrix[0, 1], dist_matrix[0, 2] + dist_matrix[1, 2]) self.assertLessEqual(dist_matrix[0, 2], @@ -482,39 +490,41 @@ def test_van_rossum_distance(self): dist_matrix[0, 1] + dist_matrix[0, 2]) # Tests on proper unit conversion self.assertAlmostEqual( - stds.van_rossum_dist([self.st14, self.st16], self.tau3)[0, 1], - stds.van_rossum_dist([self.st15, self.st16], self.tau3)[0, 1]) + stds.van_rossum_distance([self.st14, self.st16], self.tau3)[0, 1], + stds.van_rossum_distance([self.st15, self.st16], self.tau3)[0, 1]) self.assertAlmostEqual( - stds.van_rossum_dist([self.st16, self.st14], self.tau3)[0, 1], - 
stds.van_rossum_dist([self.st16, self.st15], self.tau3)[0, 1]) + stds.van_rossum_distance([self.st16, self.st14], self.tau3)[0, 1], + stds.van_rossum_distance([self.st16, self.st15], self.tau3)[0, 1]) self.assertEqual( - stds.van_rossum_dist([self.st01, self.st05], self.tau3)[0, 1], - stds.van_rossum_dist([self.st01, self.st05], self.tau7)[0, 1]) + stds.van_rossum_distance([self.st01, self.st05], self.tau3)[0, 1], + stds.van_rossum_distance([self.st01, self.st05], self.tau7)[0, 1]) # Tests on algorithmic behaviour for equal spike times f_minus_g_squared = ( - (self.t > self.st31[0]) * np.exp( - -((self.t-self.st31[0])/self.tau3).simplified) - - (self.t > self.st34[0]) * np.exp( - -((self.t-self.st34[0])/self.tau3).simplified) - - (self.t > self.st34[1]) * np.exp( - -((self.t-self.st34[1])/self.tau3).simplified))**2 + (self.t > self.st31[0]) * np.exp( + -((self.t - self.st31[0]) / self.tau3).simplified) - + (self.t > self.st34[0]) * np.exp( + -((self.t - self.st34[0]) / self.tau3).simplified) - + (self.t > self.st34[1]) * np.exp( + -((self.t - self.st34[1]) / self.tau3).simplified))**2 distance = np.sqrt(2.0 * spint.cumtrapz( y=f_minus_g_squared, x=self.t.magnitude)[-1] / self.tau3.rescale(self.t.units).magnitude) - self.assertAlmostEqual(stds.van_rossum_dist([self.st31, self.st34], - self.tau3)[0, 1], + self.assertAlmostEqual(stds.van_rossum_distance([self.st31, self.st34], + self.tau3)[0, 1], distance, 5) - self.assertEqual(stds.van_rossum_dist([self.st31, self.st34], - self.tau3)[0, 1], - stds.van_rossum_dist([self.st32, self.st33], - self.tau3)[0, 1]) - self.assertEqual(stds.van_rossum_dist([self.st31, self.st33], - self.tau3)[0, 1] * 2.0, - stds.van_rossum_dist([self.st32, self.st34], - self.tau3)[0, 1]) + self.assertEqual(stds.van_rossum_distance([self.st31, self.st34], + self.tau3)[0, 1], + stds.van_rossum_distance([self.st32, self.st33], + self.tau3)[0, 1]) + self.assertEqual(stds.van_rossum_distance([self.st31, self.st33], + self.tau3)[0, 1] * 2.0, + stds.van_rossum_distance([self.st32, self.st34], + self.tau3)[0, 1]) # Tests on spike train list lengthes smaller than 2 - self.assertEqual(stds.van_rossum_dist([self.st21], self.tau3)[0, 0], 0) - self.assertEqual(len(stds.van_rossum_dist([], self.tau3)), 0) + self.assertEqual(stds.van_rossum_distance( + [self.st21], self.tau3)[0, 0], 0) + self.assertEqual(len(stds.van_rossum_distance([], self.tau3)), 0) + if __name__ == '__main__': unittest.main() diff --git a/elephant/test/test_spike_train_generation.py b/elephant/test/test_spike_train_generation.py index c1786e6f6..b54ae020b 100644 --- a/elephant/test/test_spike_train_generation.py +++ b/elephant/test/test_spike_train_generation.py @@ -138,7 +138,7 @@ def setUp(self): def test_spike_extraction_waveform(self): spike_train = stgen.spike_extraction(self.vm.reshape(-1), - extr_interval=(-1 * ms, 2 * ms)) + interval=(-1 * ms, 2 * ms)) try: assert_array_almost_equal( spike_train.waveforms[0][0].magnitude.reshape(-1), @@ -250,7 +250,7 @@ def test_compare_with_as_array(self): rate=rate, t_stop=t_stop, refractory_period=refractory_period, as_array=True) # don't check with isinstance: Quantity is a subclass of np.ndarray - self.assertTrue(type(spiketrain_array) is np.ndarray) + self.assertTrue(isinstance(spiketrain_array, np.ndarray)) assert_array_almost_equal(spiketrain.times.magnitude, spiketrain_array) @@ -446,7 +446,7 @@ def test_compare_with_as_array(self): spiketrain_array = stgen.homogeneous_gamma_process(a=a, b=b, as_array=True) # don't check with isinstance: pq.Quantity is a subclass 
of np.ndarray - self.assertTrue(type(spiketrain_array) is np.ndarray) + self.assertTrue(isinstance(spiketrain_array, np.ndarray)) assert_array_almost_equal(spiketrain.times.magnitude, spiketrain_array) @@ -461,7 +461,10 @@ def setUp(self): def test_poisson(self): # Check the output types for input rate + n number of neurons - pp = stgen._n_poisson(rate=self.rate, t_stop=self.t_stop, n=self.n) + pp = stgen._n_poisson( + rate=self.rate, + t_stop=self.t_stop, + n_spiketrains=self.n) self.assertIsInstance(pp, list) self.assertIsInstance(pp[0], neo.core.spiketrain.SpikeTrain) self.assertEqual(pp[0].simplified.units, 1000 * ms) @@ -508,8 +511,8 @@ def test_sip(self): # Generate an example SIP mode sip, coinc = stgen.single_interaction_process( - n=self.n, t_stop=self.t_stop, rate=self.rate, - rate_c=self.rate_c, return_coinc=True) + n_spiketrains=self.n, t_stop=self.t_stop, rate=self.rate, + coincidence_rate=self.rate_c, return_coincidences=True) # Check the output types self.assertEqual(type(sip), list) @@ -528,7 +531,7 @@ def test_sip(self): # Generate an example SIP mode giving a list of rates as imput sip, coinc = stgen.single_interaction_process( t_stop=self.t_stop, rate=self.rates, - rate_c=self.rate_c, return_coinc=True) + coincidence_rate=self.rate_c, return_coincidences=True) # Check the output types self.assertEqual(type(sip), list) @@ -544,8 +547,12 @@ def test_sip(self): # Generate an example SIP mode stochastic number of coincidences sip = stgen.single_interaction_process( - n=self.n, t_stop=self.t_stop, rate=self.rate, - rate_c=self.rate_c, coincidences='stochastic', return_coinc=False) + n_spiketrains=self.n, + t_stop=self.t_stop, + rate=self.rate, + coincidence_rate=self.rate_c, + coincidences='stochastic', + return_coincidences=False) # Check the output types self.assertEqual(type(sip), list) @@ -555,25 +562,30 @@ def test_sip(self): def test_sip_error(self): # Negative rate self.assertRaises( - ValueError, stgen.single_interaction_process, n=self.n, + ValueError, stgen.single_interaction_process, n_spiketrains=self.n, rate=-5 * Hz, - rate_c=self.rate_c, t_stop=self.t_stop) + coincidence_rate=self.rate_c, t_stop=self.t_stop) # Negative coincidence rate self.assertRaises( - ValueError, stgen.single_interaction_process, n=self.n, - rate=self.rate, rate_c=-3 * Hz, t_stop=self.t_stop) + ValueError, stgen.single_interaction_process, n_spiketrains=self.n, + rate=self.rate, coincidence_rate=-3 * Hz, t_stop=self.t_stop) # Negative value when rate is a list self.assertRaises( - ValueError, stgen.single_interaction_process, n=self.n, - rate=[-5, 3, 4, 2] * Hz, rate_c=self.rate_c, t_stop=self.t_stop) + ValueError, stgen.single_interaction_process, n_spiketrains=self.n, + rate=[-5, 3, 4, 2] * Hz, coincidence_rate=self.rate_c, + t_stop=self.t_stop) # Negative n self.assertRaises( - ValueError, stgen.single_interaction_process, n=-1, - rate=self.rate, rate_c=self.rate_c, t_stop=self.t_stop) + ValueError, stgen.single_interaction_process, n_spiketrains=-1, + rate=self.rate, coincidence_rate=self.rate_c, t_stop=self.t_stop) # Rate_c < rate self.assertRaises( - ValueError, stgen.single_interaction_process, n=self.n, - rate=self.rate, rate_c=self.rate + 1 * Hz, t_stop=self.t_stop) + ValueError, + stgen.single_interaction_process, + n_spiketrains=self.n, + rate=self.rate, + coincidence_rate=self.rate + 1 * Hz, + t_stop=self.t_stop) class cppTestCase(unittest.TestCase): diff --git a/elephant/test/test_spike_train_surrogates.py b/elephant/test/test_spike_train_surrogates.py index 
a63ffab20..1d288cd08 100644 --- a/elephant/test/test_spike_train_surrogates.py +++ b/elephant/test/test_spike_train_surrogates.py @@ -30,7 +30,7 @@ def test_dither_spikes_output_format(self): n_surrogates = 2 dither = 10 * pq.ms surrogate_trains = surr.dither_spikes( - spiketrain, dither=dither, n=n_surrogates) + spiketrain, dither=dither, n_surrogates=n_surrogates) print(surrogate_trains) self.assertIsInstance(surrogate_trains, list) @@ -49,7 +49,8 @@ def test_dither_spikes_empty_train(self): st = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms) dither = 10 * pq.ms - surrogate_train = surr.dither_spikes(st, dither=dither, n=1)[0] + surrogate_train = surr.dither_spikes( + st, dither=dither, n_surrogates=1)[0] self.assertEqual(len(surrogate_train), 0) def test_dither_spikes_output_decimals(self): @@ -60,7 +61,7 @@ def test_dither_spikes_output_decimals(self): dither = 10 * pq.ms np.random.seed(42) surrogate_trains = surr.dither_spikes( - st, dither=dither, decimals=3, n=n_surrogates) + st, dither=dither, decimals=3, n_surrogates=n_surrogates) np.random.seed(42) dither_values = np.random.random_sample((n_surrogates, len(st))) @@ -82,7 +83,7 @@ def test_dither_spikes_false_edges(self): n_surrogates = 2 dither = 10 * pq.ms surrogate_trains = surr.dither_spikes( - st, dither=dither, n=n_surrogates, edges=False) + st, dither=dither, n_surrogates=n_surrogates, edges=False) for surrogate_train in surrogate_trains: for i in range(len(surrogate_train)): @@ -95,8 +96,8 @@ def test_dither_spikes_with_refractory_period_output_format(self): n_surrogates = 2 dither = 10 * pq.ms surrogate_trains = surr.dither_spikes( - spiketrain, dither=dither, n=n_surrogates, - refractory_period=4*pq.ms) + spiketrain, dither=dither, n_surrogates=n_surrogates, + refractory_period=4 * pq.ms) self.assertIsInstance(surrogate_trains, list) self.assertEqual(len(surrogate_trains), n_surrogates) @@ -122,8 +123,8 @@ def test_dither_spikes_with_refractory_period_empty_train(self): dither = 10 * pq.ms surrogate_train = surr.dither_spikes( - spiketrain, dither=dither, n=1, - refractory_period=4*pq.ms)[0] + spiketrain, dither=dither, n_surrogates=1, + refractory_period=4 * pq.ms)[0] self.assertEqual(len(surrogate_train), 0) def test_randomise_spikes_output_format(self): @@ -132,7 +133,8 @@ def test_randomise_spikes_output_format(self): [90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms) n_surrogates = 2 - surrogate_trains = surr.randomise_spikes(spiketrain, n=n_surrogates) + surrogate_trains = surr.randomise_spikes( + spiketrain, n_surrogates=n_surrogates) self.assertIsInstance(surrogate_trains, list) self.assertEqual(len(surrogate_trains), n_surrogates) @@ -148,7 +150,7 @@ def test_randomise_spikes_empty_train(self): spiketrain = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms) - surrogate_train = surr.randomise_spikes(spiketrain, n=1)[0] + surrogate_train = surr.randomise_spikes(spiketrain, n_surrogates=1)[0] self.assertEqual(len(surrogate_train), 0) def test_randomise_spikes_output_decimals(self): @@ -157,7 +159,7 @@ def test_randomise_spikes_output_decimals(self): n_surrogates = 2 surrogate_trains = surr.randomise_spikes( - spiketrain, n=n_surrogates, decimals=3) + spiketrain, n_surrogates=n_surrogates, decimals=3) for surrogate_train in surrogate_trains: for i in range(len(surrogate_train)): @@ -172,7 +174,8 @@ def test_shuffle_isis_output_format(self): [90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms) n_surrogates = 2 - surrogate_trains = surr.shuffle_isis(spiketrain, n=n_surrogates) + surrogate_trains = surr.shuffle_isis( + 
spiketrain, n_surrogates=n_surrogates) self.assertIsInstance(surrogate_trains, list) self.assertEqual(len(surrogate_trains), n_surrogates) @@ -188,7 +191,7 @@ def test_shuffle_isis_empty_train(self): spiketrain = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms) - surrogate_train = surr.shuffle_isis(spiketrain, n=1)[0] + surrogate_train = surr.shuffle_isis(spiketrain, n_surrogates=1)[0] self.assertEqual(len(surrogate_train), 0) def test_shuffle_isis_same_isis(self): @@ -196,7 +199,7 @@ def test_shuffle_isis_same_isis(self): spiketrain = neo.SpikeTrain( [90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms) - surrogate_train = surr.shuffle_isis(spiketrain, n=1)[0] + surrogate_train = surr.shuffle_isis(spiketrain, n_surrogates=1)[0] st_pq = spiketrain.view(pq.Quantity) surr_pq = surrogate_train.view(pq.Quantity) @@ -214,7 +217,8 @@ def test_shuffle_isis_output_decimals(self): spiketrain = neo.SpikeTrain( [90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms) - surrogate_train = surr.shuffle_isis(spiketrain, n=1, decimals=95)[0] + surrogate_train = surr.shuffle_isis( + spiketrain, n_surrogates=1, decimals=95)[0] st_pq = spiketrain.view(pq.Quantity) surr_pq = surrogate_train.view(pq.Quantity) @@ -235,7 +239,7 @@ def test_dither_spike_train_output_format(self): n_surrogates = 2 shift = 10 * pq.ms surrogate_trains = surr.dither_spike_train( - spiketrain, shift=shift, n=n_surrogates) + spiketrain, shift=shift, n_surrogates=n_surrogates) self.assertIsInstance(surrogate_trains, list) self.assertEqual(len(surrogate_trains), n_surrogates) @@ -253,7 +257,7 @@ def test_dither_spike_train_empty_train(self): shift = 10 * pq.ms surrogate_train = surr.dither_spike_train( - spiketrain, shift=shift, n=1)[0] + spiketrain, shift=shift, n_surrogates=1)[0] self.assertEqual(len(surrogate_train), 0) def test_dither_spike_train_output_decimals(self): @@ -262,7 +266,7 @@ def test_dither_spike_train_output_decimals(self): n_surrogates = 2 shift = 10 * pq.ms surrogate_trains = surr.dither_spike_train( - st, shift=shift, n=n_surrogates, decimals=3) + st, shift=shift, n_surrogates=n_surrogates, decimals=3) for surrogate_train in surrogate_trains: for i in range(len(surrogate_train)): @@ -279,7 +283,7 @@ def test_dither_spike_train_false_edges(self): n_surrogates = 2 shift = 10 * pq.ms surrogate_trains = surr.dither_spike_train( - spiketrain, shift=shift, n=n_surrogates, edges=False) + spiketrain, shift=shift, n_surrogates=n_surrogates, edges=False) for surrogate_train in surrogate_trains: for i in range(len(surrogate_train)): @@ -291,9 +295,9 @@ def test_jitter_spikes_output_format(self): [90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms) n_surrogates = 2 - binsize = 100 * pq.ms + bin_size = 100 * pq.ms surrogate_trains = surr.jitter_spikes( - spiketrain, binsize=binsize, n=n_surrogates) + spiketrain, bin_size=bin_size, n_surrogates=n_surrogates) self.assertIsInstance(surrogate_trains, list) self.assertEqual(len(surrogate_trains), n_surrogates) @@ -309,9 +313,9 @@ def test_jitter_spikes_empty_train(self): spiketrain = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms) - binsize = 75 * pq.ms + bin_size = 75 * pq.ms surrogate_train = surr.jitter_spikes( - spiketrain, binsize=binsize, n=1)[0] + spiketrain, bin_size=bin_size, n_surrogates=1)[0] self.assertEqual(len(surrogate_train), 0) def test_jitter_spikes_same_bins(self): @@ -319,20 +323,20 @@ def test_jitter_spikes_same_bins(self): spiketrain = neo.SpikeTrain( [90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms) - binsize = 100 * pq.ms + bin_size = 100 * pq.ms surrogate_train = 
surr.jitter_spikes( - spiketrain, binsize=binsize, n=1)[0] + spiketrain, bin_size=bin_size, n_surrogates=1)[0] bin_ids_orig = np.array( (spiketrain.view( pq.Quantity) / - binsize).rescale( + bin_size).rescale( pq.dimensionless).magnitude, dtype=int) bin_ids_surr = np.array( (surrogate_train.view( pq.Quantity) / - binsize).rescale( + bin_size).rescale( pq.dimensionless).magnitude, dtype=int) self.assertTrue(np.all(bin_ids_orig == bin_ids_surr)) @@ -341,25 +345,25 @@ def test_jitter_spikes_same_bins(self): # different number of spikes self.assertEqual(len(spiketrain), len(surrogate_train)) - def test_jitter_spikes_unequal_binsize(self): + def test_jitter_spikes_unequal_bin_size(self): spiketrain = neo.SpikeTrain( [90, 150, 180, 480] * pq.ms, t_stop=500 * pq.ms) - binsize = 75 * pq.ms + bin_size = 75 * pq.ms surrogate_train = surr.jitter_spikes( - spiketrain, binsize=binsize, n=1)[0] + spiketrain, bin_size=bin_size, n_surrogates=1)[0] bin_ids_orig = np.array( (spiketrain.view( pq.Quantity) / - binsize).rescale( + bin_size).rescale( pq.dimensionless).magnitude, dtype=int) bin_ids_surr = np.array( (surrogate_train.view( pq.Quantity) / - binsize).rescale( + bin_size).rescale( pq.dimensionless).magnitude, dtype=int) @@ -373,8 +377,8 @@ def test_surr_method(self): surrogate_trains = surr.surrogates( spiketrain, dt=3 * pq.ms, - n=n_surrogates, - surr_method='shuffle_isis', + n_surrogates=n_surrogates, + method='shuffle_isis', edges=False) self.assertRaises(ValueError, surr.surrogates, spiketrain, n=1, @@ -386,8 +390,8 @@ def test_surr_method(self): surrogate_trains2 = surr.surrogates( spiketrain, dt=5 * pq.ms, - n=n_surrogates2, - surr_method='dither_spike_train', + n_surrogates=n_surrogates2, + method='dither_spike_train', edges=True) for surrogate_train in surrogate_trains: @@ -434,7 +438,7 @@ def test_joint_isi_dithering_format(self): joint_isi_instance = surr.JointISI(spiketrain, method='window', dither=2 * dither, - num_bins=50) + n_bins=50) surrogate_trains = joint_isi_instance.dithering( n_surrogates=n_surrogates) @@ -451,7 +455,9 @@ def test_joint_isi_dithering_format(self): # Test surrogate methods wrapper surrogate_trains = surr.surrogates( - spiketrain, n=n_surrogates, surr_method='joint_isi_dithering') + spiketrain, + n_surrogates=n_surrogates, + method='joint_isi_dithering') self.assertIsInstance(surrogate_trains, list) self.assertEqual(len(surrogate_trains), n_surrogates) diff --git a/elephant/test/test_sta.py b/elephant/test/test_sta.py index ac6132eb9..196c0028f 100644 --- a/elephant/test/test_sta.py +++ b/elephant/test/test_sta.py @@ -20,6 +20,7 @@ import elephant.sta as sta import warnings + class sta_TestCase(unittest.TestCase): def setUp(self): @@ -43,8 +44,8 @@ def setUp(self): units='ms', t_stop=self.asiga1.t_stop), SpikeTrain([30, 35, 40], units='ms', t_stop=self.asiga1.t_stop)] - #*********************************************************************** - #************************ Test for typical values ********************** + # *********************************************************************** + # ************************ Test for typical values ********************** def test_spike_triggered_average_with_n_spikes_on_constant_function(self): '''Signal should average to the input''' @@ -58,7 +59,7 @@ def test_spike_triggered_average_with_n_spikes_on_constant_function(self): STA = sta.spike_triggered_average( asiga, st, (window_starttime, window_endtime)) a = int(((window_endtime - window_starttime) * - asiga.sampling_rate).simplified) + asiga.sampling_rate).simplified) 
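# A minimal usage sketch of the renamed surrogate generators exercised in the
# test_spike_train_surrogates.py hunks above: the keyword `n` becomes
# `n_surrogates`, `binsize` becomes `bin_size`, and the `surrogates` wrapper
# takes `method` instead of `surr_method`. Values are illustrative only.
import quantities as pq
import neo
import elephant.spike_train_surrogates as surr

spiketrain = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)

dithered = surr.dither_spikes(spiketrain, dither=10 * pq.ms, n_surrogates=2)
jittered = surr.jitter_spikes(spiketrain, bin_size=100 * pq.ms, n_surrogates=2)
shuffled = surr.surrogates(spiketrain, n_surrogates=2,
                           method='shuffle_isis', dt=3 * pq.ms)
# Each call returns a list of `n_surrogates` neo.SpikeTrain objects.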
cutout = asiga[0: a] cutout.t_start = window_starttime assert_array_almost_equal(STA, cutout, 12) @@ -86,93 +87,106 @@ def test_only_one_spike(self): STA = sta.spike_triggered_average( z, st, (window_starttime, window_endtime)) cutout = z[int(((spiketime + window_starttime) * sr).simplified): - int(((spiketime + window_endtime) * sr).simplified)] + int(((spiketime + window_endtime) * sr).simplified)] cutout.t_start = window_starttime assert_array_equal(STA, cutout) def test_usage_of_spikes(self): - st = SpikeTrain([16.5 * math.pi, 17.5 * math.pi, - 18.5 * math.pi, 19.5 * math.pi], units='ms', t_stop=20 * math.pi) + st = SpikeTrain([16.5 * math.pi, + 17.5 * math.pi, + 18.5 * math.pi, + 19.5 * math.pi], + units='ms', + t_stop=20 * math.pi) STA = sta.spike_triggered_average( self.asiga0, st, (-math.pi * ms, math.pi * ms)) self.assertEqual(STA.annotations['used_spikes'], 3) self.assertEqual(STA.annotations['unused_spikes'], 1) - - #*********************************************************************** - #**** Test for an invalid value, to check that the function raises ***** - #********* an exception or returns an error code *********************** + # *********************************************************************** + # **** Test for an invalid value, to check that the function raises ***** + # ********* an exception or returns an error code *********************** def test_analog_signal_of_wrong_type(self): '''Analog signal given as list, but must be AnalogSignal''' asiga = [0, 1, 2, 3, 4] self.assertRaises(TypeError, sta.spike_triggered_average, - asiga, self.st0, (-2 * ms, 2 * ms)) + asiga, self.st0, (-2 * ms, 2 * ms)) def test_spiketrain_of_list_type_in_wrong_sense(self): st = [10, 11, 12] self.assertRaises(TypeError, sta.spike_triggered_average, - self.asiga0, st, (1 * ms, 2 * ms)) + self.asiga0, st, (1 * ms, 2 * ms)) def test_spiketrain_of_nonlist_and_nonspiketrain_type(self): st = (10, 11, 12) self.assertRaises(TypeError, sta.spike_triggered_average, - self.asiga0, st, (1 * ms, 2 * ms)) + self.asiga0, st, (1 * ms, 2 * ms)) def test_forgotten_AnalogSignal_argument(self): self.assertRaises(TypeError, sta.spike_triggered_average, - self.st0, (-2 * ms, 2 * ms)) + self.st0, (-2 * ms, 2 * ms)) def test_one_smaller_nrspiketrains_smaller_nranalogsignals(self): '''Number of spiketrains between 1 and number of analogsignals''' self.assertRaises(ValueError, sta.spike_triggered_average, - self.asiga2, self.lst, (-2 * ms, 2 * ms)) + self.asiga2, self.lst, (-2 * ms, 2 * ms)) def test_more_spiketrains_than_analogsignals_forbidden(self): self.assertRaises(ValueError, sta.spike_triggered_average, - self.asiga0, self.lst, (-2 * ms, 2 * ms)) + self.asiga0, self.lst, (-2 * ms, 2 * ms)) def test_spike_earlier_than_analogsignal(self): st = SpikeTrain([-1 * math.pi, 2 * math.pi], - units='ms', t_start=-2 * math.pi, t_stop=20 * math.pi) + units='ms', t_start=-2 * math.pi, t_stop=20 * math.pi) self.assertRaises(ValueError, sta.spike_triggered_average, - self.asiga0, st, (-2 * ms, 2 * ms)) + self.asiga0, st, (-2 * ms, 2 * ms)) def test_spike_later_than_analogsignal(self): st = SpikeTrain( [math.pi, 21 * math.pi], units='ms', t_stop=25 * math.pi) self.assertRaises(ValueError, sta.spike_triggered_average, - self.asiga0, st, (-2 * ms, 2 * ms)) + self.asiga0, st, (-2 * ms, 2 * ms)) def test_impossible_window(self): self.assertRaises(ValueError, sta.spike_triggered_average, - self.asiga0, self.st0, (-2 * ms, -5 * ms)) + self.asiga0, self.st0, (-2 * ms, -5 * ms)) def test_window_larger_than_signal(self): - 
self.assertRaises(ValueError, sta.spike_triggered_average, - self.asiga0, self.st0, (-15 * math.pi * ms, 15 * math.pi * ms)) + self.assertRaises( + ValueError, + sta.spike_triggered_average, + self.asiga0, + self.st0, + (-15 * math.pi * ms, + 15 * math.pi * ms)) def test_wrong_window_starttime_unit(self): self.assertRaises(TypeError, sta.spike_triggered_average, - self.asiga0, self.st0, (-2 * mV, 2 * ms)) + self.asiga0, self.st0, (-2 * mV, 2 * ms)) def test_wrong_window_endtime_unit(self): self.assertRaises(TypeError, sta.spike_triggered_average, - self.asiga0, self.st0, (-2 * ms, 2 * Hz)) + self.asiga0, self.st0, (-2 * ms, 2 * Hz)) def test_window_borders_as_complex_numbers(self): - self.assertRaises(TypeError, sta.spike_triggered_average, self.asiga0, - self.st0, ((-2 * math.pi + 3j) * ms, (2 * math.pi + 3j) * ms)) - - #*********************************************************************** - #**** Test for an empty value (where the argument is a list, array, **** - #********* vector or other container datatype). ************************ + self.assertRaises( + TypeError, + sta.spike_triggered_average, + self.asiga0, + self.st0, + ((-2 * math.pi + 3j) * ms, + (2 * math.pi + 3j) * ms)) + + # *********************************************************************** + # **** Test for an empty value (where the argument is a list, array, **** + # ********* vector or other container datatype). ************************ def test_empty_analogsignal(self): asiga = AnalogSignal([], units='mV', sampling_rate=10 / ms) st = SpikeTrain([5], units='ms', t_stop=10) self.assertRaises(ValueError, sta.spike_triggered_average, - asiga, st, (-1 * ms, 1 * ms)) + asiga, st, (-1 * ms, 1 * ms)) def test_one_spiketrain_empty(self): '''Test for one empty SpikeTrain, but existing spikes in other''' @@ -204,7 +218,7 @@ def test_all_spiketrains_empty(self): nan_array = np.empty(20) nan_array.fill(np.nan) cmp_array = AnalogSignal(np.array([nan_array, nan_array]).T, - units='mV', sampling_rate=10 / ms) + units='mV', sampling_rate=10 / ms) assert_array_equal(STA.magnitude, cmp_array.magnitude) @@ -213,7 +227,7 @@ def test_all_spiketrains_empty(self): # ========================================================================= @unittest.skipIf(not hasattr(scipy.signal, 'coherence'), "Please update scipy " - "to a version >= 0.16") + "to a version >= 0.16") class sfc_TestCase_new_scipy(unittest.TestCase): def setUp(self): @@ -230,7 +244,7 @@ def setUp(self): self.st0 = SpikeTrain( np.arange(0, tlen0.rescale(pq.ms).magnitude, 50) * pq.ms, t_start=0 * pq.ms, t_stop=tlen0) - self.bst0 = BinnedSpikeTrain(self.st0, binsize=fs0) + self.bst0 = BinnedSpikeTrain(self.st0, bin_size=fs0) # shortened analogsignals self.anasig1 = self.anasig0.time_slice(1 * pq.s, None) @@ -243,7 +257,7 @@ def setUp(self): units=pq.mV, t_start=0 * pq.ms, sampling_period=fs1) self.bst1 = BinnedSpikeTrain( self.st0.time_slice(self.anasig3.t_start, self.anasig3.t_stop), - binsize=fs1) + bin_size=fs1) # analogsignal containing multiple traces self.anasig4 = AnalogSignal( @@ -259,21 +273,24 @@ def setUp(self): (tlen0.rescale(pq.ms).magnitude * .25), (tlen0.rescale(pq.ms).magnitude * .75), 50) * pq.ms, t_start=0 * pq.ms, t_stop=tlen0) - self.bst3 = BinnedSpikeTrain(self.st3, binsize=fs0) + self.bst3 = BinnedSpikeTrain(self.st3, bin_size=fs0) self.st4 = SpikeTrain(np.arange( (tlen0.rescale(pq.ms).magnitude * .25), (tlen0.rescale(pq.ms).magnitude * .75), 50) * pq.ms, t_start=5 * fs0, t_stop=tlen0 - 5 * fs0) - self.bst4 = BinnedSpikeTrain(self.st4, binsize=fs0) + 
diff --git a/elephant/test/test_statistics.py b/elephant/test/test_statistics.py
index aaf899ef4..9c331c8e6 100644
--- a/elephant/test/test_statistics.py
+++ b/elephant/test/test_statistics.py
@@ -134,7 +134,7 @@ def test_mean_firing_rate_with_spiketrain(self):
 
     def test_mean_firing_rate_typical_use_case(self):
         np.random.seed(92)
-        st = homogeneous_poisson_process(rate=100*pq.Hz, t_stop=100*pq.s)
+        st = homogeneous_poisson_process(rate=100 * pq.Hz, t_stop=100 * pq.s)
         rate1 = statistics.mean_firing_rate(st)
         rate2 = statistics.mean_firing_rate(st, t_start=st.t_start,
                                             t_stop=st.t_stop)
@@ -425,9 +425,10 @@ def setUp(self):
         np.random.seed(19)
         duration_effective = self.st_dur - 2 * self.st_margin
         st_num_spikes = np.random.poisson(self.st_rate * duration_effective)
-        spike_train = np.random.rand(st_num_spikes) * duration_effective + \
-            self.st_margin
-        spike_train.sort()
+        spike_train = sorted(
+            np.random.rand(st_num_spikes) *
+            duration_effective +
+            self.st_margin)
 
         # convert spike train into neo objects
         self.spike_train = neo.SpikeTrain(spike_train * pq.s,
@@ -618,9 +619,10 @@ def test_instantaneous_rate_spiketrainlist(self):
         np.random.seed(19)
         duration_effective = self.st_dur - 2 * self.st_margin
         st_num_spikes = np.random.poisson(self.st_rate * duration_effective)
-        spike_train2 = np.random.rand(st_num_spikes) * duration_effective + \
-            self.st_margin
-        spike_train2.sort()
+        spike_train2 = sorted(
+            np.random.rand(st_num_spikes) *
+            duration_effective +
+            self.st_margin)
         spike_train2 = neo.SpikeTrain(spike_train2 * pq.s,
                                       t_start=self.st_tr[0] * pq.s,
                                       t_stop=self.st_tr[1] * pq.s)
@@ -660,8 +662,8 @@ def test_instantaneous_rate_regression_245(self):
         # of sskernel retrieves the kernel bandwidth of an optimal Gaussian
         # kernel in terms of its standard deviation sigma, then uses this value
         # directly in the function for creating the Gaussian kernel
-        kernel_width_sigma = statistics.sskernel(
-            spiketrain.magnitude, tin=None, bootstrap=False)['optw']
+        kernel_width_sigma = statistics.optimal_kernel_bandwidth(
+            spiketrain.magnitude, times=None, bootstrap=False)['optw']
         kernel = kernels.GaussianKernel(kernel_width_sigma * spiketrain.units)
         result_target = statistics.instantaneous_rate(
             spiketrain, 10 * pq.ms, kernel=kernel)
@@ -695,19 +697,19 @@ def tearDown(self):
 
     def test_time_histogram(self):
         targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
-        histogram = statistics.time_histogram(self.spiketrains, binsize=pq.s)
+        histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s)
         assert_array_equal(targ, histogram.magnitude[:, 0])
 
     def test_time_histogram_binary(self):
         targ = np.array([2, 2, 1, 1, 2, 2, 1, 0, 1, 0])
-        histogram = statistics.time_histogram(self.spiketrains, binsize=pq.s,
+        histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
                                               binary=True)
         assert_array_equal(targ, histogram.magnitude[:, 0])
 
     def test_time_histogram_tstart_tstop(self):
         # Start, stop short range
         targ = np.array([2, 1])
-        histogram = statistics.time_histogram(self.spiketrains, binsize=pq.s,
+        histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
                                               t_start=5 * pq.s,
                                               t_stop=7 * pq.s)
         assert_array_equal(targ, histogram.magnitude[:, 0])
@@ -715,25 +717,25 @@ def test_time_histogram_tstart_tstop(self):
         # Test without t_stop
         targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
         histogram = statistics.time_histogram(self.spiketrains,
-                                              binsize=1 * pq.s,
+                                              bin_size=1 * pq.s,
                                               t_start=0 * pq.s)
         assert_array_equal(targ, histogram.magnitude[:, 0])
 
         # Test without t_start
         histogram = statistics.time_histogram(self.spiketrains,
-                                              binsize=1 * pq.s,
+                                              bin_size=1 * pq.s,
                                               t_stop=10 * pq.s)
         assert_array_equal(targ, histogram.magnitude[:, 0])
 
     def test_time_histogram_output(self):
         # Normalization mean
-        histogram = statistics.time_histogram(self.spiketrains, binsize=pq.s,
+        histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
                                               output='mean')
         targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0], dtype=float) / 2
         assert_array_equal(targ.reshape(targ.size, 1), histogram.magnitude)
 
         # Normalization rate
-        histogram = statistics.time_histogram(self.spiketrains, binsize=pq.s,
+        histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
                                               output='rate')
         assert_array_equal(histogram.view(pq.Quantity),
                            targ.reshape(targ.size, 1) * 1 / pq.s)
@@ -741,7 +743,7 @@ def test_time_histogram_output(self):
         # Normalization unspecified, raises error
         self.assertRaises(ValueError, statistics.time_histogram,
                           self.spiketrains,
-                          binsize=pq.s, output=' ')
+                          bin_size=pq.s, output=' ')
 
 
 class ComplexityPdfTestCase(unittest.TestCase):
@@ -764,7 +766,7 @@ def tearDown(self):
     def test_complexity_pdf(self):
         targ = np.array([0.92, 0.01, 0.01, 0.06])
         complexity = statistics.complexity_pdf(self.spiketrains,
-                                               binsize=0.1 * pq.s)
+                                               bin_size=0.1 * pq.s)
         assert_array_equal(targ, complexity.magnitude[:, 0])
         self.assertEqual(1, complexity.magnitude[:, 0].sum())
         self.assertEqual(len(self.spiketrains) + 1, len(complexity))
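
These statistics tests pin down the behaviour of `time_histogram` under the renamed keyword: with the default `output='counts'` each bin holds the spike count summed over all trains, `output='mean'` divides by the number of trains, and `output='rate'` further divides by `bin_size`. A small sketch of the renamed call, with spike times invented for the example:

>>> import quantities as pq
>>> import neo
>>> from elephant import statistics
>>> sts = [neo.SpikeTrain([0.5, 1.2, 3.3] * pq.s, t_stop=10 * pq.s),
...        neo.SpikeTrain([0.7, 4.1] * pq.s, t_stop=10 * pq.s)]
>>> counts = statistics.time_histogram(sts, bin_size=1 * pq.s)
>>> rate = statistics.time_histogram(sts, bin_size=1 * pq.s, output='rate')

The same renaming applies to `complexity_pdf`, and `sskernel` is now exposed as `optimal_kernel_bandwidth` with its `tin` argument renamed to `times`, as the regression test above shows.
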
diff --git a/elephant/test/test_unitary_event_analysis.py b/elephant/test/test_unitary_event_analysis.py
index 30d2484ad..beee05496 100644
--- a/elephant/test/test_unitary_event_analysis.py
+++ b/elephant/test/test_unitary_event_analysis.py
@@ -273,9 +273,9 @@ def test__rate_mat_avg_trial_default(self):
 
     def test__bintime(self):
         t = 13 * pq.ms
-        binsize = 3 * pq.ms
+        bin_size = 3 * pq.ms
         expected = 4
-        self.assertTrue(np.allclose(expected, ue._bintime(t, binsize)))
+        self.assertTrue(np.allclose(expected, ue._bintime(t, bin_size)))
 
     def test__winpos(self):
         t_start = 10 * pq.ms
@@ -332,11 +332,11 @@ def test_jointJ_window_analysis(self):
         sts2 = self.sts2_neo
         data = np.vstack((sts1, sts2)).T
         winsize = 100 * pq.ms
-        binsize = 5 * pq.ms
+        bin_size = 5 * pq.ms
         winstep = 20 * pq.ms
         pattern_hash = [3]
         UE_dic = ue.jointJ_window_analysis(
-            data, binsize, winsize, winstep, pattern_hash)
+            data, bin_size, winsize, winstep, pattern_hash)
         expected_Js = np.array(
             [0.57953708, 0.47348757, 0.1729669,
              0.01883295, -0.21934742, -0.80608759])
@@ -451,7 +451,7 @@ def test_Riehle_et_al_97_UE(self):
 
         # calculating UE ...
         winsize = 100 * pq.ms
-        binsize = 5 * pq.ms
+        bin_size = 5 * pq.ms
         winstep = 5 * pq.ms
         pattern_hash = [3]
         t_start = spiketrain[0][0].t_start
@@ -460,7 +460,7 @@ def test_Riehle_et_al_97_UE(self):
         significance_level = 0.05
 
         UE = ue.jointJ_window_analysis(
-            spiketrain, binsize, winsize, winstep,
+            spiketrain, bin_size, winsize, winstep,
             pattern_hash, method='analytic_TrialAverage')
         # load extracted data from figure 2 of Riehle et al 1997
         extracted_data = np.load(
@@ -478,11 +478,11 @@ def test_Riehle_et_al_97_UE(self):
             indices_unique_significant = []
             for j in sig_idx_win:
                 significant = indices_unique[np.where(
-                    (indices_unique * binsize >= t_winpos[j]) &
-                    (indices_unique * binsize < t_winpos[j] + winsize))]
+                    (indices_unique * bin_size >= t_winpos[j]) &
+                    (indices_unique * bin_size < t_winpos[j] + winsize))]
                 indices_unique_significant.extend(significant)
             x_tmp = np.unique(indices_unique_significant) * \
-                binsize.magnitude
+                bin_size.magnitude
             if len(x_tmp) > 0:
                 ue_trial = np.sort(extracted_data['ue'][y_cnt])
                 diff_UE_rep = np.append(
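
The `test__bintime` expectation above is plain floor division of a time by the bin width: with `t = 13 ms` and `bin_size = 3 ms`, `floor(13 / 3) = 4`. The same conversion drives the `UserWarning` checks in the module hunk below: a window or step size that is not an integer multiple of `bin_size` loses part of a bin when rounded down, e.g.

>>> import numpy as np
>>> int(np.floor(13 / 3))
4
>>> winsize, bin_size = 100., 5.          # ms; 20 full bins, no warning
>>> int(np.floor(winsize / bin_size)) * bin_size == winsize
True
>>> bin_size = 3.                         # 100 ms is not a multiple of 3 ms
>>> int(np.floor(winsize / bin_size)) * bin_size == winsize
False
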
diff --git a/elephant/unitary_event_analysis.py b/elephant/unitary_event_analysis.py
index e7b7c6dcd..c4269b3a8 100644
--- a/elephant/unitary_event_analysis.py
+++ b/elephant/unitary_event_analysis.py
@@ -627,13 +627,13 @@ def _rate_mat_avg_trial(mat):
     return psth / (n_bins * n_trials)
 
 
-def _bintime(t, binsize):
+def _bintime(t, bin_size):
     """
-    Change the real time to `binsize` units.
+    Change the real time to `bin_size` units.
     """
     t_dl = t.rescale('ms').magnitude
-    binsize_dl = binsize.rescale('ms').magnitude
-    return np.floor(np.array(t_dl) / binsize_dl).astype(int)
+    bin_size_dl = bin_size.rescale('ms').magnitude
+    return np.floor(np.array(t_dl) / bin_size_dl).astype(int)
 
 
 def _winpos(t_start, t_stop, winsize, winstep, position='left-edge'):
@@ -677,7 +677,7 @@ def _UE(mat, pattern_hash, method='analytic_TrialByTrial', n_surr=1):
 
 
 def jointJ_window_analysis(
-        data, binsize, winsize, winstep, pattern_hash,
+        data, bin_size, winsize, winstep, pattern_hash,
         method='analytic_TrialByTrial', t_start=None,
         t_stop=None, binary=True, n_surr=100):
     """
@@ -694,7 +694,7 @@ def jointJ_window_analysis(
         1-axis --> Neurons
         2-axis --> Spike times
 
-    binsize : pq.Quantity
+    bin_size : pq.Quantity
         The size of bins for discretizing spike trains.
     winsize : pq.Quantity
         The size of the window of analysis.
@@ -757,7 +757,7 @@ def jointJ_window_analysis(
     Warns
    -----
     UserWarning
-        The ratio between `winsize` or `winstep` and `binsize` is not an
+        The ratio between `winsize` or `winstep` and `bin_size` is not an
        integer.

    """
@@ -773,30 +773,32 @@ def jointJ_window_analysis(
     # position of all windows (left edges)
     t_winpos = _winpos(t_start, t_stop, winsize, winstep,
                        position='left-edge')
-    t_winpos_bintime = _bintime(t_winpos, binsize)
+    t_winpos_bintime = _bintime(t_winpos, bin_size)
 
-    winsize_bintime = _bintime(winsize, binsize)
-    winstep_bintime = _bintime(winstep, binsize)
+    winsize_bintime = _bintime(winsize, bin_size)
+    winstep_bintime = _bintime(winstep, bin_size)
 
-    if winsize_bintime * binsize != winsize:
-        warnings.warn(
-            "The ratio between winsize ({winsize}) and binsize ({binsize}) is "
-            "not an integer".format(winsize=winsize, binsize=binsize))
+    if winsize_bintime * bin_size != winsize:
+        warnings.warn("The ratio between the winsize ({winsize}) and the "
+                      "bin_size ({bin_size}) is not an integer".format(
+                          winsize=winsize,
+                          bin_size=bin_size))
 
-    if winstep_bintime * binsize != winstep:
-        warnings.warn(
-            "The ratio between winstep ({winstep}) and binsize ({binsize}) is "
-            "not an integer".format(winstep=winstep, binsize=binsize))
+    if winstep_bintime * bin_size != winstep:
+        warnings.warn("The ratio between the winstep ({winstep}) and the "
+                      "bin_size ({bin_size}) is not an integer".format(
+                          winstep=winstep,
+                          bin_size=bin_size))
 
     num_tr, N = np.shape(data)[:2]
-    n_bins = int((t_stop - t_start) / binsize)
+    n_bins = int((t_stop - t_start) / bin_size)
 
     mat_tr_unit_spt = np.zeros((len(data), N, n_bins))
     for tr, sts in enumerate(data):
         sts = list(sts)
         bs = conv.BinnedSpikeTrain(
-            sts, t_start=t_start, t_stop=t_stop, binsize=binsize)
+            sts, t_start=t_start, t_stop=t_stop, bin_size=bin_size)
         if binary is True:
             mat = bs.to_bool_array()
         else:
@@ -826,4 +828,4 @@ def jointJ_window_analysis(
                     'trial' + str(j)] = np.append(
                     indices_win['trial' + str(j)], indices_lst[j][0] + win_pos)
     return {'Js': Js_win, 'indices': indices_win, 'n_emp': n_emp_win,
-            'n_exp': n_exp_win, 'rate_avg': rate_avg / binsize}
+            'n_exp': n_exp_win, 'rate_avg': rate_avg / bin_size}
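
With the rename in place, a unitary-events call now reads as below. The spike times and window parameters are invented for illustration; `winsize` and `winstep` are kept at integer multiples of `bin_size` so neither warning above fires, and the returned dictionary carries the keys `'Js'`, `'indices'`, `'n_emp'`, `'n_exp'` and `'rate_avg'` seen in the return statement above:

>>> import numpy as np
>>> import quantities as pq
>>> import neo
>>> import elephant.unitary_event_analysis as ue
>>> # two trials, two neurons (0-axis trials, 1-axis neurons, as documented)
>>> data = np.empty((2, 2), dtype=object)
>>> data[0, 0] = neo.SpikeTrain([10, 55, 200] * pq.ms, t_stop=300 * pq.ms)
>>> data[0, 1] = neo.SpikeTrain([12, 60, 190] * pq.ms, t_stop=300 * pq.ms)
>>> data[1, 0] = neo.SpikeTrain([30, 120, 280] * pq.ms, t_stop=300 * pq.ms)
>>> data[1, 1] = neo.SpikeTrain([35, 110, 270] * pq.ms, t_stop=300 * pq.ms)
>>> UE = ue.jointJ_window_analysis(data, bin_size=5 * pq.ms,
...                                winsize=100 * pq.ms, winstep=20 * pq.ms,
...                                pattern_hash=[3])

The `deprecated_alias` helper added to `elephant/utils.py` below is presumably what keeps old `binsize=` keyword calls working, with a `DeprecationWarning`, while the new spelling becomes the documented one.
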
""" @@ -773,30 +773,32 @@ def jointJ_window_analysis( # position of all windows (left edges) t_winpos = _winpos(t_start, t_stop, winsize, winstep, position='left-edge') - t_winpos_bintime = _bintime(t_winpos, binsize) + t_winpos_bintime = _bintime(t_winpos, bin_size) - winsize_bintime = _bintime(winsize, binsize) - winstep_bintime = _bintime(winstep, binsize) + winsize_bintime = _bintime(winsize, bin_size) + winstep_bintime = _bintime(winstep, bin_size) - if winsize_bintime * binsize != winsize: - warnings.warn( - "The ratio between winsize ({winsize}) and binsize ({binsize}) is " - "not an integer".format(winsize=winsize, binsize=binsize)) + if winsize_bintime * bin_size != winsize: + warnings.warn("The ratio between the winsize ({winsize}) and the " + "bin_size ({bin_size}) is not an integer".format( + winsize=winsize, + bin_size=bin_size)) - if winstep_bintime * binsize != winstep: - warnings.warn( - "The ratio between winstep ({winstep}) and binsize ({binsize}) is " - "not an integer".format(winstep=winstep, binsize=binsize)) + if winstep_bintime * bin_size != winstep: + warnings.warn("The ratio between the winstep ({winstep}) and the " + "bin_size ({bin_size}) is not an integer".format( + winstep=winstep, + bin_size=bin_size)) num_tr, N = np.shape(data)[:2] - n_bins = int((t_stop - t_start) / binsize) + n_bins = int((t_stop - t_start) / bin_size) mat_tr_unit_spt = np.zeros((len(data), N, n_bins)) for tr, sts in enumerate(data): sts = list(sts) bs = conv.BinnedSpikeTrain( - sts, t_start=t_start, t_stop=t_stop, binsize=binsize) + sts, t_start=t_start, t_stop=t_stop, bin_size=bin_size) if binary is True: mat = bs.to_bool_array() else: @@ -826,4 +828,4 @@ def jointJ_window_analysis( 'trial' + str(j)] = np.append( indices_win['trial' + str(j)], indices_lst[j][0] + win_pos) return {'Js': Js_win, 'indices': indices_win, 'n_emp': n_emp_win, - 'n_exp': n_exp_win, 'rate_avg': rate_avg / binsize} + 'n_exp': n_exp_win, 'rate_avg': rate_avg / bin_size} diff --git a/elephant/utils.py b/elephant/utils.py index c761790f7..957b2e26d 100644 --- a/elephant/utils.py +++ b/elephant/utils.py @@ -1,5 +1,8 @@ from __future__ import division, print_function, unicode_literals +import warnings +from functools import wraps + import numpy as np import quantities as pq @@ -20,6 +23,53 @@ def is_binary(array): return ((array == 0) | (array == 1)).all() +def deprecated_alias(**aliases): + """ + A deprecation decorator constructor. + + Parameters + ---------- + aliases: str + The key-value pairs of mapping old --> new argument names of a + function. + + Returns + ------- + callable + A decorator for the specific mapping of deprecated argument names. + + Examples + -------- + In the example below, `my_function(binsize)` signature is marked as + deprecated (but still usable) and changed to `my_function(bin_size)`. + + >>> @deprecated_alias(binsize='bin_size') + ... def my_function(bin_size): + ... pass + + """ + def deco(func): + @wraps(func) + def wrapper(*args, **kwargs): + _rename_kwargs(func.__name__, kwargs, aliases) + return func(*args, **kwargs) + + return wrapper + + return deco + + +def _rename_kwargs(func_name, kwargs, aliases): + for old, new in aliases.items(): + if old in kwargs: + if new in kwargs: + raise TypeError("{} received both '{}' and '{}'".format( + func_name, old, new)) + warnings.warn("'{}' is deprecated; use '{}'".format(old, new), + DeprecationWarning) + kwargs[new] = kwargs.pop(old) + + def is_time_quantity(x, allow_none=False): """ Parameters