diff --git a/CHANGELOG.md b/CHANGELOG.md index c520f40..9b20d60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Change Log +## 0.7.0 + +- [#19](https://github.com/nikdon/pyEntropy/pull/19) - `weighted_permutation_entropy` + ## 0.6.0 - [#15](https://github.com/nikdon/pyEntropy/pull/15) - Sample entropy ignores last `M` values (thanks @CSchoel) diff --git a/README.md b/README.md index e95d904..914dc5c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # pyEntropy (pyEntrp) -[![pypi](https://img.shields.io/badge/pypi-0.6.0-green.svg)](https://pypi.python.org/pypi/pyentrp/0.6.0) +[![pypi](https://img.shields.io/badge/pypi-0.7.0-green.svg)](https://pypi.python.org/pypi/pyentrp/0.7.0) [![Build Status](https://travis-ci.org/nikdon/pyEntropy.svg?branch=master)](https://travis-ci.org/nikdon/pyEntropy) [![codecov](https://codecov.io/gh/nikdon/pyEntropy/branch/master/graph/badge.svg)](https://codecov.io/gh/nikdon/pyEntropy) ![py27 status](https://img.shields.io/badge/python2.7-supported-green.svg) @@ -12,14 +12,13 @@ This is a small set of functions on top of NumPy that help to compute different types of entropy for time series analysis. 
-Currently available: - + Shannon Entropy ```shannon_entropy``` + Sample Entropy ```sample_entropy``` + Multiscale Entropy ```multiscale_entropy``` + Composite Multiscale Entropy ```composite_multiscale_entropy``` + Permutation Entropy ```permutation_entropy``` + Multiscale Permutation Entropy ```multiscale_permutation_entropy``` ++ Weighted Permutation Entropy ```weighted_permutation_entropy``` ## Quick start @@ -47,6 +46,7 @@ sample_entropy = ent.sample_entropy(ts, 4, 0.2 * std_ts) * [Jakob Dreyer](https://github.com/jakobdreyer) * [Raphael Vallat](https://github.com/raphaelvallat) * [Christopher Schölzel](https://github.com/CSchoel) +* [Sam Dotson](https://github.com/samgdotson) Contributions are very welcome, documentation improvements/corrections, bug reports, even feature requests :) diff --git a/pyentrp/entropy.py b/pyentrp/entropy.py index 17e7497..3a2e13f 100644 --- a/pyentrp/entropy.py +++ b/pyentrp/entropy.py @@ -2,7 +2,6 @@ from __future__ import unicode_literals -import itertools import numpy as np from math import factorial @@ -73,8 +72,8 @@ def util_granulate_time_series(time_series, scale): """ n = len(time_series) b = int(np.fix(n / scale)) - temp = np.reshape(time_series[0:b*scale], (b, scale)) - cts = np.mean(temp, axis = 1) + temp = np.reshape(time_series[0:b * scale], (b, scale)) + cts = np.mean(temp, axis=1) return cts @@ -110,7 +109,7 @@ def shannon_entropy(time_series): return ent -def sample_entropy(time_series, sample_length, tolerance = None): +def sample_entropy(time_series, sample_length, tolerance=None): """Calculates the sample entropy of degree m of a time_series. This method uses chebychev norm. @@ -135,36 +134,35 @@ def sample_entropy(time_series, sample_length, tolerance = None): [3] Madalena Costa, Ary Goldberger, CK Peng. 
Multiscale entropy analysis of biological signals """ - #The code below follows the sample length convention of Ref [1] so: - M = sample_length - 1; + # The code below follows the sample length convention of Ref [1] so: + M = sample_length - 1 time_series = np.array(time_series) if tolerance is None: - tolerance = 0.1*np.std(time_series) + tolerance = 0.1 * np.std(time_series) n = len(time_series) - #Ntemp is a vector that holds the number of matches. N[k] holds matches templates of length k + # Ntemp is a vector that holds the number of matches. N[k] holds matches templates of length k Ntemp = np.zeros(M + 2) - #Templates of length 0 matches by definition: - Ntemp[0] = n*(n - 1) / 2 - + # Templates of length 0 matches by definition: + Ntemp[0] = n * (n - 1) / 2 for i in range(n - M - 1): - template = time_series[i:(i+M+1)];#We have 'M+1' elements in the template - rem_time_series = time_series[i+1:] + template = time_series[i:(i + M + 1)] # We have 'M+1' elements in the template + rem_time_series = time_series[i + 1:] - searchlist = np.arange(len(rem_time_series) - M, dtype=np.int32) - for length in range(1, len(template)+1): - hitlist = np.abs(rem_time_series[searchlist] - template[length-1]) < tolerance - Ntemp[length] += np.sum(hitlist) - searchlist = searchlist[hitlist] + 1 + search_list = np.arange(len(rem_time_series) - M, dtype=np.int32) + for length in range(1, len(template) + 1): + hit_list = np.abs(rem_time_series[search_list] - template[length - 1]) < tolerance + Ntemp[length] += np.sum(hit_list) + search_list = search_list[hit_list] + 1 - sampen = - np.log(Ntemp[1:] / Ntemp[:-1]) + sampen = -np.log(Ntemp[1:] / Ntemp[:-1]) return sampen -def multiscale_entropy(time_series, sample_length, tolerance = None, maxscale = None): +def multiscale_entropy(time_series, sample_length, tolerance=None, maxscale=None): """Calculate the Multiscale Entropy of the given time series considering different time-scales of the time series. 
@@ -181,15 +179,16 @@ def multiscale_entropy(time_series, sample_length, tolerance = None, maxscale = """ if tolerance is None: - #we need to fix the tolerance at this level. If it remains 'None' it will be changed in call to sample_entropy() - tolerance = 0.1*np.std(time_series) + # We need to fix the tolerance at this level + # If it remains 'None' it will be changed in call to sample_entropy() + tolerance = 0.1 * np.std(time_series) if maxscale is None: maxscale = len(time_series) mse = np.zeros(maxscale) for i in range(maxscale): - temp = util_granulate_time_series(time_series, i+1) + temp = util_granulate_time_series(time_series, i + 1) mse[i] = sample_entropy(temp, sample_length, tolerance)[-1] return mse @@ -289,73 +288,72 @@ def multiscale_permutation_entropy(time_series, m, delay, scale): def weighted_permutation_entropy(time_series, order=2, delay=1, normalize=False): - """Calculate the weighted permuation entropy. Weighted permutation - entropy captures the information in the amplitude of a signal where - standard permutation entropy only measures the information in the - ordinal pattern, "motif." - - Parameters - ---------- - time_series : list or np.array - Time series - order : int - Order of permutation entropy - delay : int - Time delay - normalize : bool - If True, divide by log2(factorial(m)) to normalize the entropy - between 0 and 1. Otherwise, return the permutation entropy in bit. - - Returns - ------- - wpe : float - Weighted Permutation Entropy - - References - ---------- - .. [1] Bilal Fadlallah, Badong Chen, Andreas Keil, and José Príncipe - Phys. Rev. E 87, 022911 – Published 20 February 2013 - - Notes - ----- - Last updated (March 2021) by Samuel Dotson (samgdotson@gmail.com) - - Examples - -------- - 1. Weighted permutation entropy with order 2 - - >>> x = [4, 7, 9, 10, 6, 11, 3] - >>> # Return a value between 0 and log2(factorial(order)) - >>> print(permutation_entropy(x, order=2)) - 0.912 - - 2. 
Normalized weighted permutation entropy with order 3 - - >>> x = [4, 7, 9, 10, 6, 11, 3] - >>> # Return a value comprised between 0 and 1. - >>> print(permutation_entropy(x, order=3, normalize=True)) - 0.547 - """ - x = _embed(time_series, order=order, delay=delay) - - weights = np.var(x, axis=1) - sorted_idx = x.argsort(kind='quicksort', axis=1) - motifs, c = np.unique(sorted_idx, return_counts=True, axis=0) - pw = np.zeros(len(motifs)) - - # TODO hashmap - for i, j in zip(weights, sorted_idx): - idx = int(np.where((j==motifs).sum(1)==order)[0]) - pw[idx] += i - - pw /= weights.sum() - - b = np.log2(pw) - wpe = -np.dot(pw, b) - if normalize: - wpe /= np.log2(factorial(order)) - return wpe + """Calculate the weighted permutation entropy. Weighted permutation + entropy captures the information in the amplitude of a signal where + standard permutation entropy only measures the information in the + ordinal pattern, "motif." + + Parameters + ---------- + time_series : list or np.array + Time series + order : int + Order of permutation entropy + delay : int + Time delay + normalize : bool + If True, divide by log2(factorial(m)) to normalize the entropy + between 0 and 1. Otherwise, return the permutation entropy in bit. + + Returns + ------- + wpe : float + Weighted Permutation Entropy + References + ---------- + .. [1] Bilal Fadlallah, Badong Chen, Andreas Keil, and José Príncipe + Phys. Rev. E 87, 022911 – Published 20 February 2013 + + Notes + ----- + Last updated (March 2021) by Samuel Dotson (samgdotson@gmail.com) + + Examples + -------- + 1. Weighted permutation entropy with order 2 + + >>> x = [4, 7, 9, 10, 6, 11, 3] + >>> # Return a value between 0 and log2(factorial(order)) + >>> print(weighted_permutation_entropy(x, order=2)) + 0.912 + + 2. Normalized weighted permutation entropy with order 3 + + >>> x = [4, 7, 9, 10, 6, 11, 3] + >>> # Return a value comprised between 0 and 1. 
+ >>> print(weighted_permutation_entropy(x, order=3, normalize=True)) + 0.547 + """ + x = _embed(time_series, order=order, delay=delay) + + weights = np.var(x, axis=1) + sorted_idx = x.argsort(kind='quicksort', axis=1) + motifs, c = np.unique(sorted_idx, return_counts=True, axis=0) + pw = np.zeros(len(motifs)) + + # TODO hashmap + for i, j in zip(weights, sorted_idx): + idx = int(np.where((j == motifs).sum(1) == order)[0]) + pw[idx] += i + + pw /= weights.sum() + + b = np.log2(pw) + wpe = -np.dot(pw, b) + if normalize: + wpe /= np.log2(factorial(order)) + return wpe # TODO add tests diff --git a/setup.py b/setup.py index 2cca0b4..ebf5a7b 100644 --- a/setup.py +++ b/setup.py @@ -2,10 +2,10 @@ setup( name='pyentrp', - version='0.6.0', + version='0.7.0', description='Functions on top of NumPy for computing different types of entropy', url='https://github.com/nikdon/pyEntropy', - download_url='https://github.com/nikdon/pyEntropy/archive/0.6.0.tar.gz', + download_url='https://github.com/nikdon/pyEntropy/archive/0.7.0.tar.gz', author='Nikolay Donets', author_email='nd.startup@gmail.com', maintainer='Nikolay Donets', @@ -18,8 +18,15 @@ ], test_suite="tests.test_entropy", - keywords=['entropy', 'python', 'sample entropy', 'multiscale entropy', 'permutation entropy', - 'composite multiscale entropy'], + keywords=[ + 'python', + 'entropy', + 'sample entropy', + 'multiscale entropy', + 'permutation entropy', + 'composite multiscale entropy', + 'multiscale permutation entropy' + ], classifiers=[ 'Development Status :: 5 - Production/Stable', @@ -36,6 +43,9 @@ 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering :: Information Analysis',