Skip to content

Commit

Permalink
prepare release 0.7.0
Browse files Browse the repository at this point in the history
  • Loading branch information
nikdon committed Jun 24, 2021
1 parent 9d63700 commit 2275f0d
Show file tree
Hide file tree
Showing 4 changed files with 108 additions and 96 deletions.
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
# Change Log

## 0.7.0

- [#19](https://github.com/nikdon/pyEntropy/pull/19) - `weighted_permutation_entropy`

## 0.6.0

- [#15](https://github.com/nikdon/pyEntropy/pull/15) - Sample entropy ignores last `M` values (thanks @CSchoel)
Expand Down
6 changes: 3 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# pyEntropy (pyEntrp)

[![pypi](https://img.shields.io/badge/pypi-0.6.0-green.svg)](https://pypi.python.org/pypi/pyentrp/0.6.0)
[![pypi](https://img.shields.io/badge/pypi-0.7.0-green.svg)](https://pypi.python.org/pypi/pyentrp/0.7.0)
[![Build Status](https://travis-ci.org/nikdon/pyEntropy.svg?branch=master)](https://travis-ci.org/nikdon/pyEntropy)
[![codecov](https://codecov.io/gh/nikdon/pyEntropy/branch/master/graph/badge.svg)](https://codecov.io/gh/nikdon/pyEntropy)
![py27 status](https://img.shields.io/badge/python2.7-supported-green.svg)
Expand All @@ -12,14 +12,13 @@

This is a small set of functions on top of NumPy that help to compute different types of entropy for time series analysis.

Currently available:

+ Shannon Entropy ```shannon_entropy```
+ Sample Entropy ```sample_entropy```
+ Multiscale Entropy ```multiscale_entropy```
+ Composite Multiscale Entropy ```composite_multiscale_entropy```
+ Permutation Entropy ```permutation_entropy```
+ Multiscale Permutation Entropy ```multiscale_permutation_entropy```
+ Weighted Permutation Entropy ```weighted_permutation_entropy```

## Quick start

Expand Down Expand Up @@ -47,6 +46,7 @@ sample_entropy = ent.sample_entropy(ts, 4, 0.2 * std_ts)
* [Jakob Dreyer](https://github.com/jakobdreyer)
* [Raphael Vallat](https://github.com/raphaelvallat)
* [Christopher Schölzel](https://github.com/CSchoel)
* [Sam Dotson](https://github.com/samgdotson)

Contributions are very welcome, documentation improvements/corrections, bug reports, even feature requests :)

Expand Down
176 changes: 87 additions & 89 deletions pyentrp/entropy.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

from __future__ import unicode_literals

import itertools
import numpy as np
from math import factorial

Expand Down Expand Up @@ -73,8 +72,8 @@ def util_granulate_time_series(time_series, scale):
"""
n = len(time_series)
b = int(np.fix(n / scale))
temp = np.reshape(time_series[0:b*scale], (b, scale))
cts = np.mean(temp, axis = 1)
temp = np.reshape(time_series[0:b * scale], (b, scale))
cts = np.mean(temp, axis=1)
return cts


Expand Down Expand Up @@ -110,7 +109,7 @@ def shannon_entropy(time_series):
return ent


def sample_entropy(time_series, sample_length, tolerance = None):
def sample_entropy(time_series, sample_length, tolerance=None):
"""Calculates the sample entropy of degree m of a time_series.
    This method uses the Chebyshev norm.
Expand All @@ -135,36 +134,35 @@ def sample_entropy(time_series, sample_length, tolerance = None):
[3] Madalena Costa, Ary Goldberger, CK Peng. Multiscale entropy analysis
of biological signals
"""
#The code below follows the sample length convention of Ref [1] so:
M = sample_length - 1;
# The code below follows the sample length convention of Ref [1] so:
M = sample_length - 1

time_series = np.array(time_series)
if tolerance is None:
tolerance = 0.1*np.std(time_series)
tolerance = 0.1 * np.std(time_series)

n = len(time_series)

#Ntemp is a vector that holds the number of matches. N[k] holds matches templates of length k
# Ntemp is a vector that holds the number of matches. N[k] holds matches templates of length k
Ntemp = np.zeros(M + 2)
#Templates of length 0 matches by definition:
Ntemp[0] = n*(n - 1) / 2

# Templates of length 0 matches by definition:
Ntemp[0] = n * (n - 1) / 2

for i in range(n - M - 1):
template = time_series[i:(i+M+1)];#We have 'M+1' elements in the template
rem_time_series = time_series[i+1:]
template = time_series[i:(i + M + 1)] # We have 'M+1' elements in the template
rem_time_series = time_series[i + 1:]

searchlist = np.arange(len(rem_time_series) - M, dtype=np.int32)
for length in range(1, len(template)+1):
hitlist = np.abs(rem_time_series[searchlist] - template[length-1]) < tolerance
Ntemp[length] += np.sum(hitlist)
searchlist = searchlist[hitlist] + 1
search_list = np.arange(len(rem_time_series) - M, dtype=np.int32)
for length in range(1, len(template) + 1):
hit_list = np.abs(rem_time_series[search_list] - template[length - 1]) < tolerance
Ntemp[length] += np.sum(hit_list)
search_list = search_list[hit_list] + 1

sampen = - np.log(Ntemp[1:] / Ntemp[:-1])
sampen = -np.log(Ntemp[1:] / Ntemp[:-1])
return sampen


def multiscale_entropy(time_series, sample_length, tolerance = None, maxscale = None):
def multiscale_entropy(time_series, sample_length, tolerance=None, maxscale=None):
"""Calculate the Multiscale Entropy of the given time series considering
different time-scales of the time series.
Expand All @@ -181,15 +179,16 @@ def multiscale_entropy(time_series, sample_length, tolerance = None, maxscale =
"""

if tolerance is None:
#we need to fix the tolerance at this level. If it remains 'None' it will be changed in call to sample_entropy()
tolerance = 0.1*np.std(time_series)
# We need to fix the tolerance at this level
# If it remains 'None' it will be changed in call to sample_entropy()
tolerance = 0.1 * np.std(time_series)
if maxscale is None:
maxscale = len(time_series)

mse = np.zeros(maxscale)

for i in range(maxscale):
temp = util_granulate_time_series(time_series, i+1)
temp = util_granulate_time_series(time_series, i + 1)
mse[i] = sample_entropy(temp, sample_length, tolerance)[-1]
return mse

Expand Down Expand Up @@ -289,73 +288,72 @@ def multiscale_permutation_entropy(time_series, m, delay, scale):


def weighted_permutation_entropy(time_series, order=2, delay=1, normalize=False):
    """Calculate the weighted permutation entropy.

    Weighted permutation entropy captures the information in the amplitude
    of a signal where standard permutation entropy only measures the
    information in the ordinal pattern, "motif."

    Parameters
    ----------
    time_series : list or np.array
        Time series
    order : int
        Order of permutation entropy
    delay : int
        Time delay
    normalize : bool
        If True, divide by log2(factorial(m)) to normalize the entropy
        between 0 and 1. Otherwise, return the permutation entropy in bit.

    Returns
    -------
    wpe : float
        Weighted Permutation Entropy

    References
    ----------
    .. [1] Bilal Fadlallah, Badong Chen, Andreas Keil, and José Príncipe
           Phys. Rev. E 87, 022911 – Published 20 February 2013

    Notes
    -----
    Last updated (March 2021) by Samuel Dotson (samgdotson@gmail.com)

    Examples
    --------
    1. Weighted permutation entropy with order 2

        >>> x = [4, 7, 9, 10, 6, 11, 3]
        >>> # Return a value between 0 and log2(factorial(order))
        >>> print(weighted_permutation_entropy(x, order=2))
        0.912

    2. Normalized weighted permutation entropy with order 3

        >>> x = [4, 7, 9, 10, 6, 11, 3]
        >>> # Return a value comprised between 0 and 1.
        >>> print(weighted_permutation_entropy(x, order=3, normalize=True))
        0.547
    """
    # Delay-embed the series: one row per window of length `order`.
    x = _embed(time_series, order=order, delay=delay)

    # Each window is weighted by its variance, so high-amplitude windows
    # contribute more than flat ones (Ref. [1]).
    weights = np.var(x, axis=1)
    sorted_idx = x.argsort(kind='quicksort', axis=1)
    motifs = np.unique(sorted_idx, axis=0)
    pw = np.zeros(len(motifs))

    # Accumulate each window's weight onto its motif. A pattern matches a
    # motif exactly when all `order` positions agree.
    # TODO hashmap
    for weight, pattern in zip(weights, sorted_idx):
        idx = int(np.where((pattern == motifs).sum(1) == order)[0])
        pw[idx] += weight

    pw /= weights.sum()

    # Shannon entropy of the weighted motif distribution. Motifs with zero
    # accumulated weight (all their windows had zero variance) are skipped,
    # following the convention 0 * log(0) = 0; the previous
    # np.dot(pw, np.log2(pw)) produced NaN in that case.
    nonzero = pw > 0
    wpe = -np.dot(pw[nonzero], np.log2(pw[nonzero]))
    if normalize:
        wpe /= np.log2(factorial(order))
    return wpe


# TODO add tests
Expand Down
18 changes: 14 additions & 4 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,10 @@

setup(
name='pyentrp',
version='0.6.0',
version='0.7.0',
description='Functions on top of NumPy for computing different types of entropy',
url='https://github.com/nikdon/pyEntropy',
download_url='https://github.com/nikdon/pyEntropy/archive/0.6.0.tar.gz',
download_url='https://github.com/nikdon/pyEntropy/archive/0.7.0.tar.gz',
author='Nikolay Donets',
author_email='nd.startup@gmail.com',
maintainer='Nikolay Donets',
Expand All @@ -18,8 +18,15 @@
],
test_suite="tests.test_entropy",

keywords=['entropy', 'python', 'sample entropy', 'multiscale entropy', 'permutation entropy',
'composite multiscale entropy'],
keywords=[
'python',
'entropy',
'sample entropy',
'multiscale entropy',
'permutation entropy',
'composite multiscale entropy',
'multiscale permutation entropy'
],

classifiers=[
'Development Status :: 5 - Production/Stable',
Expand All @@ -36,6 +43,9 @@
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',

'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Information Analysis',
Expand Down

0 comments on commit 2275f0d

Please sign in to comment.