Merge pull request #23 from adtzlr/add-eigh

Add `eigh()` and `expm()`

adtzlr authored Dec 12, 2022
2 parents 37e5c82 + cbc0ece commit 5031ddd
Showing 9 changed files with 163 additions and 7 deletions.
11 changes: 10 additions & 1 deletion README.md
@@ -154,7 +154,7 @@ P_12 = δ(I1_C) # (= Δ(I1_C))
A_1223 = Δδ(I1_C)
```

To obtain full gradients and hessians in one function call, `tensortrax` provides helpers (decorators) which handle the multiple function calls.
To obtain full gradients and hessians of scalar-valued functions in one function call, `tensortrax` provides helpers (decorators) which handle the multiple function calls.

```python
fun = lambda F: trace(F.T() @ F)
@@ -164,6 +164,15 @@ grad = tr.gradient(fun)(x)
hess = tr.hessian(fun)(x)
```

For tensor-valued functions, use `jacobian()` instead of `gradient()`.

```python
fun = lambda F: F.T() @ F

func = tr.jacobian(fun)(x)
```
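A quick shape check for the example above (an illustrative sketch, mirroring `tests/test_jacobian.py`; the variable names are not part of the README): for a `(3, 3)` input, `C = F.T @ F` is `(3, 3)` and its jacobian with respect to `F` is `(3, 3, 3, 3)`.

```python
import numpy as np
import tensortrax as tr

fun = lambda F: F.T() @ F

F = np.eye(3) + np.arange(9).reshape(3, 3) / 10

# full_output=True additionally returns the function value itself
dCdF, C = tr.jacobian(fun, full_output=True)(F)

assert C.shape == (3, 3)
assert dCdF.shape == (3, 3, 3, 3)
```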


Evaluate the gradient- as well as the hessian-vector-product:

```python
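# The rest of this README code block is collapsed in the diff above. As an
# illustrative sketch only (not part of this commit), both products can be
# formed by contracting the full gradient and hessian (computed above) with
# a direction v:
import numpy as np

v = np.eye(3)  # an arbitrary direction; assumes x is a plain (3, 3) tensor

gvp = np.einsum("ij...,ij...->...", grad, v)      # gradient-vector-product
hvp = np.einsum("ijkl...,kl...->ij...", hess, v)  # hessian-vector-product

# tensortrax also exports a dedicated `gradient_vector_product()` helper
# (see the `tensortrax/__init__.py` diff below); its signature is not shown
# in this diff.
```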
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "tensortrax"
version = "0.2.2"
version = "0.2.3"
description = "Math on (Hyper-Dual) Tensors with Trailing Axes"
readme = "README.md"
requires-python = ">=3.7"
2 changes: 1 addition & 1 deletion setup.cfg
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[metadata]
name = tensortrax
version = 0.2.2
version = 0.2.3
author = Andreas Dutzler
author_email = a.dutzler@gmail.com
description = Math on (Hyper-Dual) Tensors with Trailing Axes
1 change: 1 addition & 0 deletions tensortrax/__init__.py
@@ -6,6 +6,7 @@
from ._helpers import f, δ, Δ, Δδ
from ._evaluate import (
function,
jacobian,
gradient,
hessian,
gradient_vector_product,
47 changes: 47 additions & 0 deletions tensortrax/_evaluate.py
@@ -53,6 +53,53 @@ def evaluate_function(*args, **kwargs):
return evaluate_function


def jacobian(fun, wrt=0, ntrax=0, parallel=False, full_output=False):
"Evaluate the jacobian of a function."

def evaluate_jacobian(*args, **kwargs):

x = arg_to_tensor(args, kwargs, wrt)
t = Tensor(x, ntrax=ntrax)
δx = Δx = np.eye(t.size)
indices = range(t.size)

args0, kwargs0 = add_tensor(args, kwargs, wrt, None, None, ntrax)
shape = fun(*args0, **kwargs0).shape
axes = tuple([slice(None)] * len(shape))

fx = np.zeros((*shape, *t.trax))
dfdx = np.zeros((*shape, t.size, *t.trax))

def kernel(a, wrt, δx, Δx, ntrax, args, kwargs):
args, kwargs = add_tensor(args, kwargs, wrt, δx[a], Δx[a], ntrax)
func = fun(*args, **kwargs)
fx[axes] = f(func)
dfdx[(*axes, a)] = δ(func)

if not parallel:
for a in indices:
kernel(a, wrt, δx, Δx, ntrax, args, kwargs)

else:
threads = [
Thread(target=kernel, args=(a, wrt, δx, Δx, ntrax, args, kwargs))
for a in indices
]

for th in threads:
th.start()

for th in threads:
th.join()

if full_output:
return np.array(dfdx).reshape(*shape, *t.shape, *t.trax), fx
else:
return np.array(dfdx).reshape(*shape, *t.shape, *t.trax)

return evaluate_jacobian


def gradient(fun, wrt=0, ntrax=0, parallel=False, full_output=False):
"Evaluate the gradient of a scalar-valued function."

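Like `gradient()` and `hessian()`, the new `jacobian()` helper accepts `wrt`, `ntrax`, `parallel` and `full_output` arguments. A minimal usage sketch, mirroring `tests/test_jacobian.py` (the two trailing axes of `F` are treated as elementwise batch axes via `ntrax=2`):

```python
import numpy as np
import tensortrax as tr


def right_cauchy_green(F):
    return F.T() @ F


# 3x3 tensor with two trailing (batch) axes of length one
F = (np.eye(3).ravel() + np.arange(9) / 10).reshape(3, 3, 1, 1)

dCdF, C = tr.jacobian(right_cauchy_green, ntrax=2, full_output=True)(F)

assert C.shape == (3, 3, 1, 1)
assert dCdF.shape == (3, 3, 3, 3, 1, 1)
```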
2 changes: 1 addition & 1 deletion tensortrax/math/_linalg/__init__.py
@@ -9,4 +9,4 @@
"""

from ._linalg_array import det as _det, inv as _inv
from ._linalg_tensor import det, inv, eigvalsh
from ._linalg_tensor import det, inv, eigvalsh, eigh, expm
59 changes: 58 additions & 1 deletion tensortrax/math/_linalg/_linalg_tensor.py
@@ -12,7 +12,7 @@
import numpy as np

from ..._tensor import Tensor, einsum, matmul, f, δ, Δ, Δδ
from .._math_tensor import transpose, ddot, sum
from .._math_tensor import transpose, ddot, sum, exp
from . import _linalg_array as array


@@ -104,3 +104,60 @@ def eigvalsh(A):
Δδx=Δδλ,
ntrax=A.ntrax,
)


def eigh(A):
"Eigenvalues and -bases of a symmetric Tensor (only first derivative)."

λ, N = [x.T for x in np.linalg.eigh(f(A).T)]
N = transpose(N)
M = einsum("ai...,aj...->aij...", N, N)

δλ = einsum("aij...,ij...->a...", M, δ(A))
Δλ = einsum("aij...,ij...->a...", M, Δ(A))

Γ = [(1, 2), (2, 0), (0, 1)]

δN = []
for α in range(3):
δNα = []
for γ in Γ[α]:
Mαγ = einsum("i...,j...->ij...", N[α], N[γ])
δAαγ = einsum("ij...,ij...->...", Mαγ, δ(A))
λαγ = λ[α] - λ[γ]
λ_equal = np.isclose(λ[α], λ[γ])
if np.any(λ_equal):
if len(λαγ.shape) == 0:
λαγ = np.inf
else:
λαγ[λ_equal] = np.inf
δNα.append(1 / λαγ * N[γ] * δAαγ)
δN.append(sum(δNα, axis=0))

δM = einsum("ai...,aj...->aij...", δN, N) + einsum("ai...,aj...->aij...", N, δN)
Δδλ = einsum("aij...,ij...->a...", δM, Δ(A)) + einsum(
"aij...,ij...->a...", M, Δδ(A)
)

return (
Tensor(
x=λ,
δx=δλ,
Δx=Δλ,
Δδx=Δδλ,
ntrax=A.ntrax,
),
Tensor(
x=M,
δx=δM,
Δx=δM * np.nan,
Δδx=δM * np.nan,
ntrax=A.ntrax,
),
)


def expm(A):
"Compute the matrix exponential of a symmetric array."
λ, M = eigh(A)
return einsum("a...,aij...->ij...", exp(λ), M)
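`expm()` follows directly from the spectral decomposition returned by `eigh()`, i.e. `exp(A) = Σₐ exp(λₐ) Mₐ` with eigenbases `Mₐ = Nₐ ⊗ Nₐ`. A short sanity sketch (not part of this commit), reusing the diagonal matrix from `tests/test_math.py`:

```python
import numpy as np
import tensortrax as tr
import tensortrax.math as tm

A = np.diag([1.2, 1.2, 2.0])  # symmetric, with a repeated eigenvalue
T = tr.Tensor(A)

λ, M = tm.linalg.eigh(T)
assert λ.shape == (3,)       # eigenvalues
assert M.shape == (3, 3, 3)  # eigenbases Mₐ = Nₐ ⊗ Nₐ

# for a diagonal argument, the matrix exponential reduces to the elementwise
# exponential of the diagonal entries
assert np.allclose(tm.linalg.expm(T).x, np.diag(np.exp([1.2, 1.2, 2.0])))
```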
28 changes: 28 additions & 0 deletions tests/test_jacobian.py
@@ -0,0 +1,28 @@
import tensortrax as tr
import tensortrax.math as tm
import numpy as np


def right_cauchy_green(F):
return F.T() @ F


def test_jacobian():

F = (np.eye(3).ravel() + np.arange(9) / 10).reshape(3, 3, 1, 1)

for parallel in [False, True]:
for fun in [right_cauchy_green]:
c = tr.function(fun, ntrax=2, parallel=parallel)(F)
dCdF = tr.jacobian(fun, ntrax=2, parallel=parallel)(F)
dCdF, C = tr.jacobian(fun, ntrax=2, parallel=parallel, full_output=True)(F)

assert c.shape == (3, 3, 1, 1)
assert C.shape == (3, 3, 1, 1)
assert dCdF.shape == (3, 3, 3, 3, 1, 1)

assert np.allclose(C, c)


if __name__ == "__main__":
test_jacobian()
18 changes: 16 additions & 2 deletions tests/test_math.py
@@ -63,11 +63,13 @@ def test_math():
]:
assert np.allclose(fun(F), fun(T).x)

for fun in [tm.linalg.det]:
for fun in [tm.linalg.det, tm.linalg.inv]:
assert np.allclose(fun(F), fun(T).x)

assert tm.linalg.eigvalsh(T).shape == (3,)

assert tm.linalg.expm(T).shape == (3, 3)

assert tm.array.cross(F, F).shape == F.shape
assert tm.array.eye(F).shape == F.shape

@@ -104,9 +106,21 @@ def test_math():

tm.reshape(t, (9,))
tm.reshape(t, (3, 3))

tm.reshape(x, (3, 3, 100))

F = np.diag([1.2, 1.2, 2.0])
T = tr.Tensor(F)

assert tm.linalg.eigh(T)[0].shape == (3,)
assert tm.linalg.eigh(T)[1].shape == (3, 3, 3)

F = np.tile(F.reshape(3, 3, 1), 5)
T = tr.Tensor(F, ntrax=1)

assert tm.linalg.eigh(T)[0].shape == (3,)
assert tm.linalg.eigh(T)[1].shape == (3, 3, 3)


if __name__ == "__main__":
test_math()
