Merge pull request #114 from adtzlr/add-linalg-eigvalsh-eigh-array
Fix `math.linalg.eigvalsh()` and `math.linalg.eigh()` for arrays
adtzlr authored Jun 6, 2024
2 parents 2402c82 + e62fdbb commit ca0e5f6
Showing 3 changed files with 129 additions and 113 deletions.
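
The fix dispatches on the argument type: `Tensor` input keeps the hyper-dual code path, while plain NumPy arrays now fall back to batched `np.linalg` calls instead of failing (the old path accessed Tensor-only data such as `A.ntrax`). A minimal usage sketch with illustrative values, assuming tensortrax >= 0.25.1:

import numpy as np
import tensortrax as tr
import tensortrax.math as tm

# A symmetric matrix, e.g. a right Cauchy-Green tensor C = F.T @ F.
F = np.eye(3) + np.diag([0.1, 0.2, 0.3])
C = F.T @ F

# New: plain ndarray in -> plain ndarray (or pair of ndarrays) out.
w = tm.linalg.eigvalsh(C)
λ, M = tm.linalg.eigh(C)  # eigenvalues and eigenbases

# As before: Tensor in -> Tensor out, with dual (derivative) data attached.
V = tr.Tensor(C)
W = tm.linalg.eigvalsh(V)

# Primal values agree up to the small eps-perturbation on the Tensor path.
assert np.allclose(w, W.x)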
2 changes: 1 addition & 1 deletion src/tensortrax/__about__.py

@@ -2,4 +2,4 @@
 tensorTRAX: Math on (Hyper-Dual) Tensors with Trailing Axes.
 """
 
-__version__ = "0.25.0"
+__version__ = "0.25.1"
235 changes: 124 additions & 111 deletions src/tensortrax/math/linalg/_linalg_tensor.py

@@ -2,7 +2,6 @@
 tensorTRAX: Math on (Hyper-Dual) Tensors with Trailing Axes.
 """
 
-
 import numpy as np
 
 from ..._tensor import Tensor, Δ, Δδ, einsum, f, matmul, δ
@@ -67,130 +66,144 @@ def pinv(A):
 def eigvalsh(A, eps=np.sqrt(np.finfo(float).eps)):
     "Eigenvalues of a symmetric Tensor."
 
-    A[0, 0] += eps
-    A[1, 1] -= eps
-
-    λ, N = [x.T for x in np.linalg.eigh(f(A).T)]
-    M = einsum("ai...,aj...->aij...", N, N)
-
-    dim = len(λ)
-
-    δλ = einsum("aij...,ij...->a...", M, δ(A))
-    Δλ = einsum("aij...,ij...->a...", M, Δ(A))
-
-    # alpha = [0, 1, 2]
-    # beta = [(1, 2), (2, 0), (0, 1)]
-    alpha = np.arange(dim)
-    beta = [
-        np.concatenate([np.arange(a + 1, dim), np.arange(a)]) for a in np.arange(dim)
-    ]
-
-    δN = []
-    for α in alpha:
-        δNα = []
-        for β in beta[α]:
-            Mαβ = einsum("i...,j...->ij...", N[α], N[β])
-            δAαβ = einsum("ij...,ij...->...", Mαβ, δ(A))
-            λαβ = λ[α] - λ[β]
-            δNα.append(1 / λαβ * N[β] * δAαβ)
-        δN.append(sum(δNα, axis=0))
-
-    δM = einsum("ai...,aj...->aij...", δN, N) + einsum("ai...,aj...->aij...", N, δN)
-    Δδλ = einsum("aij...,ij...->a...", δM, Δ(A)) + einsum(
-        "aij...,ij...->a...", M, Δδ(A)
-    )
-
-    return Tensor(
-        x=λ,
-        δx=δλ,
-        Δx=Δλ,
-        Δδx=Δδλ,
-        ntrax=A.ntrax,
-    )
+    if isinstance(A, Tensor):
+
+        A[0, 0] += eps
+        A[1, 1] -= eps
+
+        λ, N = [x.T for x in np.linalg.eigh(f(A).T)]
+        M = einsum("ai...,aj...->aij...", N, N)
+
+        dim = len(λ)
+
+        δλ = einsum("aij...,ij...->a...", M, δ(A))
+        Δλ = einsum("aij...,ij...->a...", M, Δ(A))
+
+        # alpha = [0, 1, 2]
+        # beta = [(1, 2), (2, 0), (0, 1)]
+
+        alpha = np.arange(dim)
+        beta = [
+            np.concatenate([np.arange(a + 1, dim), np.arange(a)])
+            for a in np.arange(dim)
+        ]
+
+        δN = []
+        for α in alpha:
+            δNα = []
+            for β in beta[α]:
+                Mαβ = einsum("i...,j...->ij...", N[α], N[β])
+                δAαβ = einsum("ij...,ij...->...", Mαβ, δ(A))
+                λαβ = λ[α] - λ[β]
+                δNα.append(1 / λαβ * N[β] * δAαβ)
+            δN.append(sum(δNα, axis=0))
+
+        δM = einsum("ai...,aj...->aij...", δN, N) + einsum("ai...,aj...->aij...", N, δN)
+        Δδλ = einsum("aij...,ij...->a...", δM, Δ(A)) + einsum(
+            "aij...,ij...->a...", M, Δδ(A)
+        )
+
+        return Tensor(
+            x=λ,
+            δx=δλ,
+            Δx=Δλ,
+            Δδx=Δδλ,
+            ntrax=A.ntrax,
+        )
+
+    else:
+        return np.linalg.eigvalsh(A.T).T
 
 
 def eigh(A, eps=np.sqrt(np.finfo(float).eps)):
     "Eigenvalues and -bases of a symmetric Tensor (only first derivative)."
 
-    A[0, 0] += eps
-    A[1, 1] -= eps
-
-    λ, N = [x.T for x in np.linalg.eigh(f(A).T)]
-    M = einsum("ai...,aj...->aij...", N, N)
-
-    dim = len(λ)
-
-    δλ = einsum("aij...,ij...->a...", M, δ(A))
-    Δλ = einsum("aij...,ij...->a...", M, Δ(A))
-
-    # alpha = [0, 1, 2]
-    # beta = [(1, 2), (2, 0), (0, 1)]
-    alpha = np.arange(dim)
-    beta = [
-        np.concatenate([np.arange(a + 1, dim), np.arange(a)]) for a in np.arange(dim)
-    ]
-
-    δN = []
-    ΔN = []
-    for α in alpha:
-        δNα = []
-        ΔNα = []
-        for β in beta[α]:
-            Mαβ = einsum("i...,j...->ij...", N[α], N[β])
-            δAαβ = einsum("ij...,ij...->...", Mαβ, δ(A))
-            ΔAαβ = einsum("ij...,ij...->...", Mαβ, Δ(A))
-            λαβ = λ[α] - λ[β]
-            δNα.append(1 / λαβ * N[β] * δAαβ)
-            ΔNα.append(1 / λαβ * N[β] * ΔAαβ)
-        δN.append(sum(δNα, axis=0))
-        ΔN.append(sum(ΔNα, axis=0))
-
-    ΔδN = []
-    for α in alpha:
-        ΔδNα = []
-        for β in beta[α]:
-            Mαβ = einsum("i...,j...->ij...", N[α], N[β])
-            δAαβ = einsum("ij...,ij...->...", Mαβ, δ(A))
-            ΔδAαβ = einsum("ij...,ij...->...", Mαβ, Δδ(A))
-            λαβ = λ[α] - λ[β]
-            Δλαβ = Δλ[α] - Δλ[β]
-            ΔδNα.append(
-                -(λαβ**-2) * Δλαβ * N[β] * δAαβ
-                + 1 / λαβ * ΔN[β] * δAαβ
-                + 1 / λαβ * N[β] * ΔδAαβ
-            )
-        ΔδN.append(sum(ΔδNα, axis=0))
-
-    δM = einsum("ai...,aj...->aij...", δN, N) + einsum("ai...,aj...->aij...", N, δN)
-    ΔM = einsum("ai...,aj...->aij...", ΔN, N) + einsum("ai...,aj...->aij...", N, ΔN)
-    Δδλ = einsum("aij...,ij...->a...", δM, Δ(A)) + einsum(
-        "aij...,ij...->a...", M, Δδ(A)
-    )
-
-    ΔδM = (
-        einsum("ai...,aj...->aij...", δN, ΔN)
-        + einsum("ai...,aj...->aij...", ΔN, δN)
-        + einsum("ai...,aj...->aij...", ΔδN, N)
-        + einsum("ai...,aj...->aij...", N, ΔδN)
-    )
-
-    return (
-        Tensor(
-            x=λ,
-            δx=δλ,
-            Δx=Δλ,
-            Δδx=Δδλ,
-            ntrax=A.ntrax,
-        ),
-        Tensor(
-            x=M,
-            δx=δM,
-            Δx=ΔM,
-            Δδx=ΔδM,
-            ntrax=A.ntrax,
-        ),
-    )
+    if isinstance(A, Tensor):
+
+        A[0, 0] += eps
+        A[1, 1] -= eps
+
+        λ, N = [x.T for x in np.linalg.eigh(f(A).T)]
+        M = einsum("ai...,aj...->aij...", N, N)
+
+        dim = len(λ)
+
+        δλ = einsum("aij...,ij...->a...", M, δ(A))
+        Δλ = einsum("aij...,ij...->a...", M, Δ(A))
+
+        # alpha = [0, 1, 2]
+        # beta = [(1, 2), (2, 0), (0, 1)]
+        alpha = np.arange(dim)
+        beta = [
+            np.concatenate([np.arange(a + 1, dim), np.arange(a)])
+            for a in np.arange(dim)
+        ]
+
+        δN = []
+        ΔN = []
+        for α in alpha:
+            δNα = []
+            ΔNα = []
+            for β in beta[α]:
+                Mαβ = einsum("i...,j...->ij...", N[α], N[β])
+                δAαβ = einsum("ij...,ij...->...", Mαβ, δ(A))
+                ΔAαβ = einsum("ij...,ij...->...", Mαβ, Δ(A))
+                λαβ = λ[α] - λ[β]
+                δNα.append(1 / λαβ * N[β] * δAαβ)
+                ΔNα.append(1 / λαβ * N[β] * ΔAαβ)
+            δN.append(sum(δNα, axis=0))
+            ΔN.append(sum(ΔNα, axis=0))
+
+        ΔδN = []
+        for α in alpha:
+            ΔδNα = []
+            for β in beta[α]:
+                Mαβ = einsum("i...,j...->ij...", N[α], N[β])
+                δAαβ = einsum("ij...,ij...->...", Mαβ, δ(A))
+                ΔδAαβ = einsum("ij...,ij...->...", Mαβ, Δδ(A))
+                λαβ = λ[α] - λ[β]
+                Δλαβ = Δλ[α] - Δλ[β]
+                ΔδNα.append(
+                    -(λαβ**-2) * Δλαβ * N[β] * δAαβ
+                    + 1 / λαβ * ΔN[β] * δAαβ
+                    + 1 / λαβ * N[β] * ΔδAαβ
+                )
+            ΔδN.append(sum(ΔδNα, axis=0))
+
+        δM = einsum("ai...,aj...->aij...", δN, N) + einsum("ai...,aj...->aij...", N, δN)
+        ΔM = einsum("ai...,aj...->aij...", ΔN, N) + einsum("ai...,aj...->aij...", N, ΔN)
+        Δδλ = einsum("aij...,ij...->a...", δM, Δ(A)) + einsum(
+            "aij...,ij...->a...", M, Δδ(A)
+        )
+
+        ΔδM = (
+            einsum("ai...,aj...->aij...", δN, ΔN)
+            + einsum("ai...,aj...->aij...", ΔN, δN)
+            + einsum("ai...,aj...->aij...", ΔδN, N)
+            + einsum("ai...,aj...->aij...", N, ΔδN)
+        )
+
+        return (
+            Tensor(
+                x=λ,
+                δx=δλ,
+                Δx=Δλ,
+                Δδx=Δδλ,
+                ntrax=A.ntrax,
+            ),
+            Tensor(
+                x=M,
+                δx=δM,
+                Δx=ΔM,
+                Δδx=ΔδM,
+                ntrax=A.ntrax,
+            ),
+        )
+
+    else:
+        λ, N = [x.T for x in np.linalg.eigh(A.T)]
+        M = einsum("ai...,aj...->aij...", N, N)
+        return λ, M
 
 
 def expm(A):
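
For distinct eigenvalues these are the standard first-order perturbation results for a symmetric matrix: δλ_α = N_α · δA N_α (the M_α : δA contraction above) and δN_α = Σ_{β≠α} (N_β · δA N_α) / (λ_α − λ_β) N_β. The ±eps shifts on `A[0, 0]` and `A[1, 1]` apparently nudge repeated eigenvalues apart so the λ_α − λ_β denominators stay nonzero. A self-contained NumPy check of the eigenvalue-derivative formula against a central finite difference (illustrative random data, independent of tensortrax):

import numpy as np

rng = np.random.default_rng(0)

# Random symmetric matrix and a symmetric perturbation direction δA.
A = rng.normal(size=(3, 3))
A = (A + A.T) / 2
dA = rng.normal(size=(3, 3))
dA = (dA + dA.T) / 2

λ, N = np.linalg.eigh(A)  # columns of N are the eigenvectors

# Analytic first derivative: δλ_α = N_α · δA N_α.
δλ = np.einsum("ia,ij,ja->a", N, dA, N)

# Central finite-difference reference (the sort order of the eigenvalues
# stays consistent as long as they are well separated).
h = 1e-6
δλ_fd = (np.linalg.eigvalsh(A + h * dA) - np.linalg.eigvalsh(A - h * dA)) / (2 * h)

assert np.allclose(δλ, δλ_fd)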
5 changes: 4 additions & 1 deletion tests/test_math.py

@@ -76,9 +76,12 @@ def test_math():
     C = F.T @ F
     V = tr.Tensor(C)
 
-    for fun in [tm.linalg.det, tm.linalg.inv]:
+    for fun in [tm.linalg.det, tm.linalg.inv, tm.linalg.eigvalsh]:
         assert np.allclose(fun(C), fun(V).x)
 
+    for fun in [tm.linalg.eigh]:
+        assert np.all([np.allclose(x, y.x) for x, y in zip(fun(C), fun(V))])
+
     for fun in [
         tm.special.dev,
         tm.special.tresca,
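
`eigh` is compared with `zip` because on both paths it returns a pair: the eigenvalues λ and the eigenbases M_α = N_α ⊗ N_α, so each array-path result is checked against the `.x` of the corresponding Tensor-path result. The pair satisfies the spectral decomposition A = Σ_α λ_α M_α; a short illustrative check in plain NumPy (not part of the test suite):

import numpy as np

A = np.diag([1.0, 2.0, 3.0]) + 0.1  # symmetric 3x3 test matrix
λ, N = [x.T for x in np.linalg.eigh(A.T)]  # rows of N are eigenvectors
M = np.einsum("ai,aj->aij", N, N)          # eigenbases, as in the else-branch

# Reconstruct A from its spectral decomposition A = Σ_α λ_α M_α.
assert np.allclose(np.einsum("a,aij->ij", λ, M), A)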
