Full sparse PYED #3

Open · wants to merge 35 commits into base: master
Changes from 1 commit
Clean up
Yaroslav committed Oct 7, 2017
commit 575103c06aa62ac414ad7f3e366059efcd3f8945
68 changes: 37 additions & 31 deletions pyed/SparseExactDiagonalization.py
@@ -25,13 +25,8 @@
from CubeTetras import CubeTetras
# ----------------------------------------------------------------------

<<<<<<< Updated upstream
def gf(M,E,x):
return np.sum(M/(x-E))
=======
def gf(M,E,eta,x):
return np.sum(M/(x+1j*eta-E))
>>>>>>> Stashed changes

# ----------------------------------------------------------------------
class SparseExactDiagonalization(object):
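
As a point of reference, here is a minimal standalone sketch of what the broadened variant gf(M, E, eta, x) shown in this hunk evaluates: a Lehmann sum with Lorentzian broadening, sum_k M_k / (x + i*eta - E_k). The weights, pole positions and frequency grid below are made up for illustration.

import numpy as np

def gf(M, E, eta, x):
    # Broadened Lehmann sum: sum_k M_k / (x + i*eta - E_k)
    return np.sum(M / (x + 1j * eta - E))

# Illustrative inputs: residues M_k and excitation energies E_k = E_n - E_m
M = np.array([0.7, 0.3])
E = np.array([-1.0, 2.0])
eta = 0.1                              # Lorentzian broadening
w = np.linspace(-5.0, 5.0, 11)         # real-frequency grid

G = np.array([gf(M, E, eta, x) for x in w])
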
@@ -63,17 +58,10 @@ def _diagonalize_hamiltonian(self):
bar = progressbar.ProgressBar()
for i in bar(range(len(self.blocks))):
block=self.blocks[i]
<<<<<<< Updated upstream
X,Y=np.meshgrid(block,block)
E,U=np.linalg.eigh(self.H[X,Y].todense())
self.E[block]=E
self.U[Y,X]=U
=======
E,U=np.linalg.eigh(self.H[block][:,block].todense())
self.E[block]=E
for i,n in enumerate(block):
self.U[n,block]=U[i]
>>>>>>> Stashed changes
self.E=np.array(self.E)
self.E0 = np.min(self.E)
self.E = self.E-self.E0
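
For orientation, a minimal sketch of the block-wise diagonalization pattern used in _diagonalize_hamiltonian above: each symmetry block of the sparse Hamiltonian is sliced out, densified, diagonalized with np.linalg.eigh, and the block eigenvectors are scattered back into the full transformation matrix. The 4x4 Hamiltonian and the block partition are invented for illustration, and the full U is kept dense here for brevity.

import numpy as np
from scipy.sparse import csr_matrix

# Toy block-diagonal Hamiltonian with two 2x2 symmetry blocks
H = csr_matrix(np.array([[1.0, 0.5, 0.0, 0.0],
                         [0.5, 2.0, 0.0, 0.0],
                         [0.0, 0.0, 3.0, 0.2],
                         [0.0, 0.0, 0.2, 4.0]]))
blocks = [np.array([0, 1]), np.array([2, 3])]

E_full = np.zeros(H.shape[0])
U_full = np.zeros(H.shape)

for block in blocks:
    # Row fancy-indexing followed by column fancy-indexing, as in the diff
    E, U = np.linalg.eigh(H[block][:, block].toarray())
    E_full[block] = E
    U_full[np.ix_(block, block)] = U   # scatter block eigenvectors back

E0 = E_full.min()                      # ground-state energy used as reference
E_full -= E0
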
@@ -90,19 +78,13 @@ def _calculate_partition_function(self):
# ------------------------------------------------------------------
def _calculate_density_matrix(self):
self.rho=csr_matrix(self.H.shape,dtype=np.float)
<<<<<<< Updated upstream
exp_bE=csr_matrix(self.H.shape,dtype=np.float)
exp_bE[range(self.E.size),range(self.E.size)]=np.exp(-self.beta * self.E) / self.Z
self.rho=self.U.getH()*exp_bE*self.U
=======
print 'Density matrix calculation:'
bar = progressbar.ProgressBar()
for i in bar(range(len(self.blocks))):
block=self.blocks[i]
X,Y=np.meshgrid(block,block)
exp_bE = np.exp(-self.beta * self.E[block]) / self.Z
self.rho[X,Y]= np.einsum('ij,j,jk->ik', self.U[X,Y].todense(), exp_bE, self.U[X,Y].H.todense())
>>>>>>> Stashed changes

# ------------------------------------------------------------------
def _operators_to_eigenbasis(self, op_vec):
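
The block-wise density-matrix construction in the hunk above amounts to rho = U diag(e^{-beta E} / Z) U^dagger, evaluated one symmetry block at a time. A small self-contained sketch of that einsum pattern, using eigh's column-eigenvector convention and an invented 3-state block:

import numpy as np

beta = 2.0
# Illustrative 3-state symmetry block (dense here for brevity)
H_block = np.array([[0.0, 0.1, 0.0],
                    [0.1, 0.5, 0.1],
                    [0.0, 0.1, 1.3]])
E, U = np.linalg.eigh(H_block)            # columns of U are eigenvectors

Z = np.sum(np.exp(-beta * E))
exp_bE = np.exp(-beta * E) / Z

# rho = U diag(e^{-beta E} / Z) U^dagger -- the einsum used block-wise above
rho = np.einsum('ij,j,jk->ik', U, exp_bE, U.conj().T)

assert np.isclose(np.trace(rho), 1.0)     # thermal density matrix is normalized
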
@@ -114,12 +96,33 @@ def _operators_to_eigenbasis(self, op_vec):
return dop_vec

# ------------------------------------------------------------------
def get_expectation_value(self, operator):
def get_expectation_value_sparse(self, operator):

exp_val = 0.0
for idx in xrange(self.E.size):
vec = self.U[:, idx]
dot_prod = np.dot(vec.H, operator * vec)[0,0] # <n|O|n>
exp_val += np.exp(-self.beta * self.E[idx]) * dot_prod

exp_val /= self.Z

return exp_val

# ------------------------------------------------------------------
def get_expectation_value_dense(self, operator):

if not hasattr(self, 'rho'): self._calculate_density_matrix()
return np.sum((operator * self.rho).diagonal())


# ------------------------------------------------------------------
def get_expectation_value(self, operator):

if self.nstates is None:
return self.get_expectation_value_dense(operator)
else:
return self.get_expectation_value_sparse(operator)

# ------------------------------------------------------------------
def get_free_energy(self):

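Both new expectation-value paths target the same thermal average; the sparse variant sums over eigenstates one by one, while the dense variant traces the operator against the precomputed density matrix:

\langle O \rangle = 1/Z \sum_n e^{-\beta E_n} \langle n | O | n \rangle = Tr[ \rho O ],
\quad \rho = 1/Z \sum_n e^{-\beta E_n} | n \rangle \langle n |,
\quad Z = \sum_n e^{-\beta E_n}
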
@@ -157,19 +160,13 @@ def get_ground_state_energy(self):
def get_grand_potential(self):
return self.E0-np.log(np.sum(np.exp(-self.beta*self.E)))/self.beta

def get_real_frequency_greens_function_component(self, w, op1, op2,eta,xi):
def get_real_frequency_greens_function_component(self, w, op1, op2,eta):
r"""
Returns:
G^{(2)}(i\omega_n) = -1/Z < O_1(i\omega_n) O_2(-i\omega_n) >
"""
op1_eig, op2_eig = self._operators_to_eigenbasis([op1, op2])
Q=(op1_eig.getH().multiply(op2_eig)).tocoo()
<<<<<<< Updated upstream
M=(np.exp(-self.beta*self.E[Q.row])-xi*np.exp(-self.beta*self.E[Q.col]))*Q.data
E=(self.E[Q.row]-self.E[Q.col])
G = np.zeros((len(w)), dtype=np.complex)
G = Parallel(n_jobs=4)(delayed(gf)(M,E-1j*eta,x) for x in w)
=======
M=(np.exp(-self.beta*self.E[Q.row])+np.exp(-self.beta*self.E[Q.col]))*Q.data
E=(self.E[Q.row]-self.E[Q.col])
G = np.zeros((len(w)), dtype=np.complex)
@@ -350,7 +347,6 @@ def get_tau_greens_function_component(self, tau, op1, op2):

G = -np.einsum('tn,tm,nm,mn->t', et_p, et_m, op1_eig, op2_eig)

>>>>>>> Stashed changes
G /= self.Z
return G
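
Reading off the visible lines of the new code path (which drops the xi argument and hence appears to fix fermionic statistics, xi = -1 in the notation of get_frequency_greens_function_component below), the real-frequency component assembles the broadened Lehmann sum

G(\omega) = 1/Z \sum_{nm} (e^{-\beta E_n} + e^{-\beta E_m}) (O_1^\dagger)_{nm} (O_2)_{nm} / (\omega + i\eta - (E_n - E_m))

with the operator matrix elements taken in the eigenbasis, as encoded in the sparse COO product Q.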

@@ -362,12 +358,22 @@ def get_frequency_greens_function_component(self, iwn, op1, op2, xi):
G^{(2)}(i\omega_n) = -1/Z < O_1(i\omega_n) O_2(-i\omega_n) >
"""

# -- Components of the Lehman expression
dE = - self.E[:, None] + self.E[None, :]
exp_bE = np.exp(-self.beta * self.E)
M = exp_bE[:, None] - xi * exp_bE[None, :]

inv_freq = iwn[:, None, None] - dE[None, :, :]
nonzero_idx = np.nonzero(inv_freq)
# -- Only eval for non-zero values
freq = np.zeros_like(inv_freq)
freq[nonzero_idx] = inv_freq[nonzero_idx]**(-1)

op1_eig, op2_eig = self._operators_to_eigenbasis([op1, op2])
Q=(op1_eig.getH().multiply(op2_eig)).tocoo()
M=(np.exp(-self.beta*self.E[Q.row])-xi*np.exp(-self.beta*self.E[Q.col]))*Q.data
E=(self.E[Q.row]-self.E[Q.col])

# -- Compute Lehman sum for all operator combinations
G = np.zeros((len(iwn)), dtype=np.complex)
G = Parallel(n_jobs=4)(delayed(gf)(M,E,x) for x in iwn)
G = np.einsum('nm,mn,nm,znm->z', op1_eig, op2_eig, M, freq)
G /= self.Z

return G
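
The dense Matsubara path above guards against division by zero by inverting only the nonzero entries of i\omega_n - \Delta E; the remaining entries (bosonic \omega_n = 0 combined with degenerate eigenvalues) are left at zero. A minimal sketch of that masking idiom with invented arrays (a bosonic grid is used on purpose, since a fermionic grid never hits zero):

import numpy as np

beta = 5.0
E = np.array([0.0, 0.0, 1.5])                      # degenerate pair forces zero denominators
iwn = 1j * 2.0 * np.arange(-2, 3) * np.pi / beta   # bosonic Matsubara grid, includes i*0

dE = -E[:, None] + E[None, :]                      # E_m - E_n, as in the diff
inv_freq = iwn[:, None, None] - dE[None, :, :]

freq = np.zeros_like(inv_freq)
nonzero_idx = np.nonzero(inv_freq)
freq[nonzero_idx] = inv_freq[nonzero_idx] ** (-1)  # invert only where it is safe
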
82 changes: 80 additions & 2 deletions pyed/TriqsExactDiagonalization.py
@@ -17,6 +17,8 @@

# ----------------------------------------------------------------------

from pyed.CubeTetras import CubeTetrasMesh, enumerate_tau3
from pyed.SquareTriangles import SquareTrianglesMesh, enumerate_tau2
from pyed.SparseExactDiagonalization import SparseExactDiagonalization
from pyed.SparseMatrixFockStates import SparseMatrixRepresentation

@@ -47,7 +49,7 @@ def get_ground_state_energy(self):
return self.ed.get_ground_state_energy()


def set_g2_w(self, g_w, op1, op2,eta=0.1,xi=-1):
def set_g2_w(self, g_w, op1, op2,eta=0.1):

op1_mat = self.rep.sparse_matrix(op1)
op2_mat = self.rep.sparse_matrix(op2)
@@ -56,7 +58,24 @@ def set_g2_w(self, g_w, op1, op2,eta=0.1,xi=-1):

g_w.data[:, 0, 0] = \
self.ed.get_real_frequency_greens_function_component(
w, op1_mat, op2_mat, eta, xi)
w, op1_mat, op2_mat, eta)
# ------------------------------------------------------------------
def set_g2_tau(self, g_tau, op1, op2):

assert( type(g_tau.mesh) == MeshImTime )
assert( self.beta == g_tau.mesh.beta )
assert( g_tau.target_shape == (1, 1) )

op1_mat = self.rep.sparse_matrix(op1)
op2_mat = self.rep.sparse_matrix(op2)

tau = np.array([tau for tau in g_tau.mesh])

g_tau.data[:, 0, 0] = \
self.ed.get_tau_greens_function_component(
tau, op1_mat, op2_mat)

self.set_tail(g_tau, op1_mat, op2_mat)

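A hypothetical usage sketch for the new set_g2_tau, assuming an already constructed TriqsExactDiagonalization instance ed and a TRIQS 2.x-style Gf/MeshImTime API; the constructor names and argument orders here are assumptions and vary between TRIQS releases, so treat the calls below as illustrative only.

import numpy as np
from pytriqs.gf import Gf, MeshImTime
from pytriqs.operators import c, c_dag

beta, n_tau = 10.0, 256
mesh = MeshImTime(beta, 'Fermion', n_tau)      # imaginary-time mesh on [0, beta] (assumed signature)
g_tau = Gf(mesh=mesh, target_shape=(1, 1))     # scalar-valued G(tau), as asserted above

# Fill G(tau) from the exact-diagonalization solver (ed constructed elsewhere)
ed.set_g2_tau(g_tau, c('up', 0), c_dag('up', 0))
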
# ------------------------------------------------------------------
def set_g2_iwn(self, g_iwn, op1, op2):
@@ -90,3 +109,62 @@ def xi(self, mesh):
if mesh.statistic == 'Fermion': return -1.0
elif mesh.statistic == 'Boson': return +1.0
else: raise NotImplementedError

# ------------------------------------------------------------------
def set_g3_tau(self, g3_tau, op1, op2, op3):

assert( g3_tau.target_shape == (1,1,1,1) )

op1_mat = self.rep.sparse_matrix(op1)
op2_mat = self.rep.sparse_matrix(op2)
op3_mat = self.rep.sparse_matrix(op3)

ops_mat = np.array([op1_mat, op2_mat, op3_mat])

for idxs, taus, perm, perm_sign in SquareTrianglesMesh(g3_tau):

ops_perm_mat = ops_mat[perm + [2]]
taus_perm = np.array(taus).T[perm]

data = self.ed.get_timeordered_two_tau_greens_function(
taus_perm, ops_perm_mat)

for idx, d in zip(idxs, data):
g3_tau[list(idx)][:] = perm_sign * d

# ------------------------------------------------------------------
def set_g40_tau(self, g40_tau, g_tau):

assert( type(g_tau.mesh) == MeshImTime )
#assert( g_tau.target_shape == g40_tau.target_shape )

for (i1, i2, i3), (t1, t2, t3) in enumerate_tau3(g40_tau):
g40_tau[[i1, i2, i3]][:] = \
g_tau(t1-t2)*g_tau(t3) - g_tau(t1)*g_tau(t3-t2)

# ------------------------------------------------------------------
def set_g4_tau(self, g4_tau, op1, op2, op3, op4):

assert( g4_tau.target_shape == (1,1,1,1) )

op1_mat = self.rep.sparse_matrix(op1)
op2_mat = self.rep.sparse_matrix(op2)
op3_mat = self.rep.sparse_matrix(op3)
op4_mat = self.rep.sparse_matrix(op4)

ops_mat = np.array([op1_mat, op2_mat, op3_mat, op4_mat])

for idxs, taus, perm, perm_sign in CubeTetrasMesh(g4_tau):

ops_perm_mat = ops_mat[perm + [3]]
taus_perm = np.array(taus).T[perm]

data = self.ed.get_timeordered_three_tau_greens_function(
taus_perm, ops_perm_mat)

for idx, d in zip(idxs, data):
g4_tau[list(idx)][:] = perm_sign * d

# ------------------------------------------------------------------

# ----------------------------------------------------------------------
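
For reference, the disconnected two-particle function filled in set_g40_tau above is the factorization (scalar target indices suppressed)

g_{40}(\tau_1, \tau_2, \tau_3) = g(\tau_1 - \tau_2) g(\tau_3) - g(\tau_1) g(\tau_3 - \tau_2)

built entirely from the single-particle g_tau passed in.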