From 3173f86d54f1067032edb28cab796ae88f3f0933 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Sun, 14 Nov 2021 12:43:11 -0500 Subject: [PATCH 001/120] Add copy ctor and copy assign, and default move ctor and move assign for binary node. --- SeQuant/core/binary_node.hpp | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/SeQuant/core/binary_node.hpp b/SeQuant/core/binary_node.hpp index 67aec9be1..e9b014cfb 100644 --- a/SeQuant/core/binary_node.hpp +++ b/SeQuant/core/binary_node.hpp @@ -89,6 +89,19 @@ class BinaryNode { BinaryNode clone() const { return dnode->clone(); } + BinaryNode(BinaryNode const& other) noexcept { + *this = other.clone(); + } + + BinaryNode& operator=(BinaryNode const& other) noexcept { + *this = other.clone(); + return *this; + } + + BinaryNode(BinaryNode&& other) noexcept = default; + + BinaryNode& operator=(BinaryNode&& other) noexcept = default; + explicit BinaryNode(T d) : dnode{std::make_unique>(std::move(d))} {} From 3be61662074baa262ae1cc7abeba0c9ccd14383d Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Fri, 19 Nov 2021 08:37:05 -0500 Subject: [PATCH 002/120] Refactor clunky BinaryNode class. - Rename BinaryNode to FullBinaryNode after the full binary tree it represents. - Define copy ctor and assignments, and default move ctor and assignment for FullBinaryNode. Copy ctor was required while creating nested vector of FullBinaryNode objects - Deprecate and remove clone method from FullBinaryNode. 
--- SeQuant/core/binary_node.hpp | 192 ++++++++++++----------------- SeQuant/core/eval_seq.hpp | 14 +-- SeQuant/core/optimize/optimize.cpp | 5 +- tests/unit/test_binary_node.cpp | 63 ++++++---- tests/unit/test_eval_seq.cpp | 29 +++-- 5 files changed, 148 insertions(+), 155 deletions(-) diff --git a/SeQuant/core/binary_node.hpp b/SeQuant/core/binary_node.hpp index e9b014cfb..25f5c7228 100644 --- a/SeQuant/core/binary_node.hpp +++ b/SeQuant/core/binary_node.hpp @@ -10,111 +10,81 @@ namespace sequant { template -class BinaryNode { - template - struct DataNode { - virtual ~DataNode() = default; - virtual U const& data() const = 0; - virtual U& data() = 0; - [[nodiscard]] virtual bool leaf() const = 0; - virtual BinaryNode const& left() const = 0; - virtual BinaryNode const& right() const = 0; - virtual BinaryNode clone() const = 0; - }; // DataNode - - template - class data_node_internal final : public DataNode { - U data_; - BinaryNode left_; - BinaryNode right_; - - public: - data_node_internal(U d, BinaryNode&& l, BinaryNode&& r) - : data_{std::move(d)}, left_{std::move(l)}, right_{std::move(r)} {} - - U const& data() const override { return data_; } - - U& data() override { return data_; } - - [[nodiscard]] bool leaf() const override { return false; } - - BinaryNode const& left() const override { return left_; } - - BinaryNode const& right() const override { return right_; } - - BinaryNode clone() const override { - return BinaryNode{data_, left_.clone(), right_.clone()}; - } - }; // data_node_internal - - template - class data_node_leaf final : public DataNode { - U data_; - - public: - explicit data_node_leaf(U d) : data_{std::move(d)} {} - - U const& data() const override { return data_; } - - U& data() override { return data_; } - - [[nodiscard]] bool leaf() const override { return true; } - - BinaryNode const& left() const override { - throw std::logic_error("left() called on leaf node"); - } - - BinaryNode const& right() const override { - throw 
std::logic_error("right() called on leaf node"); - } - - BinaryNode clone() const override { return BinaryNode{data_}; } - }; // data_node_leaf +class FullBinaryNode { + public: + using node_ptr = std::unique_ptr>; private: - std::unique_ptr> dnode; + T data_; - public: - T const& operator*() const { return dnode->data(); } + node_ptr left_{nullptr}; - T& operator*() { return dnode->data(); } + node_ptr right_{nullptr}; - T* operator->() const { return &dnode->data(); } + node_ptr deep_copy() const { + return leaf() ? std::make_unique>(data_) + : std::make_unique>( + data_, left_->deep_copy(), right_->deep_copy()); + } - [[nodiscard]] bool leaf() const { return dnode->leaf(); } + static node_ptr const& checked_ptr_access(node_ptr const& n) { + if (n) + return n; + else + throw std::runtime_error( + "Dereferenced nullptr: use leaf() method to check leaf node."); + } - BinaryNode const& left() const { return dnode->left(); } + public: + FullBinaryNode(T d) : data_{std::move(d)} {} + + FullBinaryNode(T d, T l, T r) + : data_{std::move(d)}, + left_{std::make_unique(std::move(l))}, + right_{std::make_unique(std::move(r))} {} + + FullBinaryNode(T d, FullBinaryNode l, FullBinaryNode r) + : data_{std::move(d)}, + left_{std::make_unique>(std::move(l))}, + right_{std::make_unique>(std::move(r))} {} + + FullBinaryNode(T d, node_ptr&& l, node_ptr&& r) + : data_{std::move(d)}, left_{std::move(l)}, right_{std::move(r)} {} + + FullBinaryNode(FullBinaryNode const& other) + : data_{other.data_}, + left_{other.left_ ? other.left_->deep_copy() : nullptr}, + right_{other.right_ ? 
other.right_->deep_copy() : nullptr} {} + + FullBinaryNode& operator=(FullBinaryNode const& other) { + data_ = other.data_; + if (!other.leaf()) { + left_.reset(new FullBinaryNode{*other.left()}); + right_.reset(new FullBinaryNode{*other.right()}); + } + return *this; + } - BinaryNode const& right() const { return dnode->right(); } + FullBinaryNode(FullBinaryNode&&) = default; - BinaryNode clone() const { return dnode->clone(); } + FullBinaryNode& operator=(FullBinaryNode&&) = default; - BinaryNode(BinaryNode const& other) noexcept { - *this = other.clone(); - } + FullBinaryNode const& left() const { return *checked_ptr_access(left_); } - BinaryNode& operator=(BinaryNode const& other) noexcept { - *this = other.clone(); - return *this; - } + FullBinaryNode const& right() const { return *checked_ptr_access(right_); } - BinaryNode(BinaryNode&& other) noexcept = default; + bool leaf() const { return !(left_ || right_); } - BinaryNode& operator=(BinaryNode&& other) noexcept = default; + T const& operator*() const { return data_; } - explicit BinaryNode(T d) - : dnode{std::make_unique>(std::move(d))} {} + T& operator*() { return data_; } - BinaryNode(T d, BinaryNode&& ln, BinaryNode&& rn) - : dnode{std::make_unique>(std::move(d), // - std::move(ln), // - std::move(rn))} {} + T const* operator->() const { return &data_; } - BinaryNode(T d, T ld, T rd) - : BinaryNode{std::move(d), BinaryNode{ld}, BinaryNode{rd}} {} + T* operator->() { return &data_; } template - BinaryNode(Cont const& container, F&& binarize) { + FullBinaryNode(Cont const& container, F&& binarize) { using value_type = decltype(*ranges::begin(container)); static_assert(std::is_invocable_v, "Binarizer to handle terminal nodes missing"); @@ -136,19 +106,19 @@ class BinaryNode { using ranges::end; auto node = - accumulate(begin(container) + 1, end(container), // range - BinaryNode{binarize(*begin(container))}, // init - [&binarize](auto&& acc, const auto& val) { // predicate - auto rnode = 
BinaryNode{binarize(val)}; - return BinaryNode{binarize(*acc, *rnode), std::move(acc), - std::move(rnode)}; + accumulate(begin(container) + 1, end(container), // range + FullBinaryNode{binarize(*begin(container))}, // init + [&binarize](auto&& acc, const auto& val) { // predicate + auto rnode = FullBinaryNode{binarize(val)}; + return FullBinaryNode{binarize(*acc, *rnode), + std::move(acc), std::move(rnode)}; }); *this = std::move(node); } template const&>, + std::enable_if_t const&>, bool> = true> void visit(F&& pred) const { pred(*this); @@ -159,7 +129,7 @@ class BinaryNode { } template const&>, + std::enable_if_t const&>, bool> = true> void visit_internal(F&& pred) const { if (!leaf()) { @@ -170,7 +140,7 @@ class BinaryNode { } template const&>, + std::enable_if_t const&>, bool> = true> void visit_leaf(F&& pred) const { if (leaf()) { @@ -181,14 +151,15 @@ class BinaryNode { } } - template const&> && - std::is_invocable_v< - F, BinaryNode const&, - std::invoke_result_t const&> const&, - std::invoke_result_t const&> const&>, - bool> = true> + template < + typename F, + std::enable_if_t< + std::is_invocable_v const&> && + std::is_invocable_v< + F, FullBinaryNode const&, + std::invoke_result_t const&> const&, + std::invoke_result_t const&> const&>, + bool> = true> auto evaluate(F&& evaluator) const { if (leaf()) return evaluator(*this); return evaluator(*this, left().evaluate(std::forward(evaluator)), @@ -243,8 +214,8 @@ class BinaryNode { public: template string_t digraph(F&& label_gen, string_t const& graph_name = {}) const { - static_assert(std::is_invocable_r_v const&>, - "node label generator F(BinaryNode const &) should " + static_assert(std::is_invocable_r_v const&>, + "node label generator F(FullBinaryNode const &) should " "return string_t"); auto oss = std::basic_ostringstream{string_t{}}; @@ -258,8 +229,9 @@ class BinaryNode { } template - string_t tikz(std::function const&)> label_gen, - std::function const&)> spec_gen) const { + string_t tikz( + 
std::function const&)> label_gen, + std::function const&)> spec_gen) const { auto oss = std::basic_ostringstream{string_t{}}; oss << "\\tikz{\n\\"; tikz(oss, label_gen, spec_gen); @@ -268,10 +240,10 @@ class BinaryNode { return oss.str(); } -}; // BinaryNode +}; // FullBinaryNode template -bool operator==(BinaryNode const& lhs, BinaryNode const& rhs) { +bool operator==(FullBinaryNode const& lhs, FullBinaryNode const& rhs) { return ((*lhs == *rhs) && ((lhs.leaf() && rhs.leaf()) || (lhs.left() == rhs.left() && lhs.right() == rhs.right()))); diff --git a/SeQuant/core/eval_seq.hpp b/SeQuant/core/eval_seq.hpp index 1a8586f79..0ba3e8efd 100644 --- a/SeQuant/core/eval_seq.hpp +++ b/SeQuant/core/eval_seq.hpp @@ -121,20 +121,20 @@ class EvalSeq { // // struct { // auto operator()(T const &node) const { - // return BinaryNode{binarizer(node)}; + // return FullBinaryNode{binarizer(node)}; // } // - // auto operator()(BinaryNode &&lnode, - // BinaryNode &&rnode) const { - // auto pres = BinaryNode{binarizer(*lnode, *rnode)}; - // return BinaryNode{std::move(pres), std::move(lnode), + // auto operator()(FullBinaryNode &&lnode, + // FullBinaryNode &&rnode) const { + // auto pres = FullBinaryNode{binarizer(*lnode, *rnode)}; + // return FullBinaryNode{std::move(pres), std::move(lnode), // std::move(rnode)}; // } // } evaluator; // // return evaluate(std::forward(evaluator)); - auto parent_result = BinaryNode{binarizer(label())}; + auto parent_result = FullBinaryNode{binarizer(label())}; if (terminal()) return std::move(parent_result); @@ -146,7 +146,7 @@ class EvalSeq { auto bin_res = binarizer(*lexpr, *rnode); - return BinaryNode{std::move(bin_res), + return FullBinaryNode{std::move(bin_res), std::move(lexpr), std::move(rnode)}; }); } diff --git a/SeQuant/core/optimize/optimize.cpp b/SeQuant/core/optimize/optimize.cpp index 1cedacbab..96d362bec 100644 --- a/SeQuant/core/optimize/optimize.cpp +++ b/SeQuant/core/optimize/optimize.cpp @@ -42,9 +42,8 @@ EvalNode optimize(const 
ExprPtr& expr) { else if (expr->is()) { // canonicalization within sto doesn't seem beneficial bool canonize = false; - return single_term_opt(expr->as(), canonize) - .optimal_seqs.begin() - ->clone(); + return *( + single_term_opt(expr->as(), canonize).optimal_seqs.begin()); } else if (expr->is()) { auto smands = *expr | transform([](auto const& s) { return to_expr(optimize(s)); diff --git a/tests/unit/test_binary_node.cpp b/tests/unit/test_binary_node.cpp index 525315d76..61f901b88 100644 --- a/tests/unit/test_binary_node.cpp +++ b/tests/unit/test_binary_node.cpp @@ -5,19 +5,37 @@ #include #include -TEST_CASE("TEST BINARY_NODE", "[BinaryNode]") { +TEST_CASE("TEST BINARY_NODE", "[FullBinaryNode]") { using ranges::views::iota; using ranges::views::take; - using sequant::BinaryNode; + using sequant::FullBinaryNode; SECTION("construction") { - REQUIRE_NOTHROW(BinaryNode{0}); - REQUIRE_NOTHROW(BinaryNode{'a', 'b', 'c'}); - REQUIRE_NOTHROW(BinaryNode{'a', BinaryNode{'b'}, BinaryNode{'c'}}); + REQUIRE_NOTHROW(FullBinaryNode{0}); + REQUIRE_NOTHROW(FullBinaryNode{'a', 'b', 'c'}); + REQUIRE_NOTHROW( + FullBinaryNode{'a', FullBinaryNode{'b'}, FullBinaryNode{'c'}}); + } + + SECTION("copy ctor and assign") { + auto const n1 = FullBinaryNode{'a', 'b', 'c'}; + auto const n2{n1}; + REQUIRE(&n1 != &n2); + REQUIRE(n1 == n2); + + auto const n3 = n1; + REQUIRE(&n1 != &n3); + REQUIRE(n1 == n3); + } + + SECTION("move ctor and assign") { + auto n1{FullBinaryNode{1, 2, 3}}; + auto n2 = std::move(n1); + REQUIRE(n2 == FullBinaryNode{1,2,3}); } SECTION("derefence") { - auto const n1 = BinaryNode{100}; + auto const n1 = FullBinaryNode{100}; REQUIRE_NOTHROW(*n1); REQUIRE(*n1 == 100); @@ -26,13 +44,13 @@ TEST_CASE("TEST BINARY_NODE", "[BinaryNode]") { void dummy_fun() const {} }; - auto const n2 = BinaryNode{dummy{}}; + auto const n2 = FullBinaryNode{dummy{}}; REQUIRE_NOTHROW(n2->dummy_fun()); } SECTION("internal node") { - auto const n = BinaryNode{3, 2, 5}; + auto const n = 
FullBinaryNode{3, 2, 5}; REQUIRE_FALSE(n.leaf()); REQUIRE(*n == 3); @@ -41,7 +59,7 @@ TEST_CASE("TEST BINARY_NODE", "[BinaryNode]") { } SECTION("leaf node") { - auto const n = BinaryNode{'n'}; + auto const n = FullBinaryNode{'n'}; REQUIRE(*n == 'n'); REQUIRE(n.leaf()); @@ -57,7 +75,7 @@ TEST_CASE("TEST BINARY_NODE", "[BinaryNode]") { int operator()(int x, int y) const { return x + y; } }; - auto const node = BinaryNode{leaves, adder{}}; + auto const node = FullBinaryNode{leaves, adder{}}; REQUIRE(*node == 9); REQUIRE(*node.left() == 5); @@ -66,7 +84,7 @@ TEST_CASE("TEST BINARY_NODE", "[BinaryNode]") { REQUIRE(*node.left().right() == 2); auto const leaves2 = ranges::views::all(leaves); - auto const node2 = BinaryNode{leaves2, adder{}}; + auto const node2 = FullBinaryNode{leaves2, adder{}}; } SECTION("evaluation") { @@ -95,11 +113,11 @@ TEST_CASE("TEST BINARY_NODE", "[BinaryNode]") { }; // arithm_binarizer struct arithm_evaluator { - int operator()(BinaryNode const& av) const { + int operator()(FullBinaryNode const& av) const { return av->val; } - int operator()(BinaryNode const& av, int leval, + int operator()(FullBinaryNode const& av, int leval, int reval) const { return leval + reval; } @@ -107,7 +125,7 @@ TEST_CASE("TEST BINARY_NODE", "[BinaryNode]") { auto constexpr summands = std::array{1, 2, 3, 4, 5}; - auto const node = BinaryNode{summands, arithm_binarizer{}}; + auto const node = FullBinaryNode{summands, arithm_binarizer{}}; REQUIRE(node.evaluate(arithm_evaluator{}) == 15); @@ -128,14 +146,15 @@ TEST_CASE("TEST BINARY_NODE", "[BinaryNode]") { } }; // words_binarizer - auto const words_node = BinaryNode{words, words_binarizer{}}; + auto const words_node = + FullBinaryNode{words, words_binarizer{}}; struct string_concat { - std::string operator()(BinaryNode const& node) const { + std::string operator()(FullBinaryNode const& node) const { return node->str; } - std::string operator()(BinaryNode const& node, + std::string operator()(FullBinaryNode const& node, 
std::string const& lstr, std::string const& rstr) const { return lstr + rstr; @@ -157,11 +176,11 @@ TEST_CASE("TEST BINARY_NODE", "[BinaryNode]") { }; auto ms = make_sum{}; - auto const node1 = BinaryNode{take_nums(1), ms}; - auto const node2 = BinaryNode{take_nums(2), ms}; - auto const node3 = BinaryNode{take_nums(3), ms}; - auto const node4 = BinaryNode{6, BinaryNode{1}, - BinaryNode{take_nums(2, 2), ms}}; + auto const node1 = FullBinaryNode{take_nums(1), ms}; + auto const node2 = FullBinaryNode{take_nums(2), ms}; + auto const node3 = FullBinaryNode{take_nums(3), ms}; + auto const node4 = FullBinaryNode{ + 6, FullBinaryNode{1}, FullBinaryNode{take_nums(2, 2), ms}}; auto label_gen_str = [](auto const& n) { return std::to_string(*n); }; auto label_gen_wstr = [](auto const& n) { return std::to_wstring(*n); }; diff --git a/tests/unit/test_eval_seq.cpp b/tests/unit/test_eval_seq.cpp index f00af1009..a5b1b4ebd 100644 --- a/tests/unit/test_eval_seq.cpp +++ b/tests/unit/test_eval_seq.cpp @@ -22,8 +22,8 @@ Os& operator<<(Os& os, const sequant::EvalSeq& seq) { } TEST_CASE("TEST_EVAL_SEQUENCE", "[EvalSeq]") { - using sequant::BinaryNode; using sequant::EvalSeq; + using sequant::FullBinaryNode; auto init_rt_vec = [](size_t n) { std::vector> vec; vec.reserve(n); @@ -64,9 +64,11 @@ TEST_CASE("TEST_EVAL_SEQUENCE", "[EvalSeq]") { // 2 3 // clang-format on - auto const expected_node0 = BinaryNode{ - 74, BinaryNode{30, BinaryNode{10, 2, 3}, BinaryNode{5}}, - BinaryNode{7}}; + auto const expected_node0 = FullBinaryNode{ + 74, + FullBinaryNode{30, FullBinaryNode{10, 2, 3}, + FullBinaryNode{5}}, + FullBinaryNode{7}}; REQUIRE(*nbinarized == *expected_node0); @@ -88,9 +90,9 @@ TEST_CASE("TEST_EVAL_SEQUENCE", "[EvalSeq]") { // clang-format on auto const expected_node1 = - BinaryNode{10, // - BinaryNode{6, 3, 3}, // - BinaryNode{4}}; // + FullBinaryNode{10, // + FullBinaryNode{6, 3, 3}, // + FullBinaryNode{4}}; // REQUIRE(*sbinarized1 == *expected_node1); @@ -122,14 +124,15 @@ 
TEST_CASE("TEST_EVAL_SEQUENCE", "[EvalSeq]") { // // clang-format on - auto const expected_node2 = BinaryNode{ + auto const expected_node2 = FullBinaryNode{ 17, - BinaryNode{ + FullBinaryNode{ 13, - BinaryNode{6, BinaryNode{3}, BinaryNode{3}}, - BinaryNode{7, BinaryNode{4}, - BinaryNode{3}}}, - BinaryNode{4}}; + FullBinaryNode{6, FullBinaryNode{3}, + FullBinaryNode{3}}, + FullBinaryNode{7, FullBinaryNode{4}, + FullBinaryNode{3}}}, + FullBinaryNode{4}}; REQUIRE(*expected_node2 == *sbinarized2); } From 38dc37be639ba4b467e739fd7be0e114eee96836 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Fri, 19 Nov 2021 08:40:08 -0500 Subject: [PATCH 003/120] Represent asymptotic cost prefactors using boost::rational instead of just int. Fractional costs can be represented asymptotically. --- SeQuant/core/asy_cost.cpp | 63 ++++++++------- SeQuant/core/asy_cost.hpp | 142 ++++++++++++++++++++-------------- SeQuant/core/eval_node.cpp | 40 ++++++++-- SeQuant/core/eval_node.hpp | 35 +++++++-- tests/unit/test_asy_cost.cpp | 18 +++-- tests/unit/test_eval_node.cpp | 19 +++-- 6 files changed, 205 insertions(+), 112 deletions(-) diff --git a/SeQuant/core/asy_cost.cpp b/SeQuant/core/asy_cost.cpp index b222cfd51..d7c109744 100644 --- a/SeQuant/core/asy_cost.cpp +++ b/SeQuant/core/asy_cost.cpp @@ -1,57 +1,66 @@ #include "asy_cost.hpp" -#include namespace sequant { -AsyCost::AsyCostEntry::AsyCostEntry(size_t nocc, size_t nvirt) - : occ_{nocc}, virt_{nvirt} {} +AsyCostEntry::AsyCostEntry(size_t nocc, size_t nvirt, + boost::rational count) + : occ_{nocc}, virt_{nvirt}, count_{count} { + if (count_ == 0 || (occ_ == 0 && virt_ == 0)) { + occ_ = 0; + virt_ = 0; + occ_ = 0; + } +} -AsyCost::AsyCostEntry::AsyCostEntry(size_t nocc, size_t nvirt, int count) - : occ_{nocc}, virt_{nvirt}, count_{count} {} +AsyCostEntry const &AsyCostEntry::max() { + static AsyCostEntry const max_cost = AsyCostEntry{ + std::numeric_limits::max(), std::numeric_limits::max(), + boost::rational{std::numeric_limits::max(), + 
std::numeric_limits::max()}}; + return max_cost; +} -AsyCost::AsyCostEntry AsyCost::AsyCostEntry::max() { - return AsyCostEntry{std::numeric_limits::max(), - std::numeric_limits::max(), - std::numeric_limits::max()}; +AsyCostEntry const &AsyCostEntry::zero() { + static AsyCostEntry const zero_cost = AsyCostEntry{0, 0, 0}; + return zero_cost; } -size_t AsyCost::AsyCostEntry::occ() const { return occ_; } +size_t AsyCostEntry::occ() const { return occ_; } -size_t AsyCost::AsyCostEntry::virt() const { return virt_; } +size_t AsyCostEntry::virt() const { return virt_; } -int AsyCost::AsyCostEntry::count() const { return count_; } +boost::rational AsyCostEntry::count() const { return count_; } -void AsyCost::AsyCostEntry::set_count(int n) const { count_ = n; } +void AsyCostEntry::set_count(boost::rational n) const { count_ = n; } -bool AsyCost::AsyCostEntry::operator<(const AsyCost::AsyCostEntry &rhs) const { +bool AsyCostEntry::operator<(const AsyCostEntry &rhs) const { return virt() < rhs.virt() || (virt() == rhs.virt() && occ() < rhs.occ()); } -bool AsyCost::AsyCostEntry::operator==(const AsyCost::AsyCostEntry &rhs) const { +bool AsyCostEntry::operator==(const AsyCostEntry &rhs) const { return occ() == rhs.occ() && virt() == rhs.virt(); } -AsyCost::AsyCost(size_t nocc, size_t nvirt) { - if (!(nocc == 0 && nvirt == 0)) cost_.emplace(AsyCostEntry{nocc, nvirt}); +bool AsyCostEntry::operator!=(const AsyCostEntry &rhs) const { + return !(*this == rhs); } -AsyCost::AsyCost(size_t nocc, size_t nvirt, size_t count): AsyCost{nocc, nvirt} { - if (!cost_.empty() && count > 0) cost_.begin()->set_count(count); - else *this = AsyCost::zero(); +AsyCost::AsyCost(AsyCostEntry c) { + if (c != AsyCostEntry::zero()) cost_.emplace(c); } -AsyCost::AsyCost(AsyCostEntry c) : AsyCost{0, 0} { - cost_.emplace(std::move(c)); -} +AsyCost::AsyCost(size_t nocc, size_t nvirt, boost::rational count) + : AsyCost{AsyCostEntry{nocc, nvirt, count}} {} -signed long long AsyCost::ops(unsigned short nocc, 
unsigned short nvirt) const { - auto total = 0; +boost::rational AsyCost::ops(unsigned short nocc, + unsigned short nvirt) const { + boost::rational total = 0; for (auto &&c : cost_) { auto temp = 1; if (c.occ() > 0) temp *= static_cast(std::pow(nocc, c.occ())); if (c.virt() > 0) temp *= static_cast(std::pow(nvirt, c.virt())); // 2 * c.count() because matrix operations flops - total += temp > 1 ? 2 * c.count() * temp : 0; + total += temp > 1 ? 2 * boost::rational_cast(c.count()) * temp : 0; } return total; } @@ -129,7 +138,7 @@ AsyCost const &AsyCost::max() { } AsyCost const &AsyCost::zero() { - static const AsyCost zero = AsyCost{0, 0}; + static const AsyCost zero = AsyCost{AsyCostEntry::zero()}; return zero; } diff --git a/SeQuant/core/asy_cost.hpp b/SeQuant/core/asy_cost.hpp index 6347fcea1..db929fdb8 100644 --- a/SeQuant/core/asy_cost.hpp +++ b/SeQuant/core/asy_cost.hpp @@ -2,79 +2,104 @@ #define SEQUANT_ASY_COST_HPP #include +#include #include #include #include namespace sequant { -class AsyCost { - private: - class AsyCostEntry { - size_t occ_; - size_t virt_; - mutable int count_ = 1; +class AsyCostEntry { + size_t occ_; + size_t virt_; + mutable boost::rational count_; - public: - static AsyCostEntry max(); + public: + template + static Os &stream_out_rational(Os &os, boost::rational r) { + os << r.numerator(); + if (r.denominator() != IntType{1}) { + os << '/'; + os << r.denominator(); + } + return os; + } - AsyCostEntry(size_t nocc, size_t nvirt); + static AsyCostEntry const &max(); - AsyCostEntry(size_t nocc, size_t nvirt, int count); + static AsyCostEntry const &zero(); - AsyCostEntry(AsyCostEntry const &) = default; + AsyCostEntry(size_t nocc, size_t nvirt, boost::rational count); - AsyCostEntry(AsyCostEntry &&) = default; + AsyCostEntry(AsyCostEntry const &) = default; - AsyCostEntry &operator=(AsyCostEntry const &) = default; + AsyCostEntry(AsyCostEntry &&) = default; - AsyCostEntry &operator=(AsyCostEntry &&) = default; + AsyCostEntry 
&operator=(AsyCostEntry const &) = default; - size_t occ() const; + AsyCostEntry &operator=(AsyCostEntry &&) = default; - size_t virt() const; + size_t occ() const; - int count() const; + size_t virt() const; - void set_count(int n) const; + boost::rational count() const; - bool operator<(AsyCostEntry const &rhs) const; + void set_count(boost::rational n) const; - bool operator==(AsyCostEntry const &rhs) const; + bool operator<(AsyCostEntry const &rhs) const; - template - String_t text() const { - auto oss = std::basic_ostringstream{}; - if (*this == AsyCostEntry::max()) { - oss << "max"; - } else { - if (count_ < 0) { - if (count_ == -1) - oss << "- "; - else - oss << "- " << std::abs(count_) << "*"; - } else if (count_ > 1) { - oss << count_ << "*"; - } - oss << (occ_ > 0 ? "O" : ""); - if (occ_ > 1) oss << "^" << occ_; + bool operator==(AsyCostEntry const &rhs) const; - oss << (virt_ > 0 ? "V" : ""); - if (virt_ > 1) oss << "^" << virt_; + bool operator!=(AsyCostEntry const& rhs) const; + + template + String_t text() const { + auto oss = std::basic_ostringstream{}; + + if (*this == AsyCostEntry::max()) { + oss << "max"; + } else if (*this == AsyCostEntry::zero()) { + oss << "zero"; + } else { + auto abs_c = boost::abs(count_); + oss << (count_ < abs_c ? "- " : ""); + if (abs_c == 1) { + // do nothing + } else { + AsyCostEntry::stream_out_rational(oss, abs_c); + oss << "*"; } - return oss.str(); + oss << (occ_ > 0 ? "O" : ""); + if (occ_ > 1) oss << "^" << occ_; + + oss << (virt_ > 0 ? 
"V" : ""); + if (virt_ > 1) oss << "^" << virt_; } - template - String_t to_latex() const { - auto oss = std::basic_ostringstream{}; + return oss.str(); + } + + template + String_t to_latex() const { + auto oss = std::basic_ostringstream{}; - if (*this == AsyCostEntry::max()) { - oss << "max"; + if (*this == AsyCostEntry::max()) { + oss << "\\texttt{max}"; + } else if (*this == AsyCostEntry::zero()) { + oss << "\\texttt{zero}"; + } else { + auto abs_c = boost::abs(count_); + oss << (count_ < abs_c ? "- " : ""); + if (abs_c == 1) { + // do nothing } else { - if (count_ < 0) oss << "- "; - oss << "{"; - if (std::abs(count_) != 1) oss << std::abs(count_) << " "; + bool frac_mode = abs_c.denominator() != 1; + oss << (frac_mode ? "\\frac{" : ""); + oss << count_.numerator(); + if (frac_mode) { + oss << "}{" << count_.denominator() << "}"; + } oss << (occ_ > 0 ? "O" : ""); if (occ_ > 1) { oss << "^{" << occ_ << "}"; @@ -85,22 +110,24 @@ class AsyCost { } oss << "}"; } - return oss.str(); } - }; - - sequant::container::set cost_; - - AsyCost(AsyCostEntry); + return oss.str(); + } +}; +class AsyCost { public: static AsyCost const &max(); static AsyCost const &zero(); - AsyCost(size_t nocc, size_t nvirt); + private: + sequant::container::set cost_; + + public: + AsyCost(AsyCostEntry = AsyCostEntry::zero()); - AsyCost(size_t nocc, size_t nvirt, size_t count); + AsyCost(size_t nocc, size_t nvirt, boost::rational count = 1); AsyCost(AsyCost const &) = default; @@ -110,7 +137,8 @@ class AsyCost { AsyCost &operator=(AsyCost &&) = default; - signed long long ops(unsigned short nocc, unsigned short nvirt) const; + [[nodiscard]] boost::rational ops(unsigned short nocc, + unsigned short nvirt) const; AsyCost operator+(AsyCost const &rhs) const; @@ -151,7 +179,7 @@ class AsyCost { template Os &operator<<(Os &os, AsyCost const &cost) { - if (cost.cost_.empty()) { + if (cost == AsyCost::zero()) { os << 0; return os; } diff --git a/SeQuant/core/eval_node.cpp b/SeQuant/core/eval_node.cpp 
index 6655bc96a..5948d05e8 100644 --- a/SeQuant/core/eval_node.cpp +++ b/SeQuant/core/eval_node.cpp @@ -128,8 +128,8 @@ ExprPtr linearize_eval_node(EvalNode const& node) { } } -AsyCost asy_cost_single_node(EvalNode const& node) { - if (node.leaf()) return AsyCost::zero(); +AsyCostEntry asy_cost_single_node(EvalNode const& node) { + if (node.leaf()) return AsyCostEntry::zero(); auto bks = ranges::views::concat(node.left()->tensor().const_braket(), node.right()->tensor().const_braket(), @@ -145,18 +145,44 @@ AsyCost asy_cost_single_node(EvalNode const& node) { switch (node->op()) { case EvalOp::Symm: { - auto f = static_cast( + auto f = static_cast( boost::math::factorial(node->tensor().rank())); - return AsyCost{nocc, nvirt, f}; + return AsyCostEntry{nocc, nvirt, static_cast(f)}; } case EvalOp::Antisymm: { - auto f = static_cast( + auto f = static_cast( boost::math::factorial(node->tensor().rank())); - return AsyCost{nocc, nvirt, f * f}; + return AsyCostEntry{nocc, nvirt, static_cast(f * f)}; } default: - return AsyCost{nocc, nvirt}; + return AsyCostEntry{nocc, nvirt, 1}; } } +AsyCostEntry asy_cost_single_node_symmetry(const EvalNode& node) { + auto cost = asy_cost_single_node(node); + auto factorial = [](auto x) { + return static_cast(boost::math::factorial(x)); + }; + auto const psym = node->tensor().symmetry(); + auto const pbrank = node->tensor().bra_rank(); + auto const pkrank = node->tensor().ket_rank(); + if (psym == sequant::Symmetry::symm && + node.left()->tensor().symmetry() == psym && + node.right()->tensor().symmetry() == psym && + node->op() == sequant::EvalOp::Prod) + cost.set_count(cost.count() / + (factorial(pbrank) * factorial(pkrank))); + + else if (psym == sequant::Symmetry::symm) + cost.set_count(cost.count() / factorial(pbrank)); + else if (psym == sequant::Symmetry::antisymm) + cost.set_count(cost.count() / + (factorial(pbrank) * factorial(pkrank))); + else { + // do nothing. 
+ } + return cost; +} + } // namespace sequant diff --git a/SeQuant/core/eval_node.hpp b/SeQuant/core/eval_node.hpp index d059eee88..d0c30eec4 100644 --- a/SeQuant/core/eval_node.hpp +++ b/SeQuant/core/eval_node.hpp @@ -9,9 +9,11 @@ #include "binary_node.hpp" #include "eval_expr.hpp" +#include + namespace sequant { -using EvalNode = BinaryNode; +using EvalNode = FullBinaryNode; EvalNode to_eval_node(ExprPtr const& expr); @@ -23,20 +25,39 @@ ExprPtr to_expr(EvalNode const& node); ExprPtr linearize_eval_node(EvalNode const& node); -AsyCost asy_cost_single_node(EvalNode const& node); +AsyCostEntry asy_cost_single_node(EvalNode const& node); + +AsyCostEntry asy_cost_single_node_symmetry(EvalNode const& node); +/// +/// \tparam F function type that takes EvalNode const& argument and returns +/// bool. +/// +/// \param node Node to compute asymptotic cost on. +/// +/// \param exploit_symmetry Whether to use symmetry properties of an +/// intermediate to get reduced cost. Default: true. +/// +/// \param pred pred is called +/// on every node and only those nodes that return true will be used to compute +/// cost. Default function: returns true. +/// +/// \return Asymptotic cost of evaluation +/// in terms of number of occupied and virtual orbitals. +/// template , std::enable_if_t, bool> = true> AsyCost asy_cost( - EvalNode const& node, F&& pred = [](auto const&) { return true; }) { - + EvalNode const& node, F&& pred = [](auto const&) { return true; }, + bool exploit_symmetry = true) { if (node.leaf() || !std::invoke(std::forward(pred), node)) return AsyCost::zero(); - return asy_cost_single_node(node) + // - asy_cost(node.left(), std::forward(pred)) + // - asy_cost(node.right(), std::forward(pred)); + return AsyCost{exploit_symmetry ? 
asy_cost_single_node_symmetry(node) + : asy_cost_single_node(node)} + // + asy_cost(node.left(), std::forward(pred), exploit_symmetry) + // + asy_cost(node.right(), std::forward(pred), exploit_symmetry); } } // namespace sequant diff --git a/tests/unit/test_asy_cost.cpp b/tests/unit/test_asy_cost.cpp index 672002963..209992277 100644 --- a/tests/unit/test_asy_cost.cpp +++ b/tests/unit/test_asy_cost.cpp @@ -1,16 +1,15 @@ #include "catch.hpp" #include - #include struct MatFlops { size_t occ_range_size; size_t virt_range_size; - size_t operator()(unsigned short nocc, unsigned short nvirt) const { - size_t ops = 1; - if (nocc > 0) ops *= static_cast(std::pow(occ_range_size, nocc)); - if (nvirt > 0) ops *= static_cast(std::pow(virt_range_size, nvirt)); + long long int operator()(unsigned short nocc, unsigned short nvirt) const { + long long int ops = 1; + if (nocc > 0) ops *= static_cast(std::pow(occ_range_size, nocc)); + if (nvirt > 0) ops *= static_cast(std::pow(virt_range_size, nvirt)); return ops > 1 ? 
2 * ops : 0; } }; @@ -24,6 +23,10 @@ TEST_CASE("TEST ASY_COST", "[AsyCost]") { oss << AsyCost{0, 0}; REQUIRE(oss.str() == L"0"); + clear(); + oss << AsyCost{}; + REQUIRE(oss.str() == L"0"); + clear(); oss << AsyCost{1, 0}; REQUIRE(oss.str() == L"O"); @@ -58,8 +61,9 @@ TEST_CASE("TEST ASY_COST", "[AsyCost]") { clear(); oss << AsyCost{1, 1, 20}; + REQUIRE(oss.str() == L"20*OV"); - REQUIRE(AsyCost{0,0} == AsyCost::zero()); + REQUIRE(AsyCost{0, 0} == AsyCost::zero()); REQUIRE(AsyCost{1, 1, 0} == AsyCost::zero()); } @@ -95,4 +99,4 @@ TEST_CASE("TEST ASY_COST", "[AsyCost]") { auto const cost = AsyCost{3, 1} + AsyCost{2, 1}; REQUIRE(cost.ops(nocc, nvirt) == flops(3, 1) + flops(2, 1)); } -} +} \ No newline at end of file diff --git a/tests/unit/test_eval_node.cpp b/tests/unit/test_eval_node.cpp index 5eab308c1..8e75cb703 100644 --- a/tests/unit/test_eval_node.cpp +++ b/tests/unit/test_eval_node.cpp @@ -135,23 +135,27 @@ TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { SECTION("asy_cost_single_node") { auto const p1 = parse_expr_asymm(L"g_{i2, a1}^{a2, a3} * t_{a2, a3}^{i1, i2}"); - REQUIRE(asy_cost_single_node(to_eval_node(p1)) == AsyCost{2, 3}); + REQUIRE(AsyCost{asy_cost_single_node(to_eval_node(p1))} == AsyCost{2, 3}); auto const p2 = parse_expr_asymm( L"g_{i2,i3}^{a2,a3} * t_{a2}^{i1} * t_{a1,a3}^{i2,i3}"); auto const n2 = to_eval_node(p2); - REQUIRE(asy_cost_single_node(n2) == AsyCost{3, 2}); - REQUIRE(asy_cost_single_node(n2.left()) == AsyCost{3, 2}); + REQUIRE(AsyCost{asy_cost_single_node(n2)} == AsyCost{3, 2}); + REQUIRE(AsyCost{asy_cost_single_node(n2.left())} == AsyCost{3, 2}); auto const p3 = parse_expr_asymm(L"g_{i2,i3}^{i1,a2} * t_{a2}^{i2} * t_{a1}^{i3}"); auto const n3 = to_eval_node(p3); - REQUIRE(asy_cost_single_node(n3) == AsyCost{2, 1}); - REQUIRE(asy_cost_single_node(n3.left()) == AsyCost{3, 1}); + REQUIRE(AsyCost{asy_cost_single_node(n3)} == AsyCost{2, 1}); + REQUIRE(AsyCost{asy_cost_single_node(n3.left())} == AsyCost{3, 1}); } SECTION("asy_cost") { + auto 
asy_cost_no_exploit_sym = [](EvalNode const& n) { + return asy_cost( + n, [](auto const& n) { return true; }, false); + }; auto const p1 = parse_expr_asymm(L"g_{i2, a1}^{a2, a3} * t_{a2, a3}^{i1, i2}"); REQUIRE(asy_cost(to_eval_node(p1)) == AsyCost{2, 3}); @@ -169,9 +173,10 @@ TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { auto const t1 = parse_expr_asymm(L"I{i1,i2,i3;a1,a2,a3}"); auto const n4 = to_eval_node_antisymm(t1); - REQUIRE(asy_cost(n4) == AsyCost{3,3,36}); // 36*O^3*V^3 + + REQUIRE(asy_cost_no_exploit_sym(n4) == AsyCost{3, 3, 36}); // 36*O^3*V^3 auto const n5 = to_eval_node_symm(t1); - REQUIRE(asy_cost(n5) == AsyCost{3,3,6}); // 6*O^3*V^3 + REQUIRE(asy_cost_no_exploit_sym(n5) == AsyCost{3, 3, 6}); // 6*O^3*V^3 } } From 1256784e2a901b64fdaedd33e2caa7cc3cdbb015 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Tue, 23 Nov 2021 13:16:50 -0500 Subject: [PATCH 004/120] Bug fix. --- SeQuant/core/eval_expr.cpp | 2 +- tests/unit/test_eval_expr.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/SeQuant/core/eval_expr.cpp b/SeQuant/core/eval_expr.cpp index 439fb7c36..b5c14e91e 100644 --- a/SeQuant/core/eval_expr.cpp +++ b/SeQuant/core/eval_expr.cpp @@ -91,7 +91,7 @@ Symmetry EvalExpr::infer_tensor_symmetry_prod(EvalExpr const& xpr1, if (ranges::distance(uniq_idxs) == tnsr1.const_braket().size() + tnsr2.const_braket().size()) { - return Symmetry::symm; + return Symmetry::antisymm; } } diff --git a/tests/unit/test_eval_expr.cpp b/tests/unit/test_eval_expr.cpp index c475e56c0..505d738e9 100644 --- a/tests/unit/test_eval_expr.cpp +++ b/tests/unit/test_eval_expr.cpp @@ -150,7 +150,7 @@ TEST_CASE("TEST_EVAL_EXPR", "[EvalExpr]") { const auto& x56 = EvalExpr{EvalExpr{t5}, EvalExpr{t6}, EvalOp::Prod}; - REQUIRE(x56.tensor().symmetry() == Symmetry::symm); + REQUIRE(x56.tensor().symmetry() == Symmetry::antisymm); // contraction of some indices from a bra to a ket const auto t7 = parse_expr_asymm(L"g_{a1,a2}^{i1,a3}")->as(); From 
35e5e8c184f526f5860ef03393b079559e9777a0 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Tue, 23 Nov 2021 13:17:32 -0500 Subject: [PATCH 005/120] Refactor asymptotic cost evaluation. --- SeQuant/core/asy_cost.cpp | 93 +++++++------- SeQuant/core/asy_cost.hpp | 225 ++++++++++++++++++---------------- SeQuant/core/eval_node.cpp | 59 +++++---- SeQuant/core/eval_node.hpp | 52 +++++--- tests/unit/test_asy_cost.cpp | 34 ++++- tests/unit/test_eval_node.cpp | 65 +++++++--- 6 files changed, 319 insertions(+), 209 deletions(-) diff --git a/SeQuant/core/asy_cost.cpp b/SeQuant/core/asy_cost.cpp index d7c109744..eacf405c8 100644 --- a/SeQuant/core/asy_cost.cpp +++ b/SeQuant/core/asy_cost.cpp @@ -2,7 +2,7 @@ namespace sequant { -AsyCostEntry::AsyCostEntry(size_t nocc, size_t nvirt, +AsyCost::AsyCostEntry::AsyCostEntry(size_t nocc, size_t nvirt, boost::rational count) : occ_{nocc}, virt_{nvirt}, count_{count} { if (count_ == 0 || (occ_ == 0 && virt_ == 0)) { @@ -12,7 +12,7 @@ AsyCostEntry::AsyCostEntry(size_t nocc, size_t nvirt, } } -AsyCostEntry const &AsyCostEntry::max() { +AsyCost::AsyCostEntry const &AsyCost::AsyCostEntry::max() { static AsyCostEntry const max_cost = AsyCostEntry{ std::numeric_limits::max(), std::numeric_limits::max(), boost::rational{std::numeric_limits::max(), @@ -20,28 +20,28 @@ AsyCostEntry const &AsyCostEntry::max() { return max_cost; } -AsyCostEntry const &AsyCostEntry::zero() { +AsyCost::AsyCostEntry const &AsyCost::AsyCostEntry::zero() { static AsyCostEntry const zero_cost = AsyCostEntry{0, 0, 0}; return zero_cost; } -size_t AsyCostEntry::occ() const { return occ_; } +size_t AsyCost::AsyCostEntry::occ() const { return occ_; } -size_t AsyCostEntry::virt() const { return virt_; } +size_t AsyCost::AsyCostEntry::virt() const { return virt_; } -boost::rational AsyCostEntry::count() const { return count_; } +boost::rational AsyCost::AsyCostEntry::count() const { return count_; } -void AsyCostEntry::set_count(boost::rational n) const { count_ = n; } +void 
AsyCost::AsyCostEntry::set_count(boost::rational n) const { count_ = n; } -bool AsyCostEntry::operator<(const AsyCostEntry &rhs) const { +bool AsyCost::AsyCostEntry::operator<(const AsyCost::AsyCostEntry &rhs) const { return virt() < rhs.virt() || (virt() == rhs.virt() && occ() < rhs.occ()); } -bool AsyCostEntry::operator==(const AsyCostEntry &rhs) const { +bool AsyCost::AsyCostEntry::operator==(const AsyCost::AsyCostEntry &rhs) const { return occ() == rhs.occ() && virt() == rhs.virt(); } -bool AsyCostEntry::operator!=(const AsyCostEntry &rhs) const { +bool AsyCost::AsyCostEntry::operator!=(const AsyCost::AsyCostEntry &rhs) const { return !(*this == rhs); } @@ -49,6 +49,8 @@ AsyCost::AsyCost(AsyCostEntry c) { if (c != AsyCostEntry::zero()) cost_.emplace(c); } +AsyCost::AsyCost(): AsyCost{AsyCostEntry::zero()} {} + AsyCost::AsyCost(size_t nocc, size_t nvirt, boost::rational count) : AsyCost{AsyCostEntry{nocc, nvirt, count}} {} @@ -65,8 +67,18 @@ boost::rational AsyCost::ops(unsigned short nocc, return total; } -AsyCost AsyCost::operator+(const AsyCost &rhs) const { - auto sum = AsyCost{*this}; +AsyCost const &AsyCost::max() { + static const AsyCost max = AsyCost{AsyCostEntry::max()}; + return max; +} + +AsyCost const &AsyCost::zero() { + static const AsyCost zero = AsyCost{AsyCostEntry::zero()}; + return zero; +} + +AsyCost operator+(AsyCost const& lhs, AsyCost const &rhs) { + auto sum = lhs; auto &data = sum.cost_; for (auto const &c : rhs.cost_) { if (auto found = data.find(c); found != data.end()) { @@ -79,35 +91,30 @@ AsyCost AsyCost::operator+(const AsyCost &rhs) const { return sum; } -AsyCost AsyCost::operator-(const AsyCost &rhs) const { - auto diff = AsyCost{*this}; - auto &data = diff.cost_; - for (auto const &c : rhs.cost_) { - if (auto found = data.find(c); found != data.end()) { - found->set_count(found->count() - c.count()); - if (found->count() == 0) data.erase(found); - } else { - data.emplace(AsyCostEntry{c.occ(), c.virt(), -c.count()}); - } - } - 
return diff; +AsyCost operator-(AsyCost const& lhs, AsyCost const& rhs) { + return lhs + (-1 * rhs); +} + +AsyCost operator*(AsyCost const& cost, boost::rational scale) { + auto ac = cost; + for (auto& c: ac.cost_) + c.set_count(c.count() * scale); + return ac; } -AsyCost &AsyCost::operator+=(const AsyCost &rhs) { - *this = *this + rhs; - return *this; +AsyCost operator*(boost::rational scale, AsyCost const& cost) { + return cost * scale; } -AsyCost &AsyCost::operator-=(const AsyCost &rhs) { - *this = *this - rhs; - return *this; +AsyCost operator/(AsyCost const& cost, boost::rational scale) { + return cost * (1 / scale); } -bool AsyCost::operator<(const AsyCost &rhs) const { +bool operator<(AsyCost const& lhs, AsyCost const &rhs) { using ranges::views::reverse; using ranges::views::zip; - for (auto &&[c1, c2] : reverse(zip(cost_, rhs.cost_))) { + for (auto &&[c1, c2] : reverse(zip(lhs.cost_, rhs.cost_))) { if (c1 < c2) return true; else if (c1 == c2) { @@ -119,27 +126,17 @@ bool AsyCost::operator<(const AsyCost &rhs) const { return false; } - return cost_.size() < rhs.cost_.size(); + return lhs.cost_.size() < rhs.cost_.size(); } -bool AsyCost::operator==(const AsyCost &rhs) const { - return cost_.size() == rhs.cost_.size() && !(*this < rhs || rhs < *this); -} - -bool AsyCost::operator!=(const AsyCost &rhs) const { return !(*this == rhs); } - -bool AsyCost::operator>(const AsyCost &rhs) const { - return !(*this < rhs || *this == rhs); +bool operator==(AsyCost const& lhs, AsyCost const &rhs) { + return lhs.cost_.size() == rhs.cost_.size() && !(lhs < rhs || rhs < lhs); } -AsyCost const &AsyCost::max() { - static const AsyCost max = AsyCost{AsyCostEntry::max()}; - return max; -} +bool operator!=(AsyCost const& lhs, AsyCost const &rhs) { return !(lhs == rhs); } -AsyCost const &AsyCost::zero() { - static const AsyCost zero = AsyCost{AsyCostEntry::zero()}; - return zero; +bool operator>(AsyCost const& lhs, AsyCost const &rhs) { + return !(lhs < rhs || lhs == rhs); } } // 
namespace sequant diff --git a/SeQuant/core/asy_cost.hpp b/SeQuant/core/asy_cost.hpp index db929fdb8..16595cf47 100644 --- a/SeQuant/core/asy_cost.hpp +++ b/SeQuant/core/asy_cost.hpp @@ -9,123 +9,125 @@ namespace sequant { -class AsyCostEntry { - size_t occ_; - size_t virt_; - mutable boost::rational count_; - - public: - template - static Os &stream_out_rational(Os &os, boost::rational r) { - os << r.numerator(); - if (r.denominator() != IntType{1}) { - os << '/'; - os << r.denominator(); +class AsyCost { + private: + class AsyCostEntry { + size_t occ_; + size_t virt_; + mutable boost::rational count_; + + public: + template + static Os &stream_out_rational(Os &os, boost::rational r) { + os << r.numerator(); + if (r.denominator() != IntType{1}) { + os << '/'; + os << r.denominator(); + } + return os; } - return os; - } - static AsyCostEntry const &max(); + static AsyCostEntry const &max(); - static AsyCostEntry const &zero(); + static AsyCostEntry const &zero(); - AsyCostEntry(size_t nocc, size_t nvirt, boost::rational count); + AsyCostEntry(size_t nocc, size_t nvirt, boost::rational count); - AsyCostEntry(AsyCostEntry const &) = default; + AsyCostEntry(AsyCostEntry const &) = default; - AsyCostEntry(AsyCostEntry &&) = default; + AsyCostEntry(AsyCostEntry &&) = default; - AsyCostEntry &operator=(AsyCostEntry const &) = default; + AsyCostEntry &operator=(AsyCostEntry const &) = default; - AsyCostEntry &operator=(AsyCostEntry &&) = default; + AsyCostEntry &operator=(AsyCostEntry &&) = default; - size_t occ() const; + size_t occ() const; - size_t virt() const; + size_t virt() const; - boost::rational count() const; + boost::rational count() const; - void set_count(boost::rational n) const; + void set_count(boost::rational n) const; - bool operator<(AsyCostEntry const &rhs) const; + bool operator<(AsyCostEntry const &rhs) const; - bool operator==(AsyCostEntry const &rhs) const; + bool operator==(AsyCostEntry const &rhs) const; - bool operator!=(AsyCostEntry const& 
rhs) const; + bool operator!=(AsyCostEntry const &rhs) const; - template - String_t text() const { - auto oss = std::basic_ostringstream{}; + template + String_t text() const { + auto oss = std::basic_ostringstream{}; - if (*this == AsyCostEntry::max()) { - oss << "max"; - } else if (*this == AsyCostEntry::zero()) { - oss << "zero"; - } else { - auto abs_c = boost::abs(count_); - oss << (count_ < abs_c ? "- " : ""); - if (abs_c == 1) { - // do nothing + if (*this == AsyCostEntry::max()) { + oss << "max"; + } else if (*this == AsyCostEntry::zero()) { + oss << "zero"; } else { - AsyCostEntry::stream_out_rational(oss, abs_c); - oss << "*"; + auto abs_c = boost::abs(count_); + oss << (count_ < abs_c ? "- " : ""); + if (abs_c == 1) { + // do nothing + } else { + AsyCostEntry::stream_out_rational(oss, abs_c); + oss << "*"; + } + oss << (occ_ > 0 ? "O" : ""); + if (occ_ > 1) oss << "^" << occ_; + + oss << (virt_ > 0 ? "V" : ""); + if (virt_ > 1) oss << "^" << virt_; } - oss << (occ_ > 0 ? "O" : ""); - if (occ_ > 1) oss << "^" << occ_; - oss << (virt_ > 0 ? "V" : ""); - if (virt_ > 1) oss << "^" << virt_; + return oss.str(); } - return oss.str(); - } - - template - String_t to_latex() const { - auto oss = std::basic_ostringstream{}; + template + String_t to_latex() const { + auto oss = std::basic_ostringstream{}; - if (*this == AsyCostEntry::max()) { - oss << "\\texttt{max}"; - } else if (*this == AsyCostEntry::zero()) { - oss << "\\texttt{zero}"; - } else { - auto abs_c = boost::abs(count_); - oss << (count_ < abs_c ? "- " : ""); - if (abs_c == 1) { - // do nothing + if (*this == AsyCostEntry::max()) { + oss << "\\texttt{max}"; + } else if (*this == AsyCostEntry::zero()) { + oss << "\\texttt{zero}"; } else { - bool frac_mode = abs_c.denominator() != 1; - oss << (frac_mode ? "\\frac{" : ""); - oss << count_.numerator(); - if (frac_mode) { - oss << "}{" << count_.denominator() << "}"; - } - oss << (occ_ > 0 ? 
"O" : ""); - if (occ_ > 1) { - oss << "^{" << occ_ << "}"; + auto abs_c = boost::abs(count_); + oss << (count_ < abs_c ? "- " : ""); + if (abs_c == 1) { + // do nothing + } else { + bool frac_mode = abs_c.denominator() != 1; + oss << (frac_mode ? "\\frac{" : ""); + oss << count_.numerator(); + if (frac_mode) { + oss << "}{" << count_.denominator() << "}"; + } + oss << (occ_ > 0 ? "O" : ""); + if (occ_ > 1) { + oss << "^{" << occ_ << "}"; + } + oss << (virt_ > 0 ? "V" : ""); + if (virt_ > 1) { + oss << "^{" << virt_ << "}"; + } + oss << "}"; } - oss << (virt_ > 0 ? "V" : ""); - if (virt_ > 1) { - oss << "^{" << virt_ << "}"; - } - oss << "}"; } + return oss.str(); } - return oss.str(); - } -}; + }; + + private: + sequant::container::set cost_; + + AsyCost(AsyCostEntry); -class AsyCost { public: static AsyCost const &max(); static AsyCost const &zero(); - private: - sequant::container::set cost_; - - public: - AsyCost(AsyCostEntry = AsyCostEntry::zero()); + AsyCost(); AsyCost(size_t nocc, size_t nvirt, boost::rational count = 1); @@ -140,22 +142,6 @@ class AsyCost { [[nodiscard]] boost::rational ops(unsigned short nocc, unsigned short nvirt) const; - AsyCost operator+(AsyCost const &rhs) const; - - AsyCost operator-(AsyCost const &rhs) const; - - AsyCost &operator+=(AsyCost const &rhs); - - AsyCost &operator-=(AsyCost const &rhs); - - bool operator==(AsyCost const &rhs) const; - - bool operator!=(AsyCost const &rhs) const; - - bool operator<(AsyCost const &rhs) const; - - bool operator>(AsyCost const &rhs) const; - template String_t to_latex() const { auto oss = std::basic_ostringstream{}; @@ -163,9 +149,11 @@ class AsyCost { if (cost_.empty()) oss << 0; else { - oss << ranges::front(cost_).to_latex(); + // stream out in reverse so that more expensive terms appear first + auto rev = ranges::views::reverse(cost_); + oss << ranges::front(rev).to_latex(); if (cost_.size() > 1) - for (auto &&c : ranges::views::tail(cost_)) { + for (auto &&c : ranges::views::tail(rev)) { 
oss << (c.count() > 0 ? " + " : " ") << c.to_latex(); } } @@ -173,21 +161,48 @@ class AsyCost { return oss.str(); } + friend AsyCost operator+(AsyCost const &lhs, AsyCost const &rhs); + + friend AsyCost operator*(AsyCost const &lhs, boost::rational scale); + + friend bool operator<(AsyCost const &lhs, AsyCost const &rhs); + + friend bool operator==(AsyCost const &lhs, AsyCost const &rhs); + template friend Os &operator<<(Os &os, AsyCost const &cost); }; +AsyCost operator+(AsyCost const &lhs, AsyCost const &rhs); + +AsyCost operator-(AsyCost const &lhs, AsyCost const &rhs); + +AsyCost operator*(AsyCost const &cost, boost::rational scale); + +AsyCost operator*(boost::rational scale, AsyCost const &cost); + +AsyCost operator/(AsyCost const &cost, boost::rational scale); + +bool operator==(AsyCost const &lhs, AsyCost const &rhs); + +bool operator!=(AsyCost const &lhs, AsyCost const &rhs); + +bool operator<(AsyCost const &lhs, AsyCost const &rhs); + +bool operator>(AsyCost const &lhs, AsyCost const &rhs); + template Os &operator<<(Os &os, AsyCost const &cost) { if (cost == AsyCost::zero()) { os << 0; return os; } - os << ranges::front(cost.cost_) - .text>(); + // stream out in reverse so that more expensive terms appear first + auto rev = ranges::views::reverse(cost.cost_); + os << ranges::front(rev).text>(); if (cost.cost_.size() > 1) - for (auto &&c : ranges::views::tail(cost.cost_)) + for (auto &&c : ranges::views::tail(rev)) os << (c.count() > 0 ? 
" + " : " ") << c.text>(); diff --git a/SeQuant/core/eval_node.cpp b/SeQuant/core/eval_node.cpp index 5948d05e8..7ae9b2ad3 100644 --- a/SeQuant/core/eval_node.cpp +++ b/SeQuant/core/eval_node.cpp @@ -128,8 +128,8 @@ ExprPtr linearize_eval_node(EvalNode const& node) { } } -AsyCostEntry asy_cost_single_node(EvalNode const& node) { - if (node.leaf()) return AsyCostEntry::zero(); +AsyCost asy_cost_single_node_symm_off(EvalNode const& node) { + if (node.leaf()) return AsyCost::zero(); auto bks = ranges::views::concat(node.left()->tensor().const_braket(), node.right()->tensor().const_braket(), @@ -147,41 +147,58 @@ AsyCostEntry asy_cost_single_node(EvalNode const& node) { case EvalOp::Symm: { auto f = static_cast( boost::math::factorial(node->tensor().rank())); - return AsyCostEntry{nocc, nvirt, static_cast(f)}; + return AsyCost{nocc, nvirt, static_cast(f)}; } case EvalOp::Antisymm: { auto f = static_cast( boost::math::factorial(node->tensor().rank())); - return AsyCostEntry{nocc, nvirt, static_cast(f * f)}; + return AsyCost{nocc, nvirt, static_cast(f * f)}; } default: - return AsyCostEntry{nocc, nvirt, 1}; + return AsyCost{nocc, nvirt, 1}; } } -AsyCostEntry asy_cost_single_node_symmetry(const EvalNode& node) { - auto cost = asy_cost_single_node(node); +AsyCost asy_cost_single_node(const EvalNode& node) { + auto cost = asy_cost_single_node_symm_off(node); auto factorial = [](auto x) { return static_cast(boost::math::factorial(x)); }; + // parent node symmetry auto const psym = node->tensor().symmetry(); + // parent node bra symmetry auto const pbrank = node->tensor().bra_rank(); + // parent node ket symmetry auto const pkrank = node->tensor().ket_rank(); - if (psym == sequant::Symmetry::symm && - node.left()->tensor().symmetry() == psym && - node.right()->tensor().symmetry() == psym && - node->op() == sequant::EvalOp::Prod) - cost.set_count(cost.count() / - (factorial(pbrank) * factorial(pkrank))); - - else if (psym == sequant::Symmetry::symm) - 
cost.set_count(cost.count() / factorial(pbrank)); - else if (psym == sequant::Symmetry::antisymm) - cost.set_count(cost.count() / - (factorial(pbrank) * factorial(pkrank))); - else { - // do nothing. + + if (psym == Symmetry::nonsymm || psym == Symmetry::invalid) { + // do nothing + } else { + // ------ + // psym is Symmetry::symm or Symmetry::antisymm + // + // the rules of cost reduction are taken from + // doi:10.1016/j.procs.2012.04.044 + // ------ + + auto const op = node->op(); + if (op == EvalOp::Sum) { + cost = psym == Symmetry::symm + ? cost / (factorial(pbrank) * factorial(pkrank)) + : cost / factorial(pbrank); + } else if (op == EvalOp::Prod) { + auto const lsym = node.left()->tensor().symmetry(); + auto const rsym = node.right()->tensor().symmetry(); + cost = (lsym == rsym && lsym == Symmetry::nonsymm) + ? cost / factorial(pbrank) + : cost / (factorial(pbrank) * factorial(pkrank)); + } else { + assert( + false && + "Unsupported evaluation operation for asymptotic cost computation."); + } } + return cost; } diff --git a/SeQuant/core/eval_node.hpp b/SeQuant/core/eval_node.hpp index d0c30eec4..8ba778eb4 100644 --- a/SeQuant/core/eval_node.hpp +++ b/SeQuant/core/eval_node.hpp @@ -25,10 +25,11 @@ ExprPtr to_expr(EvalNode const& node); ExprPtr linearize_eval_node(EvalNode const& node); -AsyCostEntry asy_cost_single_node(EvalNode const& node); +AsyCost asy_cost_single_node_symm_off(EvalNode const& node); -AsyCostEntry asy_cost_single_node_symmetry(EvalNode const& node); +AsyCost asy_cost_single_node(EvalNode const& node); +namespace detail { /// /// \tparam F function type that takes EvalNode const& argument and returns /// bool. @@ -36,11 +37,10 @@ AsyCostEntry asy_cost_single_node_symmetry(EvalNode const& node); /// \param node Node to compute asymptotic cost on. /// /// \param exploit_symmetry Whether to use symmetry properties of an -/// intermediate to get reduced cost. Default: true. +/// intermediate to get reduced cost. 
/// -/// \param pred pred is called -/// on every node and only those nodes that return true will be used to compute -/// cost. Default function: returns true. +/// \param pred pred is called on every node and only those nodes that return +/// true will be used to compute cost. /// /// \return Asymptotic cost of evaluation /// in terms of number of occupied and virtual orbitals. @@ -48,18 +48,42 @@ AsyCostEntry asy_cost_single_node_symmetry(EvalNode const& node); template , std::enable_if_t, bool> = true> -AsyCost asy_cost( - EvalNode const& node, F&& pred = [](auto const&) { return true; }, - bool exploit_symmetry = true) { +AsyCost asy_cost_impl(EvalNode const& node, bool exploit_symmetry, F&& pred) { if (node.leaf() || !std::invoke(std::forward(pred), node)) return AsyCost::zero(); - return AsyCost{exploit_symmetry ? asy_cost_single_node_symmetry(node) - : asy_cost_single_node(node)} + // - asy_cost(node.left(), std::forward(pred), exploit_symmetry) + // - asy_cost(node.right(), std::forward(pred), exploit_symmetry); + return AsyCost{exploit_symmetry ? asy_cost_single_node(node) + : asy_cost_single_node_symm_off(node)} + // + asy_cost_impl(node.left(), exploit_symmetry, + std::forward(pred)) + // + asy_cost_impl(node.right(), exploit_symmetry, std::forward(pred)); +} +} // namespace detail + +/// +/// \param pred pred is called on every node and only those nodes that return +/// true will be used to compute cost. Default function: returns true. +/// +template , + std::enable_if_t, + bool> = true> +AsyCost asy_cost_symm_off( + EvalNode const& node, F&& pred = [](EvalNode const&) { return true; }) { + return detail::asy_cost_impl(node, false, std::forward(pred)); +} + +/// +/// \param pred pred is called on every node and only those nodes that return +/// true will be used to compute cost. Default function: returns true. 
+/// +template , + std::enable_if_t, + bool> = true> +AsyCost asy_cost( + EvalNode const& node, F&& pred = [](EvalNode const&) { return true; }) { + return detail::asy_cost_impl(node, true, std::forward(pred)); } -} // namespace sequant +}; // namespace sequant #endif // SEQUANT_EVAL_NODE_HPP diff --git a/tests/unit/test_asy_cost.cpp b/tests/unit/test_asy_cost.cpp index 209992277..2d5e169cc 100644 --- a/tests/unit/test_asy_cost.cpp +++ b/tests/unit/test_asy_cost.cpp @@ -16,10 +16,11 @@ struct MatFlops { TEST_CASE("TEST ASY_COST", "[AsyCost]") { using sequant::AsyCost; - SECTION("to_text") { - std::wostringstream oss{}; - auto clear = [&oss]() { oss.str(std::wstring{}); }; + std::wostringstream oss{}; + auto clear = [&oss]() { oss.str(std::wstring{}); }; + + SECTION("to_text") { oss << AsyCost{0, 0}; REQUIRE(oss.str() == L"0"); @@ -53,11 +54,11 @@ TEST_CASE("TEST ASY_COST", "[AsyCost]") { clear(); oss << AsyCost{2, 2} + AsyCost{3, 2} + AsyCost{2, 3} + AsyCost{3, 3}; - REQUIRE(oss.str() == L"O^2V^2 + O^3V^2 + O^2V^3 + O^3V^3"); + REQUIRE(oss.str() == L"O^3V^3 + O^2V^3 + O^3V^2 + O^2V^2"); clear(); oss << AsyCost{1, 1} - AsyCost{2, 3} + AsyCost{2, 2}; - REQUIRE(oss.str() == L"OV + O^2V^2 - O^2V^3"); + REQUIRE(oss.str() == L"- O^2V^3 + O^2V^2 + OV"); clear(); oss << AsyCost{1, 1, 20}; @@ -99,4 +100,27 @@ TEST_CASE("TEST ASY_COST", "[AsyCost]") { auto const cost = AsyCost{3, 1} + AsyCost{2, 1}; REQUIRE(cost.ops(nocc, nvirt) == flops(3, 1) + flops(2, 1)); } + + SECTION("Fractional costs") { + clear(); + oss << AsyCost{2,4,{1,2}}; + REQUIRE(oss.str() == L"1/2*O^2V^4"); + + auto const c1 = AsyCost{1,2} * boost::rational{2, 3}; + clear(); + oss << c1 ; + REQUIRE(oss.str() == L"2/3*OV^2"); + + auto const c2 = AsyCost{1,2} / boost::rational{2, 3}; + clear(); + oss << c2 ; + REQUIRE(oss.str() == L"3/2*OV^2"); + + auto const c3 = (AsyCost{1, 2} + AsyCost{2, 4}) * 2; + clear(); + oss << c3; + REQUIRE(oss.str() == L"2*O^2V^4 + 2*OV^2"); + + clear(); + } } \ No newline at end of 
file diff --git a/tests/unit/test_eval_node.cpp b/tests/unit/test_eval_node.cpp index 8e75cb703..ea2d675fa 100644 --- a/tests/unit/test_eval_node.cpp +++ b/tests/unit/test_eval_node.cpp @@ -11,8 +11,6 @@ auto validate_tensor = [](const auto& x, std::wstring_view tspec) -> bool { TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { using namespace sequant; - TensorCanonicalizer::register_instance( - std::make_shared()); SECTION("product") { // 1/16 * (A * B) * C @@ -152,10 +150,6 @@ TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { } SECTION("asy_cost") { - auto asy_cost_no_exploit_sym = [](EvalNode const& n) { - return asy_cost( - n, [](auto const& n) { return true; }, false); - }; auto const p1 = parse_expr_asymm(L"g_{i2, a1}^{a2, a3} * t_{a2, a3}^{i1, i2}"); REQUIRE(asy_cost(to_eval_node(p1)) == AsyCost{2, 3}); @@ -163,20 +157,59 @@ TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { auto const p2 = parse_expr_asymm( L"g_{i2,i3}^{a2,a3} * t_{a2}^{i1} * t_{a1,a3}^{i2,i3}"); - auto const n2 = to_eval_node(p2); - REQUIRE(asy_cost(n2) == AsyCost{3, 2} + AsyCost{3, 2}); + auto const np2 = to_eval_node(p2); + REQUIRE(asy_cost(np2) == AsyCost{3, 2} + AsyCost{3, 2}); auto const p3 = parse_expr_asymm(L"g_{i2,i3}^{i1,a2} * t_{a2}^{i2} * t_{a1}^{i3}"); - auto const n3 = to_eval_node(p3); - REQUIRE(asy_cost(n3) == AsyCost{2, 1} + AsyCost{3, 1}); + auto const np3 = to_eval_node(p3); + REQUIRE(asy_cost(np3) == AsyCost{2, 1} + AsyCost{3, 1}); auto const t1 = parse_expr_asymm(L"I{i1,i2,i3;a1,a2,a3}"); - auto const n4 = to_eval_node_antisymm(t1); - - REQUIRE(asy_cost_no_exploit_sym(n4) == AsyCost{3, 3, 36}); // 36*O^3*V^3 - - auto const n5 = to_eval_node_symm(t1); - REQUIRE(asy_cost_no_exploit_sym(n5) == AsyCost{3, 3, 6}); // 6*O^3*V^3 + auto const nt1a = to_eval_node_antisymm(t1); + + REQUIRE(asy_cost_symm_off(nt1a) == AsyCost{3, 3, 36}); // 36*O^3*V^3 + + auto const nt1s = to_eval_node_symm(t1); + REQUIRE(asy_cost_symm_off(nt1s) == AsyCost{3, 3, 6}); // 6*O^3*V^3 + + auto const s1 = + 
parse_expr(L"I{i1,i2;a1,a2} + I{i1,i2;a1,a2}", Symmetry::symm); + auto const ns1 = to_eval_node(s1); + REQUIRE(asy_cost(ns1) == AsyCost{2, 2, {1, 4}}); // 1/4 * O^2V^2 + + auto const s2 = + parse_expr(L"I{i1,i2;a1,a2} + I{i1,i2;a1,a2}", Symmetry::antisymm); + auto const ns2 = to_eval_node(s2); + REQUIRE(asy_cost(ns2) == AsyCost{2, 2, {1, 2}}); // 1/2 * O^2V^2 + + auto const s3 = + parse_expr(L"I{i1,i2;a1,a2} + I{i1,i2;a1,a2}", Symmetry::nonsymm); + auto const ns3 = to_eval_node(s3); + REQUIRE(asy_cost(ns3) == AsyCost{2, 2}); // O^2V^2 + + auto const p4 = + parse_expr(L"I{i1,i2;a3,a4} * I{a3,a4;a1,a2}", Symmetry::symm); + auto const np4 = to_eval_node(p4); + REQUIRE(asy_cost(np4) == AsyCost{2, 4, {1, 4}}); // 1/4 * O^2V^4 + + auto const p5 = + parse_expr(L"I{i1,i2;a3,a4} * I{a3,a4;a1,a2}", Symmetry::antisymm); + auto const np5 = to_eval_node(p5); + REQUIRE(asy_cost(np5) == AsyCost{2, 4, {1, 4}}); // 1/4 * O^2V^4 + + auto const p6 = + parse_expr(L"I{i1,i2;a3,a4} * I{a3,a4;a1,a2}", Symmetry::antisymm); + auto const np6 = to_eval_node(p6); + REQUIRE(asy_cost(np6) == AsyCost{2, 4, {1, 4}}); // 1/4 * O^2V^4 + + auto const p7 = parse_expr(L"I{i1;a1} * I{i2;a2}", Symmetry::nonsymm); + auto const np7 = to_eval_node(p7); + REQUIRE(asy_cost(np7) == AsyCost{2, 2, {1, 2}}); // 1/2 * O^2V^4 + + auto const p8 = + parse_expr(L"I{i1,i2;a3,a4} * I{a3,a4;a1,a2}", Symmetry::nonsymm); + auto const np8 = to_eval_node(p8); + REQUIRE(asy_cost(np8) == AsyCost{2, 4}); // O^2V^4 } } From e7515004260da5a48af6838ac73aa5bf8a39788f Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Tue, 23 Nov 2021 13:39:02 -0500 Subject: [PATCH 006/120] Remove parse_expr_asymm function from parse_expr header. 
--- SeQuant/core/parse/parse_expr.cpp | 4 ---- SeQuant/core/parse_expr.hpp | 8 ------- tests/unit/test_eval_expr.cpp | 40 ++++++++++++++++--------------- tests/unit/test_eval_node.cpp | 32 +++++++++++++------------ tests/unit/test_eval_ta.cpp | 16 ++++++------- tests/unit/test_optimize.cpp | 10 ++++---- 6 files changed, 52 insertions(+), 58 deletions(-) diff --git a/SeQuant/core/parse/parse_expr.cpp b/SeQuant/core/parse/parse_expr.cpp index a50cabe8c..4d986cdc4 100644 --- a/SeQuant/core/parse/parse_expr.cpp +++ b/SeQuant/core/parse/parse_expr.cpp @@ -209,8 +209,4 @@ ExprPtr parse_expr(std::wstring_view raw_expr, Symmetry symmetry){ return result[0]; } -ExprPtr parse_expr_asymm(std::wstring_view raw) { - return parse_expr(raw, Symmetry::antisymm); -} - } // namespace sequant diff --git a/SeQuant/core/parse_expr.hpp b/SeQuant/core/parse_expr.hpp index fc65a09b1..b0ba78853 100644 --- a/SeQuant/core/parse_expr.hpp +++ b/SeQuant/core/parse_expr.hpp @@ -35,14 +35,6 @@ namespace sequant { /// \return SeQuant expression. ExprPtr parse_expr(std::wstring_view raw, Symmetry tensor_sym); -/// -/// \param raw A tensor algebra expression. -/// \return SeQuant expression with all atomic tensors annotated -/// sequant::Symmetry::antisymm unless explicitly annotated differently -/// in the @c raw expression. 
-/// \see parse_expr -ExprPtr parse_expr_asymm(std::wstring_view raw); - } // namespace sequant::utils #endif // SEQUANT_PARSE_EXPR_HPP diff --git a/tests/unit/test_eval_expr.cpp b/tests/unit/test_eval_expr.cpp index 505d738e9..26dc5b958 100644 --- a/tests/unit/test_eval_expr.cpp +++ b/tests/unit/test_eval_expr.cpp @@ -11,12 +11,14 @@ TEST_CASE("TEST_EVAL_EXPR", "[EvalExpr]") { sequant::TensorCanonicalizer::register_instance( std::make_shared()); + auto parse_expr_antisymm = [](auto const& xpr){return parse_expr(xpr, Symmetry::antisymm);}; + SECTION("Constructors") { - auto t1 = parse_expr_asymm(L"t_{i1, i2}^{a1, a2}"); + auto t1 = parse_expr_antisymm(L"t_{i1, i2}^{a1, a2}"); REQUIRE_NOTHROW(EvalExpr{t1->as()}); - auto p1 = parse_expr_asymm(L"g_{i3,a1}^{i1,i2} * t_{a2}^{a3}"); + auto p1 = parse_expr_antisymm(L"g_{i3,a1}^{i1,i2} * t_{a2}^{a3}"); const auto& c2 = EvalExpr{p1->at(0)->as()}; const auto& c3 = EvalExpr{p1->at(1)->as()}; @@ -25,13 +27,13 @@ TEST_CASE("TEST_EVAL_EXPR", "[EvalExpr]") { } SECTION("EvalExpr::EvalOp types") { - auto t1 = parse_expr_asymm(L"t_{i1, i2}^{a1, a2}"); + auto t1 = parse_expr_antisymm(L"t_{i1, i2}^{a1, a2}"); auto x1 = EvalExpr(t1->as()); REQUIRE(x1.op() == EvalOp::Id); - auto p1 = parse_expr_asymm(L"g_{i3,a1}^{i1,i2} * t_{a2}^{a3}"); + auto p1 = parse_expr_antisymm(L"g_{i3,a1}^{i1,i2} * t_{a2}^{a3}"); const auto& c2 = EvalExpr{p1->at(0)->as()}; const auto& c3 = EvalExpr{p1->at(1)->as()}; @@ -39,15 +41,15 @@ TEST_CASE("TEST_EVAL_EXPR", "[EvalExpr]") { REQUIRE(c4.op() == EvalOp::Prod); const auto c5 = - EvalExpr{parse_expr_asymm(L"I^{i3,a1}_{i1,i2}")->as()}; + EvalExpr{parse_expr_antisymm(L"I^{i3,a1}_{i1,i2}")->as()}; const auto& c6 = EvalExpr{c2, c5, EvalOp::Sum}; REQUIRE(c6.op() == EvalOp::Sum); - auto x2 = EvalExpr(parse_expr_asymm(L"A_{a_1, a_2}^{i_1, i_2}")->as()); + auto x2 = EvalExpr(parse_expr_antisymm(L"A_{a_1, a_2}^{i_1, i_2}")->as()); REQUIRE(x2.op() == EvalOp::Id); - auto p2 = parse_expr_asymm(L"A{a1,a2;i1,i2} * 
I{a1,a2;i1,i2}"); + auto p2 = parse_expr_antisymm(L"A{a1,a2;i1,i2} * I{a1,a2;i1,i2}"); auto const x3 = EvalExpr{p2->at(0)->as()}; auto const x4 = EvalExpr{p2->at(1)->as()}; auto const x5 = EvalExpr{x3, x4, EvalOp::Antisymm}; @@ -64,9 +66,9 @@ TEST_CASE("TEST_EVAL_EXPR", "[EvalExpr]") { SECTION("Sequant expression") { const auto& str_t1 = L"g_{a1,a2}^{a3,a4}"; const auto& str_t2 = L"t_{a3,a4}^{i1,i2}"; - const auto& t1 = parse_expr_asymm(str_t1); + const auto& t1 = parse_expr_antisymm(str_t1); - const auto& t2 = parse_expr_asymm(str_t2); + const auto& t2 = parse_expr_antisymm(str_t2); const auto& x1 = EvalExpr{t1->as()}; const auto& x2 = EvalExpr{t2->as()}; @@ -92,22 +94,22 @@ TEST_CASE("TEST_EVAL_EXPR", "[EvalExpr]") { REQUIRE(prod_indices == expected_indices); - const auto t4 = parse_expr_asymm(L"g_{i3,i4}^{a3,a4}")->as(); + const auto t4 = parse_expr_antisymm(L"g_{i3,i4}^{a3,a4}")->as(); const auto t5 = - parse_expr_asymm(L"I_{a1,a2,a3,a4}^{i1,i2,i3,i4}")->as(); + parse_expr_antisymm(L"I_{a1,a2,a3,a4}^{i1,i2,i3,i4}")->as(); const auto& x45 = EvalExpr{EvalExpr{t4}, EvalExpr{t5}, EvalOp::Prod}; const auto& x54 = EvalExpr{EvalExpr{t5}, EvalExpr{t4}, EvalOp::Prod}; REQUIRE(x45.tensor().to_latex() == - parse_expr_asymm(L"I_{a1,a2}^{i1,i2}")->to_latex()); + parse_expr_antisymm(L"I_{a1,a2}^{i1,i2}")->to_latex()); REQUIRE(x45.tensor().to_latex() == x54.tensor().to_latex()); } SECTION("Hash value") { - const auto t1 = parse_expr_asymm(L"t_{i1}^{a1}")->as(); - const auto t2 = parse_expr_asymm(L"t_{i2}^{a2}")->as(); - const auto t3 = parse_expr_asymm(L"t_{i1,i2}^{a1,a2}")->as(); + const auto t1 = parse_expr_antisymm(L"t_{i1}^{a1}")->as(); + const auto t2 = parse_expr_antisymm(L"t_{i2}^{a2}")->as(); + const auto t3 = parse_expr_antisymm(L"t_{i1,i2}^{a1,a2}")->as(); const auto& x1 = EvalExpr{t1}; const auto& x2 = EvalExpr{t2}; @@ -127,8 +129,8 @@ TEST_CASE("TEST_EVAL_EXPR", "[EvalExpr]") { SECTION("Symmetry of product") { // whole bra <-> ket contraction between two 
antisymmetric tensors - const auto t1 = parse_expr_asymm(L"g_{i3,i4}^{i1,i2}")->as(); - const auto t2 = parse_expr_asymm(L"t_{a1,a2}^{i3,i4}")->as(); + const auto t1 = parse_expr_antisymm(L"g_{i3,i4}^{i1,i2}")->as(); + const auto t2 = parse_expr_antisymm(L"t_{a1,a2}^{i3,i4}")->as(); const auto x12 = EvalExpr{EvalExpr{t1}, EvalExpr{t2}, EvalOp::Prod}; @@ -153,8 +155,8 @@ TEST_CASE("TEST_EVAL_EXPR", "[EvalExpr]") { REQUIRE(x56.tensor().symmetry() == Symmetry::antisymm); // contraction of some indices from a bra to a ket - const auto t7 = parse_expr_asymm(L"g_{a1,a2}^{i1,a3}")->as(); - const auto t8 = parse_expr_asymm(L"t_{a3}^{i2}")->as(); + const auto t7 = parse_expr_antisymm(L"g_{a1,a2}^{i1,a3}")->as(); + const auto t8 = parse_expr_antisymm(L"t_{a3}^{i2}")->as(); const auto x78 = EvalExpr{EvalExpr{t7}, EvalExpr{t8}, EvalOp::Prod}; diff --git a/tests/unit/test_eval_node.cpp b/tests/unit/test_eval_node.cpp index ea2d675fa..bb3e31ad6 100644 --- a/tests/unit/test_eval_node.cpp +++ b/tests/unit/test_eval_node.cpp @@ -6,15 +6,17 @@ // validates if x is constructible from tspec using parse_expr auto validate_tensor = [](const auto& x, std::wstring_view tspec) -> bool { - return x.to_latex() == sequant::parse_expr_asymm(tspec)->to_latex(); + return x.to_latex() == sequant::parse_expr(tspec, sequant::Symmetry::antisymm)->to_latex(); }; TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { using namespace sequant; + auto parse_expr_antisymm = [](auto const& xpr){return parse_expr(xpr, Symmetry::antisymm);}; + SECTION("product") { // 1/16 * (A * B) * C - const auto p1 = parse_expr_asymm( + const auto p1 = parse_expr_antisymm( L"1/16 " L"* g_{i3, i4}^{a3, a4}" L"* t_{a1, a2}^{i3, i4}" @@ -72,7 +74,7 @@ TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { } SECTION("sum") { - auto const sum1 = parse_expr_asymm( + auto const sum1 = parse_expr_antisymm( L"X^{i1,i2}_{a1,a2} " L"+ Y^{i1, i2}_{a1,a2}" L"+ g_{i3,a1}^{i1,i2} * t_{a2}^{i3}"); @@ -95,7 +97,7 @@ TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { 
} SECTION("to_expr") { - const auto p1 = parse_expr_asymm( + const auto p1 = parse_expr_antisymm( L"1/16 " L"* g_{i3, i4}^{a3, a4}" L"* t_{a1, a2}^{i3, i4}" @@ -110,14 +112,14 @@ TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { REQUIRE(to_expr(n1)->to_latex() == p1_after->to_latex()); - auto const p2 = parse_expr_asymm(L"1/4 * g_{i2,i1}^{a1,a2}"); + auto const p2 = parse_expr_antisymm(L"1/4 * g_{i2,i1}^{a1,a2}"); auto const n2 = to_eval_node(p2); REQUIRE(to_expr(n2)->to_latex() == p2->to_latex()); } SECTION("linearize_eval_node") { - const auto p1 = parse_expr_asymm( + const auto p1 = parse_expr_antisymm( L"1/16 " L"* g_{i3, i4}^{a3, a4}" L"* t_{a1, a2}^{i3, i4}" @@ -125,17 +127,17 @@ TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { REQUIRE(linearize_eval_node(to_eval_node(p1))->to_latex() == p1->to_latex()); - auto const p2 = parse_expr_asymm(L"1/4 * g_{i2,i1}^{a1,a2}"); + auto const p2 = parse_expr_antisymm(L"1/4 * g_{i2,i1}^{a1,a2}"); REQUIRE(linearize_eval_node(to_eval_node(p2))->to_latex() == - parse_expr_asymm(L"1/4 * g_{i2,i1}^{a1,a2}")->to_latex()); + parse_expr_antisymm(L"1/4 * g_{i2,i1}^{a1,a2}")->to_latex()); } SECTION("asy_cost_single_node") { auto const p1 = - parse_expr_asymm(L"g_{i2, a1}^{a2, a3} * t_{a2, a3}^{i1, i2}"); + parse_expr_antisymm(L"g_{i2, a1}^{a2, a3} * t_{a2, a3}^{i1, i2}"); REQUIRE(AsyCost{asy_cost_single_node(to_eval_node(p1))} == AsyCost{2, 3}); - auto const p2 = parse_expr_asymm( + auto const p2 = parse_expr_antisymm( L"g_{i2,i3}^{a2,a3} * t_{a2}^{i1} * t_{a1,a3}^{i2,i3}"); auto const n2 = to_eval_node(p2); @@ -143,7 +145,7 @@ TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { REQUIRE(AsyCost{asy_cost_single_node(n2.left())} == AsyCost{3, 2}); auto const p3 = - parse_expr_asymm(L"g_{i2,i3}^{i1,a2} * t_{a2}^{i2} * t_{a1}^{i3}"); + parse_expr_antisymm(L"g_{i2,i3}^{i1,a2} * t_{a2}^{i2} * t_{a1}^{i3}"); auto const n3 = to_eval_node(p3); REQUIRE(AsyCost{asy_cost_single_node(n3)} == AsyCost{2, 1}); REQUIRE(AsyCost{asy_cost_single_node(n3.left())} == 
AsyCost{3, 1}); @@ -151,21 +153,21 @@ TEST_CASE("TEST EVAL_NODE", "[EvalNode]") { SECTION("asy_cost") { auto const p1 = - parse_expr_asymm(L"g_{i2, a1}^{a2, a3} * t_{a2, a3}^{i1, i2}"); + parse_expr_antisymm(L"g_{i2, a1}^{a2, a3} * t_{a2, a3}^{i1, i2}"); REQUIRE(asy_cost(to_eval_node(p1)) == AsyCost{2, 3}); - auto const p2 = parse_expr_asymm( + auto const p2 = parse_expr_antisymm( L"g_{i2,i3}^{a2,a3} * t_{a2}^{i1} * t_{a1,a3}^{i2,i3}"); auto const np2 = to_eval_node(p2); REQUIRE(asy_cost(np2) == AsyCost{3, 2} + AsyCost{3, 2}); auto const p3 = - parse_expr_asymm(L"g_{i2,i3}^{i1,a2} * t_{a2}^{i2} * t_{a1}^{i3}"); + parse_expr_antisymm(L"g_{i2,i3}^{i1,a2} * t_{a2}^{i2} * t_{a1}^{i3}"); auto const np3 = to_eval_node(p3); REQUIRE(asy_cost(np3) == AsyCost{2, 1} + AsyCost{3, 1}); - auto const t1 = parse_expr_asymm(L"I{i1,i2,i3;a1,a2,a3}"); + auto const t1 = parse_expr_antisymm(L"I{i1,i2,i3;a1,a2,a3}"); auto const nt1a = to_eval_node_antisymm(t1); REQUIRE(asy_cost_symm_off(nt1a) == AsyCost{3, 3, 36}); // 36*O^3*V^3 diff --git a/tests/unit/test_eval_ta.cpp b/tests/unit/test_eval_ta.cpp index 9d6b0aaed..334fd7399 100644 --- a/tests/unit/test_eval_ta.cpp +++ b/tests/unit/test_eval_ta.cpp @@ -98,11 +98,11 @@ auto index_label_list = [](std::string const& str){ TEST_CASE("TEST_EVAL_USING_TA", "[eval]") { using ranges::views::transform; using TA::TArrayD; - using sequant::parse_expr_asymm; using sequant::to_eval_node; using sequant::eval::ta::eval; using sequant::eval::ta::eval_antisymm; using sequant::eval::ta::eval_symm; + auto parse_expr_antisymm = [](auto const& xpr){ return parse_expr(xpr, sequant::Symmetry::antisymm); }; // tnsr is assumed to be single-tiled auto norm = [](TArrayD const& tnsr) { return tnsr.find(0).get().norm(); }; @@ -133,7 +133,7 @@ TEST_CASE("TEST_EVAL_USING_TA", "[eval]") { }; SECTION("summation") { - auto expr1 = parse_expr_asymm(L"t_{a1}^{i1} + f_{i1}^{a1}"); + auto expr1 = parse_expr_antisymm(L"t_{a1}^{i1} + f_{i1}^{a1}"); auto sum1_eval = 
eval_bnode(expr1, "i_1,a_1"); auto sum1_man = TArrayD{}; @@ -141,7 +141,7 @@ TEST_CASE("TEST_EVAL_USING_TA", "[eval]") { REQUIRE(norm(sum1_man) == Approx(norm(sum1_eval))); - auto expr2 = parse_expr_asymm(L"2 * t_{a1}^{i1} + 1.5 * f_{i1}^{a1}"); + auto expr2 = parse_expr_antisymm(L"2 * t_{a1}^{i1} + 1.5 * f_{i1}^{a1}"); auto sum2_eval = eval_bnode(expr2, "i_1,a_1"); auto sum2_man = TArrayD{}; @@ -152,7 +152,7 @@ TEST_CASE("TEST_EVAL_USING_TA", "[eval]") { SECTION("product") { auto expr1 = - parse_expr_asymm(L"1/2.0 * g_{i2,i4}^{a2,a4} * t_{a1,a2}^{i1,i2}"); + parse_expr_antisymm(L"1/2.0 * g_{i2,i4}^{a2,a4} * t_{a1,a2}^{i1,i2}"); auto prod1_eval = eval_bnode(expr1, "i_4,a_1,a_4,i_1"); TArrayD prod1_man{}; @@ -161,7 +161,7 @@ TEST_CASE("TEST_EVAL_USING_TA", "[eval]") { REQUIRE(norm(prod1_man) == Approx(norm(prod1_eval))); - auto expr2 = parse_expr_asymm( + auto expr2 = parse_expr_antisymm( L"-1/4 * g_{i3,i4}^{a3,a4} * t_{a2,a4}^{i1,i2} * t_{a1,a3}^{i3,i4}"); auto prod2_eval = eval_bnode(expr2, "a_1,a_2,i_1,i_2"); @@ -173,7 +173,7 @@ TEST_CASE("TEST_EVAL_USING_TA", "[eval]") { } SECTION("sum and product") { - auto expr1 = parse_expr_asymm( + auto expr1 = parse_expr_antisymm( L"-1/4 * g_{i3,i4}^{a3,a4} * t_{a2,a4}^{i1,i2} * t_{a1,a3}^{i3,i4}" " + " " 1/16 * g_{i3,i4}^{a3,a4} * t_{a1,a2}^{i3,i4} * t_{a3,a4}^{i1,i2} "); @@ -189,7 +189,7 @@ TEST_CASE("TEST_EVAL_USING_TA", "[eval]") { } SECTION("Antisymmetrization") { - auto expr1 = parse_expr_asymm(L"0.5 * g_{i1, i2}^{a1, a2}"); + auto expr1 = parse_expr_antisymm(L"0.5 * g_{i1, i2}^{a1, a2}"); auto eval1 = eval_bnode_antisymm(expr1, "i_1,i_2,a_1,a_2"); auto man1 = TArrayD{}; @@ -202,7 +202,7 @@ TEST_CASE("TEST_EVAL_USING_TA", "[eval]") { } SECTION("Symmetrization") { - auto expr1 = parse_expr_asymm(L"0.5 * g_{i1, i2}^{a1, a2}"); + auto expr1 = parse_expr_antisymm(L"0.5 * g_{i1, i2}^{a1, a2}"); auto eval1 = eval_bnode_symm(expr1, "i_1,i_2,a_1,a_2"); auto man1 = TArrayD{}; diff --git a/tests/unit/test_optimize.cpp 
b/tests/unit/test_optimize.cpp index 107973db7..23340da6c 100644 --- a/tests/unit/test_optimize.cpp +++ b/tests/unit/test_optimize.cpp @@ -17,8 +17,10 @@ TEST_CASE("TEST_OPTIMIZE", "[optimize]") { sequant::TensorCanonicalizer::register_instance( std::make_shared()); + auto parse_expr_antisymm = [](auto const& xpr){return parse_expr(xpr, Symmetry::antisymm);}; + SECTION("Single term optimization") { - const auto prod1 = parse_expr_asymm( + const auto prod1 = parse_expr_antisymm( L"g_{i3,i4}^{a3,a4}" // T1 " * t_{a1,a2}^{i3,i4}" // T2 " * t_{a3,a4}^{i1,i2}") // T3 @@ -48,7 +50,7 @@ TEST_CASE("TEST_OPTIMIZE", "[optimize]") { REQUIRE(to_eval_node(prod1_opt) == result1.optimal_seqs.at(0)); // - const auto prod2 = parse_expr_asymm( + const auto prod2 = parse_expr_antisymm( L" g_{i3,i4}^{a3,a4}" L" * t_{a3,a4}^{i1,i2}" L" * t_{a1}^{i3}" @@ -77,9 +79,9 @@ TEST_CASE("TEST_OPTIMIZE", "[optimize]") { REQUIRE(result2_discounted.cost < result2_naive.cost); // yet another example - auto prod3 = parse_expr_asymm( + auto prod3 = parse_expr_antisymm( L"t_{a1,a2}^{i1,i2} * g_{i2,i3}^{a2,a3} * t_{a3}^{i4}"); - auto prod4 = parse_expr_asymm(L"t_{a1,a2}^{i1,i2} * g_{i2,i3}^{a2,a3}"); + auto prod4 = parse_expr_antisymm(L"t_{a1,a2}^{i1,i2} * g_{i2,i3}^{a2,a3}"); // we show that two the evaluation trees for prod3 // - one: single term optimized on prod3 alone // - two: single term optimized on prod3 with the intermediate from prod4 From 43c7638d0bba5075ee3e8b1991a268adc1baf8ff Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Wed, 24 Nov 2021 11:42:29 -0500 Subject: [PATCH 007/120] Refactor optimize: - Rename optimize namespace to opt. - Move optimize function to sequant namespace. - Remove unused boolean parameter to single term optimization function. 
--- SeQuant/core/optimize.hpp | 23 ++++++++--------------- SeQuant/core/optimize/optimize.cpp | 13 +++++++------ examples/eval/calc_info.cpp | 4 ++-- tests/unit/test_optimize.cpp | 18 +++++++----------- 4 files changed, 24 insertions(+), 34 deletions(-) diff --git a/SeQuant/core/optimize.hpp b/SeQuant/core/optimize.hpp index b60db1511..de17fe593 100644 --- a/SeQuant/core/optimize.hpp +++ b/SeQuant/core/optimize.hpp @@ -8,8 +8,7 @@ #include #include -namespace sequant::optimize { - +namespace sequant { /// Optimize an expression assuming the number of virtual orbitals /// greater than the number of occupied orbitals. @@ -17,6 +16,8 @@ namespace sequant::optimize { /// \return EvalNode object. EvalNode optimize(ExprPtr const& expr); +namespace opt { + /// /// Omit the first factor from the top level product from given expression. /// Intended to drop "A" and "S" tensors from CC amplitudes as a preparatory @@ -47,18 +48,13 @@ struct STOResult { }; /// Perform single term optimization on a product. - -/// @param canon whether to canonicalize each product before couting flops. -/// by canonicalizing before counting flops, we increase the -/// chance of encountering an intermediate whose hash value is -/// already present in @c imed_hash. +/// /// @return STOResult template , std::enable_if_t, bool> = true> STOResult single_term_opt( - Product const& prod, bool canon, - F&& pred = [](auto const&) { return true; }) { + Product const& prod, F&& pred = [](auto const&) { return true; }) { using ranges::to_vector; using ranges::views::iota; using ranges::views::take; @@ -81,12 +77,8 @@ STOResult single_term_opt( auto result = STOResult{AsyCost::max(), {}}; - auto finder = [&result, &fold_prod, &pred, prod, canon](auto const& seq) { + auto finder = [&result, &fold_prod, &pred, prod](auto const& seq) { auto expr = seq.evaluate(fold_prod); - if (canon) { - expr->canonicalize(); - pull_scalar(expr); - } if (prod.scalar() != 1.) 
{ if (!expr->template is()) // in case expr is non-product @@ -117,6 +109,7 @@ STOResult single_term_opt( return result; } -} // namespace sequant::optimize +} // namespace opt +} // namespace sequant #endif // SEQUANT_OPTIMIZE_OPTIMIZE_HPP diff --git a/SeQuant/core/optimize/optimize.cpp b/SeQuant/core/optimize/optimize.cpp index 96d362bec..e68b2549d 100644 --- a/SeQuant/core/optimize/optimize.cpp +++ b/SeQuant/core/optimize/optimize.cpp @@ -1,6 +1,8 @@ #include "SeQuant/core/optimize.hpp" -namespace sequant::optimize { +namespace sequant { + +namespace opt { ExprPtr tail_factor(ExprPtr const& expr) noexcept { if (expr->is()) @@ -35,15 +37,14 @@ void pull_scalar(sequant::ExprPtr expr) noexcept { prod.scale(scal); } +} // namespace opt + EvalNode optimize(const ExprPtr& expr) { using ranges::views::transform; if (expr->is()) return to_eval_node(expr); else if (expr->is()) { - // canonicalization within sto doesn't seem beneficial - bool canonize = false; - return *( - single_term_opt(expr->as(), canonize).optimal_seqs.begin()); + return *(opt::single_term_opt(expr->as()).optimal_seqs.begin()); } else if (expr->is()) { auto smands = *expr | transform([](auto const& s) { return to_expr(optimize(s)); @@ -54,4 +55,4 @@ EvalNode optimize(const ExprPtr& expr) { throw std::runtime_error{"optimization attempted on unsupported Expr type"}; } -} // namespace sequant::optimize +} // namespace sequant diff --git a/examples/eval/calc_info.cpp b/examples/eval/calc_info.cpp index 8952d4d40..488f73b4c 100644 --- a/examples/eval/calc_info.cpp +++ b/examples/eval/calc_info.cpp @@ -8,8 +8,8 @@ namespace sequant::eval { EvalNode CalcInfo::node_(const ExprPtr& expr, size_t rank) const { - auto trimmed = optimize::tail_factor(expr); - return optm_opts.single_term ? optimize::optimize(trimmed) + auto trimmed = opt::tail_factor(expr); + return optm_opts.single_term ? 
optimize(trimmed) : to_eval_node(trimmed); } diff --git a/tests/unit/test_optimize.cpp b/tests/unit/test_optimize.cpp index 23340da6c..e79dfbc7a 100644 --- a/tests/unit/test_optimize.cpp +++ b/tests/unit/test_optimize.cpp @@ -12,7 +12,7 @@ auto yield_interm_hash = [](sequant::EvalNode const& node) { TEST_CASE("TEST_OPTIMIZE", "[optimize]") { using namespace sequant; - using optimize::single_term_opt; + using opt::single_term_opt; sequant::TensorCanonicalizer::register_instance( std::make_shared()); @@ -36,8 +36,7 @@ TEST_CASE("TEST_OPTIMIZE", "[optimize]") { // ((T2 * T3) * T1) : 2 * O^4 * V^4 worst sequence of evaluation // - // canonicalization set false as it is unnecessary here - const auto result1 = single_term_opt(prod1, false); + const auto result1 = single_term_opt(prod1); REQUIRE(result1.cost != sequant::AsyCost::max()); @@ -49,7 +48,6 @@ TEST_CASE("TEST_OPTIMIZE", "[optimize]") { REQUIRE(to_eval_node(prod1_opt) == result1.optimal_seqs.at(0)); - // const auto prod2 = parse_expr_antisymm( L" g_{i3,i4}^{a3,a4}" L" * t_{a3,a4}^{i1,i2}" @@ -57,8 +55,7 @@ TEST_CASE("TEST_OPTIMIZE", "[optimize]") { L" * t_{a2}^{i4}") ->as(); - // canon set on - const auto result2_naive = single_term_opt(prod2, true); + const auto result2_naive = single_term_opt(prod2); // there will be two degenerate evaluation sequences for prod2 REQUIRE(result2_naive.optimal_seqs.size() == 2); @@ -68,8 +65,7 @@ TEST_CASE("TEST_OPTIMIZE", "[optimize]") { auto imed_hashes_prod1 = yield_interm_hash(result1.optimal_seqs.at(0)); const auto result2_discounted = single_term_opt( - prod2, // - true, // canonicalization on + prod2, // [&imed_hashes_prod1]( auto const& n) { // discount existing intermediate costs if (imed_hashes_prod1.contains(n->hash())) return false; @@ -87,16 +83,16 @@ TEST_CASE("TEST_OPTIMIZE", "[optimize]") { // - two: single term optimized on prod3 with the intermediate from prod4 // are not the same. 
auto prod3_sto = std::move( - *(single_term_opt(prod3->as(), true).optimal_seqs.begin())); + *(single_term_opt(prod3->as()).optimal_seqs.begin())); // finding the intermediate from the evaluation tree of prod4 auto prod4_sto = std::move( - *(single_term_opt(prod4->as(), true).optimal_seqs.begin())); + *(single_term_opt(prod4->as()).optimal_seqs.begin())); auto imeds_prod4 = yield_interm_hash(prod4_sto); auto prod3_sto_with_imeds = std::move( - *single_term_opt(prod3->as(), true, + *single_term_opt(prod3->as(), [&imeds_prod4](auto const& n) { return !((imeds_prod4.contains(n->hash()))); }) From 46e1687cd4c31987e6c2a98fc8e65eeaab8a0da0 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Wed, 24 Nov 2021 12:29:35 -0500 Subject: [PATCH 008/120] Reenable asymptotic cost of symmetrization and anti-symmetrization evaluations. --- SeQuant/core/eval_node.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/SeQuant/core/eval_node.cpp b/SeQuant/core/eval_node.cpp index 7ae9b2ad3..76ec39a86 100644 --- a/SeQuant/core/eval_node.cpp +++ b/SeQuant/core/eval_node.cpp @@ -192,6 +192,10 @@ AsyCost asy_cost_single_node(const EvalNode& node) { cost = (lsym == rsym && lsym == Symmetry::nonsymm) ? cost / factorial(pbrank) : cost / (factorial(pbrank) * factorial(pkrank)); + } else if (op == EvalOp::Symm) { + cost = cost / factorial(pbrank); + } else if (op == EvalOp::Antisymm) { + cost = cost / (factorial(pbrank) * factorial(pkrank)); } else { assert( false && From f72fbe9440ba152d909fa51b9fd44b0acb65d537 Mon Sep 17 00:00:00 2001 From: Conner Masteran Date: Tue, 30 Nov 2021 13:19:00 -0500 Subject: [PATCH 009/120] create partition_f function which transfers particle exchange for G ops to SeQuant side. incompatible with handling particle exchange on MPQC side. 
--- SeQuant/domain/eqs/single_ref_uccf12.h | 22 +-- .../domain/transcorrelated/simplifications.h | 130 ++++++++++++++---- examples/uccf12/uccf12.cpp | 21 +-- 3 files changed, 129 insertions(+), 44 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index c299b5abe..c83c18ef4 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -98,6 +98,7 @@ class uccf12{ std::pair compute(bool print = false) { auto gg_space = IndexSpace::active_occupied; // Geminal-generating space: active occupieds is the normal choice, all orbitals is the reference-independent (albeit expensive) choice // start transformation + auto gg_obs = IndexSpace::all; auto h = H(false); auto r = R12(gg_space); @@ -116,10 +117,14 @@ class uccf12{ simplify(H_A_2); auto com_1 = simplification::hamiltonian_based(H_A_2); - auto fFF = compute_double_com(F(),r,r_1); - auto fFFt = compute_double_com(F(),r,ex(-1.) * adjoint(r_1)); - auto fFtFt = compute_double_com(F(),ex(-1.) * adjoint(r),ex(-1.) * adjoint(r_1)); - auto fFtF = compute_double_com(F(),ex(-1.) * adjoint(r),r_1); + auto fFF = ex(1./2) * compute_double_com(F(),r,r_1); + non_canon_simplify(fFF); + auto fFFt = ex(1./2) * compute_double_com(F(),r,ex(-1.) * adjoint(r_1)); + non_canon_simplify(fFFt); + auto fFtFt = ex(1./2) * compute_double_com(F(),ex(-1.) * adjoint(r),ex(-1.) * adjoint(r_1)); + non_canon_simplify(fFtFt); + auto fFtF = ex(1./2) * compute_double_com(F(),ex(-1.) 
* adjoint(r),r_1); + non_canon_simplify(fFtF); auto fFF_sim = simplification::fock_based(fFF); std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << std::endl; @@ -131,11 +136,12 @@ class uccf12{ std::wcout << "FtF: " << to_latex_align(fFtF_sim.second,20,2) << std::endl; - auto one_body = com_1.first + ex(1./2) * (fFF_sim.first +fFFt_sim.first + fFtFt_sim.first + fFtF_sim.first); - auto two_body = com_1.second + ex(1./2) * (fFF_sim.second + fFFt_sim.second + fFtFt_sim.second + fFtF_sim.second); + auto one_body = com_1.first + (fFF_sim.first +fFFt_sim.first + fFtFt_sim.first + fFtF_sim.first); + auto two_body = com_1.second + (fFF_sim.second + fFFt_sim.second + fFtFt_sim.second + fFtF_sim.second); - non_canon_simplify(one_body); - non_canon_simplify(two_body); + //cannot use non_canon_simplify here because of B term. + simplify(one_body); + simplify(two_body); if (print){ std::wcout << "one body terms: " << to_latex_align(one_body,20,2) << std::endl; diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index 93233b5cb..1332903e0 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -60,12 +60,18 @@ ExprPtr overlap_with_obs(ExprPtr ex_){ label_4 = factor->as().creators()[0].index().label(); label_5 = factor->as().creators()[1].index().label(); label_6 = factor->as().creators()[2].index().label(); - auto o1 = make_overlap(Index{L"p_1"},Index{label_1}); - auto o2 = make_overlap(Index{L"p_2"},Index{label_2}); - auto o3 = make_overlap(Index{L"p_3"},Index{label_3}); - auto o4 = make_overlap(Index{L"p_4"},Index{label_4}); - auto o5 = make_overlap(Index{L"p_5"},Index{label_5}); - auto o6 = make_overlap(Index{L"p_6"},Index{label_6}); + auto o1 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_1}); + auto o2 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_2}); 
+ auto o3 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_3}); + auto o4 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_4}); + auto o5 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_5}); + auto o6 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_6}); new_product = o1 * o2 * o3 * o4 * o5 * o6 * new_product * op_to_tens(factor); } else if (it == product->as().factors().size() - 1 && factor->is() &&factor->as().rank() == 2){ @@ -77,10 +83,14 @@ ExprPtr overlap_with_obs(ExprPtr ex_){ label_2 = factor->as().annihilators()[1].index().label(); label_3 = factor->as().creators()[0].index().label(); label_4 = factor->as().creators()[1].index().label(); - auto o1 = make_overlap(Index{L"p_1"},Index{label_1}); - auto o2 = make_overlap(Index{L"p_2"},Index{label_2}); - auto o3 = make_overlap(Index{L"p_4"},Index{label_3}); - auto o4 = make_overlap(Index{L"p_5"},Index{label_4}); + auto o1 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_1}); + auto o2 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_2}); + auto o3 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_3}); + auto o4 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_4}); new_product = o1 * o2 * o3 * o4 * new_product * op_to_tens(factor); } else if (it == product->as().factors().size() - 1 && factor->is() && factor->as().rank() == 1){ @@ -88,8 +98,10 @@ ExprPtr overlap_with_obs(ExprPtr ex_){ std::wstring label_3; label_1 = factor->as().annihilators()[0].index().label(); label_3 = factor->as().creators()[0].index().label(); - auto o1 = make_overlap(Index{L"p_1"},Index{label_1}); - auto o3 = make_overlap(Index{L"p_4"},Index{label_3}); + auto o1 = 
make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_1}); + auto o3 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::all)),Index{label_3}); new_product = o1 * o3 * new_product * op_to_tens(factor); } else if (factor->is() && factor->as().label() == L"\\Gamma" && factor->as().rank() == 1){ @@ -129,8 +141,9 @@ ExprPtr overlap_with_obs(ExprPtr ex_){ } FWickTheorem wick{overlap_expr}; + std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; wick.reduce(overlap_expr); - simplify(overlap_expr); + non_canon_simplify(overlap_expr); return overlap_expr; } @@ -149,7 +162,7 @@ ExprPtr remove_const(const ExprPtr ex_){ if (has_fnop){ new_expression = new_expression + product;} } } - simplify(new_expression); + non_canon_simplify(new_expression); return new_expression; } @@ -538,7 +551,7 @@ ExprPtr densities_to_occ(const ExprPtr& ex_){ FWickTheorem wick {result}; wick.reduce(result); - simplify (result); + non_canon_simplify (result); return result; } @@ -600,6 +613,7 @@ Product find_f12_interms(ExprPtr ex_){ } assert(T1_T2.size() <= 2); if (T1_T2.size() == 2){ + assert(counter == 2); auto result = biproduct_intermediate(T1_T2[0], T1_T2[1]); if(result->is() && result->as().label() == L"B"){ for (auto&& factors : ex_->as().factors()){//have to find fock matrix and remove. factor 1/2 because a product only finds 1/2 of the B tensor, a sum of two products. 
@@ -610,7 +624,7 @@ Product find_f12_interms(ExprPtr ex_){ } result = result * ex_; - simplify(result); + non_canon_simplify(result); return result->as(); } return ex_->as(); @@ -657,8 +671,8 @@ std::pair fnop_to_overlap(ExprPtr exprs){ one_body_result = one_body_product + one_body_result; two_body_result = two_body_product + two_body_result; } - simplify(one_body_result); - simplify(two_body_result); + non_canon_simplify(one_body_result); + non_canon_simplify(two_body_result); return {one_body_result, two_body_result}; } @@ -699,21 +713,21 @@ ExprPtr screen_F12_and_density(ExprPtr exprs){ FWickTheorem wick_f{product_clone}; wick_f.reduce(product_clone); //std::wcout << " product clone after reduce: " << to_latex_align(product_clone) << std::endl; - simplify(product_clone); + non_canon_simplify(product_clone); product_clone = screen_F12_and_density(product_clone); return_sum = product_clone + return_sum; new_product = ex(0.); break; } new_product = temp_factor * new_product; - simplify(new_product); + non_canon_simplify(new_product); } //std::wcout <<"new_product: " << to_latex_align(new_product) << std::endl; return_sum = new_product + return_sum; } //std::wcout << "return sum before reduce: " << to_latex_align(return_sum,20,2) << std::endl; - simplify(return_sum); + non_canon_simplify(return_sum); return return_sum; } else if(exprs->is()) { @@ -731,7 +745,7 @@ ExprPtr screen_F12_and_density(ExprPtr exprs){ //std::wcout << " product clone: " << to_latex_align(product_clone) << std::endl; FWickTheorem wick_f{product_clone}; wick_f.reduce(product_clone); - simplify(product_clone); + non_canon_simplify(product_clone); //std::wcout << " product clone after reduce: " << to_latex_align(product_clone) << std::endl; product_clone = screen_F12_and_density(product_clone); new_product = product_clone; @@ -807,6 +821,47 @@ ExprPtr tens_to_FNOps(ExprPtr ex_){ return ex_; } +ExprPtr split_f(ExprPtr exprs){ + assert(exprs->is()); + assert(exprs->as().label() == L"F"); + auto 
result = ex(0); + //std::wcout << "before split: " << to_latex_align(exprs,20,2) << std::endl; + if((exprs->as().const_braket()[2].space() == sequant::IndexSpace::complete_unoccupied || exprs->as().const_braket()[2].space() == sequant::IndexSpace::other_unoccupied) || exprs->as().const_braket()[3].space() == sequant::IndexSpace::complete_unoccupied || exprs->as().const_braket()[3].space() == sequant::IndexSpace::other_unoccupied) { + auto T1 = ex(3./8) * ex(L"F",std::vector{exprs->as().const_braket()[0],exprs->as().const_braket()[1]},std::vector{exprs->as().const_braket()[2],exprs->as().const_braket()[3]}); + auto T2 = ex(1./8) * ex(L"F",std::vector{exprs->as().const_braket()[1],exprs->as().const_braket()[0]},std::vector{exprs->as().const_braket()[2],exprs->as().const_braket()[3]}); + result = T1 + T2; + //std::wcout << "after split: " << to_latex_align(result,20,2) << std::endl; + return result; + } + else{// otherwise the geminal generating space must be in the upper indices. so include exchange for those. 
+ assert((exprs->as().const_braket()[0].space() == sequant::IndexSpace::complete_unoccupied || exprs->as().const_braket()[0].space() == sequant::IndexSpace::other_unoccupied) || (exprs->as().const_braket()[1].space() == sequant::IndexSpace::complete_unoccupied || exprs->as().const_braket()[1].space() == sequant::IndexSpace::other_unoccupied)); + auto T1 = ex(3./8) * ex(L"F",std::vector{exprs->as().const_braket()[0],exprs->as().const_braket()[1]},std::vector{exprs->as().const_braket()[2],exprs->as().const_braket()[3]}); + auto T2 = ex(1./8) * ex(L"F",std::vector{exprs->as().const_braket()[0],exprs->as().const_braket()[1]},std::vector{exprs->as().const_braket()[3],exprs->as().const_braket()[2]}); + result = T1 + T2; + //std::wcout << "after split: " << to_latex_align(result,20,2) << std::endl; + return result; + } + return result; +} + +ExprPtr partition_f(ExprPtr exprs){ + if(!exprs->is()){ + return exprs; + } + // std::wcout << "pre partition: " << to_latex_align(exprs,20,2) << std::endl; + + for (auto&& product : exprs->as().summands()){ + for (auto&& factor : product->as().factors()){ + if(factor->is() && factor->as().label() == L"F") { + factor = split_f(factor); + } + } + } + non_canon_simplify(exprs); + //std::wcout << " post partition: " << to_latex_align(exprs,20,2) << std::endl; + return(exprs); +} + //TODO generalize for spin-orbital basis //simplification to deal with hamiltonian based expressions. involving one body h and two body g tensors. // not rigorous for more than 2 body operators or more than 2 density matrices whose rank must be <= 2. 
@@ -818,16 +873,22 @@ std::pair hamiltonian_based(ExprPtr exprs){ //exprs = remove_const(exprs); // std::wcout << "post remove constants: " << to_latex_align(exprs,20,2) << std::endl; exprs = FNOPs_to_tens(exprs); + non_canon_simplify(exprs); //exprs = overlap_with_obs(exprs); - // std::wcout << "post obs: " << to_latex_align(exprs,20,2) << std::endl; + exprs = partition_f(exprs); + std::wcout << "post convert to tensor: " << to_latex_align(exprs,20,2) << std::endl; exprs = screen_F12_and_density(exprs); + std::wcout << "post screen f12: " << to_latex_align(exprs,20,2) << std::endl; exprs = screen_densities(exprs); - // std::wcout << "post screen F12 and density: " << to_latex_align(exprs,20,2) << std::endl; + std::wcout << "post screen density: " << to_latex_align(exprs,20,2) << std::endl; exprs = densities_to_occ(exprs); -// std::wcout << "densities to occ: " << to_latex_align(exprs,20,2) << std::endl; + std::wcout << "densities to occ: " << to_latex_align(exprs,20,2) << std::endl; for (auto&& product : exprs->as().summands()){ product->as() = simplification::find_f12_interms(product); } + std::wcout << "post intermediates: " << to_latex_align(exprs,20,2) << std::endl; + + non_canon_simplify(exprs); return fnop_to_overlap(exprs); } @@ -842,9 +903,12 @@ std::pair fock_based (ExprPtr exprs){ //exprs = remove_const(exprs); //std::wcout << "after screening constant: " << to_latex_align(exprs) << std::endl; exprs = FNOPs_to_tens(exprs); + non_canon_simplify(exprs); + //std::wcout << "fnop to tensor: " << to_latex_align(exprs,20,2) << std::endl; if(exprs->is()){ return std::pair {exprs, exprs}; } + exprs = partition_f(exprs); //exprs = overlap_with_obs(exprs); auto final_screen = exprs; //in some cases, there will now be no contributing terms left so return zero to one and two body. 
@@ -852,16 +916,26 @@ if(final_screen->is()){ return std::pair {final_screen, final_screen}; } final_screen = screen_F12_and_density(final_screen); + non_canon_simplify(final_screen); + //std::wcout << "screen F12: " << to_latex_align(final_screen,20,2) << std::endl; final_screen = treat_fock(final_screen); + non_canon_simplify(final_screen); + //std::wcout << "screen fock: " << to_latex_align(final_screen,20,2) << std::endl; final_screen = screen_densities(final_screen); + non_canon_simplify(final_screen); + //std::wcout << "screen densities: " << to_latex_align(final_screen,20,2) << std::endl; + non_canon_simplify(final_screen); //enforce that densities are in the occupied space since they are only non-zero in occ final_screen = densities_to_occ(final_screen); - std::wcout << "pre intermediates: " << to_latex_align(final_screen,20,2) << std::endl; - //find the special f12 intermediates that cannot efficiently be solved directly. + non_canon_simplify(final_screen); + //std::wcout << "screen densities to occ: " << to_latex_align(final_screen,20,2) << std::endl; + // std::wcout << "pre intermediates: " << to_latex_align(final_screen,20,2) << std::endl; + //find the special f12 intermediates that cannot efficiently be solved directly. This seems to work already for the general case! 
for (auto&& product : final_screen->as().summands()){ product->as() = simplification::find_f12_interms(product); } - std::wcout << "post intermediates: " << to_latex_align(final_screen,20,2) << std::endl; + //::wcout << "post intermediates: " << to_latex_align(final_screen,20,2) << std::endl; + non_canon_simplify(final_screen); return fnop_to_overlap(final_screen); } diff --git a/examples/uccf12/uccf12.cpp b/examples/uccf12/uccf12.cpp index 055b2e7a8..449514289 100644 --- a/examples/uccf12/uccf12.cpp +++ b/examples/uccf12/uccf12.cpp @@ -108,25 +108,28 @@ try_main() { else return input; }; auto compute_double_com = [&](ExprPtr e1, ExprPtr e2, ExprPtr e3){ - std::wcout << to_latex_align(e1) << std::endl << "next: " << to_latex_align(e2) << std::endl << " next: " << to_latex_align(e3) << std::endl; + // std::wcout << to_latex_align(e1) << std::endl << "next: " << to_latex_align(e2) << std::endl << " next: " << to_latex_align(e3) << std::endl; auto first_com = do_wick((e1 * e2) - (e2 * e1)); - std::wcout << "after first wick: " << to_latex_align(first_com) << std::endl; + //std::wcout << "after first wick: " << to_latex_align(first_com) << std::endl; auto first_com_clone = first_com->clone(); auto second_com_1 = do_wick((first_com * e3)); auto second_com_2 = do_wick(e3 * first_com); auto second_com = second_com_1 - second_com_2; simplify(second_com); second_com = keep_up_to_3_body_terms(second_com); - std::wcout << to_latex_align(second_com,20,2) << std::endl; - second_com = second_com + ex(0.);//make a sum to avoid heavy code duplication for product and sum variants. + //std::wcout << to_latex_align(second_com,20,2) << std::endl; + second_com = second_com + ex(0.); + //std::wcout << to_latex_align(second_com,20,2) << std::endl;//make a sum to avoid heavy code duplication for product and sum variants. 
second_com = simplification::overlap_with_obs(second_com); - std::wcout << "overlap with obs" << to_latex_align(second_com) << std::endl; + // std::wcout << "overlap with obs" << to_latex_align(second_com) << std::endl; second_com = second_com + ex(0.); second_com = simplification::screen_F12_and_density(second_com); std::wcout << to_latex_align(second_com,20,2) << std::endl; second_com = simplification::tens_to_FNOps(second_com); second_com = decompositions::three_body_substitution(second_com,2); simplify(second_com); + std::wcout << "three body decomp: " << to_latex_align(second_com,20,2) << std::endl; + return second_com; }; @@ -169,15 +172,17 @@ try_main() { auto fFF_sim = simplification::fock_based(fFF); // std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << std::endl; auto fFFt_sim = simplification::fock_based(fFFt); - //std::wcout << "FFt one body: " << to_latex_align(fFFt_sim.first,20,2) << std::endl; - //std::wcout << "FFt two body: " << to_latex_align(fFFt_sim.second,20,2) << std::endl; + std::wcout << "FFt one body: " << to_latex_align(fFFt_sim.first,20,2) << std::endl; + std::wcout << "FFt two body: " << to_latex_align(fFFt_sim.second,20,2) << std::endl; auto fFtFt_sim = simplification::fock_based(fFtFt); //std::wcout << "FtFt: " << to_latex_align(fFtFt_sim.second,20,2) << std::endl; auto fFtF_sim = simplification::fock_based(fFtF); //std::wcout << "FtF one body: " << to_latex_align(fFtF_sim.first,20,2) << std::endl; //std::wcout << "FtF two body: " << to_latex_align(fFtF_sim.second,20,2) << std::endl; - + auto total_double_com = ex(1./2) * (fFF_sim.first + fFFt_sim.first + fFtFt_sim.first + fFtF_sim.first + fFF_sim.second + fFFt_sim.second + fFtFt_sim.second + fFtF_sim.second); + non_canon_simplify(total_double_com); + std::wcout << "total double commutator: " << to_latex_align(total_double_com,20,2) << std::endl; auto one_body = com_1.first + ex(1./2) * (fFF_sim.first + fFFt_sim.first + fFtFt_sim.first + fFtF_sim.first); auto two_body = 
com_1.second + ex(1./2) * (fFF_sim.second + fFFt_sim.second + fFtFt_sim.second + fFtF_sim.second); non_canon_simplify(one_body); From 01eaa8936eda312d6e931d57bbfa64eb2cb97337 Mon Sep 17 00:00:00 2001 From: Conner Masteran Date: Sat, 4 Dec 2021 11:11:02 -0500 Subject: [PATCH 010/120] remove print statements --- SeQuant/domain/eqs/single_ref_uccf12.h | 8 ++++---- SeQuant/domain/transcorrelated/simplifications.h | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index c83c18ef4..d63967901 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -127,13 +127,13 @@ class uccf12{ non_canon_simplify(fFtF); auto fFF_sim = simplification::fock_based(fFF); - std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << std::endl; + //std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << std::endl; auto fFFt_sim = simplification::fock_based(fFFt); - std::wcout << "FFt: " << to_latex_align(fFFt_sim.second,20,2) << std::endl; + //std::wcout << "FFt: " << to_latex_align(fFFt_sim.second,20,2) << std::endl; auto fFtFt_sim = simplification::fock_based(fFtFt); - std::wcout << "FtFt: " << to_latex_align(fFtFt_sim.second,20,2) << std::endl; + //std::wcout << "FtFt: " << to_latex_align(fFtFt_sim.second,20,2) << std::endl; auto fFtF_sim = simplification::fock_based(fFtF); - std::wcout << "FtF: " << to_latex_align(fFtF_sim.second,20,2) << std::endl; + //std::wcout << "FtF: " << to_latex_align(fFtF_sim.second,20,2) << std::endl; auto one_body = com_1.first + (fFF_sim.first +fFFt_sim.first + fFtFt_sim.first + fFtF_sim.first); diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index 1332903e0..6a65d405b 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -141,7 +141,7 @@ ExprPtr 
overlap_with_obs(ExprPtr ex_){ } FWickTheorem wick{overlap_expr}; - std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; + //std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; wick.reduce(overlap_expr); non_canon_simplify(overlap_expr); return overlap_expr; @@ -876,17 +876,17 @@ std::pair hamiltonian_based(ExprPtr exprs){ non_canon_simplify(exprs); //exprs = overlap_with_obs(exprs); exprs = partition_f(exprs); - std::wcout << "post convert to tensor: " << to_latex_align(exprs,20,2) << std::endl; + //std::wcout << "post convert to tensor: " << to_latex_align(exprs,20,2) << std::endl; exprs = screen_F12_and_density(exprs); - std::wcout << "post screen f12: " << to_latex_align(exprs,20,2) << std::endl; + //std::wcout << "post screen f12: " << to_latex_align(exprs,20,2) << std::endl; exprs = screen_densities(exprs); - std::wcout << "post screen density: " << to_latex_align(exprs,20,2) << std::endl; + //std::wcout << "post screen density: " << to_latex_align(exprs,20,2) << std::endl; exprs = densities_to_occ(exprs); - std::wcout << "densities to occ: " << to_latex_align(exprs,20,2) << std::endl; + //std::wcout << "densities to occ: " << to_latex_align(exprs,20,2) << std::endl; for (auto&& product : exprs->as().summands()){ product->as() = simplification::find_f12_interms(product); } - std::wcout << "post intermediates: " << to_latex_align(exprs,20,2) << std::endl; + //std::wcout << "post intermediates: " << to_latex_align(exprs,20,2) << std::endl; non_canon_simplify(exprs); return fnop_to_overlap(exprs); From c2631470e6a9e6f1fff3728d6847360fe0759719 Mon Sep 17 00:00:00 2001 From: Conner Masteran Date: Wed, 8 Dec 2021 10:08:21 -0500 Subject: [PATCH 011/120] Evaluate V intermediate in Sequant into all resulting terms in the approximation. 
--- SeQuant/domain/eqs/single_ref_uccf12.h | 30 ++++-- SeQuant/domain/mbpt/op.cpp | 4 +- SeQuant/domain/mbpt/op.hpp | 3 +- .../domain/transcorrelated/simplifications.h | 100 ++++++++++++------ 4 files changed, 99 insertions(+), 38 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index d63967901..6a205317c 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -95,10 +95,28 @@ class uccf12{ return result; } - std::pair compute(bool print = false) { - auto gg_space = IndexSpace::active_occupied; // Geminal-generating space: active occupieds is the normal choice, all orbitals is the reference-independent (albeit expensive) choice - // start transformation - auto gg_obs = IndexSpace::all; + std::pair compute(std::string gg_label, bool print = false) { + //auto gg_space = IndexSpace::active_occupied; // Geminal-generating space: active occupieds is the normal choice, all orbitals is the reference-independent (albeit expensive) choice + + auto gg_space = IndexSpace::frozen_occupied; + if(gg_label == "act_occ"){ + gg_space = IndexSpace::active_occupied; + } + else if(gg_label == "occ"){ + gg_space = IndexSpace::occupied; + } + else if(gg_label == "all"){ + gg_space = IndexSpace::all; + } + else if(gg_label == "fz"){ + gg_space = IndexSpace::frozen_occupied; + } + else if(gg_label == "uocc"){ + gg_space = IndexSpace::unoccupied; + } + else { + throw " USUPPORTED SPACE LABEL! CHECK ABOVE FOR VALID ENTRIES"; + } auto h = H(false); auto r = R12(gg_space); @@ -140,8 +158,8 @@ class uccf12{ auto two_body = com_1.second + (fFF_sim.second + fFFt_sim.second + fFtFt_sim.second + fFtF_sim.second); //cannot use non_canon_simplify here because of B term. 
- simplify(one_body); - simplify(two_body); + non_canon_simplify(one_body); + non_canon_simplify(two_body); if (print){ std::wcout << "one body terms: " << to_latex_align(one_body,20,2) << std::endl; diff --git a/SeQuant/domain/mbpt/op.cpp b/SeQuant/domain/mbpt/op.cpp index 1d4110b42..4ce9c7e3d 100644 --- a/SeQuant/domain/mbpt/op.cpp +++ b/SeQuant/domain/mbpt/op.cpp @@ -10,7 +10,7 @@ namespace mbpt { std::vector cardinal_tensor_labels() { return {L"\\lambda",L"\\gamma",L"\\Gamma", L"A", L"S", L"P", L"L", L"λ", L"h", L"f", L"g", - L"t", L"R", L"F",L"X", L"V", L"B", L"U", overlap_label(), L"a", L"ã", L"b", L"ᵬ", L"E"}; + L"t", L"R", L"F",L"X", L"V", L"B", L"U",L"GR", overlap_label(), L"a", L"ã", L"b", L"ᵬ", L"E"}; } std::wstring to_wstring(OpType op) { @@ -33,6 +33,8 @@ std::wstring to_wstring(OpType op) { return L"R"; case OpType::R12: return L"F"; + case OpType::GR: + return L"GR"; default: throw std::invalid_argument("to_wstring(OpType op): invalid op"); } diff --git a/SeQuant/domain/mbpt/op.hpp b/SeQuant/domain/mbpt/op.hpp index aaf8613dd..205f550a0 100644 --- a/SeQuant/domain/mbpt/op.hpp +++ b/SeQuant/domain/mbpt/op.hpp @@ -22,7 +22,8 @@ enum class OpType { S, //!< particle symmetrizer L, //!< left-hand eigenstate R, //!< right-hand eigenstate - R12 //!< geminal kernel + R12, //!< geminal kernel + GR //!< GR kernel from f12 theory }; /// Operator character relative to Fermi vacuum diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index 6a65d405b..4522207d0 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -143,7 +143,7 @@ ExprPtr overlap_with_obs(ExprPtr ex_){ FWickTheorem wick{overlap_expr}; //std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; wick.reduce(overlap_expr); - non_canon_simplify(overlap_expr); + simplify(overlap_expr); return overlap_expr; } @@ -395,7 +395,7 @@ auto treat_fock(ExprPtr ex_){ return new_ex_; } 
//to Identify the relavant F12 intermediates, the number of connections,the connected space, and the resulting ket() and bra() of the intermediate tensor are needed. -std::tuple,std::vector> ncon_spa_extket_extbra(Tensor T1, Tensor T2,bool print_ = false){ +std::tuple,std::vector,bool> ncon_spa_extket_extbra(Tensor T1, Tensor T2,bool print_ = false){ //connected space. in each example in f12, the connected space is the same between two tensors. auto space = IndexSpace::occupied; // just a default used for construction. //depreciated should be a braket function somewhere in Tensor. @@ -404,7 +404,9 @@ std::tuple,std::vector> ncon_spa //ordered list of ket and bra indices which construct the resulting intermediate. std::vector external_ket; std::vector external_bra; - + //do the external ket indices correspond to T1? + // only need for intermediates and only works for V or X. + bool T1_ket; //unique list of connected indices. list is searched often to see if a given index is connected. std::vector connected_indices; @@ -433,7 +435,7 @@ std::tuple,std::vector> ncon_spa if ( nconnects == 0){ external_ket = T1_is; external_bra = T2_is; - std::tuple zero{nconnects, space,external_ket, external_bra}; + std::tuple zero{nconnects, space,external_ket, external_bra,T1_ket}; return zero; } //which indices in the T1 bra and ket are connected? what corresponding ket or bra is it connected to in T2? @@ -441,6 +443,7 @@ std::tuple,std::vector> ncon_spa for(int i = 0; i < T1.bra().size(); i++){ //is the bra T1 index a connected index? if (in_list(T1.bra()[i],connected_indices).first){ + T1_ket = true; for(int j = 0; j < T2.ket().size(); j++){ if(T2.ket()[j].label() == T1.bra()[i].label()){ external_ket.push_back(T1.ket()[i]); @@ -450,6 +453,7 @@ for(int i = 0; i < T1.bra().size(); i++){ } // is the ket T1 index a connected index? 
else if(in_list(T1.ket()[i],connected_indices).first){ + T1_ket = false; for(int j = 0; j < T2.ket().size(); j++){ if(T2.bra()[j].label() == T1.ket()[i].label()){ external_ket.push_back(T2.ket()[i]); @@ -512,11 +516,11 @@ for(int i = 0; i < T2.ket().size(); i++){ external_ket.push_back(T1.ket()[0]); external_ket.push_back(T1.ket()[1]); external_bra.push_back(T2.bra()[0]); - external_bra.push_back(T2.ket()[1]); + external_bra.push_back(T2.bra()[1]); } } assert(nconnects <= 2); - return {nconnects, space, external_ket, external_bra}; + return {nconnects, space, external_ket, external_bra,T1_ket}; } //densities are enforced to map obs -> occ since the obs includes frozen core orbitals. ExprPtr densities_to_occ(const ExprPtr& ex_){ @@ -560,12 +564,42 @@ ExprPtr biproduct_intermediate(ExprPtr T1,ExprPtr T2){ assert (T1->is()); assert (T2->is()); auto result = ex(1); - auto [nconnects,space, external_ket, external_bra] = ncon_spa_extket_extbra(T1->as(),T2->as()); + auto [nconnects,space, external_ket, external_bra,T1_ket] = ncon_spa_extket_extbra(T1->as(),T2->as()); if (T1->as().label() == L"g" || T2->as().label() == L"g"){ if (nconnects == 2 && space == IndexSpace::complete_unoccupied){ //V^pq_ij - auto V_pqij = ex(L"V", IDX_list{external_bra[0],external_bra[1]}, IDX_list{external_ket[0],external_ket[1]}); - return V_pqij; + //auto V_pqij = ex(L"V", IDX_list{external_bra[0],external_bra[1]}, IDX_list{external_ket[0],external_ket[1]}); + //return V_pqij; + if(T1_ket){ + auto GR_ijpq = ex(L"GR", IDX_list{external_bra[0],external_bra[1]}, IDX_list{external_ket[0],external_ket[1]}); + auto F_ijrs = ex(L"F", IDX_list{external_bra[0],external_bra[1]}, + IDX_list{L"p_11",L"p_12"}); + auto g_rspq = ex(L"g",IDX_list{L"p_11",L"p_12"}, + IDX_list{external_ket[0],external_ket[1]}); + auto F_ijmc = ex(L"F", IDX_list{external_bra[0],external_bra[1]}, + IDX_list{L"m_6", L"α'_4"}); + auto g_mcpq = ex(L"g",IDX_list{L"m_6", L"α'_4"}, + IDX_list{external_ket[0],external_ket[1]}); + auto 
F_jicm = ex(L"F", IDX_list{external_bra[1],external_bra[0]}, IDX_list{L"α'_4",L"m_6"}); + auto g_cmqp = ex(L"g",IDX_list{L"α'_4",L"m_6"},IDX_list{external_ket[1],external_ket[0]}); + + auto V = GR_ijpq - F_ijrs * g_rspq - F_ijmc * g_mcpq - F_jicm * g_cmqp; + non_canon_simplify(V); + return V; + } + else{ + auto GR_pqij = ex(L"GR", IDX_list{external_bra[0],external_bra[1]}, IDX_list{external_ket[0],external_ket[1]}); + auto F_rsij = ex(L"F",IDX_list{L"p_11",L"p_12"},IDX_list{external_ket[0],external_ket[1]}); + auto g_pqrs = ex(L"g",IDX_list{external_bra[0],external_bra[1]},IDX_list{L"p_11",L"p_12"}); + auto F_mcij = ex(L"F", IDX_list{L"m_6", L"α'_4"}, IDX_list{external_ket[0],external_ket[1]}); + auto g_pqmc = ex(L"g",IDX_list{external_bra[0],external_bra[1]},IDX_list{L"m_6", L"α'_4"}); + auto F_cmji = ex(L"F",IDX_list{L"α'_4",L"m_6"},IDX_list{external_ket[1],external_ket[0]}); + auto g_qpcm = ex(L"g",IDX_list{external_bra[1],external_bra[0]},IDX_list{L"α'_4",L"m_6"}); + + auto V = GR_pqij - F_rsij * g_pqrs - F_mcij * g_pqmc - F_cmji * g_qpcm; + non_canon_simplify(V); + return V; + } } else{ result = T1 * T2; @@ -593,7 +627,7 @@ ExprPtr biproduct_intermediate(ExprPtr T1,ExprPtr T2){ } return result; } -Product find_f12_interms(ExprPtr ex_){ +ExprPtr find_f12_interms(ExprPtr ex_){ assert(ex_->is()); int counter = 0; std::vector T1_T2; @@ -625,9 +659,9 @@ Product find_f12_interms(ExprPtr ex_){ result = result * ex_; non_canon_simplify(result); - return result->as(); + return result; } - return ex_->as(); + return ex_; } //in hamiltonian based transformations, it is important to retain the original form of the hamiltonian operator. that is h^p_q E^q_p + 1/2 g^{pq}_{rs} E^{rs}_{pq}. 
@@ -719,7 +753,7 @@ ExprPtr screen_F12_and_density(ExprPtr exprs){ new_product = ex(0.); break; } - new_product = temp_factor * new_product; + new_product = new_product * temp_factor; non_canon_simplify(new_product); } @@ -751,7 +785,7 @@ ExprPtr screen_F12_and_density(ExprPtr exprs){ new_product = product_clone; break; } - new_product = temp_factor * new_product; + new_product = new_product * temp_factor; } return new_product; } @@ -769,11 +803,11 @@ ExprPtr FNOPs_to_tens(ExprPtr ex_){ assert(!new_factor->is()); } else{new_factor = factor;} - new_product = new_factor * new_product; + new_product = new_product * new_factor; } new_sum = new_product + new_sum; } - simplify(new_sum); + non_canon_simplify(new_sum); return new_sum; } else if(ex_->is()){ @@ -841,7 +875,6 @@ ExprPtr split_f(ExprPtr exprs){ //std::wcout << "after split: " << to_latex_align(result,20,2) << std::endl; return result; } - return result; } ExprPtr partition_f(ExprPtr exprs){ @@ -876,20 +909,26 @@ std::pair hamiltonian_based(ExprPtr exprs){ non_canon_simplify(exprs); //exprs = overlap_with_obs(exprs); exprs = partition_f(exprs); + non_canon_simplify(exprs); //std::wcout << "post convert to tensor: " << to_latex_align(exprs,20,2) << std::endl; - exprs = screen_F12_and_density(exprs); + //exprs = screen_F12_and_density(exprs); //std::wcout << "post screen f12: " << to_latex_align(exprs,20,2) << std::endl; + non_canon_simplify(exprs); exprs = screen_densities(exprs); //std::wcout << "post screen density: " << to_latex_align(exprs,20,2) << std::endl; - exprs = densities_to_occ(exprs); + //exprs = densities_to_occ(exprs); + //f12 interms needs a particular canonical ordering + non_canon_simplify(exprs); //std::wcout << "densities to occ: " << to_latex_align(exprs,20,2) << std::endl; + auto exprs_intmed = ex(0.0); for (auto&& product : exprs->as().summands()){ - product->as() = simplification::find_f12_interms(product); + auto new_product = simplification::find_f12_interms(product); + exprs_intmed = 
new_product + exprs_intmed; } //std::wcout << "post intermediates: " << to_latex_align(exprs,20,2) << std::endl; - non_canon_simplify(exprs); - return fnop_to_overlap(exprs); + non_canon_simplify(exprs_intmed); + return fnop_to_overlap(exprs_intmed); } //TODO generalize for spin-orbital basis @@ -911,11 +950,12 @@ std::pair fock_based (ExprPtr exprs){ exprs = partition_f(exprs); //exprs = overlap_with_obs(exprs); auto final_screen = exprs; + non_canon_simplify(final_screen); //in some cases, there will now be no contributing terms left so return zero to one and two body. if(final_screen->is()){ return std::pair {final_screen, final_screen}; } - final_screen = screen_F12_and_density(final_screen); + //final_screen = screen_F12_and_density(final_screen); non_canon_simplify(final_screen); //std::wcout << "screen F12: " << to_latex_align(final_screen,20,2) << std::endl; final_screen = treat_fock(final_screen); @@ -923,21 +963,21 @@ if(final_screen->is()){ //std::wcout << "screen fock: " << to_latex_align(final_screen,20,2) << std::endl; final_screen = screen_densities(final_screen); non_canon_simplify(final_screen); - //std::wcout << "screen densities: " << to_latex_align(final_screen,20,2) << std::endl; - non_canon_simplify(final_screen); //enforce that densities are in the occupied space since they are only non-zero in occ - final_screen = densities_to_occ(final_screen); - non_canon_simplify(final_screen); + //final_screen = densities_to_occ(final_screen); + //non_canon_simplify(final_screen); //std::wcout << "screen densities to occ: " << to_latex_align(final_screen,20,2) << std::endl; // std::wcout << "pre intermediates: " << to_latex_align(final_screen,20,2) << std::endl; //find the special f12 intermediates that cannot efficiently be solved directly. This seems to work already for the general case! 
+ auto last_screen = ex(0.0); for (auto&& product : final_screen->as().summands()){ - product->as() = simplification::find_f12_interms(product); + auto new_product = simplification::find_f12_interms(product); + last_screen = last_screen + new_product; } //::wcout << "post intermediates: " << to_latex_align(final_screen,20,2) << std::endl; - non_canon_simplify(final_screen); + non_canon_simplify(last_screen); - return fnop_to_overlap(final_screen); + return fnop_to_overlap(last_screen); } } #ifndef SEQUANT_SIMPLIFICATIONS_H From 41967c69629d30ad589163e3ba9e626fb656b739 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Tue, 11 Jan 2022 14:05:11 -0500 Subject: [PATCH 012/120] removed redundant canonicalization steps. removed arbitrary 1/2 prefactor which prior code seems to have fixed. --- SeQuant/domain/eqs/single_ref_uccf12.h | 10 +++++++++- SeQuant/domain/transcorrelated/simplifications.h | 8 ++++---- SeQuant/domain/transcorrelated/three_body_decomp.hpp | 4 ---- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index 6a205317c..ea09ba5a7 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -48,7 +48,7 @@ class uccf12{ //std::wcout << to_latex_align(second_com,20,2) << std::endl; second_com = simplification::tens_to_FNOps(second_com); second_com = decompositions::three_body_substitution(second_com,2); - second_com = ex(1./2) * second_com; + second_com = ex(1./1) * second_com; simplify(second_com); return second_com; } @@ -160,6 +160,14 @@ class uccf12{ //cannot use non_canon_simplify here because of B term. 
non_canon_simplify(one_body); non_canon_simplify(two_body); + int term_count = 0; + for (auto i =0; i < one_body->as().summands().size(); i++){ + term_count +=1; + } + for (auto i =0; i < two_body->as().summands().size(); i++){ + term_count +=1; + } + std::cout << "number of terms: " << term_count << std::endl; if (print){ std::wcout << "one body terms: " << to_latex_align(one_body,20,2) << std::endl; diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index 4522207d0..c2aa2266e 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -838,7 +838,7 @@ ExprPtr tens_to_FNOps(ExprPtr ex_){ } new_sum = new_product + new_sum; } - simplify(new_sum); + non_canon_simplify(new_sum); return new_sum; } else if(ex_->is()){ @@ -911,7 +911,7 @@ std::pair hamiltonian_based(ExprPtr exprs){ exprs = partition_f(exprs); non_canon_simplify(exprs); //std::wcout << "post convert to tensor: " << to_latex_align(exprs,20,2) << std::endl; - //exprs = screen_F12_and_density(exprs); + exprs = screen_F12_and_density(exprs); //std::wcout << "post screen f12: " << to_latex_align(exprs,20,2) << std::endl; non_canon_simplify(exprs); exprs = screen_densities(exprs); @@ -926,7 +926,7 @@ std::pair hamiltonian_based(ExprPtr exprs){ exprs_intmed = new_product + exprs_intmed; } //std::wcout << "post intermediates: " << to_latex_align(exprs,20,2) << std::endl; - + //tens_to_FNOps(exprs_intmed); non_canon_simplify(exprs_intmed); return fnop_to_overlap(exprs_intmed); } @@ -976,7 +976,7 @@ if(final_screen->is()){ } //::wcout << "post intermediates: " << to_latex_align(final_screen,20,2) << std::endl; non_canon_simplify(last_screen); - + //tens_to_FNOps(last_screen); return fnop_to_overlap(last_screen); } } diff --git a/SeQuant/domain/transcorrelated/three_body_decomp.hpp b/SeQuant/domain/transcorrelated/three_body_decomp.hpp index 38c050831..0328e7bdb 100644 --- 
a/SeQuant/domain/transcorrelated/three_body_decomp.hpp +++ b/SeQuant/domain/transcorrelated/three_body_decomp.hpp @@ -173,8 +173,6 @@ std::pair, std::vector>> three_body else{ } } - - temp_result->canonicalize(); simplify(temp_result); for (auto&& product : temp_result->as().summands()){//replace the one body terms with the substituted expression if(product->is()) { @@ -227,7 +225,6 @@ std::pair, std::vector>> three_body auto ex_pair = three_body_decomp(_ex,true); _ex = ex_pair.first; initial_pairing = ex_pair.second; - _ex->canonicalize(); simplify(_ex); for (auto&& product : _ex->as().summands()) { if (product->is()) { @@ -250,7 +247,6 @@ std::pair, std::vector>> three_body } } } - _ex->canonicalize(); simplify(_ex); //std::wcout << " cumulant replacment: " << to_latex_align(_ex,20, 7) << std::endl; } else if (rank == 1) { From bc535fc9c328797134b38b7d0c5fc5d1fd28213b Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 13 Jan 2022 12:55:21 -0500 Subject: [PATCH 013/120] rename clone.hpp and add Expr level clone functions --- SeQuant/core/clone.cpp | 29 ------------ SeQuant/core/clone.hpp | 21 --------- SeQuant/core/clone_packed.cpp | 47 +++++++++++++++++++ SeQuant/core/clone_packed.hpp | 28 +++++++++++ .../{test_clone.cpp => test_clone_packed.cpp} | 18 ++++--- 5 files changed, 83 insertions(+), 60 deletions(-) delete mode 100644 SeQuant/core/clone.cpp delete mode 100644 SeQuant/core/clone.hpp create mode 100644 SeQuant/core/clone_packed.cpp create mode 100644 SeQuant/core/clone_packed.hpp rename tests/unit/{test_clone.cpp => test_clone_packed.cpp} (81%) diff --git a/SeQuant/core/clone.cpp b/SeQuant/core/clone.cpp deleted file mode 100644 index 928706a40..000000000 --- a/SeQuant/core/clone.cpp +++ /dev/null @@ -1,29 +0,0 @@ -// -// Created by Bimal Gaudel on 9/22/21. 
-// - -#include "clone.hpp" -#include "expr.hpp" - -sequant::ExprPtr sequant::clone(sequant::ExprPtr expr) { - using ranges::views::transform; - - if (!expr) return nullptr; - else if (expr->is()){ - auto const smands = *expr - | transform([](ExprPtr x){return clone(x);}) - | ranges::to_vector; - return ex(smands.begin(), smands.end()); - } - else if (expr->is()){ - auto const facs = *expr - | transform([](ExprPtr x){return clone(x);}); - auto const scal = expr->as().scalar(); - - auto result = ex(scal, ExprPtrList{}); - for (auto&& f: facs) - result->as().append(f); - return result; - } - else return expr->clone(); -} diff --git a/SeQuant/core/clone.hpp b/SeQuant/core/clone.hpp deleted file mode 100644 index e8f40bad7..000000000 --- a/SeQuant/core/clone.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// -// Created by Bimal Gaudel on 9/22/21. -// - -#ifndef SEQUANT_CLONE_HPP -#define SEQUANT_CLONE_HPP - -#include "expr_fwd.hpp" - -namespace sequant { - -/// -/// Clone an expression by preserving nested structures. -/// -/// \param expr expression to be cloned -/// \return a cloned copy of \c expr -ExprPtr clone(ExprPtr expr); - -} // namespace - -#endif // SEQUANT_CLONE_HPP diff --git a/SeQuant/core/clone_packed.cpp b/SeQuant/core/clone_packed.cpp new file mode 100644 index 000000000..98d7ad034 --- /dev/null +++ b/SeQuant/core/clone_packed.cpp @@ -0,0 +1,47 @@ +// +// Created by Bimal Gaudel on 9/22/21. 
+// + +#include "clone_packed.hpp" +#include "expr.hpp" +#include "tensor.hpp" + +namespace sequant { + +using ranges::views::transform; + +ExprPtr clone_packed(Tensor const& t) { + return t.clone(); +} + +ExprPtr clone_packed(Sum const& s) { + auto const smands = s + | transform([](ExprPtr x){return clone_packed(x);}) + | ranges::to_vector; + return ex(smands.begin(), smands.end()); +} + +ExprPtr clone_packed(Product const& p) { + auto const facs = p + | transform([](ExprPtr x){return clone_packed(x);}); + auto const scal = p.scalar(); + + auto result = ex(scal, ExprPtrList{}); + for (auto&& f: facs) + result->as().append(f); + return result; +} + +ExprPtr clone_packed(ExprPtr expr) { + + if (!expr) return nullptr; + else if (expr->is()){ + return clone_packed(expr->as()); + } + else if (expr->is()){ + return clone_packed(expr->as()); + } + else return expr->clone(); +} + +} // diff --git a/SeQuant/core/clone_packed.hpp b/SeQuant/core/clone_packed.hpp new file mode 100644 index 000000000..11352540f --- /dev/null +++ b/SeQuant/core/clone_packed.hpp @@ -0,0 +1,28 @@ +// +// Created by Bimal Gaudel on 9/22/21. +// + +#ifndef SEQUANT_CLONE_PACKED_HPP +#define SEQUANT_CLONE_PACKED_HPP + +#include "expr.hpp" +#include "tensor.hpp" + +namespace sequant { + +/// +/// Clone an expression by preserving nested structures. +/// +/// \param expr expression to be cloned +/// \return a cloned copy of \c expr +ExprPtr clone_packed(ExprPtr expr); + +ExprPtr clone_packed(Sum const&); + +ExprPtr clone_packed(Product const&); + +ExprPtr clone_packed(Tensor const&); + +} // namespace + +#endif // SEQUANT_CLONE_PACKED_HPP diff --git a/tests/unit/test_clone.cpp b/tests/unit/test_clone_packed.cpp similarity index 81% rename from tests/unit/test_clone.cpp rename to tests/unit/test_clone_packed.cpp index d40d00cd9..b1ae9d341 100644 --- a/tests/unit/test_clone.cpp +++ b/tests/unit/test_clone_packed.cpp @@ -2,18 +2,16 @@ // Created by Bimal Gaudel on 9/22/21. 
// #include "catch.hpp" -#include +#include #include -TEST_CASE("TEST_CLONE", "[clone]") { +TEST_CASE("TEST_CLONE_PACKED", "[clone_packed]") { using namespace sequant; SECTION("Tensor") { - REQUIRE(clone(ex(L"t", - IndexList{L"i_1"}, - IndexList{L"a_1"}))->is()); + REQUIRE(clone_packed(ex(L"t", IndexList{L"i_1"}, IndexList{L"a_1"}))->is()); } SECTION("Constant") { - REQUIRE(clone(ex(1))->is()); + REQUIRE(clone_packed(ex(1))->is()); } SECTION("Product") { @@ -27,7 +25,7 @@ TEST_CASE("TEST_CLONE", "[clone]") { IndexList{L"a_2"}, IndexList{L"i_1"}); auto prod1 = ex(ExprPtrList{t1,t2,t3}); - REQUIRE(prod1 == clone(prod1)); + REQUIRE(prod1 == clone_packed(prod1)); auto prod2 = ex(1./2,ExprPtrList{}); prod2->as().append(t1); @@ -36,7 +34,7 @@ TEST_CASE("TEST_CLONE", "[clone]") { REQUIRE(prod2->at(0)->is()); REQUIRE(prod2->at(1)->is()); - REQUIRE(*clone(prod2) == *prod2); + REQUIRE(*clone_packed(prod2) == *prod2); } SECTION("Sum") { @@ -59,6 +57,6 @@ TEST_CASE("TEST_CLONE", "[clone]") { auto prod1 = ex(-1, ExprPtrList{g1, t1}); auto prod2 = ex(-1./2, ExprPtrList{g2,t2,t3}); auto sum = ex(ExprPtrList{prod1, prod2}); - REQUIRE(*sum == *clone(sum)); + REQUIRE(*sum == *clone_packed(sum)); } -} \ No newline at end of file +} From 2a0a1f95e94c1b2bef36557bbe2d0fe7acef706b Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 13 Jan 2022 13:04:53 -0500 Subject: [PATCH 014/120] Updated version of single term optimization used by default --- CMakeLists.txt | 10 ++- SeQuant/core/optimize.hpp | 93 +++++++++++++++++++++ SeQuant/core/optimize/optimize.cpp | 125 ++++++++++++++++++++++++++++- 3 files changed, 223 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index dbf0f5946..fcab8d8fc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -188,8 +188,8 @@ add_library(SeQuant SeQuant/core/bliss.hpp SeQuant/core/timer.hpp SeQuant/core/binary_node.hpp - SeQuant/core/clone.hpp - SeQuant/core/clone.cpp + SeQuant/core/clone_packed.hpp + SeQuant/core/clone_packed.cpp 
SeQuant/core/eval_seq.hpp SeQuant/core/eval_expr.hpp SeQuant/core/eval_expr.cpp @@ -277,7 +277,7 @@ set(utests_src tests/unit/test_optimize.cpp tests/unit/test_token_sequant.cpp tests/unit/test_rpn.cpp - tests/unit/test_clone.cpp + tests/unit/test_clone_packed.cpp ) if (TARGET tiledarray) @@ -308,6 +308,10 @@ set_tests_properties(sequant/unit/run PROPERTIES DEPENDS sequant/unit/build WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/unit) +if (DEFINED CLI11_DIR) + add_subdirectory(scratch) +endif() + ####### Tests ######## # Single-Reference Coupled-Cluster equation generation (spin-orbital) diff --git a/SeQuant/core/optimize.hpp b/SeQuant/core/optimize.hpp index de17fe593..f85ea39a6 100644 --- a/SeQuant/core/optimize.hpp +++ b/SeQuant/core/optimize.hpp @@ -2,11 +2,13 @@ #define SEQUANT_OPTIMIZE_OPTIMIZE_HPP #include +#include #include #include #include #include +#include namespace sequant { /// Optimize an expression assuming the number of virtual orbitals @@ -18,6 +20,55 @@ EvalNode optimize(ExprPtr const& expr); namespace opt { +namespace detail { +/// +/// For n number of bits, assuming all of them are on, +/// bipartition them into all possibilities, except for +/// the trivial (all zero bits, all one bits) partition +/// eg. 
for n = 3 +/// (001, 110) +/// (010, 101) +/// (011, 100) +/// +template , bool> = true> +void scan_biparts_all_bits(size_t n, F&& scanner) { + auto ulim = (1 << n); + for (auto i = 1; i < ulim / 2; ++i) + std::invoke(std::forward(scanner), i, (ulim - 1 - i)); +} + +/// +/// given positions of bits that are on, +/// P = {i, j,...,m} +/// generate binary partitions of the positions +/// such as {m} and {i, j, ...} (= P - {m}) and so on +/// except for the trivial {i, j,...,m} {} partition +/// +template , bool> = true> +void scan_biparts_some_bits(std::vector const& bs, F&& scanner) { + scan_biparts_all_bits(bs.size(), [&scanner, &bs](size_t a1, size_t _) { + size_t p1{0}, p2{0}; + for (auto i = 0; i < bs.size(); ++i) { + // if ith bit is on, ith elem in bs is included in p1 + // else it is included in p2 + if ((1 << i) & a1) + p1 |= (1 << bs[i]); + else + p2 |= (1 << bs[i]); + } + std::invoke(std::forward(scanner), p1, p2); + }); +} + +/// given a number @c n, return a vector of ON bit positions +/// only first num_bits bits will be checked from right to left +/// in the bit representation of @c n +std::vector on_bits_pos(size_t n, size_t num_bits = sizeof(size_t) * 8); + +} // namespace detail + /// /// Omit the first factor from the top level product from given expression. 
/// Intended to drop "A" and "S" tensors from CC amplitudes as a preparatory @@ -109,6 +160,48 @@ STOResult single_term_opt( return result; } +using eval_seq_t = container::vector; + +struct OptRes { + container::vector indices; + double flops; + eval_seq_t sequence; +}; + +/// returns a pair of index vectors +/// first element of the pair is the vector of common indices compared by labels +/// second element of the pair is the set symmetric difference of the input +/// index vectors if either of the input index container is empty, the result is +/// a pair of empty vectors +/// @note I1 and I2 containers are assumed to be sorted by using +/// Index::LabelCompare{}; +template +std::pair, container::vector> common_indices( + I1 const& idxs1, I2 const& idxs2) { + container::vector i1vec(ranges::begin(idxs1), ranges::end(idxs1)), + i2vec(ranges::begin(idxs2), ranges::end(idxs2)); + if (i1vec.empty() || i2vec.empty()) return {{}, {}}; + + container::vector commons, diffs; + std::set_intersection(std::begin(i1vec), std::end(i1vec), std::begin(i2vec), + std::end(i2vec), std::back_inserter(commons), + Index::LabelCompare{}); + std::set_symmetric_difference( + std::begin(i1vec), std::end(i1vec), std::begin(i2vec), std::end(i2vec), + std::back_inserter(diffs), Index::LabelCompare{}); + return {commons, diffs}; +} + +double log_flops(container::vector const& commons, + container::vector const& diffs, double log_nocc, + double log_nvirt); + +eval_seq_t single_term_opt_v2(TensorNetwork const& network, size_t nocc, + size_t nvirt); + +// @c prod is assumed to consist of only Tensor expressions +ExprPtr single_term_opt_v2(Product const& prod, size_t nocc, size_t nvirt); + } // namespace opt } // namespace sequant diff --git a/SeQuant/core/optimize/optimize.cpp b/SeQuant/core/optimize/optimize.cpp index e68b2549d..8ab005e07 100644 --- a/SeQuant/core/optimize/optimize.cpp +++ b/SeQuant/core/optimize/optimize.cpp @@ -1,9 +1,20 @@ -#include "SeQuant/core/optimize.hpp" +#include 
+#include namespace sequant { namespace opt { +namespace detail { +std::vector on_bits_pos(size_t n, size_t num_bits) { + auto result = std::vector{}; + result.reserve(num_bits); + for (auto i = 0; i < num_bits; ++i) + if (n & (1 << i)) result.push_back(i); + return result; +} +} // namespace detail + ExprPtr tail_factor(ExprPtr const& expr) noexcept { if (expr->is()) return expr->clone(); @@ -37,14 +48,124 @@ void pull_scalar(sequant::ExprPtr expr) noexcept { prod.scale(scal); } +double log_flops(container::vector const& commons, + container::vector const& diffs, double log_nocc, + double log_nvirt) { + double res = 0; + for (auto&& idx : ranges::views::concat(commons, diffs)) + if (idx.space() == IndexSpace::active_occupied) + res += log_nocc; + else if (idx.space() == IndexSpace::active_unoccupied) + res += log_nvirt; + else + throw std::runtime_error( + "Unexpected IndexSpace encountered while computing flops."); + return res; +} + +eval_seq_t single_term_opt_v2(TensorNetwork const& network, size_t nocc, + size_t nvirt) { + // number of terms + auto const nt = network.tensors().size(); + auto nth_tensor_indices = container::vector>{}; + nth_tensor_indices.reserve(nt); + + for (auto i = 0; i < nt; ++i) { + auto bk = container::vector{}; + for (auto idx : braket(*network.tensors().at(i))) bk.push_back(idx); + + ranges::sort(bk, Index::LabelCompare{}); + nth_tensor_indices.emplace_back(std::move(bk)); + } + double const log_nocc = std::log10(nocc); + double const log_nvirt = std::log10(nvirt); + + // initialize result + container::vector result{}; + result.reserve(1 << nt); + result[0] = OptRes{{}, 0, {}}; + + // power_pos is used, and incremented, only when the + // result[1<<0] + // result[1<<1] + // result[1<<2] + // and so on are set + size_t power_pos = 0; + for (auto n = 1; n < (1 << nt); ++n) { + double cost = std::numeric_limits::max(); + auto const on_bits = detail::on_bits_pos(n, nt); + size_t p1 = 0, p2 = 0; + container::vector tindices{}; + 
detail::scan_biparts_some_bits( + on_bits, [&result = std::as_const(result), &tindices, log_nocc, + log_nvirt, &cost, &p1, &p2](auto p1_, auto p2_) { + auto [commons, diffs] = + common_indices(result[p1_].indices, result[p2_].indices); + auto new_cost = log_flops(commons, diffs, log_nocc, log_nvirt) + + result[p1_].flops + result[p2_].flops; + if (new_cost < cost) { + cost = new_cost; + tindices = std::move(diffs); + p1 = p1_; + p2 = p2_; + } + }); // + + auto seq = eval_seq_t{}; + if (tindices.empty()) { + cost = 0; + tindices = std::move(nth_tensor_indices[power_pos]); + // seq = make_sequence(++power_pos); + seq = eval_seq_t{static_cast(power_pos++)}; + } else { + // cost set + // tindices set + seq = ranges::views::concat(result[p1].sequence, result[p2].sequence) | ranges::to; + seq.push_back(-1); + } + + result[n].flops = cost; + result[n].indices = std::move(tindices); + result[n].sequence = std::move(seq); + } + + return result[(1 << nt) - 1].sequence; +} + +ExprPtr single_term_opt_v2(Product const& prod, size_t nocc, size_t nvirt) { + auto seq = single_term_opt_v2(TensorNetwork{prod}, nocc, nvirt); + auto result = container::vector{}; + for (auto i: seq) + if (i==-1){ + auto rexpr = *result.rbegin(); + result.pop_back(); + auto lexpr = *result.rbegin(); + result.pop_back(); + auto p = Product{}; + p.append(lexpr); + p.append(rexpr); + result.push_back(clone_packed(p)); + } else { + result.push_back(prod.at(i)); + } + + (*result.rbegin())->as().scale(prod.scalar()); + return *result.rbegin(); +} + } // namespace opt EvalNode optimize(const ExprPtr& expr) { + static const size_t NOCC = 10; + static const size_t NVIRT = 100; + using ranges::views::transform; if (expr->is()) return to_eval_node(expr); else if (expr->is()) { - return *(opt::single_term_opt(expr->as()).optimal_seqs.begin()); + // return *(opt::single_term_opt(expr->as()).optimal_seqs.begin()); + auto opt_expr = opt::single_term_opt_v2(expr->as(), NOCC, NVIRT); + return to_eval_node(opt_expr); } 
else if (expr->is()) { auto smands = *expr | transform([](auto const& s) { return to_expr(optimize(s)); From abdd657b72cf480dec2b33477ea32e354b8df563 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 13 Jan 2022 19:38:52 -0500 Subject: [PATCH 015/120] Bug fix --- SeQuant/core/optimize/optimize.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/SeQuant/core/optimize/optimize.cpp b/SeQuant/core/optimize/optimize.cpp index 8ab005e07..a1d8d3518 100644 --- a/SeQuant/core/optimize/optimize.cpp +++ b/SeQuant/core/optimize/optimize.cpp @@ -67,6 +67,8 @@ eval_seq_t single_term_opt_v2(TensorNetwork const& network, size_t nocc, size_t nvirt) { // number of terms auto const nt = network.tensors().size(); + if (nt == 1) return eval_seq_t{0}; + if (nt == 2) return eval_seq_t{0,1,-1}; auto nth_tensor_indices = container::vector>{}; nth_tensor_indices.reserve(nt); @@ -80,10 +82,7 @@ eval_seq_t single_term_opt_v2(TensorNetwork const& network, size_t nocc, double const log_nocc = std::log10(nocc); double const log_nvirt = std::log10(nvirt); - // initialize result - container::vector result{}; - result.reserve(1 << nt); - result[0] = OptRes{{}, 0, {}}; + container::vector result((1<{}; for (auto i: seq) From 711c8913b79a6e33592a4f4aaf9095caad695ae2 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 13 Jan 2022 19:44:56 -0500 Subject: [PATCH 016/120] make rand color generator seedable --- SeQuant/domain/utils/rand_color.cpp | 13 ++++++++----- SeQuant/domain/utils/rand_color.hpp | 6 ++++-- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/SeQuant/domain/utils/rand_color.cpp b/SeQuant/domain/utils/rand_color.cpp index f593d93cf..f13a80078 100644 --- a/SeQuant/domain/utils/rand_color.cpp +++ b/SeQuant/domain/utils/rand_color.cpp @@ -6,18 +6,21 @@ namespace sequant::utils { -rand_color::rand_color() +RandColor::RandColor() : randEngine{[]() { std::random_device seeder; const auto seed = seeder.entropy() ? 
seeder() : std::time(nullptr); return static_cast(seed); }()} {} -std::array rand_color::rand_rgb(double sat, double brit) { - return rand_color::hsv_to_rgb(rand_hue(), sat, brit); +RandColor::RandColor(int seed) + : randEngine{static_cast(seed)} {} + +std::array RandColor::rand_rgb(double sat, double brit) { + return RandColor::hsv_to_rgb(rand_hue(), sat, brit); } -double rand_color::rand_hue() { +double RandColor::rand_hue() { auto attempt = [this]() { auto hue = GOLDEN_RATIO_CONJ + uniRealDist(randEngine); return hue_cache_.emplace(hue > 1 ? hue - 1 : hue); @@ -31,7 +34,7 @@ double rand_color::rand_hue() { return *result.first; } -std::array rand_color::hsv_to_rgb(double h, double s, double v) { +std::array RandColor::hsv_to_rgb(double h, double s, double v) { // https://martin.ankerl.com/2009/12/09/how-to-create-random-colors-programmatically/ size_t h_i = (size_t)(h * 6); double f = h * 6 - h_i; diff --git a/SeQuant/domain/utils/rand_color.hpp b/SeQuant/domain/utils/rand_color.hpp index 19a4cd57f..021771f1f 100644 --- a/SeQuant/domain/utils/rand_color.hpp +++ b/SeQuant/domain/utils/rand_color.hpp @@ -16,7 +16,7 @@ namespace sequant::utils { * @author Bimal Gaudel * @version 29 Sep 2020 */ -class rand_color { +class RandColor { private: static constexpr double GOLDEN_RATIO_CONJ = 0.618033988749895; @@ -27,7 +27,9 @@ class rand_color { std::set hue_cache_; public: - rand_color(); + RandColor(); + + explicit RandColor(int seed); /** * Get a random color RGB hexcode for a given saturation level From cbed9fa7571937fb5ce490f65d58d0a576b02967 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Fri, 14 Jan 2022 09:55:51 -0500 Subject: [PATCH 017/120] optimize functions take an invocable argument that returns size for an index --- SeQuant/core/optimize.hpp | 141 +++++++++++++++++++++++++++-- SeQuant/core/optimize/optimize.cpp | 129 +------------------------- 2 files changed, 134 insertions(+), 136 deletions(-) diff --git a/SeQuant/core/optimize.hpp 
b/SeQuant/core/optimize.hpp index f85ea39a6..e7b03d362 100644 --- a/SeQuant/core/optimize.hpp +++ b/SeQuant/core/optimize.hpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -16,7 +17,7 @@ namespace sequant { /// \param expr Expression to be optimized. /// \return EvalNode object. -EvalNode optimize(ExprPtr const& expr); +// EvalNode optimize(ExprPtr const& expr); namespace opt { @@ -98,7 +99,19 @@ struct STOResult { container::vector optimal_seqs; }; -/// Perform single term optimization on a product. +template , + bool> = true> +double log_flops(IdxToSz const& idxsz, + container::vector const& commons, + container::vector const& diffs) { + double res = 0; + for (auto&& idx : ranges::views::concat(commons, diffs)) + res += std::log10(std::invoke(idxsz, idx)); + return res; +} + +/// Perform single term optimization on a product. Deprecated. /// /// @return STOResult template , @@ -192,17 +205,129 @@ std::pair, container::vector> common_indices( return {commons, diffs}; } -double log_flops(container::vector const& commons, - container::vector const& diffs, double log_nocc, - double log_nvirt); +template , + bool> = true> +eval_seq_t single_term_opt_v2(TensorNetwork const& network, + IdxToSz const& idxsz) { + // number of terms + auto const nt = network.tensors().size(); + if (nt == 1) return eval_seq_t{0}; + if (nt == 2) return eval_seq_t{0, 1, -1}; + auto nth_tensor_indices = container::vector>{}; + nth_tensor_indices.reserve(nt); + + for (auto i = 0; i < nt; ++i) { + auto bk = container::vector{}; + for (auto idx : braket(*network.tensors().at(i))) bk.push_back(idx); + + ranges::sort(bk, Index::LabelCompare{}); + nth_tensor_indices.emplace_back(std::move(bk)); + } + // double const log_nocc = std::log10(nocc); + // double const log_nvirt = std::log10(nvirt); + + auto log_flops_ = [&idxsz](container::vector const& commons, + container::vector const& diffs) { + return log_flops(idxsz, commons, diffs); + }; + + container::vector result((1 
<< nt), OptRes{{}, 0, {}}); + + // power_pos is used, and incremented, only when the + // result[1<<0] + // result[1<<1] + // result[1<<2] + // and so on are set + size_t power_pos = 0; + for (auto n = 1; n < (1 << nt); ++n) { + double cost = std::numeric_limits::max(); + auto const on_bits = detail::on_bits_pos(n, nt); + size_t p1 = 0, p2 = 0; + container::vector tindices{}; + detail::scan_biparts_some_bits( + on_bits, [&result = std::as_const(result), &tindices, &idxsz, + &log_flops_, &cost, &p1, &p2](auto p1_, auto p2_) { + auto [commons, diffs] = + common_indices(result[p1_].indices, result[p2_].indices); + auto new_cost = log_flops_(commons, diffs) + result[p1_].flops + + result[p2_].flops; + if (new_cost < cost) { + cost = new_cost; + tindices = std::move(diffs); + p1 = p1_; + p2 = p2_; + } + }); // + + auto seq = eval_seq_t{}; + if (tindices.empty()) { + cost = 0; + tindices = std::move(nth_tensor_indices[power_pos]); + seq = eval_seq_t{static_cast(power_pos++)}; + } else { + // cost set + // tindices set + seq = ranges::views::concat(result[p1].sequence, result[p2].sequence) | + ranges::to; + seq.push_back(-1); + } + + result[n].flops = cost; + result[n].indices = std::move(tindices); + result[n].sequence = std::move(seq); + } -eval_seq_t single_term_opt_v2(TensorNetwork const& network, size_t nocc, - size_t nvirt); + return result[(1 << nt) - 1].sequence; +} // @c prod is assumed to consist of only Tensor expressions -ExprPtr single_term_opt_v2(Product const& prod, size_t nocc, size_t nvirt); +template , bool> = true> +ExprPtr single_term_opt_v2(Product const& prod, IdxToSz const& idxsz) { + if (prod.factors().size() < 3) return clone_packed(prod); + auto seq = single_term_opt_v2(TensorNetwork{prod}, idxsz); + auto result = container::vector{}; + for (auto i : seq) + if (i == -1) { + auto rexpr = *result.rbegin(); + result.pop_back(); + auto lexpr = *result.rbegin(); + result.pop_back(); + auto p = Product{}; + p.append(lexpr); + p.append(rexpr); + 
result.push_back(clone_packed(p)); + } else { + result.push_back(prod.at(i)); + } + + (*result.rbegin())->as().scale(prod.scalar()); + return *result.rbegin(); +} } // namespace opt + +template , bool> = true> +EvalNode optimize(const ExprPtr& expr, IdxToSz const& idxsz) { + using ranges::views::transform; + if (expr->is()) + return to_eval_node(expr); + else if (expr->is()) { + // return *(opt::single_term_opt(expr->as()).optimal_seqs.begin()); + auto opt_expr = opt::single_term_opt_v2(expr->as(), idxsz); + return to_eval_node(opt_expr); + } else if (expr->is()) { + auto smands = *expr | transform([&idxsz](auto const& s) { + return to_expr(optimize(s, idxsz)); + }) | ranges::to_vector; + + return to_eval_node(ex(Sum{smands.begin(), smands.end()})); + } else + throw std::runtime_error{"optimization attempted on unsupported Expr type"}; +} + } // namespace sequant #endif // SEQUANT_OPTIMIZE_OPTIMIZE_HPP diff --git a/SeQuant/core/optimize/optimize.cpp b/SeQuant/core/optimize/optimize.cpp index a1d8d3518..01e1eef1b 100644 --- a/SeQuant/core/optimize/optimize.cpp +++ b/SeQuant/core/optimize/optimize.cpp @@ -31,8 +31,7 @@ ExprPtr tail_factor(ExprPtr const& expr) noexcept { } } -void pull_scalar(sequant::ExprPtr expr) noexcept { - using sequant::Product; +void pull_scalar(ExprPtr expr) noexcept { if (!expr->is()) return; auto& prod = expr->as(); @@ -48,132 +47,6 @@ void pull_scalar(sequant::ExprPtr expr) noexcept { prod.scale(scal); } -double log_flops(container::vector const& commons, - container::vector const& diffs, double log_nocc, - double log_nvirt) { - double res = 0; - for (auto&& idx : ranges::views::concat(commons, diffs)) - if (idx.space() == IndexSpace::active_occupied) - res += log_nocc; - else if (idx.space() == IndexSpace::active_unoccupied) - res += log_nvirt; - else - throw std::runtime_error( - "Unexpected IndexSpace encountered while computing flops."); - return res; -} - -eval_seq_t single_term_opt_v2(TensorNetwork const& network, size_t nocc, - 
size_t nvirt) { - // number of terms - auto const nt = network.tensors().size(); - if (nt == 1) return eval_seq_t{0}; - if (nt == 2) return eval_seq_t{0,1,-1}; - auto nth_tensor_indices = container::vector>{}; - nth_tensor_indices.reserve(nt); - - for (auto i = 0; i < nt; ++i) { - auto bk = container::vector{}; - for (auto idx : braket(*network.tensors().at(i))) bk.push_back(idx); - - ranges::sort(bk, Index::LabelCompare{}); - nth_tensor_indices.emplace_back(std::move(bk)); - } - double const log_nocc = std::log10(nocc); - double const log_nvirt = std::log10(nvirt); - - container::vector result((1<::max(); - auto const on_bits = detail::on_bits_pos(n, nt); - size_t p1 = 0, p2 = 0; - container::vector tindices{}; - detail::scan_biparts_some_bits( - on_bits, [&result = std::as_const(result), &tindices, log_nocc, - log_nvirt, &cost, &p1, &p2](auto p1_, auto p2_) { - auto [commons, diffs] = - common_indices(result[p1_].indices, result[p2_].indices); - auto new_cost = log_flops(commons, diffs, log_nocc, log_nvirt) + - result[p1_].flops + result[p2_].flops; - if (new_cost < cost) { - cost = new_cost; - tindices = std::move(diffs); - p1 = p1_; - p2 = p2_; - } - }); // - - auto seq = eval_seq_t{}; - if (tindices.empty()) { - cost = 0; - tindices = std::move(nth_tensor_indices[power_pos]); - // seq = make_sequence(++power_pos); - seq = eval_seq_t{static_cast(power_pos++)}; - } else { - // cost set - // tindices set - seq = ranges::views::concat(result[p1].sequence, result[p2].sequence) | ranges::to; - seq.push_back(-1); - } - - result[n].flops = cost; - result[n].indices = std::move(tindices); - result[n].sequence = std::move(seq); - } - - return result[(1 << nt) - 1].sequence; -} - -ExprPtr single_term_opt_v2(Product const& prod, size_t nocc, size_t nvirt) { - if (prod.factors().size() < 3) return clone_packed(prod); - auto seq = single_term_opt_v2(TensorNetwork{prod}, nocc, nvirt); - auto result = container::vector{}; - for (auto i: seq) - if (i==-1){ - auto rexpr = 
*result.rbegin(); - result.pop_back(); - auto lexpr = *result.rbegin(); - result.pop_back(); - auto p = Product{}; - p.append(lexpr); - p.append(rexpr); - result.push_back(clone_packed(p)); - } else { - result.push_back(prod.at(i)); - } - - (*result.rbegin())->as().scale(prod.scalar()); - return *result.rbegin(); -} - } // namespace opt -EvalNode optimize(const ExprPtr& expr) { - static const size_t NOCC = 10; - static const size_t NVIRT = 100; - - using ranges::views::transform; - if (expr->is()) - return to_eval_node(expr); - else if (expr->is()) { - // return *(opt::single_term_opt(expr->as()).optimal_seqs.begin()); - auto opt_expr = opt::single_term_opt_v2(expr->as(), NOCC, NVIRT); - return to_eval_node(opt_expr); - } else if (expr->is()) { - auto smands = *expr | transform([](auto const& s) { - return to_expr(optimize(s)); - }) | ranges::to_vector; - - return to_eval_node(ex(Sum{smands.begin(), smands.end()})); - } else - throw std::runtime_error{"optimization attempted on unsupported Expr type"}; -} - } // namespace sequant From 7094a678f273785c40abefba009cf33986e7d1a8 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Fri, 14 Jan 2022 10:53:12 -0500 Subject: [PATCH 018/120] Update evaluation example --- examples/eval/calc_info.cpp | 12 +++++++++++- examples/eval/ta/data_world_ta.hpp | 2 +- external/versions.cmake | 2 +- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/examples/eval/calc_info.cpp b/examples/eval/calc_info.cpp index 488f73b4c..1776dfd8a 100644 --- a/examples/eval/calc_info.cpp +++ b/examples/eval/calc_info.cpp @@ -7,9 +7,19 @@ namespace sequant::eval { +struct IndexToSize { + static const size_t nocc = 10; + static const size_t nvirt = 100; + auto operator()(Index const& idx) const { + if (idx.space() == IndexSpace::active_occupied) return nocc; + else if (idx.space() == IndexSpace::active_unoccupied) return nvirt; + else throw std::runtime_error("Unsupported IndexSpace type encountered"); + } +}; + EvalNode CalcInfo::node_(const 
ExprPtr& expr, size_t rank) const { auto trimmed = opt::tail_factor(expr); - return optm_opts.single_term ? optimize(trimmed) + return optm_opts.single_term ? optimize(trimmed, IndexToSize{}) : to_eval_node(trimmed); } diff --git a/examples/eval/ta/data_world_ta.hpp b/examples/eval/ta/data_world_ta.hpp index 6a3617a79..e1459315b 100644 --- a/examples/eval/ta/data_world_ta.hpp +++ b/examples/eval/ta/data_world_ta.hpp @@ -39,7 +39,7 @@ class DataWorldTA { /// \param tensor Output TA::DistArray tensor. void read_tensor_ta(std::string_view fname, Tensor_t& tensor) { // TODO assert tensor single tiled or extend to handle multiply tiled case - auto ta_tensor = TA::Tensor{ + auto ta_tensor = TA::Tensor{ tensor.trange().make_tile_range(0)}; read_tensor(fname, ta_tensor); *tensor.begin() = ta_tensor; diff --git a/external/versions.cmake b/external/versions.cmake index c276bd007..7989a03b6 100644 --- a/external/versions.cmake +++ b/external/versions.cmake @@ -5,4 +5,4 @@ set(SEQUANT_TRACKED_BOOST_VERSION 1.67) set(SEQUANT_TRACKED_RANGEV3_TAG 2e0591c57fce2aca6073ad6e4fdc50d841827864) -set(SEQUANT_TRACKED_TILEDARRAY_TAG 5c768a7b121886dfe406c6dd6a38acaa8782ae6e) +set(SEQUANT_TRACKED_TILEDARRAY_TAG 4d67b8a3d04b92639324b6d7a6beedaf4f373add) From 985b6a36bf7f3dab584cea378cf8b9d630d33d14 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Fri, 14 Jan 2022 13:18:05 -0500 Subject: [PATCH 019/120] include new projector --- SeQuant/domain/eqs/single_ref_uccf12.h | 274 +++++++++++++----- .../domain/transcorrelated/simplifications.h | 214 ++++++++++---- examples/uccf12/uccf12.cpp | 10 +- 3 files changed, 352 insertions(+), 146 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index ea09ba5a7..11167e33a 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -31,26 +31,40 @@ class uccf12{ TensorCanonicalizer::register_instance(std::make_shared()); } //[[e1,e2],e3]_12 - ExprPtr 
compute_double_com(ExprPtr e1, ExprPtr e2, ExprPtr e3){ + ExprPtr compute_double_com(ExprPtr e1, ExprPtr e2, ExprPtr e3, int ansatz = 2){ auto first_com = do_wick((e1 * e2) - (e2 * e1)); auto first_com_clone = first_com->clone(); auto second_com_1 = do_wick((first_com * e3)); auto second_com_2 = do_wick(e3 * first_com); auto second_com = second_com_1 - second_com_2; simplify(second_com); - second_com = keep_up_to_3_body_terms(second_com); - //std::wcout << to_latex_align(second_com,20,2) << std::endl; - second_com = second_com + ex(0.);//make a sum to avoid heavy code duplication for product and sum variants. - second_com = simplification::overlap_with_obs(second_com); - //std::wcout << to_latex_align(second_com,20,2) << std::endl; - second_com = second_com + ex(0.); - second_com = simplification::screen_F12_and_density(second_com); - //std::wcout << to_latex_align(second_com,20,2) << std::endl; - second_com = simplification::tens_to_FNOps(second_com); - second_com = decompositions::three_body_substitution(second_com,2); - second_com = ex(1./1) * second_com; - simplify(second_com); - return second_com; + if(ansatz == 2) { + second_com = keep_up_to_3_body_terms(second_com); + // std::wcout << to_latex_align(second_com,20,2) << std::endl; + second_com = second_com + ex(0.); // make a sum to avoid heavy code duplication for product and sum variants. 
+ second_com = simplification::overlap_with_obs(second_com); + // std::wcout << to_latex_align(second_com,20,2) << std::endl; + second_com = second_com + ex(0.); + second_com = simplification::screen_F12_and_density(second_com,2); + // std::wcout << to_latex_align(second_com,20,2) << std::endl; + second_com = simplification::tens_to_FNOps(second_com); + second_com = decompositions::three_body_substitution(second_com, 2); + simplify(second_com); + return second_com; + } + if (ansatz == 1){ + second_com = keep_up_to_2_body_terms(second_com); + // std::wcout << to_latex_align(second_com,20,2) << std::endl; + second_com = second_com + ex(0.); // make a sum to avoid heavy code duplication for product and sum variants. + second_com = simplification::overlap_with_obs(second_com); + // std::wcout << to_latex_align(second_com,20,2) << std::endl; + second_com = second_com + ex(0.); + second_com = simplification::screen_F12_and_density(second_com,1); + // std::wcout << to_latex_align(second_com,20,2) << std::endl; + second_com = simplification::tens_to_FNOps(second_com); + simplify(second_com); + return second_com; + } } ExprPtr keep_up_to_3_body_terms(const ExprPtr& input) { @@ -85,6 +99,38 @@ class uccf12{ } } } + ExprPtr keep_up_to_2_body_terms(const ExprPtr& input) { + if (input->is()) {auto filtered_summands = input->as().summands() | + ranges::views::remove_if([](const ExprPtr& ptr) {assert(ptr->is()); + bool keep = false; + bool found_operator = false; + for (auto&& factor : ptr->as().factors()) { + if (factor->is()) { + assert(!found_operator); + found_operator = true; + const auto rank = factor->as().rank(); + keep = (rank <= 3); + } + } + return !keep; + }); + auto result = ex(ranges::begin(filtered_summands), + ranges::end(filtered_summands)); + return result; + } + else if (input->is()){ + for(auto&& factor : input->as().factors()){ + if(factor->is()){ + if(factor->as().rank() <= 2){ + return input; + } + else{ + return ex(0); + } + } + } + } + } ExprPtr 
do_wick(ExprPtr expr) { using sequant::FWickTheorem; @@ -95,85 +141,153 @@ class uccf12{ return result; } - std::pair compute(std::string gg_label, bool print = false) { - //auto gg_space = IndexSpace::active_occupied; // Geminal-generating space: active occupieds is the normal choice, all orbitals is the reference-independent (albeit expensive) choice + std::pair compute(std::string gg_label,int ansatz = 2, bool print = false) { + // auto gg_space = IndexSpace::active_occupied; // Geminal-generating space: active occupieds is the normal choice, all orbitals is the reference-independent (albeit expensive) choice auto gg_space = IndexSpace::frozen_occupied; - if(gg_label == "act_occ"){ + if (gg_label == "act_occ") { gg_space = IndexSpace::active_occupied; - } - else if(gg_label == "occ"){ + } else if (gg_label == "occ") { gg_space = IndexSpace::occupied; - } - else if(gg_label == "all"){ + } else if (gg_label == "all") { gg_space = IndexSpace::all; - } - else if(gg_label == "fz"){ + } else if (gg_label == "fz") { gg_space = IndexSpace::frozen_occupied; - } - else if(gg_label == "uocc"){ + } else if (gg_label == "uocc") { gg_space = IndexSpace::unoccupied; - } - else { + } else { throw " USUPPORTED SPACE LABEL! CHECK ABOVE FOR VALID ENTRIES"; } - auto h = H(false); - auto r = R12(gg_space); - auto r_1 = R12(gg_space); - - auto A = r - adjoint(r); - auto H_A = do_wick(ex(1.) 
* ((h * A) - (A * h))); - auto H_A_3 = keep_up_to_3_body_terms(H_A); - //std::wcout << "pre decomp: " << to_latex_align(single_Comm,20,2) << std::endl; - H_A_3 = simplification::overlap_with_obs(H_A_3); - H_A_3 = H_A_3 + ex(0.); - H_A_3 = simplification::screen_F12_and_density(H_A_3); - // std::wcout << to_latex_align(H_A_3,20,2) << std::endl; - H_A_3 = simplification::tens_to_FNOps(H_A_3); - auto H_A_2 = decompositions::three_body_substitution(H_A_3,2); - simplify(H_A_2); - auto com_1 = simplification::hamiltonian_based(H_A_2); - - auto fFF = ex(1./2) * compute_double_com(F(),r,r_1); - non_canon_simplify(fFF); - auto fFFt = ex(1./2) * compute_double_com(F(),r,ex(-1.) * adjoint(r_1)); - non_canon_simplify(fFFt); - auto fFtFt = ex(1./2) * compute_double_com(F(),ex(-1.) * adjoint(r),ex(-1.) * adjoint(r_1)); - non_canon_simplify(fFtFt); - auto fFtF = ex(1./2) * compute_double_com(F(),ex(-1.) * adjoint(r),r_1); - non_canon_simplify(fFtF); - - auto fFF_sim = simplification::fock_based(fFF); - //std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << std::endl; - auto fFFt_sim = simplification::fock_based(fFFt); - //std::wcout << "FFt: " << to_latex_align(fFFt_sim.second,20,2) << std::endl; - auto fFtFt_sim = simplification::fock_based(fFtFt); - //std::wcout << "FtFt: " << to_latex_align(fFtFt_sim.second,20,2) << std::endl; - auto fFtF_sim = simplification::fock_based(fFtF); - //std::wcout << "FtF: " << to_latex_align(fFtF_sim.second,20,2) << std::endl; - - - auto one_body = com_1.first + (fFF_sim.first +fFFt_sim.first + fFtFt_sim.first + fFtF_sim.first); - auto two_body = com_1.second + (fFF_sim.second + fFFt_sim.second + fFtFt_sim.second + fFtF_sim.second); - - //cannot use non_canon_simplify here because of B term. 
- non_canon_simplify(one_body); - non_canon_simplify(two_body); - int term_count = 0; - for (auto i =0; i < one_body->as().summands().size(); i++){ - term_count +=1; - } - for (auto i =0; i < two_body->as().summands().size(); i++){ - term_count +=1; + if (ansatz == 2) { + auto h = H(false); + auto r = R12(gg_space); + auto r_1 = R12(gg_space); + + auto A = r - adjoint(r); + auto H_A = do_wick(ex(1.) * ((h * A) - (A * h))); + auto H_A_3 = keep_up_to_3_body_terms(H_A); + // std::wcout << "pre decomp: " << to_latex_align(single_Comm,20,2) << std::endl; + H_A_3 = simplification::overlap_with_obs(H_A_3); + H_A_3 = H_A_3 + ex(0.); + H_A_3 = simplification::screen_F12_and_density(H_A_3,2); + // std::wcout << to_latex_align(H_A_3,20,2) << std::endl; + H_A_3 = simplification::tens_to_FNOps(H_A_3); + auto H_A_2 = decompositions::three_body_substitution(H_A_3, 2); + simplify(H_A_2); + auto com_1 = simplification::hamiltonian_based_projector_2(H_A_2); + + auto fFF = ex(1. / 2) * compute_double_com(F(), r, r_1); + non_canon_simplify(fFF); + auto fFFt = ex(1. / 2) * + compute_double_com(F(), r, ex(-1.) * adjoint(r_1)); + non_canon_simplify(fFFt); + auto fFtFt = ex(1. / 2) * + compute_double_com(F(), ex(-1.) * adjoint(r), + ex(-1.) * adjoint(r_1)); + non_canon_simplify(fFtFt); + auto fFtF = ex(1. / 2) * + compute_double_com(F(), ex(-1.) 
* adjoint(r), r_1); + non_canon_simplify(fFtF); + + auto fFF_sim = simplification::fock_based_projector_2(fFF); + // std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << std::endl; + auto fFFt_sim = simplification::fock_based_projector_2(fFFt); + // std::wcout << "FFt: " << to_latex_align(fFFt_sim.second,20,2) << std::endl; + auto fFtFt_sim = simplification::fock_based_projector_2(fFtFt); + // std::wcout << "FtFt: " << to_latex_align(fFtFt_sim.second,20,2) << std::endl; + auto fFtF_sim = simplification::fock_based_projector_2(fFtF); + // std::wcout << "FtF: " << to_latex_align(fFtF_sim.second,20,2) << std::endl; + + auto one_body = com_1.first + (fFF_sim.first + fFFt_sim.first + + fFtFt_sim.first + fFtF_sim.first); + auto two_body = com_1.second + (fFF_sim.second + fFFt_sim.second + + fFtFt_sim.second + fFtF_sim.second); + + // cannot use non_canon_simplify here because of B term. + non_canon_simplify(one_body); + non_canon_simplify(two_body); + int term_count = 0; + for (auto i = 0; i < one_body->as().summands().size(); i++) { + term_count += 1; + } + for (auto i = 0; i < two_body->as().summands().size(); i++) { + term_count += 1; + } + std::cout << "number of terms: " << term_count << std::endl; + + if (print) { + std::wcout << "one body terms: " << to_latex_align(one_body, 20, 2) + << std::endl; + std::wcout << "two body terms: " << to_latex_align(two_body, 20, 2) + << std::endl; + } + return std::pair{one_body, two_body}; } - std::cout << "number of terms: " << term_count << std::endl; + // If we use the 2 body approximation, all terms with Density fall out since they will happen to contain off diagonal G elements. + // we would get the same result if we kept the decomposition and simplified, but this should save time. + if(ansatz == 1){ + auto h = H(false); + auto r = R12(gg_space); + auto r_1 = R12(gg_space); + + auto A = r - adjoint(r); + auto H_A = do_wick(ex(1.) 
* ((h * A) - (A * h))); + auto H_A_3 = keep_up_to_2_body_terms(H_A); + + H_A_3 = simplification::overlap_with_obs(H_A_3); + H_A_3 = H_A_3 + ex(0.); + H_A_3 = simplification::screen_F12_and_density(H_A_3,1); + // std::wcout << to_latex_align(H_A_3,20,2) << std::endl; + H_A_3 = simplification::tens_to_FNOps(H_A_3); + simplify(H_A_3); + auto com_1 = simplification::hamiltonian_based_projector_1(H_A_3); + + auto fFF = ex(1. / 2) * compute_double_com(F(), r, r_1,1); + non_canon_simplify(fFF); + auto fFFt = ex(1. / 2) * + compute_double_com(F(), r, ex(-1.) * adjoint(r_1),1); + non_canon_simplify(fFFt); + auto fFtFt = ex(1. / 2) * + compute_double_com(F(), ex(-1.) * adjoint(r), + ex(-1.) * adjoint(r_1),1); + non_canon_simplify(fFtFt); + auto fFtF = ex(1. / 2) * + compute_double_com(F(), ex(-1.) * adjoint(r), r_1,1); + non_canon_simplify(fFtF); - if (print){ - std::wcout << "one body terms: " << to_latex_align(one_body,20,2) << std::endl; - std::wcout << "two body terms: " << to_latex_align(two_body,20,2) << std::endl; + auto fFF_sim = simplification::fock_based_projector_1(fFF); + // std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << std::endl; + auto fFFt_sim = simplification::fock_based_projector_1(fFFt); + // std::wcout << "FFt: " << to_latex_align(fFFt_sim.second,20,2) << std::endl; + auto fFtFt_sim = simplification::fock_based_projector_1(fFtFt); + // std::wcout << "FtFt: " << to_latex_align(fFtFt_sim.second,20,2) << std::endl; + auto fFtF_sim = simplification::fock_based_projector_1(fFtF); + // std::wcout << "FtF: " << to_latex_align(fFtF_sim.second,20,2) << std::endl; + + auto one_body = com_1.first + (fFF_sim.first + fFFt_sim.first + + fFtFt_sim.first + fFtF_sim.first); + auto two_body = com_1.second + (fFF_sim.second + fFFt_sim.second + + fFtFt_sim.second + fFtF_sim.second); + non_canon_simplify(one_body); + non_canon_simplify(two_body); + int term_count = 0; + for (auto i = 0; i < one_body->as().summands().size(); i++) { + term_count += 1; + } + for 
(auto i = 0; i < two_body->as().summands().size(); i++) { + term_count += 1; + } + std::cout << "number of terms: " << term_count << std::endl; + + if (print) { + std::wcout << "one body terms: " << to_latex_align(one_body, 20, 2) + << std::endl; + std::wcout << "two body terms: " << to_latex_align(two_body, 20, 2) + << std::endl; + } + return std::pair{one_body, two_body}; } - return std::pair{one_body, two_body}; } }; diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index c2aa2266e..d1ec2724d 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -236,76 +236,124 @@ ExprPtr tens_to_op(ExprPtr ex_) { } // F tensors must contain contain indices in the bra with space > all. this includes complete, completeunoccupied, and inactiveunoccupied. // and if one of the particle indices is connected to the obs virtual space, then the other must be from the CABS set. i.e. 
if G^{a \beta}_{ij} -> G^{a a'}_{ij} -ExprPtr screen_F_tensors(ExprPtr ex_) { +ExprPtr screen_F_tensors(ExprPtr ex_, int ansatz = 2) { assert(ex_->is()); assert(ex_->as().label() == L"F"); auto overlap = ex(1); bool good = false; bool bra_good = false; - for (int i = 0; i < ex_->as().bra().size(); i++) { - auto bra = ex_->as().bra()[i]; - if (bra.space().type() == IndexSpace::complete || bra.space().type() == IndexSpace::complete_unoccupied) { - good = true; - bra_good = true; - } - else if(bra.space().type() == IndexSpace::complete || - bra.space().type() == IndexSpace::complete_unoccupied || - bra.space().type() == IndexSpace::other_unoccupied){ - good = true; + if(ansatz == 2) { + for (int i = 0; i < ex_->as().bra().size(); i++) { + auto bra = ex_->as().bra()[i]; + if (bra.space().type() == IndexSpace::complete || + bra.space().type() == IndexSpace::complete_unoccupied) { + good = true; + bra_good = true; + } else if (bra.space().type() == IndexSpace::complete || + bra.space().type() == IndexSpace::complete_unoccupied || + bra.space().type() == IndexSpace::other_unoccupied) { + good = true; + } } - } - for (int i = 0; i < ex_->as().bra().size(); i++){ + for (int i = 0; i < ex_->as().bra().size(); i++) { auto bra = ex_->as().bra()[i]; - if ((bra.space().type() == IndexSpace::unoccupied || bra.space().type() == - IndexSpace::all) && bra_good && (bra.space().type() != IndexSpace::complete_unoccupied)) { // if one of the upper indices is explicitly outside of CABs, create an overlap with the other index and the CABs space. 
- if (i == 0) { - overlap = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::other_unoccupied)), - ex_->as().bra()[1]); - } - if (i == 1) { - overlap = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::other_unoccupied)), - ex_->as().bra()[0]); + if ((bra.space().type() == IndexSpace::unoccupied || + bra.space().type() == IndexSpace::all) && + bra_good && + (bra.space().type() != + IndexSpace::complete_unoccupied)) { // if one of the upper indices is explicitly outside of CABs, create an overlap with the other index and the CABs space. + if (i == 0) { + overlap = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::other_unoccupied)), + ex_->as().bra()[1]); + } + if (i == 1) { + overlap = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::other_unoccupied)), + ex_->as().bra()[0]); + } } } - } - bool ket_good = false; - for (int j = 0; j as().ket().size(); j++) { - auto ket = ex_->as().ket()[j]; - if (ket.space().type() == IndexSpace::complete || - ket.space().type() == IndexSpace::complete_unoccupied) { - good = true; - ket_good = true; + bool ket_good = false; + for (int j = 0; j < ex_->as().ket().size(); j++) { + auto ket = ex_->as().ket()[j]; + if (ket.space().type() == IndexSpace::complete || + ket.space().type() == IndexSpace::complete_unoccupied) { + good = true; + ket_good = true; + } else if (ket.space().type() == IndexSpace::complete || + ket.space().type() == IndexSpace::complete_unoccupied || + ket.space().type() == IndexSpace::other_unoccupied) { + good = true; + } + } + for (int j = 0; j < ex_->as().ket().size(); j++) { + auto ket = ex_->as().ket()[j]; + if ((ket.space().type() == IndexSpace::unoccupied || + ket.space().type() == IndexSpace::all) && + ket_good && (ket.space().type() != IndexSpace::complete_unoccupied)) { + if (j == 0) { + overlap = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::other_unoccupied)), + ex_->as().ket()[1]); + } + if (j 
== 1) { + overlap = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::other_unoccupied)), + ex_->as().ket()[0]); + } + } } - else if (ket.space().type() == IndexSpace::complete || - ket.space().type() == IndexSpace::complete_unoccupied || - ket.space().type() == IndexSpace::other_unoccupied) { - good = true; + if (good) { + return ex_ * overlap; + } else { + return ex(0); } } - for (int j = 0; j as().ket().size(); j++){ - auto ket = ex_->as().ket()[j]; - if ((ket.space().type() == IndexSpace::unoccupied || ket.space().type() == IndexSpace::all) && ket_good && (ket.space().type() != IndexSpace::complete_unoccupied)) { - if (j == 0) { - overlap = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::other_unoccupied)), - ex_->as().ket()[1]); + else if (ansatz == 1){ + bool non_zero = false; + bool bra_proj_space = false;// perhaps a better way would be to create a child class of tensor for G tensor which can keep track of geminal generating and projector at construction. + for (int i = 0; i < ex_->as().bra().size(); i++) { + auto bra = ex_->as().bra()[i]; + if(bra.space().type() == IndexSpace::complete || + bra.space().type() == IndexSpace::complete_unoccupied || + bra.space().type() == IndexSpace::other_unoccupied){ + bra_proj_space = true; + non_zero = true; } - if (j == 1) { - overlap = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::other_unoccupied)), - ex_->as().ket()[0]); + } + if (bra_proj_space){ + for (int i = 0; i < ex_->as().bra().size(); i++) { + auto bra = ex_->as().bra()[i]; + auto overlap1 = make_overlap(Index::make_tmp_index(IndexSpace::instance(IndexSpace::other_unoccupied)),bra); + ex_ = overlap1 * ex_; } } - } - if(good){ - return ex_ * overlap; - } - else{ - return ex(0); + bool ket_proj_space = false;// perhaps a better way would be to create a child class of tensor for G tensor which can keep track of geminal generating and projector at construction. 
+ for (int i = 0; i < ex_->as().ket().size(); i++) { + auto ket = ex_->as().ket()[i]; + if(ket.space().type() == IndexSpace::complete || + ket.space().type() == IndexSpace::complete_unoccupied || + ket.space().type() == IndexSpace::other_unoccupied){ + ket_proj_space = true; + non_zero = true; + } + } + if (ket_proj_space){ + for (int i = 0; i < ex_->as().ket().size(); i++) { + auto ket = ex_->as().ket()[i]; + auto overlap1 = make_overlap(Index::make_tmp_index(IndexSpace::instance(IndexSpace::other_unoccupied)),ket); + ex_ = overlap1 * ex_; + } + } + if (non_zero){ + return ex_; + } + else{ + return ex(0.0); + } } } @@ -728,7 +776,7 @@ std::pair contains_tens(ExprPtr ex_, std::wstring label){ //re-implimentation as a recursive function which gets called every time a delta is found, simplifies/reduces the product and returns. //products are const and two deltas acting on the same index makes this difficult. logically the product needs to update within its own loop, but it cannot. Alternatively, two delta's to the same index need to occur in the same product, but that breaks things. //work around. make a copy of product which can be modified? break out of product loop? -ExprPtr screen_F12_and_density(ExprPtr exprs){ +ExprPtr screen_F12_and_density(ExprPtr exprs,int ansatz = 2){ if(exprs->is()) { auto return_sum = ex(0); for (auto&& product : exprs->as().summands()) { @@ -736,7 +784,7 @@ ExprPtr screen_F12_and_density(ExprPtr exprs){ for (auto&& factor : product->as().factors()) { auto temp_factor = ex(1.); if (factor->is() && factor->as().label() == L"F") { - temp_factor = screen_F_tensors(factor); // screen F tensors should just provide the delta. + temp_factor = screen_F_tensors(factor,ansatz); // screen F tensors should just provide the delta. 
} else {temp_factor = factor;} auto product_clone = product->clone(); @@ -748,7 +796,7 @@ ExprPtr screen_F12_and_density(ExprPtr exprs){ wick_f.reduce(product_clone); //std::wcout << " product clone after reduce: " << to_latex_align(product_clone) << std::endl; non_canon_simplify(product_clone); - product_clone = screen_F12_and_density(product_clone); + product_clone = screen_F12_and_density(product_clone,ansatz); return_sum = product_clone + return_sum; new_product = ex(0.); break; @@ -769,7 +817,7 @@ ExprPtr screen_F12_and_density(ExprPtr exprs){ for (auto&& factor : exprs->as().factors()) { auto temp_factor = ex(1.); if (factor->is() && factor->as().label() == L"F") { - temp_factor = screen_F_tensors(factor); // screen F tensors should just provide the delta. + temp_factor = screen_F_tensors(factor,ansatz); // screen F tensors should just provide the delta. } else {temp_factor = factor;} auto product_clone = exprs->clone(); @@ -781,7 +829,7 @@ ExprPtr screen_F12_and_density(ExprPtr exprs){ wick_f.reduce(product_clone); non_canon_simplify(product_clone); //std::wcout << " product clone after reduce: " << to_latex_align(product_clone) << std::endl; - product_clone = screen_F12_and_density(product_clone); + product_clone = screen_F12_and_density(product_clone,ansatz); new_product = product_clone; break; } @@ -791,6 +839,7 @@ ExprPtr screen_F12_and_density(ExprPtr exprs){ } else return exprs; } + ExprPtr FNOPs_to_tens(ExprPtr ex_){ if(ex_->is()){ auto new_sum = ex(0); @@ -901,7 +950,7 @@ ExprPtr partition_f(ExprPtr exprs){ // unfortunately, simplify(result) and wick.reduce(result) will recanonicalize the indices. // enforces the following obs convention. E^{p_7}_{p_9} and E^{{p_7}{p_8}}_{{p_9}{p_{10}}} // should allow analysis of multiple expressions who have the same normal order operator prefactor. 
-std::pair hamiltonian_based(ExprPtr exprs){ +std::pair hamiltonian_based_projector_2(ExprPtr exprs){ //std::wcout << "pre remove constants: " << to_latex_align(exprs,20,2) << std::endl; //exprs = remove_const(exprs); // std::wcout << "post remove constants: " << to_latex_align(exprs,20,2) << std::endl; @@ -911,7 +960,7 @@ std::pair hamiltonian_based(ExprPtr exprs){ exprs = partition_f(exprs); non_canon_simplify(exprs); //std::wcout << "post convert to tensor: " << to_latex_align(exprs,20,2) << std::endl; - exprs = screen_F12_and_density(exprs); + exprs = screen_F12_and_density(exprs,2); //std::wcout << "post screen f12: " << to_latex_align(exprs,20,2) << std::endl; non_canon_simplify(exprs); exprs = screen_densities(exprs); @@ -931,13 +980,56 @@ std::pair hamiltonian_based(ExprPtr exprs){ return fnop_to_overlap(exprs_intmed); } +// here G can only have projection to the alpha and Beta space otherwise projector constructs it to be be zero. +std::pair hamiltonian_based_projector_1(ExprPtr exprs){ + exprs = FNOPs_to_tens(exprs); + non_canon_simplify(exprs); + exprs = partition_f(exprs); + non_canon_simplify(exprs);; + exprs = screen_F12_and_density(exprs,1); + non_canon_simplify(exprs); + auto exprs_intmed = ex(0.0); + for (auto&& product : exprs->as().summands()){ + auto new_product = simplification::find_f12_interms(product); + exprs_intmed = new_product + exprs_intmed; + } + non_canon_simplify(exprs_intmed); + return fnop_to_overlap(exprs_intmed); +} +//G can only project to alpha and Beta space. still need to use fock based expression. +std::pair fock_based_projector_1(ExprPtr exprs){ + exprs = FNOPs_to_tens(exprs); + non_canon_simplify(exprs); + if(exprs->is()){ + return std::pair {exprs, exprs}; + } + exprs = partition_f(exprs); + auto final_screen = exprs; + non_canon_simplify(final_screen); + //in some cases, there will now be no contributing terms left so return zero to one and two body. 
+ if(final_screen->is()){ + return std::pair {final_screen, final_screen}; + } + non_canon_simplify(final_screen); + final_screen = treat_fock(final_screen); + non_canon_simplify(final_screen); + //find the special f12 intermediates that cannot efficiently be solved directly. This seems to work already for the general case! + auto last_screen = ex(0.0); + for (auto&& product : final_screen->as().summands()){ + auto new_product = simplification::find_f12_interms(product); + last_screen = last_screen + new_product; + } + //::wcout << "post intermediates: " << to_latex_align(final_screen,20,2) << std::endl; + non_canon_simplify(last_screen); + return fnop_to_overlap(last_screen); +} //TODO generalize for spin-orbital basis //simplification to deal with fock based expressions. involving one body fock operator. // not rigorous for more than 2 body operators or more than 2 density matrices whose rank must be <= 2. // unfortunately, simplify(result) and wick.reduce(result) will recanonicalize the indices. // enforces the following obs convention. E^{p_7}_{p_9} and E^{{p_7}{p_8}}_{{p_9}{p_{10}}} // should allow analysis of multiple expressions who have the same normal order operator prefactor. 
-std::pair fock_based (ExprPtr exprs){ +std::pair fock_based_projector_2(ExprPtr exprs){ //std::wcout << "expression before removing constants: " << to_latex_align(exprs,20,2) << std::endl; //exprs = remove_const(exprs); //std::wcout << "after screening constant: " << to_latex_align(exprs) << std::endl; diff --git a/examples/uccf12/uccf12.cpp b/examples/uccf12/uccf12.cpp index 8a81edf8d..a0f83aaf6 100644 --- a/examples/uccf12/uccf12.cpp +++ b/examples/uccf12/uccf12.cpp @@ -159,7 +159,7 @@ try_main() { H_A_3 = simplification::tens_to_FNOps(H_A_3); auto H_A_2 = decompositions::three_body_substitution(H_A_3,2); simplify(H_A_2); - auto com_1 = simplification::hamiltonian_based(H_A_2); + auto com_1 = simplification::hamiltonian_based_projector_2(H_A_2); std::wcout << "h A one body: " << to_latex_align(com_1.first,20,2) << std::endl; std::wcout << "h A two body: " << to_latex_align(com_1.second,20,2) << std::endl; @@ -169,14 +169,14 @@ try_main() { auto fFtFt = compute_double_com(F(),ex(-1.) * adjoint(r),ex(-1.) * adjoint(r_1)); auto fFtF = compute_double_com(F(),ex(-1.) 
* adjoint(r),r_1); - auto fFF_sim = simplification::fock_based(fFF); + auto fFF_sim = simplification::fock_based_projector_2(fFF); // std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << std::endl; - auto fFFt_sim = simplification::fock_based(fFFt); + auto fFFt_sim = simplification::fock_based_projector_2(fFFt); std::wcout << "FFt one body: " << to_latex_align(fFFt_sim.first,20,2) << std::endl; std::wcout << "FFt two body: " << to_latex_align(fFFt_sim.second,20,2) << std::endl; - auto fFtFt_sim = simplification::fock_based(fFtFt); + auto fFtFt_sim = simplification::fock_based_projector_2(fFtFt); //std::wcout << "FtFt: " << to_latex_align(fFtFt_sim.second,20,2) << std::endl; - auto fFtF_sim = simplification::fock_based(fFtF); + auto fFtF_sim = simplification::fock_based_projector_2(fFtF); //std::wcout << "FtF one body: " << to_latex_align(fFtF_sim.first,20,2) << std::endl; //std::wcout << "FtF two body: " << to_latex_align(fFtF_sim.second,20,2) << std::endl; From 55145b5fbbacd3e310b3be31f68462d63f1529fa Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Wed, 19 Jan 2022 14:35:16 -0500 Subject: [PATCH 020/120] single_ref_uccf12.h: misc cleanup --- SeQuant/domain/eqs/single_ref_uccf12.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index 6a205317c..3d5238f46 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -2,8 +2,9 @@ // Created by Conner Masteran on 8/16/21. // -#ifndef SEQUANT_SINGLE_REF_UCCF12_H -#define SEQUANT_SINGLE_REF_UCCF12_H +#ifndef SEQUANT_DOMAIN_SINGLE_REF_UCCF12_H +#define SEQUANT_DOMAIN_SINGLE_REF_UCCF12_H + #include "../transcorrelated/three_body_decomp.hpp" #include "../transcorrelated/simplifications.h" #include @@ -115,7 +116,7 @@ class uccf12{ gg_space = IndexSpace::unoccupied; } else { - throw " USUPPORTED SPACE LABEL! 
CHECK ABOVE FOR VALID ENTRIES"; + throw std::runtime_error("uccf12::compute(gg_label) unsupported space label"); } auto h = H(false); @@ -169,4 +170,4 @@ class uccf12{ } }; -#endif // SEQUANT_SINGLE_REF_UCCF12_H +#endif // SEQUANT_DOMAIN_SINGLE_REF_UCCF12_H From 73c4d00cf44d4ab537f2774346ee7758598e1131 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Thu, 20 Jan 2022 12:25:42 -0500 Subject: [PATCH 021/120] add support for active space --- SeQuant/domain/eqs/single_ref_uccf12.h | 17 +++++++++++++++-- SeQuant/domain/mbpt/convention.cpp | 2 ++ .../domain/transcorrelated/simplifications.h | 2 +- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index 11167e33a..cd1735f75 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -26,7 +26,16 @@ class uccf12{ uccf12(bool single_reference = true, bool fock_approx = true, unsigned int max_op_rank = 2){ sr = single_reference; fock = fock_approx; op_rank = max_op_rank; sequant::set_default_context(SeQuant(Vacuum::Physical, IndexSpaceMetric::Unit, BraKetSymmetry::conjugate, SPBasis::spinfree)); -// mbpt::set_default_convention(); + //mbpt::set_default_convention(); + std::setlocale(LC_ALL, "en_US.UTF-8"); + std::wcout.precision(std::numeric_limits::max_digits10); + std::wcerr.precision(std::numeric_limits::max_digits10); + std::wcout.sync_with_stdio(false); + std::wcerr.sync_with_stdio(false); + std::wcout.imbue(std::locale("en_US.UTF-8")); + std::wcerr.imbue(std::locale("en_US.UTF-8")); + std::wcout.sync_with_stdio(true); + std::wcerr.sync_with_stdio(true); sequant::detail::OpIdRegistrar op_id_registrar; TensorCanonicalizer::register_instance(std::make_shared()); } @@ -155,7 +164,11 @@ class uccf12{ gg_space = IndexSpace::frozen_occupied; } else if (gg_label == "uocc") { gg_space = IndexSpace::unoccupied; - } else { + } + // currently not supported, but needs to be. 
+ else if (gg_label == "act_obs") { + gg_space = IndexSpace::all_active; + } else { throw " USUPPORTED SPACE LABEL! CHECK ABOVE FOR VALID ENTRIES"; } diff --git a/SeQuant/domain/mbpt/convention.cpp b/SeQuant/domain/mbpt/convention.cpp index d50fcdec3..3e6ef8a7c 100644 --- a/SeQuant/domain/mbpt/convention.cpp +++ b/SeQuant/domain/mbpt/convention.cpp @@ -74,6 +74,7 @@ void register_standard_instances() { IndexSpace::register_instance(declab(L"m"), IndexSpace::occupied, qnattr, do_not_throw); IndexSpace::register_instance(declab(L"a"), IndexSpace::active_unoccupied, qnattr, do_not_throw); IndexSpace::register_instance(declab(L"e"), IndexSpace::unoccupied, qnattr, do_not_throw); + IndexSpace::register_instance(declab(L"x"), IndexSpace::all_active, qnattr, do_not_throw); IndexSpace::register_instance(declab(L"p"), IndexSpace::all, qnattr, do_not_throw); IndexSpace::register_instance(declab(L"α'"), IndexSpace::other_unoccupied, qnattr, do_not_throw); IndexSpace::register_instance(declab(L"α"), IndexSpace::complete_unoccupied, qnattr, do_not_throw); @@ -94,6 +95,7 @@ void make_default_indexregistry() { register_index(idxreg_ref, Index{declab(L"m")}, 110); register_index(idxreg_ref, Index{declab(L"a")}, 1000); register_index(idxreg_ref, Index{declab(L"e")}, 1000); + register_index(idxreg_ref, Index{declab(L"x")}, 1100); register_index(idxreg_ref, Index{declab(L"p")}, 1110); register_index(idxreg_ref, Index{declab(L"α'")}, 3000); register_index(idxreg_ref, Index{declab(L"α")}, 4000); diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index d1ec2724d..80df8e36f 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -411,7 +411,7 @@ auto treat_fock(ExprPtr ex_){ auto new_product = ex(real); for (auto&& factor : product->as().factors()){ if (factor->is() && factor->as().label() == L"f"){ - // TODO This might not be exactly correct, in the case of 
f_i^p, this should actually set p to all occupied m. + // TODO do not assume EBC auto space = intersection(factor->as().bra()[0].space(), factor->as().ket()[0].space()); if(space.type().none()){ new_product = ex(0) * new_product; From 0bd665db7d09f83e1a908dcb7e14fb2fe6bcf5d4 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 20 Jan 2022 12:56:39 -0500 Subject: [PATCH 022/120] Add docs. --- SeQuant/core/optimize.hpp | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/SeQuant/core/optimize.hpp b/SeQuant/core/optimize.hpp index e7b03d362..18fb1a55e 100644 --- a/SeQuant/core/optimize.hpp +++ b/SeQuant/core/optimize.hpp @@ -224,8 +224,6 @@ eval_seq_t single_term_opt_v2(TensorNetwork const& network, ranges::sort(bk, Index::LabelCompare{}); nth_tensor_indices.emplace_back(std::move(bk)); } - // double const log_nocc = std::log10(nocc); - // double const log_nvirt = std::log10(nvirt); auto log_flops_ = [&idxsz](container::vector const& commons, container::vector const& diffs) { @@ -281,7 +279,13 @@ eval_seq_t single_term_opt_v2(TensorNetwork const& network, return result[(1 << nt) - 1].sequence; } -// @c prod is assumed to consist of only Tensor expressions +/// +/// \param prod Product to be optimized. +/// \param idxsz An invocable object that maps an Index object to size. +/// \return Parenthesized product expression. +/// +/// @note @c prod is assumed to consist of only Tensor expressions +/// template , bool> = true> ExprPtr single_term_opt_v2(Product const& prod, IdxToSz const& idxsz) { @@ -308,6 +312,10 @@ ExprPtr single_term_opt_v2(Product const& prod, IdxToSz const& idxsz) { } // namespace opt +/// +/// \param expr Expression to be optimized. +/// \param idxsz An invocable object that maps an Index object to size. +/// \return Optimized expression converted to EvalNode. 
template , bool> = true> EvalNode optimize(const ExprPtr& expr, IdxToSz const& idxsz) { From 1a7d59b7826ac19cca5d2b7a00882c9e7e38c8f3 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Sat, 29 Jan 2022 13:00:36 -0500 Subject: [PATCH 023/120] Generation of tensor-of-tensor(TA-tensor) annotation supported. --- SeQuant/domain/eval/eval_ta.hpp | 36 ++++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/SeQuant/domain/eval/eval_ta.hpp b/SeQuant/domain/eval/eval_ta.hpp index b2a90c495..b9c94795b 100644 --- a/SeQuant/domain/eval/eval_ta.hpp +++ b/SeQuant/domain/eval/eval_ta.hpp @@ -10,14 +10,36 @@ namespace sequant::eval::ta { namespace detail { -auto const braket_to_annot = [](auto const& bk) { - std::string annot; - for (auto& idx : bk) { - annot += idx.ascii_label() + ","; +/// +/// Given an iterable of Index objects, generate a string annotation +/// that can be used for TiledArray tensor expressions. Tensor-of-tensors also +/// supported. +template +std::string braket_to_annot(Indices_t const& indices) { + // make a comma-separated and concatenated string out of an iterable of strings + auto add_commas = [](auto const& strs) -> std::string { + std::string result{ranges::front(strs)}; + for (auto&& s: ranges::views::tail(strs)) + result += "," + s; + return result; + }; + + container::vector outer_labels{}, inner_labels{}; + for (auto&& idx: indices) { + inner_labels.emplace_back(idx.ascii_label()); + for (auto&& pidx: idx.proto_indices()) + outer_labels.emplace_back(pidx.ascii_label()); } - annot.pop_back(); - return annot; -}; // braket_to_annot + + if (outer_labels.empty()) + return add_commas(inner_labels); + + // support CSV methods + ranges::sort(outer_labels); + ranges::sort(inner_labels); + auto outer_labels_updated = ranges::views::set_difference(outer_labels, inner_labels); + return add_commas(outer_labels_updated) + ";" + add_commas(inner_labels); +} auto const ords_to_annot = [](auto const& ords) { using 
ranges::accumulate; From 13016c17f16b2c70cfa65485feba22d7d18b50fd Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Sat, 29 Jan 2022 13:35:51 -0500 Subject: [PATCH 024/120] TiledArray annotation generation moved to EvalExpr class. --- SeQuant/core/eval_expr.cpp | 4 +++- SeQuant/core/eval_expr.hpp | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/SeQuant/core/eval_expr.cpp b/SeQuant/core/eval_expr.cpp index b5c14e91e..ee6973747 100644 --- a/SeQuant/core/eval_expr.cpp +++ b/SeQuant/core/eval_expr.cpp @@ -13,7 +13,7 @@ const Tensor& EvalExpr::tensor() const { return tensor_; } const Constant& EvalExpr::scalar() const { return scalar_; } EvalExpr::EvalExpr(const Tensor& tnsr) : op_{EvalOp::Id}, tensor_{tnsr}, -hash_{EvalExpr::hash_terminal_tensor(tnsr)}{} +hash_{EvalExpr::hash_terminal_tensor(tnsr)}, annot_{braket_to_annot(tnsr.const_braket())}{} EvalExpr::EvalExpr(const EvalExpr& xpr1, const EvalExpr& xpr2, @@ -50,6 +50,8 @@ EvalExpr::EvalExpr(const EvalExpr& xpr1, s, infer_braket_symmetry(), infer_particle_symmetry(s)}; hash_ = hash_imed(expr1, expr2, op); + + annot_ = braket_to_annot(tensor_.const_braket()); } Symmetry EvalExpr::infer_tensor_symmetry_sum(EvalExpr const& xpr1, diff --git a/SeQuant/core/eval_expr.hpp b/SeQuant/core/eval_expr.hpp index c438f382b..772011518 100644 --- a/SeQuant/core/eval_expr.hpp +++ b/SeQuant/core/eval_expr.hpp @@ -61,6 +61,11 @@ class EvalExpr final { /** Factor to scale tensor by. */ [[nodiscard]] const Constant& scalar() const; + /// + /// annotation for TiledArray + /// + std::string annot() const {return annot_; } + template > EvalExpr& operator*=(T fac) { scalar_ *= Constant{std::move(fac)}; @@ -89,6 +94,8 @@ class EvalExpr final { Constant scalar_{1}; + std::string annot_; + /** * Infer the symmetry of the resulting tensor after summing two tensors. */ @@ -151,6 +158,37 @@ class EvalExpr final { * the first is the target bra. 
*/ static braket_type target_braket_prod(const Tensor&, const Tensor&); + + /// + /// Given an iterable of Index objects, generate a string annotation + /// that can be used for TiledArray tensor expressions. Tensor-of-tensors also + /// supported. + template + static std::string braket_to_annot(Indices_t const& indices) { + // make a comma-separated and concatenated string out of an iterable of strings + auto add_commas = [](auto const& strs) -> std::string { + std::string result{ranges::front(strs)}; + for (auto&& s: ranges::views::tail(strs)) + result += "," + s; + return result; + }; + + container::vector outer_labels{}, inner_labels{}; + for (auto&& idx: indices) { + inner_labels.emplace_back(idx.ascii_label()); + for (auto&& pidx: idx.proto_indices()) + outer_labels.emplace_back(pidx.ascii_label()); + } + + if (outer_labels.empty()) + return add_commas(inner_labels); + + // support CSV methods + ranges::sort(outer_labels); + ranges::sort(inner_labels); + auto outer_labels_updated = ranges::views::set_difference(outer_labels, inner_labels); + return add_commas(outer_labels_updated) + ";" + add_commas(inner_labels); + } }; } // namespace sequant From 0dfa306120cdd6f9bed45ca2f6fae4929e5b4b41 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Mon, 31 Jan 2022 14:28:09 -0500 Subject: [PATCH 025/120] Rename namespace sequant::eval::ta to sequant::eval. 
--- SeQuant/domain/eval/eval_ta.hpp | 4 ++-- examples/eval/ta/data_world_ta.hpp | 2 +- examples/eval/ta/main.cpp | 2 +- examples/eval/ta/scf_ta.hpp | 6 +++--- tests/unit/test_eval_ta.cpp | 6 +++--- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/SeQuant/domain/eval/eval_ta.hpp b/SeQuant/domain/eval/eval_ta.hpp index b9c94795b..048caa4b3 100644 --- a/SeQuant/domain/eval/eval_ta.hpp +++ b/SeQuant/domain/eval/eval_ta.hpp @@ -6,7 +6,7 @@ #include #include -namespace sequant::eval::ta { +namespace sequant::eval { namespace detail { @@ -235,6 +235,6 @@ auto eval_antisymm(EvalNode const& node, Iterable const& target_indx_labels, return antisymm_result; } -} // namespace sequant::eval::ta +} // namespace sequant::eval #endif // SEQUANT_EVAL_EVAL_TA_HPP diff --git a/examples/eval/ta/data_world_ta.hpp b/examples/eval/ta/data_world_ta.hpp index e1459315b..689abf21d 100644 --- a/examples/eval/ta/data_world_ta.hpp +++ b/examples/eval/ta/data_world_ta.hpp @@ -13,7 +13,7 @@ #include #include -namespace sequant::eval::ta { +namespace sequant::eval { template class DataWorldTA { diff --git a/examples/eval/ta/main.cpp b/examples/eval/ta/main.cpp index 74d4b5585..e4684f571 100644 --- a/examples/eval/ta/main.cpp +++ b/examples/eval/ta/main.cpp @@ -82,7 +82,7 @@ int main(int argc, char* argv[]) { auto const calc_info = eval::make_calc_info(calc_config, fock_file, eri_file, out_file); - eval::ta::SequantEvalScfTA{world, calc_info}.scf(std::wcout); + eval::SequantEvalScfTA{world, calc_info}.scf(std::wcout); TA::finalize(); return 0; diff --git a/examples/eval/ta/scf_ta.hpp b/examples/eval/ta/scf_ta.hpp index b5197a197..95f4b9785 100644 --- a/examples/eval/ta/scf_ta.hpp +++ b/examples/eval/ta/scf_ta.hpp @@ -18,7 +18,7 @@ #include -namespace sequant::eval::ta { +namespace sequant::eval { template class SequantEvalScfTA final : public SequantEvalScf { @@ -99,8 +99,8 @@ class SequantEvalScfTA final : public SequantEvalScf { for (auto&& [r, n] : ranges::views::zip(rs, nodes_)) 
{ auto const target_indices = tnsr_to_bk_labels_sorted(n->tensor()); r = info_.eqn_opts.spintrace - ? eval::ta::eval_symm(n, target_indices, data_world_, cman_) - : eval::ta::eval_antisymm(n, target_indices, data_world_, cman_); + ? eval::eval_symm(n, target_indices, data_world_, cman_) + : eval::eval_antisymm(n, target_indices, data_world_, cman_); } data_world_.update_amplitudes(rs); return info_.eqn_opts.spintrace ? energy_spin_free_orbital() diff --git a/tests/unit/test_eval_ta.cpp b/tests/unit/test_eval_ta.cpp index 334fd7399..0660992d6 100644 --- a/tests/unit/test_eval_ta.cpp +++ b/tests/unit/test_eval_ta.cpp @@ -99,9 +99,9 @@ TEST_CASE("TEST_EVAL_USING_TA", "[eval]") { using ranges::views::transform; using TA::TArrayD; using sequant::to_eval_node; - using sequant::eval::ta::eval; - using sequant::eval::ta::eval_antisymm; - using sequant::eval::ta::eval_symm; + using sequant::eval::eval; + using sequant::eval::eval_antisymm; + using sequant::eval::eval_symm; auto parse_expr_antisymm = [](auto const& xpr){ return parse_expr(xpr, sequant::Symmetry::antisymm); }; // tnsr is assumed to be single-tiled From 9f3c52cc577ddea108728124a1de927394b9abe2 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Mon, 31 Jan 2022 14:42:51 -0500 Subject: [PATCH 026/120] Use annotation from EvalExpr for evaluation. 
--- SeQuant/domain/eval/eval_ta.hpp | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/SeQuant/domain/eval/eval_ta.hpp b/SeQuant/domain/eval/eval_ta.hpp index 048caa4b3..4148249fe 100644 --- a/SeQuant/domain/eval/eval_ta.hpp +++ b/SeQuant/domain/eval/eval_ta.hpp @@ -65,12 +65,11 @@ Tensor_t eval_inode(EvalNode const& node, Tensor_t const& leval, assert_imaginary_zero(node.left()->scalar()); assert_imaginary_zero(node.right()->scalar()); - auto const this_annot = braket_to_annot(node->tensor().const_braket()); - auto const lannot = braket_to_annot(node.left()->tensor().const_braket()); - auto const rannot = braket_to_annot(node.right()->tensor().const_braket()); - auto const lscal = node.left()->scalar().value().real(); auto const rscal = node.right()->scalar().value().real(); + auto const& this_annot = node->annot(); + auto const& lannot = node.left()->annot(); + auto const& rannot = node.right()->annot(); auto result = Tensor_t{}; if (node->op() == EvalOp::Prod) { @@ -149,14 +148,12 @@ auto eval(EvalNode const& node, Iterable const& target_indx_labels, auto result = detail::eval_single_node(node, std::forward(yielder), man); - auto const rannot = detail::braket_to_annot(node->tensor().const_braket()); - std::string lannot = ranges::front(target_indx_labels); for (auto const& lbl : ranges::views::tail(target_indx_labels)) lannot += std::string{','} + lbl; auto scaled = decltype(result){}; - scaled(lannot) = node->scalar().value().real() * result(rannot); + scaled(lannot) = node->scalar().value().real() * result(node->annot()); return scaled; } From f55479774b72ddd8d4a6ec657a99b5786250eab4 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Tue, 8 Feb 2022 11:21:20 -0500 Subject: [PATCH 027/120] added re-lable function for easy re-lableing of expressions. 
--- SeQuant/domain/eqs/single_ref_uccf12.h | 165 ++++++++++++++---- .../domain/transcorrelated/simplifications.h | 47 ++--- 2 files changed, 160 insertions(+), 52 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index cd1735f75..8a5f21b50 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -42,9 +42,15 @@ class uccf12{ //[[e1,e2],e3]_12 ExprPtr compute_double_com(ExprPtr e1, ExprPtr e2, ExprPtr e3, int ansatz = 2){ auto first_com = do_wick((e1 * e2) - (e2 * e1)); - auto first_com_clone = first_com->clone(); - auto second_com_1 = do_wick((first_com * e3)); - auto second_com_2 = do_wick(e3 * first_com); + simplify(first_com); + std::wcout << to_latex_align(first_com,20,2) << std::endl; + auto second_com_1 = first_com * e3; + //non_canon_simplify(second_com_1); + simplify(second_com_1); + second_com_1 = do_wick(second_com_1); + auto second_com_2 = e3 * first_com; + simplify(second_com_2); + second_com_2 = do_wick(second_com_2); auto second_com = second_com_1 - second_com_2; simplify(second_com); if(ansatz == 2) { @@ -150,7 +156,109 @@ class uccf12{ return result; } - std::pair compute(std::string gg_label,int ansatz = 2, bool print = false) { + // produces a uniquely indexed version of the given expression. + //assumes same number of upper and lower indices for operators and tensors + // do not simplify(expr) after use! this will cannonicalize labeling, undoing this work. + ExprPtr relable(ExprPtr expr) { + if (expr->is()){ + auto new_sum = ex(0.0); + for(auto && product : expr->as().summands()){ + auto new_product = relable(product); + new_sum = new_sum + new_product; + } + return new_sum; + } + + //product does not benefit from recursion + // must reproduce same connectivity to produce identical expressions. 
+ else if(expr->is()){ + std::vector changed_indices;//list of original indices + std::vector original_indices; // list of new indices + auto new_product = ex(expr->as().scalar()); + for (auto && factor : expr->as().factors()){ + std::pair,std::vector> new_up_low; + if (factor->is()){ + for (int i = 0; i < factor->as().bra().size(); i++){ + auto in_where_bra = simplification::in_list(factor->as().bra()[i], original_indices); + if(in_where_bra.first){ + new_up_low.first.push_back(changed_indices[in_where_bra.second]); + } + else{ + original_indices.push_back(factor->as().bra()[i]); + changed_indices.push_back(Index::make_tmp_index(IndexSpace::instance(factor->as().bra()[i].space().attr()))); + new_up_low.first.push_back( + changed_indices[changed_indices.size() - 1]); + } + auto in_where_ket = simplification::in_list(factor->as().ket()[i], original_indices); + if(in_where_ket.first){ + new_up_low.second.push_back(changed_indices[in_where_ket.second]); + } + else{ + original_indices.push_back(factor->as().ket()[i]); + changed_indices.push_back(Index::make_tmp_index(IndexSpace::instance(factor->as().ket()[i].space().attr()))); + new_up_low.second.push_back( + changed_indices[changed_indices.size() - 1]); + } + } + auto new_factor = ex(factor->as().label(), new_up_low.first, new_up_low.second); + new_product = new_product * new_factor; + } + else if (factor->is()){ + for (int i = 0; i < factor->as().nannihilators(); i++){ + auto in_where_ann = simplification::in_list(factor->as().annihilators()[i].index(), + original_indices); + if(in_where_ann.first){ + new_up_low.first.push_back( + changed_indices[in_where_ann.second]); + } + else{ + original_indices.push_back(factor->as().annihilators()[i].index()); + changed_indices.push_back(Index::make_tmp_index(IndexSpace::instance(factor->as().annihilators()[i].index().space().attr()))); + new_up_low.first.push_back( + changed_indices[changed_indices.size() - 1]); + } + auto in_where_cre = 
simplification::in_list(factor->as().creators()[i].index(), + original_indices); + if(in_where_cre.first){ + new_up_low.second.push_back( + changed_indices[in_where_cre.second]); + } + else{ + original_indices.push_back(factor->as().creators()[i].index()); + changed_indices.push_back(Index::make_tmp_index(IndexSpace::instance(factor->as().creators()[i].index().space().attr()))); + new_up_low.second.push_back( + changed_indices[changed_indices.size() - 1]); + } + } + auto new_factor = ex(new_up_low.second, new_up_low.first); + new_product = new_product * new_factor; + } + else{throw "unsupported factor type";} + } + return new_product; + } + else if(expr->is()){ + std::pair,std::vector> new_bra_ket; + for (int i = 0; i < expr->as().bra().size(); i++){ + new_bra_ket.first.push_back(Index::make_tmp_index(IndexSpace::instance(expr->as().bra()[i].space().attr()))); + new_bra_ket.second.push_back(Index::make_tmp_index(IndexSpace::instance(expr->as().ket()[i].space().attr()))); + } + return ex(expr->as().label(), new_bra_ket.first, new_bra_ket.second); + } + else if(expr->is()){ + std::pair,std::vector> new_ann_cre; + for (int i = 0; i < expr->as().nannihilators(); i++){ + new_ann_cre.first.push_back(Index::make_tmp_index(IndexSpace::instance(expr->as().annihilators()[i].index().space().attr()))); + new_ann_cre.second.push_back(Index::make_tmp_index(IndexSpace::instance(expr->as().creators()[i].index().space().attr()))); + } + return ex(new_ann_cre.first, new_ann_cre.second); + } + else if(expr->is()){ + return expr; + } + } + + std::pair compute(std::string gg_label,int ansatz = 2, bool print = false,bool singles=false) { // auto gg_space = IndexSpace::active_occupied; // Geminal-generating space: active occupieds is the normal choice, all orbitals is the reference-independent (albeit expensive) choice auto gg_space = IndexSpace::frozen_occupied; @@ -171,13 +279,31 @@ class uccf12{ } else { throw " USUPPORTED SPACE LABEL! 
CHECK ABOVE FOR VALID ENTRIES"; } + auto single = ex(0.0); + auto single_ = ex(0.0); + if(singles){ + // this might need to be complete space if we don't have a solution to the particular blocks of interest. + auto C = ex(L"C",std::initializer_list{Index::make_tmp_index(IndexSpace::instance(IndexSpace::all))},std::initializer_list{Index::make_tmp_index(IndexSpace::instance(IndexSpace::other_unoccupied))}); + auto E_pa = ex (std::initializer_list{C->as().bra()[0]},std::initializer_list{C->as().ket()[0]}); + auto C_Epa = C * E_pa; + auto anti_herm_C = C_Epa - adjoint(C_Epa); + single = single + anti_herm_C; + simplify(single); + std::wcout << "single term" << to_latex_align(single) << std::endl; + + auto single_2 = single->clone(); + single_2 = relable(single_2); + std::wcout << "single after relable" << to_latex_align(single_2) << std::endl; + single_ = single_2; + } if (ansatz == 2) { auto h = H(false); auto r = R12(gg_space); auto r_1 = R12(gg_space); - auto A = r - adjoint(r); + auto A = (r - adjoint(r)) + single; + auto A_ = (r_1 - adjoint(r_1)) + single_; auto H_A = do_wick(ex(1.) * ((h * A) - (A * h))); auto H_A_3 = keep_up_to_3_body_terms(H_A); // std::wcout << "pre decomp: " << to_latex_align(single_Comm,20,2) << std::endl; @@ -189,33 +315,12 @@ class uccf12{ auto H_A_2 = decompositions::three_body_substitution(H_A_3, 2); simplify(H_A_2); auto com_1 = simplification::hamiltonian_based_projector_2(H_A_2); + auto full_double_com = ex(1./2) * compute_double_com(F(),A,A_); - auto fFF = ex(1. / 2) * compute_double_com(F(), r, r_1); - non_canon_simplify(fFF); - auto fFFt = ex(1. / 2) * - compute_double_com(F(), r, ex(-1.) * adjoint(r_1)); - non_canon_simplify(fFFt); - auto fFtFt = ex(1. / 2) * - compute_double_com(F(), ex(-1.) * adjoint(r), - ex(-1.) * adjoint(r_1)); - non_canon_simplify(fFtFt); - auto fFtF = ex(1. / 2) * - compute_double_com(F(), ex(-1.) 
* adjoint(r), r_1); - non_canon_simplify(fFtF); - - auto fFF_sim = simplification::fock_based_projector_2(fFF); - // std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << std::endl; - auto fFFt_sim = simplification::fock_based_projector_2(fFFt); - // std::wcout << "FFt: " << to_latex_align(fFFt_sim.second,20,2) << std::endl; - auto fFtFt_sim = simplification::fock_based_projector_2(fFtFt); - // std::wcout << "FtFt: " << to_latex_align(fFtFt_sim.second,20,2) << std::endl; - auto fFtF_sim = simplification::fock_based_projector_2(fFtF); - // std::wcout << "FtF: " << to_latex_align(fFtF_sim.second,20,2) << std::endl; + auto sim = simplification::fock_based_projector_2(full_double_com); - auto one_body = com_1.first + (fFF_sim.first + fFFt_sim.first + - fFtFt_sim.first + fFtF_sim.first); - auto two_body = com_1.second + (fFF_sim.second + fFFt_sim.second + - fFtFt_sim.second + fFtF_sim.second); + auto one_body = com_1.first + (sim.first); + auto two_body = com_1.second + (sim.second); // cannot use non_canon_simplify here because of B term. non_canon_simplify(one_body); diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index 80df8e36f..b434127ca 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -40,6 +40,7 @@ ExprPtr op_to_tens(ExprPtr ex_){ //all densities and the Hamiltonian operators are confined to a given orbital basis in second quantized notation. //thus any index on a Normal Ordered operator or density must be confined to the obs. +///TODO this dictates that the resulting hamiltonian will be in a particular basis. ExprPtr overlap_with_obs(ExprPtr ex_){ auto overlap_expr = ex(0); //enforce an overlap each E with elements from for (auto&& product : ex_->as().summands()){// may be able to make_overlaps manually and apply them to the products. simplify may know what to do with it. 
@@ -144,6 +145,7 @@ ExprPtr overlap_with_obs(ExprPtr ex_){ //std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; wick.reduce(overlap_expr); simplify(overlap_expr); + //std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; return overlap_expr; } @@ -955,19 +957,19 @@ std::pair hamiltonian_based_projector_2(ExprPtr exprs){ //exprs = remove_const(exprs); // std::wcout << "post remove constants: " << to_latex_align(exprs,20,2) << std::endl; exprs = FNOPs_to_tens(exprs); - non_canon_simplify(exprs); + simplify(exprs); //exprs = overlap_with_obs(exprs); exprs = partition_f(exprs); - non_canon_simplify(exprs); + simplify(exprs); //std::wcout << "post convert to tensor: " << to_latex_align(exprs,20,2) << std::endl; exprs = screen_F12_and_density(exprs,2); //std::wcout << "post screen f12: " << to_latex_align(exprs,20,2) << std::endl; - non_canon_simplify(exprs); + simplify(exprs); exprs = screen_densities(exprs); //std::wcout << "post screen density: " << to_latex_align(exprs,20,2) << std::endl; //exprs = densities_to_occ(exprs); //f12 interms needs a particular canonical ordering - non_canon_simplify(exprs); + simplify(exprs); //std::wcout << "densities to occ: " << to_latex_align(exprs,20,2) << std::endl; auto exprs_intmed = ex(0.0); for (auto&& product : exprs->as().summands()){ @@ -976,43 +978,43 @@ std::pair hamiltonian_based_projector_2(ExprPtr exprs){ } //std::wcout << "post intermediates: " << to_latex_align(exprs,20,2) << std::endl; //tens_to_FNOps(exprs_intmed); - non_canon_simplify(exprs_intmed); + simplify(exprs_intmed); return fnop_to_overlap(exprs_intmed); } // here G can only have projection to the alpha and Beta space otherwise projector constructs it to be be zero. 
std::pair hamiltonian_based_projector_1(ExprPtr exprs){ exprs = FNOPs_to_tens(exprs); - non_canon_simplify(exprs); + simplify(exprs); exprs = partition_f(exprs); - non_canon_simplify(exprs);; + simplify(exprs);; exprs = screen_F12_and_density(exprs,1); - non_canon_simplify(exprs); + simplify(exprs); auto exprs_intmed = ex(0.0); for (auto&& product : exprs->as().summands()){ auto new_product = simplification::find_f12_interms(product); exprs_intmed = new_product + exprs_intmed; } - non_canon_simplify(exprs_intmed); + simplify(exprs_intmed); return fnop_to_overlap(exprs_intmed); } //G can only project to alpha and Beta space. still need to use fock based expression. std::pair fock_based_projector_1(ExprPtr exprs){ exprs = FNOPs_to_tens(exprs); - non_canon_simplify(exprs); + simplify(exprs); if(exprs->is()){ return std::pair {exprs, exprs}; } exprs = partition_f(exprs); auto final_screen = exprs; - non_canon_simplify(final_screen); + simplify(final_screen); //in some cases, there will now be no contributing terms left so return zero to one and two body. if(final_screen->is()){ return std::pair {final_screen, final_screen}; } - non_canon_simplify(final_screen); - final_screen = treat_fock(final_screen); - non_canon_simplify(final_screen); + simplify(final_screen); + //final_screen = treat_fock(final_screen); + simplify(final_screen); //find the special f12 intermediates that cannot efficiently be solved directly. This seems to work already for the general case! 
auto last_screen = ex(0.0); for (auto&& product : final_screen->as().summands()){ @@ -1020,7 +1022,7 @@ std::pair fock_based_projector_1(ExprPtr exprs){ last_screen = last_screen + new_product; } //::wcout << "post intermediates: " << to_latex_align(final_screen,20,2) << std::endl; - non_canon_simplify(last_screen); + simplify(last_screen); return fnop_to_overlap(last_screen); } //TODO generalize for spin-orbital basis @@ -1034,7 +1036,7 @@ std::pair fock_based_projector_2(ExprPtr exprs){ //exprs = remove_const(exprs); //std::wcout << "after screening constant: " << to_latex_align(exprs) << std::endl; exprs = FNOPs_to_tens(exprs); - non_canon_simplify(exprs); + simplify(exprs); //std::wcout << "fnop to tensor: " << to_latex_align(exprs,20,2) << std::endl; if(exprs->is()){ return std::pair {exprs, exprs}; @@ -1042,19 +1044,20 @@ std::pair fock_based_projector_2(ExprPtr exprs){ exprs = partition_f(exprs); //exprs = overlap_with_obs(exprs); auto final_screen = exprs; - non_canon_simplify(final_screen); + simplify(final_screen); //in some cases, there will now be no contributing terms left so return zero to one and two body. 
if(final_screen->is()){ return std::pair {final_screen, final_screen}; } //final_screen = screen_F12_and_density(final_screen); - non_canon_simplify(final_screen); + simplify(final_screen); //std::wcout << "screen F12: " << to_latex_align(final_screen,20,2) << std::endl; - final_screen = treat_fock(final_screen); - non_canon_simplify(final_screen); + //final_screen = treat_fock(final_screen); + final_screen = FNOPs_to_tens(final_screen); + simplify(final_screen); //std::wcout << "screen fock: " << to_latex_align(final_screen,20,2) << std::endl; final_screen = screen_densities(final_screen); - non_canon_simplify(final_screen); + simplify(final_screen); //enforce that densities are in the occupied space since they are only non-zero in occ //final_screen = densities_to_occ(final_screen); //non_canon_simplify(final_screen); @@ -1067,7 +1070,7 @@ if(final_screen->is()){ last_screen = last_screen + new_product; } //::wcout << "post intermediates: " << to_latex_align(final_screen,20,2) << std::endl; - non_canon_simplify(last_screen); + simplify(last_screen); //tens_to_FNOps(last_screen); return fnop_to_overlap(last_screen); } From cc0a22619dfadcd73b48e2cafa504cd29c3df779 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Tue, 8 Feb 2022 11:21:41 -0500 Subject: [PATCH 028/120] add singles operator to the registry --- SeQuant/domain/mbpt/op.cpp | 4 +++- SeQuant/domain/mbpt/op.hpp | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/SeQuant/domain/mbpt/op.cpp b/SeQuant/domain/mbpt/op.cpp index 4ce9c7e3d..cde87ea30 100644 --- a/SeQuant/domain/mbpt/op.cpp +++ b/SeQuant/domain/mbpt/op.cpp @@ -10,7 +10,7 @@ namespace mbpt { std::vector cardinal_tensor_labels() { return {L"\\lambda",L"\\gamma",L"\\Gamma", L"A", L"S", L"P", L"L", L"λ", L"h", L"f", L"g", - L"t", L"R", L"F",L"X", L"V", L"B", L"U",L"GR", overlap_label(), L"a", L"ã", L"b", L"ᵬ", L"E"}; + L"t", L"R", L"F",L"X", L"V", L"B", L"U",L"GR",L"C", overlap_label(), L"a", L"ã", L"b", L"ᵬ", L"E"}; } std::wstring 
to_wstring(OpType op) { @@ -35,6 +35,8 @@ std::wstring to_wstring(OpType op) { return L"F"; case OpType::GR: return L"GR"; + case OpType::C: + return L"C"; default: throw std::invalid_argument("to_wstring(OpType op): invalid op"); } diff --git a/SeQuant/domain/mbpt/op.hpp b/SeQuant/domain/mbpt/op.hpp index 205f550a0..d5b42ad3b 100644 --- a/SeQuant/domain/mbpt/op.hpp +++ b/SeQuant/domain/mbpt/op.hpp @@ -23,7 +23,8 @@ enum class OpType { L, //!< left-hand eigenstate R, //!< right-hand eigenstate R12, //!< geminal kernel - GR //!< GR kernel from f12 theory + GR, //!< GR kernel from f12 theory + C //!< cabs singles op }; /// Operator character relative to Fermi vacuum From db8a8c7b401a707006044037c3cb2e4495462ca7 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Tue, 8 Feb 2022 11:23:34 -0500 Subject: [PATCH 029/120] an assert is used when there is no intersection space in an overlap. This should probably just give zero right? --- SeQuant/core/wick.impl.hpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/SeQuant/core/wick.impl.hpp b/SeQuant/core/wick.impl.hpp index 244cdb474..8c87ac1de 100644 --- a/SeQuant/core/wick.impl.hpp +++ b/SeQuant/core/wick.impl.hpp @@ -182,7 +182,13 @@ inline container::map compute_index_replacement_rules( ranges::end(external_indices); const auto intersection_space = intersection(bra.space(), ket.space()); - assert(intersection_space != IndexSpace::null_instance()); + + if (intersection_space == IndexSpace::null_instance()){ + throw zero_result{}; + } + + // not sure why this doesn't return zero_result like every other example. seems like a bug. + //assert(intersection_space != IndexSpace::null_instance()); if (!bra_is_ext && !ket_is_ext) { // int + int const auto new_dummy = idxfac.make(intersection_space); From a77fd7cd250d28c9f11d11c38465da35ade9857a Mon Sep 17 00:00:00 2001 From: nakulteke Date: Sun, 20 Feb 2022 15:47:09 -0500 Subject: [PATCH 030/120] Included the seven terms from W*t2*t3 expansion in pCCSDT. 
Needs inclusion of W*t2*t2 term from 3CC for complete formula --- examples/srcc/srcc.cpp | 123 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 120 insertions(+), 3 deletions(-) diff --git a/examples/srcc/srcc.cpp b/examples/srcc/srcc.cpp index 3976e7199..167571e81 100644 --- a/examples/srcc/srcc.cpp +++ b/examples/srcc/srcc.cpp @@ -89,7 +89,125 @@ int main(int argc, char* argv[]) { }; // Spin-orbital coupled cluster - auto cc_r = sequant::eqs::cceqvec{NMAX, NMAX}(true, true, true, true, true); + auto cc_r = sequant::eqs::cceqvec{3, 3}(true, true, true, true, true); + + auto A3 = ex(Tensor(L"A", + WstrList{L"i_1", L"i_2", L"i_3"}, + WstrList{L"a_1", L"a_2", L"a_3"}, Symmetry::antisymm)); + // Bra or ket for P operators don't matter + auto P_ab = ex(Tensor(L"P",WstrList{L"a_1", L"a_2"},{})); + auto P_ac = ex(Tensor(L"P",WstrList{L"a_1", L"a_3"},{})); + auto P_bc = ex(Tensor(L"P",WstrList{L"a_2", L"a_3"},{})); + auto P_ij = ex(Tensor(L"P",WstrList{L"i_1", L"i_2"},{})); + auto P_ik = ex(Tensor(L"P",WstrList{L"i_1", L"i_3"},{})); + auto P_jk = ex(Tensor(L"P",WstrList{L"i_2", L"i_3"},{})); + + // G and t are kept consistent with the SeQuant CC notation + auto G_oovv = ex(Tensor(L"g", + WstrList{L"i_4", L"i_5"}, WstrList{L"a_4", L"a_5"}, + Symmetry::antisymm)); + + auto t2_a1 = ex(Tensor(L"t", + WstrList{L"a_4", L"a_1"}, + WstrList{L"i_4", L"i_1"}, Symmetry::antisymm)); + auto t3_a1 = ex(Tensor(L"t", + WstrList{L"a_1", L"a_2", L"a_3"}, + WstrList{L"i_5", L"i_2", L"i_3"}, Symmetry::antisymm)); + + auto t2_a2 = ex(Tensor(L"t", + WstrList{L"a_1", L"a_2"}, + WstrList{L"i_1", L"i_4"}, Symmetry::antisymm)); + auto t3_a2 = ex(Tensor(L"t", + WstrList{L"a_4", L"a_5", L"a_3"}, + WstrList{L"i_2", L"i_5", L"i_3"}, Symmetry::antisymm)); + + auto t2_b1 = ex(Tensor(L"t", + WstrList{L"a_4", L"a_5"}, + WstrList{L"i_1", L"i_2"}, Symmetry::antisymm)); + auto t3_b1 = ex(Tensor(L"t", + WstrList{L"a_1", L"a_2", L"a_3"}, + WstrList{L"i_4", L"i_5", L"i_3"}, Symmetry::antisymm)); + + 
auto t2_b2 = ex(Tensor(L"t", + WstrList{L"a_1", L"a_2"}, + WstrList{L"i_4", L"i_5"}, Symmetry::antisymm)); + auto t3_b2 = ex(Tensor(L"t", + WstrList{L"a_4", L"a_5", L"a_3"}, + WstrList{L"i_2", L"i_5", L"i_3"}, Symmetry::antisymm)); + + auto t2_c1 = ex(Tensor(L"t", + WstrList{L"a_4", L"a_1"}, + WstrList{L"i_4", L"i_5"}, Symmetry::antisymm)); + auto t3_c1 = ex(Tensor(L"t", + WstrList{L"a_5", L"a_2", L"a_3"}, + WstrList{L"i_1", L"i_2", L"i_3"}, Symmetry::antisymm)); + + auto t2_c2 = ex(Tensor(L"t", + WstrList{L"a_1", L"a_4"}, + WstrList{L"i_1", L"i_2"}, Symmetry::antisymm)); + auto t3_c2 = ex(Tensor(L"t", + WstrList{L"a_2", L"a_5", L"a_3"}, + WstrList{L"i_4", L"i_5", L"i_3"}, Symmetry::antisymm)); + + auto t2_d1 = ex(Tensor(L"t", + WstrList{L"a_1", L"a_4"}, + WstrList{L"i_1", L"i_4"}, Symmetry::antisymm)); + auto t3_d1 = ex(Tensor(L"t", + WstrList{L"a_5", L"a_2", L"a_3"}, + WstrList{L"i_5", L"i_2", L"i_3"}, Symmetry::antisymm)); + + // Terms + auto a1 = ex (-0.5) * (ex(1.0) - P_ij - P_ik) * + G_oovv * t2_a1 * t3_a1; + auto a2 = ex (-0.5) * (ex(1.0) - P_ij - P_ik) * + (ex(1.0) - P_ac - P_bc) * + G_oovv * t2_a2 * t3_a2; + auto b1 = ex (0.25) * (ex(1.0) - P_ij - P_ik) * + G_oovv * t2_b1 * t3_b1; + auto b2 = ex (0.25) * (ex(1.0) - P_bc - P_ac) * + G_oovv * t2_b2 * t3_b2; + auto c1 = ex (-0.5) * (ex(1.0) - P_ab - P_ac) * + G_oovv * t2_c1 * t3_c1; + auto c2 = ex (-0.5) * (ex(1.0) - P_ik - P_jk) * + (ex(1.0) - P_ab - P_ac) * + G_oovv * t2_c2 * t3_c2; + auto d1 = (ex(1.0) - P_ij - P_ik) * + (ex(1.0) - P_ab - P_ac) * + G_oovv * t2_d1 * t3_d1; + + for(auto &t : {a1,a2,b1,b2,c1,c2,d1}){ + std::wcout << to_latex(t) << std::endl; + } + + // Full term that needs to be subtracted from SeQuant derived CCSDT R3 + auto w_t2_t3 = a1 + a2 + b1 + b2 + c1 + c2 + d1; + + // p_CCSDT correction + auto pCCSDT_correction = [&](const std::vector& params){ + assert(params.size() == 3); + auto correction = ex(0.5) * a1 + + ex(0.5) * a2 + + ex(params[0]) * (ex(0.5) * a1 + b1) + + ex(params[1]) * 
(ex(0.5) * a2 + b2) + + ex(params[2]) * (c1 + c2 + d1); + simplify(correction); + return correction; + }; + + // Operations to simplify the correction term +#if 0 + expand(c1); + c1 = expand_P_op(c1); + c1 = A3 * c1; + expand(c1); + canonicalize(c1); + c1->visit(reset_idx_tags); + c1 = remove_tensor(c1, L"A"); +#endif + + // pCCSDT R3 expression + auto r3 = cc_r[3] - w_t2_t3 + pCCSDT_correction({1, 1, 0}); + #if 0 // @@ -127,7 +245,6 @@ int main(int argc, char* argv[]) { printf("CC R%lu size: %lu time: %5.3f sec.\n", i, cc_st_r[i]->size(), time_elapsed.count()); } -#endif // // Open-shell spintrace @@ -207,7 +324,7 @@ int main(int argc, char* argv[]) { runtime_assert(os_cc_st_r.at(3).at(2)->size() == 209); runtime_assert(os_cc_st_r.at(3).at(3)->size() == 75); } - +#endif } // Generate S operator from external index list From a716d5d98b2d8e08865a5c6651b333da97e70f75 Mon Sep 17 00:00:00 2001 From: nakulteke Date: Sat, 12 Mar 2022 13:32:46 -0500 Subject: [PATCH 031/120] cceqvec can generate cc-r12 residuals. PS: cc-r12 expr need screening after derivation --- SeQuant/domain/eqs/cceqs.cpp | 50 +++++++++++++++++++++--------------- SeQuant/domain/eqs/cceqs.hpp | 6 ++--- 2 files changed, 33 insertions(+), 23 deletions(-) diff --git a/SeQuant/domain/eqs/cceqs.cpp b/SeQuant/domain/eqs/cceqs.cpp index f48179126..0a8d2e5b8 100644 --- a/SeQuant/domain/eqs/cceqs.cpp +++ b/SeQuant/domain/eqs/cceqs.cpp @@ -143,7 +143,7 @@ class ccresidual { ccresidual(size_t p, size_t n) : P(p), N(n) {} ExprPtr operator()(bool screen, bool use_topology, bool use_connectivity, - bool canonical_only, bool antisymm) { + bool canonical_only, bool antisymm, bool r12) { auto ahbar = [=](const bool screen) { auto connect = [=](std::initializer_list> connlist) { if (use_connectivity) @@ -151,28 +151,37 @@ class ccresidual { else return std::initializer_list>{}; }; + auto s_r12 = r12 ? 
ex(1) : ex(0); auto result = screened_vac_av{0}(A(P) * H(antisymm), connect({}), screen, use_topology, canonical_only, antisymm) + - screened_vac_av{1}(A(P) * H(antisymm) * T(N, N, false, antisymm), + + screened_vac_av{1}(A(P) * H(antisymm) * + (T(N, N, false, antisymm) + s_r12 * R12()), connect({{1, 2}}), screen, use_topology, canonical_only, antisymm) + + ex(1. / 2) * - screened_vac_av{2}(A(P) * H(antisymm) * T(N, N, false, antisymm) * - T(N, N, false, antisymm), + screened_vac_av{2}(A(P) * H(antisymm) * + (T(N, N, false, antisymm) + s_r12 * R12()) * + (T(N, N, false, antisymm) + s_r12 * R12()), connect({{1, 2}, {1, 3}}), screen, use_topology, canonical_only) + + ex(1. / 6) * - screened_vac_av{3}(A(P) * H(antisymm) * T(N, N, false, antisymm) * - T(N, N, false, antisymm) * - T(N, N, false, antisymm), + screened_vac_av{3}(A(P) * H(antisymm) * + (T(N, N, false, antisymm) + s_r12 * R12()) * + (T(N, N, false, antisymm) + s_r12 * R12()) * + (T(N, N, false, antisymm) + s_r12 * R12()), connect({{1, 2}, {1, 3}, {1, 4}}), screen, use_topology, canonical_only) + + ex(1. 
/ 24) * - screened_vac_av{4}(A(P) * H(antisymm) * T(N, N, false, antisymm) * - T(N, N, false, antisymm) * - T(N, N, false, antisymm) * - T(N, N, false, antisymm), + screened_vac_av{4}(A(P) * H(antisymm) * + (T(N, N, false, antisymm) + s_r12 * R12()) * + (T(N, N, false, antisymm) + s_r12 * R12()) * + (T(N, N, false, antisymm) + s_r12 * R12()) * + (T(N, N, false, antisymm) + s_r12 * R12()), connect({{1, 2}, {1, 3}, {1, 4}, {1, 5}}), screen, use_topology, canonical_only); simplify(result); @@ -192,14 +201,14 @@ class ccresidual_vec { void operator()(std::vector& result, bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm) { + bool use_antisymm, bool r12) { result[P] = ccresidual{P, N}(screen, use_topology, use_connectivity, - canonical_only, use_antisymm); + canonical_only, use_antisymm, r12); rapid_simplify(result[P]); if (P > PMIN) ccresidual_vec{P - 1, PMIN, N}(result, screen, use_topology, use_connectivity, canonical_only, - use_antisymm); + use_antisymm, r12); } }; // class ccresidual_vec @@ -211,10 +220,11 @@ cceqvec::cceqvec(size_t n, size_t p, size_t pmin) std::vector cceqvec::operator()(bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm) { + bool use_antisymm, + bool r12) { std::vector result(P + 1); ccresidual_vec{P, PMIN, N}(result, screen, use_topology, use_connectivity, - canonical_only, use_antisymm); + canonical_only, use_antisymm, r12); return result; } @@ -233,10 +243,10 @@ compute_cceqvec::compute_cceqvec(size_t p, size_t pmin, size_t n) void compute_cceqvec::operator()(bool print, bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm) { + bool use_antisymm, bool r12) { tpool.start(N); auto eqvec = cceqvec{N, P}(screen, use_topology, use_connectivity, - canonical_only, use_antisymm); + canonical_only, use_antisymm, r12); tpool.stop(N); std::wcout << std::boolalpha << "expS" << N << "[screen=" << screen << ",use_topology=" 
<< use_topology @@ -262,9 +272,9 @@ compute_all::compute_all(size_t nmax) : NMAX(nmax) {} void compute_all::operator()(bool print, bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm) { + bool use_antisymm, bool r12) { for (size_t N = 2; N <= NMAX; ++N) compute_cceqvec{N, 1, N}(print, screen, use_topology, use_connectivity, - canonical_only, use_antisymm); + canonical_only, use_antisymm, r12); } } // namespace sequant::eqs diff --git a/SeQuant/domain/eqs/cceqs.hpp b/SeQuant/domain/eqs/cceqs.hpp index 166acf233..0a48b7208 100644 --- a/SeQuant/domain/eqs/cceqs.hpp +++ b/SeQuant/domain/eqs/cceqs.hpp @@ -17,7 +17,7 @@ class cceqvec { std::vector operator()(bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm); + bool use_antisymm, bool r12); }; // class cceqvec class compute_cceqvec { @@ -28,7 +28,7 @@ class compute_cceqvec { void operator()(bool print, bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm); + bool use_antisymm, bool r12); }; // class compute_cceqvec class compute_all { @@ -40,7 +40,7 @@ class compute_all { void operator()(bool print = true, bool screen = true, bool use_topology = true, bool use_connectivity = true, bool canonical_only = true, - bool use_antisymm = true); + bool use_antisymm = true, bool r12 = false); }; // class compute_all } // namespace sequant::eqs From 058bca025d3ee12a79da8ee4fc02d3c6ebb98241 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 17 Mar 2022 21:01:47 -0400 Subject: [PATCH 032/120] Impls eval functions for tensor-of-tensors. 
--- SeQuant/core/eval_expr.hpp | 31 ++++---- SeQuant/domain/eval/eval_ta.hpp | 123 +++++++++++++++++++++++--------- 2 files changed, 109 insertions(+), 45 deletions(-) diff --git a/SeQuant/core/eval_expr.hpp b/SeQuant/core/eval_expr.hpp index 772011518..2fac08afc 100644 --- a/SeQuant/core/eval_expr.hpp +++ b/SeQuant/core/eval_expr.hpp @@ -161,13 +161,15 @@ class EvalExpr final { /// /// Given an iterable of Index objects, generate a string annotation - /// that can be used for TiledArray tensor expressions. Tensor-of-tensors also - /// supported. + /// that can be used for TiledArray tensor expressions. + /// Tensor-of-tensors also supported. template static std::string braket_to_annot(Indices_t const& indices) { - // make a comma-separated and concatenated string out of an iterable of strings + using ranges::views::transform; + + // make a comma-separated string out of an iterable of strings auto add_commas = [](auto const& strs) -> std::string { - std::string result{ranges::front(strs)}; + auto result = std::string{ranges::front(strs)}; for (auto&& s: ranges::views::tail(strs)) result += "," + s; return result; @@ -175,20 +177,25 @@ class EvalExpr final { container::vector outer_labels{}, inner_labels{}; for (auto&& idx: indices) { - inner_labels.emplace_back(idx.ascii_label()); - for (auto&& pidx: idx.proto_indices()) - outer_labels.emplace_back(pidx.ascii_label()); + if (idx.has_proto_indices()) { + inner_labels.emplace_back(idx.ascii_label()); + for (auto&& pidx : idx.proto_indices()) + outer_labels.emplace_back(pidx.ascii_label()); + } else { + outer_labels.emplace_back(idx.ascii_label()); + } } - if (outer_labels.empty()) - return add_commas(inner_labels); + if (inner_labels.empty()) + return add_commas(outer_labels); // support CSV methods - ranges::sort(outer_labels); ranges::sort(inner_labels); - auto outer_labels_updated = ranges::views::set_difference(outer_labels, inner_labels); - return add_commas(outer_labels_updated) + ";" + 
add_commas(inner_labels); + ranges::sort(outer_labels); + ranges::actions::unique(outer_labels); + return add_commas(outer_labels) + ";" + add_commas(inner_labels); } + }; } // namespace sequant diff --git a/SeQuant/domain/eval/eval_ta.hpp b/SeQuant/domain/eval/eval_ta.hpp index 4148249fe..ddcda6200 100644 --- a/SeQuant/domain/eval/eval_ta.hpp +++ b/SeQuant/domain/eval/eval_ta.hpp @@ -4,43 +4,14 @@ #include #include +#include +#include #include namespace sequant::eval { namespace detail { -/// -/// Given an iterable of Index objects, generate a string annotation -/// that can be used for TiledArray tensor expressions. Tensor-of-tensors also -/// supported. -template -std::string braket_to_annot(Indices_t const& indices) { - // make a comma-separated and concatenated string out of an iterable of strings - auto add_commas = [](auto const& strs) -> std::string { - std::string result{ranges::front(strs)}; - for (auto&& s: ranges::views::tail(strs)) - result += "," + s; - return result; - }; - - container::vector outer_labels{}, inner_labels{}; - for (auto&& idx: indices) { - inner_labels.emplace_back(idx.ascii_label()); - for (auto&& pidx: idx.proto_indices()) - outer_labels.emplace_back(pidx.ascii_label()); - } - - if (outer_labels.empty()) - return add_commas(inner_labels); - - // support CSV methods - ranges::sort(outer_labels); - ranges::sort(inner_labels); - auto outer_labels_updated = ranges::views::set_difference(outer_labels, inner_labels); - return add_commas(outer_labels_updated) + ";" + add_commas(inner_labels); -} - auto const ords_to_annot = [](auto const& ords) { using ranges::accumulate; using ranges::views::intersperse; @@ -59,7 +30,7 @@ Tensor_t eval_inode(EvalNode const& node, Tensor_t const& leval, auto assert_imaginary_zero = [](sequant::Constant const& c) { assert(c.value().imag() == 0 && - "complex scalar unsupported for real tensor"); + "complex scalar unsupported"); }; assert_imaginary_zero(node.left()->scalar()); @@ -77,13 +48,46 @@ Tensor_t 
eval_inode(EvalNode const& node, Tensor_t const& leval, result(this_annot) = (lscal * rscal) * leval(lannot) * reval(rannot); } else { // sum - assert(node->op() == EvalOp::Sum && "unsupported operation for eval"); result(this_annot) = lscal * leval(lannot) + rscal * reval(rannot); } return result; } +template +Tensor_t eval_inode_tot(EvalNode const& node, Tensor_t const& leval, + Tensor_t const& reval) { + assert((node->op() == EvalOp::Sum || node->op() == EvalOp::Prod) && + "unsupported intermediate operation"); + + auto assert_imaginary_zero = [](sequant::Constant const& c) { + assert(c.value().imag() == 0 && + "complex scalar unsupported"); + }; + + assert_imaginary_zero(node.left()->scalar()); + assert_imaginary_zero(node.right()->scalar()); + + auto const lscal = node.left()->scalar().value().real(); + auto const rscal = node.right()->scalar().value().real(); + auto const& this_annot = node->annot(); + auto const& lannot = node.left()->annot(); + auto const& rannot = node.right()->annot(); + + auto result = Tensor_t{}; + if (node->op() == EvalOp::Prod) { + // prod + // result(this_annot) = (lscal * rscal) * leval(lannot) * reval(rannot); + decltype(result) unscaled = TA::expressions::einsum(leval(lannot), + reval(rannot), this_annot); + result(this_annot) = (lscal * rscal) * unscaled(this_annot); + } else { + // sum + result(this_annot) = lscal * leval(lannot) + rscal * reval(rannot); + } + return result; +} + template Tensor_t eval_single_node(EvalNode const& node, Yielder&& leaf_evaluator, CacheManager& cache_manager) { @@ -109,6 +113,31 @@ Tensor_t eval_single_node(EvalNode const& node, Yielder&& leaf_evaluator, cache_manager))); } +template +Tensor_t eval_single_node_tot(EvalNode const& node, Yielder&& leaf_evaluator, + CacheManager& cache_manager) { + static_assert( + std::is_invocable_r_v); + + auto const key = node->hash(); + + if (auto&& exists = cache_manager.access(key); exists && exists.value()) + return *exists.value(); + + return node.leaf() + ? 
*cache_manager.store(key, leaf_evaluator(node->tensor())) + : *cache_manager.store( + key, + eval_inode_tot( + node, + eval_single_node_tot(node.left(), + std::forward(leaf_evaluator), + cache_manager), + eval_single_node_tot(node.right(), + std::forward(leaf_evaluator), + cache_manager))); +} + } // namespace detail /// @@ -157,6 +186,34 @@ auto eval(EvalNode const& node, Iterable const& target_indx_labels, return scaled; } +template , + bool> = true> +Tensor_t eval_tot(EvalNode const& node, + Iterable1 const& outer_indx_labels, + Iterable2 const& inner_indx_labels, + Yielder&& yielder, CacheManager& man) { + auto bpindx_rcvd = TA::expressions::BipartiteIndexList{outer_indx_labels, inner_indx_labels}; + auto bpindx_exst = TA::expressions::BipartiteIndexList{node->annot()}; + assert(bpindx_exst.first().is_permutation(bpindx_exst.first()) + && bpindx_exst.second().is_permutation(bpindx_exst.second()) + && "Invalid target index labels"); + + auto result = detail::eval_single_node_tot(node, std::forward(yielder), man); + + auto const lannot = bpindx_rcvd.first().string() + + ";" + + bpindx_rcvd.second().string(); + auto scaled = decltype(result){}; + scaled(lannot) = node->scalar().value().real() * result(node->annot()); + return scaled; +} + /// /// Evaluate a node and symmetrize the result. /// From 90433f6c378dbb30d7f06739d8aefb27bbac54ef Mon Sep 17 00:00:00 2001 From: connermasteran Date: Fri, 18 Mar 2022 13:27:49 -0400 Subject: [PATCH 033/120] treating C as a single object and handle antihermitian part in mpqc. 
--- SeQuant/domain/eqs/single_ref_uccf12.h | 32 +++++++++++++++++--------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index 8a5f21b50..ee4dd99c0 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -42,8 +42,13 @@ class uccf12{ //[[e1,e2],e3]_12 ExprPtr compute_double_com(ExprPtr e1, ExprPtr e2, ExprPtr e3, int ansatz = 2){ auto first_com = do_wick((e1 * e2) - (e2 * e1)); - simplify(first_com); - std::wcout << to_latex_align(first_com,20,2) << std::endl; + /*auto second_com = (((e1 * e2) - (e2 * e1)) * e3) - (e3 * ((e1 * e2) - (e2 * e1))); + non_canon_simplify(second_com); + std::wcout << "second com: " << to_latex_align(second_com,20,2) << std::endl; + second_com = do_wick(second_com); + simplify(second_com); + std::wcout << "second com: " << to_latex_align(second_com,20,2) << std::endl; + */simplify(first_com); auto second_com_1 = first_com * e3; //non_canon_simplify(second_com_1); simplify(second_com_1); @@ -53,9 +58,10 @@ class uccf12{ second_com_2 = do_wick(second_com_2); auto second_com = second_com_1 - second_com_2; simplify(second_com); + std::wcout << "second com: " << to_latex_align(second_com,20,2) << std::endl; if(ansatz == 2) { second_com = keep_up_to_3_body_terms(second_com); - // std::wcout << to_latex_align(second_com,20,2) << std::endl; + std::wcout << to_latex_align(second_com,20,2) << std::endl; second_com = second_com + ex(0.); // make a sum to avoid heavy code duplication for product and sum variants. second_com = simplification::overlap_with_obs(second_com); // std::wcout << to_latex_align(second_com,20,2) << std::endl; @@ -280,21 +286,21 @@ class uccf12{ throw " USUPPORTED SPACE LABEL! 
CHECK ABOVE FOR VALID ENTRIES"; } auto single = ex(0.0); - auto single_ = ex(0.0); + //auto single_ = ex(0.0); if(singles){ // this might need to be complete space if we don't have a solution to the particular blocks of interest. auto C = ex(L"C",std::initializer_list{Index::make_tmp_index(IndexSpace::instance(IndexSpace::all))},std::initializer_list{Index::make_tmp_index(IndexSpace::instance(IndexSpace::other_unoccupied))}); auto E_pa = ex (std::initializer_list{C->as().bra()[0]},std::initializer_list{C->as().ket()[0]}); auto C_Epa = C * E_pa; - auto anti_herm_C = C_Epa - adjoint(C_Epa); + auto anti_herm_C = C_Epa/* - adjoint(C_Epa)*/; single = single + anti_herm_C; - simplify(single); + //simplify(single); std::wcout << "single term" << to_latex_align(single) << std::endl; - auto single_2 = single->clone(); - single_2 = relable(single_2); - std::wcout << "single after relable" << to_latex_align(single_2) << std::endl; - single_ = single_2; + //auto single_2 = single->clone(); + //single_2 = relable(single_2); + //std::wcout << "single after relable" << to_latex_align(single_2) << std::endl; + //single_ = single_2; } if (ansatz == 2) { @@ -303,7 +309,11 @@ class uccf12{ auto r_1 = R12(gg_space); auto A = (r - adjoint(r)) + single; - auto A_ = (r_1 - adjoint(r_1)) + single_; + std::wcout << "A: " << to_latex_align(A,20,2) << std::endl; + auto A_ = A->clone(); + A_ = relable(A_); + std::wcout << "A_: " << to_latex_align(A_,20,2) << std::endl; + //auto A_ = (r_1 - adjoint(r_1)) + single_; auto H_A = do_wick(ex(1.) * ((h * A) - (A * h))); auto H_A_3 = keep_up_to_3_body_terms(H_A); // std::wcout << "pre decomp: " << to_latex_align(single_Comm,20,2) << std::endl; From 7fa24495fc839b1c5871be27c788473b969fa3bc Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Wed, 30 Mar 2022 11:11:24 -0400 Subject: [PATCH 034/120] Fence TA computations. 
--- SeQuant/domain/eval/eval_ta.hpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/SeQuant/domain/eval/eval_ta.hpp b/SeQuant/domain/eval/eval_ta.hpp index ddcda6200..a18c3bb01 100644 --- a/SeQuant/domain/eval/eval_ta.hpp +++ b/SeQuant/domain/eval/eval_ta.hpp @@ -51,6 +51,7 @@ Tensor_t eval_inode(EvalNode const& node, Tensor_t const& leval, result(this_annot) = lscal * leval(lannot) + rscal * reval(rannot); } + TA::get_default_world().gop.fence(); return result; } @@ -85,6 +86,7 @@ Tensor_t eval_inode_tot(EvalNode const& node, Tensor_t const& leval, // sum result(this_annot) = lscal * leval(lannot) + rscal * reval(rannot); } + TA::get_default_world().gop.fence(); return result; } @@ -183,6 +185,7 @@ auto eval(EvalNode const& node, Iterable const& target_indx_labels, auto scaled = decltype(result){}; scaled(lannot) = node->scalar().value().real() * result(node->annot()); + TA::get_default_world().gop.fence(); return scaled; } @@ -211,6 +214,7 @@ Tensor_t eval_tot(EvalNode const& node, + bpindx_rcvd.second().string(); auto scaled = decltype(result){}; scaled(lannot) = node->scalar().value().real() * result(node->annot()); + TA::get_default_world().gop.fence(); return scaled; } @@ -248,6 +252,7 @@ auto eval_symm(EvalNode const& node, Iterable const& target_indx_labels, }; symmetrize_tensor(result.trange().rank(), sym_impl); + TA::get_default_world().gop.fence(); return symm_result; } @@ -286,6 +291,7 @@ auto eval_antisymm(EvalNode const& node, Iterable const& target_indx_labels, }; antisymmetrize_tensor(result.trange().rank(), asym_impl); + TA::get_default_world().gop.fence(); return antisymm_result; } From d3244c5c15d1bd54d5e848c0ec3233ffd2fe2603 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Wed, 30 Mar 2022 20:40:55 -0400 Subject: [PATCH 035/120] Unit test CacheManager --- tests/unit/test_cache_manager.cpp | 93 +++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 tests/unit/test_cache_manager.cpp diff --git 
a/tests/unit/test_cache_manager.cpp b/tests/unit/test_cache_manager.cpp new file mode 100644 index 000000000..a6999aac3 --- /dev/null +++ b/tests/unit/test_cache_manager.cpp @@ -0,0 +1,93 @@ +#include +#include +#include + +#include "catch.hpp" + +TEST_CASE("TEST_CACHE_MANAGER", "[cache_manager]") { + using ranges::views::concat; + using ranges::views::zip; + using data_type = int; + using manager_type = sequant::eval::CacheManager; + using key_type = manager_type::key_t; + using count_type = manager_type::count_t; + + auto const n_persistent = 4; // arbitrary + auto const n_decaying = 4; // arbitrary + // arbitrary keys and vals + auto const decaying_keys = std::array{100, 110, 200, 210}; + // decaying entries repeat more than once + auto const decaying_repeats = std::array{2, 2, 4, 3}; + auto const decaying_vals = std::array{10, 11, 20, 21}; + + // arbitrary vals and keys not present in decaying_keys + auto const persistent_keys = std::array{111, 222, 333, 444}; + auto const persistent_vals = std::array{11, 22, 33, 44}; + + auto man = + manager_type(zip(decaying_keys, decaying_repeats), persistent_keys); + + // filling data + for (auto&& [k,v]: + zip(concat(decaying_keys, persistent_keys), + concat(decaying_vals, persistent_vals))) { + // NOTE: man.store() calls man.access() implicitly and + // returns a shared_ptr to data + // hence, a count of lifetime is lost right here + REQUIRE(man.store(k, v)); + } + + auto const man_copy = man; + + SECTION("Construction") { + REQUIRE(man.cache_map().size() == n_persistent + n_decaying); + for (auto&& k: concat(decaying_keys, persistent_keys)) + REQUIRE(man.cache_map().find(k) != man.cache_map().end()); + for (auto&& [k, c]: zip(decaying_keys, decaying_repeats)) + // (c - 1) because a lifetime count is lost by implicit access + // during storing + REQUIRE(man.cache_map().find(k)->second.life_count() == c-1); + } + + SECTION("Data Access") { + // restore the cache manager in with full lifetimes and data + man = man_copy; + 
for (auto&& [k, v, r] : + zip(decaying_keys, decaying_vals, decaying_repeats)) { + // r - 1: the lifetime count at this point + for (auto i = r - 1; i > 1; --i) { + auto entry = man.access(k); + REQUIRE(entry); // optional> + REQUIRE(entry.value()); // shared_ptr<..> + REQUIRE(*entry.value() == v); + auto iter = man.cache_map().find(k); + REQUIRE(iter != man.cache_map().end()); + REQUIRE(iter->second.life_count() == i - 1); + } + } + // at this point all the decaying entries have only one lifetime left + // accessing each decaying entry one more time should release + // their *data* from the memory + for (auto&& k: decaying_keys) { + auto iter = man.cache_map().find(k); + REQUIRE(iter->second.life_count() == 1); + REQUIRE(man.access(k).value()); // accessed once. non-null ptr returned + REQUIRE_FALSE(man.access(k).value()); // nullptr returned + REQUIRE(iter->second.life_count() == 0); + } + + // meanwhile, the persistent entries are all intact + for (auto&& [k,v]: zip(persistent_keys, persistent_vals)) + REQUIRE(*man.access(k).value() == v); + + // now we reset the decaying entries which restores thier lifetimes + man.reset_decaying(); + for (auto&& [k,c]: zip(decaying_keys, decaying_repeats)) + REQUIRE(man.cache_map().find(k)->second.life_count() == c); + + // now we reset all entries + man.reset_all(); + for (auto&& k: concat(decaying_keys, persistent_keys)) + REQUIRE_FALSE(man.access(k).value()); // nullptr to data returned + } +} From 0c498802fecfe0314d473a1c89b226781c7550ae Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Wed, 30 Mar 2022 20:43:16 -0400 Subject: [PATCH 036/120] Refactor CacheManager --- SeQuant/domain/eval/cache_manager.hpp | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/SeQuant/domain/eval/cache_manager.hpp b/SeQuant/domain/eval/cache_manager.hpp index 2e09228f1..55d4ed41b 100644 --- a/SeQuant/domain/eval/cache_manager.hpp +++ b/SeQuant/domain/eval/cache_manager.hpp @@ -59,6 +59,8 @@ class 
CacheManager { } } + [[nodiscard]] count_t life_count() const noexcept { return life_c; } + private: [[nodiscard]] int decay() noexcept { return life_t == Lifetime::Persistent ? -1 : (life_c > 0 ? --life_c : 0); @@ -71,39 +73,41 @@ class CacheManager { return entry.access(); } - container::map> cache_map; + container::map> cache_map_; public: template , typename Iterable2 = container::svector> - CacheManager(Iterable1 &&decaying, Iterable2 &&persistent = {}) { + CacheManager(Iterable1 &&decaying, Iterable2 &&persistent) { for (auto &&[k, c] : decaying) - cache_map.try_emplace(k, entry{static_cast(c)}); + cache_map_.try_emplace(k, entry{static_cast(c)}); - for (auto &&k : persistent) cache_map.try_emplace(k, entry{}); + for (auto &&k : persistent) cache_map_.try_emplace(k, entry{}); } void reset_all() { - for (auto &&[k, v] : cache_map) v.reset(false); + for (auto &&[k, v] : cache_map_) v.reset(false); } void reset_decaying() { - for (auto &&[k, v] : cache_map) v.reset(true); + for (auto &&[k, v] : cache_map_) v.reset(true); } std::optional access(key_t key) noexcept { - if (auto &&found = cache_map.find(key); found != cache_map.end()) + if (auto &&found = cache_map_.find(key); found != cache_map_.end()) return found->second.access(); return std::nullopt; } ptr_t store(key_t key, Data data) { - if (auto &&found = cache_map.find(key); found != cache_map.end()) - return (store(found->second, std::move(data))); + if (auto &&found = cache_map_.find(key); found != cache_map_.end()) + return store(found->second, std::move(data)); return std::make_shared(std::move(data)); } + auto const& cache_map() const { return cache_map_; } + }; // CacheManager } // namespace From 69505f094b968652343edff4c07c0e09637a6bff Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Thu, 31 Mar 2022 12:23:45 -0400 Subject: [PATCH 037/120] update sequant::hash::combine implementation in case using Boost 1.78+ --- SeQuant/core/hash.hpp | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 
deletions(-) diff --git a/SeQuant/core/hash.hpp b/SeQuant/core/hash.hpp index 3b5d7fccf..39154e7e9 100644 --- a/SeQuant/core/hash.hpp +++ b/SeQuant/core/hash.hpp @@ -87,16 +87,27 @@ inline void combine(std::size_t& seed, T const& v) { // std::size_t seed_ref = seed; // boost::hash_combine(seed_ref, v); _ hasher; + // in boost 1.78 hash_combine_impl implementation changed + // https://github.com/boostorg/container_hash/commit/21f2b5e1db1a118c83a3690055c110d0f5637da3 + // probably no longer need these acrobatics if constexpr (sizeof(std::size_t) == sizeof(boost::uint32_t) && sizeof(decltype(hasher(v))) == sizeof(boost::uint32_t)) { const boost::uint32_t value = hasher(v); - return boost::hash_detail::hash_combine_impl( - reinterpret_cast(seed), value); + boost::hash_detail::hash_combine_impl +#if BOOST_VERSION >= 107800 + <32>::fn +#endif + (reinterpret_cast(seed), value); + return; } else if constexpr (sizeof(std::size_t) == sizeof(boost::uint64_t) && sizeof(decltype(hasher(v))) == sizeof(boost::uint64_t)) { const boost::uint64_t value = hasher(v); - return boost::hash_detail::hash_combine_impl( - reinterpret_cast(seed), value); + boost::hash_detail::hash_combine_impl +#if BOOST_VERSION >= 107800 + <64>::fn +#endif + (reinterpret_cast(seed), value); + return; } else { seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); } From 5bfd879a972d3983de363d58c83ebe0ccf9c3e2f Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Thu, 31 Mar 2022 12:27:15 -0400 Subject: [PATCH 038/120] bump range-v3 tag --- external/versions.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/external/versions.cmake b/external/versions.cmake index c276bd007..43469f264 100644 --- a/external/versions.cmake +++ b/external/versions.cmake @@ -3,6 +3,6 @@ set(SEQUANT_TRACKED_BOOST_VERSION 1.67) -set(SEQUANT_TRACKED_RANGEV3_TAG 2e0591c57fce2aca6073ad6e4fdc50d841827864) +set(SEQUANT_TRACKED_RANGEV3_TAG d800a032132512a54c291ce55a2a43e0460591c7) 
set(SEQUANT_TRACKED_TILEDARRAY_TAG 5c768a7b121886dfe406c6dd6a38acaa8782ae6e) From 62fc43043135e85c3fd16653f9ac28a2938c8353 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 31 Mar 2022 15:05:49 -0400 Subject: [PATCH 039/120] Typos and refactor cache_manager unit test. --- tests/unit/test_cache_manager.cpp | 81 ++++++++++++++++--------------- 1 file changed, 43 insertions(+), 38 deletions(-) diff --git a/tests/unit/test_cache_manager.cpp b/tests/unit/test_cache_manager.cpp index a6999aac3..a9a04a4fc 100644 --- a/tests/unit/test_cache_manager.cpp +++ b/tests/unit/test_cache_manager.cpp @@ -12,46 +12,50 @@ TEST_CASE("TEST_CACHE_MANAGER", "[cache_manager]") { using key_type = manager_type::key_t; using count_type = manager_type::count_t; - auto const n_persistent = 4; // arbitrary - auto const n_decaying = 4; // arbitrary - // arbitrary keys and vals - auto const decaying_keys = std::array{100, 110, 200, 210}; + size_t constexpr n_persistent = 4; // arbitrary + size_t constexpr n_decaying = 4; // arbitrary + + // arbitrary decaying keys + auto const decaying_keys = + std::array{100, 110, 200, 210}; // decaying entries repeat more than once - auto const decaying_repeats = std::array{2, 2, 4, 3}; - auto const decaying_vals = std::array{10, 11, 20, 21}; + auto const decaying_repeats = std::array{2, 2, 4, 3}; + // arbitrary vals corresponding to decaying keys + auto const decaying_vals = std::array{10, 11, 20, 21}; - // arbitrary vals and keys not present in decaying_keys - auto const persistent_keys = std::array{111, 222, 333, 444}; - auto const persistent_vals = std::array{11, 22, 33, 44}; + // arbitrary persistent vals, and keys not present in decaying_keys + auto const persistent_keys = + std::array{111, 222, 333, 444}; + auto const persistent_vals = + std::array{11, 22, 33, 44}; - auto man = + auto const man_const = manager_type(zip(decaying_keys, decaying_repeats), persistent_keys); - // filling data - for (auto&& [k,v]: - zip(concat(decaying_keys, 
persistent_keys), - concat(decaying_vals, persistent_vals))) { - // NOTE: man.store() calls man.access() implicitly and - // returns a shared_ptr to data - // hence, a count of lifetime is lost right here - REQUIRE(man.store(k, v)); - } - - auto const man_copy = man; - SECTION("Construction") { + auto const& man = man_const; + REQUIRE(man.cache_map().size() == n_persistent + n_decaying); - for (auto&& k: concat(decaying_keys, persistent_keys)) - REQUIRE(man.cache_map().find(k) != man.cache_map().end()); - for (auto&& [k, c]: zip(decaying_keys, decaying_repeats)) - // (c - 1) because a lifetime count is lost by implicit access - // during storing - REQUIRE(man.cache_map().find(k)->second.life_count() == c-1); + + // verifying the life count of decaying entries + for (auto&& [k, c] : zip(decaying_keys, decaying_repeats)) + REQUIRE(man.cache_map().find(k)->second.life_count() == c); } SECTION("Data Access") { - // restore the cache manager in with full lifetimes and data - man = man_copy; + // need a non-const manager object + auto man = man_const; + // filling data + for (auto&& [k, v] : zip(concat(decaying_keys, persistent_keys), + concat(decaying_vals, persistent_vals))) { + // NOTE: man.store() calls man.access() implicitly and + // returns a shared_ptr to data + // hence, a count of lifetime is lost right here + REQUIRE(man.store(k, v)); + } + + // now accessing decaying entries' data from the cache (c - 1) times + // where c is the corresponding entry's lifetime count for (auto&& [k, v, r] : zip(decaying_keys, decaying_vals, decaying_repeats)) { // r - 1: the lifetime count at this point @@ -65,29 +69,30 @@ TEST_CASE("TEST_CACHE_MANAGER", "[cache_manager]") { REQUIRE(iter->second.life_count() == i - 1); } } + // at this point all the decaying entries have only one lifetime left // accessing each decaying entry one more time should release // their *data* from the memory - for (auto&& k: decaying_keys) { + for (auto&& k : decaying_keys) { auto iter = 
man.cache_map().find(k); REQUIRE(iter->second.life_count() == 1); - REQUIRE(man.access(k).value()); // accessed once. non-null ptr returned - REQUIRE_FALSE(man.access(k).value()); // nullptr returned + REQUIRE(man.access(k).value()); // accessed once. non-null ptr returned + REQUIRE_FALSE(man.access(k).value()); // nullptr returned REQUIRE(iter->second.life_count() == 0); } // meanwhile, the persistent entries are all intact - for (auto&& [k,v]: zip(persistent_keys, persistent_vals)) + for (auto&& [k, v] : zip(persistent_keys, persistent_vals)) REQUIRE(*man.access(k).value() == v); // now we reset the decaying entries which restores thier lifetimes man.reset_decaying(); - for (auto&& [k,c]: zip(decaying_keys, decaying_repeats)) + for (auto&& [k, c] : zip(decaying_keys, decaying_repeats)) REQUIRE(man.cache_map().find(k)->second.life_count() == c); // now we reset all entries man.reset_all(); - for (auto&& k: concat(decaying_keys, persistent_keys)) - REQUIRE_FALSE(man.access(k).value()); // nullptr to data returned + for (auto&& k : concat(decaying_keys, persistent_keys)) + REQUIRE_FALSE(man.access(k).value()); // nullptr to data returned } -} +} \ No newline at end of file From fa3d0a8c377606244598459e149c0c124db17f01 Mon Sep 17 00:00:00 2001 From: nakulteke Date: Tue, 5 Apr 2022 11:33:05 -0400 Subject: [PATCH 040/120] closed-shell spintrace functions in srcc --- CMakeLists.txt | 10 ++ examples/srcc/srcc.cpp | 201 +++++++++++++++++++++++++++++++++++++++-- 2 files changed, 202 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 91b68054d..e9e362ad2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -282,6 +282,11 @@ set(utests_src tests/unit/test_rpn.cpp tests/unit/test_clone.cpp ) +#set(utests_src tests/unit/test_spin.cpp +# tests/unit/test_op.cpp +# tests/unit/test_index.cpp +# tests/unit/test_wick.cpp +# tests/unit/test_tensor.cpp) if (TARGET tiledarray) list(APPEND utests_src @@ -324,6 +329,11 @@ add_executable(${example1} 
EXCLUDE_FROM_ALL examples/${example1}/${example1}.cpp) target_link_libraries(${example1} SeQuant ${TBB_LIBRARIES}) +set(example3 ccf12) +add_executable(${example3} EXCLUDE_FROM_ALL + examples/${example3}/${example3}.cpp) +target_link_libraries(${example3} SeQuant ${TBB_LIBRARIES}) + set(example_eval_src examples/eval/eval_utils.hpp examples/eval/options.hpp diff --git a/examples/srcc/srcc.cpp b/examples/srcc/srcc.cpp index 167571e81..d8f8fd040 100644 --- a/examples/srcc/srcc.cpp +++ b/examples/srcc/srcc.cpp @@ -14,6 +14,11 @@ using namespace sequant; +ExprPtr biorthogonal_transform( + const sequant::ExprPtr& expr, const int n_particles, + const std::vector>& ext_index_groups = {{}}, + const double threshold = 1.e-12); + container::vector biorthogonal_tran_coeff(const int n_particles, const double& threshold); std::vector> biorthogonal_tran_idx_map(const container::vector> ext_index_groups); ExprPtr symmetrize_expr(ExprPtr& expr, const container::vector> ext_index_groups = {{}}); @@ -88,8 +93,47 @@ int main(int argc, char* argv[]) { return ext_idx_list; }; + const int k = 3; // Spin-orbital coupled cluster - auto cc_r = sequant::eqs::cceqvec{3, 3}(true, true, true, true, true); + auto cc_r = sequant::eqs::cceqvec{k, k}(true, true, true, true, true, false); + std::wcout << to_latex(cc_r[3]) << std::endl; + // cc_r[2] = cc_r[2]->as().take_n(2,2); + // std::wcout << __LINE__ << " " << to_latex(cc_r[2]) << std::endl; +// auto cc_r1 = cc_r[1]->clone(); +// auto cc_r2 = cc_r[2]->clone(); +// for(auto& term : *cc_r1){ +// if (term->is()) +// term = sequant::remove_tensor(term->as(), L"A"); +// } +// std::wcout << to_latex(cc_r1) << std::endl; + +// for(auto& term : *cc_r2){ +// if (term->is()) +// term = sequant::remove_tensor(term->as(), L"A"); +// } +// std::wcout << to_latex(cc_r2) << std::endl; + +#if 0 + std::wcout << "A: " << to_latex(cc_r[3]->as().summand(30)) << std::endl; + std::wcout << "B: " << to_latex(cc_r[3]->as().summand(14)) << std::endl; + std::wcout << 
"C: " << to_latex(cc_r[3]->as().summand(10)) << std::endl; + std::wcout << "D: " << to_latex(cc_r[3]->as().summand(27)) << std::endl; + + cc_r[3]->as().at(30) = ex(1./9) * cc_r[3]->as().summand(30); + expand(cc_r[3]->as().at(30)); + cc_r[3]->as().at(14) = ex(1./3) * cc_r[3]->as().summand(14); + expand(cc_r[3]->as().at(14)); + cc_r[3]->as().at(10) = ex(1./9) * cc_r[3]->as().summand(10); + expand(cc_r[3]->as().at(10)); + cc_r[3]->as().at(27) = ex(1./18) * cc_r[3]->as().summand(27); + expand(cc_r[3]->as().at(27)); + + std::wcout << "A: " << to_latex(cc_r[3]->as().summand(30)) << std::endl; + std::wcout << "B: " << to_latex(cc_r[3]->as().summand(14)) << std::endl; + std::wcout << "C: " << to_latex(cc_r[3]->as().summand(10)) << std::endl; + std::wcout << "D: " << to_latex(cc_r[3]->as().summand(27)) << std::endl; + + auto A3 = ex(Tensor(L"A", WstrList{L"i_1", L"i_2", L"i_3"}, @@ -194,7 +238,22 @@ int main(int argc, char* argv[]) { return correction; }; - // Operations to simplify the correction term + // g*t2*t2 correction + auto G_ovvv = ex(Tensor(L"g", + WstrList{L"i_5", L"a_2"}, WstrList{L"a_5", L"a_6"}, + Symmetry::antisymm)); + auto t2_1 = ex(Tensor(L"t", + WstrList{L"a_1", L"a_5"}, + WstrList{L"i_1", L"i_5"}, Symmetry::antisymm)); + auto t2_2 = ex(Tensor(L"t", + WstrList{L"a_6", L"a_3"}, + WstrList{L"i_2", L"i_3"}, Symmetry::antisymm)); + + auto g_t2_t2_corr = ex(0.5) * (ex(1) - /* a/b/c permutation */) * + (ex(1) - P_ij - P_ik) * G_ovvv * t2_1 * t2_2; +#endif + +// Operations to simplify the correction term #if 0 expand(c1); c1 = expand_P_op(c1); @@ -206,10 +265,9 @@ int main(int argc, char* argv[]) { #endif // pCCSDT R3 expression - auto r3 = cc_r[3] - w_t2_t3 + pCCSDT_correction({1, 1, 0}); + // auto r3 = cc_r[3] - w_t2_t3 + pCCSDT_correction({1, 1, 0}); -#if 0 // // Closed-shell spintrace (fast) // @@ -219,11 +277,13 @@ int main(int argc, char* argv[]) { auto ext_idx = ext_idx_list(i); cc_st_r[i] = sequant::closed_shell_CC_spintrace(cc_r[i]); 
canonicalize(cc_st_r[i]); +// if(i==2) +// std::wcout << __LINE__ << " " << to_latex(cc_st_r[i]) << std::endl; // Remove S operator for (auto& term : *cc_st_r[i]) { if (term->is()) - term = sequant::remove_tensor_from_product(term->as(), L"S"); + term = sequant::remove_tensor(term->as(), L"S"); } // Biorthogonal transformation @@ -233,15 +293,22 @@ int main(int argc, char* argv[]) { // correct result if (i != 1) cc_st_r[i] = symmetrize_expr(cc_st_r[i], ext_idx); simplify(cc_st_r[i]); +// if(i==2) +// std::wcout << __LINE__ << " " << to_latex(cc_st_r[i]) << std::endl; // Remove S operator for (auto& term : *cc_st_r[i]) { if (term->is()) - term = remove_tensor_from_product(term->as(), L"S"); + term = remove_tensor(term->as(), L"S"); } +// if(i==2) +// std::wcout << __LINE__ << " " << to_latex(cc_st_r[i]) << std::endl; auto tstop = std::chrono::high_resolution_clock::now(); std::chrono::duration time_elapsed = tstop - tstart; +// if(i==2) +// std::wcout << __LINE__ << " " << to_latex(cc_st_r[i]) << std::endl; + printf("CC R%lu size: %lu time: %5.3f sec.\n", i, cc_st_r[i]->size(), time_elapsed.count()); } @@ -293,6 +360,8 @@ int main(int argc, char* argv[]) { auto ptr = sequant::ex(spin_case); expr_vec.push_back(ptr); std::cout << ptr->size() << " "; +// if(i==2) +// std::wcout << __LINE__ << " " << to_latex(ptr) << std::endl; } os_cc_st_r.at(i) = std::move(expr_vec); @@ -306,7 +375,7 @@ int main(int argc, char* argv[]) { throw std::runtime_error(oss.str().c_str()); \ } - if (NMAX == 4) { + if (k == 4) { runtime_assert(os_cc_st_r.size() == 5); runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30); runtime_assert(os_cc_st_r.at(2).at(1)->size() == 130); @@ -316,7 +385,7 @@ int main(int argc, char* argv[]) { runtime_assert(os_cc_st_r.at(4).at(1)->size() == 356); runtime_assert(os_cc_st_r.at(4).at(2)->size() == 386); runtime_assert(os_cc_st_r.at(4).at(4)->size() == 156); - } else if (NMAX == 3) { + } else if (k == 3) { runtime_assert(os_cc_st_r.size() == 4); 
runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30); runtime_assert(os_cc_st_r.at(2).at(0)->size() == 65); @@ -324,7 +393,7 @@ int main(int argc, char* argv[]) { runtime_assert(os_cc_st_r.at(3).at(2)->size() == 209); runtime_assert(os_cc_st_r.at(3).at(3)->size() == 75); } -#endif + } // Generate S operator from external index list @@ -341,6 +410,120 @@ ExprPtr symmetrize_expr(ExprPtr& expr, const container::vector(S) * expr; } +ExprPtr biorthogonal_transform( + const sequant::ExprPtr& expr, const int n_particles, + const std::vector>& ext_index_groups, + const double threshold) { + assert(n_particles != 0); + assert(!ext_index_groups.empty()); + + using sequant::Constant; + using sequant::ex; + using sequant::ExprPtr; + using sequant::Index; + using sequant::Sum; + using sequant::container::svector; + + // Coefficients + std::vector bt_coeff_vec; + { + using namespace Eigen; + // Dimension of permutation matrix is n_particles! + int n = std::tgamma(n_particles + 1); + + // Permutation matrix + Eigen::Matrix M(n, n); + { + M.setZero(); + size_t n_row = 0; + svector v(n_particles), v1(n_particles); + std::iota(v.begin(), v.end(), 0); + std::iota(v1.begin(), v1.end(), 0); + do { + std::vector permutation_vector; + do { + auto cycles = sequant::count_cycles(v1, v); + permutation_vector.push_back(std::pow(-2, cycles)); + } while (std::next_permutation(v.begin(), v.end())); + Eigen::VectorXd pv_eig = Eigen::Map( + permutation_vector.data(), permutation_vector.size()); + M.row(n_row) = pv_eig; + ++n_row; + } while (std::next_permutation(v1.begin(), v1.end())); + M *= std::pow(-1, n_particles); + } + + // Normalization constant + double scalar; + { + auto nonZero = [&threshold](const double& d) { + return abs(d) > threshold; + }; + + // Solve system of equations + SelfAdjointEigenSolver eig_solver(M); + std::vector eig_vals(eig_solver.eigenvalues().size()); + VectorXd::Map(&eig_vals[0], eig_solver.eigenvalues().size()) = + eig_solver.eigenvalues(); + + double non0count = 
+ std::count_if(eig_vals.begin(), eig_vals.end(), nonZero); + scalar = eig_vals.size() / non0count; + } + + // Find Pseudo Inverse, get 1st row only + MatrixXd pinv = M.completeOrthogonalDecomposition().pseudoInverse(); + bt_coeff_vec.resize(pinv.rows()); + VectorXd::Map(&bt_coeff_vec[0], bt_coeff_vec.size()) = pinv.row(0) * scalar; + } + + // Transformation maps + std::vector> bt_maps; + { + std::vector idx_list; + for (auto& idx_group : ext_index_groups) + idx_list.push_back(*idx_group.begin()); + + const std::vector const_idx_list = idx_list; + + do { + std::map map; + auto const_list_ptr = const_idx_list.begin(); + for (auto& i : idx_list) { + map.emplace(std::make_pair(*const_list_ptr, i)); + const_list_ptr++; + } + bt_maps.push_back(map); + } while (std::next_permutation(idx_list.begin(), idx_list.end())); + } + + // If this assertion fails, change the threshold parameter + assert(bt_coeff_vec.size() == bt_maps.size()); + + // Checks if the replacement map is a canonical sequence + auto is_canonical = [](const std::map& idx_map) { + bool canonical = true; + for (auto&& pair : idx_map) + if (pair.first != pair.second) return false; + return canonical; + }; + + // Scale transformed expressions and append + Sum bt_expr{}; + auto coeff_it = bt_coeff_vec.begin(); + for (auto&& map : bt_maps) { + if (is_canonical(map)) + bt_expr.append(ex(*coeff_it) * expr->clone()); + else + bt_expr.append(ex(*coeff_it) * + sequant::transform_expr(expr->clone(), map)); + coeff_it++; + } + ExprPtr result = std::make_shared(bt_expr); + return result; +} + + container::vector biorthogonal_tran_coeff(const int n_particles, const double& threshold){ using namespace Eigen; From f034952ea11bee1b6f56d039033e8c69dee78cf1 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Thu, 7 Apr 2022 10:06:28 -0400 Subject: [PATCH 041/120] include singles correction with and without explicity correlation --- SeQuant/domain/eqs/single_ref_uccf12.h | 54 ++++------ 
.../domain/transcorrelated/simplifications.h | 101 +++++------------- examples/uccf12/uccf12.cpp | 4 +- 3 files changed, 49 insertions(+), 110 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index 37941e912..a9abbb98e 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -43,15 +43,8 @@ class uccf12{ //[[e1,e2],e3]_12 ExprPtr compute_double_com(ExprPtr e1, ExprPtr e2, ExprPtr e3, int ansatz = 2){ auto first_com = do_wick((e1 * e2) - (e2 * e1)); - /*auto second_com = (((e1 * e2) - (e2 * e1)) * e3) - (e3 * ((e1 * e2) - (e2 * e1))); - non_canon_simplify(second_com); - std::wcout << "second com: " << to_latex_align(second_com,20,2) << std::endl; - second_com = do_wick(second_com); - simplify(second_com); - std::wcout << "second com: " << to_latex_align(second_com,20,2) << std::endl; - */simplify(first_com); + simplify(first_com); auto second_com_1 = first_com * e3; - //non_canon_simplify(second_com_1); simplify(second_com_1); second_com_1 = do_wick(second_com_1); auto second_com_2 = e3 * first_com; @@ -59,16 +52,12 @@ class uccf12{ second_com_2 = do_wick(second_com_2); auto second_com = second_com_1 - second_com_2; simplify(second_com); - std::wcout << "second com: " << to_latex_align(second_com,20,2) << std::endl; if(ansatz == 2) { second_com = keep_up_to_3_body_terms(second_com); - std::wcout << to_latex_align(second_com,20,2) << std::endl; second_com = second_com + ex(0.); // make a sum to avoid heavy code duplication for product and sum variants. 
second_com = simplification::overlap_with_obs(second_com); - // std::wcout << to_latex_align(second_com,20,2) << std::endl; second_com = second_com + ex(0.); - second_com = simplification::screen_F12_and_density(second_com,2); - // std::wcout << to_latex_align(second_com,20,2) << std::endl; + second_com = simplification::screen_F12_proj(second_com, 2); second_com = simplification::tens_to_FNOps(second_com); second_com = decompositions::three_body_substitution(second_com, 2); simplify(second_com); @@ -81,7 +70,7 @@ class uccf12{ second_com = simplification::overlap_with_obs(second_com); // std::wcout << to_latex_align(second_com,20,2) << std::endl; second_com = second_com + ex(0.); - second_com = simplification::screen_F12_and_density(second_com,1); + second_com = simplification::screen_F12_proj(second_com, 1); // std::wcout << to_latex_align(second_com,20,2) << std::endl; second_com = simplification::tens_to_FNOps(second_com); simplify(second_com); @@ -120,6 +109,7 @@ class uccf12{ } } } + else{return input;} } ExprPtr keep_up_to_2_body_terms(const ExprPtr& input) { if (input->is()) {auto filtered_summands = input->as().summands() | @@ -265,9 +255,9 @@ class uccf12{ } } - std::pair compute(std::string gg_label,int ansatz = 2, bool print = false,bool singles=false) { + std::pair compute(std::string gg_label,int ansatz = 2, bool print = false,bool singles=false,bool doubles=true) { // auto gg_space = IndexSpace::active_occupied; // Geminal-generating space: active occupieds is the normal choice, all orbitals is the reference-independent (albeit expensive) choice - + assert(singles == true || doubles == true); auto gg_space = IndexSpace::frozen_occupied; if (gg_label == "act_occ") { gg_space = IndexSpace::active_occupied; @@ -288,21 +278,13 @@ class uccf12{ } auto single = ex(0.0); - //auto single_ = ex(0.0); if(singles){ // this might need to be complete space if we don't have a solution to the particular blocks of interest. 
auto C = ex(L"C",std::initializer_list{Index::make_tmp_index(IndexSpace::instance(IndexSpace::all))},std::initializer_list{Index::make_tmp_index(IndexSpace::instance(IndexSpace::other_unoccupied))}); auto E_pa = ex (std::initializer_list{C->as().bra()[0]},std::initializer_list{C->as().ket()[0]}); auto C_Epa = C * E_pa; - auto anti_herm_C = C_Epa/* - adjoint(C_Epa)*/; + auto anti_herm_C = C_Epa - adjoint(C_Epa); single = single + anti_herm_C; - //simplify(single); - std::wcout << "single term" << to_latex_align(single) << std::endl; - - //auto single_2 = single->clone(); - //single_2 = relable(single_2); - //std::wcout << "single after relable" << to_latex_align(single_2) << std::endl; - //single_ = single_2; } if (ansatz == 2) { @@ -310,27 +292,28 @@ class uccf12{ auto r = R12(gg_space); auto r_1 = R12(gg_space); - auto A = (r - adjoint(r)) + single; - std::wcout << "A: " << to_latex_align(A,20,2) << std::endl; + ExprPtr A = ex(0.0); + if(doubles) { + A = A + (r - adjoint(r)) + single; + } + else{A = A + single;} auto A_ = A->clone(); A_ = relable(A_); - std::wcout << "A_: " << to_latex_align(A_,20,2) << std::endl; - //auto A_ = (r_1 - adjoint(r_1)) + single_; + + //first commutator in eq 9. Chem. Phys. 136, 084107 (2012). auto H_A = do_wick(ex(1.) * ((h * A) - (A * h))); auto H_A_3 = keep_up_to_3_body_terms(H_A); - // std::wcout << "pre decomp: " << to_latex_align(single_Comm,20,2) << std::endl; H_A_3 = simplification::overlap_with_obs(H_A_3); H_A_3 = H_A_3 + ex(0.); - H_A_3 = simplification::screen_F12_and_density(H_A_3,2); - // std::wcout << to_latex_align(H_A_3,20,2) << std::endl; + H_A_3 = simplification::screen_F12_proj(H_A_3, 2); H_A_3 = simplification::tens_to_FNOps(H_A_3); auto H_A_2 = decompositions::three_body_substitution(H_A_3, 2); simplify(H_A_2); auto com_1 = simplification::hamiltonian_based_projector_2(H_A_2); - auto full_double_com = ex(1./2) * compute_double_com(F(),A,A_); + // double commutator in eq. 9. Chem. Phys. 136, 084107 (2012). 
+ auto full_double_com = ex(1./2) * compute_double_com(F(),A,A_); auto sim = simplification::fock_based_projector_2(full_double_com); - auto one_body = com_1.first + (sim.first); auto two_body = com_1.second + (sim.second); @@ -367,8 +350,7 @@ class uccf12{ H_A_3 = simplification::overlap_with_obs(H_A_3); H_A_3 = H_A_3 + ex(0.); - H_A_3 = simplification::screen_F12_and_density(H_A_3,1); - // std::wcout << to_latex_align(H_A_3,20,2) << std::endl; + H_A_3 = simplification::screen_F12_proj(H_A_3, 1); H_A_3 = simplification::tens_to_FNOps(H_A_3); simplify(H_A_3); auto com_1 = simplification::hamiltonian_based_projector_1(H_A_3); diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index b434127ca..0d11c9db6 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -362,18 +362,18 @@ ExprPtr screen_F_tensors(ExprPtr ex_, int ansatz = 2) { ExprPtr screen_density(ExprPtr ex_){// densities probably should be non-zero if each index has a chance to be occupied, in other words, screen out densities containing unoccupied labels. 
assert(ex_->is()); assert(ex_->as().label() == L"\\Gamma" || ex_->as().label() == L"\\gamma"); - bool good = true; + bool occ_space = true; for (auto&& bra : ex_->as().bra()){ if (bra.space().type() == IndexSpace::unoccupied || bra.space().type() == IndexSpace::complete_unoccupied){ - good = false; + occ_space = false; } } for (auto&& ket : ex_->as().ket()){ if (ket.space().type() == IndexSpace::unoccupied || ket.space().type() == IndexSpace::complete_unoccupied){ - good = false; + occ_space = false; } } - if(good){ + if(occ_space){ return ex_; } else{return ex(0);} @@ -433,11 +433,8 @@ auto treat_fock(ExprPtr ex_){ } else new_product = new_product * factor; } - //std::wcout << "problematic product: " << to_latex_align(new_product) << std::endl; - //simplify(new_product); new_ex_ = new_ex_ + new_product; } - //simplify(new_ex_); FWickTheorem wick{new_ex_}; wick.reduce(new_ex_); non_canon_simplify(new_ex_); @@ -618,8 +615,7 @@ ExprPtr biproduct_intermediate(ExprPtr T1,ExprPtr T2){ if (T1->as().label() == L"g" || T2->as().label() == L"g"){ if (nconnects == 2 && space == IndexSpace::complete_unoccupied){ //V^pq_ij - //auto V_pqij = ex(L"V", IDX_list{external_bra[0],external_bra[1]}, IDX_list{external_ket[0],external_ket[1]}); - //return V_pqij; + //intermediate decomposition handled by SeQuant so space labels can be properly handled if(T1_ket){ auto GR_ijpq = ex(L"GR", IDX_list{external_bra[0],external_bra[1]}, IDX_list{external_ket[0],external_ket[1]}); auto F_ijrs = ex(L"F", IDX_list{external_bra[0],external_bra[1]}, @@ -677,7 +673,11 @@ ExprPtr biproduct_intermediate(ExprPtr T1,ExprPtr T2){ } return result; } -ExprPtr find_f12_interms(ExprPtr ex_){ +// identify F12 intermediates +//intermediates we generate contain either 2 F or g tensors. +// those expressions are biproduct intermediate for further screening. 
+// special case of B intermediate is handled by an additional check for the fock operator f +ExprPtr find_F12_interms(ExprPtr ex_){ assert(ex_->is()); int counter = 0; std::vector T1_T2; @@ -773,12 +773,12 @@ std::pair contains_tens(ExprPtr ex_, std::wstring label){ } -//TODO this should be a geneeralized procedure since the screening process is different for each number of F tensors. +//TODO this should be a generalized procedure since the screening process is different for each number of F tensors. // I suppose generally, this should be a product level screening, which first finds the number of F tensors and then picks the correct screening method. //re-implimentation as a recursive function which gets called every time a delta is found, simplifies/reduces the product and returns. //products are const and two deltas acting on the same index makes this difficult. logically the product needs to update within its own loop, but it cannot. Alternatively, two delta's to the same index need to occur in the same product, but that breaks things. //work around. make a copy of product which can be modified? break out of product loop? 
-ExprPtr screen_F12_and_density(ExprPtr exprs,int ansatz = 2){ +ExprPtr screen_F12_proj(ExprPtr exprs,int ansatz = 2){ if(exprs->is()) { auto return_sum = ex(0); for (auto&& product : exprs->as().summands()) { @@ -792,13 +792,10 @@ ExprPtr screen_F12_and_density(ExprPtr exprs,int ansatz = 2){ auto product_clone = product->clone(); if (contains_tens(temp_factor, L"s").first) { product_clone = product_clone * contains_tens(temp_factor,L"s").second; - //std::wcout << "factor: " << to_latex_align(temp_factor) << std::endl; - //std::wcout << " product clone: " << to_latex_align(product_clone) << std::endl; FWickTheorem wick_f{product_clone}; wick_f.reduce(product_clone); - //std::wcout << " product clone after reduce: " << to_latex_align(product_clone) << std::endl; non_canon_simplify(product_clone); - product_clone = screen_F12_and_density(product_clone,ansatz); + product_clone = screen_F12_proj(product_clone, ansatz); return_sum = product_clone + return_sum; new_product = ex(0.); break; @@ -807,10 +804,8 @@ ExprPtr screen_F12_and_density(ExprPtr exprs,int ansatz = 2){ non_canon_simplify(new_product); } - //std::wcout <<"new_product: " << to_latex_align(new_product) << std::endl; return_sum = new_product + return_sum; } - //std::wcout << "return sum before reduce: " << to_latex_align(return_sum,20,2) << std::endl; non_canon_simplify(return_sum); return return_sum; } @@ -825,13 +820,10 @@ ExprPtr screen_F12_and_density(ExprPtr exprs,int ansatz = 2){ auto product_clone = exprs->clone(); if (contains_tens(temp_factor, L"s").first) { product_clone = product_clone * contains_tens(temp_factor,L"s").second; - //std::wcout << "factor: " << to_latex_align(factor) << std::endl; - //std::wcout << " product clone: " << to_latex_align(product_clone) << std::endl; FWickTheorem wick_f{product_clone}; wick_f.reduce(product_clone); non_canon_simplify(product_clone); - //std::wcout << " product clone after reduce: " << to_latex_align(product_clone) << std::endl; - product_clone = 
screen_F12_and_density(product_clone,ansatz); + product_clone = screen_F12_proj(product_clone, ansatz); new_product = product_clone; break; } @@ -906,16 +898,16 @@ ExprPtr tens_to_FNOps(ExprPtr ex_){ return ex_; } -ExprPtr split_f(ExprPtr exprs){ +//split F12 operator into its 2 components seen in eq 11. of Chem. Phys. 136, 084107 (2012). +// neccessary in some cases where particles get excited from different spaces. +ExprPtr split_F12(ExprPtr exprs){ assert(exprs->is()); assert(exprs->as().label() == L"F"); auto result = ex(0); - //std::wcout << "before split: " << to_latex_align(exprs,20,2) << std::endl; if((exprs->as().const_braket()[2].space() == sequant::IndexSpace::complete_unoccupied || exprs->as().const_braket()[2].space() == sequant::IndexSpace::other_unoccupied) || exprs->as().const_braket()[3].space() == sequant::IndexSpace::complete_unoccupied || exprs->as().const_braket()[3].space() == sequant::IndexSpace::other_unoccupied) { auto T1 = ex(3./8) * ex(L"F",std::vector{exprs->as().const_braket()[0],exprs->as().const_braket()[1]},std::vector{exprs->as().const_braket()[2],exprs->as().const_braket()[3]}); auto T2 = ex(1./8) * ex(L"F",std::vector{exprs->as().const_braket()[1],exprs->as().const_braket()[0]},std::vector{exprs->as().const_braket()[2],exprs->as().const_braket()[3]}); result = T1 + T2; - //std::wcout << "after split: " << to_latex_align(result,20,2) << std::endl; return result; } else{// otherwise the geminal generating space must be in the upper indices. so include exchange for those. 
@@ -923,26 +915,23 @@ ExprPtr split_f(ExprPtr exprs){ auto T1 = ex(3./8) * ex(L"F",std::vector{exprs->as().const_braket()[0],exprs->as().const_braket()[1]},std::vector{exprs->as().const_braket()[2],exprs->as().const_braket()[3]}); auto T2 = ex(1./8) * ex(L"F",std::vector{exprs->as().const_braket()[0],exprs->as().const_braket()[1]},std::vector{exprs->as().const_braket()[3],exprs->as().const_braket()[2]}); result = T1 + T2; - //std::wcout << "after split: " << to_latex_align(result,20,2) << std::endl; return result; } } -ExprPtr partition_f(ExprPtr exprs){ +ExprPtr partition_F12(ExprPtr exprs){ if(!exprs->is()){ return exprs; } - // std::wcout << "pre partition: " << to_latex_align(exprs,20,2) << std::endl; for (auto&& product : exprs->as().summands()){ for (auto&& factor : product->as().factors()){ if(factor->is() && factor->as().label() == L"F") { - factor = split_f(factor); + factor = split_F12(factor); } } } non_canon_simplify(exprs); - //std::wcout << " post partition: " << to_latex_align(exprs,20,2) << std::endl; return(exprs); } @@ -953,31 +942,19 @@ ExprPtr partition_f(ExprPtr exprs){ // enforces the following obs convention. E^{p_7}_{p_9} and E^{{p_7}{p_8}}_{{p_9}{p_{10}}} // should allow analysis of multiple expressions who have the same normal order operator prefactor. 
std::pair hamiltonian_based_projector_2(ExprPtr exprs){ - //std::wcout << "pre remove constants: " << to_latex_align(exprs,20,2) << std::endl; - //exprs = remove_const(exprs); -// std::wcout << "post remove constants: " << to_latex_align(exprs,20,2) << std::endl; exprs = FNOPs_to_tens(exprs); simplify(exprs); - //exprs = overlap_with_obs(exprs); - exprs = partition_f(exprs); + exprs = partition_F12(exprs); simplify(exprs); - //std::wcout << "post convert to tensor: " << to_latex_align(exprs,20,2) << std::endl; - exprs = screen_F12_and_density(exprs,2); - //std::wcout << "post screen f12: " << to_latex_align(exprs,20,2) << std::endl; + exprs = screen_F12_proj(exprs, 2); simplify(exprs); exprs = screen_densities(exprs); - //std::wcout << "post screen density: " << to_latex_align(exprs,20,2) << std::endl; - //exprs = densities_to_occ(exprs); - //f12 interms needs a particular canonical ordering simplify(exprs); - //std::wcout << "densities to occ: " << to_latex_align(exprs,20,2) << std::endl; auto exprs_intmed = ex(0.0); for (auto&& product : exprs->as().summands()){ - auto new_product = simplification::find_f12_interms(product); + auto new_product = simplification::find_F12_interms(product); exprs_intmed = new_product + exprs_intmed; } - //std::wcout << "post intermediates: " << to_latex_align(exprs,20,2) << std::endl; - //tens_to_FNOps(exprs_intmed); simplify(exprs_intmed); return fnop_to_overlap(exprs_intmed); } @@ -986,13 +963,13 @@ std::pair hamiltonian_based_projector_2(ExprPtr exprs){ std::pair hamiltonian_based_projector_1(ExprPtr exprs){ exprs = FNOPs_to_tens(exprs); simplify(exprs); - exprs = partition_f(exprs); + exprs = partition_F12(exprs); simplify(exprs);; - exprs = screen_F12_and_density(exprs,1); + exprs = screen_F12_proj(exprs, 1); simplify(exprs); auto exprs_intmed = ex(0.0); for (auto&& product : exprs->as().summands()){ - auto new_product = simplification::find_f12_interms(product); + auto new_product = simplification::find_F12_interms(product); 
exprs_intmed = new_product + exprs_intmed; } simplify(exprs_intmed); @@ -1005,7 +982,7 @@ std::pair fock_based_projector_1(ExprPtr exprs){ if(exprs->is()){ return std::pair {exprs, exprs}; } - exprs = partition_f(exprs); + exprs = partition_F12(exprs); auto final_screen = exprs; simplify(final_screen); //in some cases, there will now be no contributing terms left so return zero to one and two body. @@ -1013,15 +990,12 @@ std::pair fock_based_projector_1(ExprPtr exprs){ return std::pair {final_screen, final_screen}; } simplify(final_screen); - //final_screen = treat_fock(final_screen); - simplify(final_screen); //find the special f12 intermediates that cannot efficiently be solved directly. This seems to work already for the general case! auto last_screen = ex(0.0); for (auto&& product : final_screen->as().summands()){ - auto new_product = simplification::find_f12_interms(product); + auto new_product = simplification::find_F12_interms(product); last_screen = last_screen + new_product; } - //::wcout << "post intermediates: " << to_latex_align(final_screen,20,2) << std::endl; simplify(last_screen); return fnop_to_overlap(last_screen); } @@ -1032,46 +1006,29 @@ std::pair fock_based_projector_1(ExprPtr exprs){ // enforces the following obs convention. E^{p_7}_{p_9} and E^{{p_7}{p_8}}_{{p_9}{p_{10}}} // should allow analysis of multiple expressions who have the same normal order operator prefactor. 
std::pair fock_based_projector_2(ExprPtr exprs){ - //std::wcout << "expression before removing constants: " << to_latex_align(exprs,20,2) << std::endl; - //exprs = remove_const(exprs); - //std::wcout << "after screening constant: " << to_latex_align(exprs) << std::endl; exprs = FNOPs_to_tens(exprs); simplify(exprs); - //std::wcout << "fnop to tensor: " << to_latex_align(exprs,20,2) << std::endl; if(exprs->is()){ return std::pair {exprs, exprs}; } - exprs = partition_f(exprs); - //exprs = overlap_with_obs(exprs); + exprs = partition_F12(exprs); auto final_screen = exprs; simplify(final_screen); //in some cases, there will now be no contributing terms left so return zero to one and two body. if(final_screen->is()){ return std::pair {final_screen, final_screen}; } - //final_screen = screen_F12_and_density(final_screen); - simplify(final_screen); - //std::wcout << "screen F12: " << to_latex_align(final_screen,20,2) << std::endl; - //final_screen = treat_fock(final_screen); final_screen = FNOPs_to_tens(final_screen); simplify(final_screen); - //std::wcout << "screen fock: " << to_latex_align(final_screen,20,2) << std::endl; final_screen = screen_densities(final_screen); simplify(final_screen); - //enforce that densities are in the occupied space since they are only non-zero in occ - //final_screen = densities_to_occ(final_screen); - //non_canon_simplify(final_screen); - //std::wcout << "screen densities to occ: " << to_latex_align(final_screen,20,2) << std::endl; - // std::wcout << "pre intermediates: " << to_latex_align(final_screen,20,2) << std::endl; //find the special f12 intermediates that cannot efficiently be solved directly. This seems to work already for the general case! 
auto last_screen = ex(0.0); for (auto&& product : final_screen->as().summands()){ - auto new_product = simplification::find_f12_interms(product); + auto new_product = simplification::find_F12_interms(product); last_screen = last_screen + new_product; } - //::wcout << "post intermediates: " << to_latex_align(final_screen,20,2) << std::endl; simplify(last_screen); - //tens_to_FNOps(last_screen); return fnop_to_overlap(last_screen); } } diff --git a/examples/uccf12/uccf12.cpp b/examples/uccf12/uccf12.cpp index a0f83aaf6..857a51151 100644 --- a/examples/uccf12/uccf12.cpp +++ b/examples/uccf12/uccf12.cpp @@ -123,7 +123,7 @@ try_main() { second_com = simplification::overlap_with_obs(second_com); // std::wcout << "overlap with obs" << to_latex_align(second_com) << std::endl; second_com = second_com + ex(0.); - second_com = simplification::screen_F12_and_density(second_com); + second_com = simplification::screen_F12_proj(second_com); std::wcout << to_latex_align(second_com,20,2) << std::endl; second_com = simplification::tens_to_FNOps(second_com); second_com = decompositions::three_body_substitution(second_com,2); @@ -154,7 +154,7 @@ try_main() { std::wcout << "post overlap: " << to_latex_align(H_A_3,20,2) << std::endl; H_A_3 = H_A_3 + ex(0.); - H_A_3 = simplification::screen_F12_and_density(H_A_3); + H_A_3 = simplification::screen_F12_proj(H_A_3); std::wcout << to_latex_align(H_A_3,20,2) << std::endl; H_A_3 = simplification::tens_to_FNOps(H_A_3); auto H_A_2 = decompositions::three_body_substitution(H_A_3,2); From 321440c190c3624eff66f09d59350f9b3b0f94cd Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Thu, 7 Apr 2022 13:03:21 -0400 Subject: [PATCH 042/120] re-"merged" versions.cmake from master --- external/versions.cmake | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/external/versions.cmake b/external/versions.cmake index bd0654d79..5224a7b94 100644 --- a/external/versions.cmake +++ b/external/versions.cmake @@ -2,7 +2,10 @@ # to be able to 
auto-update them set(SEQUANT_TRACKED_BOOST_VERSION 1.67) +set(SEQUANT_TRACKED_BOOST_PREVIOUS_VERSION 1.67) set(SEQUANT_TRACKED_RANGEV3_TAG d800a032132512a54c291ce55a2a43e0460591c7) +set(SEQUANT_TRACKED_RANGEV3_PREVIOUS_TAG 2e0591c57fce2aca6073ad6e4fdc50d841827864) -set(SEQUANT_TRACKED_TILEDARRAY_TAG 4d67b8a3d04b92639324b6d7a6beedaf4f373add) +set(SEQUANT_TRACKED_TILEDARRAY_TAG 36e2ad205c21c339434dd0ef8f4f1467e7e26037) +set(SEQUANT_TRACKED_TILEDARRAY_PREVIOUS_TAG 5c768a7b121886dfe406c6dd6a38acaa8782ae6e) From f33a656e2d079c135aad53138a44f964c0bc2310 Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Thu, 7 Apr 2022 13:09:38 -0400 Subject: [PATCH 043/120] [cmake] clone -> clone_repacked --- CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e8eff598c..353c7feba 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -203,8 +203,8 @@ add_library(SeQuant SeQuant/core/bliss.hpp SeQuant/core/timer.hpp SeQuant/core/binary_node.hpp - SeQuant/core/clone.hpp - SeQuant/core/clone.cpp + SeQuant/core/clone_packed.hpp + SeQuant/core/clone_packed.cpp SeQuant/core/eval_seq.hpp SeQuant/core/eval_expr.hpp SeQuant/core/eval_expr.cpp @@ -298,7 +298,7 @@ if (BUILD_TESTING) tests/unit/test_optimize.cpp tests/unit/test_token_sequant.cpp tests/unit/test_rpn.cpp - tests/unit/test_clone.cpp + tests/unit/test_clone_packed.cpp ) if (TARGET tiledarray) From 781a9212dfff8ebc38e0bb2c3750711029ecac74 Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Thu, 7 Apr 2022 13:57:06 -0400 Subject: [PATCH 044/120] document that sequant::detail::compute_index_replacement_rules supports overlaps of nonoverlapping spaces --- SeQuant/core/wick.impl.hpp | 230 +++++++++++++++++++++---------------- 1 file changed, 134 insertions(+), 96 deletions(-) diff --git a/SeQuant/core/wick.impl.hpp b/SeQuant/core/wick.impl.hpp index 8c87ac1de..c9e41d816 100644 --- a/SeQuant/core/wick.impl.hpp +++ b/SeQuant/core/wick.impl.hpp @@ -10,7 +10,7 @@ #include 
"utility.hpp" #ifdef SEQUANT_HAS_EXECUTION_HEADER -# include +#include #endif namespace sequant { @@ -25,13 +25,14 @@ struct zero_result : public std::exception {}; /// summations can be reduced by index replacements. Reducing sums over dummy /// (internal) indices uses 2 rules: /// - if a Kronecker delta binds 2 internal indices I and J, replace them with a -/// new internal index -/// representing intersection of spaces of I and J, !!remove delta!! +/// new internal index representing intersection of spaces of I and J, +/// !!remove delta!! /// - if a Kronecker delta binds an internal index J and an external index I: /// - if space of J includes space of I, replace J with I, !!remove delta!! /// - if space of J is a subset of space of I, replace J with a new internal /// index representing intersection of spaces of I and J, !!keep the delta!! -/// @throw zero_result if @c product is zero +/// @throw zero_result if @c product is zero for any reason, e.g. because +/// it includes an overlap of 2 indices from nonoverlapping spaces inline container::map compute_index_replacement_rules( std::shared_ptr &product, const container::set &external_indices, @@ -69,14 +70,13 @@ inline container::map compute_index_replacement_rules( }; // adds src->dst or src->intersection(dst,current_dst) - auto add_rule = [&result, &proto, &make_intersection_index](const Index &src, - const Index &dst) { + auto add_rule = [&result, &proto, &make_intersection_index]( + const Index &src, const Index &dst) { auto src_it = result.find(src); if (src_it == result.end()) { // if brand new, add the rule auto insertion_result = result.emplace(src, proto(dst, src)); assert(insertion_result.second); - } - else { // else modify the destination of the existing rule to the + } else { // else modify the destination of the existing rule to the // intersection const auto &old_dst = src_it->second; assert(old_dst.proto_indices() == src.proto_indices()); @@ -136,8 +136,8 @@ inline container::map 
compute_index_replacement_rules( const auto &old_dst2 = src2_it->second; const auto new_dst_space = (dst.space() != old_dst1.space() || dst.space() != old_dst2.space()) - ? intersection(old_dst1.space(), old_dst2.space(), dst.space()) - : dst.space(); + ? intersection(old_dst1.space(), old_dst2.space(), dst.space()) + : dst.space(); if (new_dst_space == IndexSpace::null_instance()) throw zero_result{}; Index new_dst; if (new_dst_space == old_dst1.space()) { @@ -157,15 +157,14 @@ inline container::map compute_index_replacement_rules( new_dst = dst; } else new_dst = idxfac.make(new_dst_space); - result[src1] = proto(new_dst,dst1_proto); - result[src2] = proto(new_dst,dst2_proto); + result[src1] = proto(new_dst, dst1_proto); + result[src2] = proto(new_dst, dst2_proto); } }; /// this makes the list of replacements ... we do not mutate the expressions /// to keep the information about which indices are related - for (auto it = ranges::begin(exrng); it != ranges::end(exrng); - ++it) { + for (auto it = ranges::begin(exrng); it != ranges::end(exrng); ++it) { const auto &factor = *it; if (factor->type_id() == Expr::get_type_id()) { const auto &tensor = static_cast(*factor); @@ -177,19 +176,17 @@ inline container::map compute_index_replacement_rules( assert(bra != ket); const auto bra_is_ext = ranges::find(external_indices, bra) != - ranges::end(external_indices); + ranges::end(external_indices); const auto ket_is_ext = ranges::find(external_indices, ket) != - ranges::end(external_indices); + ranges::end(external_indices); const auto intersection_space = intersection(bra.space(), ket.space()); - if (intersection_space == IndexSpace::null_instance()){ + // if overlap's indices are from non-overlapping spaces, return zero + if (intersection_space == IndexSpace::null_instance()) { throw zero_result{}; } - // not sure why this doesn't return zero_result like every other example. seems like a bug. 
- //assert(intersection_space != IndexSpace::null_instance()); - if (!bra_is_ext && !ket_is_ext) { // int + int const auto new_dummy = idxfac.make(intersection_space); add_rules(bra, ket, new_dummy); @@ -252,8 +249,7 @@ inline bool apply_index_replacement_rules( auto &tensor = factor->as(); /// replace indices - pass_mutated &= - tensor.transform_indices(const_replrules); + pass_mutated &= tensor.transform_indices(const_replrules); if (tensor.label() == overlap_label()) { const auto &bra = tensor.bra().at(0); @@ -304,8 +300,7 @@ inline bool apply_index_replacement_rules( #endif } } else { // ext + ext - if (bra == ket) - erase_it = true; + if (bra == ket) erase_it = true; } if (erase_it) { @@ -335,8 +330,8 @@ inline bool apply_index_replacement_rules( ranges::for_each( all_indices, [&const_replrules, &all_indices_new](const Index &idx) { auto dst_it = const_replrules.find(idx); - [[maybe_unused]] auto insertion_result = all_indices_new.emplace(dst_it != const_replrules.end() ? dst_it->second - : idx); + [[maybe_unused]] auto insertion_result = all_indices_new.emplace( + dst_it != const_replrules.end() ? 
dst_it->second : idx); }); std::swap(all_indices_new, all_indices); @@ -359,7 +354,8 @@ inline void reduce_wick_impl(std::shared_ptr &expr, if (factor->template is()) { ranges::for_each(factor->template as().braket(), [&all_indices](const Index &idx) { - [[maybe_unused]] auto result = all_indices.insert(idx); + [[maybe_unused]] auto result = + all_indices.insert(idx); }); } }); @@ -367,7 +363,7 @@ inline void reduce_wick_impl(std::shared_ptr &expr, const auto replacement_rules = compute_index_replacement_rules(expr, external_indices, all_indices); - if (Logger::get_instance().wick_reduce){ + if (Logger::get_instance().wick_reduce) { std::wcout << "reduce_wick_impl(expr, external_indices):\n expr = " << expr->to_latex() << "\n external_indices = "; ranges::for_each(external_indices, [](auto &index) { @@ -402,9 +398,13 @@ ExprPtr WickTheorem::compute(const bool count_only) { // have an Expr as input? Apply recursively ... if (expr_input_) { /// expand, then apply recursively to products - if (Logger::get_instance().wick_harness) std::wcout << "WickTheorem::compute: input (before expand) = " << to_latex_align(expr_input_) << std::endl; + if (Logger::get_instance().wick_harness) + std::wcout << "WickTheorem::compute: input (before expand) = " + << to_latex_align(expr_input_) << std::endl; expand(expr_input_); - if (Logger::get_instance().wick_harness) std::wcout << "WickTheorem::compute: input (after expand) = " << to_latex_align(expr_input_) << std::endl; + if (Logger::get_instance().wick_harness) + std::wcout << "WickTheorem::compute: input (after expand) = " + << to_latex_align(expr_input_) << std::endl; // if sum, canonicalize and apply to each summand ... 
if (expr_input_->is()) { canonicalize(expr_input_); @@ -415,7 +415,10 @@ ExprPtr WickTheorem::compute(const bool count_only) { std::mutex result_mtx; // serializes updates of result auto summands = expr_input_->as().summands(); - if (Logger::get_instance().wick_harness) std::wcout << "WickTheorem::compute: input (after canonicalize) has " << summands.size() << " terms = " << to_latex_align(result) << std::endl; + if (Logger::get_instance().wick_harness) + std::wcout << "WickTheorem::compute: input (after canonicalize) has " + << summands.size() << " terms = " << to_latex_align(result) + << std::endl; #ifdef SEQUANT_HAS_EXECUTION_HEADER auto wick_task = [&result, &result_mtx, this, @@ -459,24 +462,24 @@ ExprPtr WickTheorem::compute(const bool count_only) { // ... else if (expr_input_->is()) { auto canon_byproduct = expr_input_->rapid_canonicalize(); - assert(canon_byproduct == nullptr); // canonicalization of Product always returns nullptr + assert(canon_byproduct == + nullptr); // canonicalization of Product always returns nullptr // NormalOperators should be all at the end auto first_nop_it = ranges::find_if( *expr_input_, [](const ExprPtr &expr) { return expr->is>(); }); // if have ops, split into prefactor and op sequence if (first_nop_it != ranges::end(*expr_input_)) { - - // compute and record/analyze topological NormalOperator and Index partitions + // compute and record/analyze topological NormalOperator and Index + // partitions if (use_topology_) { - // construct graph representation of the tensor product TensorNetwork tn(expr_input_->as().factors()); auto [graph, vlabels, vcolors, vtypes] = tn.make_bliss_graph(); const auto n = vlabels.size(); assert(vtypes.size() == n); - const auto& tn_edges = tn.edges(); - const auto& tn_tensors = tn.tensors(); + const auto &tn_edges = tn.edges(); + const auto &tn_tensors = tn.tensors(); // identify vertex indices of NormalOperators and Indices container::set nop_vertex_idx; @@ -506,19 +509,23 @@ ExprPtr 
WickTheorem::compute(const bool count_only) { graph->set_splitting_heuristic(bliss::Graph::shs_fsm); auto save_aut = [&aut_generators](const unsigned int n, - const unsigned int* aut) { + const unsigned int *aut) { aut_generators.emplace_back(aut, aut + n); }; - graph->find_automorphisms(stats, &bliss::aut_hook, &save_aut); + graph->find_automorphisms( + stats, &bliss::aut_hook, &save_aut); } - // use automorphisms to determine groups of topologically equivalent NormalOperators and Indices - // this partitions vertices into partitions (only nontrivial partitions are reported) - // vertex_pair_exclude is a callable that accepts 2 vertex indices and returns true if this pair of indices is to be excluded - // the default is to not exclude any pairs - auto compute_partitions = [&aut_generators](const container::set& vertices, - auto&& vertex_pair_exclude) { + // use automorphisms to determine groups of topologically equivalent + // NormalOperators and Indices this partitions vertices into + // partitions (only nontrivial partitions are reported) + // vertex_pair_exclude is a callable that accepts 2 vertex indices and + // returns true if this pair of indices is to be excluded the default + // is to not exclude any pairs + auto compute_partitions = [&aut_generators]( + const container::set &vertices, + auto &&vertex_pair_exclude) { container::map vertex_to_partition_idx; int next_partition_idx = -1; @@ -527,7 +534,11 @@ ExprPtr WickTheorem::compute(const bool count_only) { // update partitions for (const auto v1 : vertices) { const auto v2 = aut[v1]; - if (v2 != v1 && !vertex_pair_exclude(v1, v2)) { // if the automorphism maps this vertex to another ... they both must be in the same partition + if (v2 != v1 && + !vertex_pair_exclude( + v1, v2)) { // if the automorphism maps this vertex to + // another ... 
they both must be in the same + // partition assert(vertices.find(v2) != vertices.end()); auto v1_partition_it = vertex_to_partition_idx.find(v1); auto v2_partition_it = vertex_to_partition_idx.find(v2); @@ -536,11 +547,16 @@ ExprPtr WickTheorem::compute(const bool count_only) { const bool v2_has_partition = v2_partition_it != vertex_to_partition_idx.end(); if (v1_has_partition && - v2_has_partition) { // both are in partitions? make sure they are in the same partition. N.B. this may leave gaps in partition indices ... no biggie + v2_has_partition) { // both are in partitions? make sure + // they are in the same partition. + // N.B. this may leave gaps in + // partition indices ... no biggie const auto v1_part_idx = v1_partition_it->second; const auto v2_part_idx = v2_partition_it->second; if (v1_part_idx != - v2_part_idx) { // if they have different partition indices, change the larger of the two indices to match the lower + v2_part_idx) { // if they have different partition + // indices, change the larger of the two + // indices to match the lower const auto target_part_idx = std::min(v1_part_idx, v2_part_idx); for (auto &v : vertex_to_partition_idx) { @@ -548,13 +564,16 @@ ExprPtr WickTheorem::compute(const bool count_only) { v.second = target_part_idx; } } - } else if (v1_has_partition) { // only v1 is in a partition? place v2 in it + } else if (v1_has_partition) { // only v1 is in a partition? + // place v2 in it const auto v1_part_idx = v1_partition_it->second; vertex_to_partition_idx.emplace(v2, v1_part_idx); - } else if (v2_has_partition) { // only v2 is in a partition? place v1 in it + } else if (v2_has_partition) { // only v2 is in a partition? + // place v1 in it const auto v2_part_idx = v2_partition_it->second; vertex_to_partition_idx.emplace(v1, v2_part_idx); - } else { // neither is in a partition? place both in the next available partition + } else { // neither is in a partition? 
place both in the next + // available partition const size_t target_part_idx = ++next_partition_idx; vertex_to_partition_idx.emplace(v1, target_part_idx); vertex_to_partition_idx.emplace(v2, target_part_idx); @@ -565,27 +584,33 @@ ExprPtr WickTheorem::compute(const bool count_only) { return std::make_tuple(vertex_to_partition_idx, next_partition_idx); }; - // compute NormalOperator->partition map, convert to partition lists (if any), - // and register via set_op_partitions to be used in full contractions - auto [nop_to_partition_idx, max_nop_partition_idx] = compute_partitions(nop_vertex_idx, [](size_t v1, size_t v2) { return false; }); + // compute NormalOperator->partition map, convert to partition lists + // (if any), and register via set_op_partitions to be used in full + // contractions + auto [nop_to_partition_idx, max_nop_partition_idx] = + compute_partitions(nop_vertex_idx, + [](size_t v1, size_t v2) { return false; }); if (!nop_to_partition_idx.empty()) { container::vector> nop_partitions; assert(max_nop_partition_idx > -1); const size_t max_partition_index = max_nop_partition_idx; nop_partitions.reserve(max_partition_index); - // iterate over all partition indices ... note that there may be gaps so count the actual partitions + // iterate over all partition indices ... note that there may be + // gaps so count the actual partitions size_t partition_cnt = 0; - for(size_t p=0; p<=max_partition_index; ++p) { + for (size_t p = 0; p <= max_partition_index; ++p) { bool p_found = false; - for(const auto& nop_part: nop_to_partition_idx) { + for (const auto &nop_part : nop_to_partition_idx) { if (nop_part.second == p) { - // !!remember to map the vertex index into the operator index!! - const auto nop_idx = nop_vertex_idx.find(nop_part.first) - nop_vertex_idx.begin(); + // !!remember to map the vertex index into the operator + // index!! 
+ const auto nop_idx = nop_vertex_idx.find(nop_part.first) - + nop_vertex_idx.begin(); if (p_found == false) { // first time this is found - nop_partitions.emplace_back(container::vector{static_cast(nop_idx)}); - } - else + nop_partitions.emplace_back(container::vector{ + static_cast(nop_idx)}); + } else nop_partitions[partition_cnt].emplace_back(nop_idx); p_found = true; } @@ -593,24 +618,28 @@ ExprPtr WickTheorem::compute(const bool count_only) { if (p_found) ++partition_cnt; } -// std::wcout << "topological nop partitions:{\n"; -// ranges::for_each(nop_partitions, [](auto&& part) { -// std::wcout << "{"; -// ranges::for_each(part, [](auto&& p) { -// std::wcout << p << " "; -// }); -// std::wcout << "}"; -// }); -// std::wcout << "}" << std::endl; + // std::wcout << "topological nop partitions:{\n"; + // ranges::for_each(nop_partitions, [](auto&& part) { + // std::wcout << "{"; + // ranges::for_each(part, [](auto&& p) { + // std::wcout << p << " "; + // }); + // std::wcout << "}"; + // }); + // std::wcout << "}" << std::endl; this->set_op_partitions(nop_partitions); } - // compute Index->partition map, and convert to partition lists (if any), and check that use_topology_ is compatible with index partitions - // Index partitions are constructed to *only* include Index objects attached to the bra/ket of the same NormalOperator! - // hence need to use filter in computing partitions - auto exclude_index_vertex_pair = [&tn_tensors,&tn_edges](size_t v1, size_t v2) { - // v1 and v2 are vertex indices and also index the edges in the TensorNetwork + // compute Index->partition map, and convert to partition lists (if + // any), and check that use_topology_ is compatible with index + // partitions Index partitions are constructed to *only* include Index + // objects attached to the bra/ket of the same NormalOperator! 
hence + // need to use filter in computing partitions + auto exclude_index_vertex_pair = [&tn_tensors, &tn_edges](size_t v1, + size_t v2) { + // v1 and v2 are vertex indices and also index the edges in the + // TensorNetwork assert(v1 < tn_edges.size()); assert(v2 < tn_edges.size()); const auto &edge1 = *(tn_edges.begin() + v1); @@ -634,45 +663,56 @@ ExprPtr WickTheorem::compute(const bool count_only) { }; container::map index_to_partition_idx; int max_index_partition_idx; - std::tie(index_to_partition_idx, max_index_partition_idx) = compute_partitions(index_vertex_idx, exclude_index_vertex_pair); + std::tie(index_to_partition_idx, max_index_partition_idx) = + compute_partitions(index_vertex_idx, exclude_index_vertex_pair); { // use_topology_=true in full contractions will assume that all - // equivalent indices in NormalOperator's bra or ket are topologically - // equivalent (see Hugenholtz vertex and associated code) - // here we make sure that this is indeed the case + // equivalent indices in NormalOperator's bra or ket are + // topologically equivalent (see Hugenholtz vertex and associated + // code) here we make sure that this is indeed the case assert(use_topology_); // since we are here, use_topology_ is true - // this reports whether bra/ket of tensor @c t is in the same partition - auto is_nop_braket_singlepartition = [&tn_edges,&index_to_partition_idx](auto&& tensor_ptr, BraKetPos bkpos) { + // this reports whether bra/ket of tensor @c t is in the same + // partition + auto is_nop_braket_singlepartition = [&tn_edges, + &index_to_partition_idx]( + auto &&tensor_ptr, + BraKetPos bkpos) { auto expr_ptr = std::dynamic_pointer_cast(tensor_ptr); assert(expr_ptr); - auto bkrange = bkpos == BraKetPos::bra ? bra(*tensor_ptr) : ket(*tensor_ptr); + auto bkrange = + bkpos == BraKetPos::bra ? 
bra(*tensor_ptr) : ket(*tensor_ptr); assert(ranges::size(bkrange) > 1); int partition = -1; // will be set to the actual partition index - for(auto&& idx: bkrange) { + for (auto &&idx : bkrange) { auto idx_full_label = idx.full_label(); auto edge_it = tn_edges.find(idx_full_label); assert(edge_it != tn_edges.end()); - auto vertex = edge_it - tn_edges.begin(); // vertex idx for this Index + auto vertex = + edge_it - tn_edges.begin(); // vertex idx for this Index auto idx_part_it = index_to_partition_idx.find(vertex); - if (idx_part_it != index_to_partition_idx.end()) { // is part of a partition + if (idx_part_it != + index_to_partition_idx.end()) { // is part of a partition if (partition == -1) // first index partition = idx_part_it->second; - else if (partition != idx_part_it->second) // compare to the first index's partition # + else if (partition != + idx_part_it->second) // compare to the first index's + // partition # return false; - } - else // not part of a partition? fail + } else // not part of a partition? fail return false; } return true; }; bool multipartition_nop_braket = false; - for(auto&& tensor: tn_tensors) { - auto nop_ptr = std::dynamic_pointer_cast>(tensor); + for (auto &&tensor : tn_tensors) { + auto nop_ptr = + std::dynamic_pointer_cast>(tensor); if (nop_ptr) { // if NormalOperator - auto make_logic_error =[&nop_ptr](BraKetPos pos) { + auto make_logic_error = [&nop_ptr](BraKetPos pos) { std::basic_stringstream oss; - oss << "WickTheorem::use_topology is true but NormalOperator " + oss << "WickTheorem::use_topology is true but " + "NormalOperator " << nop_ptr->to_latex() << " has " << (pos == BraKetPos::bra ? 
"bra" : "ket") << " whose indices are not topologically equivalent"; @@ -696,7 +736,6 @@ ExprPtr WickTheorem::compute(const bool count_only) { throw make_logic_error(BraKetPos::ket); } } - } } @@ -766,8 +805,7 @@ void WickTheorem::reduce(ExprPtr &expr) const { try { detail::reduce_wick_impl(subexpr_cast, external_indices_); subexpr = subexpr_cast; - } - catch (detail::zero_result &) { + } catch (detail::zero_result &) { subexpr = std::make_shared(0); } } From 664b870582edb8e1a93f5be3b8d2987ac72c981a Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 7 Apr 2022 15:17:56 -0400 Subject: [PATCH 045/120] Add docs to asymptotic cost class. --- SeQuant/core/asy_cost.hpp | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/SeQuant/core/asy_cost.hpp b/SeQuant/core/asy_cost.hpp index 16595cf47..93cba7548 100644 --- a/SeQuant/core/asy_cost.hpp +++ b/SeQuant/core/asy_cost.hpp @@ -9,12 +9,22 @@ namespace sequant { +/// +/// Represents an symbolic asymptotic cost in terms of active_occupied +/// and the rest orbitals. +/// eg. +/// - AsyCost{2,4} implies scaling of $O^2V^4$. In other words, the cost +/// scales by the second power in the number of active_occupied orbitals +/// and the fourth power in the number of the rest orbitals. +/// - AsyCost{2, 4, boost::rational{1,2}} implies the same scaling as +/// above except the numeric value obtained by substituting $O$ and $V$ +/// numbers is then halved. class AsyCost { private: class AsyCostEntry { - size_t occ_; - size_t virt_; - mutable boost::rational count_; + size_t occ_; // power of active_occupied + size_t virt_; // power of the rest orbitals + mutable boost::rational count_; // count of this asymptotic symbol public: template @@ -118,18 +128,28 @@ class AsyCost { }; private: - sequant::container::set cost_; + container::set cost_; AsyCost(AsyCostEntry); public: + /// + /// \return The infinitely scaling cost. 
static AsyCost const &max(); + /// + /// \return The zero cost. static AsyCost const &zero(); + /// + /// Default construct to zero cost. AsyCost(); - AsyCost(size_t nocc, size_t nvirt, boost::rational count = 1); + /// + /// \param nocc Asymptotic scaling exponent in the active occupied orbitals. + /// \param nrest Asymptotic scaling exponent in the rest orbitals. + /// \param count Rational number of times this cost repeats. + AsyCost(size_t nocc, size_t nrest, boost::rational count = 1); AsyCost(AsyCost const &) = default; @@ -139,6 +159,10 @@ class AsyCost { AsyCost &operator=(AsyCost &&) = default; + /// + /// \param nocc Substitute $O$ by nocc. + /// \param nvirt Substitute $V$ by nvirt. + /// \return Scaled asymptotic cost. [[nodiscard]] boost::rational ops(unsigned short nocc, unsigned short nvirt) const; From 945c1928fcb9395380740438a80f0be4fffed7f4 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 7 Apr 2022 15:32:33 -0400 Subject: [PATCH 046/120] code cleanup --- SeQuant/core/eval_expr.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/SeQuant/core/eval_expr.cpp b/SeQuant/core/eval_expr.cpp index ee6973747..4312e7d2f 100644 --- a/SeQuant/core/eval_expr.cpp +++ b/SeQuant/core/eval_expr.cpp @@ -70,13 +70,13 @@ Symmetry EvalExpr::infer_tensor_symmetry_sum(EvalExpr const& xpr1, Symmetry EvalExpr::infer_tensor_symmetry_prod(EvalExpr const& xpr1, EvalExpr const& xpr2) { + using index_set_t = container::set; // HELPER LAMBDA // check if all the indices in cont1 are in cont2 AND vice versa auto all_common_indices = [](const auto& cont1, const auto& cont2) -> bool { - if (cont1.size() != cont2.size()) return false; - - return (cont1 | ranges::to>) == - (cont2 | ranges::to>); + return (cont1.size() == cont2.size()) && + (cont1 | ranges::to) == + (cont2 | ranges::to); }; // ////// @@ -88,8 +88,7 @@ Symmetry EvalExpr::infer_tensor_symmetry_prod(EvalExpr const& xpr1, auto const uniq_idxs = 
ranges::views::concat(tnsr1.const_braket(), tnsr2.const_braket()) - | ranges::to>; + | ranges::to; if (ranges::distance(uniq_idxs) == tnsr1.const_braket().size() + tnsr2.const_braket().size()) { From 489ecc66606f6256960add548b919ef2920980ca Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 7 Apr 2022 15:52:58 -0400 Subject: [PATCH 047/120] Add dox to the full-binary node class. --- SeQuant/core/binary_node.hpp | 46 ++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/SeQuant/core/binary_node.hpp b/SeQuant/core/binary_node.hpp index 25f5c7228..012fca72f 100644 --- a/SeQuant/core/binary_node.hpp +++ b/SeQuant/core/binary_node.hpp @@ -9,6 +9,12 @@ namespace sequant { +/// +/// @brief Represents a node with data of @c T type in a full-binary tree. +/// +/// A full binary tree is a binary tree in which each node has two children +/// or no children. +/// template class FullBinaryNode { public: @@ -36,18 +42,40 @@ class FullBinaryNode { } public: + /// + /// Construct an internal node with emtpy left and right nodes. + /// + /// \param d Data in the internal node. FullBinaryNode(T d) : data_{std::move(d)} {} + /// + /// Construct an internal node with left and right node data. + /// + /// \param d Data in the internal node. + /// \param l Data in the left node. + /// \param r Data in the right node. FullBinaryNode(T d, T l, T r) : data_{std::move(d)}, left_{std::make_unique(std::move(l))}, right_{std::make_unique(std::move(r))} {} + /// + /// Constructs an internal node with left and right nodes. + /// + /// \param d Data in the internal node. + /// \param l Left node. + /// \param r Right node FullBinaryNode(T d, FullBinaryNode l, FullBinaryNode r) : data_{std::move(d)}, left_{std::make_unique>(std::move(l))}, right_{std::make_unique>(std::move(r))} {} + /// + /// Constructs an internal node with left and right node pointers. + /// + /// \param d Data in the internal node. + /// \param l Left node pointer. 
+ /// \param r Right node pointer. FullBinaryNode(T d, node_ptr&& l, node_ptr&& r) : data_{std::move(d)}, left_{std::move(l)}, right_{std::move(r)} {} @@ -75,14 +103,32 @@ class FullBinaryNode { bool leaf() const { return !(left_ || right_); } + /// + /// \return Returns the data stored by the node. T const& operator*() const { return data_; } + /// + /// \return Returns the data stored by the node. T& operator*() { return data_; } + /// + /// \return Returns the pointer to the data stored by the node. T const* operator->() const { return &data_; } + /// + /// \return Returns the pointer to the data stored by the node. T* operator->() { return &data_; } + /// + /// Left-fold a container to make a full-binary node. + /// + /// \param container To be binarized. + /// \param binarize Fold function. + /// \c binarize needs to support: + /// - unary function call with a return value (say of type R) + /// to the element type of the container (say of type V) + /// - binary function call of kind f(R,V) that returns R type + /// template FullBinaryNode(Cont const& container, F&& binarize) { using value_type = decltype(*ranges::begin(container)); From e9b953944e2805f9ecbbb2b7884ae21aff339643 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 7 Apr 2022 15:58:23 -0400 Subject: [PATCH 048/120] Dox to RandColor class. --- SeQuant/domain/utils/rand_color.hpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/SeQuant/domain/utils/rand_color.hpp b/SeQuant/domain/utils/rand_color.hpp index 021771f1f..f161e5725 100644 --- a/SeQuant/domain/utils/rand_color.hpp +++ b/SeQuant/domain/utils/rand_color.hpp @@ -16,14 +16,17 @@ namespace sequant::utils { * @author Bimal Gaudel * @version 29 Sep 2020 */ + class RandColor { private: + /// Used for better color distribution static constexpr double GOLDEN_RATIO_CONJ = 0.618033988749895; std::uniform_real_distribution uniRealDist; std::mt19937_64 randEngine; + /// Do not return the same hue again from this object. 
std::set hue_cache_; public: From 259c1a62f8462ff8ebb3fea1819701ec0cfbc3ea Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 7 Apr 2022 16:14:36 -0400 Subject: [PATCH 049/120] Flag Expr::clone() for flattening out the sub-expressions. --- SeQuant/core/expr.hpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/SeQuant/core/expr.hpp b/SeQuant/core/expr.hpp index f1d9b1d3d..35c7dc170 100644 --- a/SeQuant/core/expr.hpp +++ b/SeQuant/core/expr.hpp @@ -78,7 +78,10 @@ class Expr : public std::enable_shared_from_this, public ranges::view_faca virtual std::wstring to_wolfram() const; /// @return a clone of this object - /// @note must be overridden in the derived class + /// @note - must be overridden in the derived class. + /// - flattens out the nested structure + /// for example, a product of products will be + /// just a product of tensors virtual ExprPtr clone() const; /// Canonicalizes @c this and returns the biproduct of canonicalization (e.g. phase) From fc5fe20acacb44968b685f14a9b3370384de3f13 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 7 Apr 2022 16:33:11 -0400 Subject: [PATCH 050/120] Add dox to clone_packed and remove clone_packed(Tensor const&) from header. --- SeQuant/core/clone_packed.hpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/SeQuant/core/clone_packed.hpp b/SeQuant/core/clone_packed.hpp index 11352540f..587a3ad81 100644 --- a/SeQuant/core/clone_packed.hpp +++ b/SeQuant/core/clone_packed.hpp @@ -17,12 +17,16 @@ namespace sequant { /// \return a cloned copy of \c expr ExprPtr clone_packed(ExprPtr expr); +/// +/// Clone a sum by preserving nested structures. +/// ExprPtr clone_packed(Sum const&); +/// +/// Clone a product by preserving nested structures. 
+/// ExprPtr clone_packed(Product const&); -ExprPtr clone_packed(Tensor const&); - } // namespace #endif // SEQUANT_CLONE_PACKED_HPP From 36b175cc2bd01aa662f5a239991247ca9f792de0 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 7 Apr 2022 19:44:56 -0400 Subject: [PATCH 051/120] Add missing header. --- SeQuant/domain/eval/cache_manager.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/SeQuant/domain/eval/cache_manager.hpp b/SeQuant/domain/eval/cache_manager.hpp index 55d4ed41b..3d63feb77 100644 --- a/SeQuant/domain/eval/cache_manager.hpp +++ b/SeQuant/domain/eval/cache_manager.hpp @@ -3,6 +3,7 @@ #include #include +#include namespace sequant::eval { From 070b929b2cd362173408032d0c0e6010ca27f0f8 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Thu, 7 Apr 2022 19:45:11 -0400 Subject: [PATCH 052/120] Bug fix. --- SeQuant/domain/eval/eval.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SeQuant/domain/eval/eval.hpp b/SeQuant/domain/eval/eval.hpp index b2c5fc966..6fda037ed 100644 --- a/SeQuant/domain/eval/eval.hpp +++ b/SeQuant/domain/eval/eval.hpp @@ -132,7 +132,7 @@ CacheManager make_cache_manager(Iterable const& nodes, auto less_repeating = [](auto const& pair) { return pair.second < 2; }; ranges::actions::remove_if(hash_to_counts, less_repeating); - if (!persistent_leaves) return CacheManager{hash_to_counts}; + if (!persistent_leaves) return CacheManager{hash_to_counts,{}}; container::set leaf_hashes{}; for (auto const& n : nodes) { From b69d9a5c0a813817f41352e2e62cfced449003f5 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Fri, 8 Apr 2022 13:55:53 -0400 Subject: [PATCH 053/120] Update intermediate symmetry inference. 
--- SeQuant/core/eval_expr.cpp | 44 +++++++++++++++++++++-------------- tests/unit/test_eval_expr.cpp | 8 +++++++ 2 files changed, 34 insertions(+), 18 deletions(-) diff --git a/SeQuant/core/eval_expr.cpp b/SeQuant/core/eval_expr.cpp index 4312e7d2f..409f23a47 100644 --- a/SeQuant/core/eval_expr.cpp +++ b/SeQuant/core/eval_expr.cpp @@ -83,28 +83,36 @@ Symmetry EvalExpr::infer_tensor_symmetry_prod(EvalExpr const& xpr1, auto const& tnsr1 = xpr1.tensor(); auto const& tnsr2 = xpr2.tensor(); - if (xpr1.hash() == xpr2.hash()){ + auto imed_sym = Symmetry::invalid; + if (xpr1.hash() == xpr2.hash()) { // potential outer product - - auto const uniq_idxs = ranges::views::concat(tnsr1.const_braket(), - tnsr2.const_braket()) - | ranges::to; - - if (ranges::distance(uniq_idxs) == tnsr1.const_braket().size() - + tnsr2.const_braket().size()) { - return Symmetry::antisymm; + auto const uniq_idxs = + ranges::views::concat(tnsr1.const_braket(), tnsr2.const_braket()) | + ranges::to; + + if (ranges::distance(uniq_idxs) == + tnsr1.const_braket().size() + tnsr2.const_braket().size()) { + // outer product confirmed + imed_sym = Symmetry::antisymm; + } + } else { + bool whole_bk_contracted = (all_common_indices(tnsr1.bra(), tnsr2.ket()) || + all_common_indices(tnsr1.ket(), tnsr2.bra())); + auto sym1 = tnsr1.symmetry(); + auto sym2 = tnsr2.symmetry(); + assert(sym1 != Symmetry::invalid); + assert(sym2 != Symmetry::invalid); + if (whole_bk_contracted && + !(sym1 == Symmetry::nonsymm || sym2 == Symmetry::nonsymm)) { + imed_sym = sym1 == sym2 ? 
sym1 : Symmetry::symm; + + } else { + imed_sym = Symmetry::nonsymm; } } - bool whole_bk_contracted = (all_common_indices(tnsr1.bra(), tnsr2.ket()) || - all_common_indices(tnsr1.ket(), tnsr2.bra())); - - // sym/sym or antisym/antisym with whole braket contraction - if (whole_bk_contracted && tnsr1.symmetry() == tnsr2.symmetry()) - return tnsr1.symmetry(); - - // non symmetric intermediate - return Symmetry::nonsymm; + assert(imed_sym != Symmetry::invalid); + return imed_sym; } ParticleSymmetry EvalExpr::infer_particle_symmetry(Symmetry s) { diff --git a/tests/unit/test_eval_expr.cpp b/tests/unit/test_eval_expr.cpp index 26dc5b958..43de7f8d5 100644 --- a/tests/unit/test_eval_expr.cpp +++ b/tests/unit/test_eval_expr.cpp @@ -161,6 +161,14 @@ TEST_CASE("TEST_EVAL_EXPR", "[EvalExpr]") { const auto x78 = EvalExpr{EvalExpr{t7}, EvalExpr{t8}, EvalOp::Prod}; REQUIRE(x78.tensor().symmetry() == Symmetry::nonsymm); + + // whole bra <-> ket contraction between symmetric and antisymmetric tensors + auto const t9 = parse_expr(L"g_{a1,a2}^{a3,a4}", + Symmetry::antisymm)->as(); + auto const t10 = parse_expr(L"t_{a3,a4}^{i1,i2}", + Symmetry::symm)->as(); + auto const x910 = EvalExpr{EvalExpr{t9}, EvalExpr{t10}, EvalOp::Prod}; + REQUIRE(x910.tensor().symmetry() == Symmetry::symm); } SECTION("Symmetry of sum") { From 02c5b7376a97cad77473ab16c1921b1c4973d14f Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Sat, 9 Apr 2022 15:08:08 -0400 Subject: [PATCH 054/120] Add dox to CacheManager and avoid gutting it for unit testing using access_by pattern. 
--- CMakeLists.txt | 1 + SeQuant/domain/eval/cache_manager.hpp | 84 ++++++++++++++++++++++++++- tests/unit/test_cache_manager.cpp | 32 +++++++--- 3 files changed, 108 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 353c7feba..e42b1dfbd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -299,6 +299,7 @@ if (BUILD_TESTING) tests/unit/test_token_sequant.cpp tests/unit/test_rpn.cpp tests/unit/test_clone_packed.cpp + tests/unit/test_cache_manager.cpp ) if (TARGET tiledarray) diff --git a/SeQuant/domain/eval/cache_manager.hpp b/SeQuant/domain/eval/cache_manager.hpp index 3d63feb77..fcc379f4e 100644 --- a/SeQuant/domain/eval/cache_manager.hpp +++ b/SeQuant/domain/eval/cache_manager.hpp @@ -7,6 +7,45 @@ namespace sequant::eval { +/// +/// This class implements a cache manager useful for the cases when the number +/// of times the cached objects will be accessed, and/or they need to be cached +/// for indefinitely many accesses. +/// +/// \tparam Data Type of the data to be cached. +/// @details +/// - It is a wrapper around a map structure. +/// - The keys of the map, which are provided by the user at +/// construction, are used to store and access data. +/// - The mapped values(MVs) of the map are a special objects with +/// the following properties: +/// - MV has a store method that takes a key and the cached data. +/// Storing a data implicitly accesses it after storing. This a +/// design decision made based on the use case of this class. So, +/// decaying entries start decaying right from the storing. See +/// below for more on decaying entries. +/// - MV has an access method that returns a pointer to the cached +/// data. +/// - MV has a tag to identify itself as having either a persistent +/// or a decaying lifetime. +/// - If MV has a decaying lifetime: +/// - It takes a count at construction aka max_life. +/// - The current lifetime count is set to max_life +/// at construction. 
+///       - Accessing the cached data decreases the MV's current
+///         lifetime count.
+///       - Once the current lifetime reaches zero, the pointer to the
+///         cached data becomes equivalent to a nullptr thereby freeing
+///         the cache memory.
+///       - It has a reset method that restores the current lifetime to
+///         the max_life. Resetting also resets the cached data pointer.
+///       - Calling store method with zero lifetime count in the MV
+///         will store nothing.
+///   - If MV has a persistent lifetime:
+///       - It takes no param at construction.
+///       - Allows indefinitely many accesses to the cached data.
+///       - Calling reset method resets the cached data pointer.
+///
 template 
 class CacheManager {
  public:
@@ -46,10 +85,12 @@ class CacheManager {
 
     ptr_t access() noexcept {
       if (!data_p) return data_p;
+      // decay() < 0 implies persistent lifetime
+      // decay() >= 0 implies decaying lifetime
       return decay() == 0 ? std::move(data_p) : data_p;
     }
 
-    void store(D data) noexcept {
+    void store(D data) {
       data_p = std::make_shared(std::move(data));
     }
 
@@ -63,6 +104,11 @@ class CacheManager {
     [[nodiscard]] count_t life_count() const noexcept { return life_c; }
 
    private:
+    ///
+    /// @details life_c == 0 for objects with Lifetime::Decaying implies full
+    /// decay. They don't decay beyond zero.
+    /// \return If object has persistent lifetime return -1 else
+    /// decrement lifetime, if it hasn't fully decayed and return life_c
     [[nodiscard]] int decay() noexcept {
       return life_t == Lifetime::Persistent ? -1 : (life_c > 0 ? --life_c : 0);
     }
@@ -77,6 +123,16 @@ class CacheManager {
   container::map> cache_map_;
 
  public:
+  ///
+  /// @brief Construct a cache manager.
+  /// CacheManager<>::key_t type keys are expected for construction.
+  ///
+  /// @param decaying A map-like iterable that maps the keys to the maximum
+  ///        number of times the associated data should be accessed.
+  /// @param persistent An iterable of keys to the data that are to be accessed
+  ///        indefinitely many times.
+  /// @note Repeating keys in @c decaying and @c persistent leads to
+  /// undefined behavior.
   template ,
             typename Iterable2 = container::svector>
   CacheManager(Iterable1 &&decaying, Iterable2 &&persistent) {
@@ -86,14 +142,28 @@ class CacheManager {
     for (auto &&k : persistent) cache_map_.try_emplace(k, entry{});
   }
 
+  ///
+  /// Resets all cached data.
+  ///
   void reset_all() {
     for (auto &&[k, v] : cache_map_) v.reset(false);
   }
 
+  ///
+  /// Only resets decaying cached data, which restores their lifetimes to the
+  /// values they were constructed with.
   void reset_decaying() {
     for (auto &&[k, v] : cache_map_) v.reset(true);
   }
 
+  ///
+  /// @brief Access cached data.
+  ///
+  /// @param key The key that identifies the cached data.
+  /// @return Optional object to the pointer to the cached data. Only if @c key
+  /// doesn't exist in the cache database, nullopt is returned.
+  /// In other words if @c key was not passed during construction, the
+  /// return value is a std::nullopt object.
   std::optional access(key_t key) noexcept {
     if (auto &&found = cache_map_.find(key); found != cache_map_.end())
       return found->second.access();
@@ -101,13 +171,23 @@ class CacheManager {
     return std::nullopt;
   }
 
+  ///
+  /// @param key The key to identify the cached data.
+  /// @param data The data to be cached.
+  /// \return Pointer to the stored data. Implicitly accesses the stored data,
+  ///         hence, decays the lifetime if the key accesses a decaying cache
+  ///         entry. Passing @c key that was not present during construction of
+  ///         this CacheManager object, stores nothing, but still returns a
ptr_t store(key_t key, Data data) { if (auto &&found = cache_map_.find(key); found != cache_map_.end()) return store(found->second, std::move(data)); return std::make_shared(std::move(data)); } - auto const& cache_map() const { return cache_map_; } + // for unit testing + template struct access_by; + template friend struct access_by; }; // CacheManager diff --git a/tests/unit/test_cache_manager.cpp b/tests/unit/test_cache_manager.cpp index a9a04a4fc..61492cdd4 100644 --- a/tests/unit/test_cache_manager.cpp +++ b/tests/unit/test_cache_manager.cpp @@ -4,13 +4,29 @@ #include "catch.hpp" +using data_type = int; + +namespace sequant::eval { +struct TestCacheManager {}; + +template <> +template <> +struct CacheManager::template access_by{ + auto const& map(CacheManager const& man) const { + return man.cache_map_; + } +}; +} // + TEST_CASE("TEST_CACHE_MANAGER", "[cache_manager]") { using ranges::views::concat; using ranges::views::zip; - using data_type = int; using manager_type = sequant::eval::CacheManager; using key_type = manager_type::key_t; using count_type = manager_type::count_t; + using tester_type = manager_type::access_by; + + auto const tester = tester_type{}; size_t constexpr n_persistent = 4; // arbitrary size_t constexpr n_decaying = 4; // arbitrary @@ -34,17 +50,19 @@ TEST_CASE("TEST_CACHE_MANAGER", "[cache_manager]") { SECTION("Construction") { auto const& man = man_const; + auto const& map = tester.map(man); // access private map object - REQUIRE(man.cache_map().size() == n_persistent + n_decaying); + REQUIRE(map.size() == n_persistent + n_decaying); // verifying the life count of decaying entries for (auto&& [k, c] : zip(decaying_keys, decaying_repeats)) - REQUIRE(man.cache_map().find(k)->second.life_count() == c); + REQUIRE(map.find(k)->second.life_count() == c); } SECTION("Data Access") { // need a non-const manager object auto man = man_const; + auto const& map = tester.map(man); // access private map object // filling data for (auto&& [k, v] : 
zip(concat(decaying_keys, persistent_keys), concat(decaying_vals, persistent_vals))) { @@ -64,8 +82,8 @@ TEST_CASE("TEST_CACHE_MANAGER", "[cache_manager]") { REQUIRE(entry); // optional> REQUIRE(entry.value()); // shared_ptr<..> REQUIRE(*entry.value() == v); - auto iter = man.cache_map().find(k); - REQUIRE(iter != man.cache_map().end()); + auto iter = map.find(k); + REQUIRE(iter != map.end()); REQUIRE(iter->second.life_count() == i - 1); } } @@ -74,7 +92,7 @@ TEST_CASE("TEST_CACHE_MANAGER", "[cache_manager]") { // accessing each decaying entry one more time should release // their *data* from the memory for (auto&& k : decaying_keys) { - auto iter = man.cache_map().find(k); + auto iter = map.find(k); REQUIRE(iter->second.life_count() == 1); REQUIRE(man.access(k).value()); // accessed once. non-null ptr returned REQUIRE_FALSE(man.access(k).value()); // nullptr returned @@ -88,7 +106,7 @@ TEST_CASE("TEST_CACHE_MANAGER", "[cache_manager]") { // now we reset the decaying entries which restores thier lifetimes man.reset_decaying(); for (auto&& [k, c] : zip(decaying_keys, decaying_repeats)) - REQUIRE(man.cache_map().find(k)->second.life_count() == c); + REQUIRE(map.find(k)->second.life_count() == c); // now we reset all entries man.reset_all(); From 215647cfd25d450187b2edcca9a986f0e42f4233 Mon Sep 17 00:00:00 2001 From: Bimal Gaudel Date: Sat, 9 Apr 2022 19:16:12 -0400 Subject: [PATCH 055/120] Some range-v3 views in gcc-9 behaved unexpectedly, fix it. 
--- tests/unit/test_cache_manager.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/unit/test_cache_manager.cpp b/tests/unit/test_cache_manager.cpp index 61492cdd4..7a86058c2 100644 --- a/tests/unit/test_cache_manager.cpp +++ b/tests/unit/test_cache_manager.cpp @@ -64,8 +64,10 @@ TEST_CASE("TEST_CACHE_MANAGER", "[cache_manager]") { auto man = man_const; auto const& map = tester.map(man); // access private map object // filling data - for (auto&& [k, v] : zip(concat(decaying_keys, persistent_keys), - concat(decaying_vals, persistent_vals))) { + auto const kvs = zip(concat(decaying_keys, persistent_keys), + concat(decaying_vals, persistent_vals)) + | ranges::to>; + for (auto&& [k, v] : kvs) { // NOTE: man.store() calls man.access() implicitly and // returns a shared_ptr to data // hence, a count of lifetime is lost right here From bdbe99730386024297654004b3411bb317935bdd Mon Sep 17 00:00:00 2001 From: nakulteke Date: Wed, 13 Apr 2022 13:00:23 -0400 Subject: [PATCH 056/120] removed R12 terms in CC equation generator --- SeQuant/domain/eqs/cceqs.cpp | 50 +++++++++++++++--------------------- SeQuant/domain/eqs/cceqs.hpp | 7 +++-- 2 files changed, 23 insertions(+), 34 deletions(-) diff --git a/SeQuant/domain/eqs/cceqs.cpp b/SeQuant/domain/eqs/cceqs.cpp index 0a8d2e5b8..f48179126 100644 --- a/SeQuant/domain/eqs/cceqs.cpp +++ b/SeQuant/domain/eqs/cceqs.cpp @@ -143,7 +143,7 @@ class ccresidual { ccresidual(size_t p, size_t n) : P(p), N(n) {} ExprPtr operator()(bool screen, bool use_topology, bool use_connectivity, - bool canonical_only, bool antisymm, bool r12) { + bool canonical_only, bool antisymm) { auto ahbar = [=](const bool screen) { auto connect = [=](std::initializer_list> connlist) { if (use_connectivity) @@ -151,37 +151,28 @@ class ccresidual { else return std::initializer_list>{}; }; - auto s_r12 = r12 ? 
ex(1) : ex(0); auto result = screened_vac_av{0}(A(P) * H(antisymm), connect({}), screen, use_topology, canonical_only, antisymm) + - - screened_vac_av{1}(A(P) * H(antisymm) * - (T(N, N, false, antisymm) + s_r12 * R12()), + screened_vac_av{1}(A(P) * H(antisymm) * T(N, N, false, antisymm), connect({{1, 2}}), screen, use_topology, canonical_only, antisymm) + - ex(1. / 2) * - screened_vac_av{2}(A(P) * H(antisymm) * - (T(N, N, false, antisymm) + s_r12 * R12()) * - (T(N, N, false, antisymm) + s_r12 * R12()), + screened_vac_av{2}(A(P) * H(antisymm) * T(N, N, false, antisymm) * + T(N, N, false, antisymm), connect({{1, 2}, {1, 3}}), screen, use_topology, canonical_only) + - ex(1. / 6) * - screened_vac_av{3}(A(P) * H(antisymm) * - (T(N, N, false, antisymm) + s_r12 * R12()) * - (T(N, N, false, antisymm) + s_r12 * R12()) * - (T(N, N, false, antisymm) + s_r12 * R12()), + screened_vac_av{3}(A(P) * H(antisymm) * T(N, N, false, antisymm) * + T(N, N, false, antisymm) * + T(N, N, false, antisymm), connect({{1, 2}, {1, 3}, {1, 4}}), screen, use_topology, canonical_only) + - ex(1. 
/ 24) * - screened_vac_av{4}(A(P) * H(antisymm) * - (T(N, N, false, antisymm) + s_r12 * R12()) * - (T(N, N, false, antisymm) + s_r12 * R12()) * - (T(N, N, false, antisymm) + s_r12 * R12()) * - (T(N, N, false, antisymm) + s_r12 * R12()), + screened_vac_av{4}(A(P) * H(antisymm) * T(N, N, false, antisymm) * + T(N, N, false, antisymm) * + T(N, N, false, antisymm) * + T(N, N, false, antisymm), connect({{1, 2}, {1, 3}, {1, 4}, {1, 5}}), screen, use_topology, canonical_only); simplify(result); @@ -201,14 +192,14 @@ class ccresidual_vec { void operator()(std::vector& result, bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm, bool r12) { + bool use_antisymm) { result[P] = ccresidual{P, N}(screen, use_topology, use_connectivity, - canonical_only, use_antisymm, r12); + canonical_only, use_antisymm); rapid_simplify(result[P]); if (P > PMIN) ccresidual_vec{P - 1, PMIN, N}(result, screen, use_topology, use_connectivity, canonical_only, - use_antisymm, r12); + use_antisymm); } }; // class ccresidual_vec @@ -220,11 +211,10 @@ cceqvec::cceqvec(size_t n, size_t p, size_t pmin) std::vector cceqvec::operator()(bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm, - bool r12) { + bool use_antisymm) { std::vector result(P + 1); ccresidual_vec{P, PMIN, N}(result, screen, use_topology, use_connectivity, - canonical_only, use_antisymm, r12); + canonical_only, use_antisymm); return result; } @@ -243,10 +233,10 @@ compute_cceqvec::compute_cceqvec(size_t p, size_t pmin, size_t n) void compute_cceqvec::operator()(bool print, bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm, bool r12) { + bool use_antisymm) { tpool.start(N); auto eqvec = cceqvec{N, P}(screen, use_topology, use_connectivity, - canonical_only, use_antisymm, r12); + canonical_only, use_antisymm); tpool.stop(N); std::wcout << std::boolalpha << "expS" << N << "[screen=" << screen << ",use_topology=" 
<< use_topology @@ -272,9 +262,9 @@ compute_all::compute_all(size_t nmax) : NMAX(nmax) {} void compute_all::operator()(bool print, bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm, bool r12) { + bool use_antisymm) { for (size_t N = 2; N <= NMAX; ++N) compute_cceqvec{N, 1, N}(print, screen, use_topology, use_connectivity, - canonical_only, use_antisymm, r12); + canonical_only, use_antisymm); } } // namespace sequant::eqs diff --git a/SeQuant/domain/eqs/cceqs.hpp b/SeQuant/domain/eqs/cceqs.hpp index 0a48b7208..02ac38e04 100644 --- a/SeQuant/domain/eqs/cceqs.hpp +++ b/SeQuant/domain/eqs/cceqs.hpp @@ -17,7 +17,7 @@ class cceqvec { std::vector operator()(bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm, bool r12); + bool use_antisymm); }; // class cceqvec class compute_cceqvec { @@ -28,7 +28,7 @@ class compute_cceqvec { void operator()(bool print, bool screen, bool use_topology, bool use_connectivity, bool canonical_only, - bool use_antisymm, bool r12); + bool use_antisymm); }; // class compute_cceqvec class compute_all { @@ -39,8 +39,7 @@ class compute_all { void operator()(bool print = true, bool screen = true, bool use_topology = true, bool use_connectivity = true, - bool canonical_only = true, - bool use_antisymm = true, bool r12 = false); + bool canonical_only = true, bool use_antisymm = true); }; // class compute_all } // namespace sequant::eqs From 8535474e0aa49f133380492cb90a8062592675cf Mon Sep 17 00:00:00 2001 From: nakulteke Date: Wed, 13 Apr 2022 13:01:01 -0400 Subject: [PATCH 057/120] separated spin-orbital, closed-shell spintrace and open-shell spintrace examples --- CMakeLists.txt | 54 ++++--- examples/osstcc/osstcc.cpp | 120 +++++++++++++++ examples/pCCSDT/pccsdt.cpp | 42 ++++++ examples/srcc/srcc.cpp | 291 ------------------------------------- examples/stcc/stcc.cpp | 228 +++++++++++++++++++++++++++++ 5 files changed, 426 insertions(+), 309 deletions(-) create 
mode 100644 examples/osstcc/osstcc.cpp create mode 100644 examples/pCCSDT/pccsdt.cpp create mode 100644 examples/stcc/stcc.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index e8eff598c..0fd1a3a0b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -336,13 +336,31 @@ if (BUILD_TESTING) set(example0 srcc) add_executable(${example0} EXCLUDE_FROM_ALL examples/${example0}/${example0}.cpp) - find_package(Eigen3 3.0 REQUIRED NO_MODULE) - target_link_libraries(${example0} SeQuant Eigen3::Eigen ${TBB_LIBRARIES}) + target_link_libraries(${example0} SeQuant) - set(example1 uccf12) + # Single-Reference closed-shell Coupled-Cluster equation generation (spin-traced) + set(example1 stcc) add_executable(${example1} EXCLUDE_FROM_ALL examples/${example1}/${example1}.cpp) - target_link_libraries(${example1} SeQuant ${TBB_LIBRARIES}) + find_package(Eigen3 3.0 REQUIRED NO_MODULE) + target_link_libraries(${example1} SeQuant Eigen3::Eigen) + + # Single-Reference open-shell equation generation (spin-traced) + set(example2 osstcc) + add_executable(${example2} EXCLUDE_FROM_ALL + examples/${example2}/${example2}.cpp) + target_link_libraries(${example2} SeQuant) + + set(example3 uccf12) + add_executable(${example3} EXCLUDE_FROM_ALL + examples/${example3}/${example3}.cpp) + target_link_libraries(${example3} SeQuant ${TBB_LIBRARIES}) + + # parametrized CCSDT (spin-orbital) + set(example4 pccsdt) + add_executable(${example4} EXCLUDE_FROM_ALL + examples/${example4}/${example4}.cpp) + target_link_libraries(${example4} SeQuant) set(example_eval_src examples/eval/eval_utils.hpp @@ -357,37 +375,37 @@ if (BUILD_TESTING) ) if (TARGET tiledarray) - set(example2 eval_ta) - add_executable(${example2} EXCLUDE_FROM_ALL + set(example5 eval_ta) + add_executable(${example5} EXCLUDE_FROM_ALL ${example_eval_src} SeQuant/domain/eval/eval_ta.hpp examples/eval/ta/data_world_ta.hpp examples/eval/ta/scf_ta.hpp examples/eval/ta/main.cpp) - target_link_libraries(${example2} SeQuant ${TBB_LIBRARIES} tiledarray) + 
target_link_libraries(${example5} SeQuant ${TBB_LIBRARIES} tiledarray) endif (TARGET tiledarray) - set(example4 antisymmetrizer_test) - add_executable(${example4} EXCLUDE_FROM_ALL - examples/${example4}/${example4}.cpp) - target_link_libraries(${example4} SeQuant ${TBB_LIBRARIES}) - target_link_directories(${example4} PUBLIC ${TBB_LIBRARIES}) - target_compile_definitions(${example4} PRIVATE SEQUANT_HAS_TILEDARRAY) + set(example6 antisymmetrizer_test) + add_executable(${example6} EXCLUDE_FROM_ALL + examples/${example6}/${example6}.cpp) + target_link_libraries(${example6} SeQuant ${TBB_LIBRARIES}) + target_link_directories(${example6} PUBLIC ${TBB_LIBRARIES}) + target_compile_definitions(${example6} PRIVATE SEQUANT_HAS_TILEDARRAY) if (BTAS_SOURCE_DIR) - set(example3 eval_btas) - add_executable(${example3} EXCLUDE_FROM_ALL + set(example7 eval_btas) + add_executable(${example7} EXCLUDE_FROM_ALL ${example_eval_src} SeQuant/domain/eval/eval_btas.hpp examples/eval/btas/data_world_btas.hpp examples/eval/btas/scf_btas.hpp examples/eval/btas/main.cpp) - target_include_directories(${example3} PUBLIC ${BTAS_SOURCE_DIR} ${Boost_INCLUDE_DIRS}) - target_link_libraries(${example3} SeQuant ${TBB_LIBRARIES}) + target_include_directories(${example7} PUBLIC ${BTAS_SOURCE_DIR} ${Boost_INCLUDE_DIRS}) + target_link_libraries(${example7} SeQuant ${TBB_LIBRARIES}) endif (BTAS_SOURCE_DIR) # add tests for running examples - set(lastexample 4) + set(lastexample 8) foreach (i RANGE ${lastexample}) if (TARGET ${example${i}}) add_test(sequant/example/${example${i}}/build "${CMAKE_COMMAND}" diff --git a/examples/osstcc/osstcc.cpp b/examples/osstcc/osstcc.cpp new file mode 100644 index 000000000..fb0ea303d --- /dev/null +++ b/examples/osstcc/osstcc.cpp @@ -0,0 +1,120 @@ +#include +#include +#include +#include +#include + +using namespace sequant; + +#define runtime_assert(tf) \ + if (!(tf)) { \ + std::ostringstream oss; \ + oss << "failed assert at line " << __LINE__ << " in SRCC example"; \ + 
throw std::runtime_error(oss.str().c_str()); \ + } + +int main(int argc, char* argv[]) { + mbpt::set_default_convention(); + TensorCanonicalizer::register_instance( + std::make_shared()); + +#ifndef NDEBUG + const size_t DEFAULT_NMAX = 3; +#else + const size_t DEFAULT_NMAX = 4; +#endif + const size_t NMAX = argc > 1 ? std::atoi(argv[1]) : DEFAULT_NMAX; + + /// Make external index + auto ext_idx_list = [](const int i_max) { + container::vector> ext_idx_list; + + for (size_t i = 1; i <= i_max; ++i) { + auto label = std::to_wstring(i); + auto occ_i = Index::make_label_index( + IndexSpace::instance(IndexSpace::active_occupied), label); + auto virt_i = Index::make_label_index( + IndexSpace::instance(IndexSpace::active_unoccupied), label); + container::vector pair = {occ_i, virt_i}; + ext_idx_list.push_back(pair); + } + return ext_idx_list; + }; + + // Spin-orbital coupled cluster + auto cc_r = sequant::eqs::cceqvec{NMAX, NMAX}(true, true, true, true, true); + + // + // Open-shell spintrace + // + std::cout << "Open-shell coupled cluster: nterms per spin blocks: " + << std::endl; + std::vector> os_cc_st_r(cc_r.size()); + for (auto i = 1; i < cc_r.size(); ++i) { + Tensor A = + cc_r[i]->as().summand(0)->as().factors()[0]->as(); + assert(A.label() == L"A"); + auto P_vec = open_shell_P_op_vector(A); + auto A_vec = open_shell_A_op(A); + assert(P_vec.size() == i + 1); + + std::vector concat_terms(i + 1); + size_t n_spin_orbital_term = 0; + for (auto& product_term : *cc_r[i]) { + auto term = remove_tensor(product_term->as(), L"A"); + std::vector os_st(i + 1); + + // Apply the P operators on the product term without the A, + // Expand the P operators and spin-trace the expression + // Then apply A operator, canonicalize and remove A operator + for (int s = 0; s != os_st.size(); ++s) { + os_st.at(s) = P_vec.at(s) * term; + expand(os_st.at(s)); + os_st.at(s) = expand_P_op(os_st.at(s), false, true); + os_st.at(s) = + open_shell_spintrace(os_st.at(s), ext_idx_list(i), s).at(0); + if 
(i > 2) { + os_st.at(s) = A_vec.at(s) * os_st.at(s); + simplify(os_st.at(s)); + os_st.at(s) = remove_tensor(os_st.at(s), L"A"); + } + } + + for (size_t j = 0; j != os_st.size(); ++j) { + concat_terms.at(j).append(os_st.at(j)); + } + ++n_spin_orbital_term; + } + + // Combine spin-traced terms for the current residual + std::vector expr_vec; + std::cout << "CC R" << i << ": "; + for (auto& spin_case : concat_terms) { + auto ptr = sequant::ex(spin_case); + expr_vec.push_back(ptr); + std::cout << ptr->size() << " "; + } + + os_cc_st_r.at(i) = std::move(expr_vec); + std::cout << "\n"; + } + + if (NMAX == 4) { + runtime_assert(os_cc_st_r.size() == 5) + runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30) + runtime_assert(os_cc_st_r.at(2).at(1)->size() == 130) + runtime_assert(os_cc_st_r.at(2).at(2)->size() == 74) + runtime_assert(os_cc_st_r.at(3).at(1)->size() == 249) + runtime_assert(os_cc_st_r.at(3).at(3)->size() == 124) + runtime_assert(os_cc_st_r.at(4).at(1)->size() == 356) + runtime_assert(os_cc_st_r.at(4).at(2)->size() == 386) + runtime_assert(os_cc_st_r.at(4).at(4)->size() == 156) + } else if (NMAX == 3) { + runtime_assert(os_cc_st_r.size() == 4) + runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30) + runtime_assert(os_cc_st_r.at(2).at(0)->size() == 65) + runtime_assert(os_cc_st_r.at(2).at(1)->size() == 122) + runtime_assert(os_cc_st_r.at(3).at(2)->size() == 209) + runtime_assert(os_cc_st_r.at(3).at(3)->size() == 75) + } +} diff --git a/examples/pCCSDT/pccsdt.cpp b/examples/pCCSDT/pccsdt.cpp new file mode 100644 index 000000000..e61300712 --- /dev/null +++ b/examples/pCCSDT/pccsdt.cpp @@ -0,0 +1,42 @@ +// +// Created by Nakul Teke on 4/13/22. 
+// +// Spin-orbital pCCSDT example +#include +#include +#include +#include + +#include + +using namespace sequant; + +#define runtime_assert(tf) \ + if (!(tf)) { \ + std::ostringstream oss; \ + oss << "failed assert at line " << __LINE__ << " in SRCC example"; \ + throw std::runtime_error(oss.str().c_str()); \ + } + +int main(int argc, char* argv[]) { + std::setlocale(LC_ALL, "en_US.UTF-8"); + std::wcout.precision(std::numeric_limits::max_digits10); + std::wcerr.precision(std::numeric_limits::max_digits10); + std::wcout.sync_with_stdio(false); + std::wcerr.sync_with_stdio(false); + std::wcout.imbue(std::locale("en_US.UTF-8")); + std::wcerr.imbue(std::locale("en_US.UTF-8")); + std::wcout.sync_with_stdio(true); + std::wcerr.sync_with_stdio(true); + sequant::detail::OpIdRegistrar op_id_registrar; + + mbpt::set_default_convention(); + + using sequant::eqs::compute_all; + + TensorCanonicalizer::register_instance( + std::make_shared()); + + auto cc_r = sequant::eqs::cceqvec{3, 3}(true, true, true, true, true); + +} diff --git a/examples/srcc/srcc.cpp b/examples/srcc/srcc.cpp index 5b8270d4e..234c7a484 100644 --- a/examples/srcc/srcc.cpp +++ b/examples/srcc/srcc.cpp @@ -2,32 +2,11 @@ #include #include #include -#include - -#include #include using namespace sequant; -#define CLOSED_SHELL_SPINTRACE 1 -#if CLOSED_SHELL_SPINTRACE -ExprPtr biorthogonal_transform( - const sequant::ExprPtr& expr, const int n_particles, - const std::vector>& ext_index_groups = {{}}, - const double threshold = 1.e-12); -ExprPtr symmetrize_expr( - ExprPtr& expr, - const container::vector> ext_index_groups = {{}}); -#endif - -#define runtime_assert(tf) \ - if (!(tf)) { \ - std::ostringstream oss; \ - oss << "failed assert at line " << __LINE__ << " in SRCC example"; \ - throw std::runtime_error(oss.str().c_str()); \ - } - int main(int argc, char* argv[]) { std::setlocale(LC_ALL, "en_US.UTF-8"); std::wcout.precision(std::numeric_limits::max_digits10); @@ -74,274 +53,4 @@ int main(int argc, char* 
argv[]) { }); }); }); - - /// Make external index - auto ext_idx_list = [](const int i_max) { - container::vector> ext_idx_list; - - for (size_t i = 1; i <= i_max; ++i) { - auto label = std::to_wstring(i); - auto occ_i = Index::make_label_index( - IndexSpace::instance(IndexSpace::active_occupied), label); - auto virt_i = Index::make_label_index( - IndexSpace::instance(IndexSpace::active_unoccupied), label); - container::vector pair = {occ_i, virt_i}; - ext_idx_list.push_back(pair); - } - return ext_idx_list; - }; - - // Spin-orbital coupled cluster - auto cc_r = sequant::eqs::cceqvec{NMAX, NMAX}(true, true, true, true, true); - -#if CLOSED_SHELL_SPINTRACE - // - // Closed-shell spintrace (fast) - // - std::vector cc_st_r(cc_r.size()); - for (size_t i = 1; i < cc_r.size(); ++i) { - const auto tstart = std::chrono::high_resolution_clock::now(); - auto ext_idx = ext_idx_list(i); - cc_st_r[i] = sequant::closed_shell_CC_spintrace(cc_r[i]); - canonicalize(cc_st_r[i]); - - // Remove S operator - for (auto& term : *cc_st_r[i]) { - if (term->is()) term = remove_tensor(term->as(), L"S"); - } - - // Biorthogonal transformation - cc_st_r[i] = biorthogonal_transform(cc_st_r[i], i, ext_idx); - - // The symmetrizer operator is required for canonicalizer to give the - // correct result - if (i != 1) cc_st_r[i] = symmetrize_expr(cc_st_r[i], ext_idx); - simplify(cc_st_r[i]); - - // Remove S operator - for (auto& term : *cc_st_r[i]) { - if (term->is()) term = remove_tensor(term->as(), L"S"); - } - - auto tstop = std::chrono::high_resolution_clock::now(); - std::chrono::duration time_elapsed = tstop - tstart; - printf("CC R%lu size: %lu time: %5.3f sec.\n", i, cc_st_r[i]->size(), - time_elapsed.count()); - } - - if (NMAX == 4) { - runtime_assert(cc_st_r.size() == 5); - runtime_assert(cc_st_r.at(1)->size() == 30); // T1 - runtime_assert(cc_st_r.at(2)->size() == 78); // T2 - runtime_assert(cc_st_r.at(3)->size() == 567); // T3 - runtime_assert(cc_st_r.at(4)->size() == 2150); // T4 - } 
else if (NMAX == 3) { - runtime_assert(cc_st_r.size() == 4); - runtime_assert(cc_st_r.at(1)->size() == 30); // T1 - runtime_assert(cc_st_r.at(2)->size() == 73); // T2 - runtime_assert(cc_st_r.at(3)->size() == 490); // T3 - } - -#else - // - // Open-shell spintrace - // - std::cout << "Open-shell coupled cluster: nterms per spin blocks: " - << std::endl; - std::vector> os_cc_st_r(cc_r.size()); - for (auto i = 1; i < cc_r.size(); ++i) { - Tensor A = - cc_r[i]->as().summand(0)->as().factors()[0]->as(); - assert(A.label() == L"A"); - auto P_vec = open_shell_P_op_vector(A); - auto A_vec = open_shell_A_op(A); - assert(P_vec.size() == i + 1); - - std::vector concat_terms(i + 1); - size_t n_spin_orbital_term = 0; - for (auto& product_term : *cc_r[i]) { - auto term = remove_tensor(product_term->as(), L"A"); - std::vector os_st(i + 1); - - // Apply the P operators on the product term without the A, - // Expand the P operators and spin-trace the expression - // Then apply A operator, canonicalize and remove A operator - for (int s = 0; s != os_st.size(); ++s) { - os_st.at(s) = P_vec.at(s) * term; - expand(os_st.at(s)); - os_st.at(s) = expand_P_op(os_st.at(s), false, true); - os_st.at(s) = - open_shell_spintrace(os_st.at(s), ext_idx_list(i), s).at(0); - if (i > 2) { - os_st.at(s) = A_vec.at(s) * os_st.at(s); - simplify(os_st.at(s)); - os_st.at(s) = remove_tensor(os_st.at(s), L"A"); - } - } - - for (size_t j = 0; j != os_st.size(); ++j) { - concat_terms.at(j).append(os_st.at(j)); - } - ++n_spin_orbital_term; - } - - // Combine spin-traced terms for the current residual - std::vector expr_vec; - std::cout << "CC R" << i << ": "; - for (auto& spin_case : concat_terms) { - auto ptr = sequant::ex(spin_case); - expr_vec.push_back(ptr); - std::cout << ptr->size() << " "; - } - - os_cc_st_r.at(i) = std::move(expr_vec); - std::cout << "\n"; - } - - if (NMAX == 4) { - runtime_assert(os_cc_st_r.size() == 5); - runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30); - 
runtime_assert(os_cc_st_r.at(2).at(1)->size() == 130); - runtime_assert(os_cc_st_r.at(2).at(2)->size() == 74); - runtime_assert(os_cc_st_r.at(3).at(1)->size() == 249); - runtime_assert(os_cc_st_r.at(3).at(3)->size() == 124); - runtime_assert(os_cc_st_r.at(4).at(1)->size() == 356); - runtime_assert(os_cc_st_r.at(4).at(2)->size() == 386); - runtime_assert(os_cc_st_r.at(4).at(4)->size() == 156); - } else if (NMAX == 3) { - runtime_assert(os_cc_st_r.size() == 4); - runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30); - runtime_assert(os_cc_st_r.at(2).at(0)->size() == 65); - runtime_assert(os_cc_st_r.at(2).at(1)->size() == 122); - runtime_assert(os_cc_st_r.at(3).at(2)->size() == 209); - runtime_assert(os_cc_st_r.at(3).at(3)->size() == 75); - } -#endif } - -#if CLOSED_SHELL_SPINTRACE -ExprPtr biorthogonal_transform( - const sequant::ExprPtr& expr, const int n_particles, - const std::vector>& ext_index_groups, - const double threshold) { - assert(n_particles != 0); - assert(!ext_index_groups.empty()); - - using sequant::container::svector; - - // Coefficients - std::vector bt_coeff_vec; - { - using namespace Eigen; - // Dimension of permutation matrix is n_particles! 
- int n = std::tgamma(n_particles + 1); - - // Permutation matrix - Eigen::Matrix M(n, n); - { - M.setZero(); - size_t n_row = 0; - svector v(n_particles), v1(n_particles); - std::iota(v.begin(), v.end(), 0); - std::iota(v1.begin(), v1.end(), 0); - do { - std::vector permutation_vector; - do { - auto cycles = sequant::count_cycles(v1, v); - permutation_vector.push_back(std::pow(-2, cycles)); - } while (std::next_permutation(v.begin(), v.end())); - Eigen::VectorXd pv_eig = Eigen::Map( - permutation_vector.data(), permutation_vector.size()); - M.row(n_row) = pv_eig; - ++n_row; - } while (std::next_permutation(v1.begin(), v1.end())); - M *= std::pow(-1, n_particles); - } - - // Normalization constant - double scalar; - { - auto nonZero = [&threshold](const double& d) { - return abs(d) > threshold; - }; - - // Solve system of equations - SelfAdjointEigenSolver eig_solver(M); - std::vector eig_vals(eig_solver.eigenvalues().size()); - VectorXd::Map(&eig_vals[0], eig_solver.eigenvalues().size()) = - eig_solver.eigenvalues(); - - double non0count = - std::count_if(eig_vals.begin(), eig_vals.end(), nonZero); - scalar = eig_vals.size() / non0count; - } - - // Find Pseudo Inverse, get 1st row only - MatrixXd pinv = M.completeOrthogonalDecomposition().pseudoInverse(); - bt_coeff_vec.resize(pinv.rows()); - VectorXd::Map(&bt_coeff_vec[0], bt_coeff_vec.size()) = pinv.row(0) * scalar; - } - - // Transformation maps - std::vector> bt_maps; - { - std::vector idx_list(ext_index_groups.size()); - - for (auto i = 0; i != ext_index_groups.size(); ++i) { - idx_list[i] = *ext_index_groups[i].begin(); - } - - const std::vector const_idx_list = idx_list; - - do { - std::map map; - auto const_list_ptr = const_idx_list.begin(); - for (auto& i : idx_list) { - map.emplace(std::make_pair(*const_list_ptr, i)); - const_list_ptr++; - } - bt_maps.push_back(map); - } while (std::next_permutation(idx_list.begin(), idx_list.end())); - } - - // If this assertion fails, change the threshold parameter - 
assert(bt_coeff_vec.size() == bt_maps.size()); - - // Checks if the replacement map is a canonical sequence - auto is_canonical = [](const std::map& idx_map) { - bool canonical = true; - for (auto&& pair : idx_map) - if (pair.first != pair.second) return false; - return canonical; - }; - - // Scale transformed expressions and append - Sum bt_expr{}; - auto coeff_it = bt_coeff_vec.begin(); - for (auto&& map : bt_maps) { - if (is_canonical(map)) - bt_expr.append(ex(*coeff_it) * expr->clone()); - else - bt_expr.append(ex(*coeff_it) * - sequant::transform_expr(expr->clone(), map)); - coeff_it++; - } - ExprPtr result = std::make_shared(bt_expr); - return result; -} - -// Generate S operator from external index list -ExprPtr symmetrize_expr( - ExprPtr& expr, - const container::vector> ext_index_groups) { - container::vector bra_list, ket_list; - for (auto&& idx_group : ext_index_groups) { - bra_list.push_back(*idx_group.begin()); - ket_list.push_back(*(idx_group.begin() + 1)); - } - - assert(bra_list.size() == ket_list.size()); - auto S = Tensor(L"S", bra_list, ket_list, Symmetry::nonsymm); - return ex(S) * expr; -} -#endif diff --git a/examples/stcc/stcc.cpp b/examples/stcc/stcc.cpp new file mode 100644 index 000000000..017328084 --- /dev/null +++ b/examples/stcc/stcc.cpp @@ -0,0 +1,228 @@ +#include +#include +#include +#include +#include + +#include + +using namespace sequant; + +ExprPtr biorthogonal_transform( + const sequant::ExprPtr& expr, int n_particles, + const std::vector>& ext_index_groups = {{}}, + double threshold = 1.e-12); +ExprPtr symmetrize_expr( + ExprPtr& expr, + const container::vector>& ext_index_groups = {{}}); + +#define runtime_assert(tf) \ + if (!(tf)) { \ + std::ostringstream oss; \ + oss << "failed assert at line " << __LINE__ << " in SRCC example"; \ + throw std::runtime_error(oss.str().c_str()); \ + } + +int main(int argc, char* argv[]) { + mbpt::set_default_convention(); + TensorCanonicalizer::register_instance( + std::make_shared()); + 
+#ifndef NDEBUG + const size_t DEFAULT_NMAX = 3; +#else + const size_t DEFAULT_NMAX = 4; +#endif + const size_t NMAX = argc > 1 ? std::atoi(argv[1]) : DEFAULT_NMAX; + + /// Make external index + auto ext_idx_list = [](const int i_max) { + container::vector> ext_idx_list; + + for (size_t i = 1; i <= i_max; ++i) { + auto label = std::to_wstring(i); + auto occ_i = Index::make_label_index( + IndexSpace::instance(IndexSpace::active_occupied), label); + auto virt_i = Index::make_label_index( + IndexSpace::instance(IndexSpace::active_unoccupied), label); + container::vector pair = {occ_i, virt_i}; + ext_idx_list.push_back(pair); + } + return ext_idx_list; + }; + + // Spin-orbital coupled cluster + auto cc_r = sequant::eqs::cceqvec{NMAX, NMAX}(true, true, true, true, true); + + // + // Closed-shell spintrace (fast) + // + std::vector cc_st_r(cc_r.size()); + for (auto i = 1; i < cc_r.size(); ++i) { + const auto tstart = std::chrono::high_resolution_clock::now(); + auto ext_idx = ext_idx_list(i); + cc_st_r[i] = sequant::closed_shell_CC_spintrace(cc_r[i]); + canonicalize(cc_st_r[i]); + + // Remove S operator + for (auto& term : *cc_st_r[i]) { + if (term->is()) term = remove_tensor(term->as(), L"S"); + } + + // Biorthogonal transformation + cc_st_r[i] = biorthogonal_transform(cc_st_r[i], i, ext_idx); + + // The symmetrizer operator is required for canonicalizer to give the + // correct result + if (i != 1) cc_st_r[i] = symmetrize_expr(cc_st_r[i], ext_idx); + simplify(cc_st_r[i]); + + // Remove S operator + for (auto& term : *cc_st_r[i]) { + if (term->is()) term = remove_tensor(term->as(), L"S"); + } + + auto tstop = std::chrono::high_resolution_clock::now(); + std::chrono::duration time_elapsed = tstop - tstart; + printf("CC R%d size: %lu time: %5.3f sec.\n", i, cc_st_r[i]->size(), + time_elapsed.count()); + } + + if (NMAX == 4) { + runtime_assert(cc_st_r.size() == 5) + runtime_assert(cc_st_r.at(1)->size() == 30) // T1 + runtime_assert(cc_st_r.at(2)->size() == 78) // T2 + 
runtime_assert(cc_st_r.at(3)->size() == 567) // T3 + runtime_assert(cc_st_r.at(4)->size() == 2150) // T4 + } else if (NMAX == 3) { + runtime_assert(cc_st_r.size() == 4) + runtime_assert(cc_st_r.at(1)->size() == 30) // T1 + runtime_assert(cc_st_r.at(2)->size() == 73) // T2 + runtime_assert(cc_st_r.at(3)->size() == 490) // T3 + } +} + +ExprPtr biorthogonal_transform( + const sequant::ExprPtr& expr, const int n_particles, + const std::vector>& ext_index_groups, + const double threshold) { + assert(n_particles != 0); + assert(!ext_index_groups.empty()); + + using sequant::container::svector; + + // Coefficients + std::vector bt_coeff_vec; + { + using namespace Eigen; + // Dimension of permutation matrix is n_particles! + int n = std::tgamma(n_particles + 1); + + // Permutation matrix + Eigen::Matrix M(n, n); + { + M.setZero(); + size_t n_row = 0; + svector v(n_particles), v1(n_particles); + std::iota(v.begin(), v.end(), 0); + std::iota(v1.begin(), v1.end(), 0); + do { + std::vector permutation_vector; + do { + auto cycles = sequant::count_cycles(v1, v); + permutation_vector.push_back(std::pow(-2, cycles)); + } while (std::next_permutation(v.begin(), v.end())); + Eigen::VectorXd pv_eig = Eigen::Map( + permutation_vector.data(), permutation_vector.size()); + M.row(n_row) = pv_eig; + ++n_row; + } while (std::next_permutation(v1.begin(), v1.end())); + M *= std::pow(-1, n_particles); + } + + // Normalization constant + double scalar; + { + auto nonZero = [&threshold](const double& d) { + return abs(d) > threshold; + }; + + // Solve system of equations + SelfAdjointEigenSolver eig_solver(M); + std::vector eig_vals(eig_solver.eigenvalues().size()); + VectorXd::Map(&eig_vals[0], eig_solver.eigenvalues().size()) = + eig_solver.eigenvalues(); + + double non0count = + std::count_if(eig_vals.begin(), eig_vals.end(), nonZero); + scalar = eig_vals.size() / non0count; + } + + // Find Pseudo Inverse, get 1st row only + MatrixXd pinv = 
M.completeOrthogonalDecomposition().pseudoInverse(); + bt_coeff_vec.resize(pinv.rows()); + VectorXd::Map(&bt_coeff_vec[0], bt_coeff_vec.size()) = pinv.row(0) * scalar; + } + + // Transformation maps + std::vector> bt_maps; + { + std::vector idx_list(ext_index_groups.size()); + + for (auto i = 0; i != ext_index_groups.size(); ++i) { + idx_list[i] = *ext_index_groups[i].begin(); + } + + const std::vector const_idx_list = idx_list; + + do { + std::map map; + auto const_list_ptr = const_idx_list.begin(); + for (auto& i : idx_list) { + map.emplace(std::make_pair(*const_list_ptr, i)); + const_list_ptr++; + } + bt_maps.push_back(map); + } while (std::next_permutation(idx_list.begin(), idx_list.end())); + } + + // If this assertion fails, change the threshold parameter + assert(bt_coeff_vec.size() == bt_maps.size()); + + // Checks if the replacement map is a canonical sequence + auto is_canonical = [](const std::map& idx_map) { + bool canonical = true; + for (auto&& pair : idx_map) + if (pair.first != pair.second) return false; + return canonical; + }; + + // Scale transformed expressions and append + Sum bt_expr{}; + auto coeff_it = bt_coeff_vec.begin(); + for (auto&& map : bt_maps) { + if (is_canonical(map)) + bt_expr.append(ex(*coeff_it) * expr->clone()); + else + bt_expr.append(ex(*coeff_it) * + sequant::transform_expr(expr->clone(), map)); + coeff_it++; + } + ExprPtr result = std::make_shared(bt_expr); + return result; +} + +// Generate S operator from external index list +ExprPtr symmetrize_expr( + ExprPtr& expr, + const container::vector>& ext_index_groups) { + container::vector bra_list, ket_list; + for (auto&& idx_group : ext_index_groups) { + bra_list.push_back(*idx_group.begin()); + ket_list.push_back(*(idx_group.begin() + 1)); + } + + assert(bra_list.size() == ket_list.size()); + auto S = Tensor(L"S", bra_list, ket_list, Symmetry::nonsymm); + return ex(S) * expr; +} From 4ef686f16557a403f1c316b32e3b348f22f9db8e Mon Sep 17 00:00:00 2001 From: nakulteke Date: 
Wed, 13 Apr 2022 19:15:20 -0400 Subject: [PATCH 058/120] [Bugfix] `Constant(sum_of_prefactors / first_prefactor);` gave the `nan` --- SeQuant/core/expr.cpp | 22 ++++++++++------------ SeQuant/core/expr.hpp | 5 +++++ 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/SeQuant/core/expr.cpp b/SeQuant/core/expr.cpp index 1cf03f091..2f07fca0a 100644 --- a/SeQuant/core/expr.cpp +++ b/SeQuant/core/expr.cpp @@ -264,24 +264,22 @@ ExprPtr Sum::canonicalize_impl(bool multipass) { assert(nidentical > 1); auto reduce_range = [first_it, this, nidentical](auto &begin, auto &end) { if ((*first_it)->template is()) { - assert((*first_it)->template is()); Product tensor_as_Product{}; tensor_as_Product.append(static_cast(nidentical), (*first_it)->as()); (*first_it) = std::make_shared(tensor_as_Product); this->summands_.erase(first_it + 1, end); } else if ((*first_it)->template is()) { - const auto first_prefactor = - (*first_it)->template as().scalar(); - // add prefactors of each product - const auto sum_of_prefactors = std::accumulate( - begin, end, Product::scalar_type(0), - [](auto &acc, const auto &prod) { - assert(prod->template is()); - return acc + prod->template as().scalar(); - }); - (*first_it)->template as() *= - Constant(sum_of_prefactors / first_prefactor); + for (auto it = begin + 1; it != end; ++it) { + if ((*it)->template is()) { + Product tensor_as_Product{}; + tensor_as_Product.append(1.0, (*it)->template as()); + (*it) = std::make_shared(tensor_as_Product); + } else if ((*it)->template is()) { + std::static_pointer_cast(*first_it)->add_identical( + std::static_pointer_cast(*it)); + } + } this->summands_.erase(first_it + 1, end); } }; diff --git a/SeQuant/core/expr.hpp b/SeQuant/core/expr.hpp index 9e157ee25..3a18d6b94 100644 --- a/SeQuant/core/expr.hpp +++ b/SeQuant/core/expr.hpp @@ -865,6 +865,11 @@ class Product : public Expr { return *this; } + void add_identical(const std::shared_ptr &other) { + assert(this->hash_value() == other->hash_value()); + 
scalar_ += other->scalar_; + } + private: scalar_type scalar_ = {1.0, 0.0}; container::svector factors_{}; From 25f0dc15e994b8b248320c8867a217b79ed16312 Mon Sep 17 00:00:00 2001 From: nakulteke Date: Wed, 13 Apr 2022 19:19:20 -0400 Subject: [PATCH 059/120] [clang-format] headers for latex printing --- examples/osstcc/osstcc.cpp | 51 ++++++++++++++++++++++++-------------- examples/stcc/stcc.cpp | 37 ++++++++++++++++++--------- 2 files changed, 57 insertions(+), 31 deletions(-) diff --git a/examples/osstcc/osstcc.cpp b/examples/osstcc/osstcc.cpp index fb0ea303d..4f2762c37 100644 --- a/examples/osstcc/osstcc.cpp +++ b/examples/osstcc/osstcc.cpp @@ -4,16 +4,29 @@ #include #include +#include + using namespace sequant; -#define runtime_assert(tf) \ - if (!(tf)) { \ - std::ostringstream oss; \ - oss << "failed assert at line " << __LINE__ << " in SRCC example"; \ - throw std::runtime_error(oss.str().c_str()); \ +#define runtime_assert(tf) \ + if (!(tf)) { \ + std::ostringstream oss; \ + oss << "failed assert at line " << __LINE__ \ + << " in open-shell spin-traced coupled cluster example"; \ + throw std::runtime_error(oss.str().c_str()); \ } int main(int argc, char* argv[]) { + std::setlocale(LC_ALL, "en_US.UTF-8"); + std::wcout.precision(std::numeric_limits::max_digits10); + std::wcerr.precision(std::numeric_limits::max_digits10); + std::wcout.sync_with_stdio(false); + std::wcerr.sync_with_stdio(false); + std::wcout.imbue(std::locale("en_US.UTF-8")); + std::wcerr.imbue(std::locale("en_US.UTF-8")); + std::wcout.sync_with_stdio(true); + std::wcerr.sync_with_stdio(true); + mbpt::set_default_convention(); TensorCanonicalizer::register_instance( std::make_shared()); @@ -100,21 +113,21 @@ int main(int argc, char* argv[]) { } if (NMAX == 4) { - runtime_assert(os_cc_st_r.size() == 5) - runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30) - runtime_assert(os_cc_st_r.at(2).at(1)->size() == 130) - runtime_assert(os_cc_st_r.at(2).at(2)->size() == 74) - 
runtime_assert(os_cc_st_r.at(3).at(1)->size() == 249) - runtime_assert(os_cc_st_r.at(3).at(3)->size() == 124) - runtime_assert(os_cc_st_r.at(4).at(1)->size() == 356) - runtime_assert(os_cc_st_r.at(4).at(2)->size() == 386) - runtime_assert(os_cc_st_r.at(4).at(4)->size() == 156) + runtime_assert(os_cc_st_r.size() == + 5) runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30) + runtime_assert(os_cc_st_r.at(2).at(1)->size() == 130) runtime_assert( + os_cc_st_r.at(2).at(2)->size() == + 74) runtime_assert(os_cc_st_r.at(3).at(1)->size() == 249) + runtime_assert(os_cc_st_r.at(3).at(3)->size() == 124) + runtime_assert(os_cc_st_r.at(4).at(1)->size() == 356) + runtime_assert(os_cc_st_r.at(4).at(2)->size() == 386) + runtime_assert(os_cc_st_r.at(4).at(4)->size() == 156) } else if (NMAX == 3) { runtime_assert(os_cc_st_r.size() == 4) - runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30) - runtime_assert(os_cc_st_r.at(2).at(0)->size() == 65) - runtime_assert(os_cc_st_r.at(2).at(1)->size() == 122) - runtime_assert(os_cc_st_r.at(3).at(2)->size() == 209) - runtime_assert(os_cc_st_r.at(3).at(3)->size() == 75) + runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30) + runtime_assert(os_cc_st_r.at(2).at(0)->size() == 65) + runtime_assert(os_cc_st_r.at(2).at(1)->size() == 122) + runtime_assert(os_cc_st_r.at(3).at(2)->size() == 209) + runtime_assert(os_cc_st_r.at(3).at(3)->size() == 75) } } diff --git a/examples/stcc/stcc.cpp b/examples/stcc/stcc.cpp index 017328084..72904eec6 100644 --- a/examples/stcc/stcc.cpp +++ b/examples/stcc/stcc.cpp @@ -4,6 +4,8 @@ #include #include +#include + #include using namespace sequant; @@ -16,14 +18,25 @@ ExprPtr symmetrize_expr( ExprPtr& expr, const container::vector>& ext_index_groups = {{}}); -#define runtime_assert(tf) \ - if (!(tf)) { \ - std::ostringstream oss; \ - oss << "failed assert at line " << __LINE__ << " in SRCC example"; \ - throw std::runtime_error(oss.str().c_str()); \ +#define runtime_assert(tf) \ + if (!(tf)) { \ + std::ostringstream oss; 
\ + oss << "failed assert at line " << __LINE__ \ + << " in closed-shell spin-traced coupled cluster example"; \ + throw std::runtime_error(oss.str().c_str()); \ } int main(int argc, char* argv[]) { + std::setlocale(LC_ALL, "en_US.UTF-8"); + std::wcout.precision(std::numeric_limits::max_digits10); + std::wcerr.precision(std::numeric_limits::max_digits10); + std::wcout.sync_with_stdio(false); + std::wcerr.sync_with_stdio(false); + std::wcout.imbue(std::locale("en_US.UTF-8")); + std::wcerr.imbue(std::locale("en_US.UTF-8")); + std::wcout.sync_with_stdio(true); + std::wcerr.sync_with_stdio(true); + mbpt::set_default_convention(); TensorCanonicalizer::register_instance( std::make_shared()); @@ -90,15 +103,15 @@ int main(int argc, char* argv[]) { if (NMAX == 4) { runtime_assert(cc_st_r.size() == 5) - runtime_assert(cc_st_r.at(1)->size() == 30) // T1 - runtime_assert(cc_st_r.at(2)->size() == 78) // T2 - runtime_assert(cc_st_r.at(3)->size() == 567) // T3 - runtime_assert(cc_st_r.at(4)->size() == 2150) // T4 + runtime_assert(cc_st_r.at(1)->size() == 30) // T1 + runtime_assert(cc_st_r.at(2)->size() == 78) // T2 + runtime_assert(cc_st_r.at(3)->size() == 567) // T3 + runtime_assert(cc_st_r.at(4)->size() == 2150) // T4 } else if (NMAX == 3) { runtime_assert(cc_st_r.size() == 4) - runtime_assert(cc_st_r.at(1)->size() == 30) // T1 - runtime_assert(cc_st_r.at(2)->size() == 73) // T2 - runtime_assert(cc_st_r.at(3)->size() == 490) // T3 + runtime_assert(cc_st_r.at(1)->size() == 30) // T1 + runtime_assert(cc_st_r.at(2)->size() == 73) // T2 + runtime_assert(cc_st_r.at(3)->size() == 490) // T3 } } From c7446f977aee2582f8c3ff3783534f1a78285b50 Mon Sep 17 00:00:00 2001 From: nakulteke Date: Wed, 13 Apr 2022 19:31:09 -0400 Subject: [PATCH 060/120] removed pCCSDT feature --- CMakeLists.txt | 34 +++++++++++++----------------- examples/pCCSDT/pccsdt.cpp | 42 -------------------------------------- 2 files changed, 14 insertions(+), 62 deletions(-) delete mode 100644 examples/pCCSDT/pccsdt.cpp 
diff --git a/CMakeLists.txt b/CMakeLists.txt index 0fd1a3a0b..afbd90397 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -356,12 +356,6 @@ if (BUILD_TESTING) examples/${example3}/${example3}.cpp) target_link_libraries(${example3} SeQuant ${TBB_LIBRARIES}) - # parametrized CCSDT (spin-orbital) - set(example4 pccsdt) - add_executable(${example4} EXCLUDE_FROM_ALL - examples/${example4}/${example4}.cpp) - target_link_libraries(${example4} SeQuant) - set(example_eval_src examples/eval/eval_utils.hpp examples/eval/options.hpp @@ -375,37 +369,37 @@ if (BUILD_TESTING) ) if (TARGET tiledarray) - set(example5 eval_ta) - add_executable(${example5} EXCLUDE_FROM_ALL + set(example4 eval_ta) + add_executable(${example4} EXCLUDE_FROM_ALL ${example_eval_src} SeQuant/domain/eval/eval_ta.hpp examples/eval/ta/data_world_ta.hpp examples/eval/ta/scf_ta.hpp examples/eval/ta/main.cpp) - target_link_libraries(${example5} SeQuant ${TBB_LIBRARIES} tiledarray) + target_link_libraries(${example4} SeQuant ${TBB_LIBRARIES} tiledarray) endif (TARGET tiledarray) - set(example6 antisymmetrizer_test) - add_executable(${example6} EXCLUDE_FROM_ALL - examples/${example6}/${example6}.cpp) - target_link_libraries(${example6} SeQuant ${TBB_LIBRARIES}) - target_link_directories(${example6} PUBLIC ${TBB_LIBRARIES}) - target_compile_definitions(${example6} PRIVATE SEQUANT_HAS_TILEDARRAY) + set(example5 antisymmetrizer_test) + add_executable(${example5} EXCLUDE_FROM_ALL + examples/${example5}/${example5}.cpp) + target_link_libraries(${example5} SeQuant ${TBB_LIBRARIES}) + target_link_directories(${example5} PUBLIC ${TBB_LIBRARIES}) + target_compile_definitions(${example5} PRIVATE SEQUANT_HAS_TILEDARRAY) if (BTAS_SOURCE_DIR) - set(example7 eval_btas) - add_executable(${example7} EXCLUDE_FROM_ALL + set(example6 eval_btas) + add_executable(${example6} EXCLUDE_FROM_ALL ${example_eval_src} SeQuant/domain/eval/eval_btas.hpp examples/eval/btas/data_world_btas.hpp examples/eval/btas/scf_btas.hpp 
examples/eval/btas/main.cpp) - target_include_directories(${example7} PUBLIC ${BTAS_SOURCE_DIR} ${Boost_INCLUDE_DIRS}) - target_link_libraries(${example7} SeQuant ${TBB_LIBRARIES}) + target_include_directories(${example6} PUBLIC ${BTAS_SOURCE_DIR} ${Boost_INCLUDE_DIRS}) + target_link_libraries(${example6} SeQuant ${TBB_LIBRARIES}) endif (BTAS_SOURCE_DIR) # add tests for running examples - set(lastexample 8) + set(lastexample 7) foreach (i RANGE ${lastexample}) if (TARGET ${example${i}}) add_test(sequant/example/${example${i}}/build "${CMAKE_COMMAND}" diff --git a/examples/pCCSDT/pccsdt.cpp b/examples/pCCSDT/pccsdt.cpp deleted file mode 100644 index e61300712..000000000 --- a/examples/pCCSDT/pccsdt.cpp +++ /dev/null @@ -1,42 +0,0 @@ -// -// Created by Nakul Teke on 4/13/22. -// -// Spin-orbital pCCSDT example -#include -#include -#include -#include - -#include - -using namespace sequant; - -#define runtime_assert(tf) \ - if (!(tf)) { \ - std::ostringstream oss; \ - oss << "failed assert at line " << __LINE__ << " in SRCC example"; \ - throw std::runtime_error(oss.str().c_str()); \ - } - -int main(int argc, char* argv[]) { - std::setlocale(LC_ALL, "en_US.UTF-8"); - std::wcout.precision(std::numeric_limits::max_digits10); - std::wcerr.precision(std::numeric_limits::max_digits10); - std::wcout.sync_with_stdio(false); - std::wcerr.sync_with_stdio(false); - std::wcout.imbue(std::locale("en_US.UTF-8")); - std::wcerr.imbue(std::locale("en_US.UTF-8")); - std::wcout.sync_with_stdio(true); - std::wcerr.sync_with_stdio(true); - sequant::detail::OpIdRegistrar op_id_registrar; - - mbpt::set_default_convention(); - - using sequant::eqs::compute_all; - - TensorCanonicalizer::register_instance( - std::make_shared()); - - auto cc_r = sequant::eqs::cceqvec{3, 3}(true, true, true, true, true); - -} From 7e5c9a258e8200f4b1627be32a8fb75038ddb5a0 Mon Sep 17 00:00:00 2001 From: nakulteke Date: Thu, 14 Apr 2022 11:27:47 -0400 Subject: [PATCH 061/120] added comments to identify the T 
amplitude --- examples/osstcc/osstcc.cpp | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/examples/osstcc/osstcc.cpp b/examples/osstcc/osstcc.cpp index 4f2762c37..31df98064 100644 --- a/examples/osstcc/osstcc.cpp +++ b/examples/osstcc/osstcc.cpp @@ -113,21 +113,21 @@ int main(int argc, char* argv[]) { } if (NMAX == 4) { - runtime_assert(os_cc_st_r.size() == - 5) runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30) - runtime_assert(os_cc_st_r.at(2).at(1)->size() == 130) runtime_assert( - os_cc_st_r.at(2).at(2)->size() == - 74) runtime_assert(os_cc_st_r.at(3).at(1)->size() == 249) - runtime_assert(os_cc_st_r.at(3).at(3)->size() == 124) - runtime_assert(os_cc_st_r.at(4).at(1)->size() == 356) - runtime_assert(os_cc_st_r.at(4).at(2)->size() == 386) - runtime_assert(os_cc_st_r.at(4).at(4)->size() == 156) + runtime_assert(os_cc_st_r.size() == 5) + runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30) // T1a + runtime_assert(os_cc_st_r.at(2).at(1)->size() == 130) // T2ab + runtime_assert(os_cc_st_r.at(2).at(2)->size() == 74) // T2bb + runtime_assert(os_cc_st_r.at(3).at(1)->size() == 249) // T3aab + runtime_assert(os_cc_st_r.at(3).at(3)->size() == 124) // T3bbb + runtime_assert(os_cc_st_r.at(4).at(1)->size() == 356) // T4aaab + runtime_assert(os_cc_st_r.at(4).at(2)->size() == 386) // T4aabb + runtime_assert(os_cc_st_r.at(4).at(4)->size() == 156) // T4bbbb } else if (NMAX == 3) { runtime_assert(os_cc_st_r.size() == 4) - runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30) - runtime_assert(os_cc_st_r.at(2).at(0)->size() == 65) - runtime_assert(os_cc_st_r.at(2).at(1)->size() == 122) - runtime_assert(os_cc_st_r.at(3).at(2)->size() == 209) - runtime_assert(os_cc_st_r.at(3).at(3)->size() == 75) + runtime_assert(os_cc_st_r.at(1).at(0)->size() == 30) // T1a + runtime_assert(os_cc_st_r.at(2).at(0)->size() == 65) // T2aa + runtime_assert(os_cc_st_r.at(2).at(1)->size() == 122) // T2ab + runtime_assert(os_cc_st_r.at(3).at(2)->size() == 
209) // T3abb + runtime_assert(os_cc_st_r.at(3).at(3)->size() == 75) // T3bbb } } From 405f48091d7af836f8afccb28395286856ccc0d4 Mon Sep 17 00:00:00 2001 From: nakulteke Date: Thu, 14 Apr 2022 17:17:17 -0400 Subject: [PATCH 062/120] Coupled cluster spin tracing test with regular (spin-substitution) approach --- CMakeLists.txt | 52 +++-- examples/stcc_rigorous/stcc_rigorous.cpp | 243 +++++++++++++++++++++++ 2 files changed, 274 insertions(+), 21 deletions(-) create mode 100644 examples/stcc_rigorous/stcc_rigorous.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index afbd90397..0101ab1a2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -332,29 +332,39 @@ if (BUILD_TESTING) ####### Tests ######## - # Single-Reference Coupled-Cluster equation generation (spin-orbital) + # Single-Reference Coupled-Cluster equation generation + # (spin-orbital) set(example0 srcc) add_executable(${example0} EXCLUDE_FROM_ALL examples/${example0}/${example0}.cpp) target_link_libraries(${example0} SeQuant) - # Single-Reference closed-shell Coupled-Cluster equation generation (spin-traced) + # Single-Reference closed-shell Coupled-Cluster equation + # generation (fast spin-traced) set(example1 stcc) add_executable(${example1} EXCLUDE_FROM_ALL examples/${example1}/${example1}.cpp) find_package(Eigen3 3.0 REQUIRED NO_MODULE) target_link_libraries(${example1} SeQuant Eigen3::Eigen) - # Single-Reference open-shell equation generation (spin-traced) - set(example2 osstcc) + # Single-Reference closed-shell Coupled-Cluster equation + # generation (rigorous spin-traced) + set(example2 stcc_rigorous) add_executable(${example2} EXCLUDE_FROM_ALL examples/${example2}/${example2}.cpp) - target_link_libraries(${example2} SeQuant) + find_package(Eigen3 3.0 REQUIRED NO_MODULE) + target_link_libraries(${example2} SeQuant Eigen3::Eigen) - set(example3 uccf12) + # Single-Reference open-shell equation generation (spin-traced) + set(example3 osstcc) add_executable(${example3} EXCLUDE_FROM_ALL 
examples/${example3}/${example3}.cpp) - target_link_libraries(${example3} SeQuant ${TBB_LIBRARIES}) + target_link_libraries(${example3} SeQuant) + + set(example4 uccf12) + add_executable(${example4} EXCLUDE_FROM_ALL + examples/${example4}/${example4}.cpp) + target_link_libraries(${example4} SeQuant ${TBB_LIBRARIES}) set(example_eval_src examples/eval/eval_utils.hpp @@ -369,37 +379,37 @@ if (BUILD_TESTING) ) if (TARGET tiledarray) - set(example4 eval_ta) - add_executable(${example4} EXCLUDE_FROM_ALL + set(example5 eval_ta) + add_executable(${example5} EXCLUDE_FROM_ALL ${example_eval_src} SeQuant/domain/eval/eval_ta.hpp examples/eval/ta/data_world_ta.hpp examples/eval/ta/scf_ta.hpp examples/eval/ta/main.cpp) - target_link_libraries(${example4} SeQuant ${TBB_LIBRARIES} tiledarray) + target_link_libraries(${example5} SeQuant ${TBB_LIBRARIES} tiledarray) endif (TARGET tiledarray) - set(example5 antisymmetrizer_test) - add_executable(${example5} EXCLUDE_FROM_ALL - examples/${example5}/${example5}.cpp) - target_link_libraries(${example5} SeQuant ${TBB_LIBRARIES}) - target_link_directories(${example5} PUBLIC ${TBB_LIBRARIES}) - target_compile_definitions(${example5} PRIVATE SEQUANT_HAS_TILEDARRAY) + set(example6 antisymmetrizer_test) + add_executable(${example6} EXCLUDE_FROM_ALL + examples/${example6}/${example6}.cpp) + target_link_libraries(${example6} SeQuant ${TBB_LIBRARIES}) + target_link_directories(${example6} PUBLIC ${TBB_LIBRARIES}) + target_compile_definitions(${example6} PRIVATE SEQUANT_HAS_TILEDARRAY) if (BTAS_SOURCE_DIR) - set(example6 eval_btas) - add_executable(${example6} EXCLUDE_FROM_ALL + set(example7 eval_btas) + add_executable(${example7} EXCLUDE_FROM_ALL ${example_eval_src} SeQuant/domain/eval/eval_btas.hpp examples/eval/btas/data_world_btas.hpp examples/eval/btas/scf_btas.hpp examples/eval/btas/main.cpp) - target_include_directories(${example6} PUBLIC ${BTAS_SOURCE_DIR} ${Boost_INCLUDE_DIRS}) - target_link_libraries(${example6} SeQuant 
${TBB_LIBRARIES}) + target_include_directories(${example7} PUBLIC ${BTAS_SOURCE_DIR} ${Boost_INCLUDE_DIRS}) + target_link_libraries(${example7} SeQuant ${TBB_LIBRARIES}) endif (BTAS_SOURCE_DIR) # add tests for running examples - set(lastexample 7) + set(lastexample 8) foreach (i RANGE ${lastexample}) if (TARGET ${example${i}}) add_test(sequant/example/${example${i}}/build "${CMAKE_COMMAND}" diff --git a/examples/stcc_rigorous/stcc_rigorous.cpp b/examples/stcc_rigorous/stcc_rigorous.cpp new file mode 100644 index 000000000..a3d98ace9 --- /dev/null +++ b/examples/stcc_rigorous/stcc_rigorous.cpp @@ -0,0 +1,243 @@ +#include +#include +#include +#include +#include + +#include + +#include + +using namespace sequant; + +ExprPtr biorthogonal_transform( + const sequant::ExprPtr& expr, int n_particles, + const std::vector>& ext_index_groups = {{}}, + double threshold = 1.e-12); +ExprPtr symmetrize_expr( + ExprPtr& expr, + const container::vector>& ext_index_groups = {{}}); + +#define runtime_assert(tf) \ + if (!(tf)) { \ + std::ostringstream oss; \ + oss << "failed assert at line " << __LINE__ \ + << " in closed-shell spin-traced coupled cluster example"; \ + throw std::runtime_error(oss.str().c_str()); \ + } + +int main(int argc, char* argv[]) { + std::setlocale(LC_ALL, "en_US.UTF-8"); + std::wcout.precision(std::numeric_limits::max_digits10); + std::wcerr.precision(std::numeric_limits::max_digits10); + std::wcout.sync_with_stdio(false); + std::wcerr.sync_with_stdio(false); + std::wcout.imbue(std::locale("en_US.UTF-8")); + std::wcerr.imbue(std::locale("en_US.UTF-8")); + std::wcout.sync_with_stdio(true); + std::wcerr.sync_with_stdio(true); + + mbpt::set_default_convention(); + TensorCanonicalizer::register_instance( + std::make_shared()); + +#ifndef NDEBUG + const size_t DEFAULT_NMAX = 2; +#else + const size_t DEFAULT_NMAX = 3; +#endif + const size_t NMAX = argc > 1 ? 
std::atoi(argv[1]) : DEFAULT_NMAX; + + /// Make external index + auto ext_idx_list = [](const int i_max) { + container::vector> ext_idx_list; + + for (size_t i = 1; i <= i_max; ++i) { + auto label = std::to_wstring(i); + auto occ_i = Index::make_label_index( + IndexSpace::instance(IndexSpace::active_occupied), label); + auto virt_i = Index::make_label_index( + IndexSpace::instance(IndexSpace::active_unoccupied), label); + container::vector pair = {occ_i, virt_i}; + ext_idx_list.push_back(pair); + } + return ext_idx_list; + }; + + // Spin-orbital coupled cluster + auto cc_r = sequant::eqs::cceqvec{NMAX, NMAX}(true, true, true, true, true); + for (auto i = 1; i < cc_r.size(); ++i) { + std::cout << "Spin-orbital CC R" << i << " size: " << cc_r[i]->size() << "\n"; + } + + // + // Closed-shell spintrace (fast) + // + std::cout << "\nClosed-shell coupled cluster:\n"; + std::vector cc_st_r(cc_r.size()); + for (auto i = 1; i < cc_r.size(); ++i) { + const auto tstart = std::chrono::high_resolution_clock::now(); + auto ext_idx = ext_idx_list(i); + cc_st_r[i] = sequant::spintrace(cc_r[i], ext_idx_list(i)); + canonicalize(cc_st_r[i]); + + // Remove S operator + for (auto& term : *cc_st_r[i]) { + if (term->is()) term = remove_tensor(term->as(), L"S"); + } + + // Biorthogonal transformation + cc_st_r[i] = biorthogonal_transform(cc_st_r[i], i, ext_idx); + + // The symmetrizer operator is required for canonicalizer to give the + // correct result + if (i != 1) cc_st_r[i] = symmetrize_expr(cc_st_r[i], ext_idx); + simplify(cc_st_r[i]); + + // Remove S operator + for (auto& term : *cc_st_r[i]) { + if (term->is()) term = remove_tensor(term->as(), L"S"); + } + + auto tstop = std::chrono::high_resolution_clock::now(); + std::chrono::duration time_elapsed = tstop - tstart; + printf("CC R%d size: %lu time: %5.3f sec.\n", i, cc_st_r[i]->size(), + time_elapsed.count()); + } + + if (NMAX == 2) { + runtime_assert(cc_st_r.size() == 3) + runtime_assert(cc_st_r.at(1)->size() == 26) // T1 + 
runtime_assert(cc_st_r.at(2)->size() == 55) // T2 + } else if (NMAX == 3) { + runtime_assert(cc_st_r.size() == 4) + runtime_assert(cc_st_r.at(1)->size() == 30) // T1 + runtime_assert(cc_st_r.at(2)->size() == 73) // T2 + runtime_assert(cc_st_r.at(3)->size() == 490) // T3 + } +} + +ExprPtr biorthogonal_transform( + const sequant::ExprPtr& expr, const int n_particles, + const std::vector>& ext_index_groups, + const double threshold) { + assert(n_particles != 0); + assert(!ext_index_groups.empty()); + + using sequant::container::svector; + + // Coefficients + std::vector bt_coeff_vec; + { + using namespace Eigen; + // Dimension of permutation matrix is n_particles! + int n = std::tgamma(n_particles + 1); + + // Permutation matrix + Eigen::Matrix M(n, n); + { + M.setZero(); + size_t n_row = 0; + svector v(n_particles), v1(n_particles); + std::iota(v.begin(), v.end(), 0); + std::iota(v1.begin(), v1.end(), 0); + do { + std::vector permutation_vector; + do { + permutation_vector.push_back( + std::pow(-2, sequant::count_cycles(v1, v))); + } while (std::next_permutation(v.begin(), v.end())); + Eigen::VectorXd pv_eig = Eigen::Map( + permutation_vector.data(), permutation_vector.size()); + M.row(n_row) = pv_eig; + ++n_row; + } while (std::next_permutation(v1.begin(), v1.end())); + M *= std::pow(-1, n_particles); + } + + // Normalization constant + double scalar; + { + auto nonZero = [&threshold](const double& d) { + return abs(d) > threshold; + }; + + // Solve system of equations + SelfAdjointEigenSolver eig_solver(M); + std::vector eig_vals(eig_solver.eigenvalues().size()); + VectorXd::Map(&eig_vals[0], eig_solver.eigenvalues().size()) = + eig_solver.eigenvalues(); + + double non0count = + std::count_if(eig_vals.begin(), eig_vals.end(), nonZero); + scalar = eig_vals.size() / non0count; + } + + // Find Pseudo Inverse, get 1st row only + MatrixXd pinv = M.completeOrthogonalDecomposition().pseudoInverse(); + bt_coeff_vec.resize(pinv.rows()); + VectorXd::Map(&bt_coeff_vec[0], 
bt_coeff_vec.size()) = pinv.row(0) * scalar; + } + + // Transformation maps + std::vector> bt_maps; + { + std::vector idx_list(ext_index_groups.size()); + + for (auto i = 0; i != ext_index_groups.size(); ++i) { + idx_list[i] = *ext_index_groups[i].begin(); + } + + const std::vector const_idx_list = idx_list; + + do { + std::map map; + auto const_list_ptr = const_idx_list.begin(); + for (auto& i : idx_list) { + map.emplace(std::make_pair(*const_list_ptr, i)); + const_list_ptr++; + } + bt_maps.push_back(map); + } while (std::next_permutation(idx_list.begin(), idx_list.end())); + } + + // If this assertion fails, change the threshold parameter + assert(bt_coeff_vec.size() == bt_maps.size()); + + // Checks if the replacement map is a canonical sequence + auto is_canonical = [](const std::map& idx_map) { + bool canonical = true; + for (auto&& pair : idx_map) + if (pair.first != pair.second) return false; + return canonical; + }; + + // Scale transformed expressions and append + Sum bt_expr{}; + auto coeff_it = bt_coeff_vec.begin(); + for (auto&& map : bt_maps) { + if (is_canonical(map)) + bt_expr.append(ex(*coeff_it) * expr->clone()); + else + bt_expr.append(ex(*coeff_it) * + sequant::transform_expr(expr->clone(), map)); + coeff_it++; + } + ExprPtr result = std::make_shared(bt_expr); + return result; +} + +// Generate S operator from external index list +ExprPtr symmetrize_expr( + ExprPtr& expr, + const container::vector>& ext_index_groups) { + container::vector bra_list, ket_list; + for (auto&& idx_group : ext_index_groups) { + bra_list.push_back(*idx_group.begin()); + ket_list.push_back(*(idx_group.begin() + 1)); + } + + assert(bra_list.size() == ket_list.size()); + auto S = Tensor(L"S", bra_list, ket_list, Symmetry::nonsymm); + return ex(S) * expr; +} From 505afb6d7b26a4bbaa9d4af47d9bb2da6e06c1f8 Mon Sep 17 00:00:00 2001 From: nakulteke Date: Thu, 14 Apr 2022 17:17:47 -0400 Subject: [PATCH 063/120] Print nterms in spin-orbital expressions --- 
examples/osstcc/osstcc.cpp | 10 ++++++++-- examples/stcc/stcc.cpp | 8 ++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/examples/osstcc/osstcc.cpp b/examples/osstcc/osstcc.cpp index 31df98064..b09f747da 100644 --- a/examples/osstcc/osstcc.cpp +++ b/examples/osstcc/osstcc.cpp @@ -56,14 +56,18 @@ int main(int argc, char* argv[]) { // Spin-orbital coupled cluster auto cc_r = sequant::eqs::cceqvec{NMAX, NMAX}(true, true, true, true, true); + for (auto i = 1; i < cc_r.size(); ++i) { + std::cout << "Spin-orbital CC R" << i << " size: " << cc_r[i]->size() << "\n"; + } // // Open-shell spintrace // - std::cout << "Open-shell coupled cluster: nterms per spin blocks: " + std::cout << "\nOpen-shell coupled cluster: nterms per spin blocks: " << std::endl; std::vector> os_cc_st_r(cc_r.size()); for (auto i = 1; i < cc_r.size(); ++i) { + const auto tstart = std::chrono::high_resolution_clock::now(); Tensor A = cc_r[i]->as().summand(0)->as().factors()[0]->as(); assert(A.label() == L"A"); @@ -109,7 +113,9 @@ int main(int argc, char* argv[]) { } os_cc_st_r.at(i) = std::move(expr_vec); - std::cout << "\n"; + auto tstop = std::chrono::high_resolution_clock::now(); + std::chrono::duration time_elapsed = tstop - tstart; + printf(" time: %5.3f sec.\n", time_elapsed.count()); } if (NMAX == 4) { diff --git a/examples/stcc/stcc.cpp b/examples/stcc/stcc.cpp index 72904eec6..1cb6f31e7 100644 --- a/examples/stcc/stcc.cpp +++ b/examples/stcc/stcc.cpp @@ -66,10 +66,14 @@ int main(int argc, char* argv[]) { // Spin-orbital coupled cluster auto cc_r = sequant::eqs::cceqvec{NMAX, NMAX}(true, true, true, true, true); + for (auto i = 1; i < cc_r.size(); ++i) { + std::cout << "Spin-orbital CC R" << i << " size: " << cc_r[i]->size() << "\n"; + } // // Closed-shell spintrace (fast) // + std::cout << "\nClosed-shell coupled cluster:\n"; std::vector cc_st_r(cc_r.size()); for (auto i = 1; i < cc_r.size(); ++i) { const auto tstart = std::chrono::high_resolution_clock::now(); @@ -142,8 
+146,8 @@ ExprPtr biorthogonal_transform( do { std::vector permutation_vector; do { - auto cycles = sequant::count_cycles(v1, v); - permutation_vector.push_back(std::pow(-2, cycles)); + permutation_vector.push_back( + std::pow(-2, sequant::count_cycles(v1, v))); } while (std::next_permutation(v.begin(), v.end())); Eigen::VectorXd pv_eig = Eigen::Map( permutation_vector.data(), permutation_vector.size()); From 58dada51446ead4d7efe127c47539fcd88ffd97e Mon Sep 17 00:00:00 2001 From: nakulteke Date: Thu, 14 Apr 2022 17:19:03 -0400 Subject: [PATCH 064/120] comments that explain lambdas, renamed variables, cleanup --- SeQuant/domain/mbpt/spin.cpp | 183 ++++++++++++++++++++--------------- SeQuant/domain/mbpt/spin.hpp | 2 +- 2 files changed, 104 insertions(+), 81 deletions(-) diff --git a/SeQuant/domain/mbpt/spin.cpp b/SeQuant/domain/mbpt/spin.cpp index fb75427e7..596692e73 100644 --- a/SeQuant/domain/mbpt/spin.cpp +++ b/SeQuant/domain/mbpt/spin.cpp @@ -187,9 +187,10 @@ bool can_expand(const Tensor& tensor) { if (tensor.bra_rank() != tensor.ket_rank()) return false; // indices with non-qns are not allowed - for (auto& idx : tensor.const_braket()) { - assert(idx.space().qns() != IndexSpace::nullqns); - } + assert(std::all_of(tensor.const_braket().begin(), tensor.const_braket().end(), + [](const auto& idx) { + return idx.space().qns() != IndexSpace::nullqns; + })); // count alpha indices in bra auto is_alpha = [](const Index& idx) { @@ -444,17 +445,16 @@ ExprPtr symmetrize_expr(const Product& product) { if (A_is_nconserving) { S = Tensor(L"S", A_tensor.bra(), A_tensor.ket(), Symmetry::nonsymm); } else { // A is not nconserving - size_t n = (A_tensor.bra_rank() < A_tensor.ket_rank()) - ? 
A_tensor.bra_rank() - : A_tensor.ket_rank(); + auto n = std::min(A_tensor.bra_rank(), A_tensor.ket_rank()); container::svector bra_list(A_tensor.bra().begin(), - A_tensor.bra().end()); + A_tensor.bra().begin() + n); container::svector ket_list(A_tensor.ket().begin(), - A_tensor.ket().end()); + A_tensor.ket().begin() + n); S = Tensor(L"S", bra_list, ket_list, Symmetry::nonsymm); } - // Generate replacement maps from a list(could be a bra or a ket) + // Generate replacement maps from a list of Index type (could be a bra or a + // ket) // Uses a permuted list of int to generate permutations // TODO factor out for reuse auto maps_from_list = [](const container::svector& list) { @@ -491,10 +491,9 @@ ExprPtr symmetrize_expr(const Product& product) { maps = maps_from_list(A_tensor.bra()); } else { assert(A_tensor.bra_rank() != A_tensor.ket_rank()); - if (A_tensor.bra_rank() > A_tensor.ket_rank()) - maps = maps_from_list(A_tensor.bra()); - else - maps = maps_from_list(A_tensor.ket()); + maps = A_tensor.bra_rank() > A_tensor.ket_rank() + ? maps_from_list(A_tensor.bra()) + : maps_from_list(A_tensor.ket()); } assert(!maps.empty()); for (auto&& map : maps) { @@ -777,7 +776,12 @@ ExprPtr closed_shell_spintrace( }; expression->visit(check_proto_index); - // Symmetrize the expr and keep S operator + // Symmetrize and expression + // Partially expand the antisymmetrizer and write it in terms of S operator. + // See symmetrize_expr(expr) function for implementation details. We want an + // expresison with non-symmetric tensors, hence we are partially expanding the + // antisymmetrizer (A) and fully expanding the anti-symmetric tensors to + // non-symmetric. 
auto symm_and_expand = [](const ExprPtr& expr) { auto temp = expr; if (has_tensor(temp, L"A")) temp = symmetrize_expr(temp); @@ -787,16 +791,24 @@ ExprPtr closed_shell_spintrace( }; auto expr = symm_and_expand(expression); + // Index tags are cleaned prior to calling the fast canonicalizer auto reset_idx_tags = [](ExprPtr& expr) { if (expr->is()) ranges::for_each(expr->as().const_braket(), [](const Index& idx) { idx.reset_tag(); }); }; + + // Cleanup index tags expr->visit(reset_idx_tags); // This call is REQUIRED expand(expr); // This call is REQUIRED rapid_simplify(expr); - // Returns the number of cycles + // Returns the number of closed loops that can be generated by traversing + // through a bra and a ket vector that are placed vertically adjacent to each + // other. The loops can be counted for non-particle number conserving + // operators too, by replacing empty spaces with dummy indices. + // We are assuming all our tensors are particle number conserving here. + // The two vectors must be permutations of each other. auto count_cycles = [](container::svector& v, container::svector& v1) { assert(v.size() == v1.size()); @@ -804,7 +816,6 @@ ExprPtr closed_shell_spintrace( auto dummy_idx = Index(L"p_50"); for (auto it = v.begin(); it != v.end(); ++it) { if (*it != dummy_idx) { - // TODO: Throw exception if bra and ket indices don't match n_cycles++; auto idx = std::distance(v.begin(), it); auto it0 = it; @@ -821,11 +832,15 @@ ExprPtr closed_shell_spintrace( return n_cycles; }; - // Lambda for a product + // Lambda for spin-tracing a product term + // For closed-shell case, a spin-traced result is a product term scaled by + // 2^{n_cycles}, where n_cycles are counted by the lambda function described + // above. For every product term, the bra indices on all tensors are merged + // into a single list, so are the ket indices. 
External indices are + // substituted with either one of the index (because the two vectors should be + // permutations of each other to count cycles). All tensors must be nonsymm. auto trace_product = [&ext_index_groups, &count_cycles](const Product& product) { - // TODO: Check symmetry of tensors - // Remove S if present in a product Product temp_product{}; temp_product.scale(product.scalar()); @@ -848,6 +863,7 @@ ExprPtr closed_shell_spintrace( } return ket_idx; }; + auto product_kets = get_ket_indices(temp_product); auto get_bra_indices = [](const Product& prod) { container::svector bra_idx; @@ -859,27 +875,25 @@ ExprPtr closed_shell_spintrace( } return bra_idx; }; - - auto product_kets = get_ket_indices(temp_product); auto product_bras = get_bra_indices(temp_product); + auto substitute_ext_idx = [&product_bras, &product_kets]( + const container::vector& idx_pair) { + assert(idx_pair.size() == 2); + if (idx_pair.size() == 2) { + auto it = idx_pair.begin(); + auto first = *it; + it++; + auto second = *it; + std::replace(product_bras.begin(), product_bras.end(), first, second); + std::replace(product_kets.begin(), product_kets.end(), first, second); + } + }; + // Substitute indices from external index list - if ((*ext_index_groups.begin()).size() == 2) - ranges::for_each(ext_index_groups, - [&product_bras, &product_kets]( - const container::vector& idx_pair) { - assert(idx_pair.size() == 2); - if (idx_pair.size() == 2) { - auto it = idx_pair.begin(); - auto first = *it; - it++; - auto second = *it; - std::replace(product_bras.begin(), - product_bras.end(), first, second); - std::replace(product_kets.begin(), - product_kets.end(), first, second); - } - }); + if ((*ext_index_groups.begin()).size() == 2) { + ranges::for_each(ext_index_groups, substitute_ext_idx); + } auto n_cycles = count_cycles(product_kets, product_bras); @@ -1021,7 +1035,7 @@ ExprPtr swap_spin(const ExprPtr& expr) { return nullptr; } -ExprPtr merge_operators(const Tensor& O1, const Tensor& O2) { 
+ExprPtr merge_tensors(const Tensor& O1, const Tensor& O2) { assert(O1.label() == O2.label()); assert(O1.symmetry() == O2.symmetry()); auto bra = ranges::views::concat(O1.bra(), O2.bra()); @@ -1152,7 +1166,7 @@ std::vector open_shell_P_op_vector(const Tensor& A) { if (P.factors().size() == 2) { auto P1 = P.factor(0)->as(); auto P2 = P.factor(1)->as(); - term = merge_operators(P1, P2); + term = merge_tensors(P1, P2); } } } @@ -1371,6 +1385,11 @@ std::vector open_shell_CC_spintrace(const ExprPtr& expr) { ExprPtr spintrace( const ExprPtr& expression, container::vector> ext_index_groups) { + // Escape immediately if expression is a constant + if (expression->is()) { + return expression; + } + // SPIN TRACE DOES NOT SUPPORT PROTO INDICES YET. auto check_proto_index = [](const ExprPtr& expr) { if (expr->is()) { @@ -1383,31 +1402,35 @@ ExprPtr spintrace( }; expression->visit(check_proto_index); - if (expression->is()) { - return expression; - } - + // This function must be used for tensors with spin-labelled indices only. If + // the spin-symmetry is conserved: the tensor is expanded; else: zero is + // returned. auto spin_trace_tensor = [](const Tensor& tensor) { - if (can_expand(tensor)) { - return expand_antisymm(tensor); - } else - return ex(0); + return can_expand(tensor) ? expand_antisymm(tensor) : ex(0); }; + // This function is used to spin-trace a product terms with spin-labelled + // indices. It checks if all tensors can be expanded and spintraces individual + // tensors by call to the about lambda function. 
auto spin_trace_product = [&spin_trace_tensor](const Product& product) { Product spin_product{}; - spin_product.scale(product.scalar()); - for (auto&& term : product) { - if (term->is()) { - if (can_expand(term->as())) { - spin_product.append(1, spin_trace_tensor(term->as())); - } else - break; - } - } - if (product.size() != spin_product.size()) { - spin_product.scale(0); + + // Check if all tensors in this product can expand + // If NOT all tensors can expand, return zero + if (!std::all_of(product.factors().begin(), product.factors().end(), + [](const auto& t) { + return can_expand(t->template as()); + })) { + return ex(0); } + + spin_product.scale(product.scalar()); + ranges::for_each(product.factors().begin(), product.factors().end(), + [&spin_trace_tensor, &spin_product](const auto& t) { + spin_product.append( + 1, spin_trace_tensor(t->template as())); + }); + ExprPtr result = std::make_shared(spin_product); expand(result); rapid_simplify(result); @@ -1423,9 +1446,9 @@ ExprPtr spintrace( // Most important lambda of this function auto trace_product = [&ext_index_groups, &spin_trace_tensor, &spin_trace_product](const Product& expression) { - auto result = std::make_shared(); ExprPtr expr = std::make_shared(expression); + // List of all indices in the expression container::set grand_idxlist; auto collect_indices = [&grand_idxlist](const ExprPtr& expr) { if (expr->is()) { @@ -1438,6 +1461,8 @@ ExprPtr spintrace( }; expr->visit(collect_indices); + // List of external indices, i.e. indices that are not summed over Einstein + // style (indices that are not repeated in an expression) container::set ext_idxlist; for (auto&& idxgrp : ext_index_groups) { for (auto&& idx : idxgrp) { @@ -1446,6 +1471,7 @@ ExprPtr spintrace( } } + // List of internal indices, i.e. 
indices that are contracted over container::set int_idxlist; for (auto&& gidx : grand_idxlist) { if (ext_idxlist.find(gidx) == ext_idxlist.end()) { @@ -1458,19 +1484,15 @@ ExprPtr spintrace( // internal index = 1 group) using IndexGroup = container::vector; container::vector index_groups; - - for (auto&& i : int_idxlist) { - index_groups.emplace_back(IndexGroup(1, i)); - } - + for (auto&& i : int_idxlist) index_groups.emplace_back(IndexGroup(1, i)); index_groups.insert(index_groups.end(), ext_index_groups.begin(), ext_index_groups.end()); // EFV: for each spincase (loop over integer from 0 to 2^n-1, n=#of index // groups) - const uint64_t nspincases = std::pow(2, index_groups.size()); + auto result = std::make_shared(); for (uint64_t spincase_bitstr = 0; spincase_bitstr != nspincases; ++spincase_bitstr) { // EFV: assign spin to each index group => make a replacement list @@ -1479,7 +1501,7 @@ ExprPtr spintrace( uint64_t index_group_count = 0; for (auto&& index_group : index_groups) { auto spin_bit = (spincase_bitstr << (64 - index_group_count - 1)) >> 63; - assert((spin_bit == 0) || (spin_bit == 1)); + assert(spin_bit == 0 || spin_bit == 1); for (auto&& index : index_group) { auto type = index.space().type(); @@ -1497,30 +1519,32 @@ ExprPtr spintrace( ++index_group_count; } + // Append spin labels to indices in the expression auto spin_expr = append_spin(expr, index_replacements); - // This call to rapid_simplify is required for Tensor case - rapid_simplify(spin_expr); + rapid_simplify(spin_expr); // This call is required for Tensor case + // NB: There are temporaries in the following code to enable + // printing intermediate expressions. 
if (spin_expr->is()) { - auto temp = spin_trace_tensor(spin_expr->as()); - auto spin_removed = remove_spin(temp); + auto st_expr = spin_trace_tensor(spin_expr->as()); + auto spin_removed = remove_spin(st_expr); result->append(spin_removed); } else if (spin_expr->is()) { - auto temp = spin_trace_product(spin_expr->as()); - if (temp->size() != 0) { - result->append(remove_spin(temp)); + auto st_expr = spin_trace_product(spin_expr->as()); + if (st_expr->size() != 0) { + result->append(remove_spin(st_expr)); } } else if (spin_expr->is()) { for (auto&& summand : *spin_expr) { - Sum temp{}; + Sum st_expr{}; if (summand->is()) - temp.append(spin_trace_tensor(summand->as())); + st_expr.append(spin_trace_tensor(summand->as())); else if (summand->is()) - temp.append(spin_trace_product(summand->as())); + st_expr.append(spin_trace_product(summand->as())); else { - temp.append(summand); + st_expr.append(summand); } - ExprPtr SumPtr = std::make_shared(temp); + ExprPtr SumPtr = std::make_shared(st_expr); auto spin_removed = remove_spin(SumPtr); result->append(spin_removed); } @@ -1602,8 +1626,7 @@ ExprPtr factorize_S(const ExprPtr& expression, /////////////////////////////////////////////// /// Fast approach /// /////////////////////////////////////////////// - // This method is stores hash values of terms in a container - // for faster run times + // This method hashes terms for faster run times if (fast_method) { // summands_hash_list sorted container of hash values of canonicalized diff --git a/SeQuant/domain/mbpt/spin.hpp b/SeQuant/domain/mbpt/spin.hpp index e608555d2..6e030994f 100644 --- a/SeQuant/domain/mbpt/spin.hpp +++ b/SeQuant/domain/mbpt/spin.hpp @@ -183,7 +183,7 @@ ExprPtr swap_spin(const ExprPtr& expr); /// @brief Merge operators into a single operator (designed for P operator) /// @warning Repetition of indices is allowed in a bra or a ket -ExprPtr merge_operators(const Tensor& O1, const Tensor& O2); +ExprPtr merge_tensors(const Tensor& O1, const Tensor& O2); /// 
@brief Vector of Anti-symmetrizers for spin-traced open-shell expr std::vector open_shell_A_op(const Tensor& A); From bbfc0669019760d0b8e65d6deabfb0d5503a4592 Mon Sep 17 00:00:00 2001 From: nakulteke Date: Thu, 14 Apr 2022 22:45:03 -0400 Subject: [PATCH 065/120] bugfix after the function was renamed --- tests/unit/test_spin.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_spin.cpp b/tests/unit/test_spin.cpp index cf3f7af1d..1b2700010 100644 --- a/tests/unit/test_spin.cpp +++ b/tests/unit/test_spin.cpp @@ -953,9 +953,9 @@ SECTION("Merge P operators") { auto P2 = Tensor(L"P", {}, WstrList{L"a_1", L"a_2"}); auto P3 = Tensor(L"P", WstrList{L"i_1", L"i_2"}, WstrList{L"a_1", L"a_2"}); auto P4 = Tensor(L"P", {}, {}); - auto P12 = merge_operators(P1, P2); - auto P34 = merge_operators(P3, P4); - auto P11 = merge_operators(P1, P1); + auto P12 = merge_tensors(P1, P2); + auto P34 = merge_tensors(P3, P4); + auto P11 = merge_tensors(P1, P1); REQUIRE(to_latex(P12) == L"{P^{{a_1}{a_2}}_{{i_1}{i_2}}}"); REQUIRE(to_latex(P34) == L"{P^{{a_1}{a_2}}_{{i_1}{i_2}}}"); REQUIRE(to_latex(P11) == L"{P^{}_{{i_1}{i_2}{i_1}{i_2}}}"); From ef50af8df6521ccd550d20c7c02d614db8f440c0 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Wed, 20 Apr 2022 15:38:39 -0400 Subject: [PATCH 066/120] include singles correction with and without explicit correlation --- SeQuant/core/wick.impl.hpp | 3 +- SeQuant/domain/eqs/single_ref_uccf12.h | 391 +++--- .../transcorrelated/antisymmetrizer.hpp | 458 ++++--- .../domain/transcorrelated/simplifications.h | 1065 ++++++++++------- .../transcorrelated/three_body_decomp.hpp | 246 ++-- 5 files changed, 1269 insertions(+), 894 deletions(-) diff --git a/SeQuant/core/wick.impl.hpp b/SeQuant/core/wick.impl.hpp index c9e41d816..a83a11829 100644 --- a/SeQuant/core/wick.impl.hpp +++ b/SeQuant/core/wick.impl.hpp @@ -183,8 +183,7 @@ inline container::map compute_index_replacement_rules( const auto intersection_space = 
intersection(bra.space(), ket.space()); // if overlap's indices are from non-overlapping spaces, return zero - if (intersection_space == IndexSpace::null_instance()) { - throw zero_result{}; + if (intersection_space == IndexSpace::null_instance()) {throw zero_result{}; } if (!bra_is_ext && !ket_is_ext) { // int + int diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index a9abbb98e..ccbadc015 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -5,29 +5,34 @@ #ifndef SEQUANT_DOMAIN_SINGLE_REF_UCCF12_H #define SEQUANT_DOMAIN_SINGLE_REF_UCCF12_H -#include "../transcorrelated/three_body_decomp.hpp" -#include "../transcorrelated/simplifications.h" #include #include #include #include +#include "../transcorrelated/simplifications.h" +#include "../transcorrelated/three_body_decomp.hpp" #include using namespace sequant; using namespace sequant::mbpt::sr::so; -class uccf12{ - - public: - bool sr; - bool fock; - unsigned int op_rank; - //TODO implement logic for non-default variables. should also include logic for spin-orbital expressions. - uccf12(bool single_reference = true, bool fock_approx = true, unsigned int max_op_rank = 2){ sr = single_reference; fock = fock_approx; op_rank = max_op_rank; - sequant::set_default_context(SeQuant(Vacuum::Physical, IndexSpaceMetric::Unit, BraKetSymmetry::conjugate, - SPBasis::spinfree)); - //mbpt::set_default_convention(); +class uccf12 { + public: + bool sr; + bool fock; + unsigned int op_rank; + // TODO implement logic for non-default variables. should also include logic + // for spin-orbital expressions. 
+ uccf12(bool single_reference = true, bool fock_approx = true, + unsigned int max_op_rank = 2) { + sr = single_reference; + fock = fock_approx; + op_rank = max_op_rank; + sequant::set_default_context( + SeQuant(Vacuum::Physical, IndexSpaceMetric::Unit, + BraKetSymmetry::conjugate, SPBasis::spinfree)); + // mbpt::set_default_convention(); std::setlocale(LC_ALL, "en_US.UTF-8"); std::wcout.precision(std::numeric_limits::max_digits10); std::wcerr.precision(std::numeric_limits::max_digits10); @@ -38,35 +43,43 @@ class uccf12{ std::wcout.sync_with_stdio(true); std::wcerr.sync_with_stdio(true); sequant::detail::OpIdRegistrar op_id_registrar; - TensorCanonicalizer::register_instance(std::make_shared()); + TensorCanonicalizer::register_instance( + std::make_shared()); } //[[e1,e2],e3]_12 - ExprPtr compute_double_com(ExprPtr e1, ExprPtr e2, ExprPtr e3, int ansatz = 2){ + ExprPtr compute_double_com(ExprPtr e1, ExprPtr e2, ExprPtr e3, + int ansatz = 2) { auto first_com = do_wick((e1 * e2) - (e2 * e1)); - simplify(first_com); + non_canon_simplify(first_com); auto second_com_1 = first_com * e3; - simplify(second_com_1); + non_canon_simplify(second_com_1); second_com_1 = do_wick(second_com_1); auto second_com_2 = e3 * first_com; - simplify(second_com_2); + non_canon_simplify(second_com_2); second_com_2 = do_wick(second_com_2); auto second_com = second_com_1 - second_com_2; - simplify(second_com); - if(ansatz == 2) { + non_canon_simplify(second_com); + if (ansatz == 2) { second_com = keep_up_to_3_body_terms(second_com); - second_com = second_com + ex(0.); // make a sum to avoid heavy code duplication for product and sum variants. + second_com = + second_com + + ex(0.); // make a sum to avoid heavy code duplication for + // product and sum variants. 
second_com = simplification::overlap_with_obs(second_com); - second_com = second_com + ex(0.); - second_com = simplification::screen_F12_proj(second_com, 2); - second_com = simplification::tens_to_FNOps(second_com); - second_com = decompositions::three_body_substitution(second_com, 2); - simplify(second_com); + //second_com = second_com + ex(0.); + //second_com = simplification::screen_F12_proj(second_com, 2); + //second_com = simplification::tens_to_FNOps(second_com); + //second_com = decompositions::three_body_substitution(second_com, 2); + non_canon_simplify(second_com); return second_com; } - if (ansatz == 1){ + if (ansatz == 1) { second_com = keep_up_to_2_body_terms(second_com); // std::wcout << to_latex_align(second_com,20,2) << std::endl; - second_com = second_com + ex(0.); // make a sum to avoid heavy code duplication for product and sum variants. + second_com = + second_com + + ex(0.); // make a sum to avoid heavy code duplication for + // product and sum variants. second_com = simplification::overlap_with_obs(second_com); // std::wcout << to_latex_align(second_com,20,2) << std::endl; second_com = second_com + ex(0.); @@ -79,8 +92,11 @@ class uccf12{ } ExprPtr keep_up_to_3_body_terms(const ExprPtr& input) { - if (input->is()) {auto filtered_summands = input->as().summands() | - ranges::views::remove_if([](const ExprPtr& ptr) {assert(ptr->is()); + if (input->is()) { + auto filtered_summands = + input->as().summands() | + ranges::views::remove_if([](const ExprPtr& ptr) { + assert(ptr->is()); bool keep = false; bool found_operator = false; for (auto&& factor : ptr->as().factors()) { @@ -96,47 +112,47 @@ class uccf12{ auto result = ex(ranges::begin(filtered_summands), ranges::end(filtered_summands)); return result; - } - else if (input->is()){ - for(auto&& factor : input->as().factors()){ - if(factor->is()){ - if(factor->as().rank() <= 3){ + } else if (input->is()) { + for (auto&& factor : input->as().factors()) { + if (factor->is()) { + if 
(factor->as().rank() <= 3) { return input; - } - else{ + } else { return ex(0); } } } + } else { + return input; } - else{return input;} } ExprPtr keep_up_to_2_body_terms(const ExprPtr& input) { - if (input->is()) {auto filtered_summands = input->as().summands() | - ranges::views::remove_if([](const ExprPtr& ptr) {assert(ptr->is()); - bool keep = false; - bool found_operator = false; - for (auto&& factor : ptr->as().factors()) { - if (factor->is()) { - assert(!found_operator); - found_operator = true; - const auto rank = factor->as().rank(); - keep = (rank <= 3); - } - } - return !keep; - }); + if (input->is()) { + auto filtered_summands = + input->as().summands() | + ranges::views::remove_if([](const ExprPtr& ptr) { + assert(ptr->is()); + bool keep = false; + bool found_operator = false; + for (auto&& factor : ptr->as().factors()) { + if (factor->is()) { + assert(!found_operator); + found_operator = true; + const auto rank = factor->as().rank(); + keep = (rank <= 3); + } + } + return !keep; + }); auto result = ex(ranges::begin(filtered_summands), ranges::end(filtered_summands)); return result; - } - else if (input->is()){ - for(auto&& factor : input->as().factors()){ - if(factor->is()){ - if(factor->as().rank() <= 2){ + } else if (input->is()) { + for (auto&& factor : input->as().factors()) { + if (factor->is()) { + if (factor->as().rank() <= 2) { return input; - } - else{ + } else { return ex(0); } } @@ -149,114 +165,136 @@ class uccf12{ FWickTheorem wick{expr}; wick.spinfree(false).full_contractions(false); auto result = wick.compute(); - simplify(result); + non_canon_simplify(result); return result; } // produces a uniquely indexed version of the given expression. - //assumes same number of upper and lower indices for operators and tensors - // do not simplify(expr) after use! this will cannonicalize labeling, undoing this work. + // assumes same number of upper and lower indices for operators and tensors + // do not simplify(expr) after use! 
this will cannonicalize labeling, undoing + // this work. ExprPtr relable(ExprPtr expr) { - if (expr->is()){ + if (expr->is()) { auto new_sum = ex(0.0); - for(auto && product : expr->as().summands()){ + for (auto&& product : expr->as().summands()) { auto new_product = relable(product); new_sum = new_sum + new_product; } return new_sum; } - //product does not benefit from recursion - // must reproduce same connectivity to produce identical expressions. - else if(expr->is()){ - std::vector changed_indices;//list of original indices - std::vector original_indices; // list of new indices + // product does not benefit from recursion + // must reproduce same connectivity to produce identical expressions. + else if (expr->is()) { + std::vector changed_indices; // list of original indices + std::vector original_indices; // list of new indices auto new_product = ex(expr->as().scalar()); - for (auto && factor : expr->as().factors()){ - std::pair,std::vector> new_up_low; - if (factor->is()){ - for (int i = 0; i < factor->as().bra().size(); i++){ - auto in_where_bra = simplification::in_list(factor->as().bra()[i], original_indices); - if(in_where_bra.first){ + for (auto&& factor : expr->as().factors()) { + std::pair, std::vector> new_up_low; + if (factor->is()) { + for (int i = 0; i < factor->as().bra().size(); i++) { + auto in_where_bra = simplification::in_list( + factor->as().bra()[i], original_indices); + if (in_where_bra.first) { new_up_low.first.push_back(changed_indices[in_where_bra.second]); - } - else{ + } else { original_indices.push_back(factor->as().bra()[i]); - changed_indices.push_back(Index::make_tmp_index(IndexSpace::instance(factor->as().bra()[i].space().attr()))); + changed_indices.push_back( + Index::make_tmp_index(IndexSpace::instance( + factor->as().bra()[i].space().attr()))); new_up_low.first.push_back( changed_indices[changed_indices.size() - 1]); } - auto in_where_ket = simplification::in_list(factor->as().ket()[i], original_indices); - 
if(in_where_ket.first){ + auto in_where_ket = simplification::in_list( + factor->as().ket()[i], original_indices); + if (in_where_ket.first) { new_up_low.second.push_back(changed_indices[in_where_ket.second]); - } - else{ + } else { original_indices.push_back(factor->as().ket()[i]); - changed_indices.push_back(Index::make_tmp_index(IndexSpace::instance(factor->as().ket()[i].space().attr()))); + changed_indices.push_back( + Index::make_tmp_index(IndexSpace::instance( + factor->as().ket()[i].space().attr()))); new_up_low.second.push_back( changed_indices[changed_indices.size() - 1]); } } - auto new_factor = ex(factor->as().label(), new_up_low.first, new_up_low.second); + auto new_factor = ex(factor->as().label(), + new_up_low.first, new_up_low.second); new_product = new_product * new_factor; - } - else if (factor->is()){ - for (int i = 0; i < factor->as().nannihilators(); i++){ - auto in_where_ann = simplification::in_list(factor->as().annihilators()[i].index(), + } else if (factor->is()) { + for (int i = 0; i < factor->as().nannihilators(); i++) { + auto in_where_ann = simplification::in_list( + factor->as().annihilators()[i].index(), original_indices); - if(in_where_ann.first){ - new_up_low.first.push_back( - changed_indices[in_where_ann.second]); - } - else{ - original_indices.push_back(factor->as().annihilators()[i].index()); - changed_indices.push_back(Index::make_tmp_index(IndexSpace::instance(factor->as().annihilators()[i].index().space().attr()))); - new_up_low.first.push_back( - changed_indices[changed_indices.size() - 1]); - } - auto in_where_cre = simplification::in_list(factor->as().creators()[i].index(), - original_indices); - if(in_where_cre.first){ - new_up_low.second.push_back( - changed_indices[in_where_cre.second]); - } - else{ - original_indices.push_back(factor->as().creators()[i].index()); - changed_indices.push_back(Index::make_tmp_index(IndexSpace::instance(factor->as().creators()[i].index().space().attr()))); - new_up_low.second.push_back( - 
changed_indices[changed_indices.size() - 1]); - } + if (in_where_ann.first) { + new_up_low.first.push_back(changed_indices[in_where_ann.second]); + } else { + original_indices.push_back( + factor->as().annihilators()[i].index()); + changed_indices.push_back(Index::make_tmp_index( + IndexSpace::instance(factor->as() + .annihilators()[i] + .index() + .space() + .attr()))); + new_up_low.first.push_back( + changed_indices[changed_indices.size() - 1]); + } + auto in_where_cre = simplification::in_list( + factor->as().creators()[i].index(), + original_indices); + if (in_where_cre.first) { + new_up_low.second.push_back(changed_indices[in_where_cre.second]); + } else { + original_indices.push_back( + factor->as().creators()[i].index()); + changed_indices.push_back(Index::make_tmp_index( + IndexSpace::instance(factor->as() + .creators()[i] + .index() + .space() + .attr()))); + new_up_low.second.push_back( + changed_indices[changed_indices.size() - 1]); + } } auto new_factor = ex(new_up_low.second, new_up_low.first); new_product = new_product * new_factor; + } else { + throw "unsupported factor type"; } - else{throw "unsupported factor type";} } return new_product; - } - else if(expr->is()){ - std::pair,std::vector> new_bra_ket; - for (int i = 0; i < expr->as().bra().size(); i++){ - new_bra_ket.first.push_back(Index::make_tmp_index(IndexSpace::instance(expr->as().bra()[i].space().attr()))); - new_bra_ket.second.push_back(Index::make_tmp_index(IndexSpace::instance(expr->as().ket()[i].space().attr()))); - } - return ex(expr->as().label(), new_bra_ket.first, new_bra_ket.second); - } - else if(expr->is()){ - std::pair,std::vector> new_ann_cre; - for (int i = 0; i < expr->as().nannihilators(); i++){ - new_ann_cre.first.push_back(Index::make_tmp_index(IndexSpace::instance(expr->as().annihilators()[i].index().space().attr()))); - new_ann_cre.second.push_back(Index::make_tmp_index(IndexSpace::instance(expr->as().creators()[i].index().space().attr()))); + } else if (expr->is()) { + 
std::pair, std::vector> new_bra_ket; + for (int i = 0; i < expr->as().bra().size(); i++) { + new_bra_ket.first.push_back(Index::make_tmp_index( + IndexSpace::instance(expr->as().bra()[i].space().attr()))); + new_bra_ket.second.push_back(Index::make_tmp_index( + IndexSpace::instance(expr->as().ket()[i].space().attr()))); + } + return ex(expr->as().label(), new_bra_ket.first, + new_bra_ket.second); + } else if (expr->is()) { + std::pair, std::vector> new_ann_cre; + for (int i = 0; i < expr->as().nannihilators(); i++) { + new_ann_cre.first.push_back(Index::make_tmp_index(IndexSpace::instance( + expr->as().annihilators()[i].index().space().attr()))); + new_ann_cre.second.push_back(Index::make_tmp_index(IndexSpace::instance( + expr->as().creators()[i].index().space().attr()))); } return ex(new_ann_cre.first, new_ann_cre.second); - } - else if(expr->is()){ + } else if (expr->is()) { return expr; } } - std::pair compute(std::string gg_label,int ansatz = 2, bool print = false,bool singles=false,bool doubles=true) { - // auto gg_space = IndexSpace::active_occupied; // Geminal-generating space: active occupieds is the normal choice, all orbitals is the reference-independent (albeit expensive) choice + std::pair compute(std::string gg_label, int ansatz = 2, + bool print = false, bool singles = false, + bool doubles = true) { + // auto gg_space = IndexSpace::active_occupied; // Geminal-generating + // space: active occupieds is the normal choice, all orbitals is the + // reference-independent (albeit expensive) choice assert(singles == true || doubles == true); auto gg_space = IndexSpace::frozen_occupied; if (gg_label == "act_occ") { @@ -274,18 +312,27 @@ class uccf12{ else if (gg_label == "act_obs") { gg_space = IndexSpace::all_active; } else { - throw std::runtime_error("uccf12::compute(gg_label) unsupported space label"); + throw std::runtime_error( + "uccf12::compute(gg_label) unsupported space label"); } auto single = ex(0.0); - if(singles){ - // this might need to be 
complete space if we don't have a solution to the particular blocks of interest. - auto C = ex(L"C",std::initializer_list{Index::make_tmp_index(IndexSpace::instance(IndexSpace::all))},std::initializer_list{Index::make_tmp_index(IndexSpace::instance(IndexSpace::other_unoccupied))}); - auto E_pa = ex (std::initializer_list{C->as().bra()[0]},std::initializer_list{C->as().ket()[0]}); + if (singles) { + // this might need to be complete space if we don't have a solution to the + // particular blocks of interest. + auto C = ex( + L"C", + std::initializer_list{ + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all))}, + std::initializer_list{Index::make_tmp_index( + IndexSpace::instance(IndexSpace::complete_unoccupied))}); + auto E_pa = ex( + std::initializer_list{C->as().bra()[0]}, + std::initializer_list{C->as().ket()[0]}); auto C_Epa = C * E_pa; - auto anti_herm_C = C_Epa - adjoint(C_Epa); + auto anti_herm_C = C_Epa - adjoint(C_Epa); single = single + anti_herm_C; - } + } if (ansatz == 2) { auto h = H(false); @@ -293,40 +340,54 @@ class uccf12{ auto r_1 = R12(gg_space); ExprPtr A = ex(0.0); - if(doubles) { + if (doubles) { A = A + (r - adjoint(r)) + single; + } else { + A = A + single; + non_canon_simplify(A); } - else{A = A + single;} auto A_ = A->clone(); A_ = relable(A_); - - //first commutator in eq 9. Chem. Phys. 136, 084107 (2012). + std::wcout << "singles_A: " << to_latex_align(A_, 20,4) << std::endl; + // first commutator in eq 9. Chem. Phys. 136, 084107 (2012). auto H_A = do_wick(ex(1.) * ((h * A) - (A * h))); auto H_A_3 = keep_up_to_3_body_terms(H_A); H_A_3 = simplification::overlap_with_obs(H_A_3); H_A_3 = H_A_3 + ex(0.); H_A_3 = simplification::screen_F12_proj(H_A_3, 2); H_A_3 = simplification::tens_to_FNOps(H_A_3); + simplify(H_A_3); auto H_A_2 = decompositions::three_body_substitution(H_A_3, 2); simplify(H_A_2); auto com_1 = simplification::hamiltonian_based_projector_2(H_A_2); // double commutator in eq. 9. Chem. Phys. 136, 084107 (2012). 
- auto full_double_com = ex(1./2) * compute_double_com(F(),A,A_); + auto full_double_com = + ex(1. / 2) * compute_double_com(F(), A, A_); auto sim = simplification::fock_based_projector_2(full_double_com); auto one_body = com_1.first + (sim.first); auto two_body = com_1.second + (sim.second); // cannot use non_canon_simplify here because of B term. - non_canon_simplify(one_body); - non_canon_simplify(two_body); + simplify(one_body); + simplify(two_body); int term_count = 0; - for (auto i = 0; i < one_body->as().summands().size(); i++) { + if(!one_body->is()){ term_count += 1; } - for (auto i = 0; i < two_body->as().summands().size(); i++) { + else { + for (auto i = 0; i < one_body->as().summands().size(); i++) { + term_count += 1; + } + } + if(!two_body->is()){ term_count += 1; } + else { + for (auto i = 0; i < two_body->as().summands().size(); i++) { + term_count += 1; + } + } std::cout << "number of terms: " << term_count << std::endl; if (print) { @@ -337,9 +398,11 @@ class uccf12{ } return std::pair{one_body, two_body}; } - // If we use the 2 body approximation, all terms with Density fall out since they will happen to contain off diagonal G elements. - // we would get the same result if we kept the decomposition and simplified, but this should save time. - if(ansatz == 1){ + // If we use the 2 body approximation, all terms with Density fall out since + // they will happen to contain off diagonal G elements. we would get the + // same result if we kept the decomposition and simplified, but this should + // save time. + if (ansatz == 1) { auto h = H(false); auto r = R12(gg_space); auto r_1 = R12(gg_space); @@ -352,37 +415,43 @@ class uccf12{ H_A_3 = H_A_3 + ex(0.); H_A_3 = simplification::screen_F12_proj(H_A_3, 1); H_A_3 = simplification::tens_to_FNOps(H_A_3); - simplify(H_A_3); + non_canon_simplify(H_A_3); auto com_1 = simplification::hamiltonian_based_projector_1(H_A_3); - auto fFF = ex(1. / 2) * compute_double_com(F(), r, r_1,1); + auto fFF = ex(1. 
/ 2) * compute_double_com(F(), r, r_1, 1); non_canon_simplify(fFF); - auto fFFt = ex(1. / 2) * - compute_double_com(F(), r, ex(-1.) * adjoint(r_1),1); + auto fFFt = + ex(1. / 2) * + compute_double_com(F(), r, ex(-1.) * adjoint(r_1), 1); non_canon_simplify(fFFt); auto fFtFt = ex(1. / 2) * compute_double_com(F(), ex(-1.) * adjoint(r), - ex(-1.) * adjoint(r_1),1); + ex(-1.) * adjoint(r_1), 1); non_canon_simplify(fFtFt); - auto fFtF = ex(1. / 2) * - compute_double_com(F(), ex(-1.) * adjoint(r), r_1,1); + auto fFtF = + ex(1. / 2) * + compute_double_com(F(), ex(-1.) * adjoint(r), r_1, 1); non_canon_simplify(fFtF); auto fFF_sim = simplification::fock_based_projector_1(fFF); - // std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << std::endl; + // std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << + // std::endl; auto fFFt_sim = simplification::fock_based_projector_1(fFFt); - // std::wcout << "FFt: " << to_latex_align(fFFt_sim.second,20,2) << std::endl; + // std::wcout << "FFt: " << to_latex_align(fFFt_sim.second,20,2) << + // std::endl; auto fFtFt_sim = simplification::fock_based_projector_1(fFtFt); - // std::wcout << "FtFt: " << to_latex_align(fFtFt_sim.second,20,2) << std::endl; + // std::wcout << "FtFt: " << to_latex_align(fFtFt_sim.second,20,2) << + // std::endl; auto fFtF_sim = simplification::fock_based_projector_1(fFtF); - // std::wcout << "FtF: " << to_latex_align(fFtF_sim.second,20,2) << std::endl; + // std::wcout << "FtF: " << to_latex_align(fFtF_sim.second,20,2) << + // std::endl; auto one_body = com_1.first + (fFF_sim.first + fFFt_sim.first + fFtFt_sim.first + fFtF_sim.first); auto two_body = com_1.second + (fFF_sim.second + fFFt_sim.second + fFtFt_sim.second + fFtF_sim.second); - non_canon_simplify(one_body); - non_canon_simplify(two_body); + simplify(one_body); + simplify(two_body); int term_count = 0; for (auto i = 0; i < one_body->as().summands().size(); i++) { term_count += 1; diff --git 
a/SeQuant/domain/transcorrelated/antisymmetrizer.hpp b/SeQuant/domain/transcorrelated/antisymmetrizer.hpp index 9a5b1b078..1c2e9f485 100644 --- a/SeQuant/domain/transcorrelated/antisymmetrizer.hpp +++ b/SeQuant/domain/transcorrelated/antisymmetrizer.hpp @@ -2,31 +2,39 @@ // created by Conner Masteran 06/1/2021 // -#include #include #include +#include +#include #include #include -#include - namespace sequant { -//@brief generates all unique permutations of a product where products differing only by internal tensor antisymmetry are non-unique. -// i.e. a^{i_1 i_2}_{a_1 a_2} = - a^{i_2 i_1}_{a_1 a_2}. RHS is non-unique in this context. +//@brief generates all unique permutations of a product where products differing +// only by internal tensor antisymmetry are non-unique. +// i.e. a^{i_1 i_2}_{a_1 a_2} = - a^{i_2 i_1}_{a_1 a_2}. RHS is non-unique in +// this context. class antisymm_element { using IndexGroup = std::pair; private: std::vector - index_group; // where each tensor begins and ends. assumes particle conserving ops. needed to keep track of canonical ordering + index_group; // where each tensor begins and ends. assumes particle + // conserving ops. 
needed to keep track of canonical + // ordering std::vector>> - unique_bras_list; // list of unique bra orderings with associated integer for the sign + unique_bras_list; // list of unique bra orderings with associated integer + // for the sign std::vector>> - unique_kets_list; // list of unique ket orderings with associated integer for the sign - ExprPtr current_product; // used to keep track of the original expression recieved by the constructor + unique_kets_list; // list of unique ket orderings with associated integer + // for the sign + ExprPtr current_product; // used to keep track of the original expression + // recieved by the constructor - // generates all possible permutations while observing the canonical ordering of each tensor/operator function kept general to work with other data types, algorithm does not require sequant::Index objects + // generates all possible permutations while observing the canonical ordering + // of each tensor/operator function kept general to work with other data + // types, algorithm does not require sequant::Index objects // @param a list of T objects/indices in the canonical ordering // @return a list of all permutations each with an associated sign template @@ -34,13 +42,15 @@ class antisymm_element { std::vector ordered_indices) { std::vector>> result; // next_permutation needs an ordered list. works for integers quite well. - // Easiest to map T to an integer corresponding to its position in a vector and then go back. + // Easiest to map T to an integer corresponding to its position in a vector + // and then go back. std::vector ordered_numbers; for (int i = 0; i < ordered_indices.size(); i++) { ordered_numbers.push_back(i); } - // return {next_exists, nswaps for this permutation} N.B. nswaps_relative_to_input != nswaps_relative_to_оriginal + // return {next_exists, nswaps for this permutation} N.B. 
+ // nswaps_relative_to_input != nswaps_relative_to_оriginal auto swapcounting_tracking_next_permutation = [](auto first, auto last) -> std::pair { int nswaps = 0; @@ -59,7 +69,8 @@ class antisymm_element { ++nswaps; std::reverse(i1, last); nswaps += (std::distance(i1, last)) / - 2; // logic from https://en.cppreference.com/w/cpp/algorithm/reverse + 2; // logic from + // https://en.cppreference.com/w/cpp/algorithm/reverse return std::make_pair(true, nswaps); } if (i == first) { @@ -73,7 +84,8 @@ class antisymm_element { std::vector numbers = ordered_numbers; result.push_back({1, ordered_indices}); - int total_swaps = 0; // even # swaps produces positive and odd # of swaps produce negative + int total_swaps = 0; // even # swaps produces positive and odd # of swaps + // produce negative int counter = 0; bool do_next_perm = true; @@ -89,7 +101,9 @@ class antisymm_element { auto is_canonical_sign = [this, &total_swaps](const auto& indices) -> std::pair { for (auto& group : - this->index_group) { // There is only one sorted possibility in a set (tensor) considering that no index label should be the same. + this->index_group) { // There is only one sorted possibility in a + // set (tensor) considering that no index + // label should be the same. if (!is_sorted(indices.begin() + group.first, indices.begin() + group.second)) return { @@ -118,9 +132,12 @@ class antisymm_element { } public: - // takes a sequant::ExprPtr and generates all antisymmetric unique permutations of that expression. requires that ex_ is a product expression at this point + // takes a sequant::ExprPtr and generates all antisymmetric unique + // permutations of that expression. requires that ex_ is a product expression + // at this point // @param ex_ as product - // populates a result ExprPtr that the user can grab. result is in general a Sum. + // populates a result ExprPtr that the user can grab. result is in general a + // Sum. 
antisymm_element(ExprPtr ex_) { current_product = ex_; assert(ex_->is()); @@ -166,13 +183,15 @@ class antisymm_element { auto new_sum = ex(0.0); auto summand_exists = - [&new_sum](ExprPtr ex) { // check whether a summand has already been generated to screen out same terms. + [&new_sum](ExprPtr ex) { // check whether a summand has already been + // generated to screen out same terms. bool value = false; for (auto summand = new_sum->begin_subexpr(); summand != new_sum->end_subexpr(); summand++) { value = ex.get()->as() == summand->get() - ->as(); // ensure that this equality is mathematical and not hash based. + ->as(); // ensure that this equality is + // mathematical and not hash based. if (value == true) { return value; } @@ -219,7 +238,7 @@ class antisymm_element { auto new_Nop = ex>(new_crea, new_anni); new_product = new_product * new_Nop; - //std::wcout << "product: " << to_latex(new_product) << std::endl; + // std::wcout << "product: " << to_latex(new_product) << std::endl; new_product->canonicalize(); } @@ -228,7 +247,8 @@ class antisymm_element { } } if (!summand_exists( - new_product)) { // since products are canonicalized, repeats can be found. + new_product)) { // since products are canonicalized, repeats + // can be found. 
new_sum = new_sum + new_product; } } @@ -237,8 +257,10 @@ class antisymm_element { result = new_sum; } - std::vector sorted_bra_indices; // The original order of the upper indices on a given term - std::vector sorted_ket_indices; // the original order of the lower indices on a given term + std::vector sorted_bra_indices; // The original order of the upper + // indices on a given term + std::vector sorted_ket_indices; // the original order of the lower + // indices on a given term ExprPtr result; }; @@ -249,12 +271,15 @@ class antisymmetrize { antisymmetrize(ExprPtr s) { if (s->is()) { - for (auto&& product : s->as().summands()) { // for each element in the sum - if(product->is()) { - antisymm_element ele(product); // calculate the sum of all the valid permutations for each term. each object here should only live until this loop ends. + for (auto&& product : + s->as().summands()) { // for each element in the sum + if (product->is()) { + antisymm_element ele( + product); // calculate the sum of all the valid permutations for + // each term. each object here should only live until + // this loop ends. result = result + ele.result; // append that to the final list; - } - else{ + } else { result = result + product; } } @@ -268,195 +293,242 @@ class antisymmetrize { }; namespace antisymm { - // function which counts number of closed loops from the starting order of upper and lower indices and the contracted indices. since the ordering of the new contracted indices is arbitrary, the algorithm searched for the upper index which would connect to the lower index checks if contracted lower index closes the loop, if not, continue searching until the corresponding upper index is not present, or the loop closes. 
keep track of which indices are used so that loops are not double counted - //@param1 initial order of upper indices before contraction, @param 2 initial order of lower indices before contraction, @param 3 set of contracted upper indices, @param 4 set of lower contracted indices. - // @out the number of loops. - // TODO Test this function extensively and add more asserts - int num_closed_loops( - std::vector init_upper, std::vector init_lower, std::vector new_upper, std::vector new_lower) { - int result = 0; - auto equal_indices = [](Index i1, Index i2) { - return i1.label() == i2.label(); - }; - auto where_same_ele = [&](Index i1, std::vector ref_list) { - int hit_counter = 0; - int where; - bool in_list = false; - for (int i = 0; i < ref_list.size(); i++) { - if (equal_indices(i1, ref_list[i])) { - hit_counter += 1; - where = i; - in_list = true; - } +// function which counts number of closed loops from the starting order of upper +// and lower indices and the contracted indices. since the ordering of the new +// contracted indices is arbitrary, the algorithm searched for the upper index +// which would connect to the lower index checks if contracted lower index +// closes the loop, if not, continue searching until the corresponding upper +// index is not present, or the loop closes. keep track of which indices are +// used so that loops are not double counted +//@param1 initial order of upper indices before contraction, @param 2 initial +// order of lower indices before contraction, @param 3 set of contracted upper +// indices, @param 4 set of lower contracted indices. +// @out the number of loops. 
+// TODO Test this function extensively and add more asserts +int num_closed_loops(std::vector init_upper, + std::vector init_lower, + std::vector new_upper, + std::vector new_lower) { + int result = 0; + auto equal_indices = [](Index i1, Index i2) { + return i1.label() == i2.label(); + }; + auto where_same_ele = [&](Index i1, std::vector ref_list) { + int hit_counter = 0; + int where; + bool in_list = false; + for (int i = 0; i < ref_list.size(); i++) { + if (equal_indices(i1, ref_list[i])) { + hit_counter += 1; + where = i; + in_list = true; } - assert(hit_counter < 2); - std::pair result(where, in_list); - return result; - }; + } + assert(hit_counter < 2); + std::pair result(where, in_list); + return result; + }; - std::vector in_loop; // lower indices already in a loop. - for (int i = 0; i < new_upper.size(); i++) { - assert(new_upper.size() == new_lower.size()); - if (!where_same_ele(new_lower[i], in_loop).second) { - auto starting_point = where_same_ele(new_upper[i], init_upper).first; - auto chain_end = init_lower[starting_point]; + std::vector in_loop; // lower indices already in a loop. + for (int i = 0; i < new_upper.size(); i++) { + assert(new_upper.size() == new_lower.size()); + if (!where_same_ele(new_lower[i], in_loop).second) { + auto starting_point = where_same_ele(new_upper[i], init_upper).first; + auto chain_end = init_lower[starting_point]; - bool closed = equal_indices(chain_end, new_lower[i]); - auto lower_index = new_lower[i]; - while (!closed) { - auto where_exist = where_same_ele( - init_upper[where_same_ele(lower_index, init_lower).first], - new_upper); // if the initial upper index is the same particle as the new lower index in question, find it in the new upper list. - if (!where_exist.second) { // if the upper particle index originally connected to the lower index in question is not part of a contraction, there is no loop with these. 
- break; - } else { - lower_index =new_lower[where_exist.first]; // the lower index below the found upper index - in_loop.push_back(lower_index); // this lower index is part of one loop so it cannot be part of another. - closed = equal_indices(chain_end,lower_index); // is the lower index the same as the end of the chain? - } - } - if (closed) { - result += 1; + bool closed = equal_indices(chain_end, new_lower[i]); + auto lower_index = new_lower[i]; + while (!closed) { + auto where_exist = where_same_ele( + init_upper[where_same_ele(lower_index, init_lower).first], + new_upper); // if the initial upper index is the same particle as + // the new lower index in question, find it in the new + // upper list. + if (!where_exist + .second) { // if the upper particle index originally connected + // to the lower index in question is not part of a + // contraction, there is no loop with these. + break; + } else { + lower_index = new_lower[where_exist.first]; // the lower index below + // the found upper index + in_loop.push_back( + lower_index); // this lower index is part of one loop so it + // cannot be part of another. + closed = equal_indices(chain_end, + lower_index); // is the lower index the same + // as the end of the chain? } - in_loop.push_back(new_lower[i]); // put the starting lower index in the list } + if (closed) { + result += 1; + } + in_loop.push_back( + new_lower[i]); // put the starting lower index in the list } - assert(in_loop.size() <= init_lower.size()); - assert(result <= init_lower.size()); - return result; } + assert(in_loop.size() <= init_lower.size()); + assert(result <= init_lower.size()); + return result; +} - //for the mnemonic rules of spin summing to work, each individual tensor/FNOperator needs to maximally - //resemble the original tensor, so indices may need to be swapped (and the sign changed). - // this does not matter for spin-orbital expressions, but the rules become incredibly simple if things stay maximally the same. 
- // @param1 original upper indices - // @param2 original lower indices - // @param3 starting expression - ExprPtr max_similarity(const std::vector& original_upper, const std::vector& original_lower, ExprPtr expression){ - //index pairing is originally understood as a position in the original vectors, but for this case, a map may do better. - std::map original_map; - for (int i = 0; i < original_upper.size(); i++){ - original_map.emplace(original_upper[i].to_latex(), original_lower[i].to_latex()); - } - for (auto&& product : expression->as().summands()){ - for (auto&& factor : product->as().factors()){ - int og_pairs = 0; - int new_pairs = 0; - if (factor->is()){ - std::vector current_upper; - std::vector current_lower; - if(factor->as().rank() == 2){ - for(int i = 0; i < 2; i++){ - assert(original_map.find(factor->as().ket()[i].to_latex()) != original_map.end()); - if(original_map.find(factor->as().ket()[i].to_latex())->second == factor->as().bra()[i].to_latex()){ - og_pairs += 1; - } - current_upper.push_back(factor->as().ket()[i]); - current_lower.push_back(factor->as().bra()[i]); - } - std::iter_swap(current_lower.begin(), current_lower.begin() + 1); - for(int i = 0; i < 2; i++){ - assert(original_map.find(current_upper[i].to_latex()) != original_map.end()); - if(original_map.find(current_upper[i].to_latex())->second == current_lower[i].to_latex()){ - new_pairs += 1; - } +// for the mnemonic rules of spin summing to work, each individual +// tensor/FNOperator needs to maximally resemble the original tensor, so indices +// may need to be swapped (and the sign changed). +// this does not matter for spin-orbital expressions, but the rules become +// incredibly simple if things stay maximally the same. 
+// @param1 original upper indices +// @param2 original lower indices +// @param3 starting expression +ExprPtr max_similarity(const std::vector& original_upper, + const std::vector& original_lower, + ExprPtr expression) { + // index pairing is originally understood as a position in the original + // vectors, but for this case, a map may do better. + std::map original_map; + for (int i = 0; i < original_upper.size(); i++) { + original_map.emplace(original_upper[i].to_latex(), + original_lower[i].to_latex()); + } + for (auto&& product : expression->as().summands()) { + for (auto&& factor : product->as().factors()) { + int og_pairs = 0; + int new_pairs = 0; + if (factor->is()) { + std::vector current_upper; + std::vector current_lower; + if (factor->as().rank() == 2) { + for (int i = 0; i < 2; i++) { + assert( + original_map.find(factor->as().ket()[i].to_latex()) != + original_map.end()); + if (original_map.find(factor->as().ket()[i].to_latex()) + ->second == factor->as().bra()[i].to_latex()) { + og_pairs += 1; } + current_upper.push_back(factor->as().ket()[i]); + current_lower.push_back(factor->as().bra()[i]); } - if(new_pairs > og_pairs){ - factor = ex(-1) * ex(factor->as().label(),current_lower, current_upper); + std::iter_swap(current_lower.begin(), current_lower.begin() + 1); + for (int i = 0; i < 2; i++) { + assert(original_map.find(current_upper[i].to_latex()) != + original_map.end()); + if (original_map.find(current_upper[i].to_latex())->second == + current_lower[i].to_latex()) { + new_pairs += 1; + } } } - else if (factor->is()){ - std::vector current_upper; - std::vector current_lower; - if(factor->as().rank() == 2){ - for(int i = 0; i < 2; i++) { - assert(original_map.find(factor->as() - .creators()[i] - .index() - .to_latex()) != original_map.end()); - if (original_map - .find(factor->as() - .creators()[i] - .index() - .to_latex()) - ->second == factor->as() - .annihilators()[i] - .index() - .to_latex()) { - og_pairs += 1; - } - current_upper.push_back( - 
factor->as().creators()[i].index()); - current_lower.push_back( - factor->as().annihilators()[i].index()); - } - std::iter_swap(current_lower.begin(), current_lower.begin() + 1); - for(int i = 0; i < 2; i++){ - assert(original_map.find(current_upper[i].to_latex()) != original_map.end()); - if(original_map.find(current_upper[i].to_latex())->second == current_lower[i].to_latex()){ - new_pairs += 1; - } + if (new_pairs > og_pairs) { + factor = ex(-1) * ex(factor->as().label(), + current_lower, current_upper); + } + } else if (factor->is()) { + std::vector current_upper; + std::vector current_lower; + if (factor->as().rank() == 2) { + for (int i = 0; i < 2; i++) { + assert(original_map.find(factor->as() + .creators()[i] + .index() + .to_latex()) != original_map.end()); + if (original_map + .find(factor->as() + .creators()[i] + .index() + .to_latex()) + ->second == + factor->as().annihilators()[i].index().to_latex()) { + og_pairs += 1; } + current_upper.push_back( + factor->as().creators()[i].index()); + current_lower.push_back( + factor->as().annihilators()[i].index()); } - if(new_pairs > og_pairs){ - factor = ex(-1) * ex(current_upper, current_lower); + std::iter_swap(current_lower.begin(), current_lower.begin() + 1); + for (int i = 0; i < 2; i++) { + assert(original_map.find(current_upper[i].to_latex()) != + original_map.end()); + if (original_map.find(current_upper[i].to_latex())->second == + current_lower[i].to_latex()) { + new_pairs += 1; + } } } + if (new_pairs > og_pairs) { + factor = + ex(-1) * ex(current_upper, current_lower); + } } } - simplify(expression); - return expression; } + simplify(expression); + return expression; +} - // not a general spin-summing procedure, implementation for a known singlet state for the prefactor rules to apply. - // @param1 an antisymm_element object strictly so the original ordering of the indices is known - // @param2 bool singlet_state? 
the looping rules and contraction prefactors are a direct result of the singlet state approximation to densities. - // TODO: use a generalized spin summation for non-singlet states - ExprPtr spin_sum(std::vector original_upper,std::vector original_lower, ExprPtr expression, bool singlet_state = true) { - if (singlet_state) { - //std::wcout << "before spin sum! :" << std::endl << to_latex_align(expression) << std::endl; - auto init_upper = original_upper; - auto init_lower = original_lower; - max_similarity(original_upper,original_lower,expression); - // may need to add separate loop if the result is a single product or Operator/Tensor - auto return_val = ex(0); - for (auto&& product : expression->as().summands()) { - auto prefactor = ex(0.125); //each term in the 3 body decomp, should have (1 /2^n) prefactor where n is rank of product so product rank always 3 for 3 body decomp - std::vector new_upper; - std::vector new_lower; - for (auto&& factor : product->as().factors()) { - if (factor->is() && factor->as().label() == L"\\gamma") { - //prefactor = ex(-0.5) * ex(factor->as().rank()) * prefactor; - for (int i = 0; i < factor->as().rank(); i++) { - new_upper.push_back(factor->as().ket()[i]); - new_lower.push_back(factor->as().bra()[i]); - } - factor = ex(L"\\Gamma",factor->as().bra(), factor->as().ket()); +// not a general spin-summing procedure, implementation for a known singlet +// state for the prefactor rules to apply. +// @param1 an antisymm_element object strictly so the original ordering of the +// indices is known +// @param2 bool singlet_state? the looping rules and contraction prefactors are +// a direct result of the singlet state approximation to densities. +// TODO: use a generalized spin summation for non-singlet states +ExprPtr spin_sum(std::vector original_upper, + std::vector original_lower, ExprPtr expression, + bool singlet_state = true) { + if (singlet_state) { + // std::wcout << "before spin sum! 
:" << std::endl << + // to_latex_align(expression) << std::endl; + auto init_upper = original_upper; + auto init_lower = original_lower; + max_similarity(original_upper, original_lower, expression); + // may need to add separate loop if the result is a single product or + // Operator/Tensor + auto return_val = ex(0); + for (auto&& product : expression->as().summands()) { + auto prefactor = + ex(0.125); // each term in the 3 body decomp, should have + // (1 /2^n) prefactor where n is rank of product + // so product rank always 3 for 3 body decomp + std::vector new_upper; + std::vector new_lower; + for (auto&& factor : product->as().factors()) { + if (factor->is() && + factor->as().label() == L"\\gamma") { + // prefactor = ex(-0.5) * + // ex(factor->as().rank()) * prefactor; + for (int i = 0; i < factor->as().rank(); i++) { + new_upper.push_back(factor->as().ket()[i]); + new_lower.push_back(factor->as().bra()[i]); } - else if (factor->is()) { - //prefactor = ex(-0.5) * ex(factor->as().rank()) * prefactor; - for (int i = 0; i < factor->as().rank(); i++) { - new_upper.push_back(factor->as().creators()[i].index()); - new_lower.push_back(factor->as().annihilators()[i].index()); - } + factor = ex(L"\\Gamma", factor->as().bra(), + factor->as().ket()); + } else if (factor->is()) { + // prefactor = ex(-0.5) * + // ex(factor->as().rank()) * prefactor; + for (int i = 0; i < factor->as().rank(); i++) { + new_upper.push_back(factor->as().creators()[i].index()); + new_lower.push_back( + factor->as().annihilators()[i].index()); } } - //std::wcout << to_latex_align(product) << std::endl; - int nloops = num_closed_loops(init_upper, init_lower, new_upper, new_lower); - //std::wcout << "nloops: " << nloops << std::endl; - if (nloops == 0) { - } - else { - prefactor = ex(std::pow(2,nloops)) * prefactor; - } - return_val = (product * prefactor) + return_val; } - return_val->canonicalize(); - return return_val; - } else { - throw " non-singlet states not yet supported"; + // std::wcout 
<< to_latex_align(product) << std::endl; + int nloops = + num_closed_loops(init_upper, init_lower, new_upper, new_lower); + // std::wcout << "nloops: " << nloops << std::endl; + if (nloops == 0) { + } else { + prefactor = ex(std::pow(2, nloops)) * prefactor; + } + return_val = (product * prefactor) + return_val; } + return_val->canonicalize(); + return return_val; + } else { + throw " non-singlet states not yet supported"; } } -} +} // namespace antisymm +} // namespace sequant diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index 0d11c9db6..c544b673d 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -1,54 +1,66 @@ // // Created by Conner Masteran on 7/29/21. // -#include +#include #include #include -#include +#include using namespace sequant; -//various simplifications of tensor-operator expressions based on properties of particular tensors. Also functionality for restricting our operators and densities to the orbital basis set (obs). -namespace simplification{ -template -std::pair in_list(Index idx, vec_type ref_list){ +// various simplifications of tensor-operator expressions based on properties of +// particular tensors. Also functionality for restricting our operators and +// densities to the orbital basis set (obs). 
+namespace simplification { +template +std::pair in_list(Index idx, vec_type ref_list) { bool inlist = false; int where_inlist = 0; - for (int i = 0; i < ref_list.size(); i++){ - if (idx.label() == ref_list[i].label()){ + for (int i = 0; i < ref_list.size(); i++) { + if (idx.label() == ref_list[i].label()) { inlist = true; where_inlist = i; } } - std::pair result{inlist,where_inlist}; + std::pair result{inlist, where_inlist}; return result; } -//convert a sequant::FNOperator to a sequant::tensor object -ExprPtr op_to_tens(ExprPtr ex_){ +// convert a sequant::FNOperator to a sequant::tensor object +ExprPtr op_to_tens(ExprPtr ex_) { assert(ex_->is()); std::vector bra_indices; std::vector ket_indices; - for(auto&& ann : ex_->as().annihilators()){ + for (auto&& ann : ex_->as().annihilators()) { bra_indices.push_back(ann.index()); } - for(auto&& cre : ex_->as().creators()){ + for (auto&& cre : ex_->as().creators()) { ket_indices.push_back(cre.index()); } - auto label = get_default_context().spbasis() == SPBasis::spinfree ? L"E" : L"a"; + auto label = + get_default_context().spbasis() == SPBasis::spinfree ? L"E" : L"a"; auto result = ex(label, bra_indices, ket_indices); return result; } -//all densities and the Hamiltonian operators are confined to a given orbital basis in second quantized notation. -//thus any index on a Normal Ordered operator or density must be confined to the obs. -///TODO this dictates that the resulting hamiltonian will be in a particular basis. -ExprPtr overlap_with_obs(ExprPtr ex_){ - auto overlap_expr = ex(0); //enforce an overlap each E with elements from - for (auto&& product : ex_->as().summands()){// may be able to make_overlaps manually and apply them to the products. simplify may know what to do with it. +// all densities and the Hamiltonian operators are confined to a given orbital +// basis in second quantized notation. thus any index on a Normal Ordered +// operator or density must be confined to the obs. 
+/// TODO this dictates that the resulting hamiltonian will be in a particular +/// basis. +ExprPtr overlap_with_obs(ExprPtr ex_) { + std::wcout << to_latex_align(ex_,20,4) << std::endl; + auto overlap_expr = + ex(0); // enforce an overlap each E with elements from + for (auto&& product : + ex_->as().summands()) { // may be able to make_overlaps manually + // and apply them to the products. simplify + // may know what to do with it. auto new_product = ex(product->as().scalar()); - for (int it = product->as().factors().size() - 1; it >= 0; it--){//loop backwards through the products. + for (int it = product->as().factors().size() - 1; it >= 0; + it--) { // loop backwards through the products. auto factor = product->as().factor(it); - if(it == product->as().factors().size() - 1 && factor->is() && factor->as().rank() == 3){ + if (it == product->as().factors().size() - 1 && + factor->is() && factor->as().rank() == 3) { std::wstring label_1; std::wstring label_2; std::wstring label_3; @@ -61,21 +73,29 @@ ExprPtr overlap_with_obs(ExprPtr ex_){ label_4 = factor->as().creators()[0].index().label(); label_5 = factor->as().creators()[1].index().label(); label_6 = factor->as().creators()[2].index().label(); - auto o1 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_1}); - auto o2 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_2}); - auto o3 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_3}); - auto o4 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_4}); - auto o5 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_5}); - auto o6 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_6}); - new_product = o1 * o2 * o3 * o4 * o5 * o6 * new_product * op_to_tens(factor); - } - else if (it == product->as().factors().size() - 1 && 
factor->is() &&factor->as().rank() == 2){ + auto o1 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_1}); + auto o2 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_2}); + auto o3 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_3}); + auto o4 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_4}); + auto o5 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_5}); + auto o6 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_6}); + new_product = + o1 * o2 * o3 * o4 * o5 * o6 * new_product * op_to_tens(factor); + } else if (it == product->as().factors().size() - 1 && + factor->is() && + factor->as().rank() == 2) { std::wstring label_1; std::wstring label_2; std::wstring label_3; @@ -84,39 +104,50 @@ ExprPtr overlap_with_obs(ExprPtr ex_){ label_2 = factor->as().annihilators()[1].index().label(); label_3 = factor->as().creators()[0].index().label(); label_4 = factor->as().creators()[1].index().label(); - auto o1 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_1}); - auto o2 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_2}); - auto o3 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_3}); - auto o4 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_4}); + auto o1 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_1}); + auto o2 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_2}); + auto o3 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_3}); + auto o4 = make_overlap( + 
Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_4}); new_product = o1 * o2 * o3 * o4 * new_product * op_to_tens(factor); - } - else if (it == product->as().factors().size() - 1 && factor->is() && factor->as().rank() == 1){ + } else if (it == product->as().factors().size() - 1 && + factor->is() && + factor->as().rank() == 1) { std::wstring label_1; std::wstring label_3; label_1 = factor->as().annihilators()[0].index().label(); label_3 = factor->as().creators()[0].index().label(); - auto o1 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_1}); - auto o3 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_3}); + auto o1 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_1}); + auto o3 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_3}); new_product = o1 * o3 * new_product * op_to_tens(factor); - } - else if (factor->is() && factor->as().label() == L"\\Gamma" && factor->as().rank() == 1){ + } else if (factor->is() && + factor->as().label() == L"\\Gamma" && + factor->as().rank() == 1) { std::wstring label_2; std::wstring label_4; label_2 = factor->as().ket()[0].label(); label_4 = factor->as().bra()[0].label(); - auto o1 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_2}); - auto o3 = make_overlap(Index{label_4},Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all))); - new_product = o1 * o3 * factor * new_product; - } - else if (factor->is() && factor->as().label() == L"\\Gamma" && factor->as().rank() == 2){ + auto o1 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_2}); + auto o3 = make_overlap( + Index{label_4}, + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all))); + new_product = o1 * o3 * factor * new_product; + } else if (factor->is() && + factor->as().label() == 
L"\\Gamma" && + factor->as().rank() == 2) { std::wstring label_1; std::wstring label_3; std::wstring label_2; @@ -125,126 +156,142 @@ ExprPtr overlap_with_obs(ExprPtr ex_){ label_3 = factor->as().bra()[0].label(); label_2 = factor->as().ket()[1].label(); label_4 = factor->as().bra()[1].label(); - auto o1 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_1}); - auto o3 = make_overlap(Index{label_3}, Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all))); - auto o2 = make_overlap(Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all)),Index{label_2}); - auto o4 = make_overlap(Index{label_4},Index::make_tmp_index(IndexSpace::instance( - IndexSpace::all))); - new_product = o1 * o3 * o2 * o4 * factor * new_product; + auto o1 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_1}); + auto o3 = make_overlap( + Index{label_3}, + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all))); + auto o2 = make_overlap( + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), + Index{label_2}); + auto o4 = make_overlap( + Index{label_4}, + Index::make_tmp_index(IndexSpace::instance(IndexSpace::all))); + new_product = o1 * o3 * o2 * o4 * factor * new_product; + } else { + new_product = new_product * factor; } - else{new_product = new_product * factor;} } new_product = new_product; overlap_expr = overlap_expr + new_product; } FWickTheorem wick{overlap_expr}; - //std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; + // std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; wick.reduce(overlap_expr); - simplify(overlap_expr); - //std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; + non_canon_simplify(overlap_expr); + // std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; return overlap_expr; } using IDX_list = std::initializer_list; -//in various transformation methods it seems as if the constants are removed or treated separatly from 
the main transformed hamiltonian expression. -ExprPtr remove_const(const ExprPtr ex_){ +// in various transformation methods it seems as if the constants are removed or +// treated separatly from the main transformed hamiltonian expression. +ExprPtr remove_const(const ExprPtr ex_) { auto new_expression = ex(0); - if (ex_->is()){ - for (auto&& product : ex_->as().summands()){ + if (ex_->is()) { + for (auto&& product : ex_->as().summands()) { bool has_fnop = false; - for (auto&& factor : product->as().factors()){ - if (factor->is()){ + for (auto&& factor : product->as().factors()) { + if (factor->is()) { has_fnop = true; } } - if (has_fnop){ new_expression = new_expression + product;} + if (has_fnop) { + new_expression = new_expression + product; + } } } non_canon_simplify(new_expression); return new_expression; } -//params ex_ : a product to replace indices on. -// og: original index in the product to be replaced -// newer: the new index which replaces the original index. -Product replace_idx(ExprPtr ex_, Index og, Index newer){ +// params ex_ : a product to replace indices on. +// og: original index in the product to be replaced +// newer: the new index which replaces the original index. 
+Product replace_idx(ExprPtr ex_, Index og, Index newer) { assert(ex_->is()); auto constant = ex_->as().scalar(); auto new_product = ex(1); - for(auto&& factor : ex_->as().factors()){ - if(factor->is()){ + for (auto&& factor : ex_->as().factors()) { + if (factor->is()) { std::vector new_bras; - for(auto&& bra : factor->as().bra()){ - if(bra.label() == og.label()){ + for (auto&& bra : factor->as().bra()) { + if (bra.label() == og.label()) { new_bras.push_back(newer); + } else { + new_bras.push_back(bra); } - else{new_bras.push_back(bra);} } std::vector new_kets; - for(auto&& ket : factor->as().ket()){ - if(ket.label() == og.label()){ + for (auto&& ket : factor->as().ket()) { + if (ket.label() == og.label()) { new_kets.push_back(newer); + } else { + new_kets.push_back(ket); } - else{new_kets.push_back(ket);} } - auto new_tensor = ex(factor->as().label(), new_bras, new_kets); + auto new_tensor = + ex(factor->as().label(), new_bras, new_kets); new_product = new_tensor * new_product; } - if(factor->is()){ + if (factor->is()) { std::vector new_cres; - for (auto&& cre : factor->as().creators()){ - if(cre.index().label() == og.label()){ + for (auto&& cre : factor->as().creators()) { + if (cre.index().label() == og.label()) { new_cres.push_back(newer); + } else { + new_cres.push_back(cre.index()); } - else{new_cres.push_back(cre.index());} } std::vector new_anns; - for (auto&& ann : factor->as().annihilators()){ - if(ann.index().label() == og.label()){ + for (auto&& ann : factor->as().annihilators()) { + if (ann.index().label() == og.label()) { new_anns.push_back(newer); + } else { + new_anns.push_back(ann.index()); } - else{new_anns.push_back(ann.index());} } - if (factor->as().ncreators() == 1){ - auto o1 = make_overlap({L"p_7"},new_anns[0]); - auto o3 = make_overlap(new_cres[0],{L"p_9"}); + if (factor->as().ncreators() == 1) { + auto o1 = make_overlap({L"p_7"}, new_anns[0]); + auto o3 = make_overlap(new_cres[0], {L"p_9"}); new_product = new_product * o1 * o3; - } - else 
if(factor->as().ncreators() == 2){ - auto o1 = make_overlap({L"p_7"},new_anns[0]); - auto o2 = make_overlap({L"p_8"},new_anns[1]); - auto o3 = make_overlap(new_cres[0],{L"p_9"}); - auto o4 = make_overlap(new_cres[1],{L"p_10"}); + } else if (factor->as().ncreators() == 2) { + auto o1 = make_overlap({L"p_7"}, new_anns[0]); + auto o2 = make_overlap({L"p_8"}, new_anns[1]); + auto o3 = make_overlap(new_cres[0], {L"p_9"}); + auto o4 = make_overlap(new_cres[1], {L"p_10"}); new_product = new_product * o1 * o2 * o3 * o4; + } else { + throw "does not handle size > 2"; } - else{throw "does not handle size > 2";} - //auto new_op = ex(new_cres,new_anns); - // new_product = new_product * new_op; + // auto new_op = ex(new_cres,new_anns); + // new_product = new_product * new_op; } } auto result = (ex(constant) * new_product); return result->as(); } -//convert a sequant::Tensor to a sequant::FNOperator +// convert a sequant::Tensor to a sequant::FNOperator ExprPtr tens_to_op(ExprPtr ex_) { assert(ex_->is()); - auto result = ex(ex_->as().ket(),ex_->as().bra()); + auto result = + ex(ex_->as().ket(), ex_->as().bra()); return result; } -// F tensors must contain contain indices in the bra with space > all. this includes complete, completeunoccupied, and inactiveunoccupied. -// and if one of the particle indices is connected to the obs virtual space, then the other must be from the CABS set. i.e. if G^{a \beta}_{ij} -> G^{a a'}_{ij} +// F tensors must contain contain indices in the bra with space > all. this +// includes complete, completeunoccupied, and inactiveunoccupied. and if one of +// the particle indices is connected to the obs virtual space, then the other +// must be from the CABS set. i.e. 
if G^{a \beta}_{ij} -> G^{a a'}_{ij} ExprPtr screen_F_tensors(ExprPtr ex_, int ansatz = 2) { assert(ex_->is()); assert(ex_->as().label() == L"F"); auto overlap = ex(1); bool good = false; bool bra_good = false; - if(ansatz == 2) { + if (ansatz == 2) { for (int i = 0; i < ex_->as().bra().size(); i++) { auto bra = ex_->as().bra()[i]; if (bra.space().type() == IndexSpace::complete || @@ -264,7 +311,11 @@ ExprPtr screen_F_tensors(ExprPtr ex_, int ansatz = 2) { bra.space().type() == IndexSpace::all) && bra_good && (bra.space().type() != - IndexSpace::complete_unoccupied)) { // if one of the upper indices is explicitly outside of CABs, create an overlap with the other index and the CABs space. + IndexSpace::complete_unoccupied)) { // if one of the upper indices + // is explicitly outside of + // CABs, create an overlap with + // the other index and the CABs + // space. if (i == 0) { overlap = make_overlap(Index::make_tmp_index(IndexSpace::instance( IndexSpace::other_unoccupied)), @@ -313,112 +364,128 @@ ExprPtr screen_F_tensors(ExprPtr ex_, int ansatz = 2) { } else { return ex(0); } - } - else if (ansatz == 1){ + } else if (ansatz == 1) { bool non_zero = false; - bool bra_proj_space = false;// perhaps a better way would be to create a child class of tensor for G tensor which can keep track of geminal generating and projector at construction. + bool bra_proj_space = + false; // perhaps a better way would be to create a child class of + // tensor for G tensor which can keep track of geminal + // generating and projector at construction. 
for (int i = 0; i < ex_->as().bra().size(); i++) { auto bra = ex_->as().bra()[i]; - if(bra.space().type() == IndexSpace::complete || + if (bra.space().type() == IndexSpace::complete || bra.space().type() == IndexSpace::complete_unoccupied || - bra.space().type() == IndexSpace::other_unoccupied){ + bra.space().type() == IndexSpace::other_unoccupied) { bra_proj_space = true; non_zero = true; } } - if (bra_proj_space){ + if (bra_proj_space) { for (int i = 0; i < ex_->as().bra().size(); i++) { auto bra = ex_->as().bra()[i]; - auto overlap1 = make_overlap(Index::make_tmp_index(IndexSpace::instance(IndexSpace::other_unoccupied)),bra); + auto overlap1 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::other_unoccupied)), + bra); ex_ = overlap1 * ex_; } } - bool ket_proj_space = false;// perhaps a better way would be to create a child class of tensor for G tensor which can keep track of geminal generating and projector at construction. + bool ket_proj_space = + false; // perhaps a better way would be to create a child class of + // tensor for G tensor which can keep track of geminal + // generating and projector at construction. 
for (int i = 0; i < ex_->as().ket().size(); i++) { auto ket = ex_->as().ket()[i]; - if(ket.space().type() == IndexSpace::complete || + if (ket.space().type() == IndexSpace::complete || ket.space().type() == IndexSpace::complete_unoccupied || - ket.space().type() == IndexSpace::other_unoccupied){ + ket.space().type() == IndexSpace::other_unoccupied) { ket_proj_space = true; non_zero = true; } } - if (ket_proj_space){ + if (ket_proj_space) { for (int i = 0; i < ex_->as().ket().size(); i++) { auto ket = ex_->as().ket()[i]; - auto overlap1 = make_overlap(Index::make_tmp_index(IndexSpace::instance(IndexSpace::other_unoccupied)),ket); + auto overlap1 = make_overlap(Index::make_tmp_index(IndexSpace::instance( + IndexSpace::other_unoccupied)), + ket); ex_ = overlap1 * ex_; } } - if (non_zero){ + if (non_zero) { return ex_; - } - else{ + } else { return ex(0.0); } } } -ExprPtr screen_density(ExprPtr ex_){// densities probably should be non-zero if each index has a chance to be occupied, in other words, screen out densities containing unoccupied labels. +ExprPtr screen_density( + ExprPtr ex_) { // densities probably should be non-zero if each index has a + // chance to be occupied, in other words, screen out + // densities containing unoccupied labels. 
assert(ex_->is()); - assert(ex_->as().label() == L"\\Gamma" || ex_->as().label() == L"\\gamma"); + assert(ex_->as().label() == L"\\Gamma" || + ex_->as().label() == L"\\gamma"); bool occ_space = true; - for (auto&& bra : ex_->as().bra()){ - if (bra.space().type() == IndexSpace::unoccupied || bra.space().type() == IndexSpace::complete_unoccupied){ + for (auto&& bra : ex_->as().bra()) { + if (bra.space().type() == IndexSpace::unoccupied || + bra.space().type() == IndexSpace::complete_unoccupied) { occ_space = false; } } - for (auto&& ket : ex_->as().ket()){ - if (ket.space().type() == IndexSpace::unoccupied || ket.space().type() == IndexSpace::complete_unoccupied){ + for (auto&& ket : ex_->as().ket()) { + if (ket.space().type() == IndexSpace::unoccupied || + ket.space().type() == IndexSpace::complete_unoccupied) { occ_space = false; } } - if(occ_space){ + if (occ_space) { return ex_; + } else { + return ex(0); } - else{return ex(0);} }; -ExprPtr screen_densities(ExprPtr ex_){ - if(ex_->is()){ - for (auto && product : ex_->as().summands()){ - for (auto && factor : product->as()){ - if(factor->as().label() == L"\\Gamma" || factor->as().label() == L"\\gamma") { +ExprPtr screen_densities(ExprPtr ex_) { + if (ex_->is()) { + for (auto&& product : ex_->as().summands()) { + for (auto&& factor : product->as()) { + if (factor->as().label() == L"\\Gamma" || + factor->as().label() == L"\\gamma") { factor = screen_density(factor); } } } return ex_; - } - else if(ex_->is()){ - for (auto && factor : ex_->as()){ - if(factor->as().label() == L"\\Gamma" || factor->as().label() == L"\\gamma") { + } else if (ex_->is()) { + for (auto&& factor : ex_->as()) { + if (factor->as().label() == L"\\Gamma" || + factor->as().label() == L"\\gamma") { factor = screen_density(factor); } } return ex_; - } - else if(ex_->is()){ + } else if (ex_->is()) { return screen_density(ex_); + } else { + throw "unsupported operation for screening densities"; } - else{throw "unsupported operation for screening 
densities";} } -//based on Brillouin's Theory, the fock matrix should be block diagonal. -// generalized says that complete unoccupied might be non-zero with obs unocc, but zero with occ and complete unocc. -// lets assume normal Brillouin's Theory. -auto treat_fock(ExprPtr ex_){ +// based on Brillouin's Theory, the fock matrix should be block diagonal. +// generalized says that complete unoccupied might be non-zero with obs unocc, +// but zero with occ and complete unocc. lets assume normal Brillouin's Theory. +auto treat_fock(ExprPtr ex_) { auto new_ex_ = ex(0); - for (auto&& product : ex_->as().summands()){ + for (auto&& product : ex_->as().summands()) { double real = product->as().scalar().real(); auto new_product = ex(real); - for (auto&& factor : product->as().factors()){ - if (factor->is() && factor->as().label() == L"f"){ + for (auto&& factor : product->as().factors()) { + if (factor->is() && factor->as().label() == L"f") { // TODO do not assume EBC - auto space = intersection(factor->as().bra()[0].space(), factor->as().ket()[0].space()); - if(space.type().none()){ + auto space = intersection(factor->as().bra()[0].space(), + factor->as().ket()[0].space()); + if (space.type().none()) { new_product = ex(0) * new_product; - } - else { + } else { auto bra_index = Index::make_tmp_index(IndexSpace::instance(space.type())); auto ket_index = @@ -430,136 +497,150 @@ auto treat_fock(ExprPtr ex_){ make_overlap(factor->as().ket()[0], ket_index); new_product = bra_overlap * ket_overlap * factor * new_product; } - } - else new_product = new_product * factor; + } else + new_product = new_product * factor; } new_ex_ = new_ex_ + new_product; } - FWickTheorem wick{new_ex_}; + FWickTheorem wick{new_ex_}; wick.reduce(new_ex_); non_canon_simplify(new_ex_); return new_ex_; } -//to Identify the relavant F12 intermediates, the number of connections,the connected space, and the resulting ket() and bra() of the intermediate tensor are needed. 
-std::tuple,std::vector,bool> ncon_spa_extket_extbra(Tensor T1, Tensor T2,bool print_ = false){ - //connected space. in each example in f12, the connected space is the same between two tensors. - auto space = IndexSpace::occupied; // just a default used for construction. - //depreciated should be a braket function somewhere in Tensor. +// to Identify the relavant F12 intermediates, the number of connections,the +// connected space, and the resulting ket() and bra() of the intermediate tensor +// are needed. +std::tuple, std::vector, bool> +ncon_spa_extket_extbra(Tensor T1, Tensor T2, bool print_ = false) { + // connected space. in each example in f12, the connected space is the same + // between two tensors. + auto space = IndexSpace::occupied; // just a default used for construction. + // depreciated should be a braket function somewhere in Tensor. std::vector T1_is; std::vector T2_is; - //ordered list of ket and bra indices which construct the resulting intermediate. + // ordered list of ket and bra indices which construct the resulting + // intermediate. std::vector external_ket; std::vector external_bra; - //do the external ket indices correspond to T1? - // only need for intermediates and only works for V or X. + // do the external ket indices correspond to T1? + // only need for intermediates and only works for V or X. bool T1_ket; - //unique list of connected indices. list is searched often to see if a given index is connected. + // unique list of connected indices. list is searched often to see if a given + // index is connected. 
std::vector connected_indices; int nconnects = 0; - for (auto&& bra : T1.bra()){ + for (auto&& bra : T1.bra()) { T1_is.push_back(bra); } - for (auto&& ket : T1.ket()){ + for (auto&& ket : T1.ket()) { T1_is.push_back(ket); } - for (auto&& bra : T2.bra()){ + for (auto&& bra : T2.bra()) { T2_is.push_back(bra); } - for (auto&& ket : T2.ket()){ + for (auto&& ket : T2.ket()) { T2_is.push_back(ket); } - for (int i1 = 0; i1 < T1_is.size(); i1++){ - for ( int i2 = 0; i2 occ since the obs includes frozen core orbitals. -ExprPtr densities_to_occ(const ExprPtr& ex_){ +// densities are enforced to map obs -> occ since the obs includes frozen core +// orbitals. +ExprPtr densities_to_occ(const ExprPtr& ex_) { auto result = ex(0); - for (auto&& product : ex_->as().summands()){ + for (auto&& product : ex_->as().summands()) { auto new_product = ex(product->as().scalar()); - for (auto&& factor : product->as().factors()){ - if (factor->is() && (factor->as().label() == L"\\Gamma" || factor->as().label() == L"\\gamma")){ + for (auto&& factor : product->as().factors()) { + if (factor->is() && + (factor->as().label() == L"\\Gamma" || + factor->as().label() == L"\\gamma")) { for (size_t i = 0; i < factor->as().bra().size(); i++) { - if(factor->as().bra()[i].space() == IndexSpace::all) { + if (factor->as().bra()[i].space() == IndexSpace::all) { new_product = new_product * make_overlap(factor->as().bra()[i], Index::make_tmp_index( IndexSpace::instance(IndexSpace::occupied))); } - if(factor->as().ket()[i].space() == IndexSpace::all){ + if (factor->as().ket()[i].space() == IndexSpace::all) { new_product = new_product * make_overlap(factor->as().ket()[i], @@ -594,114 +678,139 @@ ExprPtr densities_to_occ(const ExprPtr& ex_){ } } new_product = factor * new_product; - } result = result + new_product; - } - FWickTheorem wick {result}; + FWickTheorem wick{result}; wick.reduce(result); - non_canon_simplify (result); + non_canon_simplify(result); return result; } -//constructs a biproduct intermediate 
tensor from a given two tensors in an expression. -ExprPtr biproduct_intermediate(ExprPtr T1,ExprPtr T2){ - assert (T1->is()); - assert (T2->is()); +// constructs a biproduct intermediate tensor from a given two tensors in an +// expression. +ExprPtr biproduct_intermediate(ExprPtr T1, ExprPtr T2) { + assert(T1->is()); + assert(T2->is()); auto result = ex(1); - auto [nconnects,space, external_ket, external_bra,T1_ket] = ncon_spa_extket_extbra(T1->as(),T2->as()); - if (T1->as().label() == L"g" || T2->as().label() == L"g"){ - if (nconnects == 2 && space == IndexSpace::complete_unoccupied){ - //V^pq_ij - //intermediate decomposition handled by SeQuant so space labels can be properly handled - if(T1_ket){ - auto GR_ijpq = ex(L"GR", IDX_list{external_bra[0],external_bra[1]}, IDX_list{external_ket[0],external_ket[1]}); - auto F_ijrs = ex(L"F", IDX_list{external_bra[0],external_bra[1]}, - IDX_list{L"p_11",L"p_12"}); - auto g_rspq = ex(L"g",IDX_list{L"p_11",L"p_12"}, - IDX_list{external_ket[0],external_ket[1]}); - auto F_ijmc = ex(L"F", IDX_list{external_bra[0],external_bra[1]}, - IDX_list{L"m_6", L"α'_4"}); - auto g_mcpq = ex(L"g",IDX_list{L"m_6", L"α'_4"}, - IDX_list{external_ket[0],external_ket[1]}); - auto F_jicm = ex(L"F", IDX_list{external_bra[1],external_bra[0]}, IDX_list{L"α'_4",L"m_6"}); - auto g_cmqp = ex(L"g",IDX_list{L"α'_4",L"m_6"},IDX_list{external_ket[1],external_ket[0]}); + auto [nconnects, space, external_ket, external_bra, T1_ket] = + ncon_spa_extket_extbra(T1->as(), T2->as()); + if (T1->as().label() == L"g" || T2->as().label() == L"g") { + if (nconnects == 2 && space == IndexSpace::complete_unoccupied) { + // V^pq_ij + // intermediate decomposition handled by SeQuant so space labels can be + // properly handled + if (T1_ket) { + auto GR_ijpq = + ex(L"GR", IDX_list{external_bra[0], external_bra[1]}, + IDX_list{external_ket[0], external_ket[1]}); + auto F_ijrs = + ex(L"F", IDX_list{external_bra[0], external_bra[1]}, + IDX_list{L"p_11", L"p_12"}); + auto 
g_rspq = ex(L"g", IDX_list{L"p_11", L"p_12"}, + IDX_list{external_ket[0], external_ket[1]}); + auto F_ijmc = + ex(L"F", IDX_list{external_bra[0], external_bra[1]}, + IDX_list{L"m_6", L"α'_4"}); + auto g_mcpq = ex(L"g", IDX_list{L"m_6", L"α'_4"}, + IDX_list{external_ket[0], external_ket[1]}); + auto F_jicm = + ex(L"F", IDX_list{external_bra[1], external_bra[0]}, + IDX_list{L"α'_4", L"m_6"}); + auto g_cmqp = ex(L"g", IDX_list{L"α'_4", L"m_6"}, + IDX_list{external_ket[1], external_ket[0]}); auto V = GR_ijpq - F_ijrs * g_rspq - F_ijmc * g_mcpq - F_jicm * g_cmqp; non_canon_simplify(V); return V; - } - else{ - auto GR_pqij = ex(L"GR", IDX_list{external_bra[0],external_bra[1]}, IDX_list{external_ket[0],external_ket[1]}); - auto F_rsij = ex(L"F",IDX_list{L"p_11",L"p_12"},IDX_list{external_ket[0],external_ket[1]}); - auto g_pqrs = ex(L"g",IDX_list{external_bra[0],external_bra[1]},IDX_list{L"p_11",L"p_12"}); - auto F_mcij = ex(L"F", IDX_list{L"m_6", L"α'_4"}, IDX_list{external_ket[0],external_ket[1]}); - auto g_pqmc = ex(L"g",IDX_list{external_bra[0],external_bra[1]},IDX_list{L"m_6", L"α'_4"}); - auto F_cmji = ex(L"F",IDX_list{L"α'_4",L"m_6"},IDX_list{external_ket[1],external_ket[0]}); - auto g_qpcm = ex(L"g",IDX_list{external_bra[1],external_bra[0]},IDX_list{L"α'_4",L"m_6"}); + } else { + auto GR_pqij = + ex(L"GR", IDX_list{external_bra[0], external_bra[1]}, + IDX_list{external_ket[0], external_ket[1]}); + auto F_rsij = ex(L"F", IDX_list{L"p_11", L"p_12"}, + IDX_list{external_ket[0], external_ket[1]}); + auto g_pqrs = + ex(L"g", IDX_list{external_bra[0], external_bra[1]}, + IDX_list{L"p_11", L"p_12"}); + auto F_mcij = ex(L"F", IDX_list{L"m_6", L"α'_4"}, + IDX_list{external_ket[0], external_ket[1]}); + auto g_pqmc = + ex(L"g", IDX_list{external_bra[0], external_bra[1]}, + IDX_list{L"m_6", L"α'_4"}); + auto F_cmji = ex(L"F", IDX_list{L"α'_4", L"m_6"}, + IDX_list{external_ket[1], external_ket[0]}); + auto g_qpcm = + ex(L"g", IDX_list{external_bra[1], external_bra[0]}, + 
IDX_list{L"α'_4", L"m_6"}); auto V = GR_pqij - F_rsij * g_pqrs - F_mcij * g_pqmc - F_cmji * g_qpcm; non_canon_simplify(V); return V; } - } - else{ + } else { result = T1 * T2; } - } - else{ - if (nconnects == 2 && space == IndexSpace::complete_unoccupied){ - //X^kl_ij - auto X_klij = ex(L"X", IDX_list{external_bra[0],external_bra[1]}, IDX_list{external_ket[0],external_ket[1]}); - result = X_klij; + } else { + if (nconnects == 2 && space == IndexSpace::complete_unoccupied) { + // X^kl_ij + auto X_klij = ex(L"X", IDX_list{external_bra[0], external_bra[1]}, + IDX_list{external_ket[0], external_ket[1]}); + result = X_klij; - } - else if (nconnects == 1 && space == IndexSpace::complete_unoccupied){ - //B^kl_ij - auto B_klij = ex(L"B", IDX_list{external_bra[0],external_bra[1]}, IDX_list{external_ket[0],external_ket[1]}); - result = B_klij; - } - else if (nconnects == 0){ - //return original expression (no simplifications to be made) + } else if (nconnects == 1 && space == IndexSpace::complete_unoccupied) { + // B^kl_ij + auto B_klij = ex(L"B", IDX_list{external_bra[0], external_bra[1]}, + IDX_list{external_ket[0], external_ket[1]}); + result = B_klij; + } else if (nconnects == 0) { + // return original expression (no simplifications to be made) result = T1 * T2; - } - else{ + } else { result = T1 * T2; } } return result; } // identify F12 intermediates -//intermediates we generate contain either 2 F or g tensors. +// intermediates we generate contain either 2 F or g tensors. // those expressions are biproduct intermediate for further screening. 
-// special case of B intermediate is handled by an additional check for the fock operator f -ExprPtr find_F12_interms(ExprPtr ex_){ +// special case of B intermediate is handled by an additional check for the fock +// operator f +ExprPtr find_F12_interms(ExprPtr ex_) { assert(ex_->is()); int counter = 0; std::vector T1_T2; - for (auto&& factors : ex_->as().factors()){ + for (auto&& factors : ex_->as().factors()) { assert(!factors->is()); - if(factors->as().label() == L"F" || factors->as().label() == L"g"){ + if (factors->as().label() == L"F" || + factors->as().label() == L"g") { T1_T2.push_back(factors); - counter +=1; + counter += 1; } } - for (auto&& factors : ex_->as().factors()){//have to loop through again unfourtunately to remove the factors that were combined. - if(factors->as().label() == L"F" || factors->as().label() == L"g"){ - if(counter == 2){ + for (auto&& factors : + ex_->as() + .factors()) { // have to loop through again unfourtunately to remove + // the factors that were combined. + if (factors->as().label() == L"F" || + factors->as().label() == L"g") { + if (counter == 2) { factors = ex(1); } } } assert(T1_T2.size() <= 2); - if (T1_T2.size() == 2){ + if (T1_T2.size() == 2) { assert(counter == 2); auto result = biproduct_intermediate(T1_T2[0], T1_T2[1]); - if(result->is() && result->as().label() == L"B"){ - for (auto&& factors : ex_->as().factors()){//have to find fock matrix and remove. factor 1/2 because a product only finds 1/2 of the B tensor, a sum of two products. - if(factors->is() && factors->as().label() == L"f"){ + if (result->is() && result->as().label() == L"B") { + for (auto&& factors : + ex_->as() + .factors()) { // have to find fock matrix and remove. factor 1/2 + // because a product only finds 1/2 of the B + // tensor, a sum of two products. 
+ if (factors->is() && factors->as().label() == L"f") { factors = ex(1.); } } @@ -714,20 +823,23 @@ ExprPtr find_F12_interms(ExprPtr ex_){ return ex_; } -//in hamiltonian based transformations, it is important to retain the original form of the hamiltonian operator. that is h^p_q E^q_p + 1/2 g^{pq}_{rs} E^{rs}_{pq}. -//to achieve this form, the tensor part of the expression must contain overlaps in place of the normal ordered operators. -//here we chose a canonical form for E^{p_7}_{p_9} and E^{p_7 p_8}_{p_9 p_10} -// this also simultaneously partitions the result into one and two body terms. -std::pair fnop_to_overlap(ExprPtr exprs){ +// in hamiltonian based transformations, it is important to retain the original +// form of the hamiltonian operator. that is h^p_q E^q_p + 1/2 g^{pq}_{rs} +// E^{rs}_{pq}. to achieve this form, the tensor part of the expression must +// contain overlaps in place of the normal ordered operators. here we chose a +// canonical form for E^{p_7}_{p_9} and E^{p_7 p_8}_{p_9 p_10} +// this also simultaneously partitions the result into one and two body terms. 
+std::pair fnop_to_overlap(ExprPtr exprs) { auto one_body_result = ex(0); auto two_body_result = ex(0); - for (auto&& product : exprs->as().summands()){ + for (auto&& product : exprs->as().summands()) { auto one_body_product = ex(product->as().scalar()); auto two_body_product = ex(product->as().scalar()); - for (auto&& factor : product->as().factors()){ - if(factor->is() && (factor->as().label() == L"E" || factor->as().label() == L"a")){ + for (auto&& factor : product->as().factors()) { + if (factor->is() && (factor->as().label() == L"E" || + factor->as().label() == L"a")) { factor = tens_to_op(factor); - if(factor->is()) { + if (factor->is()) { if (factor->as().ncreators() == 1) { auto o1 = make_overlap( {L"p_7"}, factor->as().creators()[0].index()); @@ -748,9 +860,10 @@ std::pair fnop_to_overlap(ExprPtr exprs){ one_body_product = one_body_product * ex(0); } } + } else { + one_body_product = factor * one_body_product; + two_body_product = factor * two_body_product; } - else{one_body_product = factor * one_body_product; - two_body_product = factor * two_body_product;} } one_body_result = one_body_product + one_body_result; two_body_result = two_body_product + two_body_result; @@ -760,38 +873,49 @@ std::pair fnop_to_overlap(ExprPtr exprs){ return {one_body_result, two_body_result}; } -std::pair contains_tens(ExprPtr ex_, std::wstring label){ - if(!ex_->is()){ +std::pair contains_tens(ExprPtr ex_, std::wstring label) { + if (!ex_->is()) { ex_ = ex(1.) * ex_; } int it = 0; - for (auto&& factor : ex_->as().factors()){ - if(factor->is() && factor->as().label() == label){ return {true,factor};} - it ++; + for (auto&& factor : ex_->as().factors()) { + if (factor->is() && factor->as().label() == label) { + return {true, factor}; + } + it++; } - return {false,ex_}; - + return {false, ex_}; } -//TODO this should be a generalized procedure since the screening process is different for each number of F tensors. 
-// I suppose generally, this should be a product level screening, which first finds the number of F tensors and then picks the correct screening method. -//re-implimentation as a recursive function which gets called every time a delta is found, simplifies/reduces the product and returns. -//products are const and two deltas acting on the same index makes this difficult. logically the product needs to update within its own loop, but it cannot. Alternatively, two delta's to the same index need to occur in the same product, but that breaks things. -//work around. make a copy of product which can be modified? break out of product loop? -ExprPtr screen_F12_proj(ExprPtr exprs,int ansatz = 2){ - if(exprs->is()) { +// TODO this should be a generalized procedure since the screening process is +// different for each number of F tensors. +// I suppose generally, this should be a product level screening, which first +// finds the number of F tensors and then picks the correct screening method. +// re-implimentation as a recursive function which gets called every time a +// delta is found, simplifies/reduces the product and returns. products are +// const and two deltas acting on the same index makes this difficult. logically +// the product needs to update within its own loop, but it cannot. +// Alternatively, two delta's to the same index need to occur in the same +// product, but that breaks things. work around. make a copy of product which +// can be modified? break out of product loop? +ExprPtr screen_F12_proj(ExprPtr exprs, int ansatz = 2) { + if (exprs->is()) { auto return_sum = ex(0); for (auto&& product : exprs->as().summands()) { auto new_product = ex(product->as().scalar().real()); for (auto&& factor : product->as().factors()) { auto temp_factor = ex(1.); if (factor->is() && factor->as().label() == L"F") { - temp_factor = screen_F_tensors(factor,ansatz); // screen F tensors should just provide the delta. 
+ temp_factor = screen_F_tensors( + factor, + ansatz); // screen F tensors should just provide the delta. + } else { + temp_factor = factor; } - else {temp_factor = factor;} auto product_clone = product->clone(); if (contains_tens(temp_factor, L"s").first) { - product_clone = product_clone * contains_tens(temp_factor,L"s").second; + product_clone = + product_clone * contains_tens(temp_factor, L"s").second; FWickTheorem wick_f{product_clone}; wick_f.reduce(product_clone); non_canon_simplify(product_clone); @@ -802,146 +926,191 @@ ExprPtr screen_F12_proj(ExprPtr exprs,int ansatz = 2){ } new_product = new_product * temp_factor; non_canon_simplify(new_product); - } return_sum = new_product + return_sum; } non_canon_simplify(return_sum); return return_sum; - } - else if(exprs->is()) { + } else if (exprs->is()) { auto new_product = ex(exprs->as().scalar()); for (auto&& factor : exprs->as().factors()) { auto temp_factor = ex(1.); - if (factor->is() && factor->as().label() == L"F") { - temp_factor = screen_F_tensors(factor,ansatz); // screen F tensors should just provide the delta. - } - else {temp_factor = factor;} - auto product_clone = exprs->clone(); - if (contains_tens(temp_factor, L"s").first) { - product_clone = product_clone * contains_tens(temp_factor,L"s").second; - FWickTheorem wick_f{product_clone}; - wick_f.reduce(product_clone); - non_canon_simplify(product_clone); - product_clone = screen_F12_proj(product_clone, ansatz); - new_product = product_clone; - break; - } - new_product = new_product * temp_factor; + if (factor->is() && factor->as().label() == L"F") { + temp_factor = screen_F_tensors( + factor, ansatz); // screen F tensors should just provide the delta. 
+ } else { + temp_factor = factor; + } + auto product_clone = exprs->clone(); + if (contains_tens(temp_factor, L"s").first) { + product_clone = product_clone * contains_tens(temp_factor, L"s").second; + FWickTheorem wick_f{product_clone}; + wick_f.reduce(product_clone); + simplify(product_clone); + product_clone = screen_F12_proj(product_clone, ansatz); + new_product = product_clone; + break; + } + new_product = new_product * temp_factor; } return new_product; - } - else return exprs; + } else + return exprs; } -ExprPtr FNOPs_to_tens(ExprPtr ex_){ - if(ex_->is()){ +ExprPtr FNOPs_to_tens(ExprPtr ex_) { + if (ex_->is()) { auto new_sum = ex(0); - for (auto&& product : ex_->as().summands()){ + for (auto&& product : ex_->as().summands()) { auto new_product = ex(product->as().scalar()); - for (auto factor : product->as().factors()){ + for (auto factor : product->as().factors()) { auto new_factor = ex(0); - if(factor->is()){ + if (factor->is()) { new_factor = op_to_tens(factor); assert(!new_factor->is()); + } else { + new_factor = factor; } - else{new_factor = factor;} new_product = new_product * new_factor; } new_sum = new_product + new_sum; } - non_canon_simplify(new_sum); + simplify(new_sum); return new_sum; - } - else if(ex_->is()){ - for(auto&& factor : ex_->as().factors()){ - if(factor->is()) { + } else if (ex_->is()) { + for (auto&& factor : ex_->as().factors()) { + if (factor->is()) { factor = op_to_tens(factor); } } - } - else if(ex_->is()){ + } else if (ex_->is()) { ex_ = op_to_tens(ex_); + } else { + return ex_; } - else {return ex_;} return ex_; } -ExprPtr tens_to_FNOps(ExprPtr ex_){ - if(ex_->is()){ +ExprPtr tens_to_FNOps(ExprPtr ex_) { + if (ex_->is()) { auto new_sum = ex(0); - for (auto&& product : ex_->as().summands()){ + for (auto&& product : ex_->as().summands()) { auto new_product = ex(product->as().scalar()); - for (auto factor : product->as().factors()){ + for (auto factor : product->as().factors()) { auto new_factor = ex(0); - if(factor->is() && 
(factor->as().label() == L"E" || factor->as().label() == L"a")){ + if (factor->is() && (factor->as().label() == L"E" || + factor->as().label() == L"a")) { new_factor = tens_to_op(factor); + } else { + new_factor = factor; } - else{new_factor = factor;} new_product = new_factor * new_product; } new_sum = new_product + new_sum; } - non_canon_simplify(new_sum); + simplify(new_sum); return new_sum; - } - else if(ex_->is()){ - for(auto&& factor : ex_->as().factors()){ - if(factor->is() && (factor->as().label() == L"E" || factor->as().label() == L"a")) { + } else if (ex_->is()) { + for (auto&& factor : ex_->as().factors()) { + if (factor->is() && (factor->as().label() == L"E" || + factor->as().label() == L"a")) { factor = tens_to_op(factor); } } - } - else if(ex_->is() && (ex_->as().label() == L"E" || ex_->as().label() == L"a")){ + } else if (ex_->is() && (ex_->as().label() == L"E" || + ex_->as().label() == L"a")) { ex_ = tens_to_op(ex_); + } else { + return ex_; } - else {return ex_;} return ex_; } -//split F12 operator into its 2 components seen in eq 11. of Chem. Phys. 136, 084107 (2012). -// neccessary in some cases where particles get excited from different spaces. -ExprPtr split_F12(ExprPtr exprs){ +// split F12 operator into its 2 components seen in eq 11. of Chem. Phys. 136, +// 084107 (2012). +// neccessary in some cases where particles get excited from different spaces. 
+ExprPtr split_F12(ExprPtr exprs) { assert(exprs->is()); assert(exprs->as().label() == L"F"); auto result = ex(0); - if((exprs->as().const_braket()[2].space() == sequant::IndexSpace::complete_unoccupied || exprs->as().const_braket()[2].space() == sequant::IndexSpace::other_unoccupied) || exprs->as().const_braket()[3].space() == sequant::IndexSpace::complete_unoccupied || exprs->as().const_braket()[3].space() == sequant::IndexSpace::other_unoccupied) { - auto T1 = ex(3./8) * ex(L"F",std::vector{exprs->as().const_braket()[0],exprs->as().const_braket()[1]},std::vector{exprs->as().const_braket()[2],exprs->as().const_braket()[3]}); - auto T2 = ex(1./8) * ex(L"F",std::vector{exprs->as().const_braket()[1],exprs->as().const_braket()[0]},std::vector{exprs->as().const_braket()[2],exprs->as().const_braket()[3]}); + if ((exprs->as().const_braket()[2].space() == + sequant::IndexSpace::complete_unoccupied || + exprs->as().const_braket()[2].space() == + sequant::IndexSpace::other_unoccupied) || + exprs->as().const_braket()[3].space() == + sequant::IndexSpace::complete_unoccupied || + exprs->as().const_braket()[3].space() == + sequant::IndexSpace::other_unoccupied) { + auto T1 = + ex(3. / 8) * + ex(L"F", + std::vector{exprs->as().const_braket()[0], + exprs->as().const_braket()[1]}, + std::vector{exprs->as().const_braket()[2], + exprs->as().const_braket()[3]}); + auto T2 = + ex(1. / 8) * + ex(L"F", + std::vector{exprs->as().const_braket()[1], + exprs->as().const_braket()[0]}, + std::vector{exprs->as().const_braket()[2], + exprs->as().const_braket()[3]}); result = T1 + T2; return result; - } - else{// otherwise the geminal generating space must be in the upper indices. so include exchange for those. 
- assert((exprs->as().const_braket()[0].space() == sequant::IndexSpace::complete_unoccupied || exprs->as().const_braket()[0].space() == sequant::IndexSpace::other_unoccupied) || (exprs->as().const_braket()[1].space() == sequant::IndexSpace::complete_unoccupied || exprs->as().const_braket()[1].space() == sequant::IndexSpace::other_unoccupied)); - auto T1 = ex(3./8) * ex(L"F",std::vector{exprs->as().const_braket()[0],exprs->as().const_braket()[1]},std::vector{exprs->as().const_braket()[2],exprs->as().const_braket()[3]}); - auto T2 = ex(1./8) * ex(L"F",std::vector{exprs->as().const_braket()[0],exprs->as().const_braket()[1]},std::vector{exprs->as().const_braket()[3],exprs->as().const_braket()[2]}); + } else { // otherwise the geminal generating space must be in the upper + // indices. so include exchange for those. + assert((exprs->as().const_braket()[0].space() == + sequant::IndexSpace::complete_unoccupied || + exprs->as().const_braket()[0].space() == + sequant::IndexSpace::other_unoccupied) || + (exprs->as().const_braket()[1].space() == + sequant::IndexSpace::complete_unoccupied || + exprs->as().const_braket()[1].space() == + sequant::IndexSpace::other_unoccupied)); + auto T1 = + ex(3. / 8) * + ex(L"F", + std::vector{exprs->as().const_braket()[0], + exprs->as().const_braket()[1]}, + std::vector{exprs->as().const_braket()[2], + exprs->as().const_braket()[3]}); + auto T2 = + ex(1. 
/ 8) * + ex(L"F", + std::vector{exprs->as().const_braket()[0], + exprs->as().const_braket()[1]}, + std::vector{exprs->as().const_braket()[3], + exprs->as().const_braket()[2]}); result = T1 + T2; return result; } } -ExprPtr partition_F12(ExprPtr exprs){ - if(!exprs->is()){ +ExprPtr partition_F12(ExprPtr exprs) { + if (!exprs->is()) { return exprs; } - for (auto&& product : exprs->as().summands()){ - for (auto&& factor : product->as().factors()){ - if(factor->is() && factor->as().label() == L"F") { + for (auto&& product : exprs->as().summands()) { + for (auto&& factor : product->as().factors()) { + if (factor->is() && factor->as().label() == L"F") { factor = split_F12(factor); } } } non_canon_simplify(exprs); - return(exprs); + return (exprs); } -//TODO generalize for spin-orbital basis -//simplification to deal with hamiltonian based expressions. involving one body h and two body g tensors. -// not rigorous for more than 2 body operators or more than 2 density matrices whose rank must be <= 2. -// unfortunately, simplify(result) and wick.reduce(result) will recanonicalize the indices. -// enforces the following obs convention. E^{p_7}_{p_9} and E^{{p_7}{p_8}}_{{p_9}{p_{10}}} -// should allow analysis of multiple expressions who have the same normal order operator prefactor. -std::pair hamiltonian_based_projector_2(ExprPtr exprs){ +// TODO generalize for spin-orbital basis +// simplification to deal with hamiltonian based expressions. involving one body +// h and two body g tensors. +// not rigorous for more than 2 body operators or more than 2 density matrices +// whose rank must be <= 2. unfortunately, simplify(result) and +// wick.reduce(result) will recanonicalize the indices. enforces the following +// obs convention. E^{p_7}_{p_9} and E^{{p_7}{p_8}}_{{p_9}{p_{10}}} should +// allow analysis of multiple expressions who have the same normal order +// operator prefactor. 
+std::pair hamiltonian_based_projector_2(ExprPtr exprs) { exprs = FNOPs_to_tens(exprs); simplify(exprs); exprs = partition_F12(exprs); @@ -951,87 +1120,97 @@ std::pair hamiltonian_based_projector_2(ExprPtr exprs){ exprs = screen_densities(exprs); simplify(exprs); auto exprs_intmed = ex(0.0); - for (auto&& product : exprs->as().summands()){ - auto new_product = simplification::find_F12_interms(product); - exprs_intmed = new_product + exprs_intmed; + for (auto&& product : exprs->as().summands()) { + auto new_product = simplification::find_F12_interms(product); + exprs_intmed = new_product + exprs_intmed; } - simplify(exprs_intmed); + non_canon_simplify(exprs_intmed); return fnop_to_overlap(exprs_intmed); } -// here G can only have projection to the alpha and Beta space otherwise projector constructs it to be be zero. -std::pair hamiltonian_based_projector_1(ExprPtr exprs){ +// here G can only have projection to the alpha and Beta space otherwise +// projector constructs it to be be zero. +std::pair hamiltonian_based_projector_1(ExprPtr exprs) { exprs = FNOPs_to_tens(exprs); simplify(exprs); exprs = partition_F12(exprs); - simplify(exprs);; + simplify(exprs); + ; exprs = screen_F12_proj(exprs, 1); simplify(exprs); auto exprs_intmed = ex(0.0); - for (auto&& product : exprs->as().summands()){ + for (auto&& product : exprs->as().summands()) { auto new_product = simplification::find_F12_interms(product); exprs_intmed = new_product + exprs_intmed; } simplify(exprs_intmed); return fnop_to_overlap(exprs_intmed); } -//G can only project to alpha and Beta space. still need to use fock based expression. -std::pair fock_based_projector_1(ExprPtr exprs){ +// G can only project to alpha and Beta space. still need to use fock based +// expression. 
+std::pair fock_based_projector_1(ExprPtr exprs) { exprs = FNOPs_to_tens(exprs); simplify(exprs); - if(exprs->is()){ - return std::pair {exprs, exprs}; + if (exprs->is()) { + return std::pair{exprs, exprs}; } exprs = partition_F12(exprs); auto final_screen = exprs; simplify(final_screen); - //in some cases, there will now be no contributing terms left so return zero to one and two body. - if(final_screen->is()){ - return std::pair {final_screen, final_screen}; + // in some cases, there will now be no contributing terms left so return zero + // to one and two body. + if (final_screen->is()) { + return std::pair{final_screen, final_screen}; } simplify(final_screen); - //find the special f12 intermediates that cannot efficiently be solved directly. This seems to work already for the general case! + // find the special f12 intermediates that cannot efficiently be solved + // directly. This seems to work already for the general case! auto last_screen = ex(0.0); - for (auto&& product : final_screen->as().summands()){ + for (auto&& product : final_screen->as().summands()) { auto new_product = simplification::find_F12_interms(product); last_screen = last_screen + new_product; } simplify(last_screen); return fnop_to_overlap(last_screen); } -//TODO generalize for spin-orbital basis -//simplification to deal with fock based expressions. involving one body fock operator. -// not rigorous for more than 2 body operators or more than 2 density matrices whose rank must be <= 2. -// unfortunately, simplify(result) and wick.reduce(result) will recanonicalize the indices. -// enforces the following obs convention. E^{p_7}_{p_9} and E^{{p_7}{p_8}}_{{p_9}{p_{10}}} -// should allow analysis of multiple expressions who have the same normal order operator prefactor. -std::pair fock_based_projector_2(ExprPtr exprs){ +// TODO generalize for spin-orbital basis +// simplification to deal with fock based expressions. involving one body fock +// operator. 
+// not rigorous for more than 2 body operators or more than 2 density matrices +// whose rank must be <= 2. unfortunately, simplify(result) and +// wick.reduce(result) will recanonicalize the indices. enforces the following +// obs convention. E^{p_7}_{p_9} and E^{{p_7}{p_8}}_{{p_9}{p_{10}}} should +// allow analysis of multiple expressions who have the same normal order +// operator prefactor. +std::pair fock_based_projector_2(ExprPtr exprs) { exprs = FNOPs_to_tens(exprs); simplify(exprs); - if(exprs->is()){ - return std::pair {exprs, exprs}; + if (exprs->is()) { + return std::pair{exprs, exprs}; } exprs = partition_F12(exprs); auto final_screen = exprs; simplify(final_screen); - //in some cases, there will now be no contributing terms left so return zero to one and two body. -if(final_screen->is()){ - return std::pair {final_screen, final_screen}; -} + // in some cases, there will now be no contributing terms left so return zero + // to one and two body. + if (final_screen->is()) { + return std::pair{final_screen, final_screen}; + } final_screen = FNOPs_to_tens(final_screen); simplify(final_screen); final_screen = screen_densities(final_screen); simplify(final_screen); - //find the special f12 intermediates that cannot efficiently be solved directly. This seems to work already for the general case! + // find the special f12 intermediates that cannot efficiently be solved + // directly. This seems to work already for the general case! 
auto last_screen = ex(0.0); - for (auto&& product : final_screen->as().summands()){ + for (auto&& product : final_screen->as().summands()) { auto new_product = simplification::find_F12_interms(product); last_screen = last_screen + new_product; } simplify(last_screen); return fnop_to_overlap(last_screen); - } } +} // namespace simplification #ifndef SEQUANT_SIMPLIFICATIONS_H #define SEQUANT_SIMPLIFICATIONS_H diff --git a/SeQuant/domain/transcorrelated/three_body_decomp.hpp b/SeQuant/domain/transcorrelated/three_body_decomp.hpp index 0328e7bdb..cee065afa 100644 --- a/SeQuant/domain/transcorrelated/three_body_decomp.hpp +++ b/SeQuant/domain/transcorrelated/three_body_decomp.hpp @@ -7,18 +7,19 @@ namespace decompositions { #ifndef SEQUANT_THREE_BODY_DECOMP_H #define SEQUANT_THREE_BODY_DECOMP_H -ExprPtr cumu_to_density(ExprPtr ex_){ +ExprPtr cumu_to_density(ExprPtr ex_) { assert(ex_->is()); assert(ex_->as().rank() == 1); assert(ex_->as().label() == L"\\lambda"); auto down_0 = ex_->as().ket()[0]; auto up_0 = ex_->as().bra()[0]; - auto density = ex(L"\\gamma",std::initializer_list{up_0}, std::initializer_list{down_0}); + auto density = ex(L"\\gamma", std::initializer_list{up_0}, + std::initializer_list{down_0}); return density; } -ExprPtr cumu2_to_density(ExprPtr ex_){ +ExprPtr cumu2_to_density(ExprPtr ex_) { assert(ex_->is()); assert(ex_->as().rank() == 2); assert(ex_->as().label() == L"\\lambda"); @@ -28,15 +29,19 @@ ExprPtr cumu2_to_density(ExprPtr ex_){ auto down_1 = ex_->as().ket()[1]; auto up_1 = ex_->as().bra()[1]; - auto density2 = ex(L"\\gamma",std::initializer_list{up_0, up_1}, std::initializer_list{down_0, down_1}); - auto density_1 = ex(L"\\gamma",std::initializer_list{up_0}, std::initializer_list{down_0}); - auto density_2 = ex(L"\\gamma",std::initializer_list{up_1}, std::initializer_list{down_1}); + auto density2 = + ex(L"\\gamma", std::initializer_list{up_0, up_1}, + std::initializer_list{down_0, down_1}); + auto density_1 = ex(L"\\gamma", 
std::initializer_list{up_0}, + std::initializer_list{down_0}); + auto density_2 = ex(L"\\gamma", std::initializer_list{up_1}, + std::initializer_list{down_1}); auto d1_d2 = antisymmetrize(density_1 * density_2); return density2 + ex(-1) * d1_d2.result; } -ExprPtr cumu3_to_density(ExprPtr ex_){ +ExprPtr cumu3_to_density(ExprPtr ex_) { assert(ex_->is()); assert(ex_->as().rank() == 3); assert(ex_->as().label() == L"\\lambda"); @@ -48,25 +53,36 @@ ExprPtr cumu3_to_density(ExprPtr ex_){ auto down_2 = ex_->as().ket()[2]; auto up_2 = ex_->as().bra()[2]; - auto cumulant2 = ex(L"\\lambda",std::initializer_list{up_1, up_2}, std::initializer_list{down_1, down_2}); - auto density_1 = ex(L"\\gamma",std::initializer_list{up_0}, std::initializer_list{down_0}); - auto density_2 = ex(L"\\gamma",std::initializer_list{up_1}, std::initializer_list{down_1}); - auto density_3 = ex(L"\\gamma",std::initializer_list{up_2}, std::initializer_list{down_2}); - auto density3 = ex(L"\\gamma",std::initializer_list{up_0, up_1,up_2}, std::initializer_list{down_0, down_1, down_2}); - - auto d1_d2 = antisymmetrize(density_1 * density_2 * density_3 + density_1 * cumulant2); + auto cumulant2 = + ex(L"\\lambda", std::initializer_list{up_1, up_2}, + std::initializer_list{down_1, down_2}); + auto density_1 = ex(L"\\gamma", std::initializer_list{up_0}, + std::initializer_list{down_0}); + auto density_2 = ex(L"\\gamma", std::initializer_list{up_1}, + std::initializer_list{down_1}); + auto density_3 = ex(L"\\gamma", std::initializer_list{up_2}, + std::initializer_list{down_2}); + auto density3 = + ex(L"\\gamma", std::initializer_list{up_0, up_1, up_2}, + std::initializer_list{down_0, down_1, down_2}); + + auto d1_d2 = + antisymmetrize(density_1 * density_2 * density_3 + density_1 * cumulant2); auto temp_result = density3 * ex(-1) * d1_d2.result; - for (auto&& product : temp_result->as().summands()){ - for (auto&& factor : product->as().factors()){ - if (factor->is() && (factor->as().label() == L"\\lambda") 
&& (factor->as().rank() == 2)){ + for (auto&& product : temp_result->as().summands()) { + for (auto&& factor : product->as().factors()) { + if (factor->is() && + (factor->as().label() == L"\\lambda") && + (factor->as().rank() == 2)) { factor = cumu2_to_density(factor); } } } - for (auto&& product : temp_result->as().summands()){ - for (auto&& factor : product->as().factors()){ - if (factor->is() && factor->as().label() == L"\\lambda" && factor->as().rank() == 1){ + for (auto&& product : temp_result->as().summands()) { + for (auto&& factor : product->as().factors()) { + if (factor->is() && factor->as().label() == L"\\lambda" && + factor->as().rank() == 1) { factor = cumu_to_density(factor); } } @@ -74,20 +90,27 @@ ExprPtr cumu3_to_density(ExprPtr ex_){ return temp_result; } -ExprPtr one_body_sub (ExprPtr ex_){//J. Chem. Phys. 132, 234107 (2010); https://doi.org/10.1063/1.3439395 eqn 15 for +ExprPtr one_body_sub( + ExprPtr ex_) { // J. Chem. Phys. 132, 234107 (2010); + // https://doi.org/10.1063/1.3439395 eqn 15 for assert(ex_->is()); assert(ex_->as().rank() == 1); auto down_0 = ex_->as().annihilators()[0].index(); auto up_0 = ex_->as().creators()[0].index(); - const auto a = ex(std::initializer_list{up_0}, std::initializer_list{down_0}); - const auto cumu1 = ex(L"\\lambda", WstrList{down_0.label()}, WstrList{up_0.label()}); + const auto a = ex(std::initializer_list{up_0}, + std::initializer_list{down_0}); + const auto cumu1 = + ex(L"\\lambda", WstrList{down_0.label()}, WstrList{up_0.label()}); auto result = a + (ex(-1) * cumu1); return (result); } -ExprPtr two_body_decomp(ExprPtr ex_, bool approx = false){//J. Chem. Phys. 132, 234107 (2010); https://doi.org/10.1063/1.3439395 eqn 16 for \tilde{a^{pr}_{qs}} +ExprPtr two_body_decomp( + ExprPtr ex_, bool approx = false) { // J. Chem. Phys. 
132, 234107 (2010); + // https://doi.org/10.1063/1.3439395 + // eqn 16 for \tilde{a^{pr}_{qs}} assert(ex_->is()); assert(ex_->as().rank() == 2); @@ -97,25 +120,32 @@ ExprPtr two_body_decomp(ExprPtr ex_, bool approx = false){//J. Chem. Phys. 132, auto up_0 = ex_->as().creators()[0].index(); auto up_1 = ex_->as().creators()[1].index(); - const auto cumu1 = ex(L"\\lambda", WstrList{down_0.label()}, WstrList{up_0.label()}); - const auto cumu2 = ex(L"\\lambda", WstrList{down_1.label()}, WstrList{up_1.label()}); - const auto a = ex(std::initializer_list{up_1}, std::initializer_list{down_1}); - const auto a2 = ex(std::initializer_list{up_0, up_1}, std::initializer_list{down_0, down_1}); - const auto double_cumu = ex(L"\\lambda", WstrList{down_0.label(), down_1.label()}, WstrList{up_0.label(), up_1.label()}); + const auto cumu1 = + ex(L"\\lambda", WstrList{down_0.label()}, WstrList{up_0.label()}); + const auto cumu2 = + ex(L"\\lambda", WstrList{down_1.label()}, WstrList{up_1.label()}); + const auto a = ex(std::initializer_list{up_1}, + std::initializer_list{down_1}); + const auto a2 = ex(std::initializer_list{up_0, up_1}, + std::initializer_list{down_0, down_1}); + const auto double_cumu = + ex(L"\\lambda", WstrList{down_0.label(), down_1.label()}, + WstrList{up_0.label(), up_1.label()}); auto term1 = cumu1 * a; auto term2 = cumu1 * cumu2; auto term3 = double_cumu; - auto sum_of_terms = antisymmetrize( term1 + term2 + term3); + auto sum_of_terms = antisymmetrize(term1 + term2 + term3); sum_of_terms.result = ex(-1) * sum_of_terms.result; auto result = a2 + sum_of_terms.result; return (result); - } -//express 3-body term as sums of 1 and 2-body term. as described in J. Chem. Phys. 132, 234107 (2010); https://doi.org/10.1063/1.3439395 eqn 17. -std::pair, std::vector>> three_body_decomp(ExprPtr ex_,bool approx = true){ +// express 3-body term as sums of 1 and 2-body term. as described in J. Chem. +// Phys. 132, 234107 (2010); https://doi.org/10.1063/1.3439395 eqn 17. 
+std::pair, std::vector>> +three_body_decomp(ExprPtr ex_, bool approx = true) { assert(ex_->is()); assert(ex_->as().rank() == 3); @@ -131,24 +161,32 @@ std::pair, std::vector>> three_body std::vector initial_upper{up_0, up_1, up_2}; - const auto cumulant = ex(L"\\lambda", WstrList{down_0.label()}, WstrList{up_0.label()}); - const auto a = ex(std::initializer_list{up_1, up_2}, std::initializer_list{down_1, down_2}); + const auto cumulant = + ex(L"\\lambda", WstrList{down_0.label()}, WstrList{up_0.label()}); + const auto a = ex(std::initializer_list{up_1, up_2}, + std::initializer_list{down_1, down_2}); auto a_cumulant = cumulant * a; - auto cumulant2 = ex(L"\\lambda", WstrList{down_1.label()}, WstrList{up_1.label()}); - auto cumulant3 = ex(L"\\lambda", WstrList{down_2.label()}, WstrList{up_2.label()}); + auto cumulant2 = + ex(L"\\lambda", WstrList{down_1.label()}, WstrList{up_1.label()}); + auto cumulant3 = + ex(L"\\lambda", WstrList{down_2.label()}, WstrList{up_2.label()}); auto cumulant_3x = cumulant * cumulant2 * cumulant3; - auto a1 = ex( std::initializer_list{up_0}, std::initializer_list{down_0}); + auto a1 = ex(std::initializer_list{up_0}, + std::initializer_list{down_0}); auto a1_cumu1_cumu2 = a1 * cumulant2 * cumulant3; - auto two_body_cumu = ex(L"\\lambda", WstrList{down_1.label(), down_2.label()}, WstrList{up_1.label(),up_2.label()}); + auto two_body_cumu = + ex(L"\\lambda", WstrList{down_1.label(), down_2.label()}, + WstrList{up_1.label(), up_2.label()}); auto a1_cumu2 = a1 * two_body_cumu; auto cumu1_cumu2 = cumulant * two_body_cumu; - auto sum_of_terms = antisymmetrize(a_cumulant + cumulant_3x + a1_cumu1_cumu2 + a1_cumu2 + cumu1_cumu2); + auto sum_of_terms = antisymmetrize(a_cumulant + cumulant_3x + a1_cumu1_cumu2 + + a1_cumu2 + cumu1_cumu2); - if(!approx) { + if (!approx) { auto cumu3 = ex( L"\\lambda", WstrList{down_0.label(), down_1.label(), down_2.label()}, WstrList{up_0.label(), up_1.label(), up_2.label()}); @@ -159,39 +197,45 @@ std::pair, 
std::vector>> three_body auto temp_result = sum_of_terms.result; temp_result->canonicalize(); simplify(temp_result); - //std::wcout << "result before substitiutions: " << to_latex_align(temp_result) << std::endl; + // std::wcout << "result before substitiutions: " << + // to_latex_align(temp_result) << std::endl; - for (auto&& product : temp_result->as().summands()){//replace all the two body terms with one body terms. + for (auto&& product : + temp_result->as().summands()) { // replace all the two body terms + // with one body terms. if (product->is()) { for (auto&& factor : product->as().factors()) { - if (factor->is() && - factor->as().rank() == 2) { + if (factor->is() && factor->as().rank() == 2) { factor = two_body_decomp(factor); } } - } - else{ + } else { } } simplify(temp_result); - for (auto&& product : temp_result->as().summands()){//replace the one body terms with the substituted expression - if(product->is()) { + for (auto&& product : + temp_result->as().summands()) { // replace the one body terms with + // the substituted expression + if (product->is()) { for (auto&& factor : product->as().factors()) { - if (factor->is() && - factor->as().rank() == 1) { + if (factor->is() && factor->as().rank() == 1) { factor = one_body_sub(factor); } } } } - std::pair, std::vector> initial_pairing(initial_lower,initial_upper); - std::pair, std::vector>> result(temp_result,initial_pairing); - //simplify(temp_result); - // std::wcout << "result before substitiutions: " << to_latex_align(temp_result,20,7) << std::endl; + std::pair, std::vector> initial_pairing( + initial_lower, initial_upper); + std::pair, std::vector>> result( + temp_result, initial_pairing); + // simplify(temp_result); + // std::wcout << "result before substitiutions: " << + // to_latex_align(temp_result,20,7) << std::endl; return result; } -std::pair, std::vector>> three_body_decomposition(ExprPtr _ex, int rank) { +std::pair, std::vector>> +three_body_decomposition(ExprPtr _ex, int rank) { std::pair, 
std::vector> initial_pairing; if (rank == 3) { auto ex_pair = three_body_decomp(_ex); @@ -209,20 +253,20 @@ std::pair, std::vector>> three_body } else if (factor->as().label() == L"\\lambda" && factor->as().rank() == 2) { factor = cumu2_to_density(factor); - } else if (factor->as().label() == L"\\lambda" && factor->as().rank() == 1) { + } else if (factor->as().label() == L"\\lambda" && + factor->as().rank() == 1) { factor = cumu_to_density(factor); } else { assert(factor->as().label() != L"\\lambda"); } } - } } } _ex->canonicalize(); simplify(_ex); } else if (rank == 2) { - auto ex_pair = three_body_decomp(_ex,true); + auto ex_pair = three_body_decomp(_ex, true); _ex = ex_pair.first; initial_pairing = ex_pair.second; simplify(_ex); @@ -232,13 +276,11 @@ std::pair, std::vector>> three_body if (factor->is()) { if (factor->as().label() == L"\\lambda" && factor->as().rank() > 2) { - factor = ex(0); - } - else if (factor->as().label() == L"\\lambda" && - factor->as().rank() == 2) { - factor = cumu2_to_density(factor); - } - else if (factor->as().label() == L"\\lambda") { + factor = ex(0); + } else if (factor->as().label() == L"\\lambda" && + factor->as().rank() == 2) { + factor = cumu2_to_density(factor); + } else if (factor->as().label() == L"\\lambda") { factor = cumu_to_density(factor); } else { assert(factor->as().label() != L"\\lambda"); @@ -248,9 +290,10 @@ std::pair, std::vector>> three_body } } simplify(_ex); - //std::wcout << " cumulant replacment: " << to_latex_align(_ex,20, 7) << std::endl; + // std::wcout << " cumulant replacment: " << to_latex_align(_ex,20, 7) << + // std::endl; } else if (rank == 1) { - auto ex_pair = three_body_decomp(_ex,true); + auto ex_pair = three_body_decomp(_ex, true); _ex = ex_pair.first; initial_pairing = ex_pair.second; _ex->canonicalize(); @@ -279,26 +322,34 @@ std::pair, std::vector>> three_body return {_ex, initial_pairing}; } -// in general a three body substitution can be approximated with 1, 2, or 3 body terms(3 body has no 
approximation). -// this is achieved by replacing densities with with particle number > rank by the each successive cumulant approximation followed by neglect of the particle rank sized term. -// TODO this implementation is ambitious and currently we only support rank 2 decompositions. -//TODO there may be a faster way to implement this given knowledge of the resulting expression. could have a "fast" and a "rigourous" implementation -ExprPtr three_body_substitution (ExprPtr &input, int rank){ - //just return back if the input is zero. - if(input == ex(0)){ +// in general a three body substitution can be approximated with 1, 2, or 3 body +// terms(3 body has no approximation). this is achieved by replacing densities +// with with particle number > rank by the each successive cumulant +// approximation followed by neglect of the particle rank sized term. +// TODO this implementation is ambitious and currently we only support rank 2 +// decompositions. +// TODO there may be a faster way to implement this given knowledge of the +// resulting expression. could have a "fast" and a "rigourous" implementation +ExprPtr three_body_substitution(ExprPtr& input, int rank) { + // just return back if the input is zero. + if (input == ex(0)) { return input; } std::pair, std::vector> initial_pairing; - if(input->is()) { + if (input->is()) { for (auto&& product : input->as().summands()) { if (product->is()) { for (auto&& factor : product->as().factors()) { - if (factor->is() && (factor->as().rank() ==3)) { // find the 3-body terms - auto fac_pair = decompositions::three_body_decomposition(factor,rank); // decompose that term and replace the existing term. + if (factor->is() && (factor->as().rank() == + 3)) { // find the 3-body terms + auto fac_pair = decompositions::three_body_decomposition( + factor, + rank); // decompose that term and replace the existing term. 
factor = fac_pair.first; initial_pairing = fac_pair.second; - if(get_default_context().spbasis() == SPBasis::spinfree){ - factor = antisymm::spin_sum(initial_pairing.second,initial_pairing.first, factor); + if (get_default_context().spbasis() == SPBasis::spinfree) { + factor = antisymm::spin_sum(initial_pairing.second, + initial_pairing.first, factor); factor->canonicalize(); simplify(factor); } @@ -306,33 +357,38 @@ ExprPtr three_body_substitution (ExprPtr &input, int rank){ } } } - } - else if(input->is()){ + } else if (input->is()) { for (auto&& factor : input->as().factors()) { - if (factor->is() && (factor->as().rank() ==3)) { // find the 3-body terms - auto fac_pair = decompositions::three_body_decomposition(factor,rank); // decompose that term and replace the existing term. + if (factor->is() && + (factor->as().rank() == 3)) { // find the 3-body terms + auto fac_pair = decompositions::three_body_decomposition( + factor, + rank); // decompose that term and replace the existing term. factor = fac_pair.first; - initial_pairing = fac_pair.second;// decompose that term and replace the existing term. - if(get_default_context().spbasis() == SPBasis::spinfree){ - factor = antisymm::spin_sum(initial_pairing.second,initial_pairing.first, factor); + initial_pairing = + fac_pair + .second; // decompose that term and replace the existing term. + if (get_default_context().spbasis() == SPBasis::spinfree) { + factor = antisymm::spin_sum(initial_pairing.second, + initial_pairing.first, factor); factor->canonicalize(); simplify(factor); } } } - } - else if(input->is()){ - auto fac_pair = decompositions::three_body_decomposition(input,rank); // decompose that term and replace the existing term. + } else if (input->is()) { + auto fac_pair = decompositions::three_body_decomposition( + input, rank); // decompose that term and replace the existing term. 
input = fac_pair.first; initial_pairing = fac_pair.second; - if(get_default_context().spbasis() == SPBasis::spinfree){ - //std::wcout << to_latex_align(input,20) << std::endl; - input = antisymm::spin_sum(initial_pairing.second,initial_pairing.first, input); + if (get_default_context().spbasis() == SPBasis::spinfree) { + // std::wcout << to_latex_align(input,20) << std::endl; + input = antisymm::spin_sum(initial_pairing.second, initial_pairing.first, + input); input->canonicalize(); simplify(input); } - } - else{ + } else { throw "cannot handle this type"; } From f66eb720446bcaee720de8f6c42e3a3c80dd27f5 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Thu, 21 Apr 2022 10:50:07 -0400 Subject: [PATCH 067/120] small bug in cache manager constructor --- SeQuant/domain/eval/eval.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SeQuant/domain/eval/eval.hpp b/SeQuant/domain/eval/eval.hpp index 6fda037ed..5306b1954 100644 --- a/SeQuant/domain/eval/eval.hpp +++ b/SeQuant/domain/eval/eval.hpp @@ -112,7 +112,7 @@ CacheManager make_cache_manager(EvalNode const& node, auto less_repeating = [](auto const& pair) { return pair.second < 2; }; ranges::actions::remove_if(hash_to_counts, less_repeating); - if (!persistent_leaves) return CacheManager{hash_to_counts}; + if (!persistent_leaves) return CacheManager{hash_to_counts,{}}; container::svector leaf_hashes{}; node.visit_leaf([&leaf_hashes](auto const& node) { From 44de355be7a051921a5dd6b0e63c00bafd4b8e89 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Thu, 21 Apr 2022 10:57:11 -0400 Subject: [PATCH 068/120] small bug in cache manager constructor --- SeQuant/domain/eval/eval.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SeQuant/domain/eval/eval.hpp b/SeQuant/domain/eval/eval.hpp index b2c5fc966..032a0c954 100644 --- a/SeQuant/domain/eval/eval.hpp +++ b/SeQuant/domain/eval/eval.hpp @@ -112,7 +112,7 @@ CacheManager make_cache_manager(EvalNode const& node, auto less_repeating = 
[](auto const& pair) { return pair.second < 2; }; ranges::actions::remove_if(hash_to_counts, less_repeating); - if (!persistent_leaves) return CacheManager{hash_to_counts}; + if (!persistent_leaves) return CacheManager{hash_to_counts,{}}; container::svector leaf_hashes{}; node.visit_leaf([&leaf_hashes](auto const& node) { From cd3f358a280807054b31764cc4bad5f467d7300f Mon Sep 17 00:00:00 2001 From: connermasteran Date: Mon, 25 Apr 2022 09:09:14 -0400 Subject: [PATCH 069/120] bug fix --- SeQuant/domain/eqs/single_ref_uccf12.h | 38 +++++++++---------- .../domain/transcorrelated/simplifications.h | 33 ++++++++-------- .../transcorrelated/three_body_decomp.hpp | 8 ---- 3 files changed, 34 insertions(+), 45 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index ccbadc015..e7745b639 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -50,15 +50,15 @@ class uccf12 { ExprPtr compute_double_com(ExprPtr e1, ExprPtr e2, ExprPtr e3, int ansatz = 2) { auto first_com = do_wick((e1 * e2) - (e2 * e1)); - non_canon_simplify(first_com); + simplify(first_com); auto second_com_1 = first_com * e3; - non_canon_simplify(second_com_1); + simplify(second_com_1); second_com_1 = do_wick(second_com_1); auto second_com_2 = e3 * first_com; - non_canon_simplify(second_com_2); + simplify(second_com_2); second_com_2 = do_wick(second_com_2); auto second_com = second_com_1 - second_com_2; - non_canon_simplify(second_com); + simplify(second_com); if (ansatz == 2) { second_com = keep_up_to_3_body_terms(second_com); second_com = @@ -66,11 +66,11 @@ class uccf12 { ex(0.); // make a sum to avoid heavy code duplication for // product and sum variants. 
second_com = simplification::overlap_with_obs(second_com); - //second_com = second_com + ex(0.); - //second_com = simplification::screen_F12_proj(second_com, 2); - //second_com = simplification::tens_to_FNOps(second_com); - //second_com = decompositions::three_body_substitution(second_com, 2); - non_canon_simplify(second_com); + second_com = second_com + ex(0.); + second_com = simplification::screen_F12_proj(second_com, 2); + second_com = simplification::tens_to_FNOps(second_com); + second_com = decompositions::three_body_substitution(second_com, 2); + simplify(second_com); return second_com; } if (ansatz == 1) { @@ -165,7 +165,7 @@ class uccf12 { FWickTheorem wick{expr}; wick.spinfree(false).full_contractions(false); auto result = wick.compute(); - non_canon_simplify(result); + simplify(result); return result; } @@ -308,7 +308,6 @@ class uccf12 { } else if (gg_label == "uocc") { gg_space = IndexSpace::unoccupied; } - // currently not supported, but needs to be else if (gg_label == "act_obs") { gg_space = IndexSpace::all_active; } else { @@ -342,9 +341,10 @@ class uccf12 { ExprPtr A = ex(0.0); if (doubles) { A = A + (r - adjoint(r)) + single; + simplify(A); } else { A = A + single; - non_canon_simplify(A); + simplify(A); } auto A_ = A->clone(); A_ = relable(A_); @@ -369,8 +369,8 @@ class uccf12 { auto two_body = com_1.second + (sim.second); // cannot use non_canon_simplify here because of B term. - simplify(one_body); - simplify(two_body); + non_canon_simplify(one_body); + non_canon_simplify(two_body); int term_count = 0; if(!one_body->is()){ term_count += 1; @@ -415,23 +415,23 @@ class uccf12 { H_A_3 = H_A_3 + ex(0.); H_A_3 = simplification::screen_F12_proj(H_A_3, 1); H_A_3 = simplification::tens_to_FNOps(H_A_3); - non_canon_simplify(H_A_3); + simplify(H_A_3); auto com_1 = simplification::hamiltonian_based_projector_1(H_A_3); auto fFF = ex(1. / 2) * compute_double_com(F(), r, r_1, 1); - non_canon_simplify(fFF); + simplify(fFF); auto fFFt = ex(1. 
/ 2) * compute_double_com(F(), r, ex(-1.) * adjoint(r_1), 1); - non_canon_simplify(fFFt); + simplify(fFFt); auto fFtFt = ex(1. / 2) * compute_double_com(F(), ex(-1.) * adjoint(r), ex(-1.) * adjoint(r_1), 1); - non_canon_simplify(fFtFt); + simplify(fFtFt); auto fFtF = ex(1. / 2) * compute_double_com(F(), ex(-1.) * adjoint(r), r_1, 1); - non_canon_simplify(fFtF); + simplify(fFtF); auto fFF_sim = simplification::fock_based_projector_1(fFF); // std::wcout << "FF: " << to_latex_align(fFF_sim.second,20,2) << diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index c544b673d..39d01bfba 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -180,7 +180,7 @@ ExprPtr overlap_with_obs(ExprPtr ex_) { FWickTheorem wick{overlap_expr}; // std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; wick.reduce(overlap_expr); - non_canon_simplify(overlap_expr); + simplify(overlap_expr); // std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; return overlap_expr; } @@ -203,7 +203,7 @@ ExprPtr remove_const(const ExprPtr ex_) { } } } - non_canon_simplify(new_expression); + simplify(new_expression); return new_expression; } @@ -281,7 +281,7 @@ ExprPtr tens_to_op(ExprPtr ex_) { ex(ex_->as().ket(), ex_->as().bra()); return result; } -// F tensors must contain contain indices in the bra with space > all. this +// F tensors must contain indices in the bra with space > all. this // includes complete, completeunoccupied, and inactiveunoccupied. and if one of // the particle indices is connected to the obs virtual space, then the other // must be from the CABS set. i.e. 
if G^{a \beta}_{ij} -> G^{a a'}_{ij} @@ -504,7 +504,7 @@ auto treat_fock(ExprPtr ex_) { } FWickTheorem wick{new_ex_}; wick.reduce(new_ex_); - non_canon_simplify(new_ex_); + simplify(new_ex_); return new_ex_; } @@ -684,7 +684,7 @@ ExprPtr densities_to_occ(const ExprPtr& ex_) { FWickTheorem wick{result}; wick.reduce(result); - non_canon_simplify(result); + simplify(result); return result; } @@ -722,7 +722,7 @@ ExprPtr biproduct_intermediate(ExprPtr T1, ExprPtr T2) { IDX_list{external_ket[1], external_ket[0]}); auto V = GR_ijpq - F_ijrs * g_rspq - F_ijmc * g_mcpq - F_jicm * g_cmqp; - non_canon_simplify(V); + simplify(V); return V; } else { auto GR_pqij = @@ -745,7 +745,7 @@ ExprPtr biproduct_intermediate(ExprPtr T1, ExprPtr T2) { IDX_list{L"α'_4", L"m_6"}); auto V = GR_pqij - F_rsij * g_pqrs - F_mcij * g_pqmc - F_cmji * g_qpcm; - non_canon_simplify(V); + simplify(V); return V; } } else { @@ -817,7 +817,7 @@ ExprPtr find_F12_interms(ExprPtr ex_) { } result = result * ex_; - non_canon_simplify(result); + simplify(result); return result; } return ex_; @@ -918,18 +918,18 @@ ExprPtr screen_F12_proj(ExprPtr exprs, int ansatz = 2) { product_clone * contains_tens(temp_factor, L"s").second; FWickTheorem wick_f{product_clone}; wick_f.reduce(product_clone); - non_canon_simplify(product_clone); + simplify(product_clone); product_clone = screen_F12_proj(product_clone, ansatz); return_sum = product_clone + return_sum; new_product = ex(0.); break; } new_product = new_product * temp_factor; - non_canon_simplify(new_product); + simplify(new_product); } return_sum = new_product + return_sum; } - non_canon_simplify(return_sum); + simplify(return_sum); return return_sum; } else if (exprs->is()) { auto new_product = ex(exprs->as().scalar()); @@ -975,7 +975,7 @@ ExprPtr FNOPs_to_tens(ExprPtr ex_) { } new_sum = new_product + new_sum; } - simplify(new_sum); + non_canon_simplify(new_sum); return new_sum; } else if (ex_->is()) { for (auto&& factor : ex_->as().factors()) { @@ -1007,7 +1007,7 @@ 
ExprPtr tens_to_FNOps(ExprPtr ex_) { } new_sum = new_product + new_sum; } - simplify(new_sum); + non_canon_simplify(new_sum); return new_sum; } else if (ex_->is()) { for (auto&& factor : ex_->as().factors()) { @@ -1097,7 +1097,7 @@ ExprPtr partition_F12(ExprPtr exprs) { } } } - non_canon_simplify(exprs); + simplify(exprs); return (exprs); } @@ -1135,7 +1135,6 @@ std::pair hamiltonian_based_projector_1(ExprPtr exprs) { simplify(exprs); exprs = partition_F12(exprs); simplify(exprs); - ; exprs = screen_F12_proj(exprs, 1); simplify(exprs); auto exprs_intmed = ex(0.0); @@ -1143,7 +1142,7 @@ std::pair hamiltonian_based_projector_1(ExprPtr exprs) { auto new_product = simplification::find_F12_interms(product); exprs_intmed = new_product + exprs_intmed; } - simplify(exprs_intmed); + non_canon_simplify(exprs_intmed); return fnop_to_overlap(exprs_intmed); } // G can only project to alpha and Beta space. still need to use fock based @@ -1196,8 +1195,6 @@ std::pair fock_based_projector_2(ExprPtr exprs) { if (final_screen->is()) { return std::pair{final_screen, final_screen}; } - final_screen = FNOPs_to_tens(final_screen); - simplify(final_screen); final_screen = screen_densities(final_screen); simplify(final_screen); // find the special f12 intermediates that cannot efficiently be solved diff --git a/SeQuant/domain/transcorrelated/three_body_decomp.hpp b/SeQuant/domain/transcorrelated/three_body_decomp.hpp index cee065afa..58aec75cd 100644 --- a/SeQuant/domain/transcorrelated/three_body_decomp.hpp +++ b/SeQuant/domain/transcorrelated/three_body_decomp.hpp @@ -195,7 +195,6 @@ three_body_decomp(ExprPtr ex_, bool approx = true) { } auto temp_result = sum_of_terms.result; - temp_result->canonicalize(); simplify(temp_result); // std::wcout << "result before substitiutions: " << // to_latex_align(temp_result) << std::endl; @@ -241,7 +240,6 @@ three_body_decomposition(ExprPtr _ex, int rank) { auto ex_pair = three_body_decomp(_ex); _ex = ex_pair.first; initial_pairing = ex_pair.second; 
- _ex->canonicalize(); simplify(_ex); for (auto&& product : _ex->as().summands()) { if (product->is()) { @@ -263,7 +261,6 @@ three_body_decomposition(ExprPtr _ex, int rank) { } } } - _ex->canonicalize(); simplify(_ex); } else if (rank == 2) { auto ex_pair = three_body_decomp(_ex, true); @@ -296,7 +293,6 @@ three_body_decomposition(ExprPtr _ex, int rank) { auto ex_pair = three_body_decomp(_ex, true); _ex = ex_pair.first; initial_pairing = ex_pair.second; - _ex->canonicalize(); simplify(_ex); for (auto&& product : _ex->as().summands()) { if (product->is()) { @@ -314,7 +310,6 @@ three_body_decomposition(ExprPtr _ex, int rank) { } } } - _ex->canonicalize(); simplify(_ex); } else { throw "rank not supported!"; @@ -350,7 +345,6 @@ ExprPtr three_body_substitution(ExprPtr& input, int rank) { if (get_default_context().spbasis() == SPBasis::spinfree) { factor = antisymm::spin_sum(initial_pairing.second, initial_pairing.first, factor); - factor->canonicalize(); simplify(factor); } } @@ -371,7 +365,6 @@ ExprPtr three_body_substitution(ExprPtr& input, int rank) { if (get_default_context().spbasis() == SPBasis::spinfree) { factor = antisymm::spin_sum(initial_pairing.second, initial_pairing.first, factor); - factor->canonicalize(); simplify(factor); } } @@ -385,7 +378,6 @@ ExprPtr three_body_substitution(ExprPtr& input, int rank) { // std::wcout << to_latex_align(input,20) << std::endl; input = antisymm::spin_sum(initial_pairing.second, initial_pairing.first, input); - input->canonicalize(); simplify(input); } } else { From bb9613e8d543352d49372f5e76f9d2e3031991e1 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Tue, 26 Apr 2022 11:34:43 -0400 Subject: [PATCH 070/120] attempt at mpqc f12 partition --- SeQuant/domain/eqs/single_ref_uccf12.h | 12 ++-- .../domain/transcorrelated/simplifications.h | 60 ++++++++++--------- .../transcorrelated/three_body_decomp.hpp | 6 +- 3 files changed, 42 insertions(+), 36 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h 
b/SeQuant/domain/eqs/single_ref_uccf12.h index e7745b639..c23eedb9f 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -50,15 +50,15 @@ class uccf12 { ExprPtr compute_double_com(ExprPtr e1, ExprPtr e2, ExprPtr e3, int ansatz = 2) { auto first_com = do_wick((e1 * e2) - (e2 * e1)); - simplify(first_com); + non_canon_simplify(first_com); auto second_com_1 = first_com * e3; - simplify(second_com_1); + non_canon_simplify(second_com_1); second_com_1 = do_wick(second_com_1); auto second_com_2 = e3 * first_com; - simplify(second_com_2); + non_canon_simplify(second_com_2); second_com_2 = do_wick(second_com_2); auto second_com = second_com_1 - second_com_2; - simplify(second_com); + non_canon_simplify(second_com); if (ansatz == 2) { second_com = keep_up_to_3_body_terms(second_com); second_com = @@ -70,7 +70,7 @@ class uccf12 { second_com = simplification::screen_F12_proj(second_com, 2); second_com = simplification::tens_to_FNOps(second_com); second_com = decompositions::three_body_substitution(second_com, 2); - simplify(second_com); + non_canon_simplify(second_com); return second_com; } if (ansatz == 1) { @@ -348,7 +348,7 @@ class uccf12 { } auto A_ = A->clone(); A_ = relable(A_); - std::wcout << "singles_A: " << to_latex_align(A_, 20,4) << std::endl; + //std::wcout << "singles_A: " << to_latex_align(A_, 20,4) << std::endl; // first commutator in eq 9. Chem. Phys. 136, 084107 (2012). auto H_A = do_wick(ex(1.) * ((h * A) - (A * h))); auto H_A_3 = keep_up_to_3_body_terms(H_A); diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index 39d01bfba..bebf2cc3d 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -47,7 +47,7 @@ ExprPtr op_to_tens(ExprPtr ex_) { /// TODO this dictates that the resulting hamiltonian will be in a particular /// basis. 
ExprPtr overlap_with_obs(ExprPtr ex_) { - std::wcout << to_latex_align(ex_,20,4) << std::endl; + //std::wcout << to_latex_align(ex_,20,4) << std::endl; auto overlap_expr = ex(0); // enforce an overlap each E with elements from for (auto&& product : @@ -180,7 +180,7 @@ ExprPtr overlap_with_obs(ExprPtr ex_) { FWickTheorem wick{overlap_expr}; // std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; wick.reduce(overlap_expr); - simplify(overlap_expr); + non_canon_simplify(overlap_expr); // std::wcout << to_latex_align(overlap_expr,20,2) << std::endl; return overlap_expr; } @@ -203,7 +203,7 @@ ExprPtr remove_const(const ExprPtr ex_) { } } } - simplify(new_expression); + non_canon_simplify(new_expression); return new_expression; } @@ -448,10 +448,13 @@ ExprPtr screen_densities(ExprPtr ex_) { if (ex_->is()) { for (auto&& product : ex_->as().summands()) { for (auto&& factor : product->as()) { - if (factor->as().label() == L"\\Gamma" || - factor->as().label() == L"\\gamma") { - factor = screen_density(factor); + if(factor->is()) { + if (factor->as().label() == L"\\Gamma" || + factor->as().label() == L"\\gamma") { + factor = screen_density(factor); + } } + else{std::wcout <<"problematic factor" << to_latex_align(factor);} } } return ex_; @@ -697,10 +700,10 @@ ExprPtr biproduct_intermediate(ExprPtr T1, ExprPtr T2) { auto [nconnects, space, external_ket, external_bra, T1_ket] = ncon_spa_extket_extbra(T1->as(), T2->as()); if (T1->as().label() == L"g" || T2->as().label() == L"g") { + // V^pq_ij + // intermediate decomposition handled by SeQuant so space labels can be + // properly handled if (nconnects == 2 && space == IndexSpace::complete_unoccupied) { - // V^pq_ij - // intermediate decomposition handled by SeQuant so space labels can be - // properly handled if (T1_ket) { auto GR_ijpq = ex(L"GR", IDX_list{external_bra[0], external_bra[1]}, @@ -817,7 +820,7 @@ ExprPtr find_F12_interms(ExprPtr ex_) { } result = result * ex_; - simplify(result); + 
non_canon_simplify(result); return result; } return ex_; @@ -925,11 +928,11 @@ ExprPtr screen_F12_proj(ExprPtr exprs, int ansatz = 2) { break; } new_product = new_product * temp_factor; - simplify(new_product); + non_canon_simplify(new_product); } return_sum = new_product + return_sum; } - simplify(return_sum); + non_canon_simplify(return_sum); return return_sum; } else if (exprs->is()) { auto new_product = ex(exprs->as().scalar()); @@ -966,10 +969,10 @@ ExprPtr FNOPs_to_tens(ExprPtr ex_) { for (auto factor : product->as().factors()) { auto new_factor = ex(0); if (factor->is()) { - new_factor = op_to_tens(factor); + new_factor = op_to_tens(factor) + new_factor; assert(!new_factor->is()); } else { - new_factor = factor; + new_factor = factor + new_factor; } new_product = new_product * new_factor; } @@ -1112,13 +1115,13 @@ ExprPtr partition_F12(ExprPtr exprs) { // operator prefactor. std::pair hamiltonian_based_projector_2(ExprPtr exprs) { exprs = FNOPs_to_tens(exprs); - simplify(exprs); - exprs = partition_F12(exprs); - simplify(exprs); - exprs = screen_F12_proj(exprs, 2); - simplify(exprs); + non_canon_simplify(exprs); exprs = screen_densities(exprs); - simplify(exprs); + non_canon_simplify(exprs); + exprs = screen_F12_proj(exprs, 2); + //simplify(exprs); + //exprs = partition_F12(exprs); + non_canon_simplify(exprs); auto exprs_intmed = ex(0.0); for (auto&& product : exprs->as().summands()) { auto new_product = simplification::find_F12_interms(product); @@ -1182,21 +1185,24 @@ std::pair fock_based_projector_1(ExprPtr exprs) { // allow analysis of multiple expressions who have the same normal order // operator prefactor. 
std::pair fock_based_projector_2(ExprPtr exprs) { - exprs = FNOPs_to_tens(exprs); - simplify(exprs); if (exprs->is()) { return std::pair{exprs, exprs}; } - exprs = partition_F12(exprs); + non_canon_simplify(exprs); + exprs = FNOPs_to_tens(exprs); + non_canon_simplify(exprs); + exprs = screen_densities(exprs); + non_canon_simplify(exprs); + //std::wcout << "pre partition expression: " << to_latex_align(exprs,30,3) << std::endl; + //exprs = partition_F12(exprs); auto final_screen = exprs; - simplify(final_screen); + non_canon_simplify(final_screen); // in some cases, there will now be no contributing terms left so return zero // to one and two body. if (final_screen->is()) { return std::pair{final_screen, final_screen}; } - final_screen = screen_densities(final_screen); - simplify(final_screen); + non_canon_simplify(final_screen); // find the special f12 intermediates that cannot efficiently be solved // directly. This seems to work already for the general case! auto last_screen = ex(0.0); @@ -1204,7 +1210,7 @@ std::pair fock_based_projector_2(ExprPtr exprs) { auto new_product = simplification::find_F12_interms(product); last_screen = last_screen + new_product; } - simplify(last_screen); + non_canon_simplify(last_screen); return fnop_to_overlap(last_screen); } } // namespace simplification diff --git a/SeQuant/domain/transcorrelated/three_body_decomp.hpp b/SeQuant/domain/transcorrelated/three_body_decomp.hpp index 58aec75cd..c0f6d172d 100644 --- a/SeQuant/domain/transcorrelated/three_body_decomp.hpp +++ b/SeQuant/domain/transcorrelated/three_body_decomp.hpp @@ -345,7 +345,7 @@ ExprPtr three_body_substitution(ExprPtr& input, int rank) { if (get_default_context().spbasis() == SPBasis::spinfree) { factor = antisymm::spin_sum(initial_pairing.second, initial_pairing.first, factor); - simplify(factor); + non_canon_simplify(factor); } } } @@ -365,7 +365,7 @@ ExprPtr three_body_substitution(ExprPtr& input, int rank) { if (get_default_context().spbasis() == SPBasis::spinfree) 
{ factor = antisymm::spin_sum(initial_pairing.second, initial_pairing.first, factor); - simplify(factor); + non_canon_simplify(factor); } } } @@ -378,7 +378,7 @@ ExprPtr three_body_substitution(ExprPtr& input, int rank) { // std::wcout << to_latex_align(input,20) << std::endl; input = antisymm::spin_sum(initial_pairing.second, initial_pairing.first, input); - simplify(input); + non_canon_simplify(input); } } else { throw "cannot handle this type"; From 5f10806400aafb1526cc40fca7aaf354d69de28a Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Tue, 26 Apr 2022 12:55:53 -0400 Subject: [PATCH 071/120] remove SeQuant's FindTBB (very very legacy at this point) and adapt uses to the most recent FindTBB in VG cmake kit --- CMakeLists.txt | 5 +- cmake/modules/CheckCXXFeatures.cmake | 7 +- cmake/modules/FindTBB.cmake | 142 --------------------------- 3 files changed, 7 insertions(+), 147 deletions(-) delete mode 100644 cmake/modules/FindTBB.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index e42b1dfbd..b9bfece7a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -107,10 +107,13 @@ find_package(Threads REQUIRED) # PSTL (used by g++-9 and clang++-8 in c++17 mode) needs TBB if ((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU" AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_GREATER 9) OR -(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang" AND + (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang" AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_GREATER 8)) if (ENABLE_TBB) # but user must enable the search for TBB since this is an additional source of build entropy find_package(TBB REQUIRED) + # TBB::tbb by default is not GLOBAL, so to allow users of LINALG_LIBRARIES to safely use it we need to make it global + # more discussion here: https://gitlab.kitware.com/cmake/cmake/-/issues/17256 + set_target_properties(TBB::tbb PROPERTIES IMPORTED_GLOBAL TRUE) endif (ENABLE_TBB) endif () # check of header is usable diff --git a/cmake/modules/CheckCXXFeatures.cmake b/cmake/modules/CheckCXXFeatures.cmake index 
a2bcf2075..b7e713546 100644 --- a/cmake/modules/CheckCXXFeatures.cmake +++ b/cmake/modules/CheckCXXFeatures.cmake @@ -8,16 +8,15 @@ macro(check_cxx_execution_header _prefix) # compilation checks ############################################## set(_prereq_list "_STANDALONE") - if (TBB_FOUND) + if (TARGET TBB::tbb) list(APPEND _prereq_list _WITH_TBB) - endif (TBB_FOUND) + endif () foreach (_prereq ${_prereq_list}) cmake_push_check_state() if (_prereq STREQUAL _WITH_TBB) - list(APPEND CMAKE_REQUIRED_LIBRARIES ${TBB_LIBRARIES}) - list(APPEND CMAKE_REQUIRED_INCLUDES ${TBB_INCLUDE_DIRS}) + list(APPEND CMAKE_REQUIRED_LIBRARIES TBB::tbb) endif () CHECK_CXX_SOURCE_COMPILES( diff --git a/cmake/modules/FindTBB.cmake b/cmake/modules/FindTBB.cmake deleted file mode 100644 index 33f0e3620..000000000 --- a/cmake/modules/FindTBB.cmake +++ /dev/null @@ -1,142 +0,0 @@ -# - Try to find Intel TBB -# Input variables: -# TBB_ROOT_DIR - The TBB install directory -# TBB_INCLUDE_DIR - The TBB include directory -# TBB_LIBRARY - The TBB library directory -# Output variables: -# TBB_FOUND - System has TBB -# TBB_INCLUDE_DIRS - The tbb include directories -# TBB_LIBRARIES - The libraries needed to use TBB -# TBB_VERSION - The version string for TBB - -include(FindPackageHandleStandardArgs) - -if(NOT TBB_FOUND) - - # Set default sarch paths for TBB - if(NOT TBB_ROOT_DIR AND NOT DEFINED TBB_ROOT_DIR) - if(EXISTS $ENV{TBBROOT}) - set(TBB_ROOT_DIR "$ENV{TBBROOT}") - elseif(EXISTS /opt/intel/tbb) - set(TBB_ROOT_DIR /opt/intel/tbb) - endif() - endif() - if(TBB_ROOT_DIR) - # NOTE: Will not overwrite user defined include and library directory variables - set(TBB_INCLUDE_DIR ${TBB_ROOT_DIR}/include - CACHE PATH "The include directory for TBB") - if(CMAKE_SYSTEM_NAME MATCHES "Darwin") - set(TBB_LIBRARY ${TBB_ROOT_DIR}/lib/libc++;${TBB_ROOT_DIR}/lib - CACHE PATH "The library directory for TBB") - elseif(CMAKE_SYSTEM_NAME MATCHES "Linux") - if (EXISTS ${TBB_ROOT_DIR}/lib/intel64/gcc4.7) # Intel packaged 
TBB - set(TBB_LIBRARY ${TBB_ROOT_DIR}/lib/intel64/gcc4.7 - CACHE PATH "The library directory for TBB") - elseif(EXISTS ${TBB_ROOT_DIR}/lib/intel64/gcc4.4) # Intel packaged TBB - set(TBB_LIBRARY ${TBB_ROOT_DIR}/lib/intel64/gcc4.4 - CACHE PATH "The library directory for TBB") - elseif(EXISTS ${TBB_ROOT_DIR}/lib/intel64/gcc4.1) # Intel packaged TBB - set(TBB_LIBRARY ${TBB_ROOT_DIR}/lib/intel64/gcc4.1 - CACHE PATH "The library directory for TBB") - elseif(EXISTS ${TBB_ROOT_DIR}/lib/intel64/gcc4.8) # Intel packaged TBB - set(TBB_LIBRARY ${TBB_ROOT_DIR}/lib/intel64/gcc4.8 - CACHE PATH "The library directory for TBB") - else() # Intel OSS TBB - set(TBB_LIBRARY ${TBB_ROOT_DIR}/lib - CACHE PATH "The library directory for TBB") - endif() - else() - set(TBB_LIBRARY ${TBB_ROOT_DIR}/lib - CACHE PATH "The library directory for TBB") - endif() - endif() - - if(CMAKE_BUILD_TYPE STREQUAL "Debug" OR CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") - set(TBB_USE_DEBUG TRUE) - else() - set(TBB_USE_DEBUG FALSE) - endif() - - # Search for TBB include directory - find_path(TBB_INCLUDE_DIRS NAMES tbb/tbb.h - HINTS ${TBB_INCLUDE_DIR}) - - # Search for TBB libraries - find_library(TBB_tbb_LIBRARY tbb - HINTS ${TBB_LIBRARY}) - if(TBB_tbb_LIBRARY) - get_filename_component(TBB_tbb_LIBRARY_DIR "${TBB_tbb_LIBRARY}" PATH) - find_library(TBB_tbb_debug_LIBRARY tbb_debug - HINTS ${TBB_tbb_LIBRARY_DIR} - NO_DEFAULT_PATH) - - foreach(_comp tbb_preview tbbmalloc tbbmalloc_proxy) - find_library(TBB_${_comp}_LIBRARY ${_comp} - HINTS ${TBB_tbb_LIBRARY_DIR} - NO_DEFAULT_PATH) - find_library(TBB_${_comp}_debug_LIBRARY ${_comp}_debug - HINTS ${TBB_tbb_LIBRARY_DIR} - NO_DEFAULT_PATH) - endforeach() - endif() - - # Process TBB libaraies - foreach(_lib tbb tbb_preview tbbmalloc tbbmalloc_proxy) - # Set library found variables - if(TBB_${_lib}_LIBRARY) - set(TBB_${_lib}_FOUND TRUE) - else() - set(TBB_${_lib}_FOUND FALSE) - endif() - if(TBB_${_lib}_debug_LIBRARY) - set(TBB_${_lib}_debug_FOUND TRUE) - else() - 
set(TBB_${_lib}_debug_FOUND FALSE) - endif() - - # Set the build type TBB library variables - if(_lib STREQUAL "tbb" OR ";${TBB_FIND_COMPONENTS};" MATCHES ";${_lib};") - if(TBB_${_lib}_FOUND) - set(TBB_LIBRARIES_RELEASE ${TBB_${_lib}_LIBRARY} ${TBB_LIBRARIES_RELEASE}) - endif() - if(TBB_${_lib}_debug_FOUND) - set(TBB_LIBRARIES_DEBUG ${TBB_${_lib}_debug_LIBRARY} ${TBB_LIBRARIES_DEBUG}) - endif() - endif() - endforeach() - - # Set the TBB_LIBRARIES variable - if(TBB_USE_DEBUG AND TBB_LIBRARIES_DEBUG) - set(TBB_LIBRARIES ${TBB_LIBRARIES_DEBUG}) - else() - set(TBB_LIBRARIES ${TBB_LIBRARIES_RELEASE}) - endif() - - # Get TBB version - if(TBB_INCLUDE_DIRS) - file(READ "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h" _tbb_version_file) - string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1" - TBB_VERSION_MAJOR "${_tbb_version_file}") - string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1" - TBB_VERSION_MINOR "${_tbb_version_file}") - string(REGEX REPLACE ".*#define TBB_INTERFACE_VERSION ([0-9]+).*" "\\1" - TBB_INTERFACE_VERSION "${_tbb_version_file}") - set(TBB_VERSION "${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR}") - unset(_tbb_version_header) - endif() - - # handle the QUIETLY and REQUIRED arguments and set TBB_FOUND to TRUE - # if all listed variables are TRUE - find_package_handle_standard_args(TBB - FOUND_VAR TBB_FOUND - VERSION_VAR TBB_VERSION - REQUIRED_VARS TBB_LIBRARIES TBB_INCLUDE_DIRS - HANDLE_COMPONENTS) - - if(TBB_LIBRARIES_DEBUG) - set(TBB_COMPILE_FLAGS_DEBUG "-DTBB_USE_DEBUG=1") - endif() - - mark_as_advanced(TBB_INCLUDE_DIR TBB_LIBRARY TBB_INCLUDE_DIRS TBB_LIBRARIES) - -endif() From 0f402aa10cbdeee5768c12692fce07ec40d7e1f0 Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Wed, 27 Apr 2022 11:24:01 -0400 Subject: [PATCH 072/120] can associate multiple keys with same IndexSpace, this will help with using multiple labels for same space but different semantics; each space still have a unique base key --- SeQuant/core/space.cpp | 23 ++- 
SeQuant/core/space.hpp | 305 ++++++++++++++++++++++++-------------- tests/unit/test_index.cpp | 62 +++++--- tests/unit/test_space.cpp | 41 +++-- 4 files changed, 281 insertions(+), 150 deletions(-) diff --git a/SeQuant/core/space.cpp b/SeQuant/core/space.cpp index 64590ba7f..0ab331694 100644 --- a/SeQuant/core/space.cpp +++ b/SeQuant/core/space.cpp @@ -4,9 +4,15 @@ #include "space.hpp" -sequant::container::map sequant::IndexSpace::keys_{}; -sequant::container::map sequant::IndexSpace::instances_{}; -sequant::IndexSpace sequant::IndexSpace::null_instance_{sequant::IndexSpace::Attr::null()}; +sequant::container::map + sequant::IndexSpace::attr2basekey_{}; +sequant::container::map + sequant::IndexSpace::key2attr_{}; +sequant::container::map + sequant::IndexSpace::instances_{}; +sequant::IndexSpace sequant::IndexSpace::null_instance_{ + sequant::IndexSpace::Attr::null()}; namespace sequant { @@ -23,9 +29,12 @@ IndexSpace::Type IndexSpace::other_unoccupied = Type{0b100000}; IndexSpace::Type IndexSpace::complete_unoccupied = Type{0b111000}; IndexSpace::Type IndexSpace::complete = Type{0b111111}; -IndexSpace::QuantumNumbers IndexSpace::nullqns = IndexSpace::QuantumNumbers{0b000000}; //!< no quantum numbers -IndexSpace::QuantumNumbers IndexSpace::alpha = IndexSpace::QuantumNumbers{0b000001}; //!< spin-up -IndexSpace::QuantumNumbers IndexSpace::beta = IndexSpace::QuantumNumbers{0b000010}; //!< spin-down +IndexSpace::QuantumNumbers IndexSpace::nullqns = + IndexSpace::QuantumNumbers{0b000000}; //!< no quantum numbers +IndexSpace::QuantumNumbers IndexSpace::alpha = + IndexSpace::QuantumNumbers{0b000001}; //!< spin-up +IndexSpace::QuantumNumbers IndexSpace::beta = + IndexSpace::QuantumNumbers{0b000010}; //!< spin-down std::wstring to_wolfram(const IndexSpace& space) { std::wstring result = L"particleSpace["; @@ -52,4 +61,4 @@ std::wstring to_wolfram(const IndexSpace& space) { return result; } -} // namespace sequant \ No newline at end of file +} // namespace sequant diff --git 
a/SeQuant/core/space.hpp b/SeQuant/core/space.hpp index 3b65f5843..73330c9fc 100644 --- a/SeQuant/core/space.hpp +++ b/SeQuant/core/space.hpp @@ -5,8 +5,8 @@ #ifndef SEQUANT_SPACE_H #define SEQUANT_SPACE_H -#include #include +#include #include "attr.hpp" #include "container.hpp" @@ -18,21 +18,23 @@ namespace sequant { /// IndexSpace is a set of attributes associated 1-to-1 with keys class IndexSpace { public: - /// @brief TypeAttr is the type of index space. /// - /// The type is described as a set of (orthogonal) attributes; for simplicity it is encoded as a bitset for ease of - /// computing. + /// The type is described as a set of (orthogonal) attributes; for simplicity + /// it is encoded as a bitset for ease of computing. struct TypeAttr : std::bitset<32> { - constexpr explicit TypeAttr(int32_t value) noexcept : std::bitset<32>(static_cast(value)) {} + constexpr explicit TypeAttr(int32_t value) noexcept + : std::bitset<32>(static_cast(value)) {} operator int64_t() const { return static_cast(this->to_ulong()); } int32_t to_int32() const { return static_cast(this->to_ulong()); } - TypeAttr intersection(TypeAttr other) const { return TypeAttr(this->to_int32() & other.to_int32()); } - TypeAttr unIon(TypeAttr other) const { return TypeAttr(this->to_int32() | other.to_int32()); } - /// @return true if \c other is included in this object - bool includes(TypeAttr other) const { - return intersection(other) == other; + TypeAttr intersection(TypeAttr other) const { + return TypeAttr(this->to_int32() & other.to_int32()); + } + TypeAttr unIon(TypeAttr other) const { + return TypeAttr(this->to_int32() | other.to_int32()); } + /// @return true if \c other is included in this object + bool includes(TypeAttr other) const { return intersection(other) == other; } /// @return true if in canonical order this object preceeds \c other bool operator<(TypeAttr other) const { return this->to_int32() < other.to_int32(); @@ -43,8 +45,11 @@ class IndexSpace { }; /// denotes other quantum 
numbers (particle type, spin, etc.) struct QuantumNumbersAttr : std::bitset<32> { - constexpr explicit QuantumNumbersAttr(int32_t value) noexcept : std::bitset<32>(static_cast(value)) {} - explicit operator int64_t() const { return static_cast(this->to_ulong()); } + constexpr explicit QuantumNumbersAttr(int32_t value) noexcept + : std::bitset<32>(static_cast(value)) {} + explicit operator int64_t() const { + return static_cast(this->to_ulong()); + } int32_t to_int32() const { return static_cast(this->to_ulong()); } QuantumNumbersAttr intersection(QuantumNumbersAttr other) const { return QuantumNumbersAttr(this->to_int32() & other.to_int32()); @@ -62,44 +67,67 @@ class IndexSpace { } /// @return an invalid TypeAttr - static constexpr QuantumNumbersAttr invalid() noexcept { return QuantumNumbersAttr(0xffff); } + static constexpr QuantumNumbersAttr invalid() noexcept { + return QuantumNumbersAttr(0xffff); + } }; - /// @brief Attr describes all attributes of a space (occupancy + quantum numbers) + /// @brief Attr describes all attributes of a space (occupancy + quantum + /// numbers) struct Attr : TypeAttr, QuantumNumbersAttr { - Attr(TypeAttr type, QuantumNumbersAttr qns) noexcept : TypeAttr(type), QuantumNumbersAttr(qns) {}; - Attr(int32_t type, int32_t qns) noexcept : TypeAttr(type), QuantumNumbersAttr(qns) {}; -// explicit Attr(int64_t value) : TypeAttr((value & 0xffffffff00000000) >> 32), QuantumNumbersAttr(value & 0x00000000ffffffff) {} + Attr(TypeAttr type, QuantumNumbersAttr qns) noexcept + : TypeAttr(type), QuantumNumbersAttr(qns){}; + Attr(int32_t type, int32_t qns) noexcept + : TypeAttr(type), QuantumNumbersAttr(qns){}; + // explicit Attr(int64_t value) : TypeAttr((value & 0xffffffff00000000) + // >> 32), QuantumNumbersAttr(value & 0x00000000ffffffff) {} Attr(const Attr &) = default; Attr(Attr &&) = default; Attr &operator=(const Attr &) = default; Attr &operator=(Attr &&) = default; - const TypeAttr &type() const { return static_cast(*this); } + const 
TypeAttr &type() const { + return static_cast(*this); + } TypeAttr &type() { return static_cast(*this); } - const QuantumNumbersAttr &qns() const { return static_cast(*this); } - QuantumNumbersAttr &qns() { return static_cast(*this); } + const QuantumNumbersAttr &qns() const { + return static_cast(*this); + } + QuantumNumbersAttr &qns() { + return static_cast(*this); + } explicit operator int64_t() const { - return (static_cast(this->type()) << 32) + static_cast(this->qns()); + return (static_cast(this->type()) << 32) + + static_cast(this->qns()); } Attr intersection(Attr other) const { return Attr(this->type().intersection(other.type()), this->qns().intersection(other.qns())); } - Attr unIon(Attr other) const { return Attr(this->type().unIon(other.type()), this->qns().unIon(other.qns())); } + Attr unIon(Attr other) const { + return Attr(this->type().unIon(other.type()), + this->qns().unIon(other.qns())); + } /// @return true if \c other is included in this object bool includes(Attr other) const { - return this->type().includes(other.type()) && this->qns().includes(other.qns()); + return this->type().includes(other.type()) && + this->qns().includes(other.qns()); } - bool operator==(Attr other) const { return this->type() == other.type() && this->qns() == other.qns(); } + bool operator==(Attr other) const { + return this->type() == other.type() && this->qns() == other.qns(); + } bool operator!=(Attr other) const { return !(*this == other); } - static Attr null() noexcept { return Attr{TypeAttr{0}, QuantumNumbersAttr{0}}; } - static Attr invalid() noexcept { return Attr{TypeAttr::invalid(), QuantumNumbersAttr::invalid()}; } + static Attr null() noexcept { + return Attr{TypeAttr{0}, QuantumNumbersAttr{0}}; + } + static Attr invalid() noexcept { + return Attr{TypeAttr::invalid(), QuantumNumbersAttr::invalid()}; + } bool is_valid() const noexcept { return *this != Attr::invalid(); } /// Attr objects are ordered by quantum numbers, then by type @@ -114,7 +142,8 @@ class 
IndexSpace { using Type = TypeAttr; using QuantumNumbers = QuantumNumbersAttr; - /// standard space tags are predefined that helps implement set theory of standard spaces as binary ops on bitsets + /// standard space tags are predefined that helps implement set theory of + /// standard spaces as binary ops on bitsets static Type frozen_occupied; static Type inactive_occupied; static Type active_occupied; @@ -127,19 +156,24 @@ class IndexSpace { static Type other_unoccupied; static Type complete_unoccupied; static Type complete; - template static const constexpr bool is_standard_type() { + template + static const constexpr bool is_standard_type() { const Type type{typeint}; - return (type == frozen_occupied || type == inactive_occupied || type == active_occupied || - type == occupied || type == active_unoccupied || type == inactive_unoccupied || - type == unoccupied || type == all_active || type == all || type == other_unoccupied || - type == complete_unoccupied || type == complete); + return (type == frozen_occupied || type == inactive_occupied || + type == active_occupied || type == occupied || + type == active_unoccupied || type == inactive_unoccupied || + type == unoccupied || type == all_active || type == all || + type == other_unoccupied || type == complete_unoccupied || + type == complete); } - /// standard space tags are predefined that helps implement set theory of standard spaces as binary ops on bitsets + /// standard space tags are predefined that helps implement set theory of + /// standard spaces as binary ops on bitsets static QuantumNumbers nullqns; //!< no quantum numbers - static QuantumNumbers alpha; //!< spin-up - static QuantumNumbers beta; //!< spin-down - template static const constexpr bool is_standard_qns() { + static QuantumNumbers alpha; //!< spin-up + static QuantumNumbers beta; //!< spin-down + template + static const constexpr bool is_standard_qns() { const QuantumNumbers qns{qnsint}; return (qns == nullqns || qns == alpha || qns == 
beta); } @@ -151,24 +185,31 @@ class IndexSpace { bad_attr() : std::invalid_argument("bad attribute") {} }; + struct KeyCompare { + using is_transparent = void; + bool operator()(const std::wstring &a, const std::wstring &b) const { + return a < b; + } + bool operator()(const std::wstring &a, const std::wstring_view &b) const { + return a < b; + } + bool operator()(const std::wstring_view &a, const std::wstring &b) const { + return a < b; + } + }; + /// IndexSpace needs null IndexSpace - static const IndexSpace &null_instance() { - return null_instance_; - } + static const IndexSpace &null_instance() { return null_instance_; } /// the null IndexSpace is keyed by this key - static std::wstring null_key() { - return L""; - } + static std::wstring null_key() { return L""; } /// @brief returns the instance of an IndexSpace object /// @param attr the space attribute /// @throw bad_key if key not found static const IndexSpace &instance(Attr attr) { assert(attr.is_valid()); - if (attr == Attr::null()) - return null_instance(); - if (!instance_exists(attr)) - throw bad_attr(); + if (attr == Attr::null()) return null_instance(); + if (!instance_exists(attr)) throw bad_attr(); return instances_.find(attr)->second; } @@ -179,38 +220,40 @@ class IndexSpace { static const IndexSpace &instance(Type type, QuantumNumbers qns = nullqns) { const auto attr = Attr(type, qns); assert(attr.is_valid()); - if (attr == Attr::null()) - return null_instance(); - if (!instance_exists(attr)) - throw bad_attr(); + if (attr == Attr::null()) return null_instance(); + if (!instance_exists(attr)) throw bad_attr(); return instances_.find(attr)->second; } - /// @brief returns the instance of an IndexSpace object - /// @param key a string key describing a particular space that has been registered before + /// @brief returns the instance of an IndexSpace object associated + /// with the given key + /// @param key the key associated with this space; this can be either + /// the base key used to invoke 
`IndexSpace::register_instance()` + /// or a key used to invoke `IndexSpace::register_key()` /// @throw bad_key if key not found static const IndexSpace &instance(const std::wstring_view key) { - if (key == null_key()) - return null_instance(); + if (key == null_key()) return null_instance(); const auto attr = to_attr(reduce_key(key)); assert(attr.is_valid()); - if (!instance_exists(attr)) - throw bad_key(); + if (!instance_exists(attr)) throw bad_key(); return instances_.find(attr)->second; } - /// @brief returns the instance of an IndexSpace object - /// @param key string key describing a particular space - static void register_instance(const std::wstring_view key, - Type type, + /// @brief constructs a registered instance of an IndexSpace object, + /// associates it with a base key + /// @param base_key string key that will be used as the "base key" for this + /// particular space, i.e. the default used for example for + /// constructing temporary indices for this space + static void register_instance(const std::wstring_view base_key, Type type, QuantumNumbers qn = nullqns, bool throw_if_already_registered = true) { const auto attr = Attr(type, qn); assert(attr.is_valid()); - if (instance_exists(attr) && throw_if_already_registered) - throw bad_key(); - const auto irreducible_key = reduce_key(key); - keys_[attr] = to_wstring(irreducible_key); + if (instance_exists(attr) && throw_if_already_registered) throw bad_key(); + const auto irreducible_basekey = reduce_key(base_key); + const auto irreducible_basekey_str = to_wstring(irreducible_basekey); + attr2basekey_[attr] = irreducible_basekey_str; + key2attr_.emplace(irreducible_basekey_str, attr); instances_.emplace(std::make_pair(attr, IndexSpace(attr))); } @@ -218,34 +261,47 @@ class IndexSpace { return instance_exists(to_attr(reduce_key(key))); } + /// @brief associate a given key with the IndexSpace + /// @note every IndexSpace constructed via + /// `register_instance(base_key,...)` is associated + /// with 
`base_key`; this allows to associated additional + /// keys to map to the same IndexSpace + /// @param key string key that will map to this particular space + static void register_key(const std::wstring_view key, Type type, + QuantumNumbers qn = nullqns, + bool throw_if_already_registered = true) { + const auto attr = Attr(type, qn); + assert(attr.is_valid()); + const auto irreducible_key = reduce_key(key); + const auto irreducible_key_str = to_wstring(irreducible_key); + if (key2attr_.find(irreducible_key_str) != key2attr_.end() && + throw_if_already_registered) + throw bad_key(); + key2attr_.emplace(irreducible_key_str, attr); + } + Attr attr() const noexcept { assert(attr_.is_valid()); return attr_; } - Type type() const noexcept { - return attr().type(); - } - QuantumNumbers qns() const noexcept { - return attr().qns(); - } + Type type() const noexcept { return attr().type(); } + QuantumNumbers qns() const noexcept { return attr().qns(); } /// @brief returns the base key for IndexSpace objects /// @param space an IndexSpace object - /// @throw bad_key if this space has not beed registered - static std::wstring base_key(const IndexSpace& space) { + /// @throw bad_key if this space has not been registered + static std::wstring base_key(const IndexSpace &space) { return base_key(space.attr()); } /// @brief returns the base key for IndexSpace objects of the given attribute /// @param attr the space attribute - /// @throw bad_key if this object has not beed registered + /// @throw bad_key if this object has not been registered static std::wstring base_key(Attr attr) { assert(attr.is_valid()); - if (attr == Attr::null()) - return L""; - if (!instance_exists(attr)) - throw bad_attr(); - return keys_.find(attr)->second; + if (attr == Attr::null()) return L""; + if (!instance_exists(attr)) throw bad_attr(); + return attr2basekey_.find(attr)->second; } /// Default ctor creates an invalid space @@ -253,23 +309,27 @@ class IndexSpace { IndexSpace(const IndexSpace &other) { 
if (!other.attr().is_valid()) - throw std::invalid_argument("IndexSpace copy ctor received invalid argument"); + throw std::invalid_argument( + "IndexSpace copy ctor received invalid argument"); attr_ = other.attr_; } IndexSpace(IndexSpace &&other) { if (!other.attr().is_valid()) - throw std::invalid_argument("IndexSpace move ctor received invalid argument"); + throw std::invalid_argument( + "IndexSpace move ctor received invalid argument"); attr_ = other.attr_; } IndexSpace &operator=(const IndexSpace &other) { if (!other.attr().is_valid()) - throw std::invalid_argument("IndexSpace copy assignment operator received invalid argument"); + throw std::invalid_argument( + "IndexSpace copy assignment operator received invalid argument"); attr_ = other.attr_; return *this; } IndexSpace &operator=(IndexSpace &&other) { if (!other.attr().is_valid()) - throw std::invalid_argument("IndexSpace move assignment operator received invalid argument"); + throw std::invalid_argument( + "IndexSpace move assignment operator received invalid argument"); attr_ = other.attr_; return *this; } @@ -280,14 +340,14 @@ class IndexSpace { } private: - Attr attr_ = Attr::invalid(); /// @brief constructs an instance of an IndexSpace object explicit IndexSpace(Attr attr) noexcept : attr_(attr) { assert(attr.is_valid()); } - static container::map keys_; + static container::map attr2basekey_; + static container::map key2attr_; static container::map instances_; static IndexSpace null_instance_; @@ -296,11 +356,14 @@ class IndexSpace { return key.substr(0, underscore_position); } + /// @param key the key associated with a registered IndexSpace; this can be + /// either + /// the base key used to invoke `IndexSpace::register_instance()` + /// or a key used to invoke `IndexSpace::register_key()` + /// @return the attribute of the IndexSpace object corresponding to @p key static Attr to_attr(std::wstring_view key) { - for (const auto &attr_key: keys_) { - if (attr_key.second == key) - return 
attr_key.first; - } + const auto found_it = key2attr_.find(key); + if (found_it != key2attr_.end()) return found_it->second; throw bad_key(); } @@ -311,7 +374,6 @@ class IndexSpace { static bool instance_exists(Attr attr) { return instances_.find(attr) != instances_.end(); } - }; inline bool operator==(const IndexSpace &space, IndexSpace::Type t) { @@ -326,16 +388,20 @@ inline bool operator!=(const IndexSpace &space, IndexSpace::Type t) { inline bool operator!=(IndexSpace::Type t, const IndexSpace &space) { return !(t == space); } -inline bool operator==(const IndexSpace &space, IndexSpace::QuantumNumbers qns) { +inline bool operator==(const IndexSpace &space, + IndexSpace::QuantumNumbers qns) { return space.qns() == qns; } -inline bool operator==(IndexSpace::QuantumNumbers qns, const IndexSpace &space) { +inline bool operator==(IndexSpace::QuantumNumbers qns, + const IndexSpace &space) { return space.qns() == qns; } -inline bool operator!=(const IndexSpace &space, IndexSpace::QuantumNumbers qns) { +inline bool operator!=(const IndexSpace &space, + IndexSpace::QuantumNumbers qns) { return !(space == qns); } -inline bool operator!=(IndexSpace::QuantumNumbers qns, const IndexSpace &space) { +inline bool operator!=(IndexSpace::QuantumNumbers qns, + const IndexSpace &space) { return !(qns == space); } inline bool operator==(const IndexSpace &space1, const IndexSpace &space2) { @@ -344,51 +410,66 @@ inline bool operator==(const IndexSpace &space1, const IndexSpace &space2) { inline bool operator!=(const IndexSpace &space1, const IndexSpace &space2) { return !(space1 == space2); } -inline IndexSpace::Type intersection(IndexSpace::Type type1, IndexSpace::Type type2) { +inline IndexSpace::Type intersection(IndexSpace::Type type1, + IndexSpace::Type type2) { return type1.intersection(type2); } -inline IndexSpace::QuantumNumbers intersection(IndexSpace::QuantumNumbers v1, IndexSpace::QuantumNumbers v2) { +inline IndexSpace::QuantumNumbers 
intersection(IndexSpace::QuantumNumbers v1, + IndexSpace::QuantumNumbers v2) { return v1.intersection(v2); } -inline const IndexSpace &intersection(const IndexSpace &space1, const IndexSpace &space2) { +inline const IndexSpace &intersection(const IndexSpace &space1, + const IndexSpace &space2) { return IndexSpace::instance(space1.attr().intersection(space2.attr())); } -inline const IndexSpace &intersection(const IndexSpace &space1, const IndexSpace &space2, const IndexSpace &space3) { - return IndexSpace::instance(space1.attr().intersection(space2.attr().intersection(space3.attr()))); +inline const IndexSpace &intersection(const IndexSpace &space1, + const IndexSpace &space2, + const IndexSpace &space3) { + return IndexSpace::instance( + space1.attr().intersection(space2.attr().intersection(space3.attr()))); } inline IndexSpace::Type unIon(IndexSpace::Type type1, IndexSpace::Type type2) { return type1.unIon(type2); } -inline IndexSpace::QuantumNumbers unIon(IndexSpace::QuantumNumbers qns1, IndexSpace::QuantumNumbers qns2) { +inline IndexSpace::QuantumNumbers unIon(IndexSpace::QuantumNumbers qns1, + IndexSpace::QuantumNumbers qns2) { return qns1.unIon(qns2); } -inline const IndexSpace &unIon(const IndexSpace &space1, const IndexSpace &space2) { +inline const IndexSpace &unIon(const IndexSpace &space1, + const IndexSpace &space2) { return IndexSpace::instance(space1.attr().unIon(space2.attr())); } -/// @return true if type2 is included in type1, i.e. intersection(type1, type2) == type2 +/// @return true if type2 is included in type1, i.e. intersection(type1, type2) +/// == type2 inline bool includes(IndexSpace::Type type1, IndexSpace::Type type2) { return type1.includes(type2); } -/// @return true if qns2 is included in qns1, i.e. \code intersection(qns1, qns2) == qns2 \endcode is true -inline bool includes(IndexSpace::QuantumNumbers qns1, IndexSpace::QuantumNumbers qns2) { +/// @return true if qns2 is included in qns1, i.e. 
\code intersection(qns1, +/// qns2) == qns2 \endcode is true +inline bool includes(IndexSpace::QuantumNumbers qns1, + IndexSpace::QuantumNumbers qns2) { return qns1.includes(qns2); } -/// @return true if space2 is included in space1, i.e. intersection(space1, space2) == space2 +/// @return true if space2 is included in space1, i.e. intersection(space1, +/// space2) == space2 inline bool includes(const IndexSpace &space1, const IndexSpace &space2) { return space1.attr().includes(space2.attr()); } -/// IndexSpace are ordered by their attributes (i.e. labels do not matter one bit) +/// IndexSpace are ordered by their attributes (i.e. labels do not matter one +/// bit) inline bool operator<(const IndexSpace &space1, const IndexSpace &space2) { return space1.attr() < space2.attr(); } - -/// @return -1 if @c space includes no orbitals with zero occupancy, +1 if it includes only orbitals with zero occupancy, +/// @return -1 if @c space includes no orbitals with zero occupancy, +1 if it +/// includes only orbitals with zero occupancy, /// and 0 of it includes some orbitals with zero occupancy. 
-inline int occupancy_class(const IndexSpace& space) { - const auto included_in_occupied = includes(IndexSpace::occupied, space.type()); - const auto included_in_unoccupied = includes(IndexSpace::complete_unoccupied, space.type()); +inline int occupancy_class(const IndexSpace &space) { + const auto included_in_occupied = + includes(IndexSpace::occupied, space.type()); + const auto included_in_unoccupied = + includes(IndexSpace::complete_unoccupied, space.type()); assert(!(included_in_occupied && included_in_unoccupied)); if (included_in_occupied && !included_in_unoccupied) return -1; @@ -396,11 +477,11 @@ inline int occupancy_class(const IndexSpace& space) { return 0; else if (!included_in_occupied && included_in_unoccupied) return 1; - abort(); // unreachable + abort(); // unreachable } std::wstring to_wolfram(const IndexSpace &space); } // namespace sequant -#endif //SEQUANT_SPACE_H +#endif // SEQUANT_SPACE_H diff --git a/tests/unit/test_index.cpp b/tests/unit/test_index.cpp index f1a9de8b7..a89cb8589 100644 --- a/tests/unit/test_index.cpp +++ b/tests/unit/test_index.cpp @@ -9,7 +9,6 @@ #include "SeQuant/core/latex.hpp" TEST_CASE("Index", "[elements]") { - using namespace sequant; SECTION("constructors") { @@ -86,6 +85,16 @@ TEST_CASE("Index", "[elements]") { REQUIRE_THROWS(Index(L"i_5", {L"i_1", L"i_1"})); #endif } + + // 'g' is not a standard base key, but we can associate it with an existing + // space to be able to extend the index vocabulary + REQUIRE_THROWS(Index{L"g"}); + REQUIRE_NOTHROW(IndexSpace::register_key( + L"g", + IndexSpace::all)); // can assign additional key to a space already + // registered, this does not redefine base key + // and now ... 
+ REQUIRE_NOTHROW(Index{L"g"}); } SECTION("equality") { @@ -110,11 +119,15 @@ TEST_CASE("Index", "[elements]") { REQUIRE(!(a1 < i1)); } - SECTION("qns ordering"){ - auto p1A = Index(L"p⁺_1", IndexSpace::instance(IndexSpace::all, IndexSpace::alpha)); - auto p1B = Index(L"p⁻_1", IndexSpace::instance(IndexSpace::all, IndexSpace::beta)); - auto p2A = Index(L"p⁺_2", IndexSpace::instance(IndexSpace::all, IndexSpace::alpha)); - auto p2B = Index(L"p⁻_2", IndexSpace::instance(IndexSpace::all, IndexSpace::beta)); + SECTION("qns ordering") { + auto p1A = Index(L"p⁺_1", + IndexSpace::instance(IndexSpace::all, IndexSpace::alpha)); + auto p1B = + Index(L"p⁻_1", IndexSpace::instance(IndexSpace::all, IndexSpace::beta)); + auto p2A = Index(L"p⁺_2", + IndexSpace::instance(IndexSpace::all, IndexSpace::alpha)); + auto p2B = + Index(L"p⁻_2", IndexSpace::instance(IndexSpace::all, IndexSpace::beta)); REQUIRE(p1A.space().qns() == IndexSpace::alpha); REQUIRE(p2A.space().qns() == IndexSpace::alpha); REQUIRE(p1B.space().qns() == IndexSpace::beta); @@ -125,7 +138,6 @@ TEST_CASE("Index", "[elements]") { REQUIRE(p1B < p2B); } - SECTION("hashing") { REQUIRE_NOTHROW(hash_value(Index{})); Index i1(L"i_1"); @@ -175,27 +187,37 @@ TEST_CASE("Index", "[elements]") { Index i1(L"i_1"); std::wstring i1_str; REQUIRE_NOTHROW(i1_str = i1.to_wolfram()); - REQUIRE(i1_str == L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(i\\), \\(1\\)]\\)\",particleSpace[occupied]]"); + REQUIRE(i1_str == + L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(i\\), " + L"\\(1\\)]\\)\",particleSpace[occupied]]"); REQUIRE(i1.to_wolfram(Action::create) == - L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(i\\), \\(1\\)]\\)\",particleSpace[occupied],indexType[cre]]"); + L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(i\\), " + L"\\(1\\)]\\)\",particleSpace[occupied],indexType[cre]]"); REQUIRE(i1.to_wolfram(BraKetPos::ket) == - L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(i\\), \\(1\\)]\\)\",particleSpace[occupied],indexType[ket]]"); + 
L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(i\\), " + L"\\(1\\)]\\)\",particleSpace[occupied],indexType[ket]]"); REQUIRE(i1.to_wolfram(Action::annihilate) == - L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(i\\), \\(1\\)]\\)\",particleSpace[occupied],indexType[ann]]"); + L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(i\\), " + L"\\(1\\)]\\)\",particleSpace[occupied],indexType[ann]]"); REQUIRE(i1.to_wolfram(BraKetPos::bra) == - L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(i\\), \\(1\\)]\\)\",particleSpace[occupied],indexType[bra]]"); + L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(i\\), " + L"\\(1\\)]\\)\",particleSpace[occupied],indexType[bra]]"); REQUIRE(Index(L"a_1").to_wolfram() == - L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(a\\), \\(1\\)]\\)\",particleSpace[virtual]]"); + L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(a\\), " + L"\\(1\\)]\\)\",particleSpace[virtual]]"); REQUIRE(Index(L"p_1").to_wolfram() == - L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(p\\), \\(1\\)]\\)\",particleSpace[occupied,virtual]]"); + L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(p\\), " + L"\\(1\\)]\\)\",particleSpace[occupied,virtual]]"); REQUIRE(Index(L"α'_1").to_wolfram() == - L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(α'\\), \\(1\\)]\\)\",particleSpace[othervirtual]]"); + L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(α'\\), " + L"\\(1\\)]\\)\",particleSpace[othervirtual]]"); REQUIRE(Index(L"α_1").to_wolfram() == - L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(α\\), \\(1\\)]\\)\",particleSpace[virtual,othervirtual]]"); - REQUIRE( - Index(L"κ_1").to_wolfram() == - L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(κ\\), \\(1\\)]\\)\",particleSpace[occupied,virtual,othervirtual]]"); + L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(α\\), " + L"\\(1\\)]\\)\",particleSpace[virtual,othervirtual]]"); + REQUIRE(Index(L"κ_1").to_wolfram() == + L"particleIndex[\"\\!\\(\\*SubscriptBox[\\(κ\\), " + L"\\(1\\)]\\)\",particleSpace[occupied,virtual,othervirtual]]"); } -} // TEST_CASE("Index") \ No newline at end of file +} // 
TEST_CASE("Index") diff --git a/tests/unit/test_space.cpp b/tests/unit/test_space.cpp index 431cafa27..181a6afec 100644 --- a/tests/unit/test_space.cpp +++ b/tests/unit/test_space.cpp @@ -28,11 +28,20 @@ TEST_CASE("IndexSpace", "[elements]") { REQUIRE(IndexSpace::instance_exists(L"κ_48")); } + SECTION("register_key") { + REQUIRE_NOTHROW(IndexSpace::register_key( + L"g", + IndexSpace::all)); // can assign additional key to a space already + // registered, this does not redefine base key + REQUIRE(IndexSpace::instance(L"g") == IndexSpace::instance(L"p")); + } + SECTION("equality") { REQUIRE(IndexSpace::instance(L"i") == IndexSpace::instance(L"i")); REQUIRE(IndexSpace::instance(L"i") != IndexSpace::instance(L"p")); - REQUIRE(IndexSpace::null_instance() == IndexSpace::instance(IndexSpace::null_key())); + REQUIRE(IndexSpace::null_instance() == + IndexSpace::instance(IndexSpace::null_key())); REQUIRE(IndexSpace::instance(L"i").type() == IndexSpace::active_occupied); REQUIRE(IndexSpace::instance(L"i") == IndexSpace::active_occupied); @@ -59,10 +68,14 @@ TEST_CASE("IndexSpace", "[elements]") { { auto i = IndexSpace::instance(L"i"); auto a = IndexSpace::instance(L"a"); - auto iA = IndexSpace::instance(IndexSpace::active_occupied, IndexSpace::alpha); - auto iB = IndexSpace::instance(IndexSpace::active_occupied, IndexSpace::beta); - auto aA = IndexSpace::instance(IndexSpace::active_unoccupied, IndexSpace::alpha); - auto aB = IndexSpace::instance(IndexSpace::active_unoccupied, IndexSpace::beta); + auto iA = + IndexSpace::instance(IndexSpace::active_occupied, IndexSpace::alpha); + auto iB = + IndexSpace::instance(IndexSpace::active_occupied, IndexSpace::beta); + auto aA = IndexSpace::instance(IndexSpace::active_unoccupied, + IndexSpace::alpha); + auto aB = + IndexSpace::instance(IndexSpace::active_unoccupied, IndexSpace::beta); REQUIRE(iA < aA); REQUIRE(iB < aB); @@ -73,16 +86,22 @@ TEST_CASE("IndexSpace", "[elements]") { REQUIRE(!(iA < iA)); REQUIRE(i < iA); REQUIRE(i < iB); - 
} } SECTION("set operations") { - REQUIRE(IndexSpace::instance(L"i") == intersection(IndexSpace::instance(L"i"), IndexSpace::instance(L"p"))); - REQUIRE(IndexSpace::null_instance() == intersection(IndexSpace::instance(L"a"), IndexSpace::instance(L"i"))); - REQUIRE(IndexSpace::null_instance() == intersection(IndexSpace::instance(L"a"), IndexSpace::instance(L"α'"))); - - REQUIRE(IndexSpace::instance(L"κ") == unIon(IndexSpace::instance(L"m"), IndexSpace::instance(L"α"))); + REQUIRE( + IndexSpace::instance(L"i") == + intersection(IndexSpace::instance(L"i"), IndexSpace::instance(L"p"))); + REQUIRE( + IndexSpace::null_instance() == + intersection(IndexSpace::instance(L"a"), IndexSpace::instance(L"i"))); + REQUIRE( + IndexSpace::null_instance() == + intersection(IndexSpace::instance(L"a"), IndexSpace::instance(L"α'"))); + + REQUIRE(IndexSpace::instance(L"κ") == + unIon(IndexSpace::instance(L"m"), IndexSpace::instance(L"α"))); REQUIRE(includes(IndexSpace::instance(L"κ"), IndexSpace::instance(L"m"))); REQUIRE(!includes(IndexSpace::instance(L"m"), IndexSpace::instance(L"κ"))); From 3cd6dbf0472bb9a0f5d3ff72247f54a8f34daf9a Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Wed, 27 Apr 2022 14:38:41 -0400 Subject: [PATCH 073/120] amended 0f402aa10cbdeee5768c12692fce07ec40d7e1f0 --- tests/unit/test_index.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_index.cpp b/tests/unit/test_index.cpp index a89cb8589..96b916dcc 100644 --- a/tests/unit/test_index.cpp +++ b/tests/unit/test_index.cpp @@ -88,13 +88,15 @@ TEST_CASE("Index", "[elements]") { // 'g' is not a standard base key, but we can associate it with an existing // space to be able to extend the index vocabulary - REQUIRE_THROWS(Index{L"g"}); +#ifndef NDEBUG + REQUIRE_THROWS(Index{L"h"}); +#endif REQUIRE_NOTHROW(IndexSpace::register_key( - L"g", + L"h", IndexSpace::all)); // can assign additional key to a space already // registered, this does not redefine base key // and now ... 
- REQUIRE_NOTHROW(Index{L"g"}); + REQUIRE_NOTHROW(Index{L"h"}); } SECTION("equality") { From a5ab426eb3e425190846cee217ea03cb32f7abd3 Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Mon, 2 May 2022 11:24:02 -0400 Subject: [PATCH 074/120] WickTheorem can handle bosons (but real vacuum only) --- SeQuant/core/wick.hpp | 232 +++++++++++++++++++++++---------------- tests/unit/test_wick.cpp | 82 ++++++++++++++ 2 files changed, 219 insertions(+), 95 deletions(-) diff --git a/SeQuant/core/wick.hpp b/SeQuant/core/wick.hpp index cba8d8cd0..78536b312 100644 --- a/SeQuant/core/wick.hpp +++ b/SeQuant/core/wick.hpp @@ -30,8 +30,6 @@ class WickTheorem { friend struct access_by; static constexpr const Statistics statistics = S; - static_assert(S == Statistics::FermiDirac, - "WickTheorem not yet implemented for Bose-Einstein"); WickTheorem(WickTheorem &&) = default; WickTheorem &operator=(WickTheorem &&) = default; @@ -42,11 +40,13 @@ class WickTheorem { WickTheorem &operator=(const WickTheorem &) = default; public: - explicit WickTheorem(const NormalOperatorSequence &input) : input_(input) { assert(input.size() <= max_input_size); assert(input.empty() || input.vacuum() != Vacuum::Invalid); assert(input.empty() || input.vacuum() != Vacuum::Invalid); + if constexpr (statistics == Statistics::BoseEinstein) { + assert(input.empty() || input.vacuum() == Vacuum::Physical); + } } explicit WickTheorem(ExprPtr expr_input) : expr_input_(expr_input) {} @@ -162,17 +162,19 @@ class WickTheorem { /// @tparam Integer an integral type template - WickTheorem& set_op_connections(std::initializer_list> op_index_pairs) { - return this->set_op_connections(op_index_pairs); + WickTheorem &set_op_connections( + std::initializer_list> op_index_pairs) { + return this->set_op_connections( + op_index_pairs); } ///@} /// @name topological partition specifiers /// - /// Specifies topological partition of normal operators; free (non-connected) operators - /// in the same partition are considered 
topologically equivalent, hence if - /// only full contractions are needed only contractions to the first available - /// operator in a partition is needed (multiplied by the degeneracy) + /// Specifies topological partition of normal operators; free (non-connected) + /// operators in the same partition are considered topologically equivalent, + /// hence if only full contractions are needed only contractions to the first + /// available operator in a partition is needed (multiplied by the degeneracy) /// @param op_partitions list of operator partitions /// @note if this partitions are not given, every operator is assumed to be in /// its own partition @@ -209,33 +211,40 @@ class WickTheorem { /// @tparam Integer an integral type template - WickTheorem& set_op_partitions(std::initializer_list> op_partitions) { - return this->set_op_partitions(op_partitions); + WickTheorem &set_op_partitions( + std::initializer_list> op_partitions) { + return this->set_op_partitions( + op_partitions); } ///@} /// Computes and returns the result - /// @param count_only if true, will return the total number of terms, as a Constant. - /// @return the result of applying Wick's theorem; either a Constant, a Product, or a Sum + /// @param count_only if true, will return the total number of terms, as a + /// Constant. 
+ /// @return the result of applying Wick's theorem; either a Constant, a + /// Product, or a Sum ExprPtr compute(const bool count_only = false); /// Collects compute statistics class Stats { public: Stats() : num_attempted_contractions(0), num_useful_contractions(0) {} - Stats(const Stats& other) noexcept { + Stats(const Stats &other) noexcept { num_attempted_contractions.store(other.num_attempted_contractions.load()); num_useful_contractions.store(other.num_useful_contractions.load()); } - Stats& operator=(const Stats& other) noexcept { + Stats &operator=(const Stats &other) noexcept { num_attempted_contractions.store(other.num_attempted_contractions.load()); num_useful_contractions.store(other.num_useful_contractions.load()); return *this; } - void reset() { num_attempted_contractions = 0; num_useful_contractions = 0; } + void reset() { + num_attempted_contractions = 0; + num_useful_contractions = 0; + } - Stats& operator+=(const Stats& other) { + Stats &operator+=(const Stats &other) { num_attempted_contractions += other.num_attempted_contractions; num_useful_contractions += other.num_useful_contractions; return *this; @@ -246,12 +255,14 @@ class WickTheorem { }; /// Statistics accessor - /// @return statistics of compute calls since creation, or since the last call to reset_stats() - const Stats& stats() const { return stats_; } + /// @return statistics of compute calls since creation, or since the last call + /// to reset_stats() + const Stats &stats() const { return stats_; } /// Statistics accessor - /// @return statistics of compute calls since creation, or since the last call to reset_stats() - Stats& stats() { return stats_; } + /// @return statistics of compute calls since creation, or since the last call + /// to reset_stats() + Stats &stats() { return stats_; } /// Statistics reset void reset_stats() { stats_.reset(); } @@ -337,18 +348,23 @@ class WickTheorem { init_input_index_columns(); } - NontensorWickState(const NontensorWickState&) = delete; - 
NontensorWickState(NontensorWickState&&) = delete; - NontensorWickState& operator=(const NontensorWickState&) = delete; - NontensorWickState& operator=(NontensorWickState&&) = delete; + NontensorWickState(const NontensorWickState &) = delete; + NontensorWickState(NontensorWickState &&) = delete; + NontensorWickState &operator=(const NontensorWickState &) = delete; + NontensorWickState &operator=(NontensorWickState &&) = delete; NormalOperatorSequence opseq; //!< current state of operator sequence std::size_t opseq_size; //!< current size of opseq Product sp; //!< current prefactor int level; //!< level in recursive wick call stack - size_t left_op_offset; //!< where to start looking for contractions - bool count_only; //!< if true, only track the total number of summands in the result (i.e. 1 (the normal product) + the number of contractions (if normal wick result is wanted) or the number of complete constractions (if want complete contractions only) - std::atomic count; //!< if count_only is true, will countain the total number of terms + size_t left_op_offset; //!< where to start looking for contractions + bool count_only; //!< if true, only track the total number of summands in + //!< the result (i.e. 
1 (the normal product) + the number + //!< of contractions (if normal wick result is wanted) or + //!< the number of complete constractions (if want + //!< complete contractions only) + std::atomic count; //!< if count_only is true, will countain the + //!< total number of terms /// TODO rename op -> nop to distinguish Op and NormalOperator container::svector> op_connections; //!< bitmask of connections for each op (1 = connected) @@ -356,27 +372,34 @@ class WickTheorem { adjacency_matrix; //!< number of connections between each normop, only //!< lower triangle is kept - container::svector> - input_partner_indices; //!< list of {cre,ann} pairs of Index objects in the input whose corresponding Op objects act on the same particle + container::svector> + input_partner_indices; //!< list of {cre,ann} pairs of Index objects in + //!< the input whose corresponding Op objects + //!< act on the same particle - /// "merges" partner index pair from input_index_columns with contracted Index pairs in this->sp + /// "merges" partner index pair from input_index_columns with contracted + /// Index pairs in this->sp auto make_target_partner_indices() const { // copy all pairs in the input product - container::svector> result(input_partner_indices); + container::svector> result(input_partner_indices); // for every contraction so far encountered ... - for(auto& contr: sp) { + for (auto &contr : sp) { // N.B. sp is composed of 1-particle contractions - assert(contr->template is() && contr->template as().rank() == 1 && contr->template as().label() == sequant::overlap_label()); - const auto& contr_t = contr->template as(); - const auto& bra_idx = contr_t.bra().at(0); - const auto& ket_idx = contr_t.ket().at(0); - // ... 
if both bra and ket indices were in the input list, "merge" their pairs - auto bra_it = ranges::find_if(result, [&bra_idx](const auto& p){ + assert(contr->template is() && + contr->template as().rank() == 1 && + contr->template as().label() == + sequant::overlap_label()); + const auto &contr_t = contr->template as(); + const auto &bra_idx = contr_t.bra().at(0); + const auto &ket_idx = contr_t.ket().at(0); + // ... if both bra and ket indices were in the input list, "merge" their + // pairs + auto bra_it = ranges::find_if(result, [&bra_idx](const auto &p) { assert(p.first != bra_idx); return p.second == bra_idx; }); if (bra_it != result.end()) { - auto ket_it = ranges::find_if(result, [&ket_idx](const auto& p){ + auto ket_it = ranges::find_if(result, [&ket_idx](const auto &p) { assert(p.second != ket_idx); return p.first == ket_idx; }); @@ -385,8 +408,7 @@ class WickTheorem { if (ket_it > bra_it) { bra_it->second = std::move(ket_it->second); result.erase(ket_it); - } - else { + } else { ket_it->first = std::move(bra_it->first); result.erase(bra_it); } @@ -402,7 +424,8 @@ class WickTheorem { /// maps op to its topological partition index (1-based, 0 = no partition) /// TODO rename op -> nop to distinguish Op and NormalOperator container::svector op_topological_partition; - /// current state of partitions (will only match op_topological_partition before any contractions have occurred) + /// current state of partitions (will only match op_topological_partition + /// before any contractions have occurred) /// - when an operator is connected it's removed from the partition /// - when it is disconnected fully it's re-added to the partition container::svector> topological_partitions; @@ -413,24 +436,28 @@ class WickTheorem { const auto npartitions = *ranges::max_element(op_topological_partition); topological_partitions.resize(npartitions); size_t op_cnt = 0; - ranges::for_each(op_topological_partition, [this,&op_cnt](size_t toppart_idx) { - if (toppart_idx > 0) { // in a 
partition - topological_partitions.at(toppart_idx - 1).insert(op_cnt); - } - ++op_cnt; - }); - // assert that we don't have empty partitions due to invalid contents of op_topological_partition - assert(ranges::any_of(topological_partitions, [](auto&& partition){ - return partition.empty(); - }) == false); + ranges::for_each( + op_topological_partition, [this, &op_cnt](size_t toppart_idx) { + if (toppart_idx > 0) { // in a partition + topological_partitions.at(toppart_idx - 1).insert(op_cnt); + } + ++op_cnt; + }); + // assert that we don't have empty partitions due to invalid contents of + // op_topological_partition + assert(ranges::any_of(topological_partitions, [](auto &&partition) { + return partition.empty(); + }) == false); } // populates target_particle_ops void init_input_index_columns() { // for each NormalOperator - for(auto& nop: opseq) { - for(auto&& cre_ann : ranges::views::zip(nop.creators(), nop.annihilators())) { - input_partner_indices.emplace_back(std::get<0>(cre_ann).index(), std::get<1>(cre_ann).index()); + for (auto &nop : opseq) { + for (auto &&cre_ann : + ranges::views::zip(nop.creators(), nop.annihilators())) { + input_partner_indices.emplace_back(std::get<0>(cre_ann).index(), + std::get<1>(cre_ann).index()); } } } @@ -453,7 +480,8 @@ class WickTheorem { const Cursor &op1_cursor, const Cursor &op2_cursor) { auto update_topology = [this](size_t op_idx) { const auto nconnections = op_nconnections[op_idx]; - // if using topological partitions for normal ops, and this operator is in one of them, remove it on first connection + // if using topological partitions for normal ops, and this operator is + // in one of them, remove it on first connection if (!topological_partitions.empty()) { auto partition_idx = op_topological_partition[op_idx]; if (nconnections == 0 && partition_idx > 0) { @@ -533,7 +561,8 @@ class WickTheorem { auto partition_idx = op_topological_partition[op_idx]; if (nconnections == 0 && partition_idx > 0) { --partition_idx; // to 
0-based - auto inserted = topological_partitions.at(partition_idx).insert(op_idx); + auto inserted = + topological_partitions.at(partition_idx).insert(op_idx); assert(inserted.second); } } @@ -589,10 +618,10 @@ class WickTheorem { if (!full_contractions_) { if (count_only) { ++state.count; - } - else { + } else { auto [phase, normop] = normalize(input_, state.input_partner_indices); - result_plus_mutex.first->push_back(std::make_pair(Product(phase, {}), std::move(normop))); + result_plus_mutex.first->push_back( + std::make_pair(Product(phase, {}), std::move(normop))); } } @@ -602,8 +631,7 @@ class WickTheorem { if (count_only) { // count only? return the total number as a Constant assert(result.empty()); result_expr = ex(state.count); - } - else if (result.size() == 1) { // if result.size() == 1, return Product + } else if (result.size() == 1) { // if result.size() == 1, return Product auto product = std::make_shared(std::move(result.at(0).first)); if (full_contractions_) assert(result.at(0).second == nullptr); @@ -618,8 +646,7 @@ class WickTheorem { if (full_contractions_) { assert(term.second == nullptr); sum->append(ex(std::move(term.first))); - } - else { + } else { auto term_product = std::make_shared(std::move(term.first)); if (term.second) { term_product->append(1, term.second); @@ -628,35 +655,39 @@ class WickTheorem { } } result_expr = sum; - } - else if (result_expr == nullptr) + } else if (result_expr == nullptr) result_expr = ex(0); return result_expr; } public: virtual ~WickTheorem(); + private: void recursive_nontensor_wick( - std::pair>>> *, - std::mutex *> &result, + std::pair< + std::vector>>> *, + std::mutex *> &result, NontensorWickState &state) const { using opseq_view_type = flattened_rangenest>; auto opseq_view = opseq_view_type(&state.opseq); using std::begin; using std::end; - // if full contractions needed, make contractions involving first index with another index, else contract any index i with index j (ipush_back( 
std::make_pair(std::move(state.sp.deep_copy()), std::shared_ptr>{})); @@ -777,7 +812,8 @@ class WickTheorem { // << " terms" << std::endl; result.second->unlock(); } else { - auto [phase, op] = normalize(state.opseq, state.make_target_partner_indices()); + auto [phase, op] = normalize( + state.opseq, state.make_target_partner_indices()); result.second->lock(); result.first->push_back(std::make_pair( std::move(state.sp.deep_copy().scale(phase)), @@ -799,7 +835,8 @@ class WickTheorem { state.left_op_offset = left_op_offset; recursive_nontensor_wick(result, state); --state.level; - // this contraction is useful if it leads to useful contractions as a result + // this contraction is useful if it leads to useful contractions + // as a result if (current_num_useful_contractions != stats_.num_useful_contractions.load()) ++stats_.num_useful_contractions; @@ -822,12 +859,16 @@ class WickTheorem { ++op_right_iter; } } // right op iter - } // left op iter + } // left op iter } public: static bool can_contract(const Op &left, const Op &right, Vacuum vacuum = get_default_context().vacuum()) { + // can only do Wick's theorem for physical vacuum (or similar) + if constexpr (statistics == Statistics::BoseEinstein) + assert(vacuum == Vacuum::Physical); + if (is_qpannihilator(left, vacuum) && is_qpcreator(right, vacuum)) { const auto qpspace_left = qpannihilator_space(left, vacuum); const auto qpspace_right = qpcreator_space(right, vacuum); @@ -837,9 +878,8 @@ class WickTheorem { return false; } - static ExprPtr contract( - const Op &left, const Op &right, - Vacuum vacuum = get_default_context().vacuum()) { + static ExprPtr contract(const Op &left, const Op &right, + Vacuum vacuum = get_default_context().vacuum()) { assert(can_contract(left, right, vacuum)); // assert( // !left.index().has_proto_indices() && @@ -864,10 +904,12 @@ class WickTheorem { right.index().space()) { // may need 2 overlaps if neither space // is pure qp creator/annihilator auto result = std::make_shared(); - 
result->append(1, left_is_ann ? make_overlap(left.index(), index_common) - : make_overlap(index_common, left.index())); - result->append(1, left_is_ann ? make_overlap(index_common, right.index()) - : make_overlap(right.index(), index_common)); + result->append(1, left_is_ann + ? make_overlap(left.index(), index_common) + : make_overlap(index_common, left.index())); + result->append(1, left_is_ann + ? make_overlap(index_common, right.index()) + : make_overlap(right.index(), index_common)); return result; } else { return left_is_ann ? make_overlap(left.index(), right.index()) diff --git a/tests/unit/test_wick.cpp b/tests/unit/test_wick.cpp index 0248a83d3..672210a1b 100644 --- a/tests/unit/test_wick.cpp +++ b/tests/unit/test_wick.cpp @@ -89,10 +89,20 @@ TEST_CASE("WickTheorem", "[algorithms][wick]") { Vacuum::SingleProduct)); REQUIRE(!FWickTheorem::can_contract(fann(L"p_1"), fann(L"i_2"), Vacuum::SingleProduct)); + + REQUIRE(BWickTheorem::can_contract(bann(L"i_1"), bcre(L"i_2"), + Vacuum::Physical)); + REQUIRE(!BWickTheorem::can_contract(bcre(L"i_1"), bcre(L"i_2"), + Vacuum::Physical)); + REQUIRE(!BWickTheorem::can_contract(bcre(L"i_1"), bann(L"i_2"), + Vacuum::Physical)); + REQUIRE(!BWickTheorem::can_contract(bann(L"i_1"), bann(L"i_2"), + Vacuum::Physical)); } SECTION("constructors") { REQUIRE_NOTHROW(FWickTheorem{FNOperatorSeq{}}); + REQUIRE_NOTHROW(BWickTheorem{BNOperatorSeq{}}); { auto opseq1 = FNOperatorSeq({FNOperator({L"i_1"}, {L"i_2"}), @@ -108,6 +118,78 @@ TEST_CASE("WickTheorem", "[algorithms][wick]") { SECTION("physical vacuum") { constexpr Vacuum V = Vacuum::Physical; + // number operator + { + { + auto opseq1 = FNOperatorSeq( + {FNOperator({L"i_1"}, {}, V), FNOperator({}, {L"i_2"}, V)}); + auto wick1 = FWickTheorem{opseq1}; + REQUIRE_NOTHROW(wick1.spinfree(false).compute()); + // full contractions = null (N is already in normal form) + auto full_contractions = FWickTheorem{opseq1}.spinfree(false).compute(); + REQUIRE(full_contractions->is()); + 
REQUIRE(full_contractions->as().value() == 0); + // partial contractions = N + auto partial_contractions = FWickTheorem{opseq1}.full_contractions(false).spinfree(false).compute(); + //std::wcout << "partial_contractions=" << to_latex(partial_contractions) << std::endl; + REQUIRE(partial_contractions->is()); + REQUIRE(partial_contractions->as().size() == 1); + } + { + auto opseq1 = BNOperatorSeq( + {BNOperator({L"i_1"}, {}, V), BNOperator({}, {L"i_2"}, V)}); + auto wick1 = BWickTheorem{opseq1}; + REQUIRE_NOTHROW(wick1.spinfree(false).compute()); + // full contractions = null + auto full_contractions = BWickTheorem{opseq1}.spinfree(false).compute(); + REQUIRE(full_contractions->is()); + REQUIRE(full_contractions->as().value() == 0); + // partial contractions = N + auto partial_contractions = BWickTheorem{opseq1}.full_contractions(false).spinfree(false).compute(); + //std::wcout << "partial_contractions=" << to_latex(partial_contractions) << std::endl; + REQUIRE(partial_contractions->is()); + REQUIRE(partial_contractions->as().size() == 1); + } + } + + // hole number operator + { + { + auto opseq1 = FNOperatorSeq( + {FNOperator({}, {L"i_1"}, V), FNOperator({L"i_2"}, {}, V)}); + auto wick1 = FWickTheorem{opseq1}; + REQUIRE_NOTHROW(wick1.spinfree(false).compute()); + // full contractions = delta + auto full_contractions = FWickTheorem{opseq1}.spinfree(false).compute(); + REQUIRE(full_contractions->is()); + REQUIRE(full_contractions->as().size() == 1); + // partial contractions = delta - N + auto partial_contractions = FWickTheorem{opseq1}.full_contractions(false).spinfree(false).compute(); + //std::wcout << "partial_contractions=" << to_latex(partial_contractions) << std::endl; + REQUIRE(partial_contractions->is()); + REQUIRE(partial_contractions->as().size() == 2); + REQUIRE(to_latex(partial_contractions) == + L"{ \\bigl({{s^{{i_2}}_{{i_1}}}} - {{a^{{i_2}}_{{i_1}}}}\\bigr) }"); + } + { + auto opseq1 = BNOperatorSeq( + {BNOperator({}, {L"i_1"}, V), BNOperator({L"i_2"}, 
{}, V)}); + auto wick1 = BWickTheorem{opseq1}; + REQUIRE_NOTHROW(wick1.spinfree(false).compute()); + // full contractions = delta + auto full_contractions = BWickTheorem{opseq1}.spinfree(false).compute(); + REQUIRE(full_contractions->is()); + REQUIRE(full_contractions->as().size() == 1); + // partial contractions = delta + N + auto partial_contractions = BWickTheorem{opseq1}.full_contractions(false).spinfree(false).compute(); + //std::wcout << "partial_contractions=" << to_latex(partial_contractions) << std::endl; + REQUIRE(partial_contractions->is()); + REQUIRE(partial_contractions->as().size() == 2); + REQUIRE(to_latex(partial_contractions) == + L"{ \\bigl({{s^{{i_2}}_{{i_1}}}} + {{b^{{i_2}}_{{i_1}}}}\\bigr) }"); + } + } + // three 1-body operators { auto opseq1 = FNOperatorSeq({FNOperator({L"i_1"}, {L"i_2"}, V), From b74661562b87f1ebd62e5c9f93958cc28cf9ac4a Mon Sep 17 00:00:00 2001 From: connermasteran Date: Tue, 3 May 2022 11:25:32 -0400 Subject: [PATCH 075/120] uccf12 class improvement --- SeQuant/domain/eqs/single_ref_uccf12.h | 81 +++++++++++-------- .../domain/transcorrelated/simplifications.h | 2 +- 2 files changed, 50 insertions(+), 33 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index c23eedb9f..95f4c8f55 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -22,13 +22,31 @@ class uccf12 { bool sr; bool fock; unsigned int op_rank; + //no default constructor + IndexSpace::TypeAttr gg_space = IndexSpace::all; + int ansatz_; + bool print_; + bool singles_; + bool doubles_; + + //default constructor + uccf12(){} + // TODO implement logic for non-default variables. should also include logic // for spin-orbital expressions. 
- uccf12(bool single_reference = true, bool fock_approx = true, + uccf12(std::string gg_label, int ansatz = 2, + bool print = false, bool singles = false, + bool doubles = true,bool single_reference = true, bool fock_approx = true, unsigned int max_op_rank = 2) { sr = single_reference; fock = fock_approx; op_rank = max_op_rank; + ansatz_ = ansatz; + print_ = print; + singles_ = singles; + doubles_ = doubles; + + sequant::set_default_context( SeQuant(Vacuum::Physical, IndexSpaceMetric::Unit, BraKetSymmetry::conjugate, SPBasis::spinfree)); @@ -45,6 +63,28 @@ class uccf12 { sequant::detail::OpIdRegistrar op_id_registrar; TensorCanonicalizer::register_instance( std::make_shared()); + + // auto gg_space = IndexSpace::active_occupied; // Geminal-generating + // space: active occupieds is the normal choice, all orbitals is the + // reference-independent (albeit expensive) choice + assert(singles_ == true || doubles_ == true); + if (gg_label == "act_occ") { + gg_space = IndexSpace::active_occupied; + } else if (gg_label == "occ") { + gg_space = IndexSpace::occupied; + } else if (gg_label == "all") { + gg_space = IndexSpace::all; + } else if (gg_label == "fz") { + gg_space = IndexSpace::frozen_occupied; + } else if (gg_label == "uocc") { + gg_space = IndexSpace::unoccupied; + } + else if (gg_label == "act_obs") { + gg_space = IndexSpace::all_active; + } else { + throw std::runtime_error( + "uccf12::compute(gg_label) unsupported space label"); + } } //[[e1,e2],e3]_12 ExprPtr compute_double_com(ExprPtr e1, ExprPtr e2, ExprPtr e3, @@ -289,34 +329,11 @@ class uccf12 { } } - std::pair compute(std::string gg_label, int ansatz = 2, - bool print = false, bool singles = false, - bool doubles = true) { - // auto gg_space = IndexSpace::active_occupied; // Geminal-generating - // space: active occupieds is the normal choice, all orbitals is the - // reference-independent (albeit expensive) choice - assert(singles == true || doubles == true); - auto gg_space = 
IndexSpace::frozen_occupied; - if (gg_label == "act_occ") { - gg_space = IndexSpace::active_occupied; - } else if (gg_label == "occ") { - gg_space = IndexSpace::occupied; - } else if (gg_label == "all") { - gg_space = IndexSpace::all; - } else if (gg_label == "fz") { - gg_space = IndexSpace::frozen_occupied; - } else if (gg_label == "uocc") { - gg_space = IndexSpace::unoccupied; - } - else if (gg_label == "act_obs") { - gg_space = IndexSpace::all_active; - } else { - throw std::runtime_error( - "uccf12::compute(gg_label) unsupported space label"); - } + std::pair compute() { + auto single = ex(0.0); - if (singles) { + if (singles_) { // this might need to be complete space if we don't have a solution to the // particular blocks of interest. auto C = ex( @@ -333,13 +350,13 @@ class uccf12 { single = single + anti_herm_C; } - if (ansatz == 2) { + if (ansatz_ == 2) { auto h = H(false); auto r = R12(gg_space); auto r_1 = R12(gg_space); ExprPtr A = ex(0.0); - if (doubles) { + if (doubles_) { A = A + (r - adjoint(r)) + single; simplify(A); } else { @@ -390,7 +407,7 @@ class uccf12 { } std::cout << "number of terms: " << term_count << std::endl; - if (print) { + if (print_) { std::wcout << "one body terms: " << to_latex_align(one_body, 20, 2) << std::endl; std::wcout << "two body terms: " << to_latex_align(two_body, 20, 2) @@ -402,7 +419,7 @@ class uccf12 { // they will happen to contain off diagonal G elements. we would get the // same result if we kept the decomposition and simplified, but this should // save time. 
- if (ansatz == 1) { + if (ansatz_ == 1) { auto h = H(false); auto r = R12(gg_space); auto r_1 = R12(gg_space); @@ -461,7 +478,7 @@ class uccf12 { } std::cout << "number of terms: " << term_count << std::endl; - if (print) { + if (print_) { std::wcout << "one body terms: " << to_latex_align(one_body, 20, 2) << std::endl; std::wcout << "two body terms: " << to_latex_align(two_body, 20, 2) diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index bebf2cc3d..b0d268653 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -830,7 +830,7 @@ ExprPtr find_F12_interms(ExprPtr ex_) { // form of the hamiltonian operator. that is h^p_q E^q_p + 1/2 g^{pq}_{rs} // E^{rs}_{pq}. to achieve this form, the tensor part of the expression must // contain overlaps in place of the normal ordered operators. here we chose a -// canonical form for E^{p_7}_{p_9} and E^{p_7 p_8}_{p_9 p_10} +// canonical form for E^{p_7}_{p_9} and E^{p_7 p_8}_{p_9 p_10} as the external indicies // this also simultaneously partitions the result into one and two body terms. 
std::pair fnop_to_overlap(ExprPtr exprs) { auto one_body_result = ex(0); From 04057b2d99a76e875c00171d91d087af99fbfc90 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Tue, 3 May 2022 13:22:03 -0400 Subject: [PATCH 076/120] simplifications::detail --- SeQuant/domain/eqs/single_ref_uccf12.h | 19 +- .../domain/transcorrelated/simplifications.h | 385 +++++++++--------- 2 files changed, 204 insertions(+), 200 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index 95f4c8f55..10e8ff93b 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -108,7 +108,8 @@ class uccf12 { second_com = simplification::overlap_with_obs(second_com); second_com = second_com + ex(0.); second_com = simplification::screen_F12_proj(second_com, 2); - second_com = simplification::tens_to_FNOps(second_com); + second_com = simplification::detail::tens_to_FNOps(second_com); + //std::wcout << to_latex_align(second_com,20,3) << std::endl; second_com = decompositions::three_body_substitution(second_com, 2); non_canon_simplify(second_com); return second_com; @@ -125,7 +126,7 @@ class uccf12 { second_com = second_com + ex(0.); second_com = simplification::screen_F12_proj(second_com, 1); // std::wcout << to_latex_align(second_com,20,2) << std::endl; - second_com = simplification::tens_to_FNOps(second_com); + second_com = simplification::detail::tens_to_FNOps(second_com); simplify(second_com); return second_com; } @@ -233,7 +234,7 @@ class uccf12 { std::pair, std::vector> new_up_low; if (factor->is()) { for (int i = 0; i < factor->as().bra().size(); i++) { - auto in_where_bra = simplification::in_list( + auto in_where_bra = simplification::detail::in_list( factor->as().bra()[i], original_indices); if (in_where_bra.first) { new_up_low.first.push_back(changed_indices[in_where_bra.second]); @@ -245,7 +246,7 @@ class uccf12 { new_up_low.first.push_back( changed_indices[changed_indices.size() - 1]); } - auto 
in_where_ket = simplification::in_list( + auto in_where_ket = simplification::detail::in_list( factor->as().ket()[i], original_indices); if (in_where_ket.first) { new_up_low.second.push_back(changed_indices[in_where_ket.second]); @@ -263,7 +264,7 @@ class uccf12 { new_product = new_product * new_factor; } else if (factor->is()) { for (int i = 0; i < factor->as().nannihilators(); i++) { - auto in_where_ann = simplification::in_list( + auto in_where_ann = simplification::detail::in_list( factor->as().annihilators()[i].index(), original_indices); if (in_where_ann.first) { @@ -280,7 +281,7 @@ class uccf12 { new_up_low.first.push_back( changed_indices[changed_indices.size() - 1]); } - auto in_where_cre = simplification::in_list( + auto in_where_cre = simplification::detail::in_list( factor->as().creators()[i].index(), original_indices); if (in_where_cre.first) { @@ -334,8 +335,6 @@ class uccf12 { auto single = ex(0.0); if (singles_) { - // this might need to be complete space if we don't have a solution to the - // particular blocks of interest. 
auto C = ex( L"C", std::initializer_list{ @@ -372,7 +371,7 @@ class uccf12 { H_A_3 = simplification::overlap_with_obs(H_A_3); H_A_3 = H_A_3 + ex(0.); H_A_3 = simplification::screen_F12_proj(H_A_3, 2); - H_A_3 = simplification::tens_to_FNOps(H_A_3); + H_A_3 = simplification::detail::tens_to_FNOps(H_A_3); simplify(H_A_3); auto H_A_2 = decompositions::three_body_substitution(H_A_3, 2); simplify(H_A_2); @@ -431,7 +430,7 @@ class uccf12 { H_A_3 = simplification::overlap_with_obs(H_A_3); H_A_3 = H_A_3 + ex(0.); H_A_3 = simplification::screen_F12_proj(H_A_3, 1); - H_A_3 = simplification::tens_to_FNOps(H_A_3); + H_A_3 = simplification::detail::tens_to_FNOps(H_A_3); simplify(H_A_3); auto com_1 = simplification::hamiltonian_based_projector_1(H_A_3); diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index b0d268653..2e1ce38f5 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -11,6 +11,7 @@ using namespace sequant; // particular tensors. Also functionality for restricting our operators and // densities to the orbital basis set (obs). namespace simplification { +namespace detail{ template std::pair in_list(Index idx, vec_type ref_list) { bool inlist = false; @@ -24,7 +25,98 @@ std::pair in_list(Index idx, vec_type ref_list) { std::pair result{inlist, where_inlist}; return result; } + // convert a sequant::FNOperator to a sequant::tensor object + + +// in various transformation methods it seems as if the constants are removed or +// treated separatly from the main transformed hamiltonian expression. 
+ExprPtr remove_const(const ExprPtr ex_) { + auto new_expression = ex(0); + if (ex_->is()) { + for (auto&& product : ex_->as().summands()) { + bool has_fnop = false; + for (auto&& factor : product->as().factors()) { + if (factor->is()) { + has_fnop = true; + } + } + if (has_fnop) { + new_expression = new_expression + product; + } + } + } + non_canon_simplify(new_expression); + return new_expression; +} +// params ex_ : a product to replace indices on. +// og: original index in the product to be replaced +// newer: the new index which replaces the original index. +Product replace_idx(ExprPtr ex_, Index og, Index newer) { + assert(ex_->is()); + auto constant = ex_->as().scalar(); + auto new_product = ex(1); + for (auto&& factor : ex_->as().factors()) { + if (factor->is()) { + std::vector new_bras; + for (auto&& bra : factor->as().bra()) { + if (bra.label() == og.label()) { + new_bras.push_back(newer); + } else { + new_bras.push_back(bra); + } + } + std::vector new_kets; + for (auto&& ket : factor->as().ket()) { + if (ket.label() == og.label()) { + new_kets.push_back(newer); + } else { + new_kets.push_back(ket); + } + } + auto new_tensor = + ex(factor->as().label(), new_bras, new_kets); + new_product = new_tensor * new_product; + } + if (factor->is()) { + std::vector new_cres; + for (auto&& cre : factor->as().creators()) { + if (cre.index().label() == og.label()) { + new_cres.push_back(newer); + } else { + new_cres.push_back(cre.index()); + } + } + std::vector new_anns; + for (auto&& ann : factor->as().annihilators()) { + if (ann.index().label() == og.label()) { + new_anns.push_back(newer); + } else { + new_anns.push_back(ann.index()); + } + } + if (factor->as().ncreators() == 1) { + auto o1 = make_overlap({L"p_7"}, new_anns[0]); + auto o3 = make_overlap(new_cres[0], {L"p_9"}); + new_product = new_product * o1 * o3; + } else if (factor->as().ncreators() == 2) { + auto o1 = make_overlap({L"p_7"}, new_anns[0]); + auto o2 = make_overlap({L"p_8"}, new_anns[1]); + auto o3 
= make_overlap(new_cres[0], {L"p_9"}); + auto o4 = make_overlap(new_cres[1], {L"p_10"}); + new_product = new_product * o1 * o2 * o3 * o4; + } else { + throw "does not handle size > 2"; + } + // auto new_op = ex(new_cres,new_anns); + // new_product = new_product * new_op; + } + } + auto result = (ex(constant) * new_product); + return result->as(); +} + +// convert a sequant::Tensor to a sequant::FNOperator ExprPtr op_to_tens(ExprPtr ex_) { assert(ex_->is()); std::vector bra_indices; @@ -41,6 +133,84 @@ ExprPtr op_to_tens(ExprPtr ex_) { return result; } +ExprPtr FNOPs_to_tens(ExprPtr ex_) { + if (ex_->is()) { + auto new_sum = ex(0); + for (auto&& product : ex_->as().summands()) { + auto new_product = ex(product->as().scalar()); + for (auto factor : product->as().factors()) { + auto new_factor = ex(0); + if (factor->is()) { + new_factor = op_to_tens(factor) + new_factor; + assert(!new_factor->is()); + } else { + new_factor = factor + new_factor; + } + new_product = new_product * new_factor; + } + new_sum = new_product + new_sum; + } + non_canon_simplify(new_sum); + return new_sum; + } else if (ex_->is()) { + for (auto&& factor : ex_->as().factors()) { + if (factor->is()) { + factor = op_to_tens(factor); + } + } + } else if (ex_->is()) { + ex_ = detail::op_to_tens(ex_); + } else { + return ex_; + } + return ex_; +} + +ExprPtr tens_to_op(ExprPtr ex_) { + assert(ex_->is()); + auto result = + ex(ex_->as().ket(), ex_->as().bra()); + return result; +} + +ExprPtr tens_to_FNOps(ExprPtr ex_) { + if (ex_->is()) { + auto new_sum = ex(0); + for (auto&& product : ex_->as().summands()) { + auto new_product = ex(product->as().scalar()); + for (auto factor : product->as().factors()) { + auto new_factor = ex(0); + if (factor->is() && (factor->as().label() == L"E" || + factor->as().label() == L"a")) { + new_factor = tens_to_op(factor); + } else { + new_factor = factor; + } + new_product = new_factor * new_product; + } + new_sum = new_product + new_sum; + } + 
non_canon_simplify(new_sum); + return new_sum; + } else if (ex_->is()) { + for (auto&& factor : ex_->as().factors()) { + if (factor->is() && (factor->as().label() == L"E" || + factor->as().label() == L"a")) { + factor = tens_to_op(factor); + } + } + } else if (ex_->is() && (ex_->as().label() == L"E" || + ex_->as().label() == L"a")) { + ex_ = detail::tens_to_op(ex_); + } else { + return ex_; + } + return ex_; +} + +} + + // all densities and the Hamiltonian operators are confined to a given orbital // basis in second quantized notation. thus any index on a Normal Ordered // operator or density must be confined to the obs. @@ -92,7 +262,7 @@ ExprPtr overlap_with_obs(ExprPtr ex_) { Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), Index{label_6}); new_product = - o1 * o2 * o3 * o4 * o5 * o6 * new_product * op_to_tens(factor); + o1 * o2 * o3 * o4 * o5 * o6 * new_product * detail::op_to_tens(factor); } else if (it == product->as().factors().size() - 1 && factor->is() && factor->as().rank() == 2) { @@ -116,7 +286,7 @@ ExprPtr overlap_with_obs(ExprPtr ex_) { auto o4 = make_overlap( Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), Index{label_4}); - new_product = o1 * o2 * o3 * o4 * new_product * op_to_tens(factor); + new_product = o1 * o2 * o3 * o4 * new_product * detail::op_to_tens(factor); } else if (it == product->as().factors().size() - 1 && factor->is() && factor->as().rank() == 1) { @@ -130,19 +300,19 @@ ExprPtr overlap_with_obs(ExprPtr ex_) { auto o3 = make_overlap( Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), Index{label_3}); - new_product = o1 * o3 * new_product * op_to_tens(factor); + new_product = o1 * o3 * new_product * detail::op_to_tens(factor); } else if (factor->is() && factor->as().label() == L"\\Gamma" && factor->as().rank() == 1) { - std::wstring label_2; - std::wstring label_4; - label_2 = factor->as().ket()[0].label(); - label_4 = factor->as().bra()[0].label(); + std::wstring label_1; + std::wstring label_3; + 
label_1 = factor->as().ket()[0].label(); + label_3 = factor->as().bra()[0].label(); auto o1 = make_overlap( Index::make_tmp_index(IndexSpace::instance(IndexSpace::all)), - Index{label_2}); + Index{label_1}); auto o3 = make_overlap( - Index{label_4}, + Index{label_3}, Index::make_tmp_index(IndexSpace::instance(IndexSpace::all))); new_product = o1 * o3 * factor * new_product; } else if (factor->is() && @@ -186,101 +356,9 @@ ExprPtr overlap_with_obs(ExprPtr ex_) { } using IDX_list = std::initializer_list; -// in various transformation methods it seems as if the constants are removed or -// treated separatly from the main transformed hamiltonian expression. -ExprPtr remove_const(const ExprPtr ex_) { - auto new_expression = ex(0); - if (ex_->is()) { - for (auto&& product : ex_->as().summands()) { - bool has_fnop = false; - for (auto&& factor : product->as().factors()) { - if (factor->is()) { - has_fnop = true; - } - } - if (has_fnop) { - new_expression = new_expression + product; - } - } - } - non_canon_simplify(new_expression); - return new_expression; -} -// params ex_ : a product to replace indices on. -// og: original index in the product to be replaced -// newer: the new index which replaces the original index. 
-Product replace_idx(ExprPtr ex_, Index og, Index newer) { - assert(ex_->is()); - auto constant = ex_->as().scalar(); - auto new_product = ex(1); - for (auto&& factor : ex_->as().factors()) { - if (factor->is()) { - std::vector new_bras; - for (auto&& bra : factor->as().bra()) { - if (bra.label() == og.label()) { - new_bras.push_back(newer); - } else { - new_bras.push_back(bra); - } - } - std::vector new_kets; - for (auto&& ket : factor->as().ket()) { - if (ket.label() == og.label()) { - new_kets.push_back(newer); - } else { - new_kets.push_back(ket); - } - } - auto new_tensor = - ex(factor->as().label(), new_bras, new_kets); - new_product = new_tensor * new_product; - } - if (factor->is()) { - std::vector new_cres; - for (auto&& cre : factor->as().creators()) { - if (cre.index().label() == og.label()) { - new_cres.push_back(newer); - } else { - new_cres.push_back(cre.index()); - } - } - std::vector new_anns; - for (auto&& ann : factor->as().annihilators()) { - if (ann.index().label() == og.label()) { - new_anns.push_back(newer); - } else { - new_anns.push_back(ann.index()); - } - } - if (factor->as().ncreators() == 1) { - auto o1 = make_overlap({L"p_7"}, new_anns[0]); - auto o3 = make_overlap(new_cres[0], {L"p_9"}); - new_product = new_product * o1 * o3; - } else if (factor->as().ncreators() == 2) { - auto o1 = make_overlap({L"p_7"}, new_anns[0]); - auto o2 = make_overlap({L"p_8"}, new_anns[1]); - auto o3 = make_overlap(new_cres[0], {L"p_9"}); - auto o4 = make_overlap(new_cres[1], {L"p_10"}); - new_product = new_product * o1 * o2 * o3 * o4; - } else { - throw "does not handle size > 2"; - } - // auto new_op = ex(new_cres,new_anns); - // new_product = new_product * new_op; - } - } - auto result = (ex(constant) * new_product); - return result->as(); -} -// convert a sequant::Tensor to a sequant::FNOperator -ExprPtr tens_to_op(ExprPtr ex_) { - assert(ex_->is()); - auto result = - ex(ex_->as().ket(), ex_->as().bra()); - return result; -} + // F tensors must contain 
indices in the bra with space > all. this // includes complete, completeunoccupied, and inactiveunoccupied. and if one of // the particle indices is connected to the obs virtual space, then the other @@ -418,8 +496,7 @@ ExprPtr screen_F_tensors(ExprPtr ex_, int ansatz = 2) { } } -ExprPtr screen_density( - ExprPtr ex_) { // densities probably should be non-zero if each index has a +ExprPtr screen_density(ExprPtr ex_) { // densities probably should be non-zero if each index has a // chance to be occupied, in other words, screen out // densities containing unoccupied labels. assert(ex_->is()); @@ -483,7 +560,6 @@ auto treat_fock(ExprPtr ex_) { auto new_product = ex(real); for (auto&& factor : product->as().factors()) { if (factor->is() && factor->as().label() == L"f") { - // TODO do not assume EBC auto space = intersection(factor->as().bra()[0].space(), factor->as().ket()[0].space()); if (space.type().none()) { @@ -567,7 +643,7 @@ ncon_spa_extket_extbra(Tensor T1, Tensor T2, bool print_ = false) { // list. for (int i = 0; i < T1.bra().size(); i++) { // is the bra T1 index a connected index? - if (in_list(T1.bra()[i], connected_indices).first) { + if (detail::in_list(T1.bra()[i], connected_indices).first) { T1_ket = true; for (int j = 0; j < T2.ket().size(); j++) { if (T2.ket()[j].label() == T1.bra()[i].label()) { @@ -577,7 +653,7 @@ ncon_spa_extket_extbra(Tensor T1, Tensor T2, bool print_ = false) { } } // is the ket T1 index a connected index? 
- else if (in_list(T1.ket()[i], connected_indices).first) { + else if (detail::in_list(T1.ket()[i], connected_indices).first) { T1_ket = false; for (int j = 0; j < T2.ket().size(); j++) { if (T2.bra()[j].label() == T1.ket()[i].label()) { @@ -596,13 +672,13 @@ ncon_spa_extket_extbra(Tensor T1, Tensor T2, bool print_ = false) { for (int i = 0; i < T2.ket().size(); i++) { // if the ket index is connected, do nothing because the external index is // already accounted for - if (in_list(T2.ket()[i], connected_indices).first || - in_list(T2.ket()[i], external_ket).first) { + if (detail::in_list(T2.ket()[i], connected_indices).first || + detail::in_list(T2.ket()[i], external_ket).first) { } // if the bra index is connected, do nothing because the external index is // already accounted for - else if (in_list(T2.bra()[i], connected_indices).first || - in_list(T2.bra()[i], external_bra).first) { + else if (detail::in_list(T2.bra()[i], connected_indices).first || + detail::in_list(T2.bra()[i], external_bra).first) { } // if niether the bra or the ket are connected or made the external lists by // now, add them. 
@@ -627,12 +703,12 @@ ncon_spa_extket_extbra(Tensor T1, Tensor T2, bool print_ = false) { bool bra_connected = false; bool ket_connected = false; for (int i = 0; i < T1.bra().size(); i++) { - if (in_list(T1.bra()[i], connected_indices).first) { + if (detail::in_list(T1.bra()[i], connected_indices).first) { bra_connected = true; } } for (int j = 0; j < T1.ket().size(); j++) { - if (in_list(T1.ket()[j], connected_indices).first) { + if (detail::in_list(T1.ket()[j], connected_indices).first) { ket_connected = true; } } @@ -841,7 +917,7 @@ std::pair fnop_to_overlap(ExprPtr exprs) { for (auto&& factor : product->as().factors()) { if (factor->is() && (factor->as().label() == L"E" || factor->as().label() == L"a")) { - factor = tens_to_op(factor); + factor = detail::tens_to_op(factor); if (factor->is()) { if (factor->as().ncreators() == 1) { auto o1 = make_overlap( @@ -961,72 +1037,7 @@ ExprPtr screen_F12_proj(ExprPtr exprs, int ansatz = 2) { return exprs; } -ExprPtr FNOPs_to_tens(ExprPtr ex_) { - if (ex_->is()) { - auto new_sum = ex(0); - for (auto&& product : ex_->as().summands()) { - auto new_product = ex(product->as().scalar()); - for (auto factor : product->as().factors()) { - auto new_factor = ex(0); - if (factor->is()) { - new_factor = op_to_tens(factor) + new_factor; - assert(!new_factor->is()); - } else { - new_factor = factor + new_factor; - } - new_product = new_product * new_factor; - } - new_sum = new_product + new_sum; - } - non_canon_simplify(new_sum); - return new_sum; - } else if (ex_->is()) { - for (auto&& factor : ex_->as().factors()) { - if (factor->is()) { - factor = op_to_tens(factor); - } - } - } else if (ex_->is()) { - ex_ = op_to_tens(ex_); - } else { - return ex_; - } - return ex_; -} -ExprPtr tens_to_FNOps(ExprPtr ex_) { - if (ex_->is()) { - auto new_sum = ex(0); - for (auto&& product : ex_->as().summands()) { - auto new_product = ex(product->as().scalar()); - for (auto factor : product->as().factors()) { - auto new_factor = ex(0); - if 
(factor->is() && (factor->as().label() == L"E" || - factor->as().label() == L"a")) { - new_factor = tens_to_op(factor); - } else { - new_factor = factor; - } - new_product = new_factor * new_product; - } - new_sum = new_product + new_sum; - } - non_canon_simplify(new_sum); - return new_sum; - } else if (ex_->is()) { - for (auto&& factor : ex_->as().factors()) { - if (factor->is() && (factor->as().label() == L"E" || - factor->as().label() == L"a")) { - factor = tens_to_op(factor); - } - } - } else if (ex_->is() && (ex_->as().label() == L"E" || - ex_->as().label() == L"a")) { - ex_ = tens_to_op(ex_); - } else { - return ex_; - } - return ex_; -} + // split F12 operator into its 2 components seen in eq 11. of Chem. Phys. 136, // 084107 (2012). @@ -1114,14 +1125,10 @@ ExprPtr partition_F12(ExprPtr exprs) { // allow analysis of multiple expressions who have the same normal order // operator prefactor. std::pair hamiltonian_based_projector_2(ExprPtr exprs) { - exprs = FNOPs_to_tens(exprs); + exprs = detail::FNOPs_to_tens(exprs); non_canon_simplify(exprs); exprs = screen_densities(exprs); non_canon_simplify(exprs); - exprs = screen_F12_proj(exprs, 2); - //simplify(exprs); - //exprs = partition_F12(exprs); - non_canon_simplify(exprs); auto exprs_intmed = ex(0.0); for (auto&& product : exprs->as().summands()) { auto new_product = simplification::find_F12_interms(product); @@ -1134,9 +1141,7 @@ std::pair hamiltonian_based_projector_2(ExprPtr exprs) { // here G can only have projection to the alpha and Beta space otherwise // projector constructs it to be be zero. std::pair hamiltonian_based_projector_1(ExprPtr exprs) { - exprs = FNOPs_to_tens(exprs); - simplify(exprs); - exprs = partition_F12(exprs); + exprs = detail::FNOPs_to_tens(exprs); simplify(exprs); exprs = screen_F12_proj(exprs, 1); simplify(exprs); @@ -1151,7 +1156,7 @@ std::pair hamiltonian_based_projector_1(ExprPtr exprs) { // G can only project to alpha and Beta space. still need to use fock based // expression. 
std::pair fock_based_projector_1(ExprPtr exprs) { - exprs = FNOPs_to_tens(exprs); + exprs = detail::FNOPs_to_tens(exprs); simplify(exprs); if (exprs->is()) { return std::pair{exprs, exprs}; @@ -1189,7 +1194,7 @@ std::pair fock_based_projector_2(ExprPtr exprs) { return std::pair{exprs, exprs}; } non_canon_simplify(exprs); - exprs = FNOPs_to_tens(exprs); + exprs = detail::FNOPs_to_tens(exprs); non_canon_simplify(exprs); exprs = screen_densities(exprs); non_canon_simplify(exprs); From 2e8b45ceee8927ec85b4277e0f12838003f41854 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Fri, 13 May 2022 15:40:06 -0400 Subject: [PATCH 077/120] rename variable in screening and remove logical redundancy. --- SeQuant/domain/eqs/single_ref_uccf12.h | 6 ++--- .../domain/transcorrelated/simplifications.h | 22 ++++++++----------- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index 10e8ff93b..fd3b69c25 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -340,7 +340,7 @@ class uccf12 { std::initializer_list{ Index::make_tmp_index(IndexSpace::instance(IndexSpace::all))}, std::initializer_list{Index::make_tmp_index( - IndexSpace::instance(IndexSpace::complete_unoccupied))}); + IndexSpace::instance(IndexSpace::other_unoccupied))}); auto E_pa = ex( std::initializer_list{C->as().bra()[0]}, std::initializer_list{C->as().ket()[0]}); @@ -360,7 +360,7 @@ class uccf12 { simplify(A); } else { A = A + single; - simplify(A); + non_canon_simplify(A); } auto A_ = A->clone(); A_ = relable(A_); @@ -372,7 +372,7 @@ class uccf12 { H_A_3 = H_A_3 + ex(0.); H_A_3 = simplification::screen_F12_proj(H_A_3, 2); H_A_3 = simplification::detail::tens_to_FNOps(H_A_3); - simplify(H_A_3); + non_canon_simplify(H_A_3); auto H_A_2 = decompositions::three_body_substitution(H_A_3, 2); simplify(H_A_2); auto com_1 = simplification::hamiltonian_based_projector_2(H_A_2); diff --git 
a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index 2e1ce38f5..0f29031bd 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -367,19 +367,17 @@ ExprPtr screen_F_tensors(ExprPtr ex_, int ansatz = 2) { assert(ex_->is()); assert(ex_->as().label() == L"F"); auto overlap = ex(1); - bool good = false; - bool bra_good = false; if (ansatz == 2) { + bool non_zero = false; + bool bra_good = false; for (int i = 0; i < ex_->as().bra().size(); i++) { auto bra = ex_->as().bra()[i]; if (bra.space().type() == IndexSpace::complete || bra.space().type() == IndexSpace::complete_unoccupied) { - good = true; + non_zero = true; bra_good = true; - } else if (bra.space().type() == IndexSpace::complete || - bra.space().type() == IndexSpace::complete_unoccupied || - bra.space().type() == IndexSpace::other_unoccupied) { - good = true; + } else if (bra.space().type() == IndexSpace::other_unoccupied) { + non_zero = true; } } @@ -412,12 +410,10 @@ ExprPtr screen_F_tensors(ExprPtr ex_, int ansatz = 2) { auto ket = ex_->as().ket()[j]; if (ket.space().type() == IndexSpace::complete || ket.space().type() == IndexSpace::complete_unoccupied) { - good = true; + non_zero = true; ket_good = true; - } else if (ket.space().type() == IndexSpace::complete || - ket.space().type() == IndexSpace::complete_unoccupied || - ket.space().type() == IndexSpace::other_unoccupied) { - good = true; + } else if (ket.space().type() == IndexSpace::other_unoccupied) { + non_zero = true; } } for (int j = 0; j < ex_->as().ket().size(); j++) { @@ -437,7 +433,7 @@ ExprPtr screen_F_tensors(ExprPtr ex_, int ansatz = 2) { } } } - if (good) { + if (non_zero) { return ex_ * overlap; } else { return ex(0); From 160607213e7b62d6a2624325a7b44d8805c013be Mon Sep 17 00:00:00 2001 From: connermasteran Date: Tue, 24 May 2022 13:50:58 -0400 Subject: [PATCH 078/120] rename variable in screening and remove logical 
redundancy. --- SeQuant/domain/transcorrelated/simplifications.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/SeQuant/domain/transcorrelated/simplifications.h b/SeQuant/domain/transcorrelated/simplifications.h index 0f29031bd..7386c1029 100644 --- a/SeQuant/domain/transcorrelated/simplifications.h +++ b/SeQuant/domain/transcorrelated/simplifications.h @@ -905,6 +905,7 @@ ExprPtr find_F12_interms(ExprPtr ex_) { // canonical form for E^{p_7}_{p_9} and E^{p_7 p_8}_{p_9 p_10} as the external indicies // this also simultaneously partitions the result into one and two body terms. std::pair fnop_to_overlap(ExprPtr exprs) { + //std::wcout << to_latex_align(exprs,20,3) << std::endl; auto one_body_result = ex(0); auto two_body_result = ex(0); for (auto&& product : exprs->as().summands()) { @@ -943,8 +944,8 @@ std::pair fnop_to_overlap(ExprPtr exprs) { one_body_result = one_body_product + one_body_result; two_body_result = two_body_product + two_body_result; } - non_canon_simplify(one_body_result); - non_canon_simplify(two_body_result); + simplify(one_body_result); + simplify(two_body_result); return {one_body_result, two_body_result}; } From fceaf6a7fdc975e57baaefae6c76b69038ba3a61 Mon Sep 17 00:00:00 2001 From: connermasteran Date: Wed, 25 May 2022 13:18:59 -0400 Subject: [PATCH 079/120] include singles gg_space --- SeQuant/domain/eqs/single_ref_uccf12.h | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/SeQuant/domain/eqs/single_ref_uccf12.h b/SeQuant/domain/eqs/single_ref_uccf12.h index fd3b69c25..d7dc3db78 100644 --- a/SeQuant/domain/eqs/single_ref_uccf12.h +++ b/SeQuant/domain/eqs/single_ref_uccf12.h @@ -24,6 +24,7 @@ class uccf12 { unsigned int op_rank; //no default constructor IndexSpace::TypeAttr gg_space = IndexSpace::all; + IndexSpace::TypeAttr singles_gg_space = IndexSpace::all; int ansatz_; bool print_; bool singles_; @@ -35,7 +36,7 @@ class uccf12 { // TODO implement logic for non-default variables. 
should also include logic // for spin-orbital expressions. uccf12(std::string gg_label, int ansatz = 2, - bool print = false, bool singles = false, + bool print = false, bool singles = false, std::string singles_gg = "all", bool doubles = true,bool single_reference = true, bool fock_approx = true, unsigned int max_op_rank = 2) { sr = single_reference; @@ -68,6 +69,7 @@ class uccf12 { // space: active occupieds is the normal choice, all orbitals is the // reference-independent (albeit expensive) choice assert(singles_ == true || doubles_ == true); + //doubles space options if (gg_label == "act_occ") { gg_space = IndexSpace::active_occupied; } else if (gg_label == "occ") { @@ -85,6 +87,20 @@ class uccf12 { throw std::runtime_error( "uccf12::compute(gg_label) unsupported space label"); } + //singles space options + if (singles_gg == "all"){ + singles_gg_space = IndexSpace::all; + } + else if (singles_gg == "occ") { + singles_gg_space = IndexSpace::occupied; + } + else if (singles_gg == "all_active"){ + singles_gg_space = IndexSpace::all_active; + } + else if(singles_gg == "occ_active"){ + singles_gg_space = IndexSpace::active_occupied; + } + else{throw "singles index space not supported!";} } //[[e1,e2],e3]_12 ExprPtr compute_double_com(ExprPtr e1, ExprPtr e2, ExprPtr e3, @@ -338,7 +354,7 @@ class uccf12 { auto C = ex( L"C", std::initializer_list{ - Index::make_tmp_index(IndexSpace::instance(IndexSpace::all))}, + Index::make_tmp_index(IndexSpace::instance(singles_gg_space))}, std::initializer_list{Index::make_tmp_index( IndexSpace::instance(IndexSpace::other_unoccupied))}); auto E_pa = ex( From a04bf90062c271375a5d78a93bb10aa12e3af3c2 Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Wed, 25 May 2022 15:08:23 -0400 Subject: [PATCH 080/120] ODR violation: test_spin.cpp included spin.cpp, not spin.hpp --- tests/unit/test_spin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_spin.cpp b/tests/unit/test_spin.cpp index 
cf3f7af1d..6445dab07 100644 --- a/tests/unit/test_spin.cpp +++ b/tests/unit/test_spin.cpp @@ -2,7 +2,7 @@ // Created by Nakul Teke on 12/20/19. // -#include "SeQuant/domain/mbpt/spin.cpp" +#include "SeQuant/domain/mbpt/spin.hpp" #include "catch.hpp" #include "test_config.hpp" From b9429a5c51a00801cc244d2d4f29a07fa7b285c7 Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Wed, 25 May 2022 15:12:18 -0400 Subject: [PATCH 081/120] fix to_latex_align, added unit test --- SeQuant/core/expr.hpp | 2 +- tests/unit/test_expr.cpp | 237 ++++++++++++++++++++++++--------------- 2 files changed, 146 insertions(+), 93 deletions(-) diff --git a/SeQuant/core/expr.hpp b/SeQuant/core/expr.hpp index faa0ced08..33f878d8f 100644 --- a/SeQuant/core/expr.hpp +++ b/SeQuant/core/expr.hpp @@ -1241,7 +1241,7 @@ inline std::wstring to_latex_align(const ExprPtr &exprptr, std::wstring result = to_latex(exprptr); if (exprptr->is()) { result.erase(0, 7); // remove leading "{ \bigl" - result.replace(result.size() - 9, 9, + result.replace(result.size() - 8, 8, L")"); // replace trailing "\bigr) }" with ")" result = std::wstring(L"\\begin{align}\n& ") + result; // assume no inner sums diff --git a/tests/unit/test_expr.cpp b/tests/unit/test_expr.cpp index 142ce947f..e7b97d105 100644 --- a/tests/unit/test_expr.cpp +++ b/tests/unit/test_expr.cpp @@ -10,32 +10,28 @@ struct Dummy : public sequant::Expr { virtual ~Dummy() = default; - std::wstring to_latex() const override { - return L"{\\text{Dummy}}"; - } - std::wstring to_wolfram() const override { - return L"Dummy[]"; - } + std::wstring to_latex() const override { return L"{\\text{Dummy}}"; } + std::wstring to_wolfram() const override { return L"Dummy[]"; } type_id_type type_id() const override { return get_type_id(); }; sequant::ExprPtr clone() const override { return sequant::ex(); } bool static_equal(const sequant::Expr &that) const override { return true; } }; -template +template struct VecExpr : public std::vector, public sequant::Expr { using 
base_type = std::vector; using base_type::begin; using base_type::end; VecExpr() = default; - template + template VecExpr(std::initializer_list elements) : std::vector(elements) {} - template + template VecExpr(Iter begin, Iter end) : std::vector(begin, end) {} virtual ~VecExpr() = default; std::wstring to_latex() const override { std::wstring result = L"{\\text{VecExpr}\\{"; - for (const auto &e: *this) { + for (const auto &e : *this) { if constexpr (sequant::Expr::is_shared_ptr_of_expr_or_derived::value) { result += e->to_latex() + L" "; } else { @@ -48,7 +44,7 @@ struct VecExpr : public std::vector, public sequant::Expr { std::wstring to_wolfram() const override { std::wstring result = L"VecExpr["; size_t count = 1; - for (const auto &e: *this) { + for (const auto &e : *this) { const auto last_it = count == this->std::vector::size(); if constexpr (sequant::Expr::is_shared_ptr_of_expr_or_derived::value) { result += e->to_wolfram() + (last_it ? L"" : L","); @@ -61,21 +57,21 @@ struct VecExpr : public std::vector, public sequant::Expr { return result; } - type_id_type type_id() const override{ - return get_type_id>(); - }; + type_id_type type_id() const override { return get_type_id>(); }; private: cursor begin_cursor() const override { if constexpr (sequant::Expr::is_shared_ptr_of_expr::value) { - return base_type::empty() ? Expr::begin_cursor() : cursor{&base_type::at(0)}; + return base_type::empty() ? Expr::begin_cursor() + : cursor{&base_type::at(0)}; } else { return Expr::begin_cursor(); } }; cursor end_cursor() const override { if constexpr (sequant::Expr::is_shared_ptr_of_expr::value) { - return base_type::empty() ? Expr::end_cursor() : cursor{&base_type::at(0) + base_type::size()}; + return base_type::empty() ? 
Expr::end_cursor() + : cursor{&base_type::at(0) + base_type::size()}; } else { return Expr::end_cursor(); } @@ -88,9 +84,9 @@ struct VecExpr : public std::vector, public sequant::Expr { }; bool static_equal(const sequant::Expr &that) const override { - return static_cast(*this) == static_cast(static_cast(that)); + return static_cast(*this) == + static_cast(static_cast(that)); } - }; struct Adjointable : public sequant::Expr { @@ -104,22 +100,25 @@ struct Adjointable : public sequant::Expr { return L"Adjointable[" + std::to_wstring(v) + L"]"; } type_id_type type_id() const override { return get_type_id(); }; - sequant::ExprPtr clone() const override { return sequant::ex(v); } - bool static_equal(const sequant::Expr &that) const override { return v == that.as().v; } + sequant::ExprPtr clone() const override { + return sequant::ex(v); + } + bool static_equal(const sequant::Expr &that) const override { + return v == that.as().v; + } void adjoint() override { v = -v; }; int v = 1; }; struct latex_visitor { - void operator()(const std::shared_ptr& expr) { + void operator()(const std::shared_ptr &expr) { result += expr->to_latex(); } - std::wstring result {}; + std::wstring result{}; }; TEST_CASE("Expr", "[elements]") { - using namespace sequant; SECTION("constructors") { @@ -127,13 +126,18 @@ TEST_CASE("Expr", "[elements]") { const auto ex2 = std::make_shared(2); REQUIRE_NOTHROW(std::make_shared>()); const auto ex3 = std::make_shared>(); - REQUIRE_NOTHROW(std::make_shared>(std::initializer_list{1.0, 2.0, 3.0})); - const auto ex4 = std::make_shared>(std::initializer_list{1.0, 2.0, 3.0}); - REQUIRE_NOTHROW(std::make_shared>>(std::initializer_list>{ - std::make_shared(1.0), std::make_shared(2.0), std::make_shared(3.0)})); - const auto ex5 = - std::make_shared>>(std::initializer_list>{ - std::make_shared(1.0), std::make_shared(2.0), std::make_shared(3.0)}); + REQUIRE_NOTHROW(std::make_shared>( + std::initializer_list{1.0, 2.0, 3.0})); + const auto ex4 = std::make_shared>( + 
std::initializer_list{1.0, 2.0, 3.0}); + REQUIRE_NOTHROW(std::make_shared>>( + std::initializer_list>{ + std::make_shared(1.0), std::make_shared(2.0), + std::make_shared(3.0)})); + const auto ex5 = std::make_shared>>( + std::initializer_list>{ + std::make_shared(1.0), std::make_shared(2.0), + std::make_shared(3.0)}); REQUIRE_NOTHROW(std::make_shared()); const auto ex1 = std::make_shared(); } @@ -144,7 +148,8 @@ TEST_CASE("Expr", "[elements]") { REQUIRE(ex->is_atom()); } { - const auto ex = std::make_shared>(std::initializer_list{1.0, 2.0, 3.0}); + const auto ex = std::make_shared>( + std::initializer_list{1.0, 2.0, 3.0}); REQUIRE(ex->is_atom()); } { @@ -154,22 +159,24 @@ TEST_CASE("Expr", "[elements]") { } SECTION("comparison") { - { const auto ex1 = std::make_shared(1); const auto ex2 = std::make_shared(2); const auto ex3 = std::make_shared(1); const auto ex4 = std::make_shared>(); - const auto ex5 = - std::make_shared>(ExprPtrList{ - std::make_shared(1.0), std::make_shared(2.0), std::make_shared(3.0)}); + const auto ex5 = std::make_shared>(ExprPtrList{ + std::make_shared(1.0), std::make_shared(2.0), + std::make_shared(3.0)}); const auto ex0 = std::make_shared(); - // type ids get assigned in the order of use, which is program dependent, only check basic relations here + // type ids get assigned in the order of use, which is program dependent, + // only check basic relations here REQUIRE(ex0->type_id() == Expr::get_type_id()); REQUIRE(ex1->type_id() == Expr::get_type_id()); REQUIRE(ex4->type_id() == Expr::get_type_id>()); - REQUIRE(ex4->type_id() < Expr::get_type_id>()); // VecExpr had not been used yet + REQUIRE(ex4->type_id() < + Expr::get_type_id>()); // VecExpr had not + // been used yet REQUIRE(*ex0 == *ex0); REQUIRE(*ex1 == *ex1); @@ -178,9 +185,7 @@ TEST_CASE("Expr", "[elements]") { REQUIRE(*ex4 == *ex4); REQUIRE(*ex5 == *ex5); REQUIRE(*ex0 != *ex1); - } - } SECTION("iteration") { @@ -200,28 +205,31 @@ TEST_CASE("Expr", "[elements]") { 
REQUIRE(begin(ex3->expr()) == end(ex3->expr())); REQUIRE(size(ex3->expr()) == 0); - const auto ex4 = std::make_shared>(std::initializer_list{1.0, 2.0, 3.0}); + const auto ex4 = std::make_shared>( + std::initializer_list{1.0, 2.0, 3.0}); REQUIRE(begin(*ex4) != end(*ex4)); REQUIRE(size(*ex4) == 3); REQUIRE(begin(ex4->expr()) == end(ex4->expr())); REQUIRE(size(ex4->expr()) == 0); - const auto ex5_init = - std::vector>{std::make_shared(1.0), std::make_shared(2.0), - std::make_shared(3.0)}; - const auto ex5 = std::make_shared>>(begin(ex5_init), end(ex5_init)); + const auto ex5_init = std::vector>{ + std::make_shared(1.0), std::make_shared(2.0), + std::make_shared(3.0)}; + const auto ex5 = std::make_shared>>( + begin(ex5_init), end(ex5_init)); REQUIRE(begin(*ex5) != end(*ex5)); REQUIRE(size(*ex5) == 3); REQUIRE(begin(ex5->expr()) == end(ex5->expr())); REQUIRE(size(ex5->expr()) == 0); { - auto ex6 = std::make_shared>(begin(ex5_init), end(ex5_init)); + auto ex6 = + std::make_shared>(begin(ex5_init), end(ex5_init)); REQUIRE(begin(*ex6) != end(*ex6)); REQUIRE(size(*ex6) == 3); REQUIRE(begin(ex6->expr()) != end(ex6->expr())); REQUIRE(size(ex6->expr()) == 3); - const auto& front_ptr = *begin(ex6->expr()); + const auto &front_ptr = *begin(ex6->expr()); auto front_ptr_cast = std::dynamic_pointer_cast(front_ptr); REQUIRE(front_ptr_cast); REQUIRE(front_ptr_cast->value() == 1.0); @@ -242,7 +250,8 @@ TEST_CASE("Expr", "[elements]") { REQUIRE(ex->value>() == std::complex{2, 0}); REQUIRE_THROWS_AS(ex->value(), std::invalid_argument); REQUIRE_THROWS_AS(ex->value(), boost::numeric::positive_overflow); - REQUIRE_THROWS_AS(std::make_shared(-2)->value(), boost::numeric::negative_overflow); + REQUIRE_THROWS_AS(std::make_shared(-2)->value(), + boost::numeric::negative_overflow); } SECTION("scaled_product") { @@ -264,48 +273,48 @@ TEST_CASE("Expr", "[elements]") { } SECTION("adjoint") { - { // not implemented by default + { // not implemented by default const auto e = std::make_shared(); 
REQUIRE_THROWS_AS(e->adjoint(), std::logic_error); } - { // implemented in Adjointable + { // implemented in Adjointable const auto e = std::make_shared(); REQUIRE_NOTHROW(e->adjoint()); REQUIRE_NOTHROW(adjoint(e)); // check free-function adjoint } - { // Constant - const auto e = std::make_shared(std::complex{1,2}); + { // Constant + const auto e = std::make_shared(std::complex{1, 2}); REQUIRE_NOTHROW(e->adjoint()); - REQUIRE(e->value() == std::complex{1,-2}); + REQUIRE(e->value() == std::complex{1, -2}); } - { // Product + { // Product const auto e = std::make_shared(); - e->append(std::complex{2,-1}, ex()); + e->append(std::complex{2, -1}, ex()); e->append(1, ex(-2)); REQUIRE_NOTHROW(e->adjoint()); - REQUIRE(e->scalar() == std::complex{2,1}); + REQUIRE(e->scalar() == std::complex{2, 1}); REQUIRE(e->factors()[0]->as().v == 2); REQUIRE(e->factors()[1]->as().v == -1); } - { // CProduct + { // CProduct const auto e = std::make_shared(); - e->append(std::complex{2,-1}, ex()); + e->append(std::complex{2, -1}, ex()); e->append(1, ex(-2)); REQUIRE_NOTHROW(e->adjoint()); - REQUIRE(e->scalar() == std::complex{2,1}); + REQUIRE(e->scalar() == std::complex{2, 1}); REQUIRE(e->factors()[0]->as().v == -1); REQUIRE(e->factors()[1]->as().v == 2); } - { // NCProduct + { // NCProduct const auto e = std::make_shared(); - e->append(std::complex{2,-1}, ex()); + e->append(std::complex{2, -1}, ex()); e->append(1, ex(-2)); REQUIRE_NOTHROW(e->adjoint()); - REQUIRE(e->scalar() == std::complex{2,1}); + REQUIRE(e->scalar() == std::complex{2, 1}); REQUIRE(e->factors()[0]->as().v == 2); REQUIRE(e->factors()[1]->as().v == -1); } - { // Sum + { // Sum const auto e = std::make_shared(); e->append(ex()); e->append(ex(-2)); @@ -322,14 +331,52 @@ TEST_CASE("Expr", "[elements]") { // VecExpr { - const auto ex5_init = - std::vector>{std::make_shared(1.0), std::make_shared(2.0), - std::make_shared(3.0)}; + const auto ex5_init = std::vector>{ + std::make_shared(1.0), std::make_shared(2.0), + 
std::make_shared(3.0)}; auto ex6 = std::make_shared>(begin(ex5_init), end(ex5_init)); REQUIRE(ex6->to_latex() == L"{\\text{VecExpr}\\{{{{1}}} {{{2}}} {{{3}}} \\}}"); } + + // to_latex_align + { + const auto e = std::make_shared(); + e->append(ex(1)); + e->append(ex(2)); + e->append(ex(3)); + e->append(ex(4)); + // std::wcout << "to_latex(e) = " << to_latex(e) << std::endl; + REQUIRE(to_latex(e) == + L"{ \\bigl({\\text{Adjointable}{1}} + {\\text{Adjointable}{2}} + " + L"{\\text{Adjointable}{3}} + {\\text{Adjointable}{4}}\\bigr) }"); + // std::wcout << "to_latex_align(e) = " << to_latex_align(e) << std::endl; + REQUIRE(to_latex_align(e) == + L"\\begin{align}\n" + "& ({\\text{Adjointable}{1}} \\\\\n" + "& + {\\text{Adjointable}{2}} \\\\\n" + "& + {\\text{Adjointable}{3}} \\\\\n" + "& + {\\text{Adjointable}{4}})\n" + "\\end{align}"); + // std::wcout << "to_latex_align(e,5,2) = " << to_latex_align(e,5,2) << + // std::endl; + REQUIRE(to_latex_align(e, 5, 2) == + L"\\begin{align}\n" + "& ({\\text{Adjointable}{1}} + {\\text{Adjointable}{2}} \\\\\n" + "& + {\\text{Adjointable}{3}} + {\\text{Adjointable}{4}})\n" + "\\end{align}"); + // std::wcout << "to_latex_align(e,1,2) = " << to_latex_align(e,1,2) << + // std::endl; + REQUIRE(to_latex_align(e, 1, 2) == + L"\\begin{align}\n" + "& ({\\text{Adjointable}{1}} + {\\text{Adjointable}{2}} \\\\\n" + "& + {\\text{Adjointable}{3}} \n" + "\\end{align}\n" + "\\begin{align}\n" + "& + {\\text{Adjointable}{4}})\n" + "\\end{align}"); + } } SECTION("wolfram") { @@ -339,27 +386,29 @@ TEST_CASE("Expr", "[elements]") { // VecExpr { - const auto ex5_init = - std::vector>{std::make_shared(1.0), std::make_shared(2.0), - std::make_shared(3.0)}; - auto ex6 = std::make_shared>(begin(ex5_init), end(ex5_init)); + const auto ex5_init = std::vector>{ + std::make_shared(1.0), std::make_shared(2.0), + std::make_shared(3.0)}; + auto ex6 = + std::make_shared>(begin(ex5_init), end(ex5_init)); REQUIRE(ex6->to_wolfram() == L"VecExpr[1,2,3]"); } } 
SECTION("visitor") { { - const auto ex5_init = - std::vector>{std::make_shared(1.0), std::make_shared(2.0), - std::make_shared(3.0)}; - ExprPtr ex6 = std::make_shared>(begin(ex5_init), end(ex5_init)); + const auto ex5_init = std::vector>{ + std::make_shared(1.0), std::make_shared(2.0), + std::make_shared(3.0)}; + ExprPtr ex6 = + std::make_shared>(begin(ex5_init), end(ex5_init)); auto ex = ex6 + ex6; latex_visitor v1{}; ex->visit(v1); -// std::wcout << "v1.result = " << v1.result << std::endl; + // std::wcout << "v1.result = " << v1.result << std::endl; REQUIRE( v1.result == L"{{{1}}}{{{2}}}{{{3}}}{\\text{VecExpr}\\{{{{1}}} {{{2}}} {{{3}}} " @@ -377,7 +426,6 @@ TEST_CASE("Expr", "[elements]") { } SECTION("range") { - { REQUIRE_NOTHROW(expr_range{}); expr_range exrng{}; @@ -385,17 +433,22 @@ TEST_CASE("Expr", "[elements]") { REQUIRE(ranges::begin(exrng) == ranges::end(exrng)); } - // compares indices in address provided by cursor::address() to a list of indices - auto compare = [](const container::svector>& address1, - std::initializer_list address2) { - return address1.size() == address2.size() && - std::equal(begin(address1), end(address1), begin(address2), [](const auto& parent_and_index1, const auto& index2) { - return parent_and_index1.second == index2; - }); - }; + // compares indices in address provided by cursor::address() to a list of + // indices + auto compare = + [](const container::svector> &address1, + std::initializer_list address2) { + return address1.size() == address2.size() && + std::equal( + begin(address1), end(address1), begin(address2), + [](const auto &parent_and_index1, const auto &index2) { + return parent_and_index1.second == index2; + }); + }; { - auto x = (ex(1.0) + ex(2.0)) * (ex(3.0) + ex(4.0)); + auto x = (ex(1.0) + ex(2.0)) * + (ex(3.0) + ex(4.0)); REQUIRE_NOTHROW(expr_range{x}); expr_range exrng{x}; REQUIRE(ranges::begin(exrng) == ranges::begin(exrng)); @@ -434,7 +487,7 @@ TEST_CASE("Expr", "[elements]") { switch (i) { case 0: 
REQUIRE(to_latex(*it) == L"{{{1}}}"); - REQUIRE( compare(ranges::get_cursor(it).address(), {0,0}) ); + REQUIRE(compare(ranges::get_cursor(it).address(), {0, 0})); REQUIRE(ranges::get_cursor(it).ordinal() == 0); break; case 1: @@ -462,7 +515,7 @@ TEST_CASE("Expr", "[elements]") { case 5: REQUIRE(to_latex(*it) == L"{\\text{Dummy}}"); REQUIRE(compare(ranges::get_cursor(it).address(), {1, 1, 0, 1})); - REQUIRE( ranges::get_cursor(it).ordinal() == 5 ); + REQUIRE(ranges::get_cursor(it).ordinal() == 5); break; } ++i; @@ -490,14 +543,14 @@ TEST_CASE("Expr", "[elements]") { (ex(1.0) + ex(2.0) * (ex(3.0) - ex())) * (ex(5.0) * (ex(6.0) + ex()) + ex()); - //std::wcout << "x = " << to_latex(x) << std::endl; + // std::wcout << "x = " << to_latex(x) << std::endl; REQUIRE(to_latex(x) == L"{{ \\bigl({{{1}}} + {{{2}}{ \\bigl({{{3}}} - {" L"{\\text{Dummy}}}\\bigr) }}\\bigr) }{ \\bigl({{{5}}" L"{ \\bigl({{{6}}} + {\\text{Dummy}}\\bigr) }} + " L"{\\text{Dummy}}\\bigr) }}"); expand(x); - //std::wcout << "ex = " << to_latex(x) << std::endl; + // std::wcout << "ex = " << to_latex(x) << std::endl; REQUIRE(to_latex(x) == L"{ \\bigl({{{30}}} + {{{5}}{\\text{Dummy}}} + " L"{{\\text{Dummy}}} + {{{180}}} + {{{30}}" @@ -509,13 +562,13 @@ TEST_CASE("Expr", "[elements]") { } SECTION("hashing") { - const auto ex5_init = - std::vector>{std::make_shared(1.0), std::make_shared(2.0), - std::make_shared(3.0)}; + const auto ex5_init = std::vector>{ + std::make_shared(1.0), std::make_shared(2.0), + std::make_shared(3.0)}; REQUIRE_NOTHROW(hash_value(ex5_init)); REQUIRE(hash_value(ex5_init) != hash_value(ex(1))); - auto hasher = [](const std::shared_ptr&) ->unsigned int { + auto hasher = [](const std::shared_ptr &) -> unsigned int { return 0; }; REQUIRE_NOTHROW(ex(1)->hash_value(hasher) == 0); From 15be7a2863a35210356567759d68317117b08edf Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Wed, 25 May 2022 15:12:48 -0400 Subject: [PATCH 082/120] namespace unit_tests, i.e. 
rename to unit_tests-sequant --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b9bfece7a..891778a93 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -314,7 +314,7 @@ if (BUILD_TESTING) set(utests_deps SeQuant) - set(unit_test_executable unit_tests) + set(unit_test_executable unit_tests-sequant) add_executable(${unit_test_executable} EXCLUDE_FROM_ALL tests/unit/test_main.cpp tests/unit/catch.hpp From 17eb4c820e070b92ab5042081ee4bd9293fcab84 Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Tue, 12 Jul 2022 18:28:22 -0400 Subject: [PATCH 083/120] default Context uses Physical vacuum as more reasonable for non-QC use --- SeQuant/core/sequant.hpp | 12 +++++------- examples/eval/btas/main.cpp | 5 ++++- examples/eval/ta/main.cpp | 5 ++++- examples/srcc/srcc.cpp | 4 +++- tests/unit/test_main.cpp | 15 ++++++++------- 5 files changed, 24 insertions(+), 17 deletions(-) diff --git a/SeQuant/core/sequant.hpp b/SeQuant/core/sequant.hpp index 1bc3eca62..a6b8e6fc6 100644 --- a/SeQuant/core/sequant.hpp +++ b/SeQuant/core/sequant.hpp @@ -1,9 +1,9 @@ #ifndef SEQUANT_SEQUANT_H #define SEQUANT_SEQUANT_H -#include "space.hpp" #include "attr.hpp" #include "index.hpp" +#include "space.hpp" namespace sequant { @@ -16,9 +16,7 @@ class SeQuant { /// @param m an IndexSpaceMetric object /// @param bks a BraKetSymmetry object /// @param spb single-particle basis (spin-free or spin-dependent) - explicit SeQuant(Vacuum vac, - IndexSpaceMetric m, - BraKetSymmetry bks, + explicit SeQuant(Vacuum vac, IndexSpaceMetric m, BraKetSymmetry bks, SPBasis spb = sequant::SPBasis::spinorbital) : vacuum_(vac), metric_(m), braket_symmetry_(bks), spbasis_(spb) {} ~SeQuant() = default; @@ -26,12 +24,12 @@ class SeQuant { Vacuum vacuum() const { return vacuum_; } IndexSpaceMetric metric() const { return metric_; } BraKetSymmetry braket_symmetry() const { return braket_symmetry_; } - SPBasis spbasis() const { return spbasis_;} + SPBasis 
spbasis() const { return spbasis_; } /// @return the IndexRegistry object std::shared_ptr index_registry() const; private: - Vacuum vacuum_ = Vacuum::SingleProduct; + Vacuum vacuum_ = Vacuum::Physical; IndexSpaceMetric metric_ = IndexSpaceMetric::Unit; BraKetSymmetry braket_symmetry_ = BraKetSymmetry::conjugate; SPBasis spbasis_ = sequant::SPBasis::spinorbital; @@ -43,4 +41,4 @@ void reset_default_context(); } // namespace sequant -#endif \ No newline at end of file +#endif diff --git a/examples/eval/btas/main.cpp b/examples/eval/btas/main.cpp index 62e532182..4f2cd77bc 100644 --- a/examples/eval/btas/main.cpp +++ b/examples/eval/btas/main.cpp @@ -49,6 +49,9 @@ int main(int argc, char* argv[]) { using namespace sequant; detail::OpIdRegistrar op_id_registrar; + sequant::set_default_context( + SeQuant(Vacuum::SingleProduct, IndexSpaceMetric::Unit, + BraKetSymmetry::conjugate, SPBasis::spinorbital)); mbpt::set_default_convention(); TensorCanonicalizer::register_instance( std::make_shared()); @@ -67,4 +70,4 @@ int main(int argc, char* argv[]) { std::wcout); return 0; -} \ No newline at end of file +} diff --git a/examples/eval/ta/main.cpp b/examples/eval/ta/main.cpp index e4684f571..35d63b341 100644 --- a/examples/eval/ta/main.cpp +++ b/examples/eval/ta/main.cpp @@ -69,6 +69,9 @@ int main(int argc, char* argv[]) { using namespace sequant; detail::OpIdRegistrar op_id_registrar; mbpt::set_default_convention(); + sequant::set_default_context( + SeQuant(Vacuum::SingleProduct, IndexSpaceMetric::Unit, + BraKetSymmetry::conjugate, SPBasis::spinorbital)); TensorCanonicalizer::register_instance( std::make_shared()); @@ -86,4 +89,4 @@ int main(int argc, char* argv[]) { TA::finalize(); return 0; -} \ No newline at end of file +} diff --git a/examples/srcc/srcc.cpp b/examples/srcc/srcc.cpp index 5b8270d4e..f88a07134 100644 --- a/examples/srcc/srcc.cpp +++ b/examples/srcc/srcc.cpp @@ -39,7 +39,9 @@ int main(int argc, char* argv[]) { std::wcout.sync_with_stdio(true); 
std::wcerr.sync_with_stdio(true); sequant::detail::OpIdRegistrar op_id_registrar; - + sequant::set_default_context( + SeQuant(Vacuum::SingleProduct, IndexSpaceMetric::Unit, + BraKetSymmetry::conjugate, SPBasis::spinorbital)); mbpt::set_default_convention(); using sequant::eqs::compute_all; diff --git a/tests/unit/test_main.cpp b/tests/unit/test_main.cpp index f1250fba2..13df44571 100644 --- a/tests/unit/test_main.cpp +++ b/tests/unit/test_main.cpp @@ -4,19 +4,18 @@ #define CATCH_CONFIG_RUNNER #include -#include "SeQuant/core/runtime.hpp" #include "SeQuant/core/op.hpp" +#include "SeQuant/core/runtime.hpp" #include "SeQuant/core/space.hpp" #include "SeQuant/core/utility.hpp" #include "SeQuant/domain/mbpt/convention.hpp" #include "catch.hpp" #ifdef SEQUANT_HAS_TILEDARRAY -# include +#include #endif -int main( int argc, char* argv[] ) -{ +int main(int argc, char* argv[]) { using namespace std; using namespace sequant; @@ -33,13 +32,15 @@ int main( int argc, char* argv[] ) std::wcout.sync_with_stdio(true); std::wcerr.sync_with_stdio(true); detail::OpIdRegistrar op_id_registrar; - + sequant::set_default_context( + SeQuant(Vacuum::SingleProduct, IndexSpaceMetric::Unit, + BraKetSymmetry::conjugate, SPBasis::spinorbital)); mbpt::set_default_convention(); // uncomment to enable verbose output ... - //Logger::set_instance(1); + // Logger::set_instance(1); // ... 
or can instead selectively set/unset particular logging flags - //Logger::get_instance().wick_contract = true; + // Logger::get_instance().wick_contract = true; #ifdef SEQUANT_HAS_TILEDARRAY auto& world = TA::initialize(argc, argv); From c2fd6b75f24f5b880677f467fde9420ea9a0a406 Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Tue, 12 Jul 2022 19:55:01 -0400 Subject: [PATCH 084/120] dox fixup --- SeQuant/core/wick.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/SeQuant/core/wick.hpp b/SeQuant/core/wick.hpp index 78536b312..035d08a5c 100644 --- a/SeQuant/core/wick.hpp +++ b/SeQuant/core/wick.hpp @@ -61,9 +61,9 @@ class WickTheorem { } /// Controls whether next call to compute() will full contractions only or all - /// (including partial) contractions. By default compute() generates all - /// contractions. - /// @param sf if true, will complete full contractions only. + /// (including partial) contractions. By default compute() generates full + /// contractions only. + /// @param sf if false, will evaluate all contractions. 
/// @return reference to @c *this , for daisy-chaining WickTheorem &full_contractions(bool fc) { full_contractions_ = fc; From 1135c1a36cd16ad65295167e0a225716e6d9e327 Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Tue, 12 Jul 2022 19:56:33 -0400 Subject: [PATCH 085/120] IndexSpace::{intersection,unIon} implementations simplified so that for the case of equal args these do not require space registration (this allows minimal examples to just work) --- SeQuant/core/space.hpp | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/SeQuant/core/space.hpp b/SeQuant/core/space.hpp index 73330c9fc..eafec0229 100644 --- a/SeQuant/core/space.hpp +++ b/SeQuant/core/space.hpp @@ -412,32 +412,38 @@ inline bool operator!=(const IndexSpace &space1, const IndexSpace &space2) { } inline IndexSpace::Type intersection(IndexSpace::Type type1, IndexSpace::Type type2) { - return type1.intersection(type2); + return type1 == type2 ? type1 : type1.intersection(type2); } inline IndexSpace::QuantumNumbers intersection(IndexSpace::QuantumNumbers v1, IndexSpace::QuantumNumbers v2) { - return v1.intersection(v2); + return v1 == v2 ? v1 : v1.intersection(v2); } inline const IndexSpace &intersection(const IndexSpace &space1, const IndexSpace &space2) { - return IndexSpace::instance(space1.attr().intersection(space2.attr())); + return space1 == space2 + ? space1 + : IndexSpace::instance(space1.attr().intersection(space2.attr())); } inline const IndexSpace &intersection(const IndexSpace &space1, const IndexSpace &space2, const IndexSpace &space3) { - return IndexSpace::instance( - space1.attr().intersection(space2.attr().intersection(space3.attr()))); + return space1 == space2 && space1 == space3 + ? space1 + : IndexSpace::instance(space1.attr().intersection( + space2.attr().intersection(space3.attr()))); } inline IndexSpace::Type unIon(IndexSpace::Type type1, IndexSpace::Type type2) { - return type1.unIon(type2); + return type1 == type2 ? 
type1 : type1.unIon(type2); } inline IndexSpace::QuantumNumbers unIon(IndexSpace::QuantumNumbers qns1, IndexSpace::QuantumNumbers qns2) { - return qns1.unIon(qns2); + return qns1 == qns2 ? qns1 : qns1.unIon(qns2); } inline const IndexSpace &unIon(const IndexSpace &space1, const IndexSpace &space2) { - return IndexSpace::instance(space1.attr().unIon(space2.attr())); + return space1 == space2 + ? space1 + : IndexSpace::instance(space1.attr().unIon(space2.attr())); } /// @return true if type2 is included in type1, i.e. intersection(type1, type2) /// == type2 From 30df3f9ac678e15a2579cfef8a77fdfc5cedb32e Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Tue, 12 Jul 2022 19:57:54 -0400 Subject: [PATCH 086/120] TensorCanonicalizer::instance_ptr introduced to avoid the need to register canonicalizers at all if no canonicalization is necessary --- SeQuant/core/abstract_tensor.cpp | 26 ++-- SeQuant/core/abstract_tensor.hpp | 200 ++++++++++++++++++------------- SeQuant/core/tensor.cpp | 9 +- 3 files changed, 143 insertions(+), 92 deletions(-) diff --git a/SeQuant/core/abstract_tensor.cpp b/SeQuant/core/abstract_tensor.cpp index 6291446c3..a508600ab 100644 --- a/SeQuant/core/abstract_tensor.cpp +++ b/SeQuant/core/abstract_tensor.cpp @@ -8,18 +8,20 @@ namespace sequant { TensorCanonicalizer::~TensorCanonicalizer() = default; -container::map> &TensorCanonicalizer::instance_map_accessor() { - static container::map> map_; +container::map> + &TensorCanonicalizer::instance_map_accessor() { + static container::map> + map_; return map_; } container::vector -&TensorCanonicalizer::cardinal_tensor_labels_accessor() { + &TensorCanonicalizer::cardinal_tensor_labels_accessor() { static container::vector ctlabels_; return ctlabels_; } -std::shared_ptr TensorCanonicalizer::instance( +std::shared_ptr TensorCanonicalizer::instance_ptr( std::wstring_view label) { auto &map = instance_map_accessor(); // look for label-specific canonicalizer @@ -32,15 +34,25 @@ std::shared_ptr 
TensorCanonicalizer::instance( return it->second; } } - throw std::runtime_error("must first register canonicalizer via TensorCanonicalizer::register_instance(...)"); + return {}; +} + +std::shared_ptr TensorCanonicalizer::instance( + std::wstring_view label) { + auto inst_ptr = instance_ptr(label); + if (!inst_ptr) + throw std::runtime_error( + "must first register canonicalizer via " + "TensorCanonicalizer::register_instance(...)"); + return inst_ptr; } -void TensorCanonicalizer::register_instance(std::shared_ptr can, std::wstring_view label) { +void TensorCanonicalizer::register_instance( + std::shared_ptr can, std::wstring_view label) { auto &map = instance_map_accessor(); map[std::wstring{label}] = can; } - ExprPtr DefaultTensorCanonicalizer::apply(AbstractTensor &t) { // tag all indices as ext->true/ind->false auto braket_view = braket(t); diff --git a/SeQuant/core/abstract_tensor.hpp b/SeQuant/core/abstract_tensor.hpp index e1127aa5b..1e902f9be 100644 --- a/SeQuant/core/abstract_tensor.hpp +++ b/SeQuant/core/abstract_tensor.hpp @@ -17,17 +17,33 @@ namespace sequant { class TensorCanonicalizer; -/// This interface class defines a Tensor concept. 
Object @c t of a type that meets the concept must satisfy the following: -/// - @c bra(t) , @c ket(t) , and @c braket(t) are valid expressions and evaluate to a range of Index objects; -/// - @c bra_rank(t) and @c ket_rank(t) are valid expression and return sizes of the @c bra(t) and @c ket(t) ranges, respectively; -/// - @c symmetry(t) is a valid expression and evaluates to a Symmetry object that describes the symmetry of bra/ket of a _particle-symmetric_ @c t ; -/// - @c braket_symmetry(t) is a valid expression and evaluates to a BraKetSymmetry object that describes the bra-ket symmetry of @c t ; -/// - @c particle_symmetry(t) is a valid expression and evaluates to a ParticleSymmetry object that describes the symmetry of @c t with respect to permutations of particles; -/// - @c color(t) is a valid expression and returns whether a nonnegative integer that identifies the type of a tensor; tensors with different colors can be reordered in a Product at will -/// - @c is_cnumber(t) is a valid expression and returns whether t commutes with other tensor of same color (tensors of different colors are, for now, always assumed to commute) -/// - @c label(t) is a valid expression and its return is convertible to a std::wstring; -/// - @c to_latex(t) is a valid expression and its return is convertible to a std::wstring. -/// To adapt an existing class intrusively derive it from AbstractTensor and implement all member functions. This allows to implememnt heterogeneous containers of objects that meet the Tensor concept. +/// This interface class defines a Tensor concept. 
Object @c t of a type that +/// meets the concept must satisfy the following: +/// - @c bra(t) , @c ket(t) , and @c braket(t) are valid expressions and +/// evaluate to a range of Index objects; +/// - @c bra_rank(t) and @c ket_rank(t) are valid expression and return +/// sizes of the @c bra(t) and @c ket(t) ranges, respectively; +/// - @c symmetry(t) is a valid expression and evaluates to a Symmetry +/// object that describes the symmetry of bra/ket of a +/// _particle-symmetric_ @c t ; +/// - @c braket_symmetry(t) is a valid expression and evaluates to a +/// BraKetSymmetry object that describes the bra-ket symmetry of @c t ; +/// - @c particle_symmetry(t) is a valid expression and evaluates to a +/// ParticleSymmetry object that describes the symmetry of @c t with +/// respect to permutations of particles; +/// - @c color(t) is a valid expression and returns whether a +/// nonnegative integer that identifies the type of a tensor; tensors +/// with different colors can be reordered in a Product at will +/// - @c is_cnumber(t) is a valid expression and returns whether t +/// commutes with other tensor of same color (tensors of different +/// colors are, for now, always assumed to commute) +/// - @c label(t) is a valid expression and its return is convertible to +/// a std::wstring; +/// - @c to_latex(t) is a valid expression and its return is convertible +/// to a std::wstring. +/// To adapt an existing class intrusively derive it from AbstractTensor and +/// implement all member functions. This allows to implememnt heterogeneous +/// containers of objects that meet the Tensor concept. 
class AbstractTensor { inline auto missing_instantiation_for(const char* fn_name) const { std::ostringstream oss; @@ -39,10 +55,16 @@ class AbstractTensor { public: virtual ~AbstractTensor() = default; - using const_any_view_rand = ranges::any_view; - using const_any_view_randsz = ranges::any_view; - using any_view_rand = ranges::any_view; - using any_view_randsz = ranges::any_view; + using const_any_view_rand = + ranges::any_view; + using const_any_view_randsz = + ranges::any_view; + using any_view_rand = + ranges::any_view; + using any_view_randsz = + ranges::any_view; /// view of a contiguous range of Index objects virtual const_any_view_randsz _bra() const { @@ -90,21 +112,22 @@ class AbstractTensor { throw missing_instantiation_for("operator<"); } - virtual bool _transform_indices(const container::map& index_map) { + virtual bool _transform_indices( + const container::map& index_map) { throw missing_instantiation_for("_transform_indices"); } - virtual void _reset_tags() { - throw missing_instantiation_for("_reset_tags"); - } + virtual void _reset_tags() { throw missing_instantiation_for("_reset_tags"); } private: /// @return mutable view of bra - /// @warning this is used for mutable access, flush memoized state before returning! + /// @warning this is used for mutable access, flush memoized state before + /// returning! virtual any_view_randsz _bra_mutable() { throw missing_instantiation_for("_bra_mutable"); } /// @return mutable view to ket - /// @warning this is used for mutable access, flush memoized state before returning! + /// @warning this is used for mutable access, flush memoized state before + /// returning! virtual any_view_randsz _ket_mutable() { throw missing_instantiation_for("_ket_mutable"); } @@ -112,7 +135,8 @@ class AbstractTensor { friend class TensorCanonicalizer; }; -/// @name customization points to support generic algorithms on AbstractTensor objects. 
+/// @name customization points to support generic algorithms on AbstractTensor +/// objects. /// @{ inline auto bra(const AbstractTensor& t) { return t._bra(); } inline auto ket(const AbstractTensor& t) { return t._ket(); } @@ -120,25 +144,31 @@ inline auto braket(const AbstractTensor& t) { return t._braket(); } inline auto bra_rank(const AbstractTensor& t) { return t._bra_rank(); } inline auto ket_rank(const AbstractTensor& t) { return t._ket_rank(); } inline auto symmetry(const AbstractTensor& t) { return t._symmetry(); } -inline auto braket_symmetry(const AbstractTensor& t) { return t._braket_symmetry(); } -inline auto particle_symmetry(const AbstractTensor& t) { return t._particle_symmetry(); } +inline auto braket_symmetry(const AbstractTensor& t) { + return t._braket_symmetry(); +} +inline auto particle_symmetry(const AbstractTensor& t) { + return t._particle_symmetry(); +} inline auto color(const AbstractTensor& t) { return t._color(); } inline auto is_cnumber(const AbstractTensor& t) { return t._is_cnumber(); } inline auto label(const AbstractTensor& t) { return t._label(); } inline auto to_latex(const AbstractTensor& t) { return t._to_latex(); } -/// @tparam IndexMap a {source Index -> target Index} map type; if it is not @c container::map +/// @tparam IndexMap a {source Index -> target Index} map type; if it is not @c +/// container::map /// will need to make a copy. 
/// @param[in,out] t an AbstractTensor object whose indices will be transformed -/// @param[in] index_map a const reference to an IndexMap object that specifies the transformation +/// @param[in] index_map a const reference to an IndexMap object that specifies +/// the transformation /// @return false if no indices were transformed, true otherwise -/// @pre indices are not tagged, or (if want to protect them from replacement) tagged with (int)0 +/// @pre indices are not tagged, or (if want to protect them from replacement) +/// tagged with (int)0 /// @post transformed indices are tagged with (int)0 template > inline bool transform_indices(AbstractTensor& t, const IndexMap& index_map) { if constexpr (std::is_same_v>) { return t._transform_indices(index_map); - } - else { + } else { container::map index_map_copy; ranges::copy(index_map, index_map_copy); return t._transform_indices(index_map_copy); @@ -146,12 +176,11 @@ inline bool transform_indices(AbstractTensor& t, const IndexMap& index_map) { } /// Removes tags from tensor indices /// @param[in,out] t an AbstractTensor object whose indices will be untagged -inline void reset_tags(AbstractTensor& t) { - t._reset_tags(); -} +inline void reset_tags(AbstractTensor& t) { t._reset_tags(); } // defined in AbstractTensor -//inline bool operator<(const AbstractTensor& first, const AbstractTensor& second) { +// inline bool operator<(const AbstractTensor& first, const AbstractTensor& +// second) { // return first.operator<(second); //} @@ -160,27 +189,37 @@ inline void reset_tags(AbstractTensor& t) { using AbstractTensorPtr = std::shared_ptr; /// @brief Base class for Tensor canonicalizers -/// To make custom canonicalizer make a derived class and register an instance of that class with TensorCanonicalizer::register_instance +/// To make custom canonicalizer make a derived class and register an instance +/// of that class with TensorCanonicalizer::register_instance class TensorCanonicalizer { public: virtual 
~TensorCanonicalizer(); - /// returns a TensorCanonicalizer previously registered via TensorCanonicalizer::register_instance() - /// with @c label - static std::shared_ptr instance(std::wstring_view label = L""); - /// registers @c canonicalizer to be applied to Tensor objects with label @c label ; leave the label - /// empty if @c canonicalizer is to apply to Tensor objects with any label + /// @return ptr to the TensorCanonicalizer object, if any, that had been + /// previously registered via TensorCanonicalizer::register_instance() + /// with @c label , or to the default canonicalizer, if any + static std::shared_ptr instance_ptr( + std::wstring_view label = L""); + /// @return a TensorCanonicalizer previously registered via + /// TensorCanonicalizer::register_instance() with @c label or to the default + /// canonicalizer + /// @throw std::runtime_error if no canonicalizer has been registered + static std::shared_ptr instance( + std::wstring_view label = L""); + /// registers @c canonicalizer to be applied to Tensor objects with label @c + /// label ; leave the label empty if @c canonicalizer is to apply to Tensor + /// objects with any label static void register_instance( std::shared_ptr canonicalizer, std::wstring_view label = L""); /// @return a list of Tensor labels with lexicographic preference (in order) - static const auto &cardinal_tensor_labels() { + static const auto& cardinal_tensor_labels() { return cardinal_tensor_labels_accessor(); } /// @param cardinal_tensor_labels a list of Tensor labels with lexicographic /// preference (in order) static void set_cardinal_tensor_labels( - const container::vector &labels) { + const container::vector& labels) { cardinal_tensor_labels_accessor() = labels; } @@ -189,45 +228,48 @@ class TensorCanonicalizer { /// complex conjugation? Special ExprPtr type (e.g. ConjOp)? Or the actual /// return of the canonicalization? 
// TODO generalize for complex tensors - virtual ExprPtr apply(AbstractTensor &) = 0; + virtual ExprPtr apply(AbstractTensor&) = 0; protected: - inline auto bra_range(AbstractTensor& t) { - return t._bra_mutable(); - } - inline auto ket_range(AbstractTensor& t) { - return t._ket_mutable(); - } + inline auto bra_range(AbstractTensor& t) { return t._bra_mutable(); } + inline auto ket_range(AbstractTensor& t) { return t._ket_mutable(); } private: - static container::map> - &instance_map_accessor(); - static container::vector &cardinal_tensor_labels_accessor(); + static container::map>& + instance_map_accessor(); + static container::vector& cardinal_tensor_labels_accessor(); }; class DefaultTensorCanonicalizer : public TensorCanonicalizer { public: DefaultTensorCanonicalizer() = default; - /// @tparam IndexContainer a Container of Index objects such that @c IndexContainer::value_type is convertible to Index (e.g. this can be std::vector or std::set , but not std::map) + /// @tparam IndexContainer a Container of Index objects such that @c + /// IndexContainer::value_type is convertible to Index (e.g. 
this can be + /// std::vector or std::set , but not std::map) /// @param external_indices container of external Index objects - /// @warning @c external_indices is assumed to be immutable during the lifetime of this object - template - DefaultTensorCanonicalizer(IndexContainer &&external_indices) { - ranges::for_each(external_indices, [this](const Index&idx) { + /// @warning @c external_indices is assumed to be immutable during the + /// lifetime of this object + template + DefaultTensorCanonicalizer(IndexContainer&& external_indices) { + ranges::for_each(external_indices, [this](const Index& idx) { this->external_indices_.emplace(idx.label(), idx); }); } virtual ~DefaultTensorCanonicalizer() = default; /// Implements TensorCanonicalizer::apply - /// @note Canonicalizes @c t by sorting its bra (if @c t.symmetry()==Symmetry::nonsymm ) or its bra and ket (if @c t.symmetry()!=Symmetry::nonsymm ), - /// with the external indices appearing "before" (smaller particle indices) than the internal indices - ExprPtr apply(AbstractTensor &t) override; - - /// Core of DefaultTensorCanonicalizer::apply, only does the canonicalization, i.e. no tagging/untagging - template - ExprPtr apply(AbstractTensor &t, const Compare &comp) { + /// @note Canonicalizes @c t by sorting its bra (if @c + /// t.symmetry()==Symmetry::nonsymm ) or its bra and ket (if @c + /// t.symmetry()!=Symmetry::nonsymm ), + /// with the external indices appearing "before" (smaller particle + /// indices) than the internal indices + ExprPtr apply(AbstractTensor& t) override; + + /// Core of DefaultTensorCanonicalizer::apply, only does the canonicalization, + /// i.e. 
no tagging/untagging + template + ExprPtr apply(AbstractTensor& t, const Compare& comp) { // std::wcout << "abstract tensor: " << to_latex(t) << "\n"; auto s = symmetry(t); auto is_antisymm = (s == Symmetry::antisymm); @@ -236,23 +278,21 @@ class DefaultTensorCanonicalizer : public TensorCanonicalizer { const auto _rank = std::min(_bra_rank, _ket_rank); // nothing to do for rank-1 tensors - if (_bra_rank == 1 && _ket_rank == 1) - return nullptr; + if (_bra_rank == 1 && _ket_rank == 1) return nullptr; using ranges::begin; using ranges::end; - using ranges::views::zip; - using ranges::views::take; using ranges::views::counted; + using ranges::views::take; + using ranges::views::zip; bool even = true; switch (s) { case Symmetry::antisymm: - case Symmetry::symm: - { + case Symmetry::symm: { auto _bra = bra_range(t); auto _ket = ket_range(t); -// std::wcout << "canonicalizing " << to_latex(t); + // std::wcout << "canonicalizing " << to_latex(t); IndexSwapper::thread_instance().reset(); // std::{stable_}sort does not necessarily use swap! so must implement // sort outselves .. thankfully ranks will be low so can stick with @@ -261,17 +301,16 @@ class DefaultTensorCanonicalizer : public TensorCanonicalizer { bubble_sort(begin(_ket), end(_ket), comp); if (is_antisymm) even = IndexSwapper::thread_instance().even_num_of_swaps(); -// std::wcout << " is " << (even ? "even" : "odd") << " and produces " << to_latex(t) << std::endl; - } - break; + // std::wcout << " is " << (even ? 
"even" : "odd") << " and + // produces " << to_latex(t) << std::endl; + } break; case Symmetry::nonsymm: { // sort particles with bra and ket functions first, // then the particles with either bra or ket index auto _bra = bra_range(t); auto _ket = ket_range(t); - auto _zip_braket = zip(take(_bra, _rank), - take(_ket, _rank)); + auto _zip_braket = zip(take(_bra, _rank), take(_ket, _rank)); bubble_sort(begin(_zip_braket), end(_zip_braket), comp); if (_bra_rank > _rank) { auto size_of_rest = _bra_rank - _rank; @@ -282,13 +321,14 @@ class DefaultTensorCanonicalizer : public TensorCanonicalizer { auto rest_of = counted(begin(_ket) + _rank, size_of_rest); bubble_sort(begin(rest_of), end(rest_of), comp); } - } - break; + } break; - default:abort(); + default: + abort(); } - ExprPtr result = is_antisymm ? (even == false ? ex(-1) : nullptr) : nullptr; + ExprPtr result = + is_antisymm ? (even == false ? ex(-1) : nullptr) : nullptr; return result; } @@ -298,4 +338,4 @@ class DefaultTensorCanonicalizer : public TensorCanonicalizer { } // namespace sequant -#endif //SEQUANT_ABSTRACT_TENSOR_HPP +#endif // SEQUANT_ABSTRACT_TENSOR_HPP diff --git a/SeQuant/core/tensor.cpp b/SeQuant/core/tensor.cpp index c25a67426..0f6edcfcc 100644 --- a/SeQuant/core/tensor.cpp +++ b/SeQuant/core/tensor.cpp @@ -12,15 +12,14 @@ void Tensor::assert_nonreserved_label(std::wstring_view label) const { assert(label != overlap_label()); } -void -Tensor::adjoint() { +void Tensor::adjoint() { std::swap(bra_, ket_); reset_hash_value(); } ExprPtr Tensor::canonicalize() { - const auto &canonicalizer = TensorCanonicalizer::instance(label_); - return canonicalizer->apply(*this); + auto canonicalizer_ptr = TensorCanonicalizer::instance_ptr(label_); + return canonicalizer_ptr ? 
canonicalizer_ptr->apply(*this) : ExprPtr{}; } -} // namespace sequant \ No newline at end of file +} // namespace sequant From e3f310426585aef760438778c0756869ae3004ea Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Tue, 12 Jul 2022 20:00:23 -0400 Subject: [PATCH 087/120] fixup --- examples/uccf12/uccf12.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/uccf12/uccf12.cpp b/examples/uccf12/uccf12.cpp index 857a51151..e8e32f46b 100644 --- a/examples/uccf12/uccf12.cpp +++ b/examples/uccf12/uccf12.cpp @@ -125,7 +125,7 @@ try_main() { second_com = second_com + ex(0.); second_com = simplification::screen_F12_proj(second_com); std::wcout << to_latex_align(second_com,20,2) << std::endl; - second_com = simplification::tens_to_FNOps(second_com); + second_com = simplification::detail::tens_to_FNOps(second_com); second_com = decompositions::three_body_substitution(second_com,2); simplify(second_com); std::wcout << "three body decomp: " << to_latex_align(second_com,20,2) << std::endl; @@ -156,7 +156,7 @@ try_main() { H_A_3 = H_A_3 + ex(0.); H_A_3 = simplification::screen_F12_proj(H_A_3); std::wcout << to_latex_align(H_A_3,20,2) << std::endl; - H_A_3 = simplification::tens_to_FNOps(H_A_3); + H_A_3 = simplification::detail::tens_to_FNOps(H_A_3); auto H_A_2 = decompositions::three_body_substitution(H_A_3,2); simplify(H_A_2); auto com_1 = simplification::hamiltonian_based_projector_2(H_A_2); From d3d70bb286e4d6559620d3f64b90c78e8f9ee661 Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Wed, 13 Jul 2022 16:46:35 -0400 Subject: [PATCH 088/120] IndexSpace default ctor produces valid state with nonnull type so that minimal examples do not need to register spaces --- SeQuant/core/space.cpp | 2 ++ SeQuant/core/space.hpp | 14 +++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/SeQuant/core/space.cpp b/SeQuant/core/space.cpp index 0ab331694..387017267 100644 --- a/SeQuant/core/space.cpp +++ b/SeQuant/core/space.cpp @@ -16,6 
+16,8 @@ sequant::IndexSpace sequant::IndexSpace::null_instance_{ namespace sequant { +IndexSpace::Type IndexSpace::nulltype = Type{0}; +IndexSpace::Type IndexSpace::nonnulltype = Type{0x7fffffff}; IndexSpace::Type IndexSpace::frozen_occupied = Type{0b000001}; IndexSpace::Type IndexSpace::inactive_occupied = Type{0b000010}; IndexSpace::Type IndexSpace::active_occupied = Type{0b000100}; diff --git a/SeQuant/core/space.hpp b/SeQuant/core/space.hpp index eafec0229..80b574fd3 100644 --- a/SeQuant/core/space.hpp +++ b/SeQuant/core/space.hpp @@ -68,7 +68,7 @@ class IndexSpace { /// @return an invalid TypeAttr static constexpr QuantumNumbersAttr invalid() noexcept { - return QuantumNumbersAttr(0xffff); + return QuantumNumbersAttr(-0); } }; @@ -122,9 +122,7 @@ class IndexSpace { } bool operator!=(Attr other) const { return !(*this == other); } - static Attr null() noexcept { - return Attr{TypeAttr{0}, QuantumNumbersAttr{0}}; - } + static Attr null() noexcept { return Attr{nulltype, nullqns}; } static Attr invalid() noexcept { return Attr{TypeAttr::invalid(), QuantumNumbersAttr::invalid()}; } @@ -144,6 +142,7 @@ class IndexSpace { /// standard space tags are predefined that helps implement set theory of /// standard spaces as binary ops on bitsets + static Type nulltype; static Type frozen_occupied; static Type inactive_occupied; static Type active_occupied; @@ -156,6 +155,7 @@ class IndexSpace { static Type other_unoccupied; static Type complete_unoccupied; static Type complete; + static Type nonnulltype; template static const constexpr bool is_standard_type() { const Type type{typeint}; @@ -164,7 +164,7 @@ class IndexSpace { type == active_unoccupied || type == inactive_unoccupied || type == unoccupied || type == all_active || type == all || type == other_unoccupied || type == complete_unoccupied || - type == complete); + type == complete || type == nulltype || type == nonnulltype); } /// standard space tags are predefined that helps implement set theory of @@ -304,8 
+304,8 @@ class IndexSpace { return attr2basekey_.find(attr)->second; } - /// Default ctor creates an invalid space - IndexSpace() : attr_(Attr::invalid()) {} + /// Default ctor creates space with nonnull type and null quantum numbers + IndexSpace() noexcept : attr_(nonnulltype, nullqns) {} IndexSpace(const IndexSpace &other) { if (!other.attr().is_valid()) From 2d103abf89f6eba57b46ea645c69be1416c9e1d2 Mon Sep 17 00:00:00 2001 From: Eduard Valeyev Date: Wed, 13 Jul 2022 16:47:41 -0400 Subject: [PATCH 089/120] introduced {b,f}{cre,ann}x that make normal operators with single op --- SeQuant/core/op.hpp | 394 ++++++++++++++++++++++++-------------------- 1 file changed, 216 insertions(+), 178 deletions(-) diff --git a/SeQuant/core/op.hpp b/SeQuant/core/op.hpp index 9dd9d6c1e..787c76a82 100644 --- a/SeQuant/core/op.hpp +++ b/SeQuant/core/op.hpp @@ -25,27 +25,26 @@ namespace sequant { /// /// Op = Index + Action /// @tparam S specifies the particle statistics -template +template class Op { public: static constexpr Statistics statistics = S; Op() = default; - Op(Index index, Action action) noexcept : index_(std::move(index)), action_(action) {} + Op(Index index, Action action) noexcept + : index_(std::move(index)), action_(action) {} const Index &index() const { return index_; } Index &index() { return index_; } const Action &action() const { return action_; } /// @brief changes this to its (Hermitian) adjoint - void adjoint() { - action_ = sequant::adjoint(action_); - } + void adjoint() { action_ = sequant::adjoint(action_); } static std::wstring core_label() { return get_default_context().spbasis() == SPBasis::spinorbital - ? (S == Statistics::FermiDirac ? L"a" : L"b") - : L"E"; + ? (S == Statistics::FermiDirac ? 
L"a" : L"b") + : L"E"; } /// @return the string representation of @c this in LaTeX format @@ -97,15 +96,13 @@ inline bool operator<(const Op &op1, const Op &op2) { if (op1.action() == op2.action()) { if (op1.index() == op2.index()) { return false; - } - else { + } else { return op1.index() < op2.index(); } } else { return op1.action() < op2.action(); } - } - else + } else return S1 < S2; } @@ -122,12 +119,12 @@ inline auto hash_value(const Op &op) { return val; } -template +template bool operator==(const Op &op1, const Op &op2) { return op1.index() == op2.index() && op1.action() == op2.action(); } -template +template bool operator!=(const Op &op1, const Op &op2) { return !(op1 == op2); } @@ -289,7 +286,7 @@ class NormalOperator; /// @brief Operator is a sequence of Op objects /// /// @tparam S specifies the particle statistics -template +template class Operator : public container::svector>, public Expr { public: using base_type = container::svector>; @@ -299,20 +296,18 @@ class Operator : public container::svector>, public Expr { using iterator = typename base_type::iterator; using const_iterator = typename base_type::const_iterator; + using base_type::at; using base_type::begin; - using base_type::end; using base_type::cbegin; using base_type::cend; using base_type::empty; + using base_type::end; using base_type::size; - using base_type::at; using base_type::operator[]; Operator() = default; - explicit Operator(std::initializer_list> ops) - : base_type(ops) {} - explicit Operator(base_type &&ops) - : base_type(std::move(ops)) {} + explicit Operator(std::initializer_list> ops) : base_type(ops) {} + explicit Operator(base_type &&ops) : base_type(std::move(ops)) {} template Operator(Action action, std::initializer_list indices) : base_type(make_ops(action, indices)) {} @@ -320,7 +315,8 @@ class Operator : public container::svector>, public Expr { operator base_type &() const & { return *this; } operator base_type &&() && { return *this; } - /// @brief adjoint of an 
Operator is a reversed string of the adjoints of its ops + /// @brief adjoint of an Operator is a reversed string of the adjoints of its + /// ops virtual void adjoint() override { std::reverse(this->begin(), this->end()); std::for_each(this->begin(), this->end(), [](Op &op) { op.adjoint(); }); @@ -331,33 +327,24 @@ class Operator : public container::svector>, public Expr { std::wstring to_latex() const override { std::wstring result; result = L"{"; - for (const auto &o : *this) - result += o.to_latex(); + for (const auto &o : *this) result += o.to_latex(); result += L"}"; return result; } - type_id_type type_id() const override { - return get_type_id(); - }; + type_id_type type_id() const override { return get_type_id(); }; - ExprPtr clone() const override { - return std::make_shared(*this); - } + ExprPtr clone() const override { return std::make_shared(*this); } private: - base_type make_ops(Action action, - IndexList indices) { + base_type make_ops(Action action, IndexList indices) { base_type result; result.reserve(indices.size()); - for (const auto &idx : indices) - result.emplace_back(idx, action); + for (const auto &idx : indices) result.emplace_back(idx, action); return result; } - base_type - make_ops(Action action, - WstrList index_labels) { + base_type make_ops(Action action, WstrList index_labels) { base_type result; result.reserve(index_labels.size()); for (const auto &idx_label : index_labels) @@ -367,18 +354,15 @@ class Operator : public container::svector>, public Expr { bool static_equal(const Expr &that) const override; - bool is_cnumber() const override { - return false; - } + bool is_cnumber() const override { return false; } - bool commutes_with_atom(const Expr& that) const override { + bool commutes_with_atom(const Expr &that) const override { bool result = true; /// does not commute with Operator /// TODO implement checks of commutativity with Operator if (that.is>()) { result = false; - } - else if (that.is>()) { + } else if (that.is>()) { 
result = that.as>().commutes_with_atom(*this); } return result; @@ -387,14 +371,13 @@ class Operator : public container::svector>, public Expr { hash_type memoizing_hash() const override { using std::begin; using std::end; - const auto& ops = static_cast(*this); + const auto &ops = static_cast(*this); return hash::range(begin(ops), end(ops)); } - }; -template -inline bool operator==(const Operator& one, const Operator& another) { +template +inline bool operator==(const Operator &one, const Operator &another) { using base_type = container::svector>; if (one.size() == another.size()) { if (one.empty()) return true; @@ -409,7 +392,7 @@ inline bool operator==(const Operator& one, const Operator& another) { return false; } -template +template bool Operator::static_equal(const Expr &that) const { const auto &that_cast = static_cast(that); return *this == that_cast; @@ -434,33 +417,35 @@ bool Operator::static_equal(const Expr &that) const { /// ann(q1) ann(q2) is represented as a^{⎵ p1}_{q1 q2}. /// /// @tparam S specifies the particle statistics -template +template class NormalOperator : public Operator, public AbstractTensor { public: static constexpr Statistics statistics = S; using base_type = Operator; + using vector_type = typename Operator::base_type; // iterate over this using the base_type using iterator = typename Operator::iterator; using const_iterator = typename Operator::const_iterator; + using base_type::at; using base_type::begin; - using base_type::end; using base_type::cbegin; using base_type::cend; using base_type::empty; + using base_type::end; using base_type::size; - using base_type::at; using base_type::operator[]; /// constructs an identity operator NormalOperator(Vacuum v = get_default_context().vacuum()) {} /// @param creators sequence of creator indices - /// @param annihilators sequence of annihilator indices (in order of particle indices, see the class documentation for more info). 
- template < - typename IndexContainer, - typename = std::enable_if_t::value_type, Index>>> + /// @param annihilators sequence of annihilator indices (in order of particle + /// indices, see the class documentation for more info). + template ::value_type, Index>>> NormalOperator(IndexContainer &&creator_indices, IndexContainer &&annihilator_indices, Vacuum v = get_default_context().vacuum()) @@ -475,12 +460,17 @@ class NormalOperator : public Operator, public AbstractTensor { } /// @param creators sequence of creators - /// @param annihilators sequence of annihilators (in order of particle indices, see the class documentation for more info). + /// @param annihilators sequence of annihilators (in order of particle + /// indices, see the class documentation for more info). template - NormalOperator(OpContainer &&creators, - OpContainer &&annihilators, - Vacuum v = get_default_context().vacuum(), - std::enable_if_t, NormalOperator> && std::is_same_v::value_type, Op>>* = nullptr) : Operator{}, vacuum_(v), ncreators_(ranges::size(creators)) { + NormalOperator( + OpContainer &&creators, OpContainer &&annihilators, + Vacuum v = get_default_context().vacuum(), + std::enable_if_t< + !std::is_same_v, NormalOperator> && + std::is_same_v::value_type, Op>> + * = nullptr) + : Operator{}, vacuum_(v), ncreators_(ranges::size(creators)) { for (const auto &op : creators) { assert(op.action() == Action::create); } @@ -488,15 +478,18 @@ class NormalOperator : public Operator, public AbstractTensor { assert(op.action() == Action::annihilate); } this->reserve(ranges::size(creators) + ranges::size(annihilators)); - static_cast>*>(this)->insert(this->end(), ranges::cbegin(creators), ranges::cend(creators)); - static_cast>*>(this)->insert(this->end(), ranges::crbegin(annihilators), ranges::crend(annihilators)); + static_cast(this)->insert( + this->end(), ranges::cbegin(creators), ranges::cend(creators)); + static_cast(this)->insert(this->end(), + ranges::crbegin(annihilators), + 
ranges::crend(annihilators)); } /// @param creators initializer_list of creator indices - /// @param annihilators initializer_list of annihilator indices (in order of particle indices, see the class documentation for more info). - template < - typename I, - typename = std::enable_if_t,Op>>> + /// @param annihilators initializer_list of annihilator indices (in order of + /// particle indices, see the class documentation for more info). + template , Op>>> NormalOperator(std::initializer_list creator_indices, std::initializer_list annihilator_indices, Vacuum v = get_default_context().vacuum()) @@ -511,19 +504,23 @@ class NormalOperator : public Operator, public AbstractTensor { } /// @param creators initializer_list of creators - /// @param annihilators initializer_list of annihilators (in order of particle indices, see the class documentation for more info). + /// @param annihilators initializer_list of annihilators (in order of particle + /// indices, see the class documentation for more info). 
NormalOperator(std::initializer_list> creators, std::initializer_list> annihilators, - Vacuum v = get_default_context().vacuum()) : Operator{}, vacuum_(v), ncreators_(size(creators)) { + Vacuum v = get_default_context().vacuum()) + : Operator{}, vacuum_(v), ncreators_(std::size(creators)) { for (const auto &op : creators) { assert(op.action() == Action::create); } for (const auto &op : annihilators) { assert(op.action() == Action::annihilate); } - this->reserve(size(creators) + size(annihilators)); - this->insert(this->end(), cbegin(creators), cend(creators)); - this->insert(this->end(), crbegin(annihilators), crend(annihilators)); + this->reserve(std::size(creators) + std::size(annihilators)); + static_cast(this)->insert(this->end(), std::cbegin(creators), + std::cend(creators)); + static_cast(this)->insert( + this->end(), std::crbegin(annihilators), std::crend(annihilators)); } NormalOperator(const NormalOperator &other) : Operator(other), @@ -540,12 +537,18 @@ class NormalOperator : public Operator, public AbstractTensor { return *this; } - /// @return the vacuum state with respect to which the operator is normal-ordered. + /// @return the vacuum state with respect to which the operator is + /// normal-ordered. 
Vacuum vacuum() const { return vacuum_; } /// @return the range of creators, in the order of increasing particle index - auto creators() const { return ranges::views::counted(this->cbegin(), ncreators()); } - /// @return the range of annihilators, in the order of increasing particle index - auto annihilators() const { return ranges::views::counted(this->crbegin(), nannihilators()); } + auto creators() const { + return ranges::views::counted(this->cbegin(), ncreators()); + } + /// @return the range of annihilators, in the order of increasing particle + /// index + auto annihilators() const { + return ranges::views::counted(this->crbegin(), nannihilators()); + } /// @return the number of creators auto ncreators() const { return ncreators_; } /// @return the number of annihilators @@ -556,10 +559,12 @@ class NormalOperator : public Operator, public AbstractTensor { } /// @return number of creators/annihilators - /// @throw std::logic_error if the operator is not particle number conserving (i.e. if ncreators() != nannihilators() ) + /// @throw std::logic_error if the operator is not particle number conserving + /// (i.e. 
if ncreators() != nannihilators() ) auto rank() const { if (ncreators() != nannihilators()) { - throw std::logic_error("NormalOperator::rank(): ncreators != nannihilators"); + throw std::logic_error( + "NormalOperator::rank(): ncreators != nannihilators"); } return ncreators(); } @@ -575,7 +580,7 @@ class NormalOperator : public Operator, public AbstractTensor { } /// @return all possible values returned by label() for this operator type - static const container::vector& labels(); + static const container::vector &labels(); std::wstring label() const; @@ -584,8 +589,7 @@ class NormalOperator : public Operator, public AbstractTensor { result = L"{"; if (vacuum() == Vacuum::Physical) { result += Op::core_label(); - } - else { + } else { result += L"\\tilde{"; result += Op::core_label(); result += L"}"; @@ -604,8 +608,7 @@ class NormalOperator : public Operator, public AbstractTensor { result += L"\\,"; } } - for (const auto &o : creators()) - result += o.index().to_latex(); + for (const auto &o : creators()) result += o.index().to_latex(); result += L"}_{"; if (ncreators > nannihilators) { // pad on the left with square underbrackets, i.e. ⎵ @@ -618,8 +621,7 @@ class NormalOperator : public Operator, public AbstractTensor { result += L"\\,"; } } - for (const auto &o : annihilators()) - result += o.index().to_latex(); + for (const auto &o : annihilators()) result += o.index().to_latex(); result += L"}}"; return result; } @@ -658,17 +660,17 @@ class NormalOperator : public Operator, public AbstractTensor { /// Replaces indices using the index map /// @param index_map maps Index to Index /// @return true if one or more indices changed - /// @pre indices are not tagged, or (if want to protect them from replacement) tagged with (int)0 + /// @pre indices are not tagged, or (if want to protect them from replacement) + /// tagged with (int)0 /// @post indices that were replaced will be tagged with (int)0 template