From d224fdd5a44c8227d09feeee88083c3b122f02a1 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 20 Sep 2024 23:34:45 -0400 Subject: [PATCH 001/193] fix: fix `DPH5Path.glob` for new keys (#4152) Fix #4151. ## Summary by CodeRabbit - **New Features** - Enhanced path filtering logic to include a broader range of keys when generating subpaths. - **Bug Fixes** - Improved the accuracy of path results returned by the `glob` method. Signed-off-by: Jinzhe Zeng --- deepmd/utils/path.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/deepmd/utils/path.py b/deepmd/utils/path.py index 377953cc35..e794a36cab 100644 --- a/deepmd/utils/path.py +++ b/deepmd/utils/path.py @@ -1,4 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import itertools import os from abc import ( ABC, @@ -373,7 +374,11 @@ def glob(self, pattern: str) -> List["DPPath"]: list of paths """ # got paths starts with current path first, which is faster - subpaths = [ii for ii in self._keys if ii.startswith(self._name)] + subpaths = [ + ii + for ii in itertools.chain(self._keys, self._new_keys) + if ii.startswith(self._name) + ] return [ type(self)(f"{self.root_path}#{pp}", mode=self.mode) for pp in globfilter(subpaths, self._connect_path(pattern)) From f34cbe1c416c3399596fe4ab40e2fa3f58a83806 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 21 Sep 2024 11:57:54 +0800 Subject: [PATCH 002/193] feat(pt/tf): support spin lammps plugin --- source/api_c/include/c_api.h | 223 +++++++++++++---- source/api_c/include/deepmd.hpp | 395 +++++++++++++++++++++++++++++ source/api_c/src/c_api.cc | 392 +++++++++++++++++++++++++++++ source/api_cc/include/DeepPot.h | 92 +++++++ source/api_cc/include/DeepPotTF.h | 81 ++++++ source/api_cc/src/DeepPot.cc | 208 ++++++++++++++++ source/api_cc/src/DeepPotPT.cc | 16 +- source/api_cc/src/DeepPotTF.cc | 396 ++++++++++++++++++++++++++++++ source/lmp/pair_deepmd.cpp | 207 +++++++++++----- 9 files changed, 1892 insertions(+), 118 deletions(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index 2f88f25e43..9d2e732d6e 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -230,6 +230,22 @@ extern void DP_DeepPotComputeNList(DP_DeepPot* dp, double* atomic_energy, double* atomic_virial); +extern void DP_DeepPotComputeNListSP(DP_DeepPot* dp, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); + /** * @brief Evaluate the energy, force and virial by using a DP with the neighbor *list. (float version) @@ -268,6 +284,22 @@ extern void DP_DeepPotComputeNListf(DP_DeepPot* dp, float* atomic_energy, float* atomic_virial); +extern void DP_DeepPotComputeNListfSP(DP_DeepPot* dp, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); + /** * @brief Evaluate the energy, force and virial by using a DP. (double version) * @version 2 @@ -392,6 +424,25 @@ extern void DP_DeepPotComputeNList2(DP_DeepPot* dp, double* atomic_energy, double* atomic_virial); +extern void DP_DeepPotComputeNList2SP(DP_DeepPot* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); + /** * @brief Evaluate the energy, force and virial by using a DP with the neighbor *list. (float version) @@ -438,45 +489,64 @@ extern void DP_DeepPotComputeNListf2(DP_DeepPot* dp, float* atomic_energy, float* atomic_virial); -/** - * @brief Evaluate the energy, force and virial by using a DP with the mixed - *type. (double version) - * @param[in] dp The DP to use. - * @param[in] nframes The number of frames. - * @param[in] natoms The number of atoms. - * @param[in] coord The coordinates of atoms. The array should be of size natoms - *x 3. - * @param[in] atype The atom types. The array should contain nframes x natoms - *ints. - * @param[in] box The cell of the region. The array should be of size 9. Pass - *NULL if pbc is not used. - * @param[in] fparam The frame parameters. The array can be of size nframes x - *dim_fparam. - * @param[in] aparam The atom parameters. The array can be of size nframes x - *dim_aparam. - * @param[out] energy Output energy. - * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] virial Output virial. The array should be of size 9. - * @param[out] atomic_energy Output atomic energy. The array should be of size - *natoms. - * @param[out] atomic_virial Output atomic virial. The array should be of size - *natoms x 9. - * @warning The output arrays should be allocated before calling this function. - *Pass NULL if not required. - **/ -extern void DP_DeepPotComputeMixedType(DP_DeepPot* dp, +extern void DP_DeepPotComputeNListf2SP(DP_DeepPot* dp, const int nframes, const int natoms, - const double* coord, + const float* coord, + const float* spin, const int* atype, - const double* cell, - const double* fparam, - const double* aparam, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const float* fparam, + const float* aparam, double* energy, - double* force, - double* virial, - double* atomic_energy, - double* atomic_virial); + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) + + /** + * @brief Evaluate the energy, force and virial by using a DP with the mixed + *type. (double version) + * @param[in] dp The DP to use. + * @param[in] nframes The number of frames. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size + *natoms x 3. + * @param[in] atype The atom types. The array should contain nframes x + *natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. + *Pass NULL if pbc is not used. + * @param[in] fparam The frame parameters. The array can be of size nframes + *x dim_fparam. + * @param[in] aparam The atom parameters. The array can be of size nframes x + *dim_aparam. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of + *size natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of + *size natoms x 9. + * @warning The output arrays should be allocated before calling this + *function. Pass NULL if not required. + **/ + extern void DP_DeepPotComputeMixedType(DP_DeepPot* dp, + const int nframes, + const int natoms, + const double* coord, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* virial, + double* atomic_energy, + double* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP with the mixed *type. (float version) @@ -734,6 +804,22 @@ extern void DP_DeepPotModelDeviComputeNList(DP_DeepPotModelDevi* dp, double* atomic_energy, double* atomic_virial); +extern void DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi* dp, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); + /** * @brief Evaluate the energy, force and virial by using a DP model deviation *with neighbor list. (float version) @@ -771,6 +857,22 @@ extern void DP_DeepPotModelDeviComputeNListf(DP_DeepPotModelDevi* dp, float* atomic_energy, float* atomic_virial); +extern void DP_DeepPotModelDeviComputeNListfSP(DP_DeepPotModelDevi* dp, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); + /** * @brief Evaluate the energy, force and virial by using a DP model deviation *with neighbor list. (double version) @@ -816,6 +918,26 @@ void DP_DeepPotModelDeviComputeNList2(DP_DeepPotModelDevi* dp, double* virial, double* atomic_energy, double* atomic_virial); + +void DP_DeepPotModelDeviComputeNList2SP(DP_DeepPotModelDevi* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); + /** * @brief Evaluate the energy, force and virial by using a DP model deviation *with neighbor list. (float version) @@ -862,12 +984,31 @@ void DP_DeepPotModelDeviComputeNListf2(DP_DeepPotModelDevi* dp, float* atomic_energy, float* atomic_virial); -/** - * @brief Get the type map of a DP model deviation. - * @param[in] dp The DP model deviation to use. - * @return The cutoff radius. - */ -double DP_DeepPotModelDeviGetCutoff(DP_DeepPotModelDevi* dp); +void DP_DeepPotModelDeviComputeNListf2SP(DP_DeepPotModelDevi* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) + + /** + * @brief Get the type map of a DP model deviation. + * @param[in] dp The DP model deviation to use. + * @return The cutoff radius. + */ + double DP_DeepPotModelDeviGetCutoff(DP_DeepPotModelDevi* dp); /** * @brief Get the number of types of a DP model deviation. diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 9d0310d99a..ca695b4a35 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -157,6 +157,75 @@ inline void _DP_DeepPotComputeNList(DP_DeepPot *dp, atomic_energy, atomic_virial); } +// support spin +template +inline void _DP_DeepPotComputeNListSP(DP_DeepPot *dp, + const int nframes, + const int natom, + const FPTYPE *coord, + const FPTYPE *spin, + const int *atype, + const FPTYPE *cell, + const int nghost, + const DP_Nlist *nlist, + const int ago, + const FPTYPE *fparam, + const FPTYPE *aparam, + double *energy, + FPTYPE *force, + FPTYPE *force_mag, + FPTYPE *virial, + FPTYPE *atomic_energy, + FPTYPE *atomic_virial); + +template <> +inline void _DP_DeepPotComputeNListSP(DP_DeepPot *dp, + const int nframes, + const int natom, + const double *coord, + const double *spin, + const int *atype, + const double *cell, + const int nghost, + const DP_Nlist *nlist, + const int ago, + const double *fparam, + const double *aparam, + double *energy, + double *force, + double *force_mag, + double *virial, + double *atomic_energy, + double *atomic_virial) { + DP_DeepPotComputeNList2SP(dp, nframes, natom, coord, spin, atype, cell, + nghost, nlist, ago, fparam, aparam, energy, force, + force_mag, virial, atomic_energy, atomic_virial); +} + +template <> +inline void _DP_DeepPotComputeNListSP(DP_DeepPot *dp, + const int nframes, + const int natom, + const float *coord, + const float *spin, + const int *atype, + const float *cell, + const int nghost, + const DP_Nlist *nlist, + const int ago, + const float *fparam, + const float *aparam, + double *energy, + float *force, + float *force_mag, + float *virial, + float *atomic_energy, + float *atomic_virial) { + DP_DeepPotComputeNListf2SP(dp, nframes, natom, coord, spin, atype, cell, + nghost, nlist, ago, fparam, aparam, energy, force, + force_mag, virial, atomic_energy, atomic_virial); +} + template inline void _DP_DeepPotComputeMixedType(DP_DeepPot *dp, const int nframes, @@ -319,6 +388,69 @@ inline void _DP_DeepPotModelDeviComputeNList(DP_DeepPotModelDevi *dp, virial, atomic_energy, atomic_virial); } +template +inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi *dp, + const int natom, + const FPTYPE *coord, + const FPTYPE *spin, + const int *atype, + const FPTYPE *cell, + const int nghost, + const DP_Nlist *nlist, + const int ago, + const FPTYPE *fparam, + const FPTYPE *aparam, + double *energy, + FPTYPE *force, + FPTYPE *force_mag, + FPTYPE *virial, + FPTYPE *atomic_energy, + FPTYPE *atomic_virial); +template <> +inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi *dp, + const int natom, + const double *coord, + const double *spin, + const int *atype, + const double *cell, + const int nghost, + const DP_Nlist *nlist, + const int ago, + const double *fparam, + const double *aparam, + double *energy, + double *force, + double *force_mag, + double *virial, + double *atomic_energy, + double *atomic_virial) { + DP_DeepPotModelDeviComputeNList2SP( + dp, 1, natom, coord, spin, atype, cell, nghost, nlist, ago, fparam, + aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); +} +template <> +inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi *dp, + const int natom, + const float *coord, + const float *spin, + const int *atype, + const float *cell, + const int nghost, + const DP_Nlist *nlist, + const int ago, + const float *fparam, + const float *aparam, + double *energy, + float *force, + float *force_mag, + float *virial, + float *atomic_energy, + float *atomic_virial) { + DP_DeepPotModelDeviComputeNListf2SP( + dp, 1, natom, coord, spin, atype, cell, nghost, nlist, ago, fparam, + aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); +} + template inline void _DP_DeepTensorComputeTensor(DP_DeepTensor *dt, const int natom, @@ -882,6 +1014,54 @@ class DeepPot { fparam__, aparam__, ener_, force_, virial_, nullptr, nullptr); DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; + // support spin + template + void compute( + ENERGYVTYPE &ener, + std::vector &force, + std::vector &force_mag, + std::vector &virial, + const std::vector &coord, + const std::vector &spin, + const std::vector &atype, + const std::vector &box, + const int nghost, + const InputNlist &lmp_list, + const int &ago, + const std::vector &fparam = std::vector(), + const std::vector &aparam = std::vector()) { + unsigned int natoms = atype.size(); + unsigned int nframes = natoms > 0 ? coord.size() / natoms / 3 : 1; + assert(nframes * natoms * 3 == coord.size()); + if (!box.empty()) { + assert(box.size() == nframes * 9); + } + const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; + const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; + const int *atype_ = &atype[0]; + double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); + force.resize(static_cast(nframes) * natoms * 3); + force_mag.resize(static_cast(nframes) * natoms * 3); + virial.resize(static_cast(nframes) * 9); + VALUETYPE *force_ = &force[0]; + VALUETYPE *force_mag_ = &force_mag[0]; + VALUETYPE *virial_ = &virial[0]; + std::vector fparam_, aparam_; + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); + tile_fparam_aparam(fparam_, nframes, dfparam, fparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); + const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; + const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; + _DP_DeepPotComputeNListSP(dp, nframes, natoms, coord_, spin_, + atype_, box_, nghost, lmp_list.nl, ago, + fparam__, aparam__, ener_, force_, + force_mag_, virial_, nullptr, nullptr); + DP_CHECK_OK(DP_DeepPotCheckOK, dp); + }; /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP with the neighbor list. @@ -958,6 +1138,60 @@ class DeepPot { atomic_ener_, atomic_virial_); DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; + // support spin + template + void compute( + ENERGYVTYPE &ener, + std::vector &force, + std::vector &force_mag, + std::vector &virial, + std::vector &atom_energy, + std::vector &atom_virial, + const std::vector &coord, + const std::vector &spin, + const std::vector &atype, + const std::vector &box, + const int nghost, + const InputNlist &lmp_list, + const int &ago, + const std::vector &fparam = std::vector(), + const std::vector &aparam = std::vector()) { + unsigned int natoms = atype.size(); + unsigned int nframes = natoms > 0 ? coord.size() / natoms / 3 : 1; + assert(nframes * natoms * 3 == coord.size()); + if (!box.empty()) { + assert(box.size() == nframes * 9); + } + const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; + const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; + const int *atype_ = &atype[0]; + double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); + force.resize(static_cast(nframes) * natoms * 3); + force_mag.resize(static_cast(nframes) * natoms * 3); + virial.resize(static_cast(nframes) * 9); + atom_energy.resize(static_cast(nframes) * natoms); + atom_virial.resize(static_cast(nframes) * natoms * 9); + VALUETYPE *force_ = &force[0]; + VALUETYPE *force_mag_ = &force_mag[0]; + VALUETYPE *virial_ = &virial[0]; + VALUETYPE *atomic_ener_ = &atom_energy[0]; + VALUETYPE *atomic_virial_ = &atom_virial[0]; + std::vector fparam_, aparam_; + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); + tile_fparam_aparam(fparam_, nframes, dfparam, fparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); + const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; + const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; + _DP_DeepPotComputeNListSP( + dp, nframes, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, + ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, + atomic_ener_, atomic_virial_); + DP_CHECK_OK(DP_DeepPotCheckOK, dp); + }; /** * @brief Evaluate the energy, force and virial by using this DP with the *mixed type. @@ -1503,6 +1737,78 @@ class DeepPotModelDevi { } } }; + // support spin + template + void compute( + std::vector &ener, + std::vector> &force, + std::vector> &force_mag, + std::vector> &virial, + const std::vector &coord, + const std::vector &spin, + const std::vector &atype, + const std::vector &box, + const int nghost, + const InputNlist &lmp_list, + const int &ago, + const std::vector &fparam = std::vector(), + const std::vector &aparam = std::vector()) { + unsigned int natoms = atype.size(); + unsigned int nframes = 1; + assert(natoms * 3 == coord.size()); + if (!box.empty()) { + assert(box.size() == 9); + } + const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; + const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; + const int *atype_ = &atype[0]; + // memory will be continous for std::vector but not std::vector + std::vector energy_flat(numb_models); + std::vector force_flat(static_cast(numb_models) * + natoms * 3); + std::vector force_mag_flat(static_cast(numb_models) * + natoms * 3); + std::vector virial_flat(numb_models * 9); + double *ener_ = &energy_flat[0]; + VALUETYPE *force_ = &force_flat[0]; + VALUETYPE *force_mag_ = &force_mag_flat[0]; + VALUETYPE *virial_ = &virial_flat[0]; + std::vector fparam_, aparam_; + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); + tile_fparam_aparam(fparam_, nframes, dfparam, fparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); + const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; + const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; + _DP_DeepPotModelDeviComputeNListSP( + dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, + fparam__, aparam__, ener_, force_, force_mag_, virial_, nullptr, + nullptr); + DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); + // reshape + ener.resize(numb_models); + force.resize(numb_models); + force_mag.resize(numb_models); + virial.resize(numb_models); + for (int i = 0; i < numb_models; i++) { + ener[i] = energy_flat[i]; + force[i].resize(static_cast(natoms) * 3); + force_mag[i].resize(static_cast(natoms) * 3); + virial[i].resize(9); + for (int j = 0; j < natoms * 3; j++) { + force[i][j] = force_flat[i * natoms * 3 + j]; + } + for (int j = 0; j < natoms * 3; j++) { + force_mag[i][j] = force_mag_flat[i * natoms * 3 + j]; + } + for (int j = 0; j < 9; j++) { + virial[i][j] = virial_flat[i * 9 + j]; + } + } + }; /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP model deviation. @@ -1607,6 +1913,95 @@ class DeepPotModelDevi { } } }; + // support spin + template + void compute( + std::vector &ener, + std::vector> &force, + std::vector> &force_mag, + std::vector> &virial, + std::vector> &atom_energy, + std::vector> &atom_virial, + const std::vector &coord, + const std::vector &spin, + const std::vector &atype, + const std::vector &box, + const int nghost, + const InputNlist &lmp_list, + const int &ago, + const std::vector &fparam = std::vector(), + const std::vector &aparam = std::vector()) { + unsigned int natoms = atype.size(); + unsigned int nframes = 1; + assert(natoms * 3 == coord.size()); + if (!box.empty()) { + assert(box.size() == 9); + } + const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; + const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; + const int *atype_ = &atype[0]; + std::vector energy_flat(numb_models); + std::vector force_flat(static_cast(numb_models) * + natoms * 3); + std::vector force_mag_flat(static_cast(numb_models) * + natoms * 3); + std::vector virial_flat(numb_models * 9); + std::vector atom_energy_flat(static_cast(numb_models) * + natoms); + std::vector atom_virial_flat(static_cast(numb_models) * + natoms * 9); + double *ener_ = &energy_flat[0]; + VALUETYPE *force_ = &force_flat[0]; + VALUETYPE *force_mag_ = &force_mag_flat[0]; + VALUETYPE *virial_ = &virial_flat[0]; + VALUETYPE *atomic_ener_ = &atom_energy_flat[0]; + VALUETYPE *atomic_virial_ = &atom_virial_flat[0]; + std::vector fparam_, aparam_; + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); + tile_fparam_aparam(fparam_, nframes, dfparam, fparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); + const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; + const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; + _DP_DeepPotModelDeviComputeNListSP( + dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, + fparam__, aparam__, ener_, force_, force_mag_, virial_, atomic_ener_, + atomic_virial_); + DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); + // reshape + ener.resize(numb_models); + force.resize(numb_models); + force_mag.resize(numb_models); + virial.resize(numb_models); + atom_energy.resize(numb_models); + atom_virial.resize(numb_models); + for (int i = 0; i < numb_models; i++) { + ener[i] = energy_flat[i]; + force[i].resize(static_cast(natoms) * 3); + force_mag[i].resize(static_cast(natoms) * 3); + virial[i].resize(9); + atom_energy[i].resize(natoms); + atom_virial[i].resize(static_cast(natoms) * 9); + for (int j = 0; j < natoms * 3; j++) { + force[i][j] = force_flat[i * natoms * 3 + j]; + } + for (int j = 0; j < natoms * 3; j++) { + force_mag[i][j] = force_mag_flat[i * natoms * 3 + j]; + } + for (int j = 0; j < 9; j++) { + virial[i][j] = virial_flat[i * 9 + j]; + } + for (int j = 0; j < natoms; j++) { + atom_energy[i][j] = atom_energy_flat[i * natoms + j]; + } + for (int j = 0; j < natoms * 9; j++) { + atom_virial[i][j] = atom_virial_flat[i * natoms * 9 + j]; + } + } + }; /** * @brief Get the cutoff radius. * @return The cutoff radius. diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index 9ed37d04aa..e919833560 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -351,6 +351,108 @@ template void DP_DeepPotComputeNList_variant(DP_DeepPot* dp, float* atomic_energy, float* atomic_virial); +// support spin +template +inline void DP_DeepPotComputeNList_variant_sp(DP_DeepPot* dp, + const int nframes, + const int natoms, + const VALUETYPE* coord, + const VALUETYPE* spin, + const int* atype, + const VALUETYPE* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const VALUETYPE* fparam, + const VALUETYPE* aparam, + double* energy, + VALUETYPE* force, + VALUETYPE* force_mag, + VALUETYPE* virial, + VALUETYPE* atomic_energy, + VALUETYPE* atomic_virial) { + // init C++ vectors from C arrays + std::vector coord_(coord, coord + nframes * natoms * 3); + std::vector spin_(spin, spin + nframes * natoms * 3); + std::vector atype_(atype, atype + natoms); + std::vector cell_; + if (cell) { + // pbc + cell_.assign(cell, cell + nframes * 9); + } + std::vector fparam_; + if (fparam) { + fparam_.assign(fparam, fparam + nframes * dp->dfparam); + } + std::vector aparam_; + if (aparam) { + aparam_.assign(aparam, + aparam + nframes * + (dp->aparam_nall ? natoms : (natoms - nghost)) * + dp->daparam); + } + std::vector e; + std::vector f, fm, v, ae, av; + DP_REQUIRES_OK( + dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, + nghost, nlist->nl, ago, fparam_, aparam_)); + // copy from C++ vectors to C arrays, if not NULL pointer + if (energy) { + std::copy(e.begin(), e.end(), energy); + } + if (force) { + std::copy(f.begin(), f.end(), force); + } + if (force_mag) { + std::copy(fm.begin(), fm.end(), force_mag); + } + if (virial) { + std::copy(v.begin(), v.end(), virial); + } + if (atomic_energy) { + std::copy(ae.begin(), ae.end(), atomic_energy); + } + if (atomic_virial) { + std::copy(av.begin(), av.end(), atomic_virial); + } +} +template void DP_DeepPotComputeNList_variant_sp(DP_DeepPot* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); +template void DP_DeepPotComputeNList_variant_sp(DP_DeepPot* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); + template inline void DP_DeepPotComputeMixedType_variant(DP_DeepPot* dp, const int nframes, @@ -653,6 +755,123 @@ template void DP_DeepPotModelDeviComputeNList_variant( float* atomic_energy, float* atomic_virial); +// support spin multi model. +template +void DP_DeepPotModelDeviComputeNList_variant_sp(DP_DeepPotModelDevi* dp, + const int nframes, + const int natoms, + const VALUETYPE* coord, + const VALUETYPE* spin, + const int* atype, + const VALUETYPE* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const VALUETYPE* fparam, + const VALUETYPE* aparam, + double* energy, + VALUETYPE* force, + VALUETYPE* force_mag, + VALUETYPE* virial, + VALUETYPE* atomic_energy, + VALUETYPE* atomic_virial) { + if (nframes > 1) { + throw std::runtime_error("nframes > 1 not supported yet"); + } + // init C++ vectors from C arrays + std::vector coord_(coord, coord + natoms * 3); + std::vector spin_(spin, spin + natoms * 3); + std::vector atype_(atype, atype + natoms); + std::vector cell_; + if (cell) { + // pbc + cell_.assign(cell, cell + 9); + } + std::vector fparam_; + if (fparam) { + fparam_.assign(fparam, fparam + dp->dfparam); + } + std::vector aparam_; + if (aparam) { + aparam_.assign( + aparam, + aparam + (dp->aparam_nall ? natoms : (natoms - nghost)) * dp->daparam); + } + // different from DeepPot + std::vector e; + std::vector> f, fm, v, ae, av; + DP_REQUIRES_OK( + dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, + nghost, nlist->nl, ago, fparam_, aparam_)); + // 2D vector to 2D array, flatten first + if (energy) { + std::copy(e.begin(), e.end(), energy); + } + if (force) { + std::vector f_flat; + flatten_vector(f_flat, f); + std::copy(f_flat.begin(), f_flat.end(), force); + } + if (force_mag) { + std::vector f_mag_flat; + flatten_vector(f_mag_flat, fm); + std::copy(f_mag_flat.begin(), f_mag_flat.end(), force_mag); + } + if (virial) { + std::vector v_flat; + flatten_vector(v_flat, v); + std::copy(v_flat.begin(), v_flat.end(), virial); + } + if (atomic_energy) { + std::vector ae_flat; + flatten_vector(ae_flat, ae); + std::copy(ae_flat.begin(), ae_flat.end(), atomic_energy); + } + if (atomic_virial) { + std::vector av_flat; + flatten_vector(av_flat, av); + std::copy(av_flat.begin(), av_flat.end(), atomic_virial); + } +} +template void DP_DeepPotModelDeviComputeNList_variant_sp( + DP_DeepPotModelDevi* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); +template void DP_DeepPotModelDeviComputeNList_variant_sp( + DP_DeepPotModelDevi* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); + template inline void DP_DeepTensorComputeTensor_variant(DP_DeepTensor* dt, const int natoms, @@ -1038,6 +1257,26 @@ void DP_DeepPotComputeNList(DP_DeepPot* dp, force, virial, atomic_energy, atomic_virial); } +void DP_DeepPotComputeNListSP(DP_DeepPot* dp, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepPotComputeNList_variant_sp( + dp, 1, natoms, coord, spin, atype, cell, nghost, nlist, ago, NULL, NULL, + energy, force, force_mag, virial, atomic_energy, atomic_virial); +} + void DP_DeepPotComputeNListf(DP_DeepPot* dp, const int natoms, const float* coord, @@ -1056,6 +1295,26 @@ void DP_DeepPotComputeNListf(DP_DeepPot* dp, force, virial, atomic_energy, atomic_virial); } +void DP_DeepPotComputeNListfSP(DP_DeepPot* dp, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepPotComputeNList_variant_sp( + dp, 1, natoms, coord, spin, atype, cell, nghost, nlist, ago, NULL, NULL, + energy, force, force_mag, virial, atomic_energy, atomic_virial); +} + // multiple frames void DP_DeepPotCompute2(DP_DeepPot* dp, const int nframes, @@ -1114,6 +1373,29 @@ void DP_DeepPotComputeNList2(DP_DeepPot* dp, aparam, energy, force, virial, atomic_energy, atomic_virial); } +void DP_DeepPotComputeNList2SP(DP_DeepPot* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepPotComputeNList_variant_sp( + dp, nframes, natoms, coord, spin, atype, cell, nghost, nlist, ago, fparam, + aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); +} + void DP_DeepPotComputeNListf2(DP_DeepPot* dp, const int nframes, const int natoms, @@ -1134,6 +1416,30 @@ void DP_DeepPotComputeNListf2(DP_DeepPot* dp, dp, nframes, natoms, coord, atype, cell, nghost, nlist, ago, fparam, aparam, energy, force, virial, atomic_energy, atomic_virial); } + +void DP_DeepPotComputeNListf2SP(DP_DeepPot* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepPotComputeNList_variant_sp( + dp, nframes, natoms, coord, spin, atype, cell, nghost, nlist, ago, fparam, + aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); +} + // end multiple frames void DP_DeepPotComputeMixedType(DP_DeepPot* dp, @@ -1280,6 +1586,26 @@ void DP_DeepPotModelDeviComputeNList(DP_DeepPotModelDevi* dp, force, virial, atomic_energy, atomic_virial); } +void DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi* dp, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepPotModelDeviComputeNList_variant_sp( + dp, 1, natoms, coord, spin, atype, cell, nghost, nlist, ago, NULL, NULL, + energy, force, force_mag, virial, atomic_energy, atomic_virial); +} + void DP_DeepPotModelDeviComputeNListf(DP_DeepPotModelDevi* dp, const int natoms, const float* coord, @@ -1298,6 +1624,26 @@ void DP_DeepPotModelDeviComputeNListf(DP_DeepPotModelDevi* dp, force, virial, atomic_energy, atomic_virial); } +void DP_DeepPotModelDeviComputeNListfSP(DP_DeepPotModelDevi* dp, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepPotModelDeviComputeNList_variant_sp( + dp, 1, natoms, coord, spin, atype, cell, nghost, nlist, ago, NULL, NULL, + energy, force, force_mag, virial, atomic_energy, atomic_virial); +} + void DP_DeepPotModelDeviComputeNList2(DP_DeepPotModelDevi* dp, const int nframes, const int natoms, @@ -1319,6 +1665,29 @@ void DP_DeepPotModelDeviComputeNList2(DP_DeepPotModelDevi* dp, aparam, energy, force, virial, atomic_energy, atomic_virial); } +void DP_DeepPotModelDeviComputeNList2SP(DP_DeepPotModelDevi* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepPotModelDeviComputeNList_variant_sp( + dp, nframes, natoms, coord, spin, atype, cell, nghost, nlist, ago, fparam, + aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); +} + void DP_DeepPotModelDeviComputeNListf2(DP_DeepPotModelDevi* dp, const int nframes, const int natoms, @@ -1340,6 +1709,29 @@ void DP_DeepPotModelDeviComputeNListf2(DP_DeepPotModelDevi* dp, aparam, energy, force, virial, atomic_energy, atomic_virial); } +void DP_DeepPotModelDeviComputeNListf2SP(DP_DeepPotModelDevi* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepPotModelDeviComputeNList_variant_sp( + dp, nframes, natoms, coord, spin, atype, cell, nghost, nlist, ago, fparam, + aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); +} + double DP_DeepPotModelDeviGetCutoff(DP_DeepPotModelDevi* dp) { return dp->dp.cutoff(); } diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index 884f76ab6f..bd090a7b08 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -143,6 +143,38 @@ class DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic) = 0; + virtual void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) = 0; + virtual void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) = 0; /** @} */ /** @@ -358,6 +390,34 @@ class DeepPot { const int& ago, const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); + template + void compute(ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + template + void compute(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** @} */ /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial @@ -460,6 +520,38 @@ class DeepPot { const int& ago, const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); + template + void compute(ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + template + void compute(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** @} */ /** * @brief Evaluate the energy, force, and virial with the mixed type diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index ffc3aab08b..dd8b10b375 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -115,6 +115,23 @@ class DeepPotTF : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); + template + void compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); /** * @brief Evaluate the energy, force, and virial with the mixed type *by using this DP. @@ -262,6 +279,38 @@ class DeepPotTF : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); void computew_mixed_type(std::vector& ener, std::vector& force, std::vector& virial, @@ -286,6 +335,25 @@ class DeepPotTF : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); + void extend(int& extend_inum, + std::vector& extend_ilist, + std::vector& extend_numneigh, + std::vector>& extend_neigh, + std::vector& extend_firstneigh, + std::vector& extend_dcoord, + std::vector& extend_atype, + int& extend_nghost, + std::map& new_idx_map, + std::map& old_idx_map, + const InputNlist& lmp_list, + const std::vector& dcoord, + const std::vector& atype, + const int nghost, + const std::vector& spin, + const int numb_types, + const int numb_types_spin, + const std::vector& virtual_len, + const std::vector& spin_norm;); private: tensorflow::Session* session; @@ -301,6 +369,19 @@ class DeepPotTF : public DeepPotBase { std::string model_version; int ntypes; int ntypes_spin; + std::vector virtual_len; + std::vector spin_norm; + int extend_inum; + std::vector extend_ilist; + std::vector extend_numneigh; + std::vector> extend_neigh; + std::vector extend_firstneigh; + std::vector extend_dcoord; + std::vector extend_dtype; + int extend_nghost; + // for spin systems, search new index of atoms by their old index + std::map new_idx_map; + std::map old_idx_map; int dfparam; int daparam; bool aparam_nall; diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc index c184446288..52085748fa 100644 --- a/source/api_cc/src/DeepPot.cc +++ b/source/api_cc/src/DeepPot.cc @@ -218,6 +218,105 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam_); +// support spin +template +void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { + std::vector dener_; + std::vector datom_energy_, datom_virial_; + dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, + ago, fparam_, aparam__, false); + dener = dener_[0]; +} + +template +void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { + std::vector datom_energy_, datom_virial_; + dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, + ago, fparam_, aparam__, false); +} + +template void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -386,6 +485,115 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam_); +// support spin + +template +void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { + std::vector dener_; + dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, + ago, fparam_, aparam__, true); + dener = dener_[0]; +} +template +void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { + dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, + ago, fparam_, aparam__, true); +} + +template void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + // mixed type template void DeepPot::compute_mixed_type(ENERGYTYPE& dener, diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index c03576635a..3d406879de 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -164,12 +164,11 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, std::vector atype_64(datype.begin(), datype.end()); at::Tensor atype_Tensor = torch::from_blob(atype_64.data(), {1, nall_real}, int_option).to(device); - c10::optional mapping_tensor; if (ago == 0) { nlist_data.copy_from_nlist(lmp_list); nlist_data.shuffle_exclude_empty(fwd_map); nlist_data.padding(); - if (do_message_passing == 1 && nghost > 0) { + if (do_message_passing == 1) { int nswap = lmp_list.nswap; torch::Tensor sendproc_tensor = torch::from_blob(lmp_list.sendproc, {nswap}, int32_option); @@ -197,16 +196,11 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, comm_dict.insert("recv_num", recvnum_tensor); comm_dict.insert("communicator", communicator_tensor); } - if (do_message_passing == 1 && nghost == 0) { - // for the situation that no ghost atoms (e.g. serial nopbc) - // set the mapping arange(nloc) is enough - auto option = torch::TensorOptions().device(device).dtype(torch::kInt64); - mapping_tensor = at::arange(nloc_real, option).unsqueeze(0); - } } at::Tensor firstneigh = createNlistTensor(nlist_data.jlist); firstneigh_tensor = firstneigh.to(torch::kInt64).to(device); bool do_atom_virial_tensor = atomic; + c10::optional optional_tensor; c10::optional fparam_tensor; if (!fparam.empty()) { fparam_tensor = @@ -225,15 +219,15 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, .to(device); } c10::Dict outputs = - (do_message_passing == 1 && nghost > 0) + (do_message_passing == 1) ? module .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, - firstneigh_tensor, mapping_tensor, fparam_tensor, + firstneigh_tensor, optional_tensor, fparam_tensor, aparam_tensor, do_atom_virial_tensor, comm_dict) .toGenericDict() : module .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, - firstneigh_tensor, mapping_tensor, fparam_tensor, + firstneigh_tensor, optional_tensor, fparam_tensor, aparam_tensor, do_atom_virial_tensor) .toGenericDict(); c10::IValue energy_ = outputs.at("energy"); diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index 2c09c17a69..f5f6e28c88 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -477,6 +477,8 @@ void DeepPotTF::init(const std::string& model, ntypes = get_scalar("descrpt_attr/ntypes"); try { ntypes_spin = get_scalar("spin_attr/ntypes_spin"); + get_vector(virtual_len, "spin_attr/virtual_len"); + get_vector(spin_norm, "spin_attr/spin_norm"); } catch (const deepmd::deepmd_exception&) { ntypes_spin = 0; } @@ -819,6 +821,193 @@ template void DeepPotTF::compute>( const std::vector& aparam_, const bool atomic); +// support spin +template +void DeepPotTF::compute(ENERGYVTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__, + const bool atomic) { + int nall = datype_.size(); + // if nall==0, unclear nframes, but 1 is ok + int nframes = nall > 0 ? (dcoord_.size() / nall / 3) : 1; + int nloc = nall - nghost; + + extend(extend_inum, extend_ilist, extend_numneigh, extend_neigh, + extend_firstneigh, extend_dcoord, extend_dtype, extend_nghost, + new_idx_map, old_idx_map, lmp_list, dcoord, dtype, nghost, dspin_, + numb_types, numb_types_spin, virtual_len); + // extend_lmp_list = InputNlist(extend_inum, &extend_ilist[0], + // &extend_numneigh[0], &extend_firstneigh[0]); + deepmd_compat::InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], + &extend_numneigh[0], + &extend_firstneigh[0]); + std::vector fparam; + std::vector aparam_; + validate_fparam_aparam(nframes, (aparam_nall ? nall : nloc), fparam_, + aparam__); + tile_fparam_aparam(fparam, nframes, dfparam, fparam_); + tile_fparam_aparam(aparam_, nframes, (aparam_nall ? nall : nloc) * daparam, + aparam__); + std::vector> input_tensors; + // select real atoms + std::vector dcoord, dforce, aparam, datom_energy, datom_virial; + std::vector datype, fwd_map, bkw_map; + int nghost_real, nall_real, nloc_real; + select_real_atoms_coord(dcoord, datype, aparam, nghost_real, fwd_map, bkw_map, + nall_real, nloc_real, extend_dcoord, extend_dtype, + aparam_, extend_nghost, ntypes, nframes, daparam, + nall, aparam_nall); + + if (ago == 0) { + atommap = deepmd::AtomMap(datype.begin(), datype.begin() + nloc_real); + assert(nloc_real == atommap.get_type().size()); + + nlist_data.copy_from_nlist(extend_lmp_list); + nlist_data.shuffle_exclude_empty(fwd_map); + nlist_data.shuffle(atommap); + nlist_data.make_inlist(nlist); + } + + if (dtype == tensorflow::DT_DOUBLE) { + int ret = session_input_tensors( + input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, + atommap, nghost_real, ago, "", aparam_nall); + assert(nloc_real == ret); + if (atomic) { + run_model(dener, dforce, dvirial, datom_energy, datom_virial, + session, input_tensors, atommap, nframes, nghost_real); + } else { + run_model(dener, dforce, dvirial, session, input_tensors, atommap, + nframes, nghost_real); + } + } else { + int ret = session_input_tensors( + input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, + atommap, nghost_real, ago, "", aparam_nall); + assert(nloc_real == ret); + if (atomic) { + run_model(dener, dforce, dvirial, datom_energy, datom_virial, + session, input_tensors, atommap, nframes, nghost_real); + } else { + run_model(dener, dforce, dvirial, session, input_tensors, atommap, + nframes, nghost_real); + } + } + + // bkw map + dforce_tmp.resize(static_cast(nframes) * fwd_map.size() * 3); + datom_energy_.resize(static_cast(nframes) * fwd_map.size()); + datom_virial_.resize(static_cast(nframes) * fwd_map.size() * 9); + select_map(dforce_tmp, dforce, bkw_map, 3, nframes, fwd_map.size(), + nall_real); + select_map(datom_energy_, datom_energy, bkw_map, 1, nframes, + fwd_map.size(), nall_real); + select_map(datom_virial_, datom_virial, bkw_map, 9, nframes, + fwd_map.size(), nall_real); + // backward force and mag. + dforce_.resize(static_cast(nframes) * nall * 3); + dforce_mag_.resize(static_cast(nframes) * nall * 3); + for (int ii = 0; ii < nall; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + int new_idx = new_idx_map[ii]; + dforce_[ii][dd] = dforce_tmp[3 * new_idx + dd]; + if (datype[ii] < numb_types_spin && ii < nlocal) { + dforce_mag_[ii][dd] = dforce_tmp[3 * (new_idx + nlocal) + dd]; + } else if (datype[ii] < numb_types_spin) { + dforce_mag_[ii][dd] = dforce_tmp[3 * (new_idx + nghost) + dd]; + } else { + dforce_mag_[ii][dd] = 0.0; + } + } + } +} + +template void DeepPotTF::compute( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_, + const bool atomic); + +template void DeepPotTF::compute( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_, + const bool atomic); + +template void DeepPotTF::compute>( + std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_, + const bool atomic); + +template void DeepPotTF::compute>( + std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_, + const bool atomic); + +// end support spin + // mixed type template @@ -993,6 +1182,45 @@ void DeepPotTF::computew(std::vector& ener, compute(ener, force, virial, atom_energy, atom_virial, coord, atype, box, nghost, inlist, ago, fparam, aparam, atomic); } +// support spin +void DeepPotTF::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, + atype, box, nghost, inlist, ago, fparam, aparam, atomic); +} +void DeepPotTF::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, + atype, box, nghost, inlist, ago, fparam, aparam, atomic); +} void DeepPotTF::computew_mixed_type(std::vector& ener, std::vector& force, std::vector& virial, @@ -1023,4 +1251,172 @@ void DeepPotTF::computew_mixed_type(std::vector& ener, compute_mixed_type(ener, force, virial, atom_energy, atom_virial, nframes, coord, atype, box, fparam, aparam, atomic); } +void DeepPotTF::extend(int& extend_inum, + std::vector& extend_ilist, + std::vector& extend_numneigh, + std::vector>& extend_neigh, + std::vector& extend_firstneigh, + std::vector& extend_dcoord, + std::vector& extend_atype, + int& extend_nghost, + std::map& new_idx_map, + std::map& old_idx_map, + const InputNlist& lmp_list, + const std::vector& dcoord, + const std::vector& atype, + const int nghost, + const std::vector& spin, + const int numb_types, + const int numb_types_spin, + const std::vector& virtual_len, + const std::vector& spin_norm) { + extend_ilist.clear(); + extend_numneigh.clear(); + extend_neigh.clear(); + extend_firstneigh.clear(); + extend_dcoord.clear(); + extend_atype.clear(); + + int nall = dcoord.size() / 3; + int nloc = nall - nghost; + assert(nloc == lmp_list.inum); + + // record numb_types_real and nloc_virt + int numb_types_real = numb_types - numb_types_spin; + std::map loc_type_count; + std::map::iterator iter = loc_type_count.begin(); + for (int i = 0; i < nloc; i++) { + iter = loc_type_count.find(atype[i]); + if (iter != loc_type_count.end()) { + iter->second += 1; + } else { + loc_type_count.insert(pair(atype[i], 1)); + } + } + assert(numb_types_real - 1 == loc_type_count.rbegin()->first); + int nloc_virt = 0; + for (int i = 0; i < numb_types_spin; i++) { + nloc_virt += loc_type_count[i]; + } + + // record nghost_virt + std::map ghost_type_count; + for (int i = nloc; i < nall; i++) { + iter = ghost_type_count.find(atype[i]); + if (iter != ghost_type_count.end()) { + iter->second += 1; + } else { + ghost_type_count.insert(pair(atype[i], 1)); + } + } + int nghost_virt = 0; + for (int i = 0; i < numb_types_spin; i++) { + nghost_virt += ghost_type_count[i]; + } + + // for extended system, search new index by old index, and vice versa + extend_nghost = nghost + nghost_virt; + int extend_nloc = nloc + nloc_virt; + int extend_nall = extend_nloc + extend_nghost; + std::map cum_loc_type_count; + std::map cum_ghost_type_count; + cum_sum(cum_loc_type_count, loc_type_count); + cum_sum(cum_ghost_type_count, ghost_type_count); + std::vector loc_type_reset(numb_types_real, 0); + std::vector ghost_type_reset(numb_types_real, 0); + + new_idx_map.clear(); + old_idx_map.clear(); + for (int ii = 0; ii < nloc; ii++) { + int new_idx = cum_loc_type_count[atype[ii]] + loc_type_reset[atype[ii]]; + new_idx_map[ii] = new_idx; + old_idx_map[new_idx] = ii; + loc_type_reset[atype[ii]]++; + } + for (int ii = nloc; ii < nall; ii++) { + int new_idx = cum_ghost_type_count[atype[ii]] + + ghost_type_reset[atype[ii]] + extend_nloc; + new_idx_map[ii] = new_idx; + old_idx_map[new_idx] = ii; + ghost_type_reset[atype[ii]]++; + } + + // extend lmp_list + extend_inum = extend_nloc; + + extend_ilist.resize(extend_nloc); + for (int ii = 0; ii < extend_nloc; ii++) { + extend_ilist[ii] = ii; + } + + extend_neigh.resize(extend_nloc); + for (int ii = 0; ii < nloc; ii++) { + int jnum = lmp_list.numneigh[old_idx_map[ii]]; + const int* jlist = lmp_list.firstneigh[old_idx_map[ii]]; + if (atype[old_idx_map[ii]] < numb_types_spin) { + extend_neigh[ii].push_back(ii + nloc); + } + for (int jj = 0; jj < jnum; jj++) { + int new_idx = new_idx_map[jlist[jj]]; + extend_neigh[ii].push_back(new_idx); + if (atype[jlist[jj]] < numb_types_spin && jlist[jj] < nloc) { + extend_neigh[ii].push_back(new_idx + nloc); + } else if (atype[jlist[jj]] < numb_types_spin && jlist[jj] < nall) { + extend_neigh[ii].push_back(new_idx + nghost); + } + } + } + for (int ii = nloc; ii < extend_nloc; ii++) { + extend_neigh[ii].assign(extend_neigh[ii - nloc].begin(), + extend_neigh[ii - nloc].end()); + std::vector::iterator it = + find(extend_neigh[ii].begin(), extend_neigh[ii].end(), ii); + *it = ii - nloc; + } + + extend_firstneigh.resize(extend_nloc); + extend_numneigh.resize(extend_nloc); + for (int ii = 0; ii < extend_nloc; ii++) { + extend_firstneigh[ii] = &extend_neigh[ii][0]; + extend_numneigh[ii] = extend_neigh[ii].size(); + } + + // extend coord + extend_dcoord.resize(static_cast(extend_nall) * 3); + for (int ii = 0; ii < nloc; ii++) { + for (int jj = 0; jj < 3; jj++) { + extend_dcoord[new_idx_map[ii] * 3 + jj] = dcoord[ii * 3 + jj]; + if (atype[ii] < numb_types_spin) { + double temp_dcoord = dcoord[ii * 3 + jj] + spin[ii * 3 + jj] / + spin_norm[atype[ii]] * + virtual_len[atype[ii]]; + extend_dcoord[(new_idx_map[ii] + nloc) * 3 + jj] = temp_dcoord; + } + } + } + for (int ii = nloc; ii < nall; ii++) { + for (int jj = 0; jj < 3; jj++) { + extend_dcoord[new_idx_map[ii] * 3 + jj] = dcoord[ii * 3 + jj]; + if (atype[ii] < numb_types_spin) { + double temp_dcoord = dcoord[ii * 3 + jj] + spin[ii * 3 + jj] / + spin_norm[atype[ii]] * + virtual_len[atype[ii]]; + extend_dcoord[(new_idx_map[ii] + nghost) * 3 + jj] = temp_dcoord; + } + } + } + + // extend atype + extend_atype.resize(extend_nall); + for (int ii = 0; ii < nall; ii++) { + extend_atype[new_idx_map[ii]] = atype[ii]; + if (atype[ii] < numb_types_spin) { + if (ii < nloc) { + extend_atype[new_idx_map[ii] + nloc] = atype[ii] + numb_types_real; + } else { + extend_atype[new_idx_map[ii] + nghost] = atype[ii] + numb_types_real; + } + } + } +} #endif diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 2cb6cfacd4..b902f2a4c0 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -490,7 +490,7 @@ void PairDeepMD::compute(int eflag, int vflag) { // get spin for (int ii = 0; ii < nall; ++ii) { for (int dd = 0; dd < 3; ++dd) { - dspin[ii * 3 + dd] = sp[ii][dd]; + dspin[ii * 3 + dd] = sp[ii][dd] * sp[ii][3]; // get real spin vector } } } @@ -502,6 +502,7 @@ void PairDeepMD::compute(int eflag, int vflag) { double dener(0); vector dforce(nall * 3); + vector dforce_mag(nall * 3); vector dvirial(9, 0); vector dcoord(nall * 3, 0.); vector dbox(9, 0); @@ -566,15 +567,6 @@ void PairDeepMD::compute(int eflag, int vflag) { commdata_->firstrecv, commdata_->sendlist, commdata_->sendproc, commdata_->recvproc, &world); deepmd_compat::InputNlist extend_lmp_list; - if (atom->sp_flag) { - extend(extend_inum, extend_ilist, extend_numneigh, extend_neigh, - extend_firstneigh, extend_dcoord, extend_dtype, extend_nghost, - new_idx_map, old_idx_map, lmp_list, dcoord, dtype, nghost, dspin, - numb_types, numb_types_spin, virtual_len); - extend_lmp_list = - deepmd_compat::InputNlist(extend_inum, &extend_ilist[0], - &extend_numneigh[0], &extend_firstneigh[0]); - } if (single_model || multi_models_no_mod_devi) { // cvflag_atom is the right flag for the cvatom matrix if (!(eflag_atom || cvflag_atom)) { @@ -586,11 +578,10 @@ void PairDeepMD::compute(int eflag, int vflag) { error->one(FLERR, e.what()); } } else { - dforce.resize(static_cast(extend_inum + extend_nghost) * 3); try { - deep_pot.compute(dener, dforce, dvirial, extend_dcoord, - extend_dtype, dbox, extend_nghost, extend_lmp_list, - ago, fparam, daparam); + deep_pot.compute(dener, dforce, dforce_mag, dvirial, dcoord, dspin, + dtype, dbox, nghost, lmp_list, ago, fparam, + daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -609,11 +600,10 @@ void PairDeepMD::compute(int eflag, int vflag) { error->one(FLERR, e.what()); } } else { - dforce.resize(static_cast(extend_inum + extend_nghost) * 3); try { - deep_pot.compute(dener, dforce, dvirial, extend_dcoord, - extend_dtype, dbox, extend_nghost, extend_lmp_list, - ago, fparam, daparam); + deep_pot.compute(dener, dforce, dforce_mag, dvirial, deatom, dvatom, + dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, + fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -662,22 +652,43 @@ void PairDeepMD::compute(int eflag, int vflag) { vector all_energy; vector> all_atom_energy; vector> all_atom_virial; - if (!(eflag_atom || cvflag_atom)) { - try { - deep_pot_model_devi.compute(all_energy, all_force, all_virial, dcoord, - dtype, dbox, nghost, lmp_list, ago, - fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); + if (!atom->sp_flag) { + if (!(eflag_atom || cvflag_atom)) { + try { + deep_pot_model_devi.compute(all_energy, all_force, all_virial, + dcoord, dtype, dbox, nghost, lmp_list, + ago, fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + } else { + try { + deep_pot_model_devi.compute(all_energy, all_force, all_virial, + all_atom_energy, all_atom_virial, + dcoord, dtype, dbox, nghost, lmp_list, + ago, fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } } } else { - try { - deep_pot_model_devi.compute(all_energy, all_force, all_virial, - all_atom_energy, all_atom_virial, dcoord, - dtype, dbox, nghost, lmp_list, ago, - fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); + if (!(eflag_atom || cvflag_atom)) { + try { + deep_pot_model_devi.compute(all_energy, all_force, all_force_mag, + all_virial, dcoord, dtype, dbox, dspin, + nghost, lmp_list, ago, fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + } else { + try { + deep_pot_model_devi.compute( + all_energy, all_force, all_force_mag, all_virial, + all_atom_energy, all_atom_virial, dcoord, dspin, dtype, dbox, + nghost, lmp_list, ago, fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } } } // deep_pot_model_devi.compute_avg (dener, all_energy); @@ -687,6 +698,7 @@ void PairDeepMD::compute(int eflag, int vflag) { // deep_pot_model_devi.compute_avg (dvatom, all_atom_virial); dener = all_energy[0]; dforce = all_force[0]; + dforce_mag = all_force_mag[0]; dvirial = all_virial[0]; if (eflag_atom) { deatom = all_atom_energy[0]; @@ -738,6 +750,8 @@ void PairDeepMD::compute(int eflag, int vflag) { } vector std_f; vector tmp_avg_f; + vector std_fm; + vector tmp_avg_fm; deep_pot_model_devi.compute_avg(tmp_avg_f, all_force); deep_pot_model_devi.compute_std_f(std_f, tmp_avg_f, all_force); if (out_rel == 1) { @@ -750,6 +764,19 @@ void PairDeepMD::compute(int eflag, int vflag) { MPI_Reduce(&max, &all_f_max, 1, MPI_DOUBLE, MPI_MAX, 0, world); MPI_Reduce(&avg, &all_f_avg, 1, MPI_DOUBLE, MPI_SUM, 0, world); all_f_avg /= double(atom->natoms); + if (atom->sp_flag) { + deep_pot_model_devi.compute_avg(tmp_avg_fm, all_force_mag); + deep_pot_model_devi.compute_std_f(std_fm, tmp_avg_fm, all_force_mag); + if (out_rel == 1) { + deep_pot_model_devi.compute_relative_std_f(std_fm, tmp_avg_fm, eps); + } + min = numeric_limits::max(), max = 0, avg = 0; + ana_st(max, min, avg, std_fm, nlocal); + MPI_Reduce(&min, &all_fm_min, 1, MPI_DOUBLE, MPI_MIN, 0, world); + MPI_Reduce(&max, &all_fm_max, 1, MPI_DOUBLE, MPI_MAX, 0, world); + MPI_Reduce(&avg, &all_fm_avg, 1, MPI_DOUBLE, MPI_SUM, 0, world); + all_fm_avg /= double(all_nlocal); + } // std v std::vector send_v(9 * numb_models); std::vector recv_v(9 * numb_models); @@ -796,12 +823,25 @@ void PairDeepMD::compute(int eflag, int vflag) { all_f_max *= force_unit_cvt_factor; all_f_min *= force_unit_cvt_factor; all_f_avg *= force_unit_cvt_factor; - fp << setw(12) << update->ntimestep << " " << setw(18) << all_v_max - << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg - << " " << setw(18) << all_f_max << " " << setw(18) << all_f_min - << " " << setw(18) << all_f_avg; + if (!atom->sp_flag) { + fp << setw(12) << update->ntimestep << " " << setw(18) << all_v_max + << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg + << " " << setw(18) << all_f_max << " " << setw(18) << all_f_min + << " " << setw(18) << all_f_avg; + } else { + all_fm_max *= force_unit_cvt_factor; + all_fm_min *= force_unit_cvt_factor; + all_fm_avg *= force_unit_cvt_factor; + fp << setw(12) << update->ntimestep << " " << setw(18) << all_v_max + << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg + << " " << setw(18) << all_fr_max << " " << setw(18) << all_fr_min + << " " << setw(18) << all_fr_avg << " " << setw(18) << all_fm_max + << " " << setw(18) << all_fm_min << " " << setw(18) + << all_fm_avg; + } } if (out_each == 1) { + // need support for spin atomic force. vector std_f_all(atom->natoms); // Gather std_f and tags tagint *tag = atom->tag; @@ -849,6 +889,7 @@ void PairDeepMD::compute(int eflag, int vflag) { } } else { if (numb_models == 1) { + // need support for spin try { deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox); } catch (deepmd_compat::deepmd_exception &e) { @@ -871,16 +912,9 @@ void PairDeepMD::compute(int eflag, int vflag) { const double hbar = 6.5821191e-04; for (int ii = 0; ii < nall; ++ii) { for (int dd = 0; dd < 3; ++dd) { - int new_idx = new_idx_map[ii]; - f[ii][dd] += - scale[1][1] * dforce[3 * new_idx + dd] * force_unit_cvt_factor; - if (dtype[ii] < numb_types_spin && ii < nlocal) { - fm[ii][dd] += scale[1][1] * dforce[3 * (new_idx + nlocal) + dd] / - (hbar / spin_norm[dtype[ii]]) * force_unit_cvt_factor; - } else if (dtype[ii] < numb_types_spin) { - fm[ii][dd] += scale[1][1] * dforce[3 * (new_idx + nghost) + dd] / - (hbar / spin_norm[dtype[ii]]) * force_unit_cvt_factor; - } + f[ii][dd] += scale[1][1] * dforce[3 * ii + dd] * force_unit_cvt_factor; + fm[ii][dd] += scale[1][1] * dforce_mag[3 * ii + dd] / + (hbar / sp[ii][3]) * force_unit_cvt_factor; } } } @@ -1138,15 +1172,24 @@ void PairDeepMD::settings(int narg, char **arg) { if (!is_restart) { fp.open(out_file); fp << scientific; - fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" - << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" - << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" - << setw(18 + 1) << "avg_devi_f"; - if (out_each) { - // at this time, we don't know how many atoms - fp << setw(18 + 1) << "atm_devi_f(N)"; + if (!atom->sp_flag) { + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" + << setw(18 + 1) << "avg_devi_f"; + if (out_each) { + // at this time, we don't know how many atoms + fp << setw(18 + 1) << "atm_devi_f(N)"; + } + fp << endl; + } else { + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_fr" << setw(18 + 1) << "min_devi_fr" + << setw(18 + 1) << "avg_devi_fr" << setw(18 + 1) << "max_devi_fm" + << setw(18 + 1) << "min_devi_fm" << setw(18 + 1) << "avg_devi_fm" + << endl; } - fp << endl; } else { fp.open(out_file, std::ofstream::out | std::ofstream::app); fp << scientific; @@ -1198,7 +1241,12 @@ void PairDeepMD::settings(int narg, char **arg) { } } - comm_reverse = numb_models * 3; + // comm_reverse = numb_models * 3; + if (atom->sp_flag) { + comm_reverse = numb_models * 3 * 2; + } else { + comm_reverse = numb_models * 3; + } all_force.resize(numb_models); } @@ -1351,11 +1399,24 @@ int PairDeepMD::pack_reverse_comm(int n, int first, double *buf) { m = 0; last = first + n; - for (i = first; i < last; i++) { - for (int dd = 0; dd < numb_models; ++dd) { - buf[m++] = all_force[dd][3 * i + 0]; - buf[m++] = all_force[dd][3 * i + 1]; - buf[m++] = all_force[dd][3 * i + 2]; + if (atom->sp_flag) { + for (i = first; i < last; i++) { + for (int dd = 0; dd < numb_models; ++dd) { + buf[m++] = all_force[dd][3 * i + 0]; + buf[m++] = all_force[dd][3 * i + 1]; + buf[m++] = all_force[dd][3 * i + 2]; + buf[m++] = all_force_mag[dd][3 * i + 0]; + buf[m++] = all_force_mag[dd][3 * i + 1]; + buf[m++] = all_force_mag[dd][3 * i + 2]; + } + } + } else { + for (i = first; i < last; i++) { + for (int dd = 0; dd < numb_models; ++dd) { + buf[m++] = all_force[dd][3 * i + 0]; + buf[m++] = all_force[dd][3 * i + 1]; + buf[m++] = all_force[dd][3 * i + 2]; + } } } return m; @@ -1367,12 +1428,26 @@ void PairDeepMD::unpack_reverse_comm(int n, int *list, double *buf) { int i, j, m; m = 0; - for (i = 0; i < n; i++) { - j = list[i]; - for (int dd = 0; dd < numb_models; ++dd) { - all_force[dd][3 * j + 0] += buf[m++]; - all_force[dd][3 * j + 1] += buf[m++]; - all_force[dd][3 * j + 2] += buf[m++]; + if (atom->sp_flag) { + for (i = 0; i < n; i++) { + j = list[i]; + for (int dd = 0; dd < numb_models; ++dd) { + all_force[dd][3 * j + 0] += buf[m++]; + all_force[dd][3 * j + 1] += buf[m++]; + all_force[dd][3 * j + 2] += buf[m++]; + all_force_mag[dd][3 * j + 0] += buf[m++]; + all_force_mag[dd][3 * j + 1] += buf[m++]; + all_force_mag[dd][3 * j + 2] += buf[m++]; + } + } + } else { + for (i = 0; i < n; i++) { + j = list[i]; + for (int dd = 0; dd < numb_models; ++dd) { + all_force[dd][3 * j + 0] += buf[m++]; + all_force[dd][3 * j + 1] += buf[m++]; + all_force[dd][3 * j + 2] += buf[m++]; + } } } } From 532e30952311a64bc830f3db65836c3481b72886 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 21 Sep 2024 11:59:31 +0800 Subject: [PATCH 003/193] fix(pt): make `state_dict` safe for `weights_only` (#4148) See #4147 and #4143. We can first make `state_dict` safe for `weights_only`, then make a breaking change when loading `state_dict` in the future. ## Summary by CodeRabbit - **New Features** - Enhanced model saving functionality by ensuring learning rates are consistently stored as floats, improving type consistency. - **Bug Fixes** - Updated model loading behavior in tests to focus solely on model weights, which may resolve issues related to state dictionary loading. --- deepmd/pt/train/training.py | 7 +++++-- source/tests/pt/test_change_bias.py | 10 +++++++--- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index a7b9e25b4e..c3d603dadd 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -1030,10 +1030,13 @@ def save_model(self, save_path, lr=0.0, step=0): if dist.is_available() and dist.is_initialized() else self.wrapper ) - module.train_infos["lr"] = lr + module.train_infos["lr"] = float(lr) module.train_infos["step"] = step + optim_state_dict = deepcopy(self.optimizer.state_dict()) + for item in optim_state_dict["param_groups"]: + item["lr"] = float(item["lr"]) torch.save( - {"model": module.state_dict(), "optimizer": self.optimizer.state_dict()}, + {"model": module.state_dict(), "optimizer": optim_state_dict}, save_path, ) checkpoint_dir = save_path.parent diff --git a/source/tests/pt/test_change_bias.py b/source/tests/pt/test_change_bias.py index f76be40b3f..febc439f50 100644 --- a/source/tests/pt/test_change_bias.py +++ b/source/tests/pt/test_change_bias.py @@ -92,7 +92,9 @@ def test_change_bias_with_data(self): run_dp( f"dp --pt change-bias {self.model_path!s} -s {self.data_file[0]} -o {self.model_path_data_bias!s}" ) - state_dict = torch.load(str(self.model_path_data_bias), map_location=DEVICE) + state_dict = torch.load( + str(self.model_path_data_bias), map_location=DEVICE, weights_only=True + ) model_params = state_dict["model"]["_extra_state"]["model_params"] model_for_wrapper = get_model_for_wrapper(model_params) wrapper = ModelWrapper(model_for_wrapper) @@ -114,7 +116,7 @@ def test_change_bias_with_data_sys_file(self): f"dp --pt change-bias {self.model_path!s} -f {tmp_file.name} -o {self.model_path_data_file_bias!s}" ) state_dict = torch.load( - str(self.model_path_data_file_bias), map_location=DEVICE + str(self.model_path_data_file_bias), map_location=DEVICE, weights_only=True ) model_params = state_dict["model"]["_extra_state"]["model_params"] model_for_wrapper = get_model_for_wrapper(model_params) @@ -134,7 +136,9 @@ def test_change_bias_with_user_defined(self): run_dp( f"dp --pt change-bias {self.model_path!s} -b {' '.join([str(_) for _ in user_bias])} -o {self.model_path_user_bias!s}" ) - state_dict = torch.load(str(self.model_path_user_bias), map_location=DEVICE) + state_dict = torch.load( + str(self.model_path_user_bias), map_location=DEVICE, weights_only=True + ) model_params = state_dict["model"]["_extra_state"]["model_params"] model_for_wrapper = get_model_for_wrapper(model_params) wrapper = ModelWrapper(model_for_wrapper) From 6010c7305c551f78c7e9b6ab55984b699578a920 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 21 Sep 2024 10:44:01 -0400 Subject: [PATCH 004/193] chore(pt): move `deepmd.pt.infer.deep_eval.eval_model` to tests (#4153) Per discussion in https://github.com/deepmodeling/deepmd-kit/pull/4142#issuecomment-2359848991. It should not be a public API as it lacks maintainance. ## Summary by CodeRabbit - **New Features** - Introduced a new `eval_model` function in the testing module to enhance model evaluation capabilities with various input configurations. - **Bug Fixes** - Removed the old `eval_model` function from the main module to streamline functionality and improve code organization. - **Refactor** - Consolidated the import of `eval_model` to a common module across multiple test files for better organization and reduced dependencies. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> --- deepmd/pt/infer/deep_eval.py | 227 ----------------- source/tests/pt/common.py | 239 ++++++++++++++++++ source/tests/pt/model/test_autodiff.py | 4 +- source/tests/pt/model/test_forward_lower.py | 6 +- source/tests/pt/model/test_null_input.py | 6 +- source/tests/pt/model/test_permutation.py | 6 +- .../pt/model/test_permutation_denoise.py | 6 +- source/tests/pt/model/test_rot.py | 6 +- source/tests/pt/model/test_rot_denoise.py | 6 +- source/tests/pt/model/test_smooth.py | 6 +- source/tests/pt/model/test_smooth_denoise.py | 6 +- source/tests/pt/model/test_trans.py | 6 +- source/tests/pt/model/test_trans_denoise.py | 6 +- source/tests/pt/model/test_unused_params.py | 6 +- 14 files changed, 275 insertions(+), 261 deletions(-) diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index 353109d650..d5eae71731 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -602,230 +602,3 @@ def eval_typeebd(self) -> np.ndarray: def get_model_def_script(self) -> str: """Get model defination script.""" return self.model_def_script - - -# For tests only -def eval_model( - model, - coords: Union[np.ndarray, torch.Tensor], - cells: Optional[Union[np.ndarray, torch.Tensor]], - atom_types: Union[np.ndarray, torch.Tensor, List[int]], - spins: Optional[Union[np.ndarray, torch.Tensor]] = None, - atomic: bool = False, - infer_batch_size: int = 2, - denoise: bool = False, -): - model = model.to(DEVICE) - energy_out = [] - atomic_energy_out = [] - force_out = [] - force_mag_out = [] - virial_out = [] - atomic_virial_out = [] - updated_coord_out = [] - logits_out = [] - err_msg = ( - f"All inputs should be the same format, " - f"but found {type(coords)}, {type(cells)}, {type(atom_types)} instead! " - ) - return_tensor = True - if isinstance(coords, torch.Tensor): - if cells is not None: - assert isinstance(cells, torch.Tensor), err_msg - if spins is not None: - assert isinstance(spins, torch.Tensor), err_msg - assert isinstance(atom_types, torch.Tensor) or isinstance(atom_types, list) - atom_types = torch.tensor(atom_types, dtype=torch.long, device=DEVICE) - elif isinstance(coords, np.ndarray): - if cells is not None: - assert isinstance(cells, np.ndarray), err_msg - if spins is not None: - assert isinstance(spins, np.ndarray), err_msg - assert isinstance(atom_types, np.ndarray) or isinstance(atom_types, list) - atom_types = np.array(atom_types, dtype=np.int32) - return_tensor = False - - nframes = coords.shape[0] - if len(atom_types.shape) == 1: - natoms = len(atom_types) - if isinstance(atom_types, torch.Tensor): - atom_types = torch.tile(atom_types.unsqueeze(0), [nframes, 1]).reshape( - nframes, -1 - ) - else: - atom_types = np.tile(atom_types, nframes).reshape(nframes, -1) - else: - natoms = len(atom_types[0]) - - coord_input = torch.tensor( - coords.reshape([-1, natoms, 3]), dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE - ) - spin_input = None - if spins is not None: - spin_input = torch.tensor( - spins.reshape([-1, natoms, 3]), - dtype=GLOBAL_PT_FLOAT_PRECISION, - device=DEVICE, - ) - has_spin = getattr(model, "has_spin", False) - if callable(has_spin): - has_spin = has_spin() - type_input = torch.tensor(atom_types, dtype=torch.long, device=DEVICE) - box_input = None - if cells is None: - pbc = False - else: - pbc = True - box_input = torch.tensor( - cells.reshape([-1, 3, 3]), dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE - ) - num_iter = int((nframes + infer_batch_size - 1) / infer_batch_size) - - for ii in range(num_iter): - batch_coord = coord_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] - batch_atype = type_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] - batch_box = None - batch_spin = None - if spin_input is not None: - batch_spin = spin_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] - if pbc: - batch_box = box_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] - input_dict = { - "coord": batch_coord, - "atype": batch_atype, - "box": batch_box, - "do_atomic_virial": atomic, - } - if has_spin: - input_dict["spin"] = batch_spin - batch_output = model(**input_dict) - if isinstance(batch_output, tuple): - batch_output = batch_output[0] - if not return_tensor: - if "energy" in batch_output: - energy_out.append(batch_output["energy"].detach().cpu().numpy()) - if "atom_energy" in batch_output: - atomic_energy_out.append( - batch_output["atom_energy"].detach().cpu().numpy() - ) - if "force" in batch_output: - force_out.append(batch_output["force"].detach().cpu().numpy()) - if "force_mag" in batch_output: - force_mag_out.append(batch_output["force_mag"].detach().cpu().numpy()) - if "virial" in batch_output: - virial_out.append(batch_output["virial"].detach().cpu().numpy()) - if "atom_virial" in batch_output: - atomic_virial_out.append( - batch_output["atom_virial"].detach().cpu().numpy() - ) - if "updated_coord" in batch_output: - updated_coord_out.append( - batch_output["updated_coord"].detach().cpu().numpy() - ) - if "logits" in batch_output: - logits_out.append(batch_output["logits"].detach().cpu().numpy()) - else: - if "energy" in batch_output: - energy_out.append(batch_output["energy"]) - if "atom_energy" in batch_output: - atomic_energy_out.append(batch_output["atom_energy"]) - if "force" in batch_output: - force_out.append(batch_output["force"]) - if "force_mag" in batch_output: - force_mag_out.append(batch_output["force_mag"]) - if "virial" in batch_output: - virial_out.append(batch_output["virial"]) - if "atom_virial" in batch_output: - atomic_virial_out.append(batch_output["atom_virial"]) - if "updated_coord" in batch_output: - updated_coord_out.append(batch_output["updated_coord"]) - if "logits" in batch_output: - logits_out.append(batch_output["logits"]) - if not return_tensor: - energy_out = ( - np.concatenate(energy_out) if energy_out else np.zeros([nframes, 1]) # pylint: disable=no-explicit-dtype - ) - atomic_energy_out = ( - np.concatenate(atomic_energy_out) - if atomic_energy_out - else np.zeros([nframes, natoms, 1]) # pylint: disable=no-explicit-dtype - ) - force_out = ( - np.concatenate(force_out) if force_out else np.zeros([nframes, natoms, 3]) # pylint: disable=no-explicit-dtype - ) - force_mag_out = ( - np.concatenate(force_mag_out) - if force_mag_out - else np.zeros([nframes, natoms, 3]) # pylint: disable=no-explicit-dtype - ) - virial_out = ( - np.concatenate(virial_out) if virial_out else np.zeros([nframes, 3, 3]) # pylint: disable=no-explicit-dtype - ) - atomic_virial_out = ( - np.concatenate(atomic_virial_out) - if atomic_virial_out - else np.zeros([nframes, natoms, 3, 3]) # pylint: disable=no-explicit-dtype - ) - updated_coord_out = ( - np.concatenate(updated_coord_out) if updated_coord_out else None - ) - logits_out = np.concatenate(logits_out) if logits_out else None - else: - energy_out = ( - torch.cat(energy_out) - if energy_out - else torch.zeros( - [nframes, 1], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE - ) - ) - atomic_energy_out = ( - torch.cat(atomic_energy_out) - if atomic_energy_out - else torch.zeros( - [nframes, natoms, 1], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE - ) - ) - force_out = ( - torch.cat(force_out) - if force_out - else torch.zeros( - [nframes, natoms, 3], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE - ) - ) - force_mag_out = ( - torch.cat(force_mag_out) - if force_mag_out - else torch.zeros( - [nframes, natoms, 3], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE - ) - ) - virial_out = ( - torch.cat(virial_out) - if virial_out - else torch.zeros( - [nframes, 3, 3], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE - ) - ) - atomic_virial_out = ( - torch.cat(atomic_virial_out) - if atomic_virial_out - else torch.zeros( - [nframes, natoms, 3, 3], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE - ) - ) - updated_coord_out = torch.cat(updated_coord_out) if updated_coord_out else None - logits_out = torch.cat(logits_out) if logits_out else None - if denoise: - return updated_coord_out, logits_out - else: - results_dict = { - "energy": energy_out, - "force": force_out, - "virial": virial_out, - } - if has_spin: - results_dict["force_mag"] = force_mag_out - if atomic: - results_dict["atom_energy"] = atomic_energy_out - results_dict["atom_virial"] = atomic_virial_out - return results_dict diff --git a/source/tests/pt/common.py b/source/tests/pt/common.py index 8886522360..16b343be8a 100644 --- a/source/tests/pt/common.py +++ b/source/tests/pt/common.py @@ -1,7 +1,20 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + List, + Optional, + Union, +) + +import numpy as np +import torch + from deepmd.main import ( main, ) +from deepmd.pt.utils.env import ( + DEVICE, + GLOBAL_PT_FLOAT_PRECISION, +) def run_dp(cmd: str) -> int: @@ -27,3 +40,229 @@ def run_dp(cmd: str) -> int: main(cmds) return 0 + + +def eval_model( + model, + coords: Union[np.ndarray, torch.Tensor], + cells: Optional[Union[np.ndarray, torch.Tensor]], + atom_types: Union[np.ndarray, torch.Tensor, List[int]], + spins: Optional[Union[np.ndarray, torch.Tensor]] = None, + atomic: bool = False, + infer_batch_size: int = 2, + denoise: bool = False, +): + model = model.to(DEVICE) + energy_out = [] + atomic_energy_out = [] + force_out = [] + force_mag_out = [] + virial_out = [] + atomic_virial_out = [] + updated_coord_out = [] + logits_out = [] + err_msg = ( + f"All inputs should be the same format, " + f"but found {type(coords)}, {type(cells)}, {type(atom_types)} instead! " + ) + return_tensor = True + if isinstance(coords, torch.Tensor): + if cells is not None: + assert isinstance(cells, torch.Tensor), err_msg + if spins is not None: + assert isinstance(spins, torch.Tensor), err_msg + assert isinstance(atom_types, torch.Tensor) or isinstance(atom_types, list) + atom_types = torch.tensor(atom_types, dtype=torch.int32, device=DEVICE) + elif isinstance(coords, np.ndarray): + if cells is not None: + assert isinstance(cells, np.ndarray), err_msg + if spins is not None: + assert isinstance(spins, np.ndarray), err_msg + assert isinstance(atom_types, np.ndarray) or isinstance(atom_types, list) + atom_types = np.array(atom_types, dtype=np.int32) + return_tensor = False + + nframes = coords.shape[0] + if len(atom_types.shape) == 1: + natoms = len(atom_types) + if isinstance(atom_types, torch.Tensor): + atom_types = torch.tile(atom_types.unsqueeze(0), [nframes, 1]).reshape( + nframes, -1 + ) + else: + atom_types = np.tile(atom_types, nframes).reshape(nframes, -1) + else: + natoms = len(atom_types[0]) + + coord_input = torch.tensor( + coords.reshape([-1, natoms, 3]), dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE + ) + spin_input = None + if spins is not None: + spin_input = torch.tensor( + spins.reshape([-1, natoms, 3]), + dtype=GLOBAL_PT_FLOAT_PRECISION, + device=DEVICE, + ) + has_spin = getattr(model, "has_spin", False) + if callable(has_spin): + has_spin = has_spin() + type_input = torch.tensor(atom_types, dtype=torch.long, device=DEVICE) + box_input = None + if cells is None: + pbc = False + else: + pbc = True + box_input = torch.tensor( + cells.reshape([-1, 3, 3]), dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE + ) + num_iter = int((nframes + infer_batch_size - 1) / infer_batch_size) + + for ii in range(num_iter): + batch_coord = coord_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + batch_atype = type_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + batch_box = None + batch_spin = None + if spin_input is not None: + batch_spin = spin_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + if pbc: + batch_box = box_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + input_dict = { + "coord": batch_coord, + "atype": batch_atype, + "box": batch_box, + "do_atomic_virial": atomic, + } + if has_spin: + input_dict["spin"] = batch_spin + batch_output = model(**input_dict) + if isinstance(batch_output, tuple): + batch_output = batch_output[0] + if not return_tensor: + if "energy" in batch_output: + energy_out.append(batch_output["energy"].detach().cpu().numpy()) + if "atom_energy" in batch_output: + atomic_energy_out.append( + batch_output["atom_energy"].detach().cpu().numpy() + ) + if "force" in batch_output: + force_out.append(batch_output["force"].detach().cpu().numpy()) + if "force_mag" in batch_output: + force_mag_out.append(batch_output["force_mag"].detach().cpu().numpy()) + if "virial" in batch_output: + virial_out.append(batch_output["virial"].detach().cpu().numpy()) + if "atom_virial" in batch_output: + atomic_virial_out.append( + batch_output["atom_virial"].detach().cpu().numpy() + ) + if "updated_coord" in batch_output: + updated_coord_out.append( + batch_output["updated_coord"].detach().cpu().numpy() + ) + if "logits" in batch_output: + logits_out.append(batch_output["logits"].detach().cpu().numpy()) + else: + if "energy" in batch_output: + energy_out.append(batch_output["energy"]) + if "atom_energy" in batch_output: + atomic_energy_out.append(batch_output["atom_energy"]) + if "force" in batch_output: + force_out.append(batch_output["force"]) + if "force_mag" in batch_output: + force_mag_out.append(batch_output["force_mag"]) + if "virial" in batch_output: + virial_out.append(batch_output["virial"]) + if "atom_virial" in batch_output: + atomic_virial_out.append(batch_output["atom_virial"]) + if "updated_coord" in batch_output: + updated_coord_out.append(batch_output["updated_coord"]) + if "logits" in batch_output: + logits_out.append(batch_output["logits"]) + if not return_tensor: + energy_out = ( + np.concatenate(energy_out) if energy_out else np.zeros([nframes, 1]) # pylint: disable=no-explicit-dtype + ) + atomic_energy_out = ( + np.concatenate(atomic_energy_out) + if atomic_energy_out + else np.zeros([nframes, natoms, 1]) # pylint: disable=no-explicit-dtype + ) + force_out = ( + np.concatenate(force_out) if force_out else np.zeros([nframes, natoms, 3]) # pylint: disable=no-explicit-dtype + ) + force_mag_out = ( + np.concatenate(force_mag_out) + if force_mag_out + else np.zeros([nframes, natoms, 3]) # pylint: disable=no-explicit-dtype + ) + virial_out = ( + np.concatenate(virial_out) if virial_out else np.zeros([nframes, 3, 3]) # pylint: disable=no-explicit-dtype + ) + atomic_virial_out = ( + np.concatenate(atomic_virial_out) + if atomic_virial_out + else np.zeros([nframes, natoms, 3, 3]) # pylint: disable=no-explicit-dtype + ) + updated_coord_out = ( + np.concatenate(updated_coord_out) if updated_coord_out else None + ) + logits_out = np.concatenate(logits_out) if logits_out else None + else: + energy_out = ( + torch.cat(energy_out) + if energy_out + else torch.zeros( + [nframes, 1], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE + ) + ) + atomic_energy_out = ( + torch.cat(atomic_energy_out) + if atomic_energy_out + else torch.zeros( + [nframes, natoms, 1], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE + ) + ) + force_out = ( + torch.cat(force_out) + if force_out + else torch.zeros( + [nframes, natoms, 3], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE + ) + ) + force_mag_out = ( + torch.cat(force_mag_out) + if force_mag_out + else torch.zeros( + [nframes, natoms, 3], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE + ) + ) + virial_out = ( + torch.cat(virial_out) + if virial_out + else torch.zeros( + [nframes, 3, 3], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE + ) + ) + atomic_virial_out = ( + torch.cat(atomic_virial_out) + if atomic_virial_out + else torch.zeros( + [nframes, natoms, 3, 3], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE + ) + ) + updated_coord_out = torch.cat(updated_coord_out) if updated_coord_out else None + logits_out = torch.cat(logits_out) if logits_out else None + if denoise: + return updated_coord_out, logits_out + else: + results_dict = { + "energy": energy_out, + "force": force_out, + "virial": virial_out, + } + if has_spin: + results_dict["force_mag"] = force_mag_out + if atomic: + results_dict["atom_energy"] = atomic_energy_out + results_dict["atom_virial"] = atomic_virial_out + return results_dict diff --git a/source/tests/pt/model/test_autodiff.py b/source/tests/pt/model/test_autodiff.py index d891583491..1adcff55fc 100644 --- a/source/tests/pt/model/test_autodiff.py +++ b/source/tests/pt/model/test_autodiff.py @@ -21,8 +21,10 @@ dtype = torch.float64 -from .test_permutation import ( +from ..common import ( eval_model, +) +from .test_permutation import ( model_dpa1, model_dpa2, model_hybrid, diff --git a/source/tests/pt/model/test_forward_lower.py b/source/tests/pt/model/test_forward_lower.py index c9857a6343..87a3f5b06e 100644 --- a/source/tests/pt/model/test_forward_lower.py +++ b/source/tests/pt/model/test_forward_lower.py @@ -4,9 +4,6 @@ import torch -from deepmd.pt.infer.deep_eval import ( - eval_model, -) from deepmd.pt.model.model import ( get_model, ) @@ -20,6 +17,9 @@ from ...seed import ( GLOBAL_SEED, ) +from ..common import ( + eval_model, +) from .test_permutation import ( # model_dpau, model_dpa1, model_dpa2, diff --git a/source/tests/pt/model/test_null_input.py b/source/tests/pt/model/test_null_input.py index 1dca7ee119..a2e0fa66db 100644 --- a/source/tests/pt/model/test_null_input.py +++ b/source/tests/pt/model/test_null_input.py @@ -5,9 +5,6 @@ import numpy as np import torch -from deepmd.pt.infer.deep_eval import ( - eval_model, -) from deepmd.pt.model.model import ( get_model, get_zbl_model, @@ -22,6 +19,9 @@ from ...seed import ( GLOBAL_SEED, ) +from ..common import ( + eval_model, +) from .test_permutation import ( model_dpa1, model_dpa2, diff --git a/source/tests/pt/model/test_permutation.py b/source/tests/pt/model/test_permutation.py index f5edc6ef64..2fbc5fde3c 100644 --- a/source/tests/pt/model/test_permutation.py +++ b/source/tests/pt/model/test_permutation.py @@ -5,9 +5,6 @@ import torch -from deepmd.pt.infer.deep_eval import ( - eval_model, -) from deepmd.pt.model.model import ( get_model, ) @@ -18,6 +15,9 @@ from ...seed import ( GLOBAL_SEED, ) +from ..common import ( + eval_model, +) CUR_DIR = os.path.dirname(__file__) diff --git a/source/tests/pt/model/test_permutation_denoise.py b/source/tests/pt/model/test_permutation_denoise.py index 133c48f551..53bf55fb0f 100644 --- a/source/tests/pt/model/test_permutation_denoise.py +++ b/source/tests/pt/model/test_permutation_denoise.py @@ -4,9 +4,6 @@ import torch -from deepmd.pt.infer.deep_eval import ( - eval_model, -) from deepmd.pt.model.model import ( get_model, ) @@ -17,6 +14,9 @@ from ...seed import ( GLOBAL_SEED, ) +from ..common import ( + eval_model, +) from .test_permutation import ( # model_dpau, model_dpa1, model_dpa2, diff --git a/source/tests/pt/model/test_rot.py b/source/tests/pt/model/test_rot.py index 23bdede923..ca6a6375c8 100644 --- a/source/tests/pt/model/test_rot.py +++ b/source/tests/pt/model/test_rot.py @@ -4,9 +4,6 @@ import torch -from deepmd.pt.infer.deep_eval import ( - eval_model, -) from deepmd.pt.model.model import ( get_model, ) @@ -17,6 +14,9 @@ from ...seed import ( GLOBAL_SEED, ) +from ..common import ( + eval_model, +) from .test_permutation import ( # model_dpau, model_dos, model_dpa1, diff --git a/source/tests/pt/model/test_rot_denoise.py b/source/tests/pt/model/test_rot_denoise.py index 5fe99a0d7a..9828ba5225 100644 --- a/source/tests/pt/model/test_rot_denoise.py +++ b/source/tests/pt/model/test_rot_denoise.py @@ -4,9 +4,6 @@ import torch -from deepmd.pt.infer.deep_eval import ( - eval_model, -) from deepmd.pt.model.model import ( get_model, ) @@ -17,6 +14,9 @@ from ...seed import ( GLOBAL_SEED, ) +from ..common import ( + eval_model, +) from .test_permutation_denoise import ( model_dpa1, model_dpa2, diff --git a/source/tests/pt/model/test_smooth.py b/source/tests/pt/model/test_smooth.py index c33dddfab5..9a7040f9cc 100644 --- a/source/tests/pt/model/test_smooth.py +++ b/source/tests/pt/model/test_smooth.py @@ -4,9 +4,6 @@ import torch -from deepmd.pt.infer.deep_eval import ( - eval_model, -) from deepmd.pt.model.model import ( get_model, ) @@ -17,6 +14,9 @@ from ...seed import ( GLOBAL_SEED, ) +from ..common import ( + eval_model, +) from .test_permutation import ( # model_dpau, model_dos, model_dpa1, diff --git a/source/tests/pt/model/test_smooth_denoise.py b/source/tests/pt/model/test_smooth_denoise.py index 069c578d52..faa892c5d0 100644 --- a/source/tests/pt/model/test_smooth_denoise.py +++ b/source/tests/pt/model/test_smooth_denoise.py @@ -4,9 +4,6 @@ import torch -from deepmd.pt.infer.deep_eval import ( - eval_model, -) from deepmd.pt.model.model import ( get_model, ) @@ -17,6 +14,9 @@ from ...seed import ( GLOBAL_SEED, ) +from ..common import ( + eval_model, +) from .test_permutation_denoise import ( model_dpa2, ) diff --git a/source/tests/pt/model/test_trans.py b/source/tests/pt/model/test_trans.py index afd70f8995..b62fac1312 100644 --- a/source/tests/pt/model/test_trans.py +++ b/source/tests/pt/model/test_trans.py @@ -4,9 +4,6 @@ import torch -from deepmd.pt.infer.deep_eval import ( - eval_model, -) from deepmd.pt.model.model import ( get_model, ) @@ -17,6 +14,9 @@ from ...seed import ( GLOBAL_SEED, ) +from ..common import ( + eval_model, +) from .test_permutation import ( # model_dpau, model_dos, model_dpa1, diff --git a/source/tests/pt/model/test_trans_denoise.py b/source/tests/pt/model/test_trans_denoise.py index 2d31d5de50..84ec21929c 100644 --- a/source/tests/pt/model/test_trans_denoise.py +++ b/source/tests/pt/model/test_trans_denoise.py @@ -4,9 +4,6 @@ import torch -from deepmd.pt.infer.deep_eval import ( - eval_model, -) from deepmd.pt.model.model import ( get_model, ) @@ -17,6 +14,9 @@ from ...seed import ( GLOBAL_SEED, ) +from ..common import ( + eval_model, +) from .test_permutation_denoise import ( model_dpa1, model_dpa2, diff --git a/source/tests/pt/model/test_unused_params.py b/source/tests/pt/model/test_unused_params.py index e225719e7f..3f068d5e5b 100644 --- a/source/tests/pt/model/test_unused_params.py +++ b/source/tests/pt/model/test_unused_params.py @@ -4,9 +4,6 @@ import torch -from deepmd.pt.infer.deep_eval import ( - eval_model, -) from deepmd.pt.model.model import ( get_model, ) @@ -17,6 +14,9 @@ from ...seed import ( GLOBAL_SEED, ) +from ..common import ( + eval_model, +) from .test_permutation import ( model_dpa2, ) From d5b544bb4933685b3ddb45e1de62ed836a8ca0eb Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sun, 22 Sep 2024 00:59:02 +0800 Subject: [PATCH 005/193] update typo --- source/lmp/pair_deepmd.cpp | 8 +++++--- source/lmp/pair_deepmd.h | 1 + 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index b902f2a4c0..634be3eff7 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -760,6 +760,7 @@ void PairDeepMD::compute(int eflag, int vflag) { double min = numeric_limits::max(), max = 0, avg = 0; ana_st(max, min, avg, std_f, nlocal); double all_f_min = 0, all_f_max = 0, all_f_avg = 0; + double all_fm_min = 0, all_fm_max = 0, all_fm_avg = 0; MPI_Reduce(&min, &all_f_min, 1, MPI_DOUBLE, MPI_MIN, 0, world); MPI_Reduce(&max, &all_f_max, 1, MPI_DOUBLE, MPI_MAX, 0, world); MPI_Reduce(&avg, &all_f_avg, 1, MPI_DOUBLE, MPI_SUM, 0, world); @@ -775,7 +776,8 @@ void PairDeepMD::compute(int eflag, int vflag) { MPI_Reduce(&min, &all_fm_min, 1, MPI_DOUBLE, MPI_MIN, 0, world); MPI_Reduce(&max, &all_fm_max, 1, MPI_DOUBLE, MPI_MAX, 0, world); MPI_Reduce(&avg, &all_fm_avg, 1, MPI_DOUBLE, MPI_SUM, 0, world); - all_fm_avg /= double(all_nlocal); + // need modified for only spin atoms + all_fm_avg /= double(atom->natoms); } // std v std::vector send_v(9 * numb_models); @@ -834,8 +836,8 @@ void PairDeepMD::compute(int eflag, int vflag) { all_fm_avg *= force_unit_cvt_factor; fp << setw(12) << update->ntimestep << " " << setw(18) << all_v_max << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg - << " " << setw(18) << all_fr_max << " " << setw(18) << all_fr_min - << " " << setw(18) << all_fr_avg << " " << setw(18) << all_fm_max + << " " << setw(18) << all_f_max << " " << setw(18) << all_f_min + << " " << setw(18) << all_f_avg << " " << setw(18) << all_fm_max << " " << setw(18) << all_fm_min << " " << setw(18) << all_fm_avg; } diff --git a/source/lmp/pair_deepmd.h b/source/lmp/pair_deepmd.h index a3f6717a3b..3b42b0f7de 100644 --- a/source/lmp/pair_deepmd.h +++ b/source/lmp/pair_deepmd.h @@ -93,6 +93,7 @@ class PairDeepMD : public Pair { int numb_types; int numb_types_spin; std::vector > all_force; + std::vector > all_force_mag; std::ofstream fp; int out_freq; std::string out_file; From dd331fd84dfc711e5af412eecbafe311853ebb24 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sun, 22 Sep 2024 23:55:10 +0800 Subject: [PATCH 006/193] update pt backend --- deepmd/pt/model/model/spin_model.py | 4 + source/api_cc/include/DeepPotPT.h | 89 ++++++ source/api_cc/src/DeepPotPT.cc | 458 +++++++++++++++++++++++++++- 3 files changed, 546 insertions(+), 5 deletions(-) diff --git a/deepmd/pt/model/model/spin_model.py b/deepmd/pt/model/model/spin_model.py index 717a7ee7c8..f5ab81e16d 100644 --- a/deepmd/pt/model/model/spin_model.py +++ b/deepmd/pt/model/model/spin_model.py @@ -471,6 +471,7 @@ def forward_common_lower( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, + comm_dict: Optional[Dict[str, torch.Tensor]] = None, extra_nlist_sort: bool = False, ): nframes, nloc = nlist.shape[:2] @@ -492,6 +493,7 @@ def forward_common_lower( fparam=fparam, aparam=aparam, do_atomic_virial=do_atomic_virial, + comm_dict=comm_dict, extra_nlist_sort=extra_nlist_sort, ) model_output_type = self.backbone_model.model_output_type() @@ -607,6 +609,7 @@ def forward_lower( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, + comm_dict: Optional[Dict[str, torch.Tensor]] = None, ): model_ret = self.forward_common_lower( extended_coord, @@ -617,6 +620,7 @@ def forward_lower( fparam=fparam, aparam=aparam, do_atomic_virial=do_atomic_virial, + comm_dict=comm_dict, extra_nlist_sort=self.backbone_model.need_sorted_nlist_for_lower(), ) model_predict = {} diff --git a/source/api_cc/include/DeepPotPT.h b/source/api_cc/include/DeepPotPT.h index 973c02c434..aa24895a54 100644 --- a/source/api_cc/include/DeepPotPT.h +++ b/source/api_cc/include/DeepPotPT.h @@ -74,6 +74,20 @@ class DeepPotPT : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); + template + void compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -115,6 +129,23 @@ class DeepPotPT : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); + template + void compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); /** * @brief Evaluate the energy, force, and virial with the mixed type *by using this DP. @@ -270,10 +301,66 @@ class DeepPotPT : public DeepPotBase { const bool atomic); void computew(std::vector& ener, std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, std::vector& virial, std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, + const std::vector& spin, const std::vector& atype, const std::vector& box, const int nghost, @@ -284,10 +371,12 @@ class DeepPotPT : public DeepPotBase { const bool atomic); void computew(std::vector& ener, std::vector& force, + std::vector& force_mag, std::vector& virial, std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, + const std::vector& spin, const std::vector& atype, const std::vector& box, const int nghost, diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index 3d406879de..ed6d3f9eb1 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -164,11 +164,12 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, std::vector atype_64(datype.begin(), datype.end()); at::Tensor atype_Tensor = torch::from_blob(atype_64.data(), {1, nall_real}, int_option).to(device); + c10::optional mapping_tensor; if (ago == 0) { nlist_data.copy_from_nlist(lmp_list); nlist_data.shuffle_exclude_empty(fwd_map); nlist_data.padding(); - if (do_message_passing == 1) { + if (do_message_passing == 1 && nghost > 0) { int nswap = lmp_list.nswap; torch::Tensor sendproc_tensor = torch::from_blob(lmp_list.sendproc, {nswap}, int32_option); @@ -196,11 +197,16 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, comm_dict.insert("recv_num", recvnum_tensor); comm_dict.insert("communicator", communicator_tensor); } + if (do_message_passing == 1 && nghost == 0) { + // for the situation that no ghost atoms (e.g. serial nopbc) + // set the mapping arange(nloc) is enough + auto option = torch::TensorOptions().device(device).dtype(torch::kInt64); + mapping_tensor = at::arange(nloc_real, option).unsqueeze(0); + } } at::Tensor firstneigh = createNlistTensor(nlist_data.jlist); firstneigh_tensor = firstneigh.to(torch::kInt64).to(device); bool do_atom_virial_tensor = atomic; - c10::optional optional_tensor; c10::optional fparam_tensor; if (!fparam.empty()) { fparam_tensor = @@ -219,15 +225,15 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, .to(device); } c10::Dict outputs = - (do_message_passing == 1) + (do_message_passing == 1 && nghost > 0) ? module .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, - firstneigh_tensor, optional_tensor, fparam_tensor, + firstneigh_tensor, mapping_tensor, fparam_tensor, aparam_tensor, do_atom_virial_tensor, comm_dict) .toGenericDict() : module .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, - firstneigh_tensor, optional_tensor, fparam_tensor, + firstneigh_tensor, mapping_tensor, fparam_tensor, aparam_tensor, do_atom_virial_tensor) .toGenericDict(); c10::IValue energy_ = outputs.at("energy"); @@ -305,6 +311,227 @@ template void DeepPotPT::compute>( const std::vector& fparam, const std::vector& aparam, const bool atomic); + +template +void DeepPotPT::compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + torch::Device device(torch::kCUDA, gpu_id); + if (!gpu_enabled) { + device = torch::Device(torch::kCPU); + } + int natoms = atype.size(); + auto options = torch::TensorOptions().dtype(torch::kFloat64); + torch::ScalarType floatType = torch::kFloat64; + if (std::is_same_v) { + options = torch::TensorOptions().dtype(torch::kFloat32); + floatType = torch::kFloat32; + } + auto int32_option = + torch::TensorOptions().device(torch::kCPU).dtype(torch::kInt32); + auto int_option = + torch::TensorOptions().device(torch::kCPU).dtype(torch::kInt64); + // select real atoms + std::vector dcoord, dforce, dforce_mag, aparam_, datom_energy, + datom_virial; + std::vector datype, fwd_map, bkw_map; + int nghost_real, nall_real, nloc_real; + int nall = natoms; + select_real_atoms_coord(dcoord, datype, aparam_, nghost_real, fwd_map, + bkw_map, nall_real, nloc_real, coord, atype, aparam, + nghost, ntypes, 1, daparam, nall, aparam_nall); + int nloc = nall_real - nghost_real; + int nframes = 1; + std::vector coord_wrapped = dcoord; + at::Tensor coord_wrapped_Tensor = + torch::from_blob(coord_wrapped.data(), {1, nall_real, 3}, options) + .to(device); + std::vector spin_wrapped = spin; + at::Tensor spin_wrapped_Tensor = + torch::from_blob(spin_wrapped.data(), {1, nall_real, 3}, options) + .to(device); + std::vector atype_64(datype.begin(), datype.end()); + at::Tensor atype_Tensor = + torch::from_blob(atype_64.data(), {1, nall_real}, int_option).to(device); + c10::optional mapping_tensor; + if (ago == 0) { + nlist_data.copy_from_nlist(lmp_list); + nlist_data.shuffle_exclude_empty(fwd_map); + nlist_data.padding(); + if (do_message_passing == 1 && nghost > 0) { + int nswap = lmp_list.nswap; + torch::Tensor sendproc_tensor = + torch::from_blob(lmp_list.sendproc, {nswap}, int32_option); + torch::Tensor recvproc_tensor = + torch::from_blob(lmp_list.recvproc, {nswap}, int32_option); + torch::Tensor firstrecv_tensor = + torch::from_blob(lmp_list.firstrecv, {nswap}, int32_option); + torch::Tensor recvnum_tensor = + torch::from_blob(lmp_list.recvnum, {nswap}, int32_option); + torch::Tensor sendnum_tensor = + torch::from_blob(lmp_list.sendnum, {nswap}, int32_option); + torch::Tensor communicator_tensor = torch::from_blob( + const_cast(lmp_list.world), {1}, torch::kInt64); + // torch::Tensor communicator_tensor = + // torch::tensor(lmp_list.world, int32_option); + torch::Tensor nswap_tensor = torch::tensor(nswap, int32_option); + int total_send = + std::accumulate(lmp_list.sendnum, lmp_list.sendnum + nswap, 0); + torch::Tensor sendlist_tensor = + torch::from_blob(lmp_list.sendlist, {total_send}, int32_option); + comm_dict.insert("send_list", sendlist_tensor); + comm_dict.insert("send_proc", sendproc_tensor); + comm_dict.insert("recv_proc", recvproc_tensor); + comm_dict.insert("send_num", sendnum_tensor); + comm_dict.insert("recv_num", recvnum_tensor); + comm_dict.insert("communicator", communicator_tensor); + } + if (do_message_passing == 1 && nghost == 0) { + // for the situation that no ghost atoms (e.g. serial nopbc) + // set the mapping arange(nloc) is enough + auto option = torch::TensorOptions().device(device).dtype(torch::kInt64); + mapping_tensor = at::arange(nloc_real, option).unsqueeze(0); + } + } + at::Tensor firstneigh = createNlistTensor(nlist_data.jlist); + firstneigh_tensor = firstneigh.to(torch::kInt64).to(device); + bool do_atom_virial_tensor = atomic; + c10::optional fparam_tensor; + if (!fparam.empty()) { + fparam_tensor = + torch::from_blob(const_cast(fparam.data()), + {1, static_cast(fparam.size())}, options) + .to(device); + } + c10::optional aparam_tensor; + if (!aparam_.empty()) { + aparam_tensor = + torch::from_blob( + const_cast(aparam_.data()), + {1, lmp_list.inum, + static_cast(aparam_.size()) / lmp_list.inum}, + options) + .to(device); + } + c10::Dict outputs = + (do_message_passing == 1 && nghost > 0) + ? module + .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, + spin_wrapped_Tensor, firstneigh_tensor, + mapping_tensor, fparam_tensor, aparam_tensor, + do_atom_virial_tensor, comm_dict) + .toGenericDict() + : module + .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, + spin_wrapped_Tensor, firstneigh_tensor, + mapping_tensor, fparam_tensor, aparam_tensor, + do_atom_virial_tensor) + .toGenericDict(); + c10::IValue energy_ = outputs.at("energy"); + c10::IValue force_ = outputs.at("extended_force"); + c10::IValue force_mag_ = outputs.at("extended_force_mag"); + // spin model not suported yet + // c10::IValue virial_ = outputs.at("virial"); + torch::Tensor flat_energy_ = energy_.toTensor().view({-1}); + torch::Tensor cpu_energy_ = flat_energy_.to(torch::kCPU); + ener.assign(cpu_energy_.data_ptr(), + cpu_energy_.data_ptr() + cpu_energy_.numel()); + torch::Tensor flat_force_ = force_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_force_ = flat_force_.to(torch::kCPU); + dforce.assign(cpu_force_.data_ptr(), + cpu_force_.data_ptr() + cpu_force_.numel()); + torch::Tensor flat_force_mag_ = + force_mag_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_force_mag_ = flat_force_mag_.to(torch::kCPU); + dforce_mag.assign( + cpu_force_mag_.data_ptr(), + cpu_force_mag_.data_ptr() + cpu_force_mag_.numel()); + // spin model not suported yet + // torch::Tensor flat_virial_ = virial_.toTensor().view({-1}).to(floatType); + // torch::Tensor cpu_virial_ = flat_virial_.to(torch::kCPU); + // virial.assign(cpu_virial_.data_ptr(), + // cpu_virial_.data_ptr() + cpu_virial_.numel()); + + // bkw map + force.resize(static_cast(nframes) * fwd_map.size() * 3); + force_mag.resize(static_cast(nframes) * fwd_map.size() * 3); + select_map(force, dforce, bkw_map, 3, nframes, fwd_map.size(), + nall_real); + select_map(force_mag, dforce_mag, bkw_map, 3, nframes, + fwd_map.size(), nall_real); + if (atomic) { + // spin model not suported yet + // c10::IValue atom_virial_ = outputs.at("extended_virial"); + c10::IValue atom_energy_ = outputs.at("atom_energy"); + torch::Tensor flat_atom_energy_ = + atom_energy_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_atom_energy_ = flat_atom_energy_.to(torch::kCPU); + datom_energy.resize(nall_real, + 0.0); // resize to nall to be consistenet with TF. + datom_energy.assign( + cpu_atom_energy_.data_ptr(), + cpu_atom_energy_.data_ptr() + cpu_atom_energy_.numel()); + // spin model not suported yet + // torch::Tensor flat_atom_virial_ = + // atom_virial_.toTensor().view({-1}).to(floatType); + // torch::Tensor cpu_atom_virial_ = flat_atom_virial_.to(torch::kCPU); + // datom_virial.assign( + // cpu_atom_virial_.data_ptr(), + // cpu_atom_virial_.data_ptr() + cpu_atom_virial_.numel()); + atom_energy.resize(static_cast(nframes) * fwd_map.size()); + // atom_virial.resize(static_cast(nframes) * fwd_map.size() * 9); + select_map(atom_energy, datom_energy, bkw_map, 1, nframes, + fwd_map.size(), nall_real); + // select_map(atom_virial, datom_virial, bkw_map, 9, nframes, + // fwd_map.size(), nall_real); + } +} +template void DeepPotPT::compute>( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); +template void DeepPotPT::compute>( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + template void DeepPotPT::compute(ENERGYVTYPE& ener, std::vector& force, @@ -409,6 +636,147 @@ template void DeepPotPT::compute>( std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); +template void DeepPotPT::compute>( + std::vector& ener, + std::vector& force, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + +template +void DeepPotPT::compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + torch::Device device(torch::kCUDA, gpu_id); + if (!gpu_enabled) { + device = torch::Device(torch::kCPU); + } + std::vector coord_wrapped = coord; + std::vector spin_wrapped = spin; + int natoms = atype.size(); + auto options = torch::TensorOptions().dtype(torch::kFloat64); + torch::ScalarType floatType = torch::kFloat64; + if (std::is_same_v) { + options = torch::TensorOptions().dtype(torch::kFloat32); + floatType = torch::kFloat32; + } + auto int_options = torch::TensorOptions().dtype(torch::kInt64); + int nframes = 1; + std::vector inputs; + at::Tensor coord_wrapped_Tensor = + torch::from_blob(coord_wrapped.data(), {1, natoms, 3}, options) + .to(device); + inputs.push_back(coord_wrapped_Tensor); + std::vector atype_64(atype.begin(), atype.end()); + at::Tensor atype_Tensor = + torch::from_blob(atype_64.data(), {1, natoms}, int_options).to(device); + inputs.push_back(atype_Tensor); + at::Tensor spin_wrapped_Tensor = + torch::from_blob(spin_wrapped.data(), {1, natoms, 3}, options).to(device); + inputs.push_back(spin_wrapped_Tensor); + c10::optional box_Tensor; + if (!box.empty()) { + box_Tensor = + torch::from_blob(const_cast(box.data()), {1, 9}, options) + .to(device); + } + inputs.push_back(box_Tensor); + c10::optional fparam_tensor; + if (!fparam.empty()) { + fparam_tensor = + torch::from_blob(const_cast(fparam.data()), + {1, static_cast(fparam.size())}, options) + .to(device); + } + inputs.push_back(fparam_tensor); + c10::optional aparam_tensor; + if (!aparam.empty()) { + aparam_tensor = + torch::from_blob( + const_cast(aparam.data()), + {1, natoms, static_cast(aparam.size()) / natoms}, + options) + .to(device); + } + inputs.push_back(aparam_tensor); + bool do_atom_virial_tensor = atomic; + inputs.push_back(do_atom_virial_tensor); + c10::Dict outputs = + module.forward(inputs).toGenericDict(); + c10::IValue energy_ = outputs.at("energy"); + c10::IValue force_ = outputs.at("force"); + c10::IValue force_mag_ = outputs.at("force_mag"); + // spin model not suported yet + // c10::IValue virial_ = outputs.at("virial"); + torch::Tensor flat_energy_ = energy_.toTensor().view({-1}); + torch::Tensor cpu_energy_ = flat_energy_.to(torch::kCPU); + ener.assign(cpu_energy_.data_ptr(), + cpu_energy_.data_ptr() + cpu_energy_.numel()); + torch::Tensor flat_force_ = force_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_force_ = flat_force_.to(torch::kCPU); + force.assign(cpu_force_.data_ptr(), + cpu_force_.data_ptr() + cpu_force_.numel()); + torch::Tensor flat_force_mag_ = + force_mag_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_force_mag_ = flat_force_mag_.to(torch::kCPU); + force_mag.assign( + cpu_force_mag_.data_ptr(), + cpu_force_mag_.data_ptr() + cpu_force_mag_.numel()); + // spin model not suported yet + // torch::Tensor flat_virial_ = virial_.toTensor().view({-1}).to(floatType); + // torch::Tensor cpu_virial_ = flat_virial_.to(torch::kCPU); + // virial.assign(cpu_virial_.data_ptr(), + // cpu_virial_.data_ptr() + cpu_virial_.numel()); + if (atomic) { + // c10::IValue atom_virial_ = outputs.at("atom_virial"); + c10::IValue atom_energy_ = outputs.at("atom_energy"); + torch::Tensor flat_atom_energy_ = + atom_energy_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_atom_energy_ = flat_atom_energy_.to(torch::kCPU); + atom_energy.assign( + cpu_atom_energy_.data_ptr(), + cpu_atom_energy_.data_ptr() + cpu_atom_energy_.numel()); + // torch::Tensor flat_atom_virial_ = + // atom_virial_.toTensor().view({-1}).to(floatType); + // torch::Tensor cpu_atom_virial_ = flat_atom_virial_.to(torch::kCPU); + // atom_virial.assign( + // cpu_atom_virial_.data_ptr(), + // cpu_atom_virial_.data_ptr() + cpu_atom_virial_.numel()); + } +} + +template void DeepPotPT::compute>( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, const std::vector& atype, const std::vector& box, const std::vector& fparam, @@ -417,10 +785,12 @@ template void DeepPotPT::compute>( template void DeepPotPT::compute>( std::vector& ener, std::vector& force, + std::vector& force_mag, std::vector& virial, std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, + const std::vector& spin, const std::vector& atype, const std::vector& box, const std::vector& fparam, @@ -467,6 +837,42 @@ void DeepPotPT::computew(std::vector& ener, fparam, aparam, atomic); }); } +void DeepPotPT::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + translate_error([&] { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, + spin, atype, box, fparam, aparam, atomic); + }); +} +void DeepPotPT::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + translate_error([&] { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, + spin, atype, box, fparam, aparam, atomic); + }); +} void DeepPotPT::computew(std::vector& ener, std::vector& force, std::vector& virial, @@ -505,6 +911,48 @@ void DeepPotPT::computew(std::vector& ener, nghost, inlist, ago, fparam, aparam, atomic); }); } +void DeepPotPT::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + translate_error([&] { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, + spin, atype, box, nghost, inlist, ago, fparam, aparam, atomic); + }); +} +void DeepPotPT::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + translate_error([&] { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, + spin, atype, box, nghost, inlist, ago, fparam, aparam, atomic); + }); +} void DeepPotPT::computew_mixed_type(std::vector& ener, std::vector& force, std::vector& virial, From 31bafb117bbc7d8de8538393b302c715d4e3a6e8 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Mon, 23 Sep 2024 00:02:07 +0800 Subject: [PATCH 007/193] rm extend from pair-deepmd --- source/api_cc/src/DeepPotTF.cc | 2 +- source/lmp/pair_deepmd.cpp | 166 --------------------------------- source/lmp/pair_deepmd.h | 26 ------ 3 files changed, 1 insertion(+), 193 deletions(-) diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index f5f6e28c88..92773da2b4 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -847,7 +847,7 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, extend(extend_inum, extend_ilist, extend_numneigh, extend_neigh, extend_firstneigh, extend_dcoord, extend_dtype, extend_nghost, new_idx_map, old_idx_map, lmp_list, dcoord, dtype, nghost, dspin_, - numb_types, numb_types_spin, virtual_len); + numb_types, numb_types_spin, virtual_len, spin_norm); // extend_lmp_list = InputNlist(extend_inum, &extend_ilist[0], // &extend_numneigh[0], &extend_firstneigh[0]); deepmd_compat::InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 634be3eff7..a0dc4faae7 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -1465,169 +1465,3 @@ void *PairDeepMD::extract(const char *str, int &dim) { } return NULL; } - -void PairDeepMD::extend(int &extend_inum, - std::vector &extend_ilist, - std::vector &extend_numneigh, - std::vector> &extend_neigh, - std::vector &extend_firstneigh, - std::vector &extend_dcoord, - std::vector &extend_atype, - int &extend_nghost, - std::map &new_idx_map, - std::map &old_idx_map, - const deepmd_compat::InputNlist &lmp_list, - const std::vector &dcoord, - const std::vector &atype, - const int nghost, - const std::vector &spin, - const int numb_types, - const int numb_types_spin, - const std::vector &virtual_len) { - extend_ilist.clear(); - extend_numneigh.clear(); - extend_neigh.clear(); - extend_firstneigh.clear(); - extend_dcoord.clear(); - extend_atype.clear(); - - int nall = dcoord.size() / 3; - int nloc = nall - nghost; - assert(nloc == lmp_list.inum); - - // record numb_types_real and nloc_virt - int numb_types_real = numb_types - numb_types_spin; - std::map loc_type_count; - std::map::iterator iter = loc_type_count.begin(); - for (int i = 0; i < nloc; i++) { - iter = loc_type_count.find(atype[i]); - if (iter != loc_type_count.end()) { - iter->second += 1; - } else { - loc_type_count.insert(pair(atype[i], 1)); - } - } - assert(numb_types_real - 1 == loc_type_count.rbegin()->first); - int nloc_virt = 0; - for (int i = 0; i < numb_types_spin; i++) { - nloc_virt += loc_type_count[i]; - } - - // record nghost_virt - std::map ghost_type_count; - for (int i = nloc; i < nall; i++) { - iter = ghost_type_count.find(atype[i]); - if (iter != ghost_type_count.end()) { - iter->second += 1; - } else { - ghost_type_count.insert(pair(atype[i], 1)); - } - } - int nghost_virt = 0; - for (int i = 0; i < numb_types_spin; i++) { - nghost_virt += ghost_type_count[i]; - } - - // for extended system, search new index by old index, and vice versa - extend_nghost = nghost + nghost_virt; - int extend_nloc = nloc + nloc_virt; - int extend_nall = extend_nloc + extend_nghost; - std::map cum_loc_type_count; - std::map cum_ghost_type_count; - cum_sum(cum_loc_type_count, loc_type_count); - cum_sum(cum_ghost_type_count, ghost_type_count); - std::vector loc_type_reset(numb_types_real, 0); - std::vector ghost_type_reset(numb_types_real, 0); - - new_idx_map.clear(); - old_idx_map.clear(); - for (int ii = 0; ii < nloc; ii++) { - int new_idx = cum_loc_type_count[atype[ii]] + loc_type_reset[atype[ii]]; - new_idx_map[ii] = new_idx; - old_idx_map[new_idx] = ii; - loc_type_reset[atype[ii]]++; - } - for (int ii = nloc; ii < nall; ii++) { - int new_idx = cum_ghost_type_count[atype[ii]] + - ghost_type_reset[atype[ii]] + extend_nloc; - new_idx_map[ii] = new_idx; - old_idx_map[new_idx] = ii; - ghost_type_reset[atype[ii]]++; - } - - // extend lmp_list - extend_inum = extend_nloc; - - extend_ilist.resize(extend_nloc); - for (int ii = 0; ii < extend_nloc; ii++) { - extend_ilist[ii] = ii; - } - - extend_neigh.resize(extend_nloc); - for (int ii = 0; ii < nloc; ii++) { - int jnum = lmp_list.numneigh[old_idx_map[ii]]; - const int *jlist = lmp_list.firstneigh[old_idx_map[ii]]; - if (atype[old_idx_map[ii]] < numb_types_spin) { - extend_neigh[ii].push_back(ii + nloc); - } - for (int jj = 0; jj < jnum; jj++) { - int new_idx = new_idx_map[jlist[jj]]; - extend_neigh[ii].push_back(new_idx); - if (atype[jlist[jj]] < numb_types_spin && jlist[jj] < nloc) { - extend_neigh[ii].push_back(new_idx + nloc); - } else if (atype[jlist[jj]] < numb_types_spin && jlist[jj] < nall) { - extend_neigh[ii].push_back(new_idx + nghost); - } - } - } - for (int ii = nloc; ii < extend_nloc; ii++) { - extend_neigh[ii].assign(extend_neigh[ii - nloc].begin(), - extend_neigh[ii - nloc].end()); - std::vector::iterator it = - find(extend_neigh[ii].begin(), extend_neigh[ii].end(), ii); - *it = ii - nloc; - } - - extend_firstneigh.resize(extend_nloc); - extend_numneigh.resize(extend_nloc); - for (int ii = 0; ii < extend_nloc; ii++) { - extend_firstneigh[ii] = &extend_neigh[ii][0]; - extend_numneigh[ii] = extend_neigh[ii].size(); - } - - // extend coord - extend_dcoord.resize(static_cast(extend_nall) * 3); - for (int ii = 0; ii < nloc; ii++) { - for (int jj = 0; jj < 3; jj++) { - extend_dcoord[new_idx_map[ii] * 3 + jj] = dcoord[ii * 3 + jj]; - if (atype[ii] < numb_types_spin) { - double temp_dcoord = - dcoord[ii * 3 + jj] + spin[ii * 3 + jj] * virtual_len[atype[ii]]; - extend_dcoord[(new_idx_map[ii] + nloc) * 3 + jj] = temp_dcoord; - } - } - } - for (int ii = nloc; ii < nall; ii++) { - for (int jj = 0; jj < 3; jj++) { - extend_dcoord[new_idx_map[ii] * 3 + jj] = dcoord[ii * 3 + jj]; - if (atype[ii] < numb_types_spin) { - double temp_dcoord = - dcoord[ii * 3 + jj] + spin[ii * 3 + jj] * virtual_len[atype[ii]]; - extend_dcoord[(new_idx_map[ii] + nghost) * 3 + jj] = temp_dcoord; - } - } - } - - // extend atype - extend_atype.resize(extend_nall); - for (int ii = 0; ii < nall; ii++) { - extend_atype[new_idx_map[ii]] = atype[ii]; - if (atype[ii] < numb_types_spin) { - if (ii < nloc) { - extend_atype[new_idx_map[ii] + nloc] = atype[ii] + numb_types_real; - } else { - extend_atype[new_idx_map[ii] + nghost] = atype[ii] + numb_types_real; - } - } - } -} diff --git a/source/lmp/pair_deepmd.h b/source/lmp/pair_deepmd.h index 3b42b0f7de..54830260a2 100644 --- a/source/lmp/pair_deepmd.h +++ b/source/lmp/pair_deepmd.h @@ -55,24 +55,6 @@ class PairDeepMD : public Pair { void unpack_reverse_comm(int, int *, double *) override; void print_summary(const std::string pre) const; int get_node_rank(); - void extend(int &extend_inum, - std::vector &extend_ilist, - std::vector &extend_numneigh, - std::vector > &extend_neigh, - std::vector &extend_firstneigh, - std::vector &extend_coord, - std::vector &extend_atype, - int &extend_nghost, - std::map &new_idx_map, - std::map &old_idx_map, - const deepmd_compat::InputNlist &lmp_list, - const std::vector &coord, - const std::vector &atype, - const int nghost, - const std::vector &spin, - const int numb_types, - const int numb_types_spin, - const std::vector &virtual_len); void cum_sum(std::map &, std::map &); std::string get_file_content(const std::string &model); @@ -109,14 +91,6 @@ class PairDeepMD : public Pair { bool is_restart; std::vector virtual_len; std::vector spin_norm; - int extend_inum; - std::vector extend_ilist; - std::vector extend_numneigh; - std::vector > extend_neigh; - std::vector extend_firstneigh; - std::vector extend_dcoord; - std::vector extend_dtype; - int extend_nghost; // for spin systems, search new index of atoms by their old index std::map new_idx_map; std::map old_idx_map; From f5cfeab050b818a87a1ff51363d439c3909a7bea Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 22 Sep 2024 21:23:22 -0400 Subject: [PATCH 008/193] fix(pt): fix `compute_output_stats_global` when `atomic_output` is `None` (#4155) ## Summary by CodeRabbit - **Bug Fixes** - Improved error handling by ensuring that the output data is not `None` before processing, preventing potential runtime errors. --- deepmd/pt/utils/stat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py index 6de70eb175..58e02f436d 100644 --- a/deepmd/pt/utils/stat.py +++ b/deepmd/pt/utils/stat.py @@ -478,7 +478,7 @@ def compute_output_stats_global( std_atom_e = {} for kk in keys: if kk in stats_input: - if atomic_output.get_data()[kk].intensive: + if atomic_output is not None and atomic_output.get_data()[kk].intensive: task_dim = stats_input[kk].shape[1] assert merged_natoms[kk].shape == (nf[kk], ntypes) stats_input[kk] = ( From 0b72dae39d269963740b27af0d163510d269de4b Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 23 Sep 2024 03:26:25 -0400 Subject: [PATCH 009/193] feat(jax): support neural networks (#4156) ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced JAX support, enhancing functionality and compatibility with JAX library. - Added new `JAXBackend` class for backend integration with JAX. - New functions for converting between NumPy and JAX arrays. - **Bug Fixes** - Improved compatibility of neural network layers with array API standards. - **Tests** - Added tests for JAX functionality and consistency checks against reference outputs. - Enhanced testing framework for activation functions and type embeddings. - **Chores** - Updated dependency requirements to include JAX library. --------- Signed-off-by: Jinzhe Zeng --- .github/workflows/test_cuda.yml | 2 +- .github/workflows/test_python.yml | 2 +- deepmd/backend/jax.py | 110 ++++++++++++++++++ deepmd/dpmodel/common.py | 22 ++++ deepmd/dpmodel/utils/network.py | 50 ++++++-- deepmd/dpmodel/utils/type_embed.py | 14 ++- deepmd/jax/__init__.py | 2 + deepmd/jax/common.py | 37 ++++++ deepmd/jax/env.py | 14 +++ deepmd/jax/utils/__init__.py | 1 + deepmd/jax/utils/network.py | 29 +++++ deepmd/jax/utils/type_embed.py | 21 ++++ pyproject.toml | 3 + .../array_api/test_activation_functions.py | 1 + source/tests/consistent/common.py | 59 ++++++++++ source/tests/consistent/test_activation.py | 26 +++++ .../tests/consistent/test_type_embedding.py | 18 +++ 17 files changed, 393 insertions(+), 18 deletions(-) create mode 100644 deepmd/backend/jax.py create mode 100644 deepmd/jax/__init__.py create mode 100644 deepmd/jax/common.py create mode 100644 deepmd/jax/env.py create mode 100644 deepmd/jax/utils/__init__.py create mode 100644 deepmd/jax/utils/network.py create mode 100644 deepmd/jax/utils/type_embed.py create mode 100644 source/tests/common/dpmodel/array_api/test_activation_functions.py diff --git a/.github/workflows/test_cuda.yml b/.github/workflows/test_cuda.yml index 2883f01b5a..d60a9c909a 100644 --- a/.github/workflows/test_cuda.yml +++ b/.github/workflows/test_cuda.yml @@ -51,7 +51,7 @@ jobs: - run: | export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') export TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') - source/install/uv_with_retry.sh pip install --system -v -e .[gpu,test,lmp,cu12,torch] mpi4py + source/install/uv_with_retry.sh pip install --system -v -e .[gpu,test,lmp,cu12,torch,jax] mpi4py env: DP_VARIANT: cuda DP_ENABLE_NATIVE_OPTIMIZATION: 1 diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index 36f9bd78b8..8274921909 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -28,7 +28,7 @@ jobs: source/install/uv_with_retry.sh pip install --system mpich source/install/uv_with_retry.sh pip install --system "torch==2.3.0+cpu.cxx11.abi" -i https://download.pytorch.org/whl/ export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') - source/install/uv_with_retry.sh pip install --system --only-binary=horovod -e .[cpu,test] horovod[tensorflow-cpu] mpi4py + source/install/uv_with_retry.sh pip install --system --only-binary=horovod -e .[cpu,test,jax] horovod[tensorflow-cpu] mpi4py env: # Please note that uv has some issues with finding # existing TensorFlow package. Currently, it uses diff --git a/deepmd/backend/jax.py b/deepmd/backend/jax.py new file mode 100644 index 0000000000..ece0761772 --- /dev/null +++ b/deepmd/backend/jax.py @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from importlib.util import ( + find_spec, +) +from typing import ( + TYPE_CHECKING, + Callable, + ClassVar, + List, + Type, +) + +from deepmd.backend.backend import ( + Backend, +) + +if TYPE_CHECKING: + from argparse import ( + Namespace, + ) + + from deepmd.infer.deep_eval import ( + DeepEvalBackend, + ) + from deepmd.utils.neighbor_stat import ( + NeighborStat, + ) + + +@Backend.register("jax") +class JAXBackend(Backend): + """JAX backend.""" + + name = "JAX" + """The formal name of the backend.""" + features: ClassVar[Backend.Feature] = ( + Backend.Feature(0) + # Backend.Feature.ENTRY_POINT + # | Backend.Feature.DEEP_EVAL + # | Backend.Feature.NEIGHBOR_STAT + # | Backend.Feature.IO + ) + """The features of the backend.""" + suffixes: ClassVar[List[str]] = [] + """The suffixes of the backend.""" + + def is_available(self) -> bool: + """Check if the backend is available. + + Returns + ------- + bool + Whether the backend is available. + """ + return find_spec("jax") is not None + + @property + def entry_point_hook(self) -> Callable[["Namespace"], None]: + """The entry point hook of the backend. + + Returns + ------- + Callable[[Namespace], None] + The entry point hook of the backend. + """ + raise NotImplementedError + + @property + def deep_eval(self) -> Type["DeepEvalBackend"]: + """The Deep Eval backend of the backend. + + Returns + ------- + type[DeepEvalBackend] + The Deep Eval backend of the backend. + """ + raise NotImplementedError + + @property + def neighbor_stat(self) -> Type["NeighborStat"]: + """The neighbor statistics of the backend. + + Returns + ------- + type[NeighborStat] + The neighbor statistics of the backend. + """ + raise NotImplementedError + + @property + def serialize_hook(self) -> Callable[[str], dict]: + """The serialize hook to convert the model file to a dictionary. + + Returns + ------- + Callable[[str], dict] + The serialize hook of the backend. + """ + raise NotImplementedError + + @property + def deserialize_hook(self) -> Callable[[str, dict], None]: + """The deserialize hook to convert the dictionary to a model file. + + Returns + ------- + Callable[[str, dict], None] + The deserialize hook of the backend. + """ + raise NotImplementedError diff --git a/deepmd/dpmodel/common.py b/deepmd/dpmodel/common.py index 56cb8ec1e9..d9d57d2d6c 100644 --- a/deepmd/dpmodel/common.py +++ b/deepmd/dpmodel/common.py @@ -3,6 +3,10 @@ ABC, abstractmethod, ) +from typing import ( + Any, + Optional, +) import ml_dtypes import numpy as np @@ -59,6 +63,24 @@ def __call__(self, *args, **kwargs): return self.call(*args, **kwargs) +def to_numpy_array(x: Any) -> Optional[np.ndarray]: + """Convert an array to a NumPy array. + + Parameters + ---------- + x : Any + The array to be converted. + + Returns + ------- + Optional[np.ndarray] + The NumPy array. + """ + if x is None: + return None + return np.asarray(x) + + __all__ = [ "GLOBAL_NP_FLOAT_PRECISION", "GLOBAL_ENER_FLOAT_PRECISION", diff --git a/deepmd/dpmodel/utils/network.py b/deepmd/dpmodel/utils/network.py index 941e2cfc86..22e85c9890 100644 --- a/deepmd/dpmodel/utils/network.py +++ b/deepmd/dpmodel/utils/network.py @@ -15,6 +15,7 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel import ( @@ -22,6 +23,12 @@ PRECISION_DICT, NativeOP, ) +from deepmd.dpmodel.array_api import ( + support_array_api, +) +from deepmd.dpmodel.common import ( + to_numpy_array, +) from deepmd.dpmodel.utils.seed import ( child_seed, ) @@ -105,9 +112,9 @@ def serialize(self) -> dict: The serialized layer. """ data = { - "w": self.w, - "b": self.b, - "idt": self.idt, + "w": to_numpy_array(self.w), + "b": to_numpy_array(self.b), + "idt": to_numpy_array(self.idt), } return { "@class": "Layer", @@ -215,6 +222,7 @@ def dim_in(self) -> int: def dim_out(self) -> int: return self.w.shape[1] + @support_array_api(version="2022.12") def call(self, x: np.ndarray) -> np.ndarray: """Forward pass. @@ -230,11 +238,12 @@ def call(self, x: np.ndarray) -> np.ndarray: """ if self.w is None or self.activation_function is None: raise ValueError("w, b, and activation_function must be set") + xp = array_api_compat.array_namespace(x) fn = get_activation_fn(self.activation_function) y = ( - np.matmul(x, self.w) + self.b + xp.matmul(x, self.w) + self.b if self.b is not None - else np.matmul(x, self.w) + else xp.matmul(x, self.w) ) y = fn(y) if self.idt is not None: @@ -242,47 +251,64 @@ def call(self, x: np.ndarray) -> np.ndarray: if self.resnet and self.w.shape[1] == self.w.shape[0]: y += x elif self.resnet and self.w.shape[1] == 2 * self.w.shape[0]: - y += np.concatenate([x, x], axis=-1) + y += xp.concatenate([x, x], axis=-1) return y +@support_array_api(version="2022.12") def get_activation_fn(activation_function: str) -> Callable[[np.ndarray], np.ndarray]: activation_function = activation_function.lower() if activation_function == "tanh": - return np.tanh + + def fn(x): + xp = array_api_compat.array_namespace(x) + return xp.tanh(x) + + return fn elif activation_function == "relu": def fn(x): + xp = array_api_compat.array_namespace(x) # https://stackoverflow.com/a/47936476/9567349 - return x * (x > 0) + return x * xp.astype(x > 0, x.dtype) return fn elif activation_function in ("gelu", "gelu_tf"): def fn(x): + xp = array_api_compat.array_namespace(x) # generated by GitHub Copilot - return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x**3))) + return ( + 0.5 + * x + * (1 + xp.tanh(xp.sqrt(xp.asarray(2 / xp.pi)) * (x + 0.044715 * x**3))) + ) return fn elif activation_function == "relu6": def fn(x): + xp = array_api_compat.array_namespace(x) # generated by GitHub Copilot - return np.minimum(np.maximum(x, 0), 6) + return xp.where( + x < 0, xp.full_like(x, 0), xp.where(x > 6, xp.full_like(x, 6), x) + ) return fn elif activation_function == "softplus": def fn(x): + xp = array_api_compat.array_namespace(x) # generated by GitHub Copilot - return np.log(1 + np.exp(x)) + return xp.log(1 + xp.exp(x)) return fn elif activation_function == "sigmoid": def fn(x): + xp = array_api_compat.array_namespace(x) # generated by GitHub Copilot - return 1 / (1 + np.exp(-x)) + return 1 / (1 + xp.exp(-x)) return fn elif activation_function.lower() in ("none", "linear"): diff --git a/deepmd/dpmodel/utils/type_embed.py b/deepmd/dpmodel/utils/type_embed.py index 2e695171d6..e11c415cfd 100644 --- a/deepmd/dpmodel/utils/type_embed.py +++ b/deepmd/dpmodel/utils/type_embed.py @@ -5,8 +5,12 @@ Union, ) +import array_api_compat import numpy as np +from deepmd.dpmodel.array_api import ( + support_array_api, +) from deepmd.dpmodel.common import ( PRECISION_DICT, NativeOP, @@ -92,16 +96,18 @@ def __init__( bias=self.use_tebd_bias, ) + @support_array_api(version="2022.12") def call(self) -> np.ndarray: """Compute the type embedding network.""" + sample_array = self.embedding_net[0]["w"] + xp = array_api_compat.array_namespace(sample_array) if not self.use_econf_tebd: - embed = self.embedding_net( - np.eye(self.ntypes, dtype=PRECISION_DICT[self.precision]) - ) + embed = self.embedding_net(xp.eye(self.ntypes, dtype=sample_array.dtype)) else: embed = self.embedding_net(self.econf_tebd) if self.padding: - embed = np.pad(embed, ((0, 1), (0, 0)), mode="constant") + embed_pad = xp.zeros((1, embed.shape[-1]), dtype=embed.dtype) + embed = xp.concatenate([embed, embed_pad], axis=0) return embed @classmethod diff --git a/deepmd/jax/__init__.py b/deepmd/jax/__init__.py new file mode 100644 index 0000000000..2ff078e797 --- /dev/null +++ b/deepmd/jax/__init__.py @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +"""JAX backend.""" diff --git a/deepmd/jax/common.py b/deepmd/jax/common.py new file mode 100644 index 0000000000..550b168b29 --- /dev/null +++ b/deepmd/jax/common.py @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Union, + overload, +) + +import numpy as np + +from deepmd.jax.env import ( + jnp, +) + + +@overload +def to_jax_array(array: np.ndarray) -> jnp.ndarray: ... + + +@overload +def to_jax_array(array: None) -> None: ... + + +def to_jax_array(array: Union[np.ndarray]) -> Union[jnp.ndarray]: + """Convert a numpy array to a JAX array. + + Parameters + ---------- + array : np.ndarray + The numpy array to convert. + + Returns + ------- + jnp.ndarray + The JAX tensor. + """ + if array is None: + return None + return jnp.array(array) diff --git a/deepmd/jax/env.py b/deepmd/jax/env.py new file mode 100644 index 0000000000..34e4aa6240 --- /dev/null +++ b/deepmd/jax/env.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import os + +os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false" + +import jax +import jax.numpy as jnp + +jax.config.update("jax_enable_x64", True) + +__all__ = [ + "jax", + "jnp", +] diff --git a/deepmd/jax/utils/__init__.py b/deepmd/jax/utils/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/jax/utils/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/jax/utils/network.py b/deepmd/jax/utils/network.py new file mode 100644 index 0000000000..629b51b8cd --- /dev/null +++ b/deepmd/jax/utils/network.py @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.common import ( + NativeOP, +) +from deepmd.dpmodel.utils.network import NativeLayer as NativeLayerDP +from deepmd.dpmodel.utils.network import ( + make_embedding_network, + make_fitting_network, + make_multilayer_network, +) +from deepmd.jax.common import ( + to_jax_array, +) + + +class NativeLayer(NativeLayerDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"w", "b", "idt"}: + value = to_jax_array(value) + return super().__setattr__(name, value) + + +NativeNet = make_multilayer_network(NativeLayer, NativeOP) +EmbeddingNet = make_embedding_network(NativeNet, NativeLayer) +FittingNet = make_fitting_network(EmbeddingNet, NativeNet, NativeLayer) diff --git a/deepmd/jax/utils/type_embed.py b/deepmd/jax/utils/type_embed.py new file mode 100644 index 0000000000..bc7c469524 --- /dev/null +++ b/deepmd/jax/utils/type_embed.py @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.utils.type_embed import TypeEmbedNet as TypeEmbedNetDP +from deepmd.jax.common import ( + to_jax_array, +) +from deepmd.jax.utils.network import ( + EmbeddingNet, +) + + +class TypeEmbedNet(TypeEmbedNetDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"econf_tebd"}: + value = to_jax_array(value) + if name in {"embedding_net"}: + value = EmbeddingNet.deserialize(value.serialize()) + return super().__setattr__(name, value) diff --git a/pyproject.toml b/pyproject.toml index f181b616a3..28fe114e01 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -132,6 +132,9 @@ cu12 = [ "nvidia-cudnn-cu12<9", "nvidia-cuda-nvcc-cu12", ] +jax = [ + 'jax>=0.4.33;python_version>="3.10"', +] [tool.deepmd_build_backend.scripts] dp = "deepmd.main:main" diff --git a/source/tests/common/dpmodel/array_api/test_activation_functions.py b/source/tests/common/dpmodel/array_api/test_activation_functions.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/common/dpmodel/array_api/test_activation_functions.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index edafc7c02e..e8873e528a 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -35,6 +35,7 @@ INSTALLED_TF = Backend.get_backend("tensorflow")().is_available() INSTALLED_PT = Backend.get_backend("pytorch")().is_available() +INSTALLED_JAX = Backend.get_backend("jax")().is_available() if os.environ.get("CI") and not (INSTALLED_TF and INSTALLED_PT): raise ImportError("TensorFlow or PyTorch should be tested in the CI") @@ -57,6 +58,7 @@ "CommonTest", "INSTALLED_TF", "INSTALLED_PT", + "INSTALLED_JAX", ] @@ -71,6 +73,8 @@ class CommonTest(ABC): """Native DP model class.""" pt_class: ClassVar[Optional[type]] """PyTorch model class.""" + jax_class: ClassVar[Optional[type]] + """JAX model class.""" args: ClassVar[Optional[Union[Argument, List[Argument]]]] """Arguments that maps to the `data`.""" skip_dp: ClassVar[bool] = False @@ -79,6 +83,9 @@ class CommonTest(ABC): """Whether to skip the TensorFlow model.""" skip_pt: ClassVar[bool] = not INSTALLED_PT """Whether to skip the PyTorch model.""" + # we may usually skip jax before jax is fully supported + skip_jax: ClassVar[bool] = True + """Whether to skip the JAX model.""" rtol = 1e-10 """Relative tolerance for comparing the return value. Override for float32.""" atol = 1e-10 @@ -149,12 +156,23 @@ def eval_pt(self, pt_obj: Any) -> Any: The object of PT """ + def eval_jax(self, jax_obj: Any) -> Any: + """Evaluate the return value of JAX. + + Parameters + ---------- + jax_obj : Any + The object of JAX + """ + raise NotImplementedError("Not implemented") + class RefBackend(Enum): """Reference backend.""" TF = 1 DP = 2 PT = 3 + JAX = 5 @abstractmethod def extract_ret(self, ret: Any, backend: RefBackend) -> Tuple[np.ndarray, ...]: @@ -215,6 +233,11 @@ def get_dp_ret_serialization_from_cls(self, obj): data = obj.serialize() return ret, data + def get_jax_ret_serialization_from_cls(self, obj): + ret = self.eval_jax(obj) + data = obj.serialize() + return ret, data + def get_reference_backend(self): """Get the reference backend. @@ -226,6 +249,8 @@ def get_reference_backend(self): return self.RefBackend.TF if not self.skip_pt: return self.RefBackend.PT + if not self.skip_jax: + return self.RefBackend.JAX raise ValueError("No available reference") def get_reference_ret_serialization(self, ref: RefBackend): @@ -359,6 +384,40 @@ def test_pt_self_consistent(self): else: self.assertEqual(rr1, rr2) + def test_jax_consistent_with_ref(self): + """Test whether JAX and reference are consistent.""" + if self.skip_jax: + self.skipTest("Unsupported backend") + ref_backend = self.get_reference_backend() + if ref_backend == self.RefBackend.JAX: + self.skipTest("Reference is self") + ret1, data1 = self.get_reference_ret_serialization(ref_backend) + ret1 = self.extract_ret(ret1, ref_backend) + jax_obj = self.jax_class.deserialize(data1) + ret2 = self.eval_jax(jax_obj) + ret2 = self.extract_ret(ret2, self.RefBackend.JAX) + data2 = jax_obj.serialize() + np.testing.assert_equal(data1, data2) + for rr1, rr2 in zip(ret1, ret2): + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) + assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + + def test_jax_self_consistent(self): + """Test whether JAX is self consistent.""" + if self.skip_jax: + self.skipTest("Unsupported backend") + obj1 = self.init_backend_cls(self.jax_class) + ret1, data1 = self.get_jax_ret_serialization_from_cls(obj1) + obj1 = self.jax_class.deserialize(data1) + ret2, data2 = self.get_jax_ret_serialization_from_cls(obj1) + np.testing.assert_equal(data1, data2) + for rr1, rr2 in zip(ret1, ret2): + if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray): + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) + assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + else: + self.assertEqual(rr1, rr2) + def tearDown(self) -> None: """Clear the TF session.""" if not self.skip_tf: diff --git a/source/tests/consistent/test_activation.py b/source/tests/consistent/test_activation.py index 3fcb9b2fa5..5630e913a8 100644 --- a/source/tests/consistent/test_activation.py +++ b/source/tests/consistent/test_activation.py @@ -1,4 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import sys import unittest import numpy as np @@ -12,6 +13,7 @@ GLOBAL_SEED, ) from .common import ( + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, parameterized, @@ -28,6 +30,10 @@ from deepmd.tf.env import ( tf, ) +if INSTALLED_JAX: + from deepmd.jax.env import ( + jnp, + ) @parameterized( @@ -57,3 +63,23 @@ def test_pt_consistent_with_ref(self): ActivationFn_pt(self.activation)(to_torch_tensor(self.random_input)) ) np.testing.assert_allclose(self.ref, test, atol=1e-10) + + @unittest.skipUnless( + sys.version_info >= (3, 9), "array_api_strict doesn't support Python<=3.8" + ) + def test_arary_api_strict(self): + import array_api_strict as xp + + xp.set_array_api_strict_flags( + api_version=get_activation_fn_dp.array_api_version + ) + input = xp.asarray(self.random_input) + test = get_activation_fn_dp(self.activation)(input) + np.testing.assert_allclose(self.ref, np.array(test), atol=1e-10) + + @unittest.skipUnless(INSTALLED_JAX, "JAX is not installed") + def test_jax_consistent_with_ref(self): + input = jnp.from_dlpack(self.random_input) + test = get_activation_fn_dp(self.activation)(input) + self.assertTrue(isinstance(test, jnp.ndarray)) + np.testing.assert_allclose(self.ref, np.from_dlpack(test), atol=1e-10) diff --git a/source/tests/consistent/test_type_embedding.py b/source/tests/consistent/test_type_embedding.py index 6583dddb5f..c66ef0fbaa 100644 --- a/source/tests/consistent/test_type_embedding.py +++ b/source/tests/consistent/test_type_embedding.py @@ -13,6 +13,7 @@ ) from .common import ( + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -30,6 +31,13 @@ from deepmd.tf.utils.type_embed import TypeEmbedNet as TypeEmbedNetTF else: TypeEmbedNetTF = object +if INSTALLED_JAX: + from deepmd.jax.env import ( + jnp, + ) + from deepmd.jax.utils.type_embed import TypeEmbedNet as TypeEmbedNetJAX +else: + TypeEmbedNetJAX = object @parameterized( @@ -63,7 +71,9 @@ def data(self) -> dict: tf_class = TypeEmbedNetTF dp_class = TypeEmbedNetDP pt_class = TypeEmbedNetPT + jax_class = TypeEmbedNetJAX args = type_embedding_args() + skip_jax = not INSTALLED_JAX @property def addtional_data(self) -> dict: @@ -103,6 +113,14 @@ def eval_pt(self, pt_obj: Any) -> Any: for x in (pt_obj(device=PT_DEVICE),) ] + def eval_jax(self, jax_obj: Any) -> Any: + out = jax_obj() + # ensure output is not numpy array + for x in (out,): + if isinstance(x, np.ndarray): + raise ValueError("Output is numpy array") + return [np.array(x) if isinstance(x, jnp.ndarray) else x for x in (out,)] + def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: return (ret[0],) From 15150f68b53e72162c06b28c092900d61abc8897 Mon Sep 17 00:00:00 2001 From: hztttt <940755193@qq.com> Date: Mon, 23 Sep 2024 13:38:23 +0800 Subject: [PATCH 010/193] fix tf interface for spin --- source/api_cc/include/DeepPotTF.h | 24 +++++--- source/api_cc/src/DeepPotTF.cc | 97 +++++++++++++++++++++++++------ 2 files changed, 93 insertions(+), 28 deletions(-) diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index dd8b10b375..dd42a2ae3b 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -335,25 +335,28 @@ class DeepPotTF : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); + + template void extend(int& extend_inum, std::vector& extend_ilist, std::vector& extend_numneigh, - std::vector>& extend_neigh, + std::vector>& extend_neigh, std::vector& extend_firstneigh, - std::vector& extend_dcoord, + std::vector& extend_dcoord, std::vector& extend_atype, int& extend_nghost, std::map& new_idx_map, std::map& old_idx_map, const InputNlist& lmp_list, - const std::vector& dcoord, + const std::vector& dcoord, const std::vector& atype, const int nghost, - const std::vector& spin, + const std::vector& spin, const int numb_types, const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm;); + const std::vector& virtual_len, + const std::vector& spin_norm); + void cum_sum(std::map &, std::map &); private: tensorflow::Session* session; @@ -362,6 +365,9 @@ class DeepPotTF : public DeepPotBase { bool inited; template VT get_scalar(const std::string& name) const; + template + void get_vector(std::vector& vec, const std::string& name) const; + double rcut; int dtype; double cell_size; @@ -369,14 +375,14 @@ class DeepPotTF : public DeepPotBase { std::string model_version; int ntypes; int ntypes_spin; - std::vector virtual_len; - std::vector spin_norm; + // std::vector virtual_len; + // std::vector spin_norm; int extend_inum; std::vector extend_ilist; std::vector extend_numneigh; std::vector> extend_neigh; std::vector extend_firstneigh; - std::vector extend_dcoord; + // std::vector extend_dcoord; std::vector extend_dtype; int extend_nghost; // for spin systems, search new index of atoms by their old index diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index 92773da2b4..271a33d8d1 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -477,8 +477,6 @@ void DeepPotTF::init(const std::string& model, ntypes = get_scalar("descrpt_attr/ntypes"); try { ntypes_spin = get_scalar("spin_attr/ntypes_spin"); - get_vector(virtual_len, "spin_attr/virtual_len"); - get_vector(spin_norm, "spin_attr/spin_norm"); } catch (const deepmd::deepmd_exception&) { ntypes_spin = 0; } @@ -510,6 +508,12 @@ VT DeepPotTF::get_scalar(const std::string& name) const { return session_get_scalar(session, name); } +template +void DeepPotTF::get_vector(std::vector &vec, + const std::string &name) const { + session_get_vector(vec, session, name); +} + template void DeepPotTF::validate_fparam_aparam( const int& nframes, @@ -844,13 +848,18 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, int nframes = nall > 0 ? (dcoord_.size() / nall / 3) : 1; int nloc = nall - nghost; + std::vector virtual_len; + std::vector spin_norm; + std::vector extend_dcoord; + get_vector(virtual_len, "spin_attr/virtual_len"); + get_vector(spin_norm, "spin_attr/spin_norm"); extend(extend_inum, extend_ilist, extend_numneigh, extend_neigh, extend_firstneigh, extend_dcoord, extend_dtype, extend_nghost, - new_idx_map, old_idx_map, lmp_list, dcoord, dtype, nghost, dspin_, - numb_types, numb_types_spin, virtual_len, spin_norm); + new_idx_map, old_idx_map, lmp_list, dcoord_, datype_, nghost, dspin_, + ntypes, ntypes_spin, virtual_len, spin_norm); // extend_lmp_list = InputNlist(extend_inum, &extend_ilist[0], // &extend_numneigh[0], &extend_firstneigh[0]); - deepmd_compat::InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], + InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], &extend_numneigh[0], &extend_firstneigh[0]); std::vector fparam; @@ -907,6 +916,7 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, } // bkw map + std::vector dforce_tmp; dforce_tmp.resize(static_cast(nframes) * fwd_map.size() * 3); datom_energy_.resize(static_cast(nframes) * fwd_map.size()); datom_virial_.resize(static_cast(nframes) * fwd_map.size() * 9); @@ -922,13 +932,13 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, for (int ii = 0; ii < nall; ++ii) { for (int dd = 0; dd < 3; ++dd) { int new_idx = new_idx_map[ii]; - dforce_[ii][dd] = dforce_tmp[3 * new_idx + dd]; - if (datype[ii] < numb_types_spin && ii < nlocal) { - dforce_mag_[ii][dd] = dforce_tmp[3 * (new_idx + nlocal) + dd]; - } else if (datype[ii] < numb_types_spin) { - dforce_mag_[ii][dd] = dforce_tmp[3 * (new_idx + nghost) + dd]; + dforce_[3*ii + dd] = dforce_tmp[3 * new_idx + dd]; + if (datype[ii] < ntypes_spin && ii < nloc) { + dforce_mag_[3*ii + dd] = dforce_tmp[3 * (new_idx + nloc) + dd]; + } else if (datype[ii] < ntypes_spin) { + dforce_mag_[3*ii + dd] = dforce_tmp[3 * (new_idx + nghost) + dd]; } else { - dforce_mag_[ii][dd] = 0.0; + dforce_mag_[3*ii + dd] = 0.0; } } } @@ -1251,25 +1261,34 @@ void DeepPotTF::computew_mixed_type(std::vector& ener, compute_mixed_type(ener, force, virial, atom_energy, atom_virial, nframes, coord, atype, box, fparam, aparam, atomic); } + +void DeepPotTF::cum_sum(std::map &sum, std::map &vec) { + sum[0] = 0; + for (int ii = 1; ii < vec.size(); ++ii) { + sum[ii] = sum[ii - 1] + vec[ii - 1]; + } +} + +template void DeepPotTF::extend(int& extend_inum, std::vector& extend_ilist, std::vector& extend_numneigh, - std::vector>& extend_neigh, + std::vector>& extend_neigh, std::vector& extend_firstneigh, - std::vector& extend_dcoord, + std::vector& extend_dcoord, std::vector& extend_atype, int& extend_nghost, std::map& new_idx_map, std::map& old_idx_map, const InputNlist& lmp_list, - const std::vector& dcoord, + const std::vector& dcoord, const std::vector& atype, const int nghost, - const std::vector& spin, + const std::vector& spin, const int numb_types, const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm) { + const std::vector& virtual_len, + const std::vector& spin_norm) { extend_ilist.clear(); extend_numneigh.clear(); extend_neigh.clear(); @@ -1290,7 +1309,7 @@ void DeepPotTF::extend(int& extend_inum, if (iter != loc_type_count.end()) { iter->second += 1; } else { - loc_type_count.insert(pair(atype[i], 1)); + loc_type_count.insert(std::pair(atype[i], 1)); } } assert(numb_types_real - 1 == loc_type_count.rbegin()->first); @@ -1306,7 +1325,7 @@ void DeepPotTF::extend(int& extend_inum, if (iter != ghost_type_count.end()) { iter->second += 1; } else { - ghost_type_count.insert(pair(atype[i], 1)); + ghost_type_count.insert(std::pair(atype[i], 1)); } } int nghost_virt = 0; @@ -1419,4 +1438,44 @@ void DeepPotTF::extend(int& extend_inum, } } } + +template void DeepPotTF::extend(int& extend_inum, + std::vector& extend_ilist, + std::vector& extend_numneigh, + std::vector>& extend_neigh, + std::vector& extend_firstneigh, + std::vector& extend_dcoord, + std::vector& extend_atype, + int& extend_nghost, + std::map& new_idx_map, + std::map& old_idx_map, + const InputNlist& lmp_list, + const std::vector& dcoord, + const std::vector& atype, + const int nghost, + const std::vector& spin, + const int numb_types, + const int numb_types_spin, + const std::vector& virtual_len, + const std::vector& spin_norm); + +template void DeepPotTF::extend(int& extend_inum, + std::vector& extend_ilist, + std::vector& extend_numneigh, + std::vector>& extend_neigh, + std::vector& extend_firstneigh, + std::vector& extend_dcoord, + std::vector& extend_atype, + int& extend_nghost, + std::map& new_idx_map, + std::map& old_idx_map, + const InputNlist& lmp_list, + const std::vector& dcoord, + const std::vector& atype, + const int nghost, + const std::vector& spin, + const int numb_types, + const int numb_types_spin, + const std::vector& virtual_len, + const std::vector& spin_norm); #endif From bdfe205faa019122a275d7e3515db76fe5019b85 Mon Sep 17 00:00:00 2001 From: hztttt <940755193@qq.com> Date: Mon, 23 Sep 2024 16:33:31 +0800 Subject: [PATCH 011/193] fix interface for multi model --- source/api_c/include/c_api.h | 4 +- source/api_c/src/c_api.cc | 12 ++- source/api_cc/include/DeepPot.h | 31 ++++++++ source/api_cc/src/DeepPot.cc | 125 ++++++++++++++++++++++++++++++++ 4 files changed, 167 insertions(+), 5 deletions(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index 9d2e732d6e..f62f438304 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -506,7 +506,7 @@ extern void DP_DeepPotComputeNListf2SP(DP_DeepPot* dp, float* force_mag, float* virial, float* atomic_energy, - float* atomic_virial) + float* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP with the mixed @@ -1001,7 +1001,7 @@ void DP_DeepPotModelDeviComputeNListf2SP(DP_DeepPotModelDevi* dp, float* force_mag, float* virial, float* atomic_energy, - float* atomic_virial) + float* atomic_virial); /** * @brief Get the type map of a DP model deviation. diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index e919833560..3241c3e63e 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -800,9 +800,15 @@ void DP_DeepPotModelDeviComputeNList_variant_sp(DP_DeepPotModelDevi* dp, // different from DeepPot std::vector e; std::vector> f, fm, v, ae, av; - DP_REQUIRES_OK( - dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, - nghost, nlist->nl, ago, fparam_, aparam_)); + if (atomic_energy || atomic_virial) { + DP_REQUIRES_OK( + dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, nghost, + nlist->nl, ago, fparam_, aparam_)); + } else { + DP_REQUIRES_OK( + dp, dp->dp.compute(e, f, fm, v, coord_, spin_, atype_, cell_, + nghost, nlist->nl, ago, fparam_, aparam_)); + } // 2D vector to 2D array, flatten first if (energy) { std::copy(e.begin(), e.end(), energy); diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index bd090a7b08..d906546ee4 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -824,6 +824,20 @@ class DeepPotModelDevi { const int& ago, const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); + template + void compute(std::vector& all_ener, + std::vector >& all_force, + std::vector >& all_force_mag, + std::vector >& all_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using these DP models. @@ -864,6 +878,23 @@ class DeepPotModelDevi { const int& ago, const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); + + template + void compute(std::vector& all_ener, + std::vector >& all_force, + std::vector >& all_force_mag, + std::vector >& all_virial, + std::vector >& all_atom_energy, + std::vector >& all_atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** * @brief Get the cutoff radius. * @return The cutoff radius. diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc index 52085748fa..284ef784f5 100644 --- a/source/api_cc/src/DeepPot.cc +++ b/source/api_cc/src/DeepPot.cc @@ -951,6 +951,64 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); +template +void DeepPotModelDevi::compute(std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_) { + if (numb_models == 0) { + return; + } + all_energy.resize(numb_models); + all_force.resize(numb_models); + all_force_mag.resize(numb_models); + all_virial.resize(numb_models); + for (unsigned ii = 0; ii < numb_models; ++ii) { + dps[ii].compute(all_energy[ii], all_force[ii], all_force_mag[ii], all_virial[ii], dcoord_, dspin_, + datype_, dbox, nghost, lmp_list, ago, fparam, aparam_); + } +} + +template void DeepPotModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepPotModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam); + + template void DeepPotModelDevi::compute( std::vector& all_energy, @@ -1011,6 +1069,73 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); +template +void DeepPotModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_) { + if (numb_models == 0) { + return; + } + all_energy.resize(numb_models); + all_force.resize(numb_models); + all_force_mag.resize(numb_models); + all_virial.resize(numb_models); + all_atom_energy.resize(numb_models); + all_atom_virial.resize(numb_models); + for (unsigned ii = 0; ii < numb_models; ++ii) { + dps[ii].compute(all_energy[ii], all_force[ii], all_force_mag[ii], all_virial[ii], + all_atom_energy[ii], all_atom_virial[ii], dcoord_, dspin_, datype_, + dbox, nghost, lmp_list, ago, fparam, aparam_); + } +} + +template void DeepPotModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepPotModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam); + template void DeepPotModelDevi::compute_avg(VALUETYPE& dener, const std::vector& all_energy) { From 508759c31c27972c12901f7df8ffd7a6f6bfcf25 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Wed, 25 Sep 2024 11:54:15 +0800 Subject: [PATCH 012/193] fix(pt ut): make separated uts deterministic (#4162) Fix failed uts in #4145 . ## Summary by CodeRabbit - **New Features** - Added a `"seed"` property to multiple JSON configuration files, enhancing control over randomness in model training and evaluation. - Introduced a global seed parameter in various test functions to improve reproducibility across test runs. - **Bug Fixes** - Ensured consistent random number generation in tests by integrating a global seed parameter. - **Documentation** - Updated configuration files and test methods to reflect the addition of the seed parameter for clarity and consistency. --- source/tests/pt/model/models/dpa1.json | 3 ++- source/tests/pt/model/models/dpa2.json | 1 + source/tests/pt/model/test_descriptor_se_r.py | 3 +++ source/tests/pt/model/test_dipole_fitting.py | 6 ++++++ source/tests/pt/model/test_dpa1.py | 3 +++ source/tests/pt/model/test_dpa2.py | 6 ++++++ source/tests/pt/model/test_embedding_net.py | 5 ++++- source/tests/pt/model/test_ener_fitting.py | 3 +++ source/tests/pt/model/test_permutation.py | 6 ++++++ source/tests/pt/model/test_polarizability_fitting.py | 6 ++++++ source/tests/pt/model/test_property_fitting.py | 10 ++++++++++ source/tests/pt/model/test_se_atten_v2.py | 5 +++++ source/tests/pt/model/test_se_e2_a.py | 3 +++ source/tests/pt/model/test_se_t.py | 2 ++ source/tests/pt/model/water/se_atten.json | 3 ++- 15 files changed, 62 insertions(+), 3 deletions(-) diff --git a/source/tests/pt/model/models/dpa1.json b/source/tests/pt/model/models/dpa1.json index 1321acbd53..a969c290ae 100644 --- a/source/tests/pt/model/models/dpa1.json +++ b/source/tests/pt/model/models/dpa1.json @@ -21,7 +21,8 @@ "activation_function": "tanh", "scaling_factor": 1.0, "normalize": true, - "temperature": 1.0 + "temperature": 1.0, + "seed": 1 }, "fitting_net": { "neuron": [ diff --git a/source/tests/pt/model/models/dpa2.json b/source/tests/pt/model/models/dpa2.json index 7495f5d78a..f83e319de3 100644 --- a/source/tests/pt/model/models/dpa2.json +++ b/source/tests/pt/model/models/dpa2.json @@ -42,6 +42,7 @@ "g1_out_conv": false, "g1_out_mlp": false }, + "seed": 1, "add_tebd_to_repinit_out": false }, "fitting_net": { diff --git a/source/tests/pt/model/test_descriptor_se_r.py b/source/tests/pt/model/test_descriptor_se_r.py index a2b9754714..f3692101c5 100644 --- a/source/tests/pt/model/test_descriptor_se_r.py +++ b/source/tests/pt/model/test_descriptor_se_r.py @@ -63,6 +63,7 @@ def test_consistency( resnet_dt=idt, old_impl=False, exclude_mask=em, + seed=GLOBAL_SEED, ).to(env.DEVICE) dd0.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) dd0.dstd = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) @@ -130,6 +131,7 @@ def test_load_stat(self): precision=prec, resnet_dt=idt, old_impl=False, + seed=GLOBAL_SEED, ) dd0.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) dd0.dstd = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) @@ -180,6 +182,7 @@ def test_jit( precision=prec, resnet_dt=idt, old_impl=False, + seed=GLOBAL_SEED, ) dd0.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) dd0.dstd = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/test_dipole_fitting.py b/source/tests/pt/model/test_dipole_fitting.py index cd3a032ecc..71da2781ac 100644 --- a/source/tests/pt/model/test_dipole_fitting.py +++ b/source/tests/pt/model/test_dipole_fitting.py @@ -87,6 +87,7 @@ def test_consistency( numb_fparam=nfp, numb_aparam=nap, mixed_types=self.dd0.mixed_types(), + seed=GLOBAL_SEED, ).to(env.DEVICE) ft1 = DPDipoleFitting.deserialize(ft0.serialize()) ft2 = DipoleFittingNet.deserialize(ft1.serialize()) @@ -139,6 +140,7 @@ def test_jit( numb_fparam=nfp, numb_aparam=nap, mixed_types=mixed_types, + seed=GLOBAL_SEED, ).to(env.DEVICE) torch.jit.script(ft0) @@ -180,6 +182,7 @@ def test_rot(self): numb_fparam=nfp, numb_aparam=nap, mixed_types=self.dd0.mixed_types(), + seed=GLOBAL_SEED, ).to(env.DEVICE) if nfp > 0: ifp = torch.tensor( @@ -234,6 +237,7 @@ def test_permu(self): numb_fparam=0, numb_aparam=0, mixed_types=self.dd0.mixed_types(), + seed=GLOBAL_SEED, ).to(env.DEVICE) res = [] for idx_perm in [[0, 1, 2, 3, 4], [1, 0, 4, 3, 2]]: @@ -280,6 +284,7 @@ def test_trans(self): numb_fparam=0, numb_aparam=0, mixed_types=self.dd0.mixed_types(), + seed=GLOBAL_SEED, ).to(env.DEVICE) res = [] for xyz in [self.coord, coord_s]: @@ -327,6 +332,7 @@ def setUp(self): numb_fparam=0, numb_aparam=0, mixed_types=self.dd0.mixed_types(), + seed=GLOBAL_SEED, ).to(env.DEVICE) self.type_mapping = ["O", "H", "B"] self.model = DipoleModel(self.dd0, self.ft0, self.type_mapping) diff --git a/source/tests/pt/model/test_dpa1.py b/source/tests/pt/model/test_dpa1.py index f1994504fc..b825885311 100644 --- a/source/tests/pt/model/test_dpa1.py +++ b/source/tests/pt/model/test_dpa1.py @@ -71,6 +71,7 @@ def test_consistency( use_econf_tebd=ect, type_map=["O", "H"] if ect else None, old_impl=False, + seed=GLOBAL_SEED, ).to(env.DEVICE) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) dd0.se_atten.stddev = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) @@ -125,6 +126,7 @@ def test_consistency( resnet_dt=idt, smooth_type_embedding=sm, old_impl=True, + seed=GLOBAL_SEED, ).to(env.DEVICE) dd0_state_dict = dd0.se_atten.state_dict() dd3_state_dict = dd3.se_atten.state_dict() @@ -210,6 +212,7 @@ def test_jit( use_econf_tebd=ect, type_map=["O", "H"] if ect else None, old_impl=False, + seed=GLOBAL_SEED, ) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) dd0.se_atten.dstd = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/test_dpa2.py b/source/tests/pt/model/test_dpa2.py index f11be532cb..0beb34c031 100644 --- a/source/tests/pt/model/test_dpa2.py +++ b/source/tests/pt/model/test_dpa2.py @@ -20,6 +20,9 @@ PRECISION_DICT, ) +from ...seed import ( + GLOBAL_SEED, +) from .test_env_mat import ( TestCaseSingleFrameWithNlist, ) @@ -152,6 +155,7 @@ def test_consistency( use_econf_tebd=ect, type_map=["O", "H"] if ect else None, old_impl=False, + seed=GLOBAL_SEED, ).to(env.DEVICE) dd0.repinit.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) @@ -201,6 +205,7 @@ def test_consistency( add_tebd_to_repinit_out=False, precision=prec, old_impl=True, + seed=GLOBAL_SEED, ).to(env.DEVICE) dd0_state_dict = dd0.state_dict() dd3_state_dict = dd3.state_dict() @@ -346,6 +351,7 @@ def test_jit( use_econf_tebd=ect, type_map=["O", "H"] if ect else None, old_impl=False, + seed=GLOBAL_SEED, ).to(env.DEVICE) dd0.repinit.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/test_embedding_net.py b/source/tests/pt/model/test_embedding_net.py index 77d14db2a4..3605316437 100644 --- a/source/tests/pt/model/test_embedding_net.py +++ b/source/tests/pt/model/test_embedding_net.py @@ -39,6 +39,9 @@ ) from deepmd.tf.descriptor import DescrptSeA as DescrptSeA_tf +from ...seed import ( + GLOBAL_SEED, +) from ..test_finetune import ( energy_data_requirement, ) @@ -153,7 +156,7 @@ def test_consistency(self): sel=self.sel, neuron=self.filter_neuron, axis_neuron=self.axis_neuron, - seed=1, + seed=GLOBAL_SEED, ) dp_embedding, dp_force, dp_vars = base_se_a( descriptor=dp_d, diff --git a/source/tests/pt/model/test_ener_fitting.py b/source/tests/pt/model/test_ener_fitting.py index 07c0d19935..3255db2784 100644 --- a/source/tests/pt/model/test_ener_fitting.py +++ b/source/tests/pt/model/test_ener_fitting.py @@ -65,6 +65,7 @@ def test_consistency( mixed_types=mixed_types, exclude_types=et, neuron=nn, + seed=GLOBAL_SEED, ).to(env.DEVICE) ft1 = DPInvarFitting.deserialize(ft0.serialize()) ft2 = InvarFitting.deserialize(ft0.serialize()) @@ -168,6 +169,7 @@ def test_jit( numb_aparam=nap, mixed_types=mixed_types, exclude_types=et, + seed=GLOBAL_SEED, ).to(env.DEVICE) torch.jit.script(ft0) @@ -177,6 +179,7 @@ def test_get_set(self): self.nt, 3, 1, + seed=GLOBAL_SEED, ) rng = np.random.default_rng(GLOBAL_SEED) foo = rng.normal([3, 4]) diff --git a/source/tests/pt/model/test_permutation.py b/source/tests/pt/model/test_permutation.py index 2fbc5fde3c..6aec895041 100644 --- a/source/tests/pt/model/test_permutation.py +++ b/source/tests/pt/model/test_permutation.py @@ -88,6 +88,7 @@ "temperature": 1.0, "set_davg_zero": True, "type_one_side": True, + "seed": 1, }, "fitting_net": { "neuron": [24, 24, 24], @@ -155,6 +156,7 @@ "update_g2_has_attn": True, "attn2_has_gate": True, }, + "seed": 1, "add_tebd_to_repinit_out": False, }, "fitting_net": { @@ -207,6 +209,7 @@ "g1_out_conv": True, "g1_out_mlp": True, }, + "seed": 1, "add_tebd_to_repinit_out": False, }, "fitting_net": { @@ -235,6 +238,7 @@ "temperature": 1.0, "set_davg_zero": True, "type_one_side": True, + "seed": 1, }, "fitting_net": { "neuron": [24, 24, 24], @@ -264,6 +268,7 @@ "scaling_factor": 1.0, "normalize": True, "temperature": 1.0, + "seed": 1, }, { "type": "dpa2", @@ -296,6 +301,7 @@ "update_g2_has_attn": True, "attn2_has_gate": True, }, + "seed": 1, "add_tebd_to_repinit_out": False, }, ], diff --git a/source/tests/pt/model/test_polarizability_fitting.py b/source/tests/pt/model/test_polarizability_fitting.py index ba1bf2ea29..1ca563a8c2 100644 --- a/source/tests/pt/model/test_polarizability_fitting.py +++ b/source/tests/pt/model/test_polarizability_fitting.py @@ -77,6 +77,7 @@ def test_consistency( mixed_types=self.dd0.mixed_types(), fit_diag=fit_diag, scale=scale, + seed=GLOBAL_SEED, ).to(env.DEVICE) ft1 = DPPolarFitting.deserialize(ft0.serialize()) ft2 = PolarFittingNet.deserialize(ft0.serialize()) @@ -143,6 +144,7 @@ def test_jit( numb_aparam=nap, mixed_types=mixed_types, fit_diag=fit_diag, + seed=GLOBAL_SEED, ).to(env.DEVICE) torch.jit.script(ft0) @@ -186,6 +188,7 @@ def test_rot(self): mixed_types=self.dd0.mixed_types(), fit_diag=fit_diag, scale=scale, + seed=GLOBAL_SEED, ).to(env.DEVICE) if nfp > 0: ifp = torch.tensor( @@ -248,6 +251,7 @@ def test_permu(self): mixed_types=self.dd0.mixed_types(), fit_diag=fit_diag, scale=scale, + seed=GLOBAL_SEED, ).to(env.DEVICE) res = [] for idx_perm in [[0, 1, 2, 3, 4], [1, 0, 4, 3, 2]]: @@ -298,6 +302,7 @@ def test_trans(self): mixed_types=self.dd0.mixed_types(), fit_diag=fit_diag, scale=scale, + seed=GLOBAL_SEED, ).to(env.DEVICE) res = [] for xyz in [self.coord, coord_s]: @@ -347,6 +352,7 @@ def setUp(self): numb_fparam=0, numb_aparam=0, mixed_types=self.dd0.mixed_types(), + seed=GLOBAL_SEED, ).to(env.DEVICE) self.type_mapping = ["O", "H", "B"] self.model = PolarModel(self.dd0, self.ft0, self.type_mapping) diff --git a/source/tests/pt/model/test_property_fitting.py b/source/tests/pt/model/test_property_fitting.py index 59a5b1b172..dfe2725f3b 100644 --- a/source/tests/pt/model/test_property_fitting.py +++ b/source/tests/pt/model/test_property_fitting.py @@ -32,6 +32,9 @@ to_numpy_array, ) +from ...seed import ( + GLOBAL_SEED, +) from .test_env_mat import ( TestCaseSingleFrameWithNlist, ) @@ -78,6 +81,7 @@ def test_consistency( bias_atom_p=bias_atom_p, intensive=intensive, bias_method=bias_method, + seed=GLOBAL_SEED, ).to(env.DEVICE) ft1 = DPProperFittingNet.deserialize(ft0.serialize()) @@ -146,6 +150,7 @@ def test_jit( mixed_types=self.dd0.mixed_types(), intensive=intensive, bias_method=bias_method, + seed=GLOBAL_SEED, ).to(env.DEVICE) torch.jit.script(ft0) @@ -199,6 +204,7 @@ def test_trans(self): numb_fparam=0, numb_aparam=0, mixed_types=self.dd0.mixed_types(), + seed=GLOBAL_SEED, ).to(env.DEVICE) res = [] for xyz in [self.coord, coord_s]: @@ -266,6 +272,7 @@ def test_rot(self): mixed_types=self.dd0.mixed_types(), intensive=intensive, bias_method=bias_method, + seed=GLOBAL_SEED, ).to(env.DEVICE) if nfp > 0: ifp = torch.tensor( @@ -320,6 +327,7 @@ def test_permu(self): numb_fparam=0, numb_aparam=0, mixed_types=self.dd0.mixed_types(), + seed=GLOBAL_SEED, ).to(env.DEVICE) res = [] for idx_perm in [[0, 1, 2, 3, 4], [1, 0, 4, 3, 2]]: @@ -367,6 +375,7 @@ def test_trans(self): numb_fparam=0, numb_aparam=0, mixed_types=self.dd0.mixed_types(), + seed=GLOBAL_SEED, ).to(env.DEVICE) res = [] for xyz in [self.coord, coord_s]: @@ -417,6 +426,7 @@ def setUp(self): numb_aparam=0, mixed_types=self.dd0.mixed_types(), intensive=True, + seed=GLOBAL_SEED, ).to(env.DEVICE) self.type_mapping = ["O", "H", "B"] self.model = PropertyModel(self.dd0, self.ft0, self.type_mapping) diff --git a/source/tests/pt/model/test_se_atten_v2.py b/source/tests/pt/model/test_se_atten_v2.py index caecd0a118..f9857fc728 100644 --- a/source/tests/pt/model/test_se_atten_v2.py +++ b/source/tests/pt/model/test_se_atten_v2.py @@ -16,6 +16,9 @@ PRECISION_DICT, ) +from ...seed import ( + GLOBAL_SEED, +) from .test_env_mat import ( TestCaseSingleFrameWithNlist, ) @@ -64,6 +67,7 @@ def test_consistency( use_econf_tebd=ect, type_map=["O", "H"] if ect else None, old_impl=False, + seed=GLOBAL_SEED, ).to(env.DEVICE) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) dd0.se_atten.stddev = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) @@ -135,6 +139,7 @@ def test_jit( use_econf_tebd=ect, type_map=["O", "H"] if ect else None, old_impl=False, + seed=GLOBAL_SEED, ) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) dd0.se_atten.dstd = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/test_se_e2_a.py b/source/tests/pt/model/test_se_e2_a.py index 75d47c9054..abe13ce86e 100644 --- a/source/tests/pt/model/test_se_e2_a.py +++ b/source/tests/pt/model/test_se_e2_a.py @@ -60,6 +60,7 @@ def test_consistency( resnet_dt=idt, old_impl=False, exclude_types=em, + seed=GLOBAL_SEED, ).to(env.DEVICE) dd0.sea.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) dd0.sea.dstd = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) @@ -113,6 +114,7 @@ def test_consistency( precision=prec, resnet_dt=idt, old_impl=True, + seed=GLOBAL_SEED, ).to(env.DEVICE) dd0_state_dict = dd0.sea.state_dict() dd3_state_dict = dd3.sea.state_dict() @@ -168,6 +170,7 @@ def test_jit( precision=prec, resnet_dt=idt, old_impl=False, + seed=GLOBAL_SEED, ) dd0.sea.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) dd0.sea.dstd = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/test_se_t.py b/source/tests/pt/model/test_se_t.py index 0d6c87ba8d..d3968d7f03 100644 --- a/source/tests/pt/model/test_se_t.py +++ b/source/tests/pt/model/test_se_t.py @@ -63,6 +63,7 @@ def test_consistency( precision=prec, resnet_dt=idt, exclude_types=em, + seed=GLOBAL_SEED, ).to(env.DEVICE) dd0.seat.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) dd0.seat.dstd = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) @@ -131,6 +132,7 @@ def test_jit( self.sel, precision=prec, resnet_dt=idt, + seed=GLOBAL_SEED, ) dd0.seat.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) dd0.seat.dstd = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/water/se_atten.json b/source/tests/pt/model/water/se_atten.json index 71cee94d8b..4b4c54e0d2 100644 --- a/source/tests/pt/model/water/se_atten.json +++ b/source/tests/pt/model/water/se_atten.json @@ -24,7 +24,8 @@ "activation_function": "tanh", "scaling_factor": 1.0, "normalize": false, - "temperature": 1.0 + "temperature": 1.0, + "seed": 1 }, "fitting_net": { "neuron": [ From 0b3f860424550dcec0cdda012138fb9eafcaba92 Mon Sep 17 00:00:00 2001 From: Chenqqian Zhang <100290172+Chengqian-Zhang@users.noreply.github.com> Date: Wed, 25 Sep 2024 13:41:32 +0800 Subject: [PATCH 013/193] fix(pt): finetuning property/dipole/polar/dos fitting with multi-dimensional data causes error (#4145) Fix issue #4108 If a pretrained model is labeled with energy and the `out_bias` is one dimension. If we want to finetune a dos/polar/dipole/property model using this pretrained model, the `out_bias` of finetuning model is multi-dimension(example: numb_dos = 250). An error occurs: `RuntimeError: Error(s) in loading state_dict for ModelWrapper:` ` size mismatch for model.Default.atomic_model.out_bias: copying a param with shape torch.Size([1, 118, 1]) from checkpoint, the shape in current model is torch.Size([1, 118, 250]).` ` size mismatch for model.Default.atomic_model.out_std: copying a param with shape torch.Size([1, 118, 1]) from checkpoint, the shape in current model is torch.Size([1, 118, 250]).` When using new fitting, old out_bias is useless because we will recompute the new bias in later code. So we do not need to load old out_bias when using new fitting finetune. ## Summary by CodeRabbit - **New Features** - Enhanced parameter collection for fine-tuning, refining criteria for parameter retention. - Introduced a model checkpoint file for saving and resuming training states, facilitating iterative development. - **Tests** - Added a new test class to validate training and fine-tuning processes, ensuring model performance consistency across configurations. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/pt/train/training.py | 2 +- source/tests/pt/test_training.py | 68 ++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 1 deletion(-) diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index c3d603dadd..9bdc80195f 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -484,7 +484,7 @@ def collect_single_finetune_params( if i != "_extra_state" and f".{_model_key}." in i ] for item_key in target_keys: - if _new_fitting and ".fitting_net." in item_key: + if _new_fitting and (".descriptor." not in item_key): # print(f'Keep {item_key} in old model!') _new_state_dict[item_key] = ( _random_state_dict[item_key].clone().detach() diff --git a/source/tests/pt/test_training.py b/source/tests/pt/test_training.py index 0833200d47..fa9e5c138a 100644 --- a/source/tests/pt/test_training.py +++ b/source/tests/pt/test_training.py @@ -448,5 +448,73 @@ def tearDown(self) -> None: DPTrainTest.tearDown(self) +class TestPropFintuFromEnerModel(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_dpa1) + self.config["model"]["type_map"] = ["H", "C", "N", "O"] + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + + property_input = str(Path(__file__).parent / "property/input.json") + with open(property_input) as f: + self.config_property = json.load(f) + prop_data_file = [str(Path(__file__).parent / "property/single")] + self.config_property["training"]["training_data"]["systems"] = prop_data_file + self.config_property["training"]["validation_data"]["systems"] = prop_data_file + self.config_property["model"]["descriptor"] = deepcopy(model_dpa1["descriptor"]) + self.config_property["training"]["numb_steps"] = 1 + self.config_property["training"]["save_freq"] = 1 + + def test_dp_train(self): + # test training from scratch + trainer = get_trainer(deepcopy(self.config)) + trainer.run() + state_dict_trained = trainer.wrapper.model.state_dict() + + # test fine-tuning using diffferent fitting_net, here using property fitting + finetune_model = self.config["training"].get("save_ckpt", "model.ckpt") + ".pt" + self.config_property["model"], finetune_links = get_finetune_rules( + finetune_model, + self.config_property["model"], + model_branch="RANDOM", + ) + trainer_finetune = get_trainer( + deepcopy(self.config_property), + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + + # check parameters + state_dict_finetuned = trainer_finetune.wrapper.model.state_dict() + for state_key in state_dict_finetuned: + if ( + "out_bias" not in state_key + and "out_std" not in state_key + and "fitting" not in state_key + ): + torch.testing.assert_close( + state_dict_trained[state_key], + state_dict_finetuned[state_key], + ) + + # check running + trainer_finetune.run() + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith(".pt"): + os.remove(f) + if f in ["lcurve.out"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) + + if __name__ == "__main__": unittest.main() From ab48d90bb9076c7e2278f8414129820342d3c31b Mon Sep 17 00:00:00 2001 From: Chun Cai Date: Fri, 27 Sep 2024 08:38:11 +0800 Subject: [PATCH 014/193] Feat: output logs on freezing models (#4165) `dp --pt freeze` does not output whether the operation finishes successfully. This PR adds a log message on it. --- deepmd/pt/entrypoints/main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/deepmd/pt/entrypoints/main.py b/deepmd/pt/entrypoints/main.py index 9133575ec8..3df05cbb47 100644 --- a/deepmd/pt/entrypoints/main.py +++ b/deepmd/pt/entrypoints/main.py @@ -354,6 +354,7 @@ def freeze(FLAGS): FLAGS.output, extra_files, ) + log.info(f"Saved frozen model to {FLAGS.output}") def change_bias(FLAGS): From be59313071f1e0d7fad2fb4fb7051c3dcbe156b1 Mon Sep 17 00:00:00 2001 From: hztttt <940755193@qq.com> Date: Wed, 25 Sep 2024 11:58:39 +0800 Subject: [PATCH 015/193] support spin_norm & virtual_len in model graph and fix bug --- deepmd/tf/entrypoints/freeze.py | 4 ++++ source/lmp/pair_deepmd.cpp | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/deepmd/tf/entrypoints/freeze.py b/deepmd/tf/entrypoints/freeze.py index 787d26e9a4..6ca45773b5 100755 --- a/deepmd/tf/entrypoints/freeze.py +++ b/deepmd/tf/entrypoints/freeze.py @@ -124,6 +124,8 @@ def _make_node_names( "o_atom_energy", "o_atom_virial", "spin_attr/ntypes_spin", + "spin_attr/virtual_len", + "spin_attr/spin_norm", "fitting_attr/dfparam", "fitting_attr/daparam", "fitting_attr/aparam_nall", @@ -259,6 +261,8 @@ def freeze_graph( "train_attr/min_nbor_dist", "fitting_attr/aparam_nall", "spin_attr/ntypes_spin", + "spin_attr/virtual_len", + "spin_attr/spin_norm" ] different_set = set(output_node) - set(input_node) if different_set: diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index a0dc4faae7..0ff3a869a2 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -675,7 +675,7 @@ void PairDeepMD::compute(int eflag, int vflag) { if (!(eflag_atom || cvflag_atom)) { try { deep_pot_model_devi.compute(all_energy, all_force, all_force_mag, - all_virial, dcoord, dtype, dbox, dspin, + all_virial, dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); From 94fe957ca1ccb8dc7e1c73ca3c3c76ff9411d307 Mon Sep 17 00:00:00 2001 From: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> Date: Fri, 27 Sep 2024 17:27:56 +0800 Subject: [PATCH 016/193] chore: change econf embed to spin representation (#4166) ## Summary by CodeRabbit - **New Features** - Introduced a function to transform electronic configuration data into a spin representation, enhancing how electronic states are represented. - Updated the electronic configuration embedding to include negative values, reflecting a new encoding scheme. - Added a normalization function for electronic configuration vectors to improve data consistency. - **Tests** - Added a new test for the spin representation functionality to ensure accuracy of the transformation for iron (Fe). - Updated existing tests to align with the new expected output format. --------- Co-authored-by: Han Wang Co-authored-by: Duo <50307526+iProzd@users.noreply.github.com> --- deepmd/dpmodel/utils/type_embed.py | 5 +- deepmd/utils/econf_embd.py | 288 ++++++++++++++----------- source/tests/common/test_econf_embd.py | 16 +- 3 files changed, 186 insertions(+), 123 deletions(-) diff --git a/deepmd/dpmodel/utils/type_embed.py b/deepmd/dpmodel/utils/type_embed.py index e11c415cfd..04c05b6a39 100644 --- a/deepmd/dpmodel/utils/type_embed.py +++ b/deepmd/dpmodel/utils/type_embed.py @@ -222,7 +222,9 @@ def change_type_map( def get_econf_tebd(type_map, precision: str = "default"): from deepmd.utils.econf_embd import ( ECONF_DIM, - electronic_configuration_embedding, + ) + from deepmd.utils.econf_embd import ( + normalized_electronic_configuration_embedding as electronic_configuration_embedding, ) from deepmd.utils.econf_embd import type_map as periodic_table @@ -240,6 +242,5 @@ def get_econf_tebd(type_map, precision: str = "default"): [electronic_configuration_embedding[kk] for kk in type_map], dtype=PRECISION_DICT[precision], ) - econf_tebd /= econf_tebd.sum(-1, keepdims=True) # do normalization embed_input_dim = ECONF_DIM return econf_tebd, embed_input_dim diff --git a/deepmd/utils/econf_embd.py b/deepmd/utils/econf_embd.py index cdd9525f6b..7f12206ae3 100644 --- a/deepmd/utils/econf_embd.py +++ b/deepmd/utils/econf_embd.py @@ -1,135 +1,148 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Dict, + List, +) + import numpy as np from mendeleev import ( element, ) +__all__ = [ + "electronic_configuration_embedding", + "normalized_electronic_configuration_embedding", + "make_econf_embedding", + "transform_to_spin_rep", +] + ### # made by command # ret = make_econf_embedding(type_map, flatten=True) +# ret = transform_to_spin_rep(ret) # print_econf_embedding(ret) ### # fmt: off electronic_configuration_embedding = \ { kk: np.array(vv, dtype=np.int32) for kk,vv in { - "H" : [1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "He" : [2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Li" : [2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Be" : [2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "B" : [2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "C" : [2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "N" : [2,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "O" : [2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "F" : [2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ne" : [2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Na" : [2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Mg" : [2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Al" : [2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Si" : [2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "P" : [2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "S" : [2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Cl" : [2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ar" : [2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "K" : [2,2,2,2,2,2,2,2,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ca" : [2,2,2,2,2,2,2,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Sc" : [2,2,2,2,2,2,2,2,2,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ti" : [2,2,2,2,2,2,2,2,2,1,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "V" : [2,2,2,2,2,2,2,2,2,1,1,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Cr" : [2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Mn" : [2,2,2,2,2,2,2,2,2,1,1,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Fe" : [2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Co" : [2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ni" : [2,2,2,2,2,2,2,2,2,2,2,2,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Cu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Zn" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ga" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ge" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "As" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Se" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Br" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Kr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Rb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Sr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Y" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Zr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Nb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Mo" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Tc" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ru" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Rh" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Pd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ag" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Cd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "In" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Sn" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Sb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Te" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "I" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Xe" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Cs" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0], - "Ba" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "La" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Ce" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Pr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Nd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Pm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Sm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Eu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Gd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Tb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Dy" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Ho" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Er" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Tm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Yb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Lu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Hf" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Ta" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "W" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Re" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Os" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Ir" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Pt" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0], - "Au" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0], - "Hg" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], - "Tl" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0], - "Pb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0], - "Bi" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,1,0,0,0,0,0,0,0,0,0], - "Po" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,0,0,0], - "At" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,1,0,0,0,0,0,0,0,0,0], - "Rn" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0], - "Fr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,1,0,0,0], - "Ra" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,2,0,0,0], - "Ac" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,1,0,0,0,0,2,0,0,0], - "Th" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,1,1,0,0,0,2,0,0,0], - "Pa" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,2,2,2,2,1,0,0,0,0,2,0,0,0], - "U" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,2,2,2,2,1,0,0,0,0,2,0,0,0], - "Np" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,2,2,2,2,1,0,0,0,0,2,0,0,0], - "Pu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,0,2,2,2,2,0,0,0,0,0,2,0,0,0], - "Am" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,0,0,0,0,0,2,0,0,0], - "Cm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,1,0,0,0,0,2,0,0,0], - "Bk" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,2,2,2,2,0,0,0,0,0,2,0,0,0], - "Cf" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,2,2,2,0,0,0,0,0,2,0,0,0], - "Es" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,2,2,2,0,0,0,0,0,2,0,0,0], - "Fm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,2,2,2,2,0,0,0,0,0,2,0,0,0], - "Md" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,0,0,0,0,0,2,0,0,0], - "No" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,2,0,0,0], - "Lr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,2,0,0,0], - "Rf" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,2,0,0,0], - "Db" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,2,0,0,0], - "Sg" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,2,0,0,0], - "Bh" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,2,0,0,0], - "Hs" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,0,0,0], - "Mt" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,0,0,0], - "Ds" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0], - "Rg" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0], - "Cn" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0], - "Nh" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0], - "Fl" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0], - "Mc" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1], - "Lv" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1], - "Ts" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1], - "Og" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2], + "H" : [-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "He" : [ 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Li" : [ 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Be" : [ 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "B" : [ 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "C" : [ 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "N" : [ 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "O" : [ 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "F" : [ 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ne" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Na" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Mg" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Al" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Si" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "P" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "S" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Cl" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ar" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "K" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ca" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Sc" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ti" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "V" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Cr" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Mn" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Fe" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Co" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ni" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Cu" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Zn" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ga" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ge" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "As" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Se" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Br" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Kr" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Rb" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Sr" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Y" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Zr" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Nb" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Mo" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Tc" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ru" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Rh" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Pd" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ag" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Cd" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "In" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Sn" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Sb" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Te" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "I" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Xe" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Cs" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ba" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "La" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ce" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Pr" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Nd" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Pm" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Sm" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Eu" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Gd" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Tb" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Dy" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ho" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Er" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Tm" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Yb" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Lu" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Hf" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ta" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "W" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Re" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Os" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Ir" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Pt" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Au" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Hg" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Tl" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Pb" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Bi" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Po" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "At" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Rn" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], + "Fr" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1,-1,-1,-1,-1,-1,-1], + "Ra" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Ac" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Th" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Pa" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "U" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Np" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Pu" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Am" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Cm" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Bk" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Cf" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Es" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Fm" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Md" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "No" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Lr" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Rf" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Db" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1,-1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Sg" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1,-1, 1, 1,-1,-1,-1,-1,-1,-1], + "Bh" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1,-1,-1,-1,-1,-1,-1], + "Hs" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1,-1, 1, 1, 1,-1,-1,-1,-1,-1,-1], + "Mt" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1, 1, 1,-1,-1,-1,-1,-1,-1], + "Ds" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1,-1,-1,-1,-1], + "Rg" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1,-1,-1], + "Cn" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1], + "Nh" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1,-1,-1,-1], + "Fl" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1,-1], + "Mc" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1,-1, 1], + "Lv" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1,-1, 1], + "Ts" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1, 1], + "Og" : [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], }.items()} # fmt: on @@ -172,6 +185,16 @@ ECONF_DIM = electronic_configuration_embedding[type_map[0]].shape[0] +def normalize_vec_length(res): + scale = 1.0 / np.sqrt(ECONF_DIM) + return {kk: scale * vv for kk, vv in res.items()} + + +normalized_electronic_configuration_embedding = normalize_vec_length( + electronic_configuration_embedding +) + + def make_empty_list_vec(): ret = {} for kk in conf_keys: @@ -204,7 +227,10 @@ def make_element_embedding_list_vec( return ret -def make_econf_embedding(types, flatten=True): +def make_econf_embedding( + types: List[str], flatten: bool = True +) -> Dict[str, np.ndarray]: + """Make the electronic configuration embedding.""" all_ret = {} for ii in types: ir = make_element_embedding_list_vec(ii) @@ -214,7 +240,29 @@ def make_econf_embedding(types, flatten=True): return all_ret -def print_econf_embedding(res): +def transform_to_spin_rep(res: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + """Tranform electron occupation of 0/1/2 to -1,-1/-1,1/1,1.""" + ret = {} + + def transform(ii): + if ii == 0: + return [-1, -1] + elif ii == 1: + return [-1, 1] + elif ii == 2: + return [1, 1] + else: + raise ValueError(f"wrong input value {ii}") + + for kk, vv in res.items(): + transformed_list = [spin for ii in vv for spin in transform(ii)] + new_vv = np.array(transformed_list, dtype=np.int32) + ret[kk] = new_vv + return ret + + +def print_econf_embedding(res: Dict[str, np.ndarray]): + """Print electron configuration embedding.""" for kk, vv in res.items(): vvstr = ",".join([str(ii) for ii in vv]) space = " " * (2 - len(kk)) diff --git a/source/tests/common/test_econf_embd.py b/source/tests/common/test_econf_embd.py index d24115c860..242ea9ca65 100644 --- a/source/tests/common/test_econf_embd.py +++ b/source/tests/common/test_econf_embd.py @@ -4,6 +4,8 @@ from deepmd.utils.econf_embd import ( electronic_configuration_embedding, make_econf_embedding, + normalized_electronic_configuration_embedding, + transform_to_spin_rep, ) @@ -40,9 +42,21 @@ def test_fe_flatten(self): # fmt: on self.assertEqual(list(res), expected_res) + def test_fe_spin(self): + res = make_econf_embedding(["Fe"], flatten=True) + res = transform_to_spin_rep(res)["Fe"] + # fmt: off + expected_res = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,1,-1,1,-1,1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1] + # fmt: on + self.assertEqual(list(res), expected_res) + def test_dict(self): res = electronic_configuration_embedding["Fe"] # fmt: off - expected_res = [2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] + expected_res = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,1,-1,1,-1,1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1] # fmt: on self.assertEqual(list(res), expected_res) + res = normalized_electronic_configuration_embedding["Fe"] + self.assertEqual( + list(res), [ii / len(expected_res) ** 0.5 for ii in expected_res] + ) From ad8bebe8b8af6e14ce466fa60be61caa4fc7c9db Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 27 Sep 2024 21:56:18 +0000 Subject: [PATCH 017/193] [pre-commit.ci] pre-commit autoupdate (#4159) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.5 → v0.6.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.5...v0.6.7) - https://github.com/pylint-dev/pylint/: v3.2.7 → v3.3.0 --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> Co-authored-by: Jinzhe Zeng --- .pre-commit-config.yaml | 4 ++-- pyproject.toml | 2 +- source/checker/README.md | 4 ++-- source/checker/deepmd_checker.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d2fc1d0ab8..5d34f39752 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.6.5 + rev: v0.6.7 hooks: - id: ruff args: ["--fix"] @@ -146,7 +146,7 @@ repos: exclude: .pre-commit-config.yaml|source/lmp # customized pylint rules - repo: https://github.com/pylint-dev/pylint/ - rev: v3.2.7 + rev: v3.3.0 hooks: - id: pylint entry: env PYTHONPATH=source/checker pylint diff --git a/pyproject.toml b/pyproject.toml index 28fe114e01..a1829016cb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -414,4 +414,4 @@ plugins = ["source.3rdparty.coverage_plugins.jit_plugin"] [tool.pylint.'MESSAGES CONTROL'] load-plugins = "deepmd_checker" disable = "all" -enable = "EDP01,EDP02" +enable = "E8001,E8002" diff --git a/source/checker/README.md b/source/checker/README.md index 2a905f93a5..368852034c 100644 --- a/source/checker/README.md +++ b/source/checker/README.md @@ -1,4 +1,4 @@ # DeePMD-kit customized Pylint plugin -- EDP01: Require explicit device when initializing a PyTorch tensor. -- EDP02: Require explicit dtype when initializing a NumPy array, a TensorFlow tensor, or a PyTorch tensor. +- E8001: Require explicit device when initializing a PyTorch tensor. +- E8002: Require explicit dtype when initializing a NumPy array, a TensorFlow tensor, or a PyTorch tensor. diff --git a/source/checker/deepmd_checker.py b/source/checker/deepmd_checker.py index 052d011c47..d763835fdc 100644 --- a/source/checker/deepmd_checker.py +++ b/source/checker/deepmd_checker.py @@ -21,12 +21,12 @@ class DPChecker(BaseChecker): name = "deepmd-checker" msgs: ClassVar[dict] = { - "EDP01": ( + "E8001": ( "No explicit device.", "no-explicit-device", "Require explicit device when initializing a PyTorch tensor.", ), - "EDP02": ( + "E8002": ( "No explicit dtype.", "no-explicit-dtype", "Require explicit dtype when initializing a NumPy array, a TensorFlow tensor, or a PyTorch tensor.", From ec7c16b0ab90b8e138a13217402d80935ff9cc7d Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 28 Sep 2024 19:45:30 +0800 Subject: [PATCH 018/193] fix pt --- source/api_cc/src/DeepPotPT.cc | 4 ++-- source/lmp/pair_deepmd.cpp | 11 +++++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index ed6d3f9eb1..3b62a44ef5 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -507,6 +507,7 @@ template void DeepPotPT::compute>( std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, + const std::vector& spin, const std::vector& atype, const std::vector& box, const int nghost, @@ -523,6 +524,7 @@ template void DeepPotPT::compute>( std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, + const std::vector& spin, const std::vector& atype, const std::vector& box, const int nghost, @@ -636,7 +638,6 @@ template void DeepPotPT::compute>( std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, - const std::vector& spin, const std::vector& atype, const std::vector& box, const std::vector& fparam, @@ -649,7 +650,6 @@ template void DeepPotPT::compute>( std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, - const std::vector& spin, const std::vector& atype, const std::vector& box, const std::vector& fparam, diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 0ff3a869a2..baafa813d8 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -579,9 +579,11 @@ void PairDeepMD::compute(int eflag, int vflag) { } } else { try { - deep_pot.compute(dener, dforce, dforce_mag, dvirial, dcoord, dspin, - dtype, dbox, nghost, lmp_list, ago, fparam, - daparam); + const vector &dcoord_const = dcoord; + const vector &dspin_const = dspin; + deep_pot.compute(dener, dforce, dforce_mag, dvirial, dcoord_const, + dspin_const, dtype, dbox, nghost, lmp_list, ago, + fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -601,6 +603,7 @@ void PairDeepMD::compute(int eflag, int vflag) { } } else { try { + std::cout << "calculate atomic energy" << std::endl; deep_pot.compute(dener, dforce, dforce_mag, dvirial, deatom, dvatom, dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, fparam, daparam); @@ -675,7 +678,7 @@ void PairDeepMD::compute(int eflag, int vflag) { if (!(eflag_atom || cvflag_atom)) { try { deep_pot_model_devi.compute(all_energy, all_force, all_force_mag, - all_virial, dcoord, dspin, dtype, dbox, + all_virial, dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); From 6524e5e5549b06c36e3006fe086a1fa3fcbc3f42 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 28 Sep 2024 19:46:56 +0800 Subject: [PATCH 019/193] Update pair_deepmd.cpp --- source/lmp/pair_deepmd.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index baafa813d8..d3971691a2 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -603,7 +603,6 @@ void PairDeepMD::compute(int eflag, int vflag) { } } else { try { - std::cout << "calculate atomic energy" << std::endl; deep_pot.compute(dener, dforce, dforce_mag, dvirial, deatom, dvatom, dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, fparam, daparam); From a9ffccbec1c014d0549fe89545906f436060bfc6 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 2 Oct 2024 06:06:30 -0400 Subject: [PATCH 020/193] fix: unpin h5py on aarch64 (#4176) Fix #3864. ## Summary by CodeRabbit - **New Features** - Enhanced logic for detecting TensorFlow installation paths and requirements. - Improved error handling for TensorFlow version detection. - **Bug Fixes** - Adjusted compatibility specifications for the `h5py` dependency on aarch64 architecture. - **Documentation** - Updated comments to clarify changes in TensorFlow detection and compatibility issues. Signed-off-by: Jinzhe Zeng --- backend/find_tensorflow.py | 9 +++------ pyproject.toml | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/backend/find_tensorflow.py b/backend/find_tensorflow.py index 514490a926..ea11aed1b6 100644 --- a/backend/find_tensorflow.py +++ b/backend/find_tensorflow.py @@ -156,18 +156,15 @@ def get_tf_requirement(tf_version: str = "") -> dict: "tensorflow; platform_machine=='aarch64' or (platform_machine=='arm64' and platform_system == 'Darwin')", # https://github.com/tensorflow/tensorflow/issues/61830 "tensorflow-cpu!=2.15.*; platform_system=='Windows'", - # TODO: build(wheel): unpin h5py on aarch64 - # Revert after https://github.com/h5py/h5py/issues/2408 is fixed; - # or set UV_PREFER_BINARY when https://github.com/astral-sh/uv/issues/1794 is resolved. - # 3.6.0 is the first version to have aarch64 wheels. - "h5py>=3.6.0,<3.11.0; platform_system=='Linux' and platform_machine=='aarch64'", + # https://github.com/h5py/h5py/issues/2408 + "h5py>=3.6.0,!=3.11.0; platform_system=='Linux' and platform_machine=='aarch64'", *extra_requires, ], "gpu": [ "tensorflow", "tensorflow-metal; platform_machine=='arm64' and platform_system == 'Darwin'", # See above. - "h5py>=3.6.0,<3.11.0; platform_system=='Linux' and platform_machine=='aarch64'", + "h5py>=3.6.0,!=3.11.0; platform_system=='Linux' and platform_machine=='aarch64'", *extra_requires, ], **extra_select, diff --git a/pyproject.toml b/pyproject.toml index a1829016cb..47cf8e018b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,7 @@ dependencies = [ 'typing_extensions; python_version < "3.8"', 'importlib_metadata>=1.4; python_version < "3.8"', 'h5py', - "h5py>=3.6.0,<3.11.0; platform_system=='Linux' and platform_machine=='aarch64'", + "h5py>=3.6.0,!=3.11.0; platform_system=='Linux' and platform_machine=='aarch64'", 'wcmatch', 'packaging', 'ml_dtypes', From 7ce5b032503d92abd104627f9d6b9b54967444cf Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 2 Oct 2024 07:13:01 -0400 Subject: [PATCH 021/193] chore: bump LAMMPS to stable_29Aug2024_update1 (#4179) ## Summary by CodeRabbit ## Release Notes - **Documentation** - Updated installation instructions to reference the new LAMMPS version `stable_29Aug2024_update1`. - **New Features** - Enhanced build scripts to support the updated LAMMPS version across various installation methods. - **Chores** - Adjusted dependency management settings to reflect the new LAMMPS version in configuration files. --- doc/install/install-lammps.md | 22 +++++++++++----------- pyproject.toml | 6 +++--- source/install/build_cc.sh | 2 +- source/install/build_from_c.sh | 2 +- source/install/build_lammps.sh | 2 +- source/install/test_cc.sh | 2 +- source/install/test_cc_local.sh | 2 +- 7 files changed, 19 insertions(+), 19 deletions(-) diff --git a/doc/install/install-lammps.md b/doc/install/install-lammps.md index b43f9998a3..00b887e9c3 100644 --- a/doc/install/install-lammps.md +++ b/doc/install/install-lammps.md @@ -17,11 +17,11 @@ DeePMD-kit will generate a module called `USER-DEEPMD` in the `build` directory, ```bash cd /some/workspace -wget https://github.com/lammps/lammps/archive/stable_29Aug2024.tar.gz -tar xf stable_29Aug2024.tar.gz +wget https://github.com/lammps/lammps/archive/stable_29Aug2024_update1.tar.gz +tar xf stable_29Aug2024_update1.tar.gz ``` -The source code of LAMMPS is stored in the directory `lammps-stable_29Aug2024`. +The source code of LAMMPS is stored in the directory `lammps-stable_29Aug2024_update1`. Then, you can [build LAMMPS](https://docs.lammps.org/Build.html) with either make or CMake. @@ -30,7 +30,7 @@ Then, you can [build LAMMPS](https://docs.lammps.org/Build.html) with either mak Now go into the LAMMPS code and copy the DeePMD-kit module like this ```bash -cd lammps-stable_29Aug2024/src/ +cd lammps-stable_29Aug2024_update1/src/ cp -r $deepmd_source_dir/source/build/USER-DEEPMD . make yes-kspace make yes-extra-fix @@ -60,8 +60,8 @@ make no-user-deepmd Now go into the LAMMPS directory and create a directory called `build`: ```bash -mkdir -p lammps-stable_29Aug2024/build/ -cd lammps-stable_29Aug2024/build/ +mkdir -p lammps-stable_29Aug2024_update1/build/ +cd lammps-stable_29Aug2024_update1/build/ ``` Patch the LAMMPS `CMakeLists.txt` file: @@ -94,15 +94,15 @@ Now download the LAMMPS code (`8Apr2021` or later), and uncompress it: ```bash cd /some/workspace -wget https://github.com/lammps/lammps/archive/stable_29Aug2024.tar.gz -tar xf stable_29Aug2024.tar.gz +wget https://github.com/lammps/lammps/archive/stable_29Aug2024_update1.tar.gz +tar xf stable_29Aug2024_update1.tar.gz ``` -The source code of LAMMPS is stored in the directory `lammps-stable_29Aug2024`. The directory of the source code should be specified as the CMAKE argument `LAMMPS_SOURCE_ROOT` during installation of the DeePMD-kit C++ interface. Now go into the LAMMPS directory and create a directory called `build` +The source code of LAMMPS is stored in the directory `lammps-stable_29Aug2024_update1`. The directory of the source code should be specified as the CMAKE argument `LAMMPS_SOURCE_ROOT` during installation of the DeePMD-kit C++ interface. Now go into the LAMMPS directory and create a directory called `build` ```bash -mkdir -p lammps-stable_29Aug2024/build/ -cd lammps-stable_29Aug2024/build/ +mkdir -p lammps-stable_29Aug2024_update1/build/ +cd lammps-stable_29Aug2024_update1/build/ ``` Now build LAMMPS. Note that `PLUGIN` must be enabled, and `BUILD_SHARED_LIBS` must be set to `yes`. You can install any other package you want. diff --git a/pyproject.toml b/pyproject.toml index 47cf8e018b..1b825ef441 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -104,7 +104,7 @@ docs = [ "sphinxcontrib-moderncmakedomain", ] lmp = [ - "lammps~=2024.8.29.0.0", + "lammps~=2024.8.29.1.0", ] ipi = [ "ipi", @@ -225,7 +225,7 @@ repair-wheel-command = """delocate-wheel --require-archs {delocate_archs} -w {de [tool.cibuildwheel.macos.environment] PIP_PREFER_BINARY = "1" -DP_LAMMPS_VERSION = "stable_29Aug2024" +DP_LAMMPS_VERSION = "stable_29Aug2024_update1" DP_ENABLE_IPI = "1" DP_ENABLE_PYTORCH = "1" # for unclear reason, when enabling PyTorch, OpenMP is found accidentally @@ -261,7 +261,7 @@ before-build = [ ] [tool.cibuildwheel.linux.environment] PIP_PREFER_BINARY = "1" -DP_LAMMPS_VERSION = "stable_29Aug2024" +DP_LAMMPS_VERSION = "stable_29Aug2024_update1" DP_ENABLE_IPI = "1" DP_ENABLE_PYTORCH = "1" MPI_HOME = "/usr/lib64/mpich" diff --git a/source/install/build_cc.sh b/source/install/build_cc.sh index 60101eb9a8..17b5ed0de4 100755 --- a/source/install/build_cc.sh +++ b/source/install/build_cc.sh @@ -25,7 +25,7 @@ cmake -D ENABLE_TENSORFLOW=ON \ -D CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \ -D USE_TF_PYTHON_LIBS=TRUE \ ${CUDA_ARGS} \ - -D LAMMPS_VERSION=stable_29Aug2024 \ + -D LAMMPS_VERSION=stable_29Aug2024_update1 \ .. cmake --build . -j${NPROC} cmake --install . diff --git a/source/install/build_from_c.sh b/source/install/build_from_c.sh index ff9268f649..22739ec531 100755 --- a/source/install/build_from_c.sh +++ b/source/install/build_from_c.sh @@ -13,7 +13,7 @@ NPROC=$(nproc --all) BUILD_TMP_DIR=${SCRIPT_PATH}/../build mkdir -p ${BUILD_TMP_DIR} cd ${BUILD_TMP_DIR} -cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DDEEPMD_C_ROOT=${DEEPMD_C_ROOT} -DLAMMPS_VERSION=stable_29Aug2024 .. +cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DDEEPMD_C_ROOT=${DEEPMD_C_ROOT} -DLAMMPS_VERSION=stable_29Aug2024_update1 .. cmake --build . -j${NPROC} cmake --install . cmake --build . --target=lammps diff --git a/source/install/build_lammps.sh b/source/install/build_lammps.sh index a1e62691ca..add1194151 100755 --- a/source/install/build_lammps.sh +++ b/source/install/build_lammps.sh @@ -14,7 +14,7 @@ BUILD_TMP_DIR=${SCRIPT_PATH}/../build_lammps mkdir -p ${BUILD_TMP_DIR} cd ${BUILD_TMP_DIR} # download LAMMMPS -LAMMPS_VERSION=stable_29Aug2024 +LAMMPS_VERSION=stable_29Aug2024_update1 if [ ! -d "lammps-${LAMMPS_VERSION}" ]; then curl -L -o lammps.tar.gz https://github.com/lammps/lammps/archive/refs/tags/${LAMMPS_VERSION}.tar.gz tar vxzf lammps.tar.gz diff --git a/source/install/test_cc.sh b/source/install/test_cc.sh index ccdaf124cd..1626f36193 100755 --- a/source/install/test_cc.sh +++ b/source/install/test_cc.sh @@ -17,7 +17,7 @@ INSTALL_PREFIX=${SCRIPT_PATH}/../../dp_test BUILD_TMP_DIR=${SCRIPT_PATH}/../build_tests mkdir -p ${BUILD_TMP_DIR} cd ${BUILD_TMP_DIR} -cmake -DINSTALL_TENSORFLOW=TRUE -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DTENSORFLOW_ROOT=${INSTALL_PREFIX} -DBUILD_TESTING:BOOL=TRUE -DLAMMPS_VERSION=stable_29Aug2024 ${CUDA_ARGS} .. +cmake -DINSTALL_TENSORFLOW=TRUE -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DTENSORFLOW_ROOT=${INSTALL_PREFIX} -DBUILD_TESTING:BOOL=TRUE -DLAMMPS_VERSION=stable_29Aug2024_update1 ${CUDA_ARGS} .. cmake --build . -j${NPROC} cmake --install . ctest --output-on-failure diff --git a/source/install/test_cc_local.sh b/source/install/test_cc_local.sh index fdb2396a28..8ce4de4b21 100755 --- a/source/install/test_cc_local.sh +++ b/source/install/test_cc_local.sh @@ -25,7 +25,7 @@ cmake \ -D USE_TF_PYTHON_LIBS=TRUE \ -D CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \ -D BUILD_TESTING:BOOL=TRUE \ - -D LAMMPS_VERSION=stable_29Aug2024 \ + -D LAMMPS_VERSION=stable_29Aug2024_update1 \ ${CUDA_ARGS} .. cmake --build . -j${NPROC} cmake --install . From 192a97a47af249d40ad75206d5de28a9048d492b Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 6 Oct 2024 13:03:28 -0400 Subject: [PATCH 022/193] breaking: drop Python 3.8 support (#4185) ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced new functionality for handling input data conversion between different versions. - Added `EnvMatStatSe` class for enhanced environmental matrix statistics calculations. - Implemented a mechanism to track the status of atoms (real vs. virtual) in `BaseAtomicModel`. - **Bug Fixes** - Updated Python version requirements across documentation and configuration files to Python 3.9 or above. - **Documentation** - Updated installation guides to reflect the new Python version requirement and clarified virtual environment setup instructions. - **Chores** - Refined dependency management in `pyproject.toml` to support newer Python versions and improve version control. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/test_python.yml | 6 +- backend/dp_backend.py | 10 +- backend/dynamic_metadata.py | 6 +- backend/find_pytorch.py | 4 +- backend/find_tensorflow.py | 4 +- backend/read_env.py | 5 +- deepmd/backend/backend.py | 17 +-- deepmd/backend/dpmodel.py | 8 +- deepmd/backend/jax.py | 8 +- deepmd/backend/pytorch.py | 8 +- deepmd/backend/suffix.py | 3 +- deepmd/backend/tensorflow.py | 8 +- deepmd/calculator.py | 16 +-- deepmd/common.py | 21 ++- .../dpmodel/atomic_model/base_atomic_model.py | 35 +++-- .../dpmodel/atomic_model/dp_atomic_model.py | 12 +- .../atomic_model/linear_atomic_model.py | 43 +++--- .../atomic_model/make_base_atomic_model.py | 16 +-- .../atomic_model/pairtab_atomic_model.py | 18 ++- deepmd/dpmodel/descriptor/descriptor.py | 16 +-- deepmd/dpmodel/descriptor/dpa1.py | 50 ++++--- deepmd/dpmodel/descriptor/dpa2.py | 34 +++-- deepmd/dpmodel/descriptor/hybrid.py | 31 ++-- .../descriptor/make_base_descriptor.py | 14 +- deepmd/dpmodel/descriptor/repformers.py | 42 +++--- deepmd/dpmodel/descriptor/se_atten_v2.py | 12 +- deepmd/dpmodel/descriptor/se_e2_a.py | 30 ++-- deepmd/dpmodel/descriptor/se_r.py | 28 ++-- deepmd/dpmodel/descriptor/se_t.py | 30 ++-- deepmd/dpmodel/descriptor/se_t_tebd.py | 42 +++--- deepmd/dpmodel/fitting/dipole_fitting.py | 18 ++- deepmd/dpmodel/fitting/dos_fitting.py | 11 +- deepmd/dpmodel/fitting/ener_fitting.py | 15 +- deepmd/dpmodel/fitting/general_fitting.py | 34 +++-- deepmd/dpmodel/fitting/invar_fitting.py | 22 ++- deepmd/dpmodel/fitting/make_base_fitting.py | 8 +- .../dpmodel/fitting/polarizability_fitting.py | 24 ++-- deepmd/dpmodel/fitting/property_fitting.py | 13 +- deepmd/dpmodel/infer/deep_eval.py | 18 +-- deepmd/dpmodel/model/base_model.py | 15 +- deepmd/dpmodel/model/dp_model.py | 6 +- deepmd/dpmodel/model/make_model.py | 28 ++-- deepmd/dpmodel/model/spin_model.py | 12 +- deepmd/dpmodel/model/transform_output.py | 11 +- deepmd/dpmodel/output_def.py | 43 +++--- deepmd/dpmodel/utils/exclude_mask.py | 8 +- deepmd/dpmodel/utils/neighbor_stat.py | 9 +- deepmd/dpmodel/utils/network.py | 20 ++- deepmd/dpmodel/utils/nlist.py | 22 ++- deepmd/dpmodel/utils/seed.py | 7 +- deepmd/dpmodel/utils/type_embed.py | 11 +- deepmd/dpmodel/utils/update_sel.py | 5 +- deepmd/entrypoints/main.py | 2 +- deepmd/entrypoints/neighbor_stat.py | 3 +- deepmd/entrypoints/show.py | 5 +- deepmd/entrypoints/test.py | 33 ++--- deepmd/env.py | 12 +- deepmd/infer/deep_dos.py | 10 +- deepmd/infer/deep_eval.py | 18 +-- deepmd/infer/deep_polar.py | 3 +- deepmd/infer/deep_pot.py | 22 ++- deepmd/infer/deep_property.py | 13 +- deepmd/infer/deep_tensor.py | 8 +- deepmd/infer/model_devi.py | 11 +- deepmd/loggers/training.py | 3 +- deepmd/main.py | 17 +-- deepmd/pt/entrypoints/main.py | 3 +- deepmd/pt/infer/deep_eval.py | 20 ++- deepmd/pt/loss/dos.py | 5 +- deepmd/pt/loss/ener.py | 3 +- deepmd/pt/loss/ener_spin.py | 5 +- deepmd/pt/loss/loss.py | 5 +- deepmd/pt/loss/property.py | 5 +- deepmd/pt/loss/tensor.py | 5 +- .../model/atomic_model/base_atomic_model.py | 71 +++++----- .../model/atomic_model/dipole_atomic_model.py | 5 +- .../pt/model/atomic_model/dp_atomic_model.py | 14 +- .../model/atomic_model/linear_atomic_model.py | 57 ++++---- .../atomic_model/pairtab_atomic_model.py | 30 ++-- .../model/atomic_model/polar_atomic_model.py | 5 +- .../atomic_model/property_atomic_model.py | 5 +- deepmd/pt/model/descriptor/descriptor.py | 16 +-- deepmd/pt/model/descriptor/dpa1.py | 37 +++-- deepmd/pt/model/descriptor/dpa2.py | 39 +++-- deepmd/pt/model/descriptor/gaussian_lcc.py | 3 +- deepmd/pt/model/descriptor/hybrid.py | 37 +++-- deepmd/pt/model/descriptor/repformer_layer.py | 29 ++-- .../descriptor/repformer_layer_old_impl.py | 15 +- deepmd/pt/model/descriptor/repformers.py | 25 ++-- deepmd/pt/model/descriptor/se_a.py | 51 ++++--- deepmd/pt/model/descriptor/se_atten.py | 31 ++-- deepmd/pt/model/descriptor/se_atten_v2.py | 14 +- deepmd/pt/model/descriptor/se_r.py | 35 +++-- deepmd/pt/model/descriptor/se_t.py | 65 ++++----- deepmd/pt/model/descriptor/se_t_tebd.py | 59 ++++---- deepmd/pt/model/model/dipole_model.py | 3 +- deepmd/pt/model/model/dos_model.py | 3 +- deepmd/pt/model/model/dp_model.py | 6 +- deepmd/pt/model/model/dp_zbl_model.py | 9 +- deepmd/pt/model/model/ener_model.py | 5 +- deepmd/pt/model/model/frozen.py | 15 +- deepmd/pt/model/model/make_hessian_model.py | 12 +- deepmd/pt/model/model/make_model.py | 40 +++--- deepmd/pt/model/model/polar_model.py | 3 +- deepmd/pt/model/model/property_model.py | 5 +- deepmd/pt/model/model/spin_model.py | 12 +- deepmd/pt/model/model/transform_output.py | 14 +- deepmd/pt/model/network/layernorm.py | 3 +- deepmd/pt/model/network/mlp.py | 6 +- deepmd/pt/model/network/network.py | 15 +- deepmd/pt/model/task/dipole.py | 23 ++- deepmd/pt/model/task/dos.py | 13 +- deepmd/pt/model/task/ener.py | 16 +-- deepmd/pt/model/task/fitting.py | 37 +++-- deepmd/pt/model/task/invar_fitting.py | 21 ++- deepmd/pt/model/task/polarizability.py | 21 ++- deepmd/pt/model/task/property.py | 7 +- deepmd/pt/train/training.py | 3 +- deepmd/pt/train/wrapper.py | 9 +- deepmd/pt/utils/dataloader.py | 11 +- deepmd/pt/utils/dataset.py | 5 +- deepmd/pt/utils/env_mat_stat.py | 19 ++- deepmd/pt/utils/exclude_mask.py | 15 +- deepmd/pt/utils/neighbor_stat.py | 9 +- deepmd/pt/utils/nlist.py | 28 ++-- deepmd/pt/utils/stat.py | 38 +++-- deepmd/pt/utils/update_sel.py | 5 +- deepmd/pt/utils/utils.py | 7 +- deepmd/tf/cluster/__init__.py | 6 +- deepmd/tf/cluster/local.py | 8 +- deepmd/tf/descriptor/descriptor.py | 40 +++--- deepmd/tf/descriptor/hybrid.py | 21 ++- deepmd/tf/descriptor/loc_frame.py | 16 +-- deepmd/tf/descriptor/se.py | 19 ++- deepmd/tf/descriptor/se_a.py | 18 ++- deepmd/tf/descriptor/se_a_ebd.py | 11 +- deepmd/tf/descriptor/se_a_ebd_v2.py | 7 +- deepmd/tf/descriptor/se_a_ef.py | 22 ++- deepmd/tf/descriptor/se_a_mask.py | 19 ++- deepmd/tf/descriptor/se_atten.py | 43 +++--- deepmd/tf/descriptor/se_atten_v2.py | 7 +- deepmd/tf/descriptor/se_r.py | 16 +-- deepmd/tf/descriptor/se_t.py | 25 ++-- deepmd/tf/entrypoints/freeze.py | 7 +- deepmd/tf/entrypoints/ipi.py | 5 +- deepmd/tf/entrypoints/main.py | 5 +- deepmd/tf/entrypoints/train.py | 5 +- deepmd/tf/entrypoints/transfer.py | 9 +- deepmd/tf/fit/dipole.py | 13 +- deepmd/tf/fit/dos.py | 13 +- deepmd/tf/fit/ener.py | 19 ++- deepmd/tf/fit/fitting.py | 7 +- deepmd/tf/fit/polar.py | 39 +++-- deepmd/tf/infer/data_modifier.py | 10 +- deepmd/tf/infer/deep_eval.py | 36 +++-- deepmd/tf/infer/deep_tensor.py | 15 +- deepmd/tf/infer/ewald_recp.py | 5 +- deepmd/tf/lmp.py | 5 +- deepmd/tf/loss/dos.py | 5 +- deepmd/tf/loss/ener.py | 7 +- deepmd/tf/loss/loss.py | 15 +- deepmd/tf/loss/tensor.py | 5 +- deepmd/tf/model/dos.py | 3 +- deepmd/tf/model/ener.py | 3 +- deepmd/tf/model/frozen.py | 8 +- deepmd/tf/model/linear.py | 10 +- deepmd/tf/model/model.py | 25 ++-- deepmd/tf/model/pairtab.py | 10 +- deepmd/tf/model/pairwise_dprc.py | 13 +- deepmd/tf/model/tensor.py | 3 +- deepmd/tf/nvnmd/utils/fio.py | 5 +- deepmd/tf/train/run_options.py | 9 +- deepmd/tf/train/trainer.py | 18 +-- deepmd/tf/utils/finetune.py | 5 +- deepmd/tf/utils/graph.py | 44 +++--- deepmd/tf/utils/neighbor_stat.py | 11 +- deepmd/tf/utils/parallel_op.py | 14 +- deepmd/tf/utils/spin.py | 13 +- deepmd/tf/utils/tabulate.py | 11 +- deepmd/tf/utils/type_embed.py | 7 +- deepmd/tf/utils/update_sel.py | 5 +- deepmd/utils/argcheck.py | 134 +++++++++--------- deepmd/utils/batch_size.py | 7 +- deepmd/utils/compat.py | 83 +++++------ deepmd/utils/data.py | 11 +- deepmd/utils/data_system.py | 38 +++-- deepmd/utils/econf_embd.py | 12 +- deepmd/utils/env_mat_stat.py | 28 ++-- deepmd/utils/finetune.py | 38 +++-- deepmd/utils/hostlist.py | 8 +- deepmd/utils/neighbor_stat.py | 7 +- deepmd/utils/out_stat.py | 5 +- deepmd/utils/pair_tab.py | 3 +- deepmd/utils/path.py | 32 ++--- deepmd/utils/plugin.py | 10 +- deepmd/utils/random.py | 3 +- deepmd/utils/spin.py | 24 ++-- deepmd/utils/update_sel.py | 19 ++- deepmd/utils/weight_avg.py | 9 +- doc/development/coding-conventions.rst | 2 +- doc/development/create-a-model-pt.md | 4 +- doc/development/create-a-model-tf.md | 2 +- doc/getting-started/quick_start.ipynb | 2 +- doc/install/easy-install.md | 2 +- doc/install/install-from-source.md | 4 +- pyproject.toml | 7 +- source/install/build_tf.py | 40 +++--- .../common/dpmodel/array_api/test_env_mat.py | 6 +- .../tests/common/dpmodel/test_output_def.py | 5 +- source/tests/common/test_argument_parser.py | 11 +- source/tests/common/test_auto_batch_size.py | 8 +- source/tests/consistent/common.py | 15 +- .../tests/consistent/descriptor/test_dpa1.py | 5 +- .../tests/consistent/descriptor/test_dpa2.py | 5 +- .../consistent/descriptor/test_hybrid.py | 5 +- .../consistent/descriptor/test_se_atten_v2.py | 5 +- .../consistent/descriptor/test_se_e2_a.py | 5 +- .../tests/consistent/descriptor/test_se_r.py | 5 +- .../tests/consistent/descriptor/test_se_t.py | 5 +- .../consistent/descriptor/test_se_t_tebd.py | 5 +- .../tests/consistent/fitting/test_dipole.py | 5 +- source/tests/consistent/fitting/test_dos.py | 5 +- source/tests/consistent/fitting/test_ener.py | 5 +- source/tests/consistent/fitting/test_polar.py | 5 +- .../tests/consistent/fitting/test_property.py | 5 +- source/tests/consistent/model/test_ener.py | 5 +- source/tests/consistent/model/test_frozen.py | 5 +- .../tests/consistent/test_type_embedding.py | 5 +- source/tests/infer/case.py | 5 +- source/tests/pt/common.py | 3 +- .../pt/model/test_atomic_model_atomic_stat.py | 5 +- .../pt/model/test_atomic_model_global_stat.py | 5 +- source/tests/pt/model/test_force_grad.py | 3 +- .../pt/model/test_linear_atomic_model_stat.py | 9 +- source/tests/pt/model/test_rotation.py | 3 +- .../common/cases/atomic_model/utils.py | 18 ++- .../universal/common/cases/model/utils.py | 18 ++- 237 files changed, 1612 insertions(+), 2065 deletions(-) diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index 8274921909..87d7266e03 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -16,7 +16,7 @@ jobs: fail-fast: false matrix: group: [1, 2, 3, 4, 5, 6] - python: ["3.8", "3.12"] + python: ["3.9", "3.12"] steps: - uses: actions/checkout@v4 @@ -34,7 +34,7 @@ jobs: # existing TensorFlow package. Currently, it uses # TensorFlow in the build dependency, but if it # changes, setting `TENSORFLOW_ROOT`. - TENSORFLOW_VERSION: ${{ matrix.python == '3.8' && '2.13.1' || '2.16.1' }} + TENSORFLOW_VERSION: 2.16.1 DP_ENABLE_PYTORCH: 1 DP_BUILD_TESTING: 1 UV_EXTRA_INDEX_URL: "https://pypi.anaconda.org/njzjz/simple https://pypi.anaconda.org/mpi4py/simple" @@ -69,7 +69,7 @@ jobs: strategy: fail-fast: false matrix: - python: ["3.8", "3.12"] + python: ["3.9", "3.12"] needs: testpython steps: - name: Get durations from cache diff --git a/backend/dp_backend.py b/backend/dp_backend.py index dbd2d2a52b..81c3f20f19 100644 --- a/backend/dp_backend.py +++ b/backend/dp_backend.py @@ -1,10 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later """A PEP-517 backend to find TensorFlow.""" -from typing import ( - List, -) - from scikit_build_core import build as _orig from .find_pytorch import ( @@ -26,7 +22,7 @@ ] -def __dir__() -> List[str]: +def __dir__() -> list[str]: return __all__ @@ -42,7 +38,7 @@ def __dir__() -> List[str]: def get_requires_for_build_wheel( config_settings: dict, -) -> List[str]: +) -> list[str]: return ( _orig.get_requires_for_build_wheel(config_settings) + find_tensorflow()[1] @@ -52,7 +48,7 @@ def get_requires_for_build_wheel( def get_requires_for_build_editable( config_settings: dict, -) -> List[str]: +) -> list[str]: return ( _orig.get_requires_for_build_editable(config_settings) + find_tensorflow()[1] diff --git a/backend/dynamic_metadata.py b/backend/dynamic_metadata.py index 83123e6e41..a66e9a2759 100644 --- a/backend/dynamic_metadata.py +++ b/backend/dynamic_metadata.py @@ -4,8 +4,6 @@ Path, ) from typing import ( - Dict, - List, Optional, ) @@ -27,13 +25,13 @@ __all__ = ["dynamic_metadata"] -def __dir__() -> List[str]: +def __dir__() -> list[str]: return __all__ def dynamic_metadata( field: str, - settings: Optional[Dict[str, object]] = None, + settings: Optional[dict[str, object]] = None, ): assert field in ["optional-dependencies", "entry-points", "scripts"] _, _, find_libpython_requires, extra_scripts, tf_version, pt_version = ( diff --git a/backend/find_pytorch.py b/backend/find_pytorch.py index 04f297a963..e01f4e84fe 100644 --- a/backend/find_pytorch.py +++ b/backend/find_pytorch.py @@ -18,9 +18,7 @@ get_path, ) from typing import ( - List, Optional, - Tuple, Union, ) @@ -30,7 +28,7 @@ @lru_cache -def find_pytorch() -> Tuple[Optional[str], List[str]]: +def find_pytorch() -> tuple[Optional[str], list[str]]: """Find PyTorch library. Tries to find PyTorch in the order of: diff --git a/backend/find_tensorflow.py b/backend/find_tensorflow.py index ea11aed1b6..5b0de0b2dd 100644 --- a/backend/find_tensorflow.py +++ b/backend/find_tensorflow.py @@ -17,9 +17,7 @@ get_path, ) from typing import ( - List, Optional, - Tuple, Union, ) @@ -29,7 +27,7 @@ @lru_cache -def find_tensorflow() -> Tuple[Optional[str], List[str]]: +def find_tensorflow() -> tuple[Optional[str], list[str]]: """Find TensorFlow library. Tries to find TensorFlow in the order of: diff --git a/backend/read_env.py b/backend/read_env.py index ae82778f4e..edc3600115 100644 --- a/backend/read_env.py +++ b/backend/read_env.py @@ -5,9 +5,6 @@ from functools import ( lru_cache, ) -from typing import ( - Tuple, -) from packaging.version import ( Version, @@ -24,7 +21,7 @@ @lru_cache -def get_argument_from_env() -> Tuple[str, list, list, dict, str, str]: +def get_argument_from_env() -> tuple[str, list, list, dict, str, str]: """Get the arguments from environment variables. The environment variables are assumed to be not changed during the build. diff --git a/deepmd/backend/backend.py b/deepmd/backend/backend.py index 8f7bca319e..3263169f6f 100644 --- a/deepmd/backend/backend.py +++ b/deepmd/backend/backend.py @@ -10,9 +10,6 @@ TYPE_CHECKING, Callable, ClassVar, - Dict, - List, - Type, ) from deepmd.utils.plugin import ( @@ -45,7 +42,7 @@ class Backend(PluginVariant, make_plugin_registry("backend")): """ @staticmethod - def get_backend(key: str) -> Type["Backend"]: + def get_backend(key: str) -> type["Backend"]: """Get the backend by key. Parameters @@ -61,7 +58,7 @@ def get_backend(key: str) -> Type["Backend"]: return Backend.get_class_by_type(key) @staticmethod - def get_backends() -> Dict[str, Type["Backend"]]: + def get_backends() -> dict[str, type["Backend"]]: """Get all the registered backend names. Returns @@ -74,7 +71,7 @@ def get_backends() -> Dict[str, Type["Backend"]]: @staticmethod def get_backends_by_feature( feature: "Backend.Feature", - ) -> Dict[str, Type["Backend"]]: + ) -> dict[str, type["Backend"]]: """Get all the registered backend names with a specific feature. Parameters @@ -94,7 +91,7 @@ def get_backends_by_feature( } @staticmethod - def detect_backend_by_model(filename: str) -> Type["Backend"]: + def detect_backend_by_model(filename: str) -> type["Backend"]: """Detect the backend of the given model file. Parameters @@ -128,7 +125,7 @@ class Feature(Flag): features: ClassVar[Feature] = Feature(0) """The features of the backend.""" - suffixes: ClassVar[List[str]] = [] + suffixes: ClassVar[list[str]] = [] """The supported suffixes of the saved model. The first element is considered as the default suffix.""" @@ -157,7 +154,7 @@ def entry_point_hook(self) -> Callable[["Namespace"], None]: @property @abstractmethod - def deep_eval(self) -> Type["DeepEvalBackend"]: + def deep_eval(self) -> type["DeepEvalBackend"]: """The Deep Eval backend of the backend. Returns @@ -169,7 +166,7 @@ def deep_eval(self) -> Type["DeepEvalBackend"]: @property @abstractmethod - def neighbor_stat(self) -> Type["NeighborStat"]: + def neighbor_stat(self) -> type["NeighborStat"]: """The neighbor statistics of the backend. Returns diff --git a/deepmd/backend/dpmodel.py b/deepmd/backend/dpmodel.py index c51d097d5a..7c21b256ae 100644 --- a/deepmd/backend/dpmodel.py +++ b/deepmd/backend/dpmodel.py @@ -3,8 +3,6 @@ TYPE_CHECKING, Callable, ClassVar, - List, - Type, ) from deepmd.backend.backend import ( @@ -37,7 +35,7 @@ class DPModelBackend(Backend): Backend.Feature.DEEP_EVAL | Backend.Feature.NEIGHBOR_STAT | Backend.Feature.IO ) """The features of the backend.""" - suffixes: ClassVar[List[str]] = [".dp", ".yaml", ".yml"] + suffixes: ClassVar[list[str]] = [".dp", ".yaml", ".yml"] """The suffixes of the backend.""" def is_available(self) -> bool: @@ -62,7 +60,7 @@ def entry_point_hook(self) -> Callable[["Namespace"], None]: raise NotImplementedError(f"Unsupported backend: {self.name}") @property - def deep_eval(self) -> Type["DeepEvalBackend"]: + def deep_eval(self) -> type["DeepEvalBackend"]: """The Deep Eval backend of the backend. Returns @@ -77,7 +75,7 @@ def deep_eval(self) -> Type["DeepEvalBackend"]: return DeepEval @property - def neighbor_stat(self) -> Type["NeighborStat"]: + def neighbor_stat(self) -> type["NeighborStat"]: """The neighbor statistics of the backend. Returns diff --git a/deepmd/backend/jax.py b/deepmd/backend/jax.py index ece0761772..db92d6bed1 100644 --- a/deepmd/backend/jax.py +++ b/deepmd/backend/jax.py @@ -6,8 +6,6 @@ TYPE_CHECKING, Callable, ClassVar, - List, - Type, ) from deepmd.backend.backend import ( @@ -41,7 +39,7 @@ class JAXBackend(Backend): # | Backend.Feature.IO ) """The features of the backend.""" - suffixes: ClassVar[List[str]] = [] + suffixes: ClassVar[list[str]] = [] """The suffixes of the backend.""" def is_available(self) -> bool: @@ -66,7 +64,7 @@ def entry_point_hook(self) -> Callable[["Namespace"], None]: raise NotImplementedError @property - def deep_eval(self) -> Type["DeepEvalBackend"]: + def deep_eval(self) -> type["DeepEvalBackend"]: """The Deep Eval backend of the backend. Returns @@ -77,7 +75,7 @@ def deep_eval(self) -> Type["DeepEvalBackend"]: raise NotImplementedError @property - def neighbor_stat(self) -> Type["NeighborStat"]: + def neighbor_stat(self) -> type["NeighborStat"]: """The neighbor statistics of the backend. Returns diff --git a/deepmd/backend/pytorch.py b/deepmd/backend/pytorch.py index fb7d30e994..f5b0dd92b2 100644 --- a/deepmd/backend/pytorch.py +++ b/deepmd/backend/pytorch.py @@ -6,8 +6,6 @@ TYPE_CHECKING, Callable, ClassVar, - List, - Type, ) from deepmd.backend.backend import ( @@ -41,7 +39,7 @@ class PyTorchBackend(Backend): | Backend.Feature.IO ) """The features of the backend.""" - suffixes: ClassVar[List[str]] = [".pth", ".pt"] + suffixes: ClassVar[list[str]] = [".pth", ".pt"] """The suffixes of the backend.""" def is_available(self) -> bool: @@ -68,7 +66,7 @@ def entry_point_hook(self) -> Callable[["Namespace"], None]: return deepmd_main @property - def deep_eval(self) -> Type["DeepEvalBackend"]: + def deep_eval(self) -> type["DeepEvalBackend"]: """The Deep Eval backend of the backend. Returns @@ -81,7 +79,7 @@ def deep_eval(self) -> Type["DeepEvalBackend"]: return DeepEvalPT @property - def neighbor_stat(self) -> Type["NeighborStat"]: + def neighbor_stat(self) -> type["NeighborStat"]: """The neighbor statistics of the backend. Returns diff --git a/deepmd/backend/suffix.py b/deepmd/backend/suffix.py index 273fbc0951..d694b43488 100644 --- a/deepmd/backend/suffix.py +++ b/deepmd/backend/suffix.py @@ -6,7 +6,6 @@ ) from typing import ( Optional, - Type, Union, ) @@ -18,7 +17,7 @@ def format_model_suffix( filename: str, feature: Optional[Backend.Feature] = None, - preferred_backend: Optional[Union[str, Type["Backend"]]] = None, + preferred_backend: Optional[Union[str, type["Backend"]]] = None, strict_prefer: Optional[bool] = None, ) -> str: """Check and format the suffixes of a filename. diff --git a/deepmd/backend/tensorflow.py b/deepmd/backend/tensorflow.py index 15b03ee7c8..6b73d7c469 100644 --- a/deepmd/backend/tensorflow.py +++ b/deepmd/backend/tensorflow.py @@ -6,8 +6,6 @@ TYPE_CHECKING, Callable, ClassVar, - List, - Type, ) from deepmd.backend.backend import ( @@ -41,7 +39,7 @@ class TensorFlowBackend(Backend): | Backend.Feature.IO ) """The features of the backend.""" - suffixes: ClassVar[List[str]] = [".pb"] + suffixes: ClassVar[list[str]] = [".pb"] """The suffixes of the backend.""" def is_available(self) -> bool: @@ -77,7 +75,7 @@ def entry_point_hook(self) -> Callable[["Namespace"], None]: return deepmd_main @property - def deep_eval(self) -> Type["DeepEvalBackend"]: + def deep_eval(self) -> type["DeepEvalBackend"]: """The Deep Eval backend of the backend. Returns @@ -90,7 +88,7 @@ def deep_eval(self) -> Type["DeepEvalBackend"]: return DeepEvalTF @property - def neighbor_stat(self) -> Type["NeighborStat"]: + def neighbor_stat(self) -> type["NeighborStat"]: """The neighbor statistics of the backend. Returns diff --git a/deepmd/calculator.py b/deepmd/calculator.py index 2d3e7ce831..032fa2bcfa 100644 --- a/deepmd/calculator.py +++ b/deepmd/calculator.py @@ -7,8 +7,6 @@ from typing import ( TYPE_CHECKING, ClassVar, - Dict, - List, Optional, Union, ) @@ -42,7 +40,7 @@ class DP(Calculator): path to the model label : str, optional calculator label, by default "DP" - type_dict : Dict[str, int], optional + type_dict : dict[str, int], optional mapping of element types and their numbers, best left None and the calculator will infer this information from model, by default None neighbor_list : ase.neighborlist.NeighborList, optional @@ -72,7 +70,7 @@ class DP(Calculator): """ name = "DP" - implemented_properties: ClassVar[List[str]] = [ + implemented_properties: ClassVar[list[str]] = [ "energy", "free_energy", "forces", @@ -84,7 +82,7 @@ def __init__( self, model: Union[str, "Path"], label: str = "DP", - type_dict: Optional[Dict[str, int]] = None, + type_dict: Optional[dict[str, int]] = None, neighbor_list=None, **kwargs, ) -> None: @@ -100,8 +98,8 @@ def __init__( def calculate( self, atoms: Optional["Atoms"] = None, - properties: List[str] = ["energy", "forces", "virial"], - system_changes: List[str] = all_changes, + properties: list[str] = ["energy", "forces", "virial"], + system_changes: list[str] = all_changes, ): """Run calculation with deepmd model. @@ -109,10 +107,10 @@ def calculate( ---------- atoms : Optional[Atoms], optional atoms object to run the calculation on, by default None - properties : List[str], optional + properties : list[str], optional unused, only for function signature compatibility, by default ["energy", "forces", "stress"] - system_changes : List[str], optional + system_changes : list[str], optional unused, only for function signature compatibility, by default all_changes """ if atoms is not None: diff --git a/deepmd/common.py b/deepmd/common.py index f58634f224..fdfeef0e6d 100644 --- a/deepmd/common.py +++ b/deepmd/common.py @@ -14,9 +14,6 @@ from typing import ( TYPE_CHECKING, Any, - Dict, - List, - Set, TypeVar, Union, get_args, @@ -60,8 +57,8 @@ "linear", ] # get_args is new in py38 -VALID_PRECISION: Set[_PRECISION] = set(get_args(_PRECISION)) -VALID_ACTIVATION: Set[_ACTIVATION] = set(get_args(_ACTIVATION)) +VALID_PRECISION: set[_PRECISION] = set(get_args(_PRECISION)) +VALID_ACTIVATION: set[_ACTIVATION] = set(get_args(_ACTIVATION)) if TYPE_CHECKING: _DICT_VAL = TypeVar("_DICT_VAL") @@ -127,17 +124,17 @@ def make_default_mesh(pbc: bool, mixed_type: bool) -> np.ndarray: def j_deprecated( - jdata: Dict[str, "_DICT_VAL"], key: str, deprecated_key: List[str] = [] + jdata: dict[str, "_DICT_VAL"], key: str, deprecated_key: list[str] = [] ) -> "_DICT_VAL": """Assert that supplied dictionary conaines specified key. Parameters ---------- - jdata : Dict[str, _DICT_VAL] + jdata : dict[str, _DICT_VAL] dictionary to check key : str key to check - deprecated_key : List[str], optional + deprecated_key : list[str], optional list of deprecated keys, by default [] Returns @@ -161,7 +158,7 @@ def j_deprecated( return jdata[key] -def j_loader(filename: Union[str, Path]) -> Dict[str, Any]: +def j_loader(filename: Union[str, Path]) -> dict[str, Any]: """Load yaml or json settings file. Parameters @@ -171,7 +168,7 @@ def j_loader(filename: Union[str, Path]) -> Dict[str, Any]: Returns ------- - Dict[str, Any] + dict[str, Any] loaded dictionary Raises @@ -190,7 +187,7 @@ def j_loader(filename: Union[str, Path]) -> Dict[str, Any]: raise TypeError("config file must be json, or yaml/yml") -def expand_sys_str(root_dir: Union[str, Path]) -> List[str]: +def expand_sys_str(root_dir: Union[str, Path]) -> list[str]: """Recursively iterate over directories taking those that contain `type.raw` file. Parameters @@ -200,7 +197,7 @@ def expand_sys_str(root_dir: Union[str, Path]) -> List[str]: Returns ------- - List[str] + list[str] list of string pointing to system directories """ root_dir = DPPath(root_dir) diff --git a/deepmd/dpmodel/atomic_model/base_atomic_model.py b/deepmd/dpmodel/atomic_model/base_atomic_model.py index 5ea65a9d73..c29a76b3f1 100644 --- a/deepmd/dpmodel/atomic_model/base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/base_atomic_model.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import copy from typing import ( - Dict, - List, Optional, - Tuple, ) import numpy as np @@ -36,11 +33,11 @@ class BaseAtomicModel(BaseAtomicModel_, NativeOP): def __init__( self, - type_map: List[str], - atom_exclude_types: List[int] = [], - pair_exclude_types: List[Tuple[int, int]] = [], + type_map: list[str], + atom_exclude_types: list[int] = [], + pair_exclude_types: list[tuple[int, int]] = [], rcond: Optional[float] = None, - preset_out_bias: Optional[Dict[str, np.ndarray]] = None, + preset_out_bias: Optional[dict[str, np.ndarray]] = None, ): super().__init__() self.type_map = type_map @@ -52,7 +49,7 @@ def __init__( def init_out_stat(self): """Initialize the output bias.""" ntypes = self.get_ntypes() - self.bias_keys: List[str] = list(self.fitting_output_def().keys()) + self.bias_keys: list[str] = list(self.fitting_output_def().keys()) self.max_out_size = max( [self.atomic_output_def()[kk].size for kk in self.bias_keys] ) @@ -78,13 +75,13 @@ def __getitem__(self, key): else: raise KeyError(key) - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" return self.type_map def reinit_atom_exclude( self, - exclude_types: List[int] = [], + exclude_types: list[int] = [], ): self.atom_exclude_types = exclude_types if exclude_types == []: @@ -94,7 +91,7 @@ def reinit_atom_exclude( def reinit_pair_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.pair_exclude_types = exclude_types if exclude_types == []: @@ -119,7 +116,7 @@ def atomic_output_def(self) -> FittingOutputDef: ) def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -143,7 +140,7 @@ def forward_common_atomic( mapping: Optional[np.ndarray] = None, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Common interface for atomic inference. This method accept extended coordinates, extended atom typs, neighbor list, @@ -217,7 +214,7 @@ def call( mapping: Optional[np.ndarray] = None, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: return self.forward_common_atomic( extended_coord, extended_atype, @@ -251,7 +248,7 @@ def deserialize(cls, data: dict) -> "BaseAtomicModel": def apply_out_stat( self, - ret: Dict[str, np.ndarray], + ret: dict[str, np.ndarray], atype: np.ndarray, ): """Apply the stat to each atomic output. @@ -274,7 +271,7 @@ def apply_out_stat( def _varsize( self, - shape: List[int], + shape: list[int], ) -> int: output_size = 1 len_shape = len(shape) @@ -286,7 +283,7 @@ def _get_bias_index( self, kk: str, ) -> int: - res: List[int] = [] + res: list[int] = [] for i, e in enumerate(self.bias_keys): if e == kk: res.append(i) @@ -295,8 +292,8 @@ def _get_bias_index( def _fetch_out_stat( self, - keys: List[str], - ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]: + keys: list[str], + ) -> tuple[dict[str, np.ndarray], dict[str, np.ndarray]]: ret_bias = {} ret_std = {} ntypes = self.get_ntypes() diff --git a/deepmd/dpmodel/atomic_model/dp_atomic_model.py b/deepmd/dpmodel/atomic_model/dp_atomic_model.py index a446bde06f..7e576eb484 100644 --- a/deepmd/dpmodel/atomic_model/dp_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/dp_atomic_model.py @@ -1,8 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import copy from typing import ( - Dict, - List, Optional, ) @@ -46,7 +44,7 @@ def __init__( self, descriptor, fitting, - type_map: List[str], + type_map: list[str], **kwargs, ): super().__init__(type_map, **kwargs) @@ -64,7 +62,7 @@ def get_rcut(self) -> float: """Get the cut-off radius.""" return self.descriptor.get_rcut() - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Get the neighbor selection.""" return self.descriptor.get_sel() @@ -96,7 +94,7 @@ def forward_atomic( mapping: Optional[np.ndarray] = None, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Models' atomic predictions. Parameters @@ -140,7 +138,7 @@ def forward_atomic( return ret def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -192,7 +190,7 @@ def get_dim_aparam(self) -> int: """Get the number (dimension) of atomic parameters of this atomic model.""" return self.fitting.get_dim_aparam() - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py index d522347f41..79a51635d2 100644 --- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import copy from typing import ( - Dict, - List, Optional, - Tuple, Union, ) @@ -48,8 +45,8 @@ class LinearEnergyAtomicModel(BaseAtomicModel): def __init__( self, - models: List[BaseAtomicModel], - type_map: List[str], + models: list[BaseAtomicModel], + type_map: list[str], **kwargs, ): super().__init__(type_map, **kwargs) @@ -104,12 +101,12 @@ def get_rcut(self) -> float: """Get the cut-off radius.""" return max(self.get_model_rcuts()) - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" return self.type_map def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -125,22 +122,22 @@ def change_type_map( else None, ) - def get_model_rcuts(self) -> List[float]: + def get_model_rcuts(self) -> list[float]: """Get the cut-off radius for each individual models.""" return [model.get_rcut() for model in self.models] - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: return [max([model.get_nsel() for model in self.models])] - def get_model_nsels(self) -> List[int]: + def get_model_nsels(self) -> list[int]: """Get the processed sels for each individual models. Not distinguishing types.""" return [model.get_nsel() for model in self.models] - def get_model_sels(self) -> List[Union[int, List[int]]]: + def get_model_sels(self) -> list[Union[int, list[int]]]: """Get the sels for each individual models.""" return [model.get_sel() for model in self.models] - def _sort_rcuts_sels(self) -> Tuple[List[float], List[int]]: + def _sort_rcuts_sels(self) -> tuple[list[float], list[int]]: # sort the pair of rcut and sels in ascending order, first based on sel, then on rcut. zipped = sorted( zip(self.get_model_rcuts(), self.get_model_nsels()), @@ -156,7 +153,7 @@ def forward_atomic( mapping: Optional[np.ndarray] = None, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Return atomic prediction. Parameters @@ -219,16 +216,16 @@ def forward_atomic( return fit_ret @staticmethod - def remap_atype(ori_map: List[str], new_map: List[str]) -> np.ndarray: + def remap_atype(ori_map: list[str], new_map: list[str]) -> np.ndarray: """ This method is used to map the atype from the common type_map to the original type_map of indivial AtomicModels. Parameters ---------- - ori_map : List[str] + ori_map : list[str] The original type map of an AtomicModel. - new_map : List[str] + new_map : list[str] The common type map of the DPZBLLinearEnergyAtomicModel, created by the `get_type_map` method, must be a subset of the ori_map. @@ -284,8 +281,8 @@ def _compute_weight( self, extended_coord: np.ndarray, extended_atype: np.ndarray, - nlists_: List[np.ndarray], - ) -> List[np.ndarray]: + nlists_: list[np.ndarray], + ) -> list[np.ndarray]: """This should be a list of user defined weights that matches the number of models to be combined.""" nmodels = len(self.models) nframes, nloc, _ = nlists_[0].shape @@ -300,7 +297,7 @@ def get_dim_aparam(self) -> int: """Get the number (dimension) of atomic parameters of this atomic model.""" return max([model.get_dim_aparam() for model in self.models]) - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -347,7 +344,7 @@ def __init__( zbl_model: PairTabAtomicModel, sw_rmin: float, sw_rmax: float, - type_map: List[str], + type_map: list[str], smin_alpha: Optional[float] = 0.1, **kwargs, ): @@ -391,13 +388,13 @@ def _compute_weight( self, extended_coord: np.ndarray, extended_atype: np.ndarray, - nlists_: List[np.ndarray], - ) -> List[np.ndarray]: + nlists_: list[np.ndarray], + ) -> list[np.ndarray]: """ZBL weight. Returns ------- - List[np.ndarray] + list[np.ndarray] the atomic ZBL weight for interpolation. (nframes, nloc, 1) """ assert ( diff --git a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py index bf345eaa12..6c0fc88e2c 100644 --- a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py @@ -4,8 +4,6 @@ abstractmethod, ) from typing import ( - Dict, - List, Optional, ) @@ -57,7 +55,7 @@ def get_rcut(self) -> float: pass @abstractmethod - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" pass @@ -66,7 +64,7 @@ def get_ntypes(self) -> int: return len(self.get_type_map()) @abstractmethod - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" pass @@ -87,7 +85,7 @@ def get_dim_aparam(self) -> int: """Get the number (dimension) of atomic parameters of this atomic model.""" @abstractmethod - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -132,7 +130,7 @@ def fwd( mapping: Optional[t_tensor] = None, fparam: Optional[t_tensor] = None, aparam: Optional[t_tensor] = None, - ) -> Dict[str, t_tensor]: + ) -> dict[str, t_tensor]: pass @abstractmethod @@ -146,7 +144,7 @@ def deserialize(cls, data: dict): @abstractmethod def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: pass @@ -182,7 +180,7 @@ def do_grad_r( """ odef = self.fitting_output_def() if var_name is None: - require: List[bool] = [] + require: list[bool] = [] for vv in odef.keys(): require.append(self.do_grad_(vv, "r")) return any(require) @@ -199,7 +197,7 @@ def do_grad_c( """ odef = self.fitting_output_def() if var_name is None: - require: List[bool] = [] + require: list[bool] = [] for vv in odef.keys(): require.append(self.do_grad_(vv, "c")) return any(require) diff --git a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py index 4218c24e3e..22471d3f32 100644 --- a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py @@ -1,8 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import copy from typing import ( - Dict, - List, Optional, Union, ) @@ -57,10 +55,10 @@ def __init__( self, tab_file: str, rcut: float, - sel: Union[int, List[int]], - type_map: List[str], + sel: Union[int, list[int]], + type_map: list[str], rcond: Optional[float] = None, - atom_ener: Optional[List[float]] = None, + atom_ener: Optional[list[float]] = None, **kwargs, ): super().__init__(type_map, **kwargs) @@ -109,10 +107,10 @@ def fitting_output_def(self) -> FittingOutputDef: def get_rcut(self) -> float: return self.rcut - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: return self.type_map - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: return [self.sel] def get_nsel(self) -> int: @@ -140,7 +138,7 @@ def need_sorted_nlist_for_lower(self) -> bool: return False def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -190,7 +188,7 @@ def forward_atomic( mapping: Optional[np.ndarray] = None, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: nframes, nloc, nnei = nlist.shape extended_coord = extended_coord.reshape(nframes, -1, 3) @@ -394,7 +392,7 @@ def get_dim_aparam(self) -> int: """Get the number (dimension) of atomic parameters of this atomic model.""" return 0 - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution diff --git a/deepmd/dpmodel/descriptor/descriptor.py b/deepmd/dpmodel/descriptor/descriptor.py index e48479cca8..6d0644f856 100644 --- a/deepmd/dpmodel/descriptor/descriptor.py +++ b/deepmd/dpmodel/descriptor/descriptor.py @@ -6,8 +6,6 @@ ) from typing import ( Callable, - Dict, - List, Optional, Union, ) @@ -57,7 +55,7 @@ def get_nsel(self) -> int: pass @abstractmethod - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" pass @@ -83,7 +81,7 @@ def get_dim_emb(self) -> int: def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -91,11 +89,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -104,7 +102,7 @@ def compute_input_stats( """ raise NotImplementedError - def get_stats(self) -> Dict[str, StatItem]: + def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" raise NotImplementedError @@ -152,7 +150,7 @@ def extend_descrpt_stat(des, type_map, des_with_stat=None): ---------- des : DescriptorBlock The descriptor block to be extended. - type_map : List[str] + type_map : list[str] The name of each type of atoms to be extended. des_with_stat : DescriptorBlock, Optional The descriptor block has additional statistics of types from newly provided `type_map`. diff --git a/deepmd/dpmodel/descriptor/dpa1.py b/deepmd/dpmodel/descriptor/dpa1.py index 70cb818eef..5ba3fc11b2 100644 --- a/deepmd/dpmodel/descriptor/dpa1.py +++ b/deepmd/dpmodel/descriptor/dpa1.py @@ -2,9 +2,7 @@ from typing import ( Any, Callable, - List, Optional, - Tuple, Union, ) @@ -171,7 +169,7 @@ class DescrptDPA1(NativeOP, BaseDescriptor): (Only support False to keep consistent with other backend references.) (Not used in this version. True option is not implemented.) If mask the diagonal of attention weights - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. env_protection: float @@ -203,7 +201,7 @@ class DescrptDPA1(NativeOP, BaseDescriptor): Whether to use electronic configuration type embedding. use_tebd_bias : bool, Optional Whether to use bias in the type embedding layer. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. spin (Only support None to keep consistent with other backend references.) @@ -227,9 +225,9 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, - neuron: List[int] = [25, 50, 100], + neuron: list[int] = [25, 50, 100], axis_neuron: int = 8, tebd_dim: int = 8, tebd_input_mode: str = "concat", @@ -240,7 +238,7 @@ def __init__( attn_layer: int = 2, attn_dotr: bool = True, attn_mask: bool = False, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, set_davg_zero: bool = False, activation_function: str = "tanh", @@ -256,9 +254,9 @@ def __init__( stripped_type_embedding: Optional[bool] = None, use_econf_tebd: bool = False, use_tebd_bias: bool = False, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, # consistent with argcheck, not used though - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ) -> None: ## seed, uniform_seed, not included. # Ensure compatibility with the deprecated stripped_type_embedding option. @@ -333,7 +331,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return self.se_atten.get_nsel() - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.se_atten.get_sel() @@ -341,7 +339,7 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.se_atten.get_ntypes() - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map @@ -395,7 +393,7 @@ def dim_out(self): def dim_emb(self): return self.get_dim_emb() - def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): + def compute_input_stats(self, merged: list[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" raise NotImplementedError @@ -408,12 +406,12 @@ def set_stat_mean_and_stddev( self.se_atten.mean = mean self.se_atten.stddev = stddev - def get_stat_mean_and_stddev(self) -> Tuple[np.ndarray, np.ndarray]: + def get_stat_mean_and_stddev(self) -> tuple[np.ndarray, np.ndarray]: """Get mean and stddev for descriptor.""" return self.se_atten.mean, self.se_atten.stddev def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -588,9 +586,9 @@ def deserialize(cls, data: dict) -> "DescrptDPA1": def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -623,9 +621,9 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, - neuron: List[int] = [25, 50, 100], + neuron: list[int] = [25, 50, 100], axis_neuron: int = 8, tebd_dim: int = 8, tebd_input_mode: str = "concat", @@ -635,7 +633,7 @@ def __init__( attn_layer: int = 2, attn_dotr: bool = True, attn_mask: bool = False, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, set_davg_zero: bool = False, activation_function: str = "tanh", @@ -646,7 +644,7 @@ def __init__( trainable_ln: bool = True, ln_eps: Optional[float] = 1e-5, smooth: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ) -> None: self.rcut = rcut self.rcut_smth = rcut_smth @@ -748,7 +746,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return sum(self.sel) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sel @@ -817,7 +815,7 @@ def dim_emb(self): def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data.""" @@ -829,7 +827,7 @@ def get_stats(self): def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) @@ -981,7 +979,7 @@ def __init__( ln_eps: float = 1e-5, smooth: bool = True, precision: str = DEFAULT_PRECISION, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): """Construct a neighbor-wise attention net.""" super().__init__() @@ -1109,7 +1107,7 @@ def __init__( ln_eps: float = 1e-5, smooth: bool = True, precision: str = DEFAULT_PRECISION, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): """Construct a neighbor-wise attention layer.""" super().__init__() @@ -1215,7 +1213,7 @@ def __init__( bias: bool = True, smooth: bool = True, precision: str = DEFAULT_PRECISION, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): """Construct a multi-head neighbor-wise attention net.""" super().__init__() diff --git a/deepmd/dpmodel/descriptor/dpa2.py b/deepmd/dpmodel/descriptor/dpa2.py index 43c57f443f..285dc724a7 100644 --- a/deepmd/dpmodel/descriptor/dpa2.py +++ b/deepmd/dpmodel/descriptor/dpa2.py @@ -1,8 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, - Tuple, Union, ) @@ -70,7 +68,7 @@ def __init__( rcut: float, rcut_smth: float, nsel: int, - neuron: List[int] = [25, 50, 100], + neuron: list[int] = [25, 50, 100], axis_neuron: int = 16, tebd_dim: int = 8, tebd_input_mode: str = "concat", @@ -79,7 +77,7 @@ def __init__( resnet_dt: bool = False, type_one_side: bool = False, use_three_body: bool = False, - three_body_neuron: List[int] = [2, 4, 8], + three_body_neuron: list[int] = [2, 4, 8], three_body_sel: int = 40, three_body_rcut: float = 4.0, three_body_rcut_smth: float = 0.5, @@ -371,14 +369,14 @@ def __init__( concat_output_tebd: bool = True, precision: str = "float64", smooth: bool = True, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, add_tebd_to_repinit_out: bool = False, use_econf_tebd: bool = False, use_tebd_bias: bool = False, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, ): r"""The DPA-2 descriptor. see https://arxiv.org/abs/2312.15492. @@ -394,7 +392,7 @@ def __init__( The precision of the embedding net parameters. smooth : bool, optional Whether to use smoothness in processes such as attention weights calculation. - exclude_types : List[List[int]], optional + exclude_types : list[list[int]], optional The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. env_protection : float, optional @@ -410,7 +408,7 @@ def __init__( Whether to use electronic configuration type embedding. use_tebd_bias : bool, Optional Whether to use bias in the type embedding layer. - type_map : List[str], Optional + type_map : list[str], Optional A list of strings. Give the name to each type of atoms. Returns @@ -602,7 +600,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return sum(self.sel) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sel @@ -610,7 +608,7 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map @@ -660,7 +658,7 @@ def share_params(self, base_class, shared_level, resume=False): raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -723,14 +721,14 @@ def dim_emb(self): """Returns the embedding dimension g2.""" return self.get_dim_emb() - def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): + def compute_input_stats(self, merged: list[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" raise NotImplementedError def set_stat_mean_and_stddev( self, - mean: List[np.ndarray], - stddev: List[np.ndarray], + mean: list[np.ndarray], + stddev: list[np.ndarray], ) -> None: """Update mean and stddev for descriptor.""" descrpt_list = [self.repinit, self.repformers] @@ -740,7 +738,7 @@ def set_stat_mean_and_stddev( descrpt.mean = mean[ii] descrpt.stddev = stddev[ii] - def get_stat_mean_and_stddev(self) -> Tuple[List[np.ndarray], List[np.ndarray]]: + def get_stat_mean_and_stddev(self) -> tuple[list[np.ndarray], list[np.ndarray]]: """Get mean and stddev for descriptor.""" mean_list = [self.repinit.mean, self.repformers.mean] stddev_list = [ @@ -1015,9 +1013,9 @@ def deserialize(cls, data: dict) -> "DescrptDPA2": def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/dpmodel/descriptor/hybrid.py b/deepmd/dpmodel/descriptor/hybrid.py index 4cd4e230ae..3aa8882db1 100644 --- a/deepmd/dpmodel/descriptor/hybrid.py +++ b/deepmd/dpmodel/descriptor/hybrid.py @@ -2,10 +2,7 @@ import math from typing import ( Any, - Dict, - List, Optional, - Tuple, Union, ) @@ -37,14 +34,14 @@ class DescrptHybrid(BaseDescriptor, NativeOP): Parameters ---------- - list : list : List[Union[BaseDescriptor, Dict[str, Any]]] + list : list : list[Union[BaseDescriptor, dict[str, Any]]] Build a descriptor from the concatenation of the list of descriptors. The descriptor can be either an object or a dictionary. """ def __init__( self, - list: List[Union[BaseDescriptor, Dict[str, Any]]], + list: list[Union[BaseDescriptor, dict[str, Any]]], ) -> None: super().__init__() # warning: list is conflict with built-in list @@ -69,7 +66,7 @@ def __init__( ), f"number of atom types in {ii}th descrptor {self.descrpt_list[0].__class__.__name__} does not match others" # if hybrid sel is larger than sub sel, the nlist needs to be cut for each type hybrid_sel = self.get_sel() - self.nlist_cut_idx: List[np.ndarray] = [] + self.nlist_cut_idx: list[np.ndarray] = [] if self.mixed_types() and not all( descrpt.mixed_types() for descrpt in self.descrpt_list ): @@ -107,7 +104,7 @@ def get_rcut_smth(self) -> float: # Note: Using the minimum rcut_smth might not be appropriate in all scenarios. Consider using a different approach or provide detailed documentation on why the minimum value is chosen. return np.min([descrpt.get_rcut_smth() for descrpt in self.descrpt_list]).item() - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" if self.mixed_types(): return [ @@ -124,7 +121,7 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.descrpt_list[0].get_ntypes() - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.descrpt_list[0].get_type_map() @@ -169,7 +166,7 @@ def share_params(self, base_class, shared_level, resume=False): raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -182,15 +179,15 @@ def change_type_map( else None, ) - def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): + def compute_input_stats(self, merged: list[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" for descrpt in self.descrpt_list: descrpt.compute_input_stats(merged, path) def set_stat_mean_and_stddev( self, - mean: List[Union[np.ndarray, List[np.ndarray]]], - stddev: List[Union[np.ndarray, List[np.ndarray]]], + mean: list[Union[np.ndarray, list[np.ndarray]]], + stddev: list[Union[np.ndarray, list[np.ndarray]]], ) -> None: """Update mean and stddev for descriptor.""" for ii, descrpt in enumerate(self.descrpt_list): @@ -198,9 +195,9 @@ def set_stat_mean_and_stddev( def get_stat_mean_and_stddev( self, - ) -> Tuple[ - List[Union[np.ndarray, List[np.ndarray]]], - List[Union[np.ndarray, List[np.ndarray]]], + ) -> tuple[ + list[Union[np.ndarray, list[np.ndarray]]], + list[Union[np.ndarray, list[np.ndarray]]], ]: """Get mean and stddev for descriptor.""" mean_list = [] @@ -279,9 +276,9 @@ def call( def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/dpmodel/descriptor/make_base_descriptor.py b/deepmd/dpmodel/descriptor/make_base_descriptor.py index 6ce54c6f12..a9b434d5f5 100644 --- a/deepmd/dpmodel/descriptor/make_base_descriptor.py +++ b/deepmd/dpmodel/descriptor/make_base_descriptor.py @@ -5,9 +5,7 @@ ) from typing import ( Callable, - List, Optional, - Tuple, Union, ) @@ -61,7 +59,7 @@ def get_rcut_smth(self) -> float: pass @abstractmethod - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected neighboring atoms for each type.""" pass @@ -79,7 +77,7 @@ def get_ntypes(self) -> int: pass @abstractmethod - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" pass @@ -124,7 +122,7 @@ def share_params(self, base_class, shared_level, resume=False): @abstractmethod def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -143,7 +141,7 @@ def get_stat_mean_and_stddev(self): def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """Update mean and stddev for descriptor elements.""" @@ -188,9 +186,9 @@ def deserialize(cls, data: dict) -> "BD": def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/dpmodel/descriptor/repformers.py b/deepmd/dpmodel/descriptor/repformers.py index 7254f0bc3d..ec8be21a53 100644 --- a/deepmd/dpmodel/descriptor/repformers.py +++ b/deepmd/dpmodel/descriptor/repformers.py @@ -1,9 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Callable, - List, Optional, - Tuple, Union, ) @@ -110,7 +108,7 @@ class DescrptBlockRepformers(NativeOP, DescriptorBlock): The precision of the embedding net parameters. smooth : bool, optional Whether to use smoothness in processes such as attention weights calculation. - exclude_types : List[List[int]], optional + exclude_types : list[list[int]], optional The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. env_protection : float, optional @@ -159,7 +157,7 @@ def __init__( update_residual_init: str = "norm", set_davg_zero: bool = True, smooth: bool = True, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, precision: str = "float64", trainable_ln: bool = True, @@ -167,7 +165,7 @@ def __init__( g1_out_conv: bool = True, g1_out_mlp: bool = True, ln_eps: Optional[float] = 1e-5, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() self.rcut = rcut @@ -272,7 +270,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return sum(self.sel) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sel @@ -337,7 +335,7 @@ def dim_emb(self): def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data.""" @@ -349,7 +347,7 @@ def get_stats(self): def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) @@ -436,7 +434,7 @@ def get_residual( _mode: str = "norm", trainable: bool = True, precision: str = "float64", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ) -> np.ndarray: """ Get residual tensor for one update vector. @@ -694,7 +692,7 @@ def __init__( smooth: bool = True, attnw_shift: float = 20.0, precision: str = "float64", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): """Return neighbor-wise multi-head self-attention maps, with gate mechanism.""" super().__init__() @@ -812,7 +810,7 @@ def __init__( input_dim: int, head_num: int, precision: str = "float64", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() self.input_dim = input_dim @@ -897,7 +895,7 @@ def __init__( input_dim: int, head_num: int, precision: str = "float64", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() self.input_dim = input_dim @@ -970,7 +968,7 @@ def __init__( smooth: bool = True, attnw_shift: float = 20.0, precision: str = "float64", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() self.input_dim = input_dim @@ -1132,7 +1130,7 @@ def __init__( g1_out_conv: bool = True, g1_out_mlp: bool = True, ln_eps: Optional[float] = 1e-5, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() self.epsilon = 1e-4 # protection of 1./nnei @@ -1508,10 +1506,10 @@ def call( assert (nf, nloc) == g1.shape[:2] assert (nf, nloc, nnei) == h2.shape[:3] - g2_update: List[np.ndarray] = [g2] - h2_update: List[np.ndarray] = [h2] - g1_update: List[np.ndarray] = [g1] - g1_mlp: List[np.ndarray] = [g1] if not self.g1_out_mlp else [] + g2_update: list[np.ndarray] = [g2] + h2_update: list[np.ndarray] = [h2] + g1_update: list[np.ndarray] = [g1] + g1_mlp: list[np.ndarray] = [g1] if not self.g1_out_mlp else [] if self.g1_out_mlp: assert self.g1_self_mlp is not None g1_self_mlp = self.act(self.g1_self_mlp(g1)) @@ -1613,7 +1611,7 @@ def call( def list_update_res_avg( self, - update_list: List[np.ndarray], + update_list: list[np.ndarray], ) -> np.ndarray: nitem = len(update_list) uu = update_list[0] @@ -1621,7 +1619,7 @@ def list_update_res_avg( uu = uu + update_list[ii] return uu / (float(nitem) ** 0.5) - def list_update_res_incr(self, update_list: List[np.ndarray]) -> np.ndarray: + def list_update_res_incr(self, update_list: list[np.ndarray]) -> np.ndarray: nitem = len(update_list) uu = update_list[0] scale = 1.0 / (float(nitem - 1) ** 0.5) if nitem > 1 else 0.0 @@ -1630,7 +1628,7 @@ def list_update_res_incr(self, update_list: List[np.ndarray]) -> np.ndarray: return uu def list_update_res_residual( - self, update_list: List[np.ndarray], update_name: str = "g1" + self, update_list: list[np.ndarray], update_name: str = "g1" ) -> np.ndarray: nitem = len(update_list) uu = update_list[0] @@ -1648,7 +1646,7 @@ def list_update_res_residual( return uu def list_update( - self, update_list: List[np.ndarray], update_name: str = "g1" + self, update_list: list[np.ndarray], update_name: str = "g1" ) -> np.ndarray: if self.update_style == "res_avg": return self.list_update_res_avg(update_list) diff --git a/deepmd/dpmodel/descriptor/se_atten_v2.py b/deepmd/dpmodel/descriptor/se_atten_v2.py index d29580062c..e0ac222524 100644 --- a/deepmd/dpmodel/descriptor/se_atten_v2.py +++ b/deepmd/dpmodel/descriptor/se_atten_v2.py @@ -1,9 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Any, - List, Optional, - Tuple, Union, ) @@ -38,9 +36,9 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, - neuron: List[int] = [25, 50, 100], + neuron: list[int] = [25, 50, 100], axis_neuron: int = 8, tebd_dim: int = 8, resnet_dt: bool = False, @@ -50,7 +48,7 @@ def __init__( attn_layer: int = 2, attn_dotr: bool = True, attn_mask: bool = False, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, set_davg_zero: bool = False, activation_function: str = "tanh", @@ -65,9 +63,9 @@ def __init__( stripped_type_embedding: Optional[bool] = None, use_econf_tebd: bool = False, use_tebd_bias: bool = False, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, # consistent with argcheck, not used though - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ) -> None: DescrptDPA1.__init__( self, diff --git a/deepmd/dpmodel/descriptor/se_e2_a.py b/deepmd/dpmodel/descriptor/se_e2_a.py index 11856521c8..29577ef79e 100644 --- a/deepmd/dpmodel/descriptor/se_e2_a.py +++ b/deepmd/dpmodel/descriptor/se_e2_a.py @@ -3,9 +3,7 @@ import itertools from typing import ( Any, - List, Optional, - Tuple, Union, ) @@ -108,7 +106,7 @@ class DescrptSeA(NativeOP, BaseDescriptor): If the weights of embedding net are trainable. type_one_side Try to build N_types embedding nets. Otherwise, building N_types^2 embedding nets - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. env_protection: float @@ -121,7 +119,7 @@ class DescrptSeA(NativeOP, BaseDescriptor): The precision of the embedding net parameters. Supported options are |PRECISION| spin The deepspin object. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. ntypes : int Number of element types. @@ -147,22 +145,22 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, type_one_side: bool = True, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], env_protection: float = 0.0, set_davg_zero: bool = False, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, spin: Optional[Any] = None, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, ntypes: Optional[int] = None, # to be compat with input # consistent with argcheck, not used though - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ) -> None: del ntypes ## seed, uniform_seed, not included. @@ -282,7 +280,7 @@ def share_params(self, base_class, shared_level, resume=False): raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -297,11 +295,11 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map - def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): + def compute_input_stats(self, merged: list[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" raise NotImplementedError @@ -314,7 +312,7 @@ def set_stat_mean_and_stddev( self.davg = mean self.dstd = stddev - def get_stat_mean_and_stddev(self) -> Tuple[np.ndarray, np.ndarray]: + def get_stat_mean_and_stddev(self) -> tuple[np.ndarray, np.ndarray]: """Get mean and stddev for descriptor.""" return self.davg, self.dstd @@ -331,7 +329,7 @@ def cal_g( def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) @@ -473,9 +471,9 @@ def deserialize(cls, data: dict) -> "DescrptSeA": def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/dpmodel/descriptor/se_r.py b/deepmd/dpmodel/descriptor/se_r.py index 2d9f6f5a52..c9d27175d6 100644 --- a/deepmd/dpmodel/descriptor/se_r.py +++ b/deepmd/dpmodel/descriptor/se_r.py @@ -2,9 +2,7 @@ import copy from typing import ( Any, - List, Optional, - Tuple, Union, ) @@ -68,7 +66,7 @@ class DescrptSeR(NativeOP, BaseDescriptor): If the weights of embedding net are trainable. type_one_side Try to build N_types embedding nets. Otherwise, building N_types^2 embedding nets - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. set_davg_zero @@ -79,7 +77,7 @@ class DescrptSeR(NativeOP, BaseDescriptor): The precision of the embedding net parameters. Supported options are |PRECISION| spin The deepspin object. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. ntypes : int Number of element types. @@ -105,21 +103,21 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], resnet_dt: bool = False, trainable: bool = True, type_one_side: bool = True, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], env_protection: float = 0.0, set_davg_zero: bool = False, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, spin: Optional[Any] = None, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, ntypes: Optional[int] = None, # to be compat with input # consistent with argcheck, not used though - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ) -> None: del ntypes ## seed, uniform_seed, not included. @@ -240,7 +238,7 @@ def share_params(self, base_class, shared_level, resume=False): raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -255,11 +253,11 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map - def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): + def compute_input_stats(self, merged: list[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" raise NotImplementedError @@ -272,7 +270,7 @@ def set_stat_mean_and_stddev( self.davg = mean self.dstd = stddev - def get_stat_mean_and_stddev(self) -> Tuple[np.ndarray, np.ndarray]: + def get_stat_mean_and_stddev(self) -> tuple[np.ndarray, np.ndarray]: """Get mean and stddev for descriptor.""" return self.davg, self.dstd @@ -398,9 +396,9 @@ def deserialize(cls, data: dict) -> "DescrptSeR": def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/dpmodel/descriptor/se_t.py b/deepmd/dpmodel/descriptor/se_t.py index 364600aa8b..f2ea751c50 100644 --- a/deepmd/dpmodel/descriptor/se_t.py +++ b/deepmd/dpmodel/descriptor/se_t.py @@ -2,9 +2,7 @@ import copy import itertools from typing import ( - List, Optional, - Tuple, Union, ) @@ -73,7 +71,7 @@ class DescrptSeT(NativeOP, BaseDescriptor): The activation function in the embedding net. Supported options are |ACTIVATION_FN| env_protection : float Protection parameter to prevent division by zero errors during environment matrix calculations. - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. precision : str @@ -82,7 +80,7 @@ class DescrptSeT(NativeOP, BaseDescriptor): If the weights of embedding net are trainable. seed : int, Optional Random seed for initializing the network parameters. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. ntypes : int Number of element types. @@ -93,17 +91,17 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], resnet_dt: bool = False, set_davg_zero: bool = False, activation_function: str = "tanh", env_protection: float = 0.0, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], precision: str = DEFAULT_PRECISION, trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, - type_map: Optional[List[str]] = None, + seed: Optional[Union[int, list[int]]] = None, + type_map: Optional[list[str]] = None, ntypes: Optional[int] = None, # to be compat with input ) -> None: del ntypes @@ -174,7 +172,7 @@ def dim_out(self): return self.get_dim_out() def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -235,11 +233,11 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map - def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): + def compute_input_stats(self, merged: list[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" raise NotImplementedError @@ -252,13 +250,13 @@ def set_stat_mean_and_stddev( self.davg = mean self.dstd = stddev - def get_stat_mean_and_stddev(self) -> Tuple[np.ndarray, np.ndarray]: + def get_stat_mean_and_stddev(self) -> tuple[np.ndarray, np.ndarray]: """Get mean and stddev for descriptor.""" return self.davg, self.dstd def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) @@ -399,9 +397,9 @@ def deserialize(cls, data: dict) -> "DescrptSeT": def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/dpmodel/descriptor/se_t_tebd.py b/deepmd/dpmodel/descriptor/se_t_tebd.py index b6e362d2d7..147a335926 100644 --- a/deepmd/dpmodel/descriptor/se_t_tebd.py +++ b/deepmd/dpmodel/descriptor/se_t_tebd.py @@ -1,9 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Callable, - List, Optional, - Tuple, Union, ) @@ -64,7 +62,7 @@ class DescrptSeTTebd(NativeOP, BaseDescriptor): The cut-off radius rcut_smth From where the environment matrix should be smoothed - sel : Union[List[int], int] + sel : Union[list[int], int] list[int]: sel[i] specifies the maxmum number of type i atoms in the cut-off radius int: the total maxmum number of atoms in the cut-off radius ntypes : int @@ -86,7 +84,7 @@ class DescrptSeTTebd(NativeOP, BaseDescriptor): The activation function in the embedding net. Supported options are |ACTIVATION_FN| env_protection: float Protection parameter to prevent division by zero errors during environment matrix calculations. - exclude_types : List[Tuple[int, int]] + exclude_types : list[tuple[int, int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. precision @@ -95,7 +93,7 @@ class DescrptSeTTebd(NativeOP, BaseDescriptor): If the weights of embedding net are trainable. seed Random seed for initializing the network parameters. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. concat_output_tebd: bool Whether to concat type embedding at the output of the descriptor. @@ -112,7 +110,7 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, neuron: list = [2, 4, 8], tebd_dim: int = 8, @@ -121,11 +119,11 @@ def __init__( set_davg_zero: bool = True, activation_function: str = "tanh", env_protection: float = 0.0, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], precision: str = "float64", trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, - type_map: Optional[List[str]] = None, + seed: Optional[Union[int, list[int]]] = None, + type_map: Optional[list[str]] = None, concat_output_tebd: bool = True, use_econf_tebd: bool = False, use_tebd_bias=False, @@ -178,7 +176,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return self.se_ttebd.get_nsel() - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.se_ttebd.get_sel() @@ -186,7 +184,7 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.se_ttebd.get_ntypes() - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map @@ -240,7 +238,7 @@ def dim_out(self): def dim_emb(self): return self.get_dim_emb() - def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): + def compute_input_stats(self, merged: list[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" raise NotImplementedError @@ -253,12 +251,12 @@ def set_stat_mean_and_stddev( self.se_ttebd.mean = mean self.se_ttebd.stddev = stddev - def get_stat_mean_and_stddev(self) -> Tuple[np.ndarray, np.ndarray]: + def get_stat_mean_and_stddev(self) -> tuple[np.ndarray, np.ndarray]: """Get mean and stddev for descriptor.""" return self.se_ttebd.mean, self.se_ttebd.stddev def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -412,9 +410,9 @@ def deserialize(cls, data: dict) -> "DescrptSeTTebd": def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -447,7 +445,7 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, neuron: list = [25, 50, 100], tebd_dim: int = 8, @@ -456,10 +454,10 @@ def __init__( activation_function="tanh", precision: str = "float64", resnet_dt: bool = False, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, smooth: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ) -> None: self.rcut = rcut self.rcut_smth = rcut_smth @@ -541,7 +539,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return sum(self.sel) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sel @@ -610,7 +608,7 @@ def dim_emb(self): def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data.""" @@ -622,7 +620,7 @@ def get_stats(self): def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) diff --git a/deepmd/dpmodel/fitting/dipole_fitting.py b/deepmd/dpmodel/fitting/dipole_fitting.py index 20e732823b..f67bbc93a4 100644 --- a/deepmd/dpmodel/fitting/dipole_fitting.py +++ b/deepmd/dpmodel/fitting/dipole_fitting.py @@ -2,8 +2,6 @@ import copy from typing import ( Any, - Dict, - List, Optional, Union, ) @@ -81,7 +79,7 @@ class DipoleFitting(GeneralFitting): c_differentiable If the variable is differentiated with respect to the cell tensor (pbc case). Only reducible variable are differentiable. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -90,25 +88,25 @@ def __init__( ntypes: int, dim_descrpt: int, embedding_width: int, - neuron: List[int] = [120, 120, 120], + neuron: list[int] = [120, 120, 120], resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, rcond: Optional[float] = None, tot_ener_zero: bool = False, - trainable: Optional[List[bool]] = None, + trainable: Optional[list[bool]] = None, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, - layer_name: Optional[List[Optional[str]]] = None, + layer_name: Optional[list[Optional[str]]] = None, use_aparam_as_mask: bool = False, spin: Any = None, mixed_types: bool = False, - exclude_types: List[int] = [], + exclude_types: list[int] = [], r_differentiable: bool = True, c_differentiable: bool = True, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, old_impl=False, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): if tot_ener_zero: raise NotImplementedError("tot_ener_zero is not implemented") @@ -188,7 +186,7 @@ def call( h2: Optional[np.ndarray] = None, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Calculate the fitting. Parameters diff --git a/deepmd/dpmodel/fitting/dos_fitting.py b/deepmd/dpmodel/fitting/dos_fitting.py index 0d4cee68e2..e9cd4a17ae 100644 --- a/deepmd/dpmodel/fitting/dos_fitting.py +++ b/deepmd/dpmodel/fitting/dos_fitting.py @@ -2,7 +2,6 @@ import copy from typing import ( TYPE_CHECKING, - List, Optional, Union, ) @@ -33,19 +32,19 @@ def __init__( ntypes: int, dim_descrpt: int, numb_dos: int = 300, - neuron: List[int] = [120, 120, 120], + neuron: list[int] = [120, 120, 120], resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, bias_dos: Optional[np.ndarray] = None, rcond: Optional[float] = None, - trainable: Union[bool, List[bool]] = True, + trainable: Union[bool, list[bool]] = True, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = False, - exclude_types: List[int] = [], - type_map: Optional[List[str]] = None, - seed: Optional[Union[int, List[int]]] = None, + exclude_types: list[int] = [], + type_map: Optional[list[str]] = None, + seed: Optional[Union[int, list[int]]] = None, ): if bias_dos is not None: self.bias_dos = bias_dos diff --git a/deepmd/dpmodel/fitting/ener_fitting.py b/deepmd/dpmodel/fitting/ener_fitting.py index 60f23f9628..9a1eae0156 100644 --- a/deepmd/dpmodel/fitting/ener_fitting.py +++ b/deepmd/dpmodel/fitting/ener_fitting.py @@ -3,7 +3,6 @@ from typing import ( TYPE_CHECKING, Any, - List, Optional, Union, ) @@ -30,23 +29,23 @@ def __init__( self, ntypes: int, dim_descrpt: int, - neuron: List[int] = [120, 120, 120], + neuron: list[int] = [120, 120, 120], resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, rcond: Optional[float] = None, tot_ener_zero: bool = False, - trainable: Optional[List[bool]] = None, - atom_ener: Optional[List[float]] = None, + trainable: Optional[list[bool]] = None, + atom_ener: Optional[list[float]] = None, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, - layer_name: Optional[List[Optional[str]]] = None, + layer_name: Optional[list[Optional[str]]] = None, use_aparam_as_mask: bool = False, spin: Any = None, mixed_types: bool = False, - exclude_types: List[int] = [], - type_map: Optional[List[str]] = None, - seed: Optional[Union[int, List[int]]] = None, + exclude_types: list[int] = [], + type_map: Optional[list[str]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__( var_name="energy", diff --git a/deepmd/dpmodel/fitting/general_fitting.py b/deepmd/dpmodel/fitting/general_fitting.py index a20405018e..a587f69449 100644 --- a/deepmd/dpmodel/fitting/general_fitting.py +++ b/deepmd/dpmodel/fitting/general_fitting.py @@ -5,8 +5,6 @@ ) from typing import ( Any, - Dict, - List, Optional, Union, ) @@ -78,15 +76,15 @@ class GeneralFitting(NativeOP, BaseFitting): mixed_types If true, use a uniform fitting net for all atom types, otherwise use different fitting nets for different atom types. - exclude_types: List[int] + exclude_types: list[int] Atomic contributions of the excluded atom types are set zero. - remove_vaccum_contribution: List[bool], optional + remove_vaccum_contribution: list[bool], optional Remove vaccum contribution before the bias is added. The list assigned each type. For `mixed_types` provide `[True]`, otherwise it should be a list of the same length as `ntypes` signaling if or not removing the vaccum contribution for the atom types in the list. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. - seed: Optional[Union[int, List[int]]] + seed: Optional[Union[int, list[int]]] Random seed for initializing the network parameters. """ @@ -95,24 +93,24 @@ def __init__( var_name: str, ntypes: int, dim_descrpt: int, - neuron: List[int] = [120, 120, 120], + neuron: list[int] = [120, 120, 120], resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, bias_atom_e: Optional[np.ndarray] = None, rcond: Optional[float] = None, tot_ener_zero: bool = False, - trainable: Optional[List[bool]] = None, + trainable: Optional[list[bool]] = None, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, - layer_name: Optional[List[Optional[str]]] = None, + layer_name: Optional[list[Optional[str]]] = None, use_aparam_as_mask: bool = False, spin: Any = None, mixed_types: bool = True, - exclude_types: List[int] = [], - remove_vaccum_contribution: Optional[List[bool]] = None, - type_map: Optional[List[str]] = None, - seed: Optional[Union[int, List[int]]] = None, + exclude_types: list[int] = [], + remove_vaccum_contribution: Optional[list[bool]] = None, + type_map: Optional[list[str]] = None, + seed: Optional[Union[int, list[int]]] = None, ): self.var_name = var_name self.ntypes = ntypes @@ -192,7 +190,7 @@ def get_dim_aparam(self) -> int: """Get the number (dimension) of atomic parameters of this atomic model.""" return self.numb_aparam - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -201,12 +199,12 @@ def get_sel_type(self) -> List[int]: """ return [ii for ii in range(self.ntypes) if ii not in self.exclude_types] - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -261,7 +259,7 @@ def __getitem__(self, key): def reinit_exclude( self, - exclude_types: List[int] = [], + exclude_types: list[int] = [], ): self.exclude_types = exclude_types self.emask = AtomExcludeMask(self.ntypes, self.exclude_types) @@ -322,7 +320,7 @@ def _call_common( h2: Optional[np.ndarray] = None, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Calculate the fitting. Parameters diff --git a/deepmd/dpmodel/fitting/invar_fitting.py b/deepmd/dpmodel/fitting/invar_fitting.py index 2e469eefe1..893853bb38 100644 --- a/deepmd/dpmodel/fitting/invar_fitting.py +++ b/deepmd/dpmodel/fitting/invar_fitting.py @@ -2,8 +2,6 @@ import copy from typing import ( Any, - Dict, - List, Optional, Union, ) @@ -105,9 +103,9 @@ class InvarFitting(GeneralFitting): And the aparam will not be used as the atomic parameters for embedding. mixed_types If false, different atomic types uses different fitting net, otherwise different atom types share the same fitting net. - exclude_types: List[int] + exclude_types: list[int] Atomic contributions of the excluded atom types are set zero. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -118,24 +116,24 @@ def __init__( ntypes: int, dim_descrpt: int, dim_out: int, - neuron: List[int] = [120, 120, 120], + neuron: list[int] = [120, 120, 120], resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, bias_atom: Optional[np.ndarray] = None, rcond: Optional[float] = None, tot_ener_zero: bool = False, - trainable: Optional[List[bool]] = None, - atom_ener: Optional[List[float]] = None, + trainable: Optional[list[bool]] = None, + atom_ener: Optional[list[float]] = None, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, - layer_name: Optional[List[Optional[str]]] = None, + layer_name: Optional[list[Optional[str]]] = None, use_aparam_as_mask: bool = False, spin: Any = None, mixed_types: bool = True, - exclude_types: List[int] = [], - type_map: Optional[List[str]] = None, - seed: Optional[Union[int, List[int]]] = None, + exclude_types: list[int] = [], + type_map: Optional[list[str]] = None, + seed: Optional[Union[int, list[int]]] = None, ): if tot_ener_zero: raise NotImplementedError("tot_ener_zero is not implemented") @@ -219,7 +217,7 @@ def call( h2: Optional[np.ndarray] = None, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Calculate the fitting. Parameters diff --git a/deepmd/dpmodel/fitting/make_base_fitting.py b/deepmd/dpmodel/fitting/make_base_fitting.py index 417ccc892a..a67273356d 100644 --- a/deepmd/dpmodel/fitting/make_base_fitting.py +++ b/deepmd/dpmodel/fitting/make_base_fitting.py @@ -4,8 +4,6 @@ abstractmethod, ) from typing import ( - Dict, - List, Optional, ) @@ -60,7 +58,7 @@ def fwd( h2: Optional[t_tensor] = None, fparam: Optional[t_tensor] = None, aparam: Optional[t_tensor] = None, - ) -> Dict[str, t_tensor]: + ) -> dict[str, t_tensor]: """Calculate fitting.""" pass @@ -69,13 +67,13 @@ def compute_output_stats(self, merged): raise NotImplementedError @abstractmethod - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" pass @abstractmethod def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. diff --git a/deepmd/dpmodel/fitting/polarizability_fitting.py b/deepmd/dpmodel/fitting/polarizability_fitting.py index d3036fe8b8..2ff5052a83 100644 --- a/deepmd/dpmodel/fitting/polarizability_fitting.py +++ b/deepmd/dpmodel/fitting/polarizability_fitting.py @@ -2,8 +2,6 @@ import copy from typing import ( Any, - Dict, - List, Optional, Union, ) @@ -82,11 +80,11 @@ class PolarFitting(GeneralFitting): fit_diag : bool Fit the diagonal part of the rotational invariant polarizability matrix, which will be converted to normal polarizability matrix by contracting with the rotation matrix. - scale : List[float] + scale : list[float] The output of the fitting net (polarizability matrix) for type i atom will be scaled by scale[i] shift_diag : bool Whether to shift the diagonal part of the polarizability matrix. The shift operation is carried out after scale. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -95,26 +93,26 @@ def __init__( ntypes: int, dim_descrpt: int, embedding_width: int, - neuron: List[int] = [120, 120, 120], + neuron: list[int] = [120, 120, 120], resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, rcond: Optional[float] = None, tot_ener_zero: bool = False, - trainable: Optional[List[bool]] = None, + trainable: Optional[list[bool]] = None, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, - layer_name: Optional[List[Optional[str]]] = None, + layer_name: Optional[list[Optional[str]]] = None, use_aparam_as_mask: bool = False, spin: Any = None, mixed_types: bool = False, - exclude_types: List[int] = [], + exclude_types: list[int] = [], old_impl: bool = False, fit_diag: bool = True, - scale: Optional[List[float]] = None, + scale: Optional[list[float]] = None, shift_diag: bool = True, - type_map: Optional[List[str]] = None, - seed: Optional[Union[int, List[int]]] = None, + type_map: Optional[list[str]] = None, + seed: Optional[Union[int, list[int]]] = None, ): if tot_ener_zero: raise NotImplementedError("tot_ener_zero is not implemented") @@ -223,7 +221,7 @@ def output_def(self): ) def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -257,7 +255,7 @@ def call( h2: Optional[np.ndarray] = None, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Calculate the fitting. Parameters diff --git a/deepmd/dpmodel/fitting/property_fitting.py b/deepmd/dpmodel/fitting/property_fitting.py index 014dda4188..1a8fe44aae 100644 --- a/deepmd/dpmodel/fitting/property_fitting.py +++ b/deepmd/dpmodel/fitting/property_fitting.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import copy from typing import ( - List, Optional, Union, ) @@ -60,9 +59,9 @@ class PropertyFittingNet(InvarFitting): The precision of the embedding net parameters. Supported options are |PRECISION| mixed_types If false, different atomic types uses different fitting net, otherwise different atom types share the same fitting net. - exclude_types: List[int] + exclude_types: list[int] Atomic contributions of the excluded atom types are set zero. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -71,10 +70,10 @@ def __init__( ntypes: int, dim_descrpt: int, task_dim: int = 1, - neuron: List[int] = [128, 128, 128], + neuron: list[int] = [128, 128, 128], bias_atom_p: Optional[np.ndarray] = None, rcond: Optional[float] = None, - trainable: Union[bool, List[bool]] = True, + trainable: Union[bool, list[bool]] = True, intensive: bool = False, bias_method: str = "normal", resnet_dt: bool = True, @@ -83,8 +82,8 @@ def __init__( activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, - exclude_types: List[int] = [], - type_map: Optional[List[str]] = None, + exclude_types: list[int] = [], + type_map: Optional[list[str]] = None, # not used seed: Optional[int] = None, ): diff --git a/deepmd/dpmodel/infer/deep_eval.py b/deepmd/dpmodel/infer/deep_eval.py index 02625f5331..695edb29d2 100644 --- a/deepmd/dpmodel/infer/deep_eval.py +++ b/deepmd/dpmodel/infer/deep_eval.py @@ -4,11 +4,7 @@ TYPE_CHECKING, Any, Callable, - Dict, - List, Optional, - Tuple, - Type, Union, ) @@ -109,7 +105,7 @@ def get_ntypes(self) -> int: """Get the number of atom types of this model.""" return len(self.type_map) - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map (element name of the atom types) of this model.""" return self.type_map @@ -122,7 +118,7 @@ def get_dim_aparam(self) -> int: return self.dp.get_dim_aparam() @property - def model_type(self) -> Type["DeepEvalWrapper"]: + def model_type(self) -> type["DeepEvalWrapper"]: """The the evaluator of the model type.""" model_output_type = self.dp.model_output_type() if "energy" in model_output_type: @@ -138,7 +134,7 @@ def model_type(self) -> Type["DeepEvalWrapper"]: else: raise RuntimeError("Unknown model type") - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -168,7 +164,7 @@ def eval( fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, **kwargs: Any, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Evaluate the energy, force and virial by using this DP. Parameters @@ -226,7 +222,7 @@ def eval( ) ) - def _get_request_defs(self, atomic: bool) -> List[OutputVariableDef]: + def _get_request_defs(self, atomic: bool) -> list[OutputVariableDef]: """Get the requested output definitions. When atomic is True, all output_def are requested. @@ -290,7 +286,7 @@ def _get_natoms_and_nframes( coords: np.ndarray, atom_types: np.ndarray, mixed_type: bool = False, - ) -> Tuple[int, int]: + ) -> tuple[int, int]: if mixed_type: natoms = len(atom_types[0]) else: @@ -307,7 +303,7 @@ def _eval_model( coords: np.ndarray, cells: Optional[np.ndarray], atom_types: np.ndarray, - request_defs: List[OutputVariableDef], + request_defs: list[OutputVariableDef], ): model = self.dp diff --git a/deepmd/dpmodel/model/base_model.py b/deepmd/dpmodel/model/base_model.py index c6d482c72f..3f71003bad 100644 --- a/deepmd/dpmodel/model/base_model.py +++ b/deepmd/dpmodel/model/base_model.py @@ -7,10 +7,7 @@ ) from typing import ( Any, - List, Optional, - Tuple, - Type, ) from deepmd.utils.data_system import ( @@ -22,7 +19,7 @@ ) -def make_base_model() -> Type[object]: +def make_base_model() -> type[object]: class BaseBaseModel(ABC, PluginVariant, make_plugin_registry("model")): """Base class for final exported model that will be directly used for inference. @@ -67,7 +64,7 @@ def __call__(self, *args: Any, **kwds: Any) -> Any: pass @abstractmethod - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" @abstractmethod @@ -83,7 +80,7 @@ def get_dim_aparam(self): """Get the number (dimension) of atomic parameters of this atomic model.""" @abstractmethod - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -99,7 +96,7 @@ def is_aparam_nall(self) -> bool: """ @abstractmethod - def model_output_type(self) -> List[str]: + def model_output_type(self) -> list[str]: """Get the output type for the model.""" @abstractmethod @@ -166,9 +163,9 @@ def get_nsel(self) -> int: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/dpmodel/model/dp_model.py b/deepmd/dpmodel/model/dp_model.py index 1597ba0b14..eda0414398 100644 --- a/deepmd/dpmodel/model/dp_model.py +++ b/deepmd/dpmodel/model/dp_model.py @@ -2,9 +2,7 @@ from typing import ( - List, Optional, - Tuple, ) from deepmd.dpmodel.descriptor.base_descriptor import ( @@ -21,9 +19,9 @@ class DPModelCommon: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py index ee4c1f035a..8cdb7e1f25 100644 --- a/deepmd/dpmodel/model/make_model.py +++ b/deepmd/dpmodel/model/make_model.py @@ -1,10 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - Dict, - List, Optional, - Tuple, - Type, ) import numpy as np @@ -42,7 +38,7 @@ ) -def make_model(T_AtomicModel: Type[BaseAtomicModel]): +def make_model(T_AtomicModel: type[BaseAtomicModel]): """Make a model as a derived class of an atomic model. The model provide two interfaces. @@ -87,7 +83,7 @@ def model_output_def(self): """Get the output def for the model.""" return ModelOutputDef(self.atomic_output_def()) - def model_output_type(self) -> List[str]: + def model_output_type(self) -> list[str]: """Get the output type for the model.""" output_def = self.model_output_def() var_defs = output_def.var_defs @@ -106,7 +102,7 @@ def call( fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, do_atomic_virial: bool = False, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Return model prediction. Parameters @@ -128,7 +124,7 @@ def call( Returns ------- ret_dict - The result dict of type Dict[str,np.ndarray]. + The result dict of type dict[str,np.ndarray]. The keys are defined by the `ModelOutputDef`. """ @@ -249,7 +245,7 @@ def input_type_cast( box: Optional[np.ndarray] = None, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - ) -> Tuple[ + ) -> tuple[ np.ndarray, Optional[np.ndarray], Optional[np.ndarray], @@ -263,7 +259,7 @@ def input_type_cast( ### ### type checking would not pass jit, convert to coord prec anyway ### - _lst: List[Optional[np.ndarray]] = [ + _lst: list[Optional[np.ndarray]] = [ vv.astype(coord.dtype) if vv is not None else None for vv in [box, fparam, aparam] ] @@ -285,9 +281,9 @@ def input_type_cast( def output_type_cast( self, - model_ret: Dict[str, np.ndarray], + model_ret: dict[str, np.ndarray], input_prec: str, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Convert the model output to the input prec.""" do_cast = ( input_prec @@ -427,7 +423,7 @@ def do_grad_c( return self.atomic_model.do_grad_c(var_name) def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -449,7 +445,7 @@ def get_dim_aparam(self) -> int: """Get the number (dimension) of atomic parameters of this atomic model.""" return self.atomic_model.get_dim_aparam() - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -469,7 +465,7 @@ def get_rcut(self) -> float: """Get the cut-off radius.""" return self.atomic_model.get_rcut() - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" return self.atomic_model.get_type_map() @@ -481,7 +477,7 @@ def get_nnei(self) -> int: """Returns the total number of selected neighboring atoms in the cut-off radius.""" return self.atomic_model.get_nnei() - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.atomic_model.get_sel() diff --git a/deepmd/dpmodel/model/spin_model.py b/deepmd/dpmodel/model/spin_model.py index d9c96a979e..b0801fe59e 100644 --- a/deepmd/dpmodel/model/spin_model.py +++ b/deepmd/dpmodel/model/spin_model.py @@ -1,7 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - Dict, - List, Optional, ) @@ -222,7 +220,7 @@ def expand_aparam(aparam, nloc: int): ) return aparam - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" tmap = self.backbone_model.get_type_map() ntypes = len(tmap) // 2 # ignore the virtual type @@ -244,7 +242,7 @@ def get_dim_aparam(self): """Get the number (dimension) of atomic parameters of this atomic model.""" return self.backbone_model.get_dim_aparam() - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution to the result of the model. @@ -258,7 +256,7 @@ def is_aparam_nall(self) -> bool: """ return self.backbone_model.is_aparam_nall() - def model_output_type(self) -> List[str]: + def model_output_type(self) -> list[str]: """Get the output type for the model.""" return self.backbone_model.model_output_type() @@ -333,7 +331,7 @@ def call( fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, do_atomic_virial: bool = False, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Return model prediction. Parameters @@ -358,7 +356,7 @@ def call( Returns ------- ret_dict - The result dict of type Dict[str,np.ndarray]. + The result dict of type dict[str,np.ndarray]. The keys are defined by the `ModelOutputDef`. """ diff --git a/deepmd/dpmodel/model/transform_output.py b/deepmd/dpmodel/model/transform_output.py index 67fb016389..43c275b1be 100644 --- a/deepmd/dpmodel/model/transform_output.py +++ b/deepmd/dpmodel/model/transform_output.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Dict, -) import numpy as np @@ -17,11 +14,11 @@ def fit_output_to_model_output( - fit_ret: Dict[str, np.ndarray], + fit_ret: dict[str, np.ndarray], fit_output_def: FittingOutputDef, coord_ext: np.ndarray, do_atomic_virial: bool = False, -) -> Dict[str, np.ndarray]: +) -> dict[str, np.ndarray]: """Transform the output of the fitting network to the model output. @@ -49,11 +46,11 @@ def fit_output_to_model_output( def communicate_extended_output( - model_ret: Dict[str, np.ndarray], + model_ret: dict[str, np.ndarray], model_output_def: ModelOutputDef, mapping: np.ndarray, # nf x nloc do_atomic_virial: bool = False, -) -> Dict[str, np.ndarray]: +) -> dict[str, np.ndarray]: """Transform the output of the model network defined on local and ghost (extended) atoms to local atoms. diff --git a/deepmd/dpmodel/output_def.py b/deepmd/dpmodel/output_def.py index d55ea3988d..2ceb4f412a 100644 --- a/deepmd/dpmodel/output_def.py +++ b/deepmd/dpmodel/output_def.py @@ -3,16 +3,11 @@ from enum import ( IntEnum, ) -from typing import ( - Dict, - List, - Tuple, -) def check_shape( - shape: List[int], - def_shape: List[int], + shape: list[int], + def_shape: list[int], ): """Check if the shape satisfies the defined shape.""" assert len(shape) == len(def_shape) @@ -193,7 +188,7 @@ class OutputVariableDef: def __init__( self, name: str, - shape: List[int], + shape: list[int], reducible: bool = False, r_differentiable: bool = False, c_differentiable: bool = False, @@ -256,7 +251,7 @@ class FittingOutputDef: def __init__( self, - var_defs: List[OutputVariableDef], + var_defs: list[OutputVariableDef], ): self.var_defs = {vv.name: vv for vv in var_defs} @@ -266,7 +261,7 @@ def __getitem__( ) -> OutputVariableDef: return self.var_defs[key] - def get_data(self) -> Dict[str, OutputVariableDef]: + def get_data(self) -> dict[str, OutputVariableDef]: return self.var_defs def keys(self): @@ -298,7 +293,7 @@ def __init__( self.def_hess_r, _ = do_derivative(self.def_derv_r) self.def_derv_c_redu = do_reduce(self.def_derv_c) self.def_mask = do_mask(self.def_outp.get_data()) - self.var_defs: Dict[str, OutputVariableDef] = {} + self.var_defs: dict[str, OutputVariableDef] = {} for ii in [ self.def_outp.get_data(), self.def_redu, @@ -318,7 +313,7 @@ def __getitem__( def get_data( self, - ) -> Dict[str, OutputVariableDef]: + ) -> dict[str, OutputVariableDef]: return self.var_defs def keys(self): @@ -347,11 +342,11 @@ def get_reduce_name(name: str) -> str: return name + "_redu" -def get_deriv_name(name: str) -> Tuple[str, str]: +def get_deriv_name(name: str) -> tuple[str, str]: return name + "_derv_r", name + "_derv_c" -def get_deriv_name_mag(name: str) -> Tuple[str, str]: +def get_deriv_name_mag(name: str) -> tuple[str, str]: return name + "_derv_r_mag", name + "_derv_c_mag" @@ -424,9 +419,9 @@ def check_deriv(var_def: OutputVariableDef) -> bool: def do_reduce( - def_outp_data: Dict[str, OutputVariableDef], -) -> Dict[str, OutputVariableDef]: - def_redu: Dict[str, OutputVariableDef] = {} + def_outp_data: dict[str, OutputVariableDef], +) -> dict[str, OutputVariableDef]: + def_redu: dict[str, OutputVariableDef] = {} for kk, vv in def_outp_data.items(): if vv.reducible: rk = get_reduce_name(kk) @@ -443,9 +438,9 @@ def do_reduce( def do_mask( - def_outp_data: Dict[str, OutputVariableDef], -) -> Dict[str, OutputVariableDef]: - def_mask: Dict[str, OutputVariableDef] = {} + def_outp_data: dict[str, OutputVariableDef], +) -> dict[str, OutputVariableDef]: + def_mask: dict[str, OutputVariableDef] = {} # for deep eval when has atomic mask def_mask["mask"] = OutputVariableDef( name="mask", @@ -468,10 +463,10 @@ def do_mask( def do_derivative( - def_outp_data: Dict[str, OutputVariableDef], -) -> Tuple[Dict[str, OutputVariableDef], Dict[str, OutputVariableDef]]: - def_derv_r: Dict[str, OutputVariableDef] = {} - def_derv_c: Dict[str, OutputVariableDef] = {} + def_outp_data: dict[str, OutputVariableDef], +) -> tuple[dict[str, OutputVariableDef], dict[str, OutputVariableDef]]: + def_derv_r: dict[str, OutputVariableDef] = {} + def_derv_c: dict[str, OutputVariableDef] = {} for kk, vv in def_outp_data.items(): rkr, rkc = get_deriv_name(kk) rkrm, rkcm = get_deriv_name_mag(kk) diff --git a/deepmd/dpmodel/utils/exclude_mask.py b/deepmd/dpmodel/utils/exclude_mask.py index ff668b8153..d0a739b9d4 100644 --- a/deepmd/dpmodel/utils/exclude_mask.py +++ b/deepmd/dpmodel/utils/exclude_mask.py @@ -1,8 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - List, - Tuple, -) import numpy as np @@ -13,7 +9,7 @@ class AtomExcludeMask: def __init__( self, ntypes: int, - exclude_types: List[int] = [], + exclude_types: list[int] = [], ): self.ntypes = ntypes self.exclude_types = exclude_types @@ -59,7 +55,7 @@ class PairExcludeMask: def __init__( self, ntypes: int, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.ntypes = ntypes self.exclude_types = set() diff --git a/deepmd/dpmodel/utils/neighbor_stat.py b/deepmd/dpmodel/utils/neighbor_stat.py index 96b39d20ad..744a4476cd 100644 --- a/deepmd/dpmodel/utils/neighbor_stat.py +++ b/deepmd/dpmodel/utils/neighbor_stat.py @@ -1,8 +1,9 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( +from collections.abc import ( Iterator, +) +from typing import ( Optional, - Tuple, ) import numpy as np @@ -47,7 +48,7 @@ def call( coord: np.ndarray, atype: np.ndarray, cell: Optional[np.ndarray], - ) -> Tuple[float, np.ndarray]: + ) -> tuple[float, np.ndarray]: """Calculate the neareest neighbor distance between atoms, maximum nbor size of atoms and the output data range of the environment matrix. @@ -130,7 +131,7 @@ def __init__( def iterator( self, data: DeepmdDataSystem - ) -> Iterator[Tuple[np.ndarray, float, str]]: + ) -> Iterator[tuple[np.ndarray, float, str]]: """Abstract method for producing data. Yields diff --git a/deepmd/dpmodel/utils/network.py b/deepmd/dpmodel/utils/network.py index 22e85c9890..e1242c3669 100644 --- a/deepmd/dpmodel/utils/network.py +++ b/deepmd/dpmodel/utils/network.py @@ -9,8 +9,6 @@ from typing import ( Callable, ClassVar, - Dict, - List, Optional, Union, ) @@ -86,7 +84,7 @@ def __init__( activation_function: Optional[str] = None, resnet: bool = False, precision: str = DEFAULT_PRECISION, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ) -> None: prec = PRECISION_DICT[precision.lower()] self.precision = precision @@ -347,7 +345,7 @@ def __init__( uni_init: bool = True, trainable: bool = True, precision: str = DEFAULT_PRECISION, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ) -> None: self.eps = eps self.uni_init = uni_init @@ -494,7 +492,7 @@ class NN(ModuleBase): The layers of the network. """ - def __init__(self, layers: Optional[List[dict]] = None) -> None: + def __init__(self, layers: Optional[list[dict]] = None) -> None: super().__init__() if layers is None: layers = [] @@ -604,11 +602,11 @@ class EN(T_Network): def __init__( self, in_dim, - neuron: List[int] = [24, 48, 96], + neuron: list[int] = [24, 48, 96], activation_function: str = "tanh", resnet_dt: bool = False, precision: str = DEFAULT_PRECISION, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, bias: bool = True, ): layers = [] @@ -709,12 +707,12 @@ def __init__( self, in_dim, out_dim, - neuron: List[int] = [24, 48, 96], + neuron: list[int] = [24, 48, 96], activation_function: str = "tanh", resnet_dt: bool = False, precision: str = DEFAULT_PRECISION, bias_out: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__( in_dim, @@ -804,7 +802,7 @@ class NetworkCollection: """ # subclass may override this - NETWORK_TYPE_MAP: ClassVar[Dict[str, type]] = { + NETWORK_TYPE_MAP: ClassVar[dict[str, type]] = { "network": NativeNet, "embedding_network": EmbeddingNet, "fitting_network": FittingNet, @@ -815,7 +813,7 @@ def __init__( ndim: int, ntypes: int, network_type: str = "network", - networks: List[Union[NativeNet, dict]] = [], + networks: list[Union[NativeNet, dict]] = [], ): self.ndim = ndim self.ntypes = ntypes diff --git a/deepmd/dpmodel/utils/nlist.py b/deepmd/dpmodel/utils/nlist.py index c935377e6a..4d0b3e3286 100644 --- a/deepmd/dpmodel/utils/nlist.py +++ b/deepmd/dpmodel/utils/nlist.py @@ -1,7 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - Dict, - List, Optional, Union, ) @@ -18,7 +16,7 @@ def extend_input_and_build_neighbor_list( coord, atype, rcut: float, - sel: List[int], + sel: list[int], mixed_types: bool = False, box: Optional[np.ndarray] = None, ): @@ -51,7 +49,7 @@ def build_neighbor_list( atype: np.ndarray, nloc: int, rcut: float, - sel: Union[int, List[int]], + sel: Union[int, list[int]], distinguish_types: bool = True, ) -> np.ndarray: """Build neightbor list for a single frame. keeps nsel neighbors. @@ -67,7 +65,7 @@ def build_neighbor_list( number of local atoms. rcut : float cut-off radius - sel : int or List[int] + sel : int or list[int] maximal number of neighbors (of each type). if distinguish_types==True, nsel should be list and the length of nsel should be equal to number of @@ -145,7 +143,7 @@ def build_neighbor_list( def nlist_distinguish_types( nlist: np.ndarray, atype: np.ndarray, - sel: List[int], + sel: list[int], ): """Given a nlist that does not distinguish atom types, return a nlist that distinguish atom types. @@ -179,9 +177,9 @@ def get_multiple_nlist_key(rcut: float, nsel: int) -> str: def build_multiple_neighbor_list( coord: np.ndarray, nlist: np.ndarray, - rcuts: List[float], - nsels: List[int], -) -> Dict[str, np.ndarray]: + rcuts: list[float], + nsels: list[int], +) -> dict[str, np.ndarray]: """Input one neighbor list, and produce multiple neighbor lists with different cutoff radius and numbers of selection out of it. The required rcuts and nsels should be smaller or equal to the input nlist. @@ -193,14 +191,14 @@ def build_multiple_neighbor_list( nlist : np.ndarray Neighbor list of shape [batch_size, nloc, nsel], the neighbors should be stored in an ascending order. - rcuts : List[float] + rcuts : list[float] list of cut-off radius in ascending order. - nsels : List[int] + nsels : list[int] maximal number of neighbors in ascending order. Returns ------- - nlist_dict : Dict[str, np.ndarray] + nlist_dict : dict[str, np.ndarray] A dict of nlists, key given by get_multiple_nlist_key(rc, nsel) value being the corresponding nlist. diff --git a/deepmd/dpmodel/utils/seed.py b/deepmd/dpmodel/utils/seed.py index 4ceab80066..165ff558b9 100644 --- a/deepmd/dpmodel/utils/seed.py +++ b/deepmd/dpmodel/utils/seed.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, Union, overload, @@ -12,10 +11,10 @@ def child_seed(seed: None, idx: int) -> None: ... @overload -def child_seed(seed: Union[int, List[int]], idx: int) -> List[int]: ... +def child_seed(seed: Union[int, list[int]], idx: int) -> list[int]: ... -def child_seed(seed: Optional[Union[int, List[int]]], idx: int) -> Optional[List[int]]: +def child_seed(seed: Optional[Union[int, list[int]]], idx: int) -> Optional[list[int]]: """Generate a child seed from a parent seed. Parameters @@ -27,7 +26,7 @@ def child_seed(seed: Optional[Union[int, List[int]]], idx: int) -> Optional[List Returns ------- - Optional[List[int]] + Optional[list[int]] The child seed. """ # See https://numpy.org/doc/stable/reference/random/parallel.html#sequence-of-integer-seeds diff --git a/deepmd/dpmodel/utils/type_embed.py b/deepmd/dpmodel/utils/type_embed.py index 04c05b6a39..d67d8e50fd 100644 --- a/deepmd/dpmodel/utils/type_embed.py +++ b/deepmd/dpmodel/utils/type_embed.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, Union, ) @@ -51,7 +50,7 @@ class TypeEmbedNet(NativeOP): Whether to use electronic configuration type embedding. use_tebd_bias : bool, Optional Whether to use bias in the type embedding layer. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -59,16 +58,16 @@ def __init__( self, *, ntypes: int, - neuron: List[int], + neuron: list[int], resnet_dt: bool = False, activation_function: str = "tanh", precision: str = "default", trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, padding: bool = False, use_econf_tebd: bool = False, use_tebd_bias: bool = False, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, ) -> None: self.ntypes = ntypes self.neuron = neuron @@ -162,7 +161,7 @@ def serialize(self) -> dict: } def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. diff --git a/deepmd/dpmodel/utils/update_sel.py b/deepmd/dpmodel/utils/update_sel.py index dc38a6a041..3f2900771f 100644 --- a/deepmd/dpmodel/utils/update_sel.py +++ b/deepmd/dpmodel/utils/update_sel.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Type, -) from deepmd.dpmodel.utils.neighbor_stat import ( NeighborStat, @@ -13,5 +10,5 @@ class UpdateSel(BaseUpdateSel): @property - def neighbor_stat(self) -> Type[NeighborStat]: + def neighbor_stat(self) -> type[NeighborStat]: return NeighborStat diff --git a/deepmd/entrypoints/main.py b/deepmd/entrypoints/main.py index ba2eb90247..05f660cb9a 100644 --- a/deepmd/entrypoints/main.py +++ b/deepmd/entrypoints/main.py @@ -43,7 +43,7 @@ def main(args: argparse.Namespace): Parameters ---------- - args : List[str] or argparse.Namespace, optional + args : list[str] or argparse.Namespace, optional list of command line arguments, used to avoid calling from the subprocess, as it is quite slow to import tensorflow; if Namespace is given, it will be used directly diff --git a/deepmd/entrypoints/neighbor_stat.py b/deepmd/entrypoints/neighbor_stat.py index 8840851b91..62dceb24fd 100644 --- a/deepmd/entrypoints/neighbor_stat.py +++ b/deepmd/entrypoints/neighbor_stat.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging from typing import ( - List, Optional, ) @@ -22,7 +21,7 @@ def neighbor_stat( *, system: str, rcut: float, - type_map: Optional[List[str]], + type_map: Optional[list[str]], mixed_type: bool = False, backend: str = "tensorflow", **kwargs, diff --git a/deepmd/entrypoints/show.py b/deepmd/entrypoints/show.py index 6f72c4614d..4cad5f312c 100644 --- a/deepmd/entrypoints/show.py +++ b/deepmd/entrypoints/show.py @@ -1,8 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging -from typing import ( - List, -) from deepmd.infer.deep_eval import ( DeepEval, @@ -14,7 +11,7 @@ def show( *, INPUT: str, - ATTRIBUTES: List[str], + ATTRIBUTES: list[str], **kwargs, ): model = DeepEval(INPUT, head=0) diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index 6709a9cb29..ad445fdea1 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -7,10 +7,7 @@ ) from typing import ( TYPE_CHECKING, - Dict, - List, Optional, - Tuple, ) import numpy as np @@ -266,7 +263,7 @@ def test_ener( detail_file: Optional[str], has_atom_ener: bool, append_detail: bool = False, -) -> Tuple[List[np.ndarray], List[int]]: +) -> tuple[list[np.ndarray], list[int]]: """Test energy type model. Parameters @@ -288,7 +285,7 @@ def test_ener( Returns ------- - Tuple[List[np.ndarray], List[int]] + tuple[list[np.ndarray], list[int]] arrays with results and their shapes """ data.add("energy", 1, atomic=False, must=False, high_prec=True) @@ -564,7 +561,7 @@ def test_ener( } -def print_ener_sys_avg(avg: Dict[str, float]): +def print_ener_sys_avg(avg: dict[str, float]): """Print errors summary for energy type potential. Parameters @@ -598,7 +595,7 @@ def test_dos( detail_file: Optional[str], has_atom_dos: bool, append_detail: bool = False, -) -> Tuple[List[np.ndarray], List[int]]: +) -> tuple[list[np.ndarray], list[int]]: """Test DOS type model. Parameters @@ -620,7 +617,7 @@ def test_dos( Returns ------- - Tuple[List[np.ndarray], List[int]] + tuple[list[np.ndarray], list[int]] arrays with results and their shapes """ data.add("dos", dp.numb_dos, atomic=False, must=True, high_prec=True) @@ -736,7 +733,7 @@ def test_dos( } -def print_dos_sys_avg(avg: Dict[str, float]): +def print_dos_sys_avg(avg: dict[str, float]): """Print errors summary for DOS type potential. Parameters @@ -758,7 +755,7 @@ def test_property( detail_file: Optional[str], has_atom_property: bool, append_detail: bool = False, -) -> Tuple[List[np.ndarray], List[int]]: +) -> tuple[list[np.ndarray], list[int]]: """Test Property type model. Parameters @@ -780,7 +777,7 @@ def test_property( Returns ------- - Tuple[List[np.ndarray], List[int]] + tuple[list[np.ndarray], list[int]] arrays with results and their shapes """ data.add("property", dp.task_dim, atomic=False, must=True, high_prec=True) @@ -890,7 +887,7 @@ def test_property( } -def print_property_sys_avg(avg: Dict[str, float]): +def print_property_sys_avg(avg: dict[str, float]): """Print errors summary for Property type potential. Parameters @@ -940,7 +937,7 @@ def test_wfc( data: DeepmdData, numb_test: int, detail_file: Optional[str], -) -> Tuple[List[np.ndarray], List[int]]: +) -> tuple[list[np.ndarray], list[int]]: """Test energy type model. Parameters @@ -956,7 +953,7 @@ def test_wfc( Returns ------- - Tuple[List[np.ndarray], List[int]] + tuple[list[np.ndarray], list[int]] arrays with results and their shapes """ data.add( @@ -1004,7 +1001,7 @@ def test_polar( detail_file: Optional[str], *, atomic: bool, -) -> Tuple[List[np.ndarray], List[int]]: +) -> tuple[list[np.ndarray], list[int]]: """Test energy type model. Parameters @@ -1022,7 +1019,7 @@ def test_polar( Returns ------- - Tuple[List[np.ndarray], List[int]] + tuple[list[np.ndarray], list[int]] arrays with results and their shapes """ data.add( @@ -1145,7 +1142,7 @@ def test_dipole( numb_test: int, detail_file: Optional[str], atomic: bool, -) -> Tuple[List[np.ndarray], List[int]]: +) -> tuple[list[np.ndarray], list[int]]: """Test energy type model. Parameters @@ -1163,7 +1160,7 @@ def test_dipole( Returns ------- - Tuple[List[np.ndarray], List[int]] + tuple[list[np.ndarray], list[int]] arrays with results and their shapes """ data.add( diff --git a/deepmd/env.py b/deepmd/env.py index 139e95b824..605dfeed99 100644 --- a/deepmd/env.py +++ b/deepmd/env.py @@ -7,10 +7,6 @@ from pathlib import ( Path, ) -from typing import ( - Dict, - Tuple, -) import numpy as np @@ -105,7 +101,7 @@ def set_default_nthreads(): set_env_if_empty("DP_INTER_OP_PARALLELISM_THREADS", "0", verbose=False) -def get_default_nthreads() -> Tuple[int, int]: +def get_default_nthreads() -> tuple[int, int]: """Get paralellism settings. The method will first read the environment variables with the prefix `DP_`. @@ -114,7 +110,7 @@ def get_default_nthreads() -> Tuple[int, int]: Returns ------- - Tuple[int, int] + tuple[int, int] number of `DP_INTRA_OP_PARALLELISM_THREADS` and `DP_INTER_OP_PARALLELISM_THREADS` """ @@ -133,7 +129,7 @@ def get_default_nthreads() -> Tuple[int, int]: def _get_package_constants( config_file: Path = CONFIG_FILE, -) -> Dict[str, str]: +) -> dict[str, str]: """Read package constants set at compile time by CMake to dictionary. Parameters @@ -143,7 +139,7 @@ def _get_package_constants( Returns ------- - Dict[str, str] + dict[str, str] dictionary with package constants """ if not config_file.is_file(): diff --git a/deepmd/infer/deep_dos.py b/deepmd/infer/deep_dos.py index b26555627f..0d7ccee2b6 100644 --- a/deepmd/infer/deep_dos.py +++ b/deepmd/infer/deep_dos.py @@ -1,9 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Any, - List, Optional, - Tuple, Union, ) @@ -64,13 +62,13 @@ def eval( self, coords: np.ndarray, cells: Optional[np.ndarray], - atom_types: Union[List[int], np.ndarray], + atom_types: Union[list[int], np.ndarray], atomic: bool = False, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, mixed_type: bool = False, **kwargs: Any, - ) -> Tuple[np.ndarray, ...]: + ) -> tuple[np.ndarray, ...]: """Evaluate energy, force, and virial. If atomic is True, also return atomic energy and atomic virial. @@ -81,7 +79,7 @@ def eval( cells : np.ndarray The cell vectors of the system, in shape (nframes, 9). If the system is not periodic, set it to None. - atom_types : List[int] or np.ndarray + atom_types : list[int] or np.ndarray The types of the atoms. If mixed_type is False, the shape is (natoms,); otherwise, the shape is (nframes, natoms). atomic : bool, optional @@ -92,7 +90,7 @@ def eval( The atomic parameters, by default None. mixed_type : bool, optional Whether the atom_types is mixed type, by default False. - **kwargs : Dict[str, Any] + **kwargs : dict[str, Any] Keyword arguments. Returns diff --git a/deepmd/infer/deep_eval.py b/deepmd/infer/deep_eval.py index f35094df3d..4d0134c37c 100644 --- a/deepmd/infer/deep_eval.py +++ b/deepmd/infer/deep_eval.py @@ -7,11 +7,7 @@ TYPE_CHECKING, Any, ClassVar, - Dict, - List, Optional, - Tuple, - Type, Union, ) @@ -111,7 +107,7 @@ def eval( fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, **kwargs: Any, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Evaluate the energy, force and virial by using this DP. Parameters @@ -158,7 +154,7 @@ def get_ntypes(self) -> int: """Get the number of atom types of this model.""" @abstractmethod - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map (element name of the atom types) of this model.""" @abstractmethod @@ -256,11 +252,11 @@ def _check_mixed_types(self, atom_types: np.ndarray) -> bool: @property @abstractmethod - def model_type(self) -> Type["DeepEval"]: + def model_type(self) -> type["DeepEval"]: """The the evaluator of the model type.""" @abstractmethod - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -355,7 +351,7 @@ def get_ntypes(self) -> int: """Get the number of atom types of this model.""" return self.deep_eval.get_ntypes() - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map (element name of the atom types) of this model.""" return self.deep_eval.get_type_map() @@ -372,7 +368,7 @@ def _get_natoms_and_nframes( coords: np.ndarray, atom_types: np.ndarray, mixed_type: bool = False, - ) -> Tuple[int, int]: + ) -> tuple[int, int]: if mixed_type or atom_types.ndim > 1: natoms = len(atom_types[0]) else: @@ -525,7 +521,7 @@ def _standard_input(self, coords, cells, atom_types, fparam, aparam, mixed_type) ) return coords, cells, atom_types, fparam, aparam, nframes, natoms - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution diff --git a/deepmd/infer/deep_polar.py b/deepmd/infer/deep_polar.py index 22561a0685..7220e53637 100644 --- a/deepmd/infer/deep_polar.py +++ b/deepmd/infer/deep_polar.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, Union, ) @@ -51,7 +50,7 @@ def eval( self, coords: np.ndarray, cells: Optional[np.ndarray], - atom_types: Union[List[int], np.ndarray], + atom_types: Union[list[int], np.ndarray], atomic: bool = False, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, diff --git a/deepmd/infer/deep_pot.py b/deepmd/infer/deep_pot.py index 0632fd1c84..4755bc276a 100644 --- a/deepmd/infer/deep_pot.py +++ b/deepmd/infer/deep_pot.py @@ -1,10 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Any, - List, Literal, Optional, - Tuple, Union, overload, ) @@ -95,13 +93,13 @@ def eval( self, coords: np.ndarray, cells: Optional[np.ndarray], - atom_types: Union[List[int], np.ndarray], + atom_types: Union[list[int], np.ndarray], atomic: Literal[True], fparam: Optional[np.ndarray], aparam: Optional[np.ndarray], mixed_type: bool, **kwargs: Any, - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: pass @overload @@ -109,13 +107,13 @@ def eval( self, coords: np.ndarray, cells: Optional[np.ndarray], - atom_types: Union[List[int], np.ndarray], + atom_types: Union[list[int], np.ndarray], atomic: Literal[False], fparam: Optional[np.ndarray], aparam: Optional[np.ndarray], mixed_type: bool, **kwargs: Any, - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: pass @overload @@ -123,26 +121,26 @@ def eval( self, coords: np.ndarray, cells: Optional[np.ndarray], - atom_types: Union[List[int], np.ndarray], + atom_types: Union[list[int], np.ndarray], atomic: bool, fparam: Optional[np.ndarray], aparam: Optional[np.ndarray], mixed_type: bool, **kwargs: Any, - ) -> Tuple[np.ndarray, ...]: + ) -> tuple[np.ndarray, ...]: pass def eval( self, coords: np.ndarray, cells: Optional[np.ndarray], - atom_types: Union[List[int], np.ndarray], + atom_types: Union[list[int], np.ndarray], atomic: bool = False, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, mixed_type: bool = False, **kwargs: Any, - ) -> Tuple[np.ndarray, ...]: + ) -> tuple[np.ndarray, ...]: """Evaluate energy, force, and virial. If atomic is True, also return atomic energy and atomic virial. @@ -153,7 +151,7 @@ def eval( cells : np.ndarray The cell vectors of the system, in shape (nframes, 9). If the system is not periodic, set it to None. - atom_types : List[int] or np.ndarray + atom_types : list[int] or np.ndarray The types of the atoms. If mixed_type is False, the shape is (natoms,); otherwise, the shape is (nframes, natoms). atomic : bool, optional @@ -164,7 +162,7 @@ def eval( The atomic parameters, by default None. mixed_type : bool, optional Whether the atom_types is mixed type, by default False. - **kwargs : Dict[str, Any] + **kwargs : dict[str, Any] Keyword arguments. Returns diff --git a/deepmd/infer/deep_property.py b/deepmd/infer/deep_property.py index 5376fb1efc..4a3283cf32 100644 --- a/deepmd/infer/deep_property.py +++ b/deepmd/infer/deep_property.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Any, - Dict, - List, Optional, - Tuple, Union, ) @@ -69,13 +66,13 @@ def eval( self, coords: np.ndarray, cells: Optional[np.ndarray], - atom_types: Union[List[int], np.ndarray], + atom_types: Union[list[int], np.ndarray], atomic: bool = False, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, mixed_type: bool = False, - **kwargs: Dict[str, Any], - ) -> Tuple[np.ndarray, ...]: + **kwargs: dict[str, Any], + ) -> tuple[np.ndarray, ...]: """Evaluate properties. If atomic is True, also return atomic property. Parameters @@ -85,7 +82,7 @@ def eval( cells : np.ndarray The cell vectors of the system, in shape (nframes, 9). If the system is not periodic, set it to None. - atom_types : List[int] or np.ndarray + atom_types : list[int] or np.ndarray The types of the atoms. If mixed_type is False, the shape is (natoms,); otherwise, the shape is (nframes, natoms). atomic : bool, optional @@ -96,7 +93,7 @@ def eval( The atomic parameters, by default None. mixed_type : bool, optional Whether the atom_types is mixed type, by default False. - **kwargs : Dict[str, Any] + **kwargs : dict[str, Any] Keyword arguments. Returns diff --git a/deepmd/infer/deep_tensor.py b/deepmd/infer/deep_tensor.py index 48918e7c75..bb5bc12697 100644 --- a/deepmd/infer/deep_tensor.py +++ b/deepmd/infer/deep_tensor.py @@ -3,9 +3,7 @@ abstractmethod, ) from typing import ( - List, Optional, - Tuple, Union, ) @@ -44,7 +42,7 @@ def eval( self, coords: np.ndarray, cells: Optional[np.ndarray], - atom_types: Union[List[int], np.ndarray], + atom_types: Union[list[int], np.ndarray], atomic: bool = True, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, @@ -119,7 +117,7 @@ def eval_full( aparam: Optional[np.ndarray] = None, mixed_type: bool = False, **kwargs: dict, - ) -> Tuple[np.ndarray, ...]: + ) -> tuple[np.ndarray, ...]: """Evaluate the model with interface similar to the energy model. Will return global tensor, component-wise force and virial and optionally atomic tensor and atomic virial. @@ -250,7 +248,7 @@ def eval_full( aparam: Optional[np.ndarray] = None, mixed_type: bool = False, **kwargs: dict, - ) -> Tuple[np.ndarray, ...]: + ) -> tuple[np.ndarray, ...]: """Unsupported method.""" raise RuntimeError( "This model does not support eval_full method. Use eval instead." diff --git a/deepmd/infer/model_devi.py b/deepmd/infer/model_devi.py index 83708c7114..29e1eec741 100644 --- a/deepmd/infer/model_devi.py +++ b/deepmd/infer/model_devi.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Optional, - Tuple, overload, ) @@ -29,7 +28,7 @@ def calc_model_devi_f( real_f: Optional[np.ndarray] = None, relative: Optional[float] = None, atomic: Literal[False] = ..., -) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: ... +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: ... @overload @@ -38,7 +37,7 @@ def calc_model_devi_f( real_f: Optional[np.ndarray] = None, relative: Optional[float] = None, atomic: Literal[True] = ..., -) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: ... +) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: ... @overload @@ -47,7 +46,7 @@ def calc_model_devi_f( real_f: Optional[np.ndarray] = None, relative: Optional[float] = None, atomic: bool = False, -) -> Tuple[np.ndarray, ...]: ... +) -> tuple[np.ndarray, ...]: ... def calc_model_devi_f( @@ -55,7 +54,7 @@ def calc_model_devi_f( real_f: Optional[np.ndarray] = None, relative: Optional[float] = None, atomic: bool = False, -) -> Tuple[np.ndarray, ...]: +) -> tuple[np.ndarray, ...]: """Calculate model deviation of force. Parameters @@ -141,7 +140,7 @@ def calc_model_devi_v( vs: np.ndarray, real_v: Optional[np.ndarray] = None, relative: Optional[float] = None, -) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """Calculate model deviation of virial. Parameters diff --git a/deepmd/loggers/training.py b/deepmd/loggers/training.py index 954473e309..b2fff4788b 100644 --- a/deepmd/loggers/training.py +++ b/deepmd/loggers/training.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - Dict, Optional, ) @@ -16,7 +15,7 @@ def format_training_message( def format_training_message_per_task( batch: int, task_name: str, - rmse: Dict[str, float], + rmse: dict[str, float], learning_rate: Optional[float], ): if task_name: diff --git a/deepmd/main.py b/deepmd/main.py index c271152a06..60b8da2850 100644 --- a/deepmd/main.py +++ b/deepmd/main.py @@ -14,10 +14,7 @@ defaultdict, ) from typing import ( - Dict, - List, Optional, - Type, ) from deepmd.backend.backend import ( @@ -57,10 +54,10 @@ class RawTextArgumentDefaultsHelpFormatter( """This formatter is used to print multile-line help message with default value.""" -BACKENDS: Dict[str, Type[Backend]] = Backend.get_backends_by_feature( +BACKENDS: dict[str, type[Backend]] = Backend.get_backends_by_feature( Backend.Feature.ENTRY_POINT ) -BACKEND_TABLE: Dict[str, str] = {kk: vv.name.lower() for kk, vv in BACKENDS.items()} +BACKEND_TABLE: dict[str, str] = {kk: vv.name.lower() for kk, vv in BACKENDS.items()} class BackendOption(argparse.Action): @@ -130,7 +127,7 @@ def main_parser() -> argparse.ArgumentParser: ), ) - BACKEND_ALIAS: Dict[str, List[str]] = defaultdict(list) + BACKEND_ALIAS: dict[str, list[str]] = defaultdict(list) for alias, backend in BACKEND_TABLE.items(): BACKEND_ALIAS[backend].append(alias) for backend, alias in BACKEND_ALIAS.items(): @@ -856,12 +853,12 @@ def main_parser() -> argparse.ArgumentParser: return parser -def parse_args(args: Optional[List[str]] = None) -> argparse.Namespace: +def parse_args(args: Optional[list[str]] = None) -> argparse.Namespace: """Parse arguments and convert argument strings to objects. Parameters ---------- - args : List[str] + args : list[str] list of command line arguments, main purpose is testing default option None takes arguments from sys.argv @@ -880,12 +877,12 @@ def parse_args(args: Optional[List[str]] = None) -> argparse.Namespace: return parsed_args -def main(args: Optional[List[str]] = None): +def main(args: Optional[list[str]] = None): """DeePMD-kit new entry point. Parameters ---------- - args : List[str] + args : list[str] list of command line arguments, main purpose is testing default option None takes arguments from sys.argv diff --git a/deepmd/pt/entrypoints/main.py b/deepmd/pt/entrypoints/main.py index 3df05cbb47..a0694c41c5 100644 --- a/deepmd/pt/entrypoints/main.py +++ b/deepmd/pt/entrypoints/main.py @@ -8,7 +8,6 @@ Path, ) from typing import ( - List, Optional, Union, ) @@ -485,7 +484,7 @@ def change_bias(FLAGS): @record -def main(args: Optional[Union[List[str], argparse.Namespace]] = None): +def main(args: Optional[Union[list[str], argparse.Namespace]] = None): if not isinstance(args, argparse.Namespace): FLAGS = parse_args(args=args) else: diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index d5eae71731..538dc65371 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -4,11 +4,7 @@ TYPE_CHECKING, Any, Callable, - Dict, - List, Optional, - Tuple, - Type, Union, ) @@ -170,7 +166,7 @@ def get_ntypes(self) -> int: """Get the number of atom types of this model.""" return len(self.type_map) - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map (element name of the atom types) of this model.""" return self.type_map @@ -186,7 +182,7 @@ def get_intensive(self) -> bool: return self.dp.model["Default"].get_intensive() @property - def model_type(self) -> Type["DeepEvalWrapper"]: + def model_type(self) -> type["DeepEvalWrapper"]: """The the evaluator of the model type.""" model_output_type = self.dp.model["Default"].model_output_type() if "energy" in model_output_type: @@ -206,7 +202,7 @@ def model_type(self) -> Type["DeepEvalWrapper"]: else: raise RuntimeError("Unknown model type") - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -244,7 +240,7 @@ def eval( fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, **kwargs: Any, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Evaluate the energy, force and virial by using this DP. Parameters @@ -311,7 +307,7 @@ def eval( ) ) - def _get_request_defs(self, atomic: bool) -> List[OutputVariableDef]: + def _get_request_defs(self, atomic: bool) -> list[OutputVariableDef]: """Get the requested output definitions. When atomic is True, all output_def are requested. @@ -376,7 +372,7 @@ def _get_natoms_and_nframes( coords: np.ndarray, atom_types: np.ndarray, mixed_type: bool = False, - ) -> Tuple[int, int]: + ) -> tuple[int, int]: if mixed_type: natoms = len(atom_types[0]) else: @@ -395,7 +391,7 @@ def _eval_model( atom_types: np.ndarray, fparam: Optional[np.ndarray], aparam: Optional[np.ndarray], - request_defs: List[OutputVariableDef], + request_defs: list[OutputVariableDef], ): model = self.dp.to(DEVICE) @@ -476,7 +472,7 @@ def _eval_model_spin( spins: np.ndarray, fparam: Optional[np.ndarray], aparam: Optional[np.ndarray], - request_defs: List[OutputVariableDef], + request_defs: list[OutputVariableDef], ): model = self.dp.to(DEVICE) diff --git a/deepmd/pt/loss/dos.py b/deepmd/pt/loss/dos.py index 7fd2e04ff2..84513b6bf9 100644 --- a/deepmd/pt/loss/dos.py +++ b/deepmd/pt/loss/dos.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - List, -) import torch @@ -230,7 +227,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False return model_pred, loss, more_loss @property - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" label_requirement = [] if self.has_ados or self.has_acdf: diff --git a/deepmd/pt/loss/ener.py b/deepmd/pt/loss/ener.py index 092fbc1f76..f40110a749 100644 --- a/deepmd/pt/loss/ener.py +++ b/deepmd/pt/loss/ener.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, ) @@ -336,7 +335,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): return model_pred, loss, more_loss @property - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" label_requirement = [] if self.has_e: diff --git a/deepmd/pt/loss/ener_spin.py b/deepmd/pt/loss/ener_spin.py index 78210a778b..09a053451f 100644 --- a/deepmd/pt/loss/ener_spin.py +++ b/deepmd/pt/loss/ener_spin.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - List, -) import torch import torch.nn.functional as F @@ -276,7 +273,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): return model_pred, loss, more_loss @property - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" label_requirement = [] if self.has_e: diff --git a/deepmd/pt/loss/loss.py b/deepmd/pt/loss/loss.py index 7e26f6571a..1a091e074e 100644 --- a/deepmd/pt/loss/loss.py +++ b/deepmd/pt/loss/loss.py @@ -3,9 +3,6 @@ ABC, abstractmethod, ) -from typing import ( - List, -) import torch @@ -25,7 +22,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate): @property @abstractmethod - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" pass diff --git a/deepmd/pt/loss/property.py b/deepmd/pt/loss/property.py index e4f86091bc..ba120e3d6c 100644 --- a/deepmd/pt/loss/property.py +++ b/deepmd/pt/loss/property.py @@ -1,8 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging -from typing import ( - List, -) import torch import torch.nn.functional as F @@ -138,7 +135,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False return model_pred, loss, more_loss @property - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" label_requirement = [] label_requirement.append( diff --git a/deepmd/pt/loss/tensor.py b/deepmd/pt/loss/tensor.py index 3dcf21af1d..32d25cc9f1 100644 --- a/deepmd/pt/loss/tensor.py +++ b/deepmd/pt/loss/tensor.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - List, -) import torch @@ -151,7 +148,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False return model_pred, loss, more_loss @property - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" label_requirement = [] if self.has_local_weight: diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index 4742fe66a3..bd3c2b49ab 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -4,10 +4,7 @@ import logging from typing import ( Callable, - Dict, - List, Optional, - Tuple, Union, ) @@ -67,7 +64,7 @@ class BaseAtomicModel(torch.nn.Module, BaseAtomicModel_): of the atomic model. Implemented by removing the pairs from the nlist. rcond : float, optional The condition number for the regression of atomic energy. - preset_out_bias : Dict[str, List[Optional[np.ndarray]]], optional + preset_out_bias : dict[str, list[Optional[np.ndarray]]], optional Specifying atomic energy contribution in vacuum. Given by key:value pairs. The value is a list specifying the bias. the elements can be None or np.ndarray of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] @@ -77,11 +74,11 @@ class BaseAtomicModel(torch.nn.Module, BaseAtomicModel_): def __init__( self, - type_map: List[str], - atom_exclude_types: List[int] = [], - pair_exclude_types: List[Tuple[int, int]] = [], + type_map: list[str], + atom_exclude_types: list[int] = [], + pair_exclude_types: list[tuple[int, int]] = [], rcond: Optional[float] = None, - preset_out_bias: Optional[Dict[str, np.ndarray]] = None, + preset_out_bias: Optional[dict[str, np.ndarray]] = None, ): torch.nn.Module.__init__(self) BaseAtomicModel_.__init__(self) @@ -94,7 +91,7 @@ def __init__( def init_out_stat(self): """Initialize the output bias.""" ntypes = self.get_ntypes() - self.bias_keys: List[str] = list(self.fitting_output_def().keys()) + self.bias_keys: list[str] = list(self.fitting_output_def().keys()) self.max_out_size = max( [self.atomic_output_def()[kk].size for kk in self.bias_keys] ) @@ -124,13 +121,13 @@ def __getitem__(self, key): raise KeyError(key) @torch.jit.export - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" return self.type_map def reinit_atom_exclude( self, - exclude_types: List[int] = [], + exclude_types: list[int] = [], ): self.atom_exclude_types = exclude_types if exclude_types == []: @@ -140,7 +137,7 @@ def reinit_atom_exclude( def reinit_pair_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.pair_exclude_types = exclude_types if exclude_types == []: @@ -195,8 +192,8 @@ def forward_common_atomic( mapping: Optional[torch.Tensor] = None, fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, - ) -> Dict[str, torch.Tensor]: + comm_dict: Optional[dict[str, torch.Tensor]] = None, + ) -> dict[str, torch.Tensor]: """Common interface for atomic inference. This method accept extended coordinates, extended atom typs, neighbor list, @@ -276,8 +273,8 @@ def forward( mapping: Optional[torch.Tensor] = None, fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, - ) -> Dict[str, torch.Tensor]: + comm_dict: Optional[dict[str, torch.Tensor]] = None, + ) -> dict[str, torch.Tensor]: return self.forward_common_atomic( extended_coord, extended_atype, @@ -289,7 +286,7 @@ def forward( ) def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -354,7 +351,7 @@ def deserialize(cls, data: dict) -> "BaseAtomicModel": def compute_or_load_stat( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], stat_file_path: Optional[DPPath] = None, ): """ @@ -362,11 +359,11 @@ def compute_or_load_stat( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. stat_file_path : Optional[DPPath] @@ -377,7 +374,7 @@ def compute_or_load_stat( def compute_or_load_out_stat( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], stat_file_path: Optional[DPPath] = None, ): """ @@ -385,11 +382,11 @@ def compute_or_load_out_stat( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. stat_file_path : Optional[DPPath] @@ -404,7 +401,7 @@ def compute_or_load_out_stat( def apply_out_stat( self, - ret: Dict[str, torch.Tensor], + ret: dict[str, torch.Tensor], atype: torch.Tensor, ): """Apply the stat to each atomic output. @@ -435,11 +432,11 @@ def change_out_bias( Parameters ---------- - sample_merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + sample_merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. bias_adjust_mode : str @@ -480,7 +477,9 @@ def _get_forward_wrapper_func(self) -> Callable[..., torch.Tensor]: """Get a forward wrapper of the atomic model for output bias calculation.""" def model_forward(coord, atype, box, fparam=None, aparam=None): - with torch.no_grad(): # it's essential for pure torch forward function to use auto_batchsize + with ( + torch.no_grad() + ): # it's essential for pure torch forward function to use auto_batchsize ( extended_coord, extended_atype, @@ -520,7 +519,7 @@ def _default_std(self): def _varsize( self, - shape: List[int], + shape: list[int], ) -> int: output_size = 1 len_shape = len(shape) @@ -532,7 +531,7 @@ def _get_bias_index( self, kk: str, ) -> int: - res: List[int] = [] + res: list[int] = [] for i, e in enumerate(self.bias_keys): if e == kk: res.append(i) @@ -541,8 +540,8 @@ def _get_bias_index( def _store_out_stat( self, - out_bias: Dict[str, torch.Tensor], - out_std: Dict[str, torch.Tensor], + out_bias: dict[str, torch.Tensor], + out_std: dict[str, torch.Tensor], add: bool = False, ): ntypes = self.get_ntypes() @@ -562,8 +561,8 @@ def _store_out_stat( def _fetch_out_stat( self, - keys: List[str], - ) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]: + keys: list[str], + ) -> tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]: ret_bias = {} ret_std = {} ntypes = self.get_ntypes() diff --git a/deepmd/pt/model/atomic_model/dipole_atomic_model.py b/deepmd/pt/model/atomic_model/dipole_atomic_model.py index 1723a30f2d..aa28294cc5 100644 --- a/deepmd/pt/model/atomic_model/dipole_atomic_model.py +++ b/deepmd/pt/model/atomic_model/dipole_atomic_model.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Dict, -) import torch @@ -21,7 +18,7 @@ def __init__(self, descriptor, fitting, type_map, **kwargs): def apply_out_stat( self, - ret: Dict[str, torch.Tensor], + ret: dict[str, torch.Tensor], atype: torch.Tensor, ): # dipole not applying bias diff --git a/deepmd/pt/model/atomic_model/dp_atomic_model.py b/deepmd/pt/model/atomic_model/dp_atomic_model.py index 8def2e48de..936a1fead3 100644 --- a/deepmd/pt/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pt/model/atomic_model/dp_atomic_model.py @@ -3,8 +3,6 @@ import functools import logging from typing import ( - Dict, - List, Optional, ) @@ -52,7 +50,7 @@ def __init__( self, descriptor, fitting, - type_map: List[str], + type_map: list[str], **kwargs, ): super().__init__(type_map, **kwargs) @@ -79,7 +77,7 @@ def get_rcut(self) -> float: """Get the cut-off radius.""" return self.rcut - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Get the neighbor selection.""" return self.sel @@ -96,7 +94,7 @@ def mixed_types(self) -> bool: return self.descriptor.mixed_types() def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -157,8 +155,8 @@ def forward_atomic( mapping: Optional[torch.Tensor] = None, fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, - ) -> Dict[str, torch.Tensor]: + comm_dict: Optional[dict[str, torch.Tensor]] = None, + ) -> dict[str, torch.Tensor]: """Return atomic prediction. Parameters @@ -258,7 +256,7 @@ def get_dim_aparam(self) -> int: """Get the number (dimension) of atomic parameters of this atomic model.""" return self.fitting_net.get_dim_aparam() - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution diff --git a/deepmd/pt/model/atomic_model/linear_atomic_model.py b/deepmd/pt/model/atomic_model/linear_atomic_model.py index 3c7692212e..d88c4c3af5 100644 --- a/deepmd/pt/model/atomic_model/linear_atomic_model.py +++ b/deepmd/pt/model/atomic_model/linear_atomic_model.py @@ -2,10 +2,7 @@ import copy from typing import ( Callable, - Dict, - List, Optional, - Tuple, Union, ) @@ -55,8 +52,8 @@ class LinearEnergyAtomicModel(BaseAtomicModel): def __init__( self, - models: List[BaseAtomicModel], - type_map: List[str], + models: list[BaseAtomicModel], + type_map: list[str], **kwargs, ): super().__init__(type_map, **kwargs) @@ -119,12 +116,12 @@ def get_rcut(self) -> float: """Get the cut-off radius.""" return max(self.get_model_rcuts()) - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" return self.type_map def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -140,22 +137,22 @@ def change_type_map( else None, ) - def get_model_rcuts(self) -> List[float]: + def get_model_rcuts(self) -> list[float]: """Get the cut-off radius for each individual models.""" return [model.get_rcut() for model in self.models] - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: return [max([model.get_nsel() for model in self.models])] - def get_model_nsels(self) -> List[int]: + def get_model_nsels(self) -> list[int]: """Get the processed sels for each individual models. Not distinguishing types.""" return [model.get_nsel() for model in self.models] - def get_model_sels(self) -> List[List[int]]: + def get_model_sels(self) -> list[list[int]]: """Get the sels for each individual models.""" return [model.get_sel() for model in self.models] - def _sort_rcuts_sels(self) -> Tuple[List[float], List[int]]: + def _sort_rcuts_sels(self) -> tuple[list[float], list[int]]: # sort the pair of rcut and sels in ascending order, first based on sel, then on rcut. zipped = torch.stack( [ @@ -168,8 +165,8 @@ def _sort_rcuts_sels(self) -> Tuple[List[float], List[int]]: inner_sorted = zipped[inner_sorting] outer_sorting = torch.argsort(inner_sorted[:, 0], stable=True) outer_sorted = inner_sorted[outer_sorting] - sorted_rcuts: List[float] = outer_sorted[:, 0].tolist() - sorted_sels: List[int] = outer_sorted[:, 1].to(torch.int64).tolist() + sorted_rcuts: list[float] = outer_sorted[:, 0].tolist() + sorted_sels: list[int] = outer_sorted[:, 1].to(torch.int64).tolist() return sorted_rcuts, sorted_sels def forward_atomic( @@ -180,8 +177,8 @@ def forward_atomic( mapping: Optional[torch.Tensor] = None, fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, - ) -> Dict[str, torch.Tensor]: + comm_dict: Optional[dict[str, torch.Tensor]] = None, + ) -> dict[str, torch.Tensor]: """Return atomic prediction. Parameters @@ -252,7 +249,7 @@ def forward_atomic( def apply_out_stat( self, - ret: Dict[str, torch.Tensor], + ret: dict[str, torch.Tensor], atype: torch.Tensor, ): """Apply the stat to each atomic output. @@ -270,16 +267,16 @@ def apply_out_stat( return ret @staticmethod - def remap_atype(ori_map: List[str], new_map: List[str]) -> torch.Tensor: + def remap_atype(ori_map: list[str], new_map: list[str]) -> torch.Tensor: """ This method is used to map the atype from the common type_map to the original type_map of indivial AtomicModels. It creates a index mapping for the conversion. Parameters ---------- - ori_map : List[str] + ori_map : list[str] The original type map of an AtomicModel. - new_map : List[str] + new_map : list[str] The common type map of the DPZBLLinearEnergyAtomicModel, created by the `get_type_map` method, must be a subset of the ori_map. @@ -335,7 +332,7 @@ def deserialize(cls, data: dict) -> "LinearEnergyAtomicModel": def _compute_weight( self, extended_coord, extended_atype, nlists_ - ) -> List[torch.Tensor]: + ) -> list[torch.Tensor]: """This should be a list of user defined weights that matches the number of models to be combined.""" nmodels = len(self.models) nframes, nloc, _ = nlists_[0].shape @@ -354,7 +351,7 @@ def get_dim_aparam(self) -> int: """Get the number (dimension) of atomic parameters of this atomic model.""" return max([model.get_dim_aparam() for model in self.models]) - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -383,7 +380,7 @@ def is_aparam_nall(self) -> bool: def compute_or_load_out_stat( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], stat_file_path: Optional[DPPath] = None, ): """ @@ -391,11 +388,11 @@ def compute_or_load_out_stat( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. stat_file_path : Optional[DPPath] @@ -456,7 +453,7 @@ def __init__( zbl_model: PairTabAtomicModel, sw_rmin: float, sw_rmax: float, - type_map: List[str], + type_map: list[str], smin_alpha: Optional[float] = 0.1, **kwargs, ): @@ -503,13 +500,13 @@ def _compute_weight( self, extended_coord: torch.Tensor, extended_atype: torch.Tensor, - nlists_: List[torch.Tensor], - ) -> List[torch.Tensor]: + nlists_: list[torch.Tensor], + ) -> list[torch.Tensor]: """ZBL weight. Returns ------- - List[torch.Tensor] + list[torch.Tensor] the atomic ZBL weight for interpolation. (nframes, nloc, 1) """ assert ( diff --git a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py index 7ef87524dd..2918bba947 100644 --- a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py +++ b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py @@ -2,8 +2,6 @@ import copy from typing import ( Callable, - Dict, - List, Optional, Union, ) @@ -55,7 +53,7 @@ class PairTabAtomicModel(BaseAtomicModel): The cutoff radius. sel : int or list[int] The maxmum number of atoms in the cut-off radius. - type_map : List[str] + type_map : list[str] Mapping atom type to the name (str) of the type. For example `type_map[1]` gives the name of the type 1. rcond : float, optional @@ -69,8 +67,8 @@ def __init__( self, tab_file: str, rcut: float, - sel: Union[int, List[int]], - type_map: List[str], + sel: Union[int, list[int]], + type_map: list[str], **kwargs, ): super().__init__(type_map, **kwargs) @@ -87,7 +85,7 @@ def __init__( ( tab_info, tab_data, - ) = self.tab.get() # this returns -> Tuple[np.array, np.array] + ) = self.tab.get() # this returns -> tuple[np.array, np.array] nspline, ntypes_tab = tab_info[-2:].astype(int) self.register_buffer("tab_info", torch.from_numpy(tab_info)) self.register_buffer( @@ -138,10 +136,10 @@ def get_out_bias(self) -> torch.Tensor: def get_rcut(self) -> float: return self.rcut - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: return self.type_map - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: return [self.sel] def get_nsel(self) -> int: @@ -169,7 +167,7 @@ def need_sorted_nlist_for_lower(self) -> bool: return False def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -218,7 +216,7 @@ def deserialize(cls, data) -> "PairTabAtomicModel": def compute_or_load_stat( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], stat_file_path: Optional[DPPath] = None, ): """ @@ -226,11 +224,11 @@ def compute_or_load_stat( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. stat_file_path : Optional[DPPath] @@ -248,8 +246,8 @@ def forward_atomic( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, - ) -> Dict[str, torch.Tensor]: + comm_dict: Optional[dict[str, torch.Tensor]] = None, + ) -> dict[str, torch.Tensor]: nframes, nloc, nnei = nlist.shape extended_coord = extended_coord.view(nframes, -1, 3) if self.do_grad_r() or self.do_grad_c(): @@ -470,7 +468,7 @@ def get_dim_aparam(self) -> int: """Get the number (dimension) of atomic parameters of this atomic model.""" return 0 - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution diff --git a/deepmd/pt/model/atomic_model/polar_atomic_model.py b/deepmd/pt/model/atomic_model/polar_atomic_model.py index 81cf8a23b6..39cda2650d 100644 --- a/deepmd/pt/model/atomic_model/polar_atomic_model.py +++ b/deepmd/pt/model/atomic_model/polar_atomic_model.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Dict, -) import torch @@ -21,7 +18,7 @@ def __init__(self, descriptor, fitting, type_map, **kwargs): def apply_out_stat( self, - ret: Dict[str, torch.Tensor], + ret: dict[str, torch.Tensor], atype: torch.Tensor, ): """Apply the stat to each atomic output. diff --git a/deepmd/pt/model/atomic_model/property_atomic_model.py b/deepmd/pt/model/atomic_model/property_atomic_model.py index 1fb8a5957f..2fac90100f 100644 --- a/deepmd/pt/model/atomic_model/property_atomic_model.py +++ b/deepmd/pt/model/atomic_model/property_atomic_model.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Dict, -) import torch @@ -21,7 +18,7 @@ def __init__(self, descriptor, fitting, type_map, **kwargs): def apply_out_stat( self, - ret: Dict[str, torch.Tensor], + ret: dict[str, torch.Tensor], atype: torch.Tensor, ): """Apply the stat to each atomic output. diff --git a/deepmd/pt/model/descriptor/descriptor.py b/deepmd/pt/model/descriptor/descriptor.py index 16c3d96301..78a4608108 100644 --- a/deepmd/pt/model/descriptor/descriptor.py +++ b/deepmd/pt/model/descriptor/descriptor.py @@ -6,8 +6,6 @@ ) from typing import ( Callable, - Dict, - List, Optional, Union, ) @@ -71,7 +69,7 @@ def get_nsel(self) -> int: pass @abstractmethod - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" pass @@ -102,7 +100,7 @@ def get_env_protection(self) -> float: def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -110,11 +108,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -123,7 +121,7 @@ def compute_input_stats( """ raise NotImplementedError - def get_stats(self) -> Dict[str, StatItem]: + def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" raise NotImplementedError @@ -203,7 +201,7 @@ def extend_descrpt_stat(des, type_map, des_with_stat=None): ---------- des : DescriptorBlock The descriptor block to be extended. - type_map : List[str] + type_map : list[str] The name of each type of atoms to be extended. des_with_stat : DescriptorBlock, Optional The descriptor block has additional statistics of types from newly provided `type_map`. diff --git a/deepmd/pt/model/descriptor/dpa1.py b/deepmd/pt/model/descriptor/dpa1.py index 14767cb100..617e8b49b6 100644 --- a/deepmd/pt/model/descriptor/dpa1.py +++ b/deepmd/pt/model/descriptor/dpa1.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Callable, - Dict, - List, Optional, - Tuple, Union, ) @@ -157,7 +154,7 @@ class DescrptDPA1(BaseDescriptor, torch.nn.Module): (Only support False to keep consistent with other backend references.) (Not used in this version. True option is not implemented.) If mask the diagonal of attention weights - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. env_protection: float @@ -191,7 +188,7 @@ class DescrptDPA1(BaseDescriptor, torch.nn.Module): Whether to use electronic configuration type embedding. use_tebd_bias : bool, Optional Whether to use bias in the type embedding layer. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. spin (Only support None to keep consistent with other backend references.) @@ -215,7 +212,7 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, neuron: list = [25, 50, 100], axis_neuron: int = 16, @@ -229,7 +226,7 @@ def __init__( activation_function: str = "tanh", precision: str = "float64", resnet_dt: bool = False, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, scaling_factor: int = 1.0, normalize=True, @@ -241,10 +238,10 @@ def __init__( smooth_type_embedding: bool = True, type_one_side: bool = False, stripped_type_embedding: Optional[bool] = None, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, use_econf_tebd: bool = False, use_tebd_bias: bool = False, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, # not implemented spin=None, type: Optional[str] = None, @@ -326,7 +323,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return self.se_atten.get_nsel() - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.se_atten.get_sel() @@ -334,7 +331,7 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.se_atten.get_ntypes() - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map @@ -405,7 +402,7 @@ def dim_emb(self): def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -413,11 +410,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -435,12 +432,12 @@ def set_stat_mean_and_stddev( self.se_atten.mean = mean self.se_atten.stddev = stddev - def get_stat_mean_and_stddev(self) -> Tuple[torch.Tensor, torch.Tensor]: + def get_stat_mean_and_stddev(self) -> tuple[torch.Tensor, torch.Tensor]: """Get mean and stddev for descriptor.""" return self.se_atten.mean, self.se_atten.stddev def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -562,7 +559,7 @@ def forward( extended_atype: torch.Tensor, nlist: torch.Tensor, mapping: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, ): """Compute the descriptor. @@ -617,9 +614,9 @@ def forward( def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/pt/model/descriptor/dpa2.py b/deepmd/pt/model/descriptor/dpa2.py index 9fc4fc4a21..f1ef200b09 100644 --- a/deepmd/pt/model/descriptor/dpa2.py +++ b/deepmd/pt/model/descriptor/dpa2.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Callable, - Dict, - List, Optional, - Tuple, Union, ) @@ -87,14 +84,14 @@ def __init__( concat_output_tebd: bool = True, precision: str = "float64", smooth: bool = True, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, add_tebd_to_repinit_out: bool = False, use_econf_tebd: bool = False, use_tebd_bias: bool = False, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, old_impl: bool = False, ): r"""The DPA-2 descriptor. see https://arxiv.org/abs/2312.15492. @@ -111,7 +108,7 @@ def __init__( The precision of the embedding net parameters. smooth : bool, optional Whether to use smoothness in processes such as attention weights calculation. - exclude_types : List[List[int]], optional + exclude_types : list[list[int]], optional The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. env_protection : float, optional @@ -127,7 +124,7 @@ def __init__( Whether to use electronic configuration type embedding. use_tebd_bias : bool, Optional Whether to use bias in the type embedding layer. - type_map : List[str], Optional + type_map : list[str], Optional A list of strings. Give the name to each type of atoms. Returns @@ -324,7 +321,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return sum(self.sel) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sel @@ -332,7 +329,7 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map @@ -423,7 +420,7 @@ def share_params(self, base_class, shared_level, resume=False): raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -488,7 +485,7 @@ def dim_emb(self): def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -496,11 +493,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -515,8 +512,8 @@ def compute_input_stats( def set_stat_mean_and_stddev( self, - mean: List[torch.Tensor], - stddev: List[torch.Tensor], + mean: list[torch.Tensor], + stddev: list[torch.Tensor], ) -> None: """Update mean and stddev for descriptor.""" descrpt_list = [self.repinit, self.repformers] @@ -526,7 +523,7 @@ def set_stat_mean_and_stddev( descrpt.mean = mean[ii] descrpt.stddev = stddev[ii] - def get_stat_mean_and_stddev(self) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: + def get_stat_mean_and_stddev(self) -> tuple[list[torch.Tensor], list[torch.Tensor]]: """Get mean and stddev for descriptor.""" mean_list = [self.repinit.mean, self.repformers.mean] stddev_list = [ @@ -711,7 +708,7 @@ def forward( extended_atype: torch.Tensor, nlist: torch.Tensor, mapping: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, ): """Compute the descriptor. @@ -816,9 +813,9 @@ def forward( def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/pt/model/descriptor/gaussian_lcc.py b/deepmd/pt/model/descriptor/gaussian_lcc.py index 2ae14bd432..8ac52215c0 100644 --- a/deepmd/pt/model/descriptor/gaussian_lcc.py +++ b/deepmd/pt/model/descriptor/gaussian_lcc.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, ) @@ -162,7 +161,7 @@ def dim_emb(self): """Returns the output dimension of pair representation.""" return self.pair_embed_dim - def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): + def compute_input_stats(self, merged: list[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" pass diff --git a/deepmd/pt/model/descriptor/hybrid.py b/deepmd/pt/model/descriptor/hybrid.py index 7156396c48..c8730e3465 100644 --- a/deepmd/pt/model/descriptor/hybrid.py +++ b/deepmd/pt/model/descriptor/hybrid.py @@ -2,10 +2,7 @@ import math from typing import ( Any, - Dict, - List, Optional, - Tuple, Union, ) @@ -38,16 +35,16 @@ class DescrptHybrid(BaseDescriptor, torch.nn.Module): Parameters ---------- - list : list : List[Union[BaseDescriptor, Dict[str, Any]]] + list : list : list[Union[BaseDescriptor, dict[str, Any]]] Build a descriptor from the concatenation of the list of descriptors. The descriptor can be either an object or a dictionary. """ - nlist_cut_idx: List[torch.Tensor] + nlist_cut_idx: list[torch.Tensor] def __init__( self, - list: List[Union[BaseDescriptor, Dict[str, Any]]], + list: list[Union[BaseDescriptor, dict[str, Any]]], **kwargs, ) -> None: super().__init__() @@ -57,7 +54,7 @@ def __init__( raise RuntimeError( "cannot build descriptor from an empty list of descriptors." ) - formatted_descript_list: List[BaseDescriptor] = [] + formatted_descript_list: list[BaseDescriptor] = [] for ii in descrpt_list: if isinstance(ii, BaseDescriptor): formatted_descript_list.append(ii) @@ -75,7 +72,7 @@ def __init__( self.descrpt_list[ii].get_ntypes() == self.descrpt_list[0].get_ntypes() ), f"number of atom types in {ii}th descrptor does not match others" # if hybrid sel is larger than sub sel, the nlist needs to be cut for each type - self.nlist_cut_idx: List[torch.Tensor] = [] + self.nlist_cut_idx: list[torch.Tensor] = [] if self.mixed_types() and not all( descrpt.mixed_types() for descrpt in self.descrpt_list ): @@ -114,7 +111,7 @@ def get_rcut_smth(self) -> float: # Note: Using the minimum rcut_smth might not be appropriate in all scenarios. Consider using a different approach or provide detailed documentation on why the minimum value is chosen. return min([descrpt.get_rcut_smth() for descrpt in self.descrpt_list]) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" if self.mixed_types(): return [ @@ -131,7 +128,7 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.descrpt_list[0].get_ntypes() - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.descrpt_list[0].get_type_map() @@ -185,7 +182,7 @@ def share_params(self, base_class, shared_level, resume=False): raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -198,15 +195,15 @@ def change_type_map( else None, ) - def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): + def compute_input_stats(self, merged: list[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" for descrpt in self.descrpt_list: descrpt.compute_input_stats(merged, path) def set_stat_mean_and_stddev( self, - mean: List[Union[torch.Tensor, List[torch.Tensor]]], - stddev: List[Union[torch.Tensor, List[torch.Tensor]]], + mean: list[Union[torch.Tensor, list[torch.Tensor]]], + stddev: list[Union[torch.Tensor, list[torch.Tensor]]], ) -> None: """Update mean and stddev for descriptor.""" for ii, descrpt in enumerate(self.descrpt_list): @@ -214,9 +211,9 @@ def set_stat_mean_and_stddev( def get_stat_mean_and_stddev( self, - ) -> Tuple[ - List[Union[torch.Tensor, List[torch.Tensor]]], - List[Union[torch.Tensor, List[torch.Tensor]]], + ) -> tuple[ + list[Union[torch.Tensor, list[torch.Tensor]]], + list[Union[torch.Tensor, list[torch.Tensor]]], ]: """Get mean and stddev for descriptor.""" mean_list = [] @@ -233,7 +230,7 @@ def forward( atype_ext: torch.Tensor, nlist: torch.Tensor, mapping: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, ): """Compute the descriptor. @@ -303,9 +300,9 @@ def forward( def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/pt/model/descriptor/repformer_layer.py b/deepmd/pt/model/descriptor/repformer_layer.py index 579dc0c81e..92e2404469 100644 --- a/deepmd/pt/model/descriptor/repformer_layer.py +++ b/deepmd/pt/model/descriptor/repformer_layer.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, Union, ) @@ -44,7 +43,7 @@ def get_residual( _mode: str = "norm", trainable: bool = True, precision: str = "float64", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ) -> torch.Tensor: r""" Get residual tensor for one update vector. @@ -160,7 +159,7 @@ def __init__( smooth: bool = True, attnw_shift: float = 20.0, precision: str = "float64", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): """Return neighbor-wise multi-head self-attention maps, with gate mechanism.""" super().__init__() @@ -285,7 +284,7 @@ def __init__( input_dim: int, head_num: int, precision: str = "float64", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() self.input_dim = input_dim @@ -370,7 +369,7 @@ def __init__( input_dim: int, head_num: int, precision: str = "float64", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() self.input_dim = input_dim @@ -443,7 +442,7 @@ def __init__( smooth: bool = True, attnw_shift: float = 20.0, precision: str = "float64", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() self.input_dim = input_dim @@ -602,7 +601,7 @@ def __init__( use_sqrt_nnei: bool = True, g1_out_conv: bool = True, g1_out_mlp: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() self.epsilon = 1e-4 # protection of 1./nnei @@ -1132,10 +1131,10 @@ def forward( assert (nb, nloc) == g1.shape[:2] assert (nb, nloc, nnei) == h2.shape[:3] - g2_update: List[torch.Tensor] = [g2] - h2_update: List[torch.Tensor] = [h2] - g1_update: List[torch.Tensor] = [g1] - g1_mlp: List[torch.Tensor] = [g1] if not self.g1_out_mlp else [] + g2_update: list[torch.Tensor] = [g2] + h2_update: list[torch.Tensor] = [h2] + g1_update: list[torch.Tensor] = [g1] + g1_mlp: list[torch.Tensor] = [g1] if not self.g1_out_mlp else [] if self.g1_out_mlp: assert self.g1_self_mlp is not None g1_self_mlp = self.act(self.g1_self_mlp(g1)) @@ -1236,7 +1235,7 @@ def forward( @torch.jit.export def list_update_res_avg( self, - update_list: List[torch.Tensor], + update_list: list[torch.Tensor], ) -> torch.Tensor: nitem = len(update_list) uu = update_list[0] @@ -1245,7 +1244,7 @@ def list_update_res_avg( return uu / (float(nitem) ** 0.5) @torch.jit.export - def list_update_res_incr(self, update_list: List[torch.Tensor]) -> torch.Tensor: + def list_update_res_incr(self, update_list: list[torch.Tensor]) -> torch.Tensor: nitem = len(update_list) uu = update_list[0] scale = 1.0 / (float(nitem - 1) ** 0.5) if nitem > 1 else 0.0 @@ -1255,7 +1254,7 @@ def list_update_res_incr(self, update_list: List[torch.Tensor]) -> torch.Tensor: @torch.jit.export def list_update_res_residual( - self, update_list: List[torch.Tensor], update_name: str = "g1" + self, update_list: list[torch.Tensor], update_name: str = "g1" ) -> torch.Tensor: nitem = len(update_list) uu = update_list[0] @@ -1275,7 +1274,7 @@ def list_update_res_residual( @torch.jit.export def list_update( - self, update_list: List[torch.Tensor], update_name: str = "g1" + self, update_list: list[torch.Tensor], update_name: str = "g1" ) -> torch.Tensor: if self.update_style == "res_avg": return self.list_update_res_avg(update_list) diff --git a/deepmd/pt/model/descriptor/repformer_layer_old_impl.py b/deepmd/pt/model/descriptor/repformer_layer_old_impl.py index 81ee35c9ab..47b20f7b03 100644 --- a/deepmd/pt/model/descriptor/repformer_layer_old_impl.py +++ b/deepmd/pt/model/descriptor/repformer_layer_old_impl.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Callable, - List, ) import torch @@ -634,10 +633,10 @@ def forward( if self.update_h2: h2 = _apply_h_norm(h2) - g2_update: List[torch.Tensor] = [g2] - h2_update: List[torch.Tensor] = [h2] - g1_update: List[torch.Tensor] = [g1] - g1_mlp: List[torch.Tensor] = [g1] + g2_update: list[torch.Tensor] = [g2] + h2_update: list[torch.Tensor] = [h2] + g1_update: list[torch.Tensor] = [g1] + g1_mlp: list[torch.Tensor] = [g1] if cal_gg1: gg1 = _make_nei_g1(g1_ext, nlist) @@ -704,7 +703,7 @@ def forward( @torch.jit.export def list_update_res_avg( self, - update_list: List[torch.Tensor], + update_list: list[torch.Tensor], ) -> torch.Tensor: nitem = len(update_list) uu = update_list[0] @@ -713,7 +712,7 @@ def list_update_res_avg( return uu / (float(nitem) ** 0.5) @torch.jit.export - def list_update_res_incr(self, update_list: List[torch.Tensor]) -> torch.Tensor: + def list_update_res_incr(self, update_list: list[torch.Tensor]) -> torch.Tensor: nitem = len(update_list) uu = update_list[0] scale = 1.0 / (float(nitem - 1) ** 0.5) if nitem > 1 else 0.0 @@ -722,7 +721,7 @@ def list_update_res_incr(self, update_list: List[torch.Tensor]) -> torch.Tensor: return uu @torch.jit.export - def list_update(self, update_list: List[torch.Tensor]) -> torch.Tensor: + def list_update(self, update_list: list[torch.Tensor]) -> torch.Tensor: if self.update_style == "res_avg": return self.list_update_res_avg(update_list) elif self.update_style == "res_incr": diff --git a/deepmd/pt/model/descriptor/repformers.py b/deepmd/pt/model/descriptor/repformers.py index a9e4ef7893..406758faa6 100644 --- a/deepmd/pt/model/descriptor/repformers.py +++ b/deepmd/pt/model/descriptor/repformers.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Callable, - Dict, - List, Optional, - Tuple, Union, ) @@ -99,12 +96,12 @@ def __init__( update_residual_init: str = "norm", set_davg_zero: bool = True, smooth: bool = True, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, precision: str = "float64", trainable_ln: bool = True, ln_eps: Optional[float] = 1e-5, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, use_sqrt_nnei: bool = True, g1_out_conv: bool = True, g1_out_mlp: bool = True, @@ -177,7 +174,7 @@ def __init__( The precision of the embedding net parameters. smooth : bool, optional Whether to use smoothness in processes such as attention weights calculation. - exclude_types : List[List[int]], optional + exclude_types : list[list[int]], optional The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. env_protection : float, optional @@ -339,7 +336,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return sum(self.sel) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sel @@ -408,7 +405,7 @@ def dim_emb(self): def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) @@ -420,7 +417,7 @@ def forward( extended_atype: torch.Tensor, extended_atype_embd: Optional[torch.Tensor] = None, mapping: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, ): if comm_dict is None: assert mapping is not None @@ -530,7 +527,7 @@ def forward( def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -538,11 +535,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -567,7 +564,7 @@ def compute_input_stats( self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - def get_stats(self) -> Dict[str, StatItem]: + def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" if self.stats is None: raise RuntimeError( diff --git a/deepmd/pt/model/descriptor/se_a.py b/deepmd/pt/model/descriptor/se_a.py index 44564a6fd3..1b51acfa21 100644 --- a/deepmd/pt/model/descriptor/se_a.py +++ b/deepmd/pt/model/descriptor/se_a.py @@ -3,10 +3,7 @@ from typing import ( Callable, ClassVar, - Dict, - List, Optional, - Tuple, Union, ) @@ -84,14 +81,14 @@ def __init__( activation_function: str = "tanh", precision: str = "float64", resnet_dt: bool = False, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, old_impl: bool = False, type_one_side: bool = True, trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ntypes: Optional[int] = None, # to be compat with input - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, # not implemented spin=None, ): @@ -130,7 +127,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return self.sea.get_nsel() - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sea.get_sel() @@ -138,7 +135,7 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.sea.get_ntypes() - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map @@ -192,7 +189,7 @@ def dim_out(self): return self.sea.dim_out def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -205,7 +202,7 @@ def change_type_map( def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -213,11 +210,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -228,7 +225,7 @@ def compute_input_stats( def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): """Update the type exclusions.""" self.sea.reinit_exclude(exclude_types) @@ -239,7 +236,7 @@ def forward( atype_ext: torch.Tensor, nlist: torch.Tensor, mapping: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, ): """Compute the descriptor. @@ -284,7 +281,7 @@ def set_stat_mean_and_stddev( self.sea.mean = mean self.sea.stddev = stddev - def get_stat_mean_and_stddev(self) -> Tuple[torch.Tensor, torch.Tensor]: + def get_stat_mean_and_stddev(self) -> tuple[torch.Tensor, torch.Tensor]: """Get mean and stddev for descriptor.""" return self.sea.mean, self.sea.stddev @@ -342,9 +339,9 @@ def t_cvt(xx): def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -386,12 +383,12 @@ def __init__( activation_function: str = "tanh", precision: str = "float64", resnet_dt: bool = False, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, old_impl: bool = False, type_one_side: bool = True, trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, **kwargs, ): """Construct an embedding net of type `se_a`. @@ -484,7 +481,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return sum(self.sel) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sel @@ -548,7 +545,7 @@ def __getitem__(self, key): def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -556,11 +553,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -585,7 +582,7 @@ def compute_input_stats( self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - def get_stats(self) -> Dict[str, StatItem]: + def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" if self.stats is None: raise RuntimeError( @@ -595,7 +592,7 @@ def get_stats(self) -> Dict[str, StatItem]: def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) diff --git a/deepmd/pt/model/descriptor/se_atten.py b/deepmd/pt/model/descriptor/se_atten.py index 92d6e223e4..c760f7330b 100644 --- a/deepmd/pt/model/descriptor/se_atten.py +++ b/deepmd/pt/model/descriptor/se_atten.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Callable, - Dict, - List, Optional, - Tuple, Union, ) @@ -63,7 +60,7 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, neuron: list = [25, 50, 100], axis_neuron: int = 16, @@ -82,11 +79,11 @@ def __init__( temperature=None, smooth: bool = True, type_one_side: bool = False, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, trainable_ln: bool = True, ln_eps: Optional[float] = 1e-5, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, type: Optional[str] = None, old_impl: bool = False, ): @@ -134,7 +131,7 @@ def __init__( (Only support False to keep consistent with other backend references.) (Not used in this version.) If mask the diagonal of attention weights - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. env_protection : float @@ -304,7 +301,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return sum(self.sel) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sel @@ -373,7 +370,7 @@ def dim_emb(self): def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -381,11 +378,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -410,7 +407,7 @@ def compute_input_stats( self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - def get_stats(self) -> Dict[str, StatItem]: + def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" if self.stats is None: raise RuntimeError( @@ -420,7 +417,7 @@ def get_stats(self) -> Dict[str, StatItem]: def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) @@ -612,7 +609,7 @@ def __init__( ln_eps: float = 1e-5, smooth: bool = True, precision: str = DEFAULT_PRECISION, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): """Construct a neighbor-wise attention net.""" super().__init__() @@ -755,7 +752,7 @@ def __init__( trainable_ln: bool = True, ln_eps: float = 1e-5, precision: str = DEFAULT_PRECISION, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): """Construct a neighbor-wise attention layer.""" super().__init__() @@ -862,7 +859,7 @@ def __init__( bias: bool = True, smooth: bool = True, precision: str = DEFAULT_PRECISION, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): """Construct a multi-head neighbor-wise attention net.""" super().__init__() diff --git a/deepmd/pt/model/descriptor/se_atten_v2.py b/deepmd/pt/model/descriptor/se_atten_v2.py index 41e37eb03c..f73ff255e6 100644 --- a/deepmd/pt/model/descriptor/se_atten_v2.py +++ b/deepmd/pt/model/descriptor/se_atten_v2.py @@ -1,8 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, - Tuple, Union, ) @@ -42,7 +40,7 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, neuron: list = [25, 50, 100], axis_neuron: int = 16, @@ -55,7 +53,7 @@ def __init__( activation_function: str = "tanh", precision: str = "float64", resnet_dt: bool = False, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, scaling_factor: int = 1.0, normalize=True, @@ -66,10 +64,10 @@ def __init__( ln_eps: Optional[float] = 1e-5, type_one_side: bool = False, stripped_type_embedding: Optional[bool] = None, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, use_econf_tebd: bool = False, use_tebd_bias: bool = False, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, # not implemented spin=None, type: Optional[str] = None, @@ -113,7 +111,7 @@ def __init__( resnet_dt : bool Time-step `dt` in the resnet construction: y = x + dt * \phi (Wx + b) - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. env_protection : float @@ -149,7 +147,7 @@ def __init__( Whether to use electronic configuration type embedding. use_tebd_bias : bool, Optional Whether to use bias in the type embedding layer. - type_map : List[str], Optional + type_map : list[str], Optional A list of strings. Give the name to each type of atoms. spin (Only support None to keep consistent with other backend references.) diff --git a/deepmd/pt/model/descriptor/se_r.py b/deepmd/pt/model/descriptor/se_r.py index da8d422444..b873ee20b8 100644 --- a/deepmd/pt/model/descriptor/se_r.py +++ b/deepmd/pt/model/descriptor/se_r.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Callable, - Dict, - List, Optional, - Tuple, Union, ) @@ -69,12 +66,12 @@ def __init__( activation_function: str = "tanh", precision: str = "float64", resnet_dt: bool = False, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, old_impl: bool = False, trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, - type_map: Optional[List[str]] = None, + seed: Optional[Union[int, list[int]]] = None, + type_map: Optional[list[str]] = None, **kwargs, ): super().__init__() @@ -143,7 +140,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return sum(self.sel) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sel @@ -151,7 +148,7 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map @@ -225,7 +222,7 @@ def share_params(self, base_class, shared_level, resume=False): raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -238,7 +235,7 @@ def change_type_map( def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -246,11 +243,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -275,7 +272,7 @@ def compute_input_stats( self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - def get_stats(self) -> Dict[str, StatItem]: + def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" if self.stats is None: raise RuntimeError( @@ -301,7 +298,7 @@ def __getitem__(self, key): def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) @@ -312,7 +309,7 @@ def forward( atype_ext: torch.Tensor, nlist: torch.Tensor, mapping: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, ): """Compute the descriptor. @@ -404,7 +401,7 @@ def set_stat_mean_and_stddev( self.mean = mean self.stddev = stddev - def get_stat_mean_and_stddev(self) -> Tuple[torch.Tensor, torch.Tensor]: + def get_stat_mean_and_stddev(self) -> tuple[torch.Tensor, torch.Tensor]: """Get mean and stddev for descriptor.""" return self.mean, self.stddev @@ -458,9 +455,9 @@ def t_cvt(xx): def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/pt/model/descriptor/se_t.py b/deepmd/pt/model/descriptor/se_t.py index 5e7e507fbf..072457b48f 100644 --- a/deepmd/pt/model/descriptor/se_t.py +++ b/deepmd/pt/model/descriptor/se_t.py @@ -3,10 +3,7 @@ from typing import ( Callable, ClassVar, - Dict, - List, Optional, - Tuple, Union, ) @@ -95,7 +92,7 @@ class DescrptSeT(BaseDescriptor, torch.nn.Module): The activation function in the embedding net. Supported options are |ACTIVATION_FN| env_protection : float Protection parameter to prevent division by zero errors during environment matrix calculations. - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. precision : str @@ -104,7 +101,7 @@ class DescrptSeT(BaseDescriptor, torch.nn.Module): If the weights of embedding net are trainable. seed : int, Optional Random seed for initializing the network parameters. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -112,17 +109,17 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], resnet_dt: bool = False, set_davg_zero: bool = False, activation_function: str = "tanh", env_protection: float = 0.0, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], precision: str = "float64", trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, - type_map: Optional[List[str]] = None, + seed: Optional[Union[int, list[int]]] = None, + type_map: Optional[list[str]] = None, ntypes: Optional[int] = None, # to be compat with input # not implemented spin=None, @@ -159,7 +156,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return self.seat.get_nsel() - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.seat.get_sel() @@ -167,7 +164,7 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.seat.get_ntypes() - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map @@ -221,7 +218,7 @@ def dim_out(self): return self.seat.dim_out def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -234,7 +231,7 @@ def change_type_map( def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -242,11 +239,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -257,7 +254,7 @@ def compute_input_stats( def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): """Update the type exclusions.""" self.seat.reinit_exclude(exclude_types) @@ -268,7 +265,7 @@ def forward( atype_ext: torch.Tensor, nlist: torch.Tensor, mapping: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, ): """Compute the descriptor. @@ -314,7 +311,7 @@ def set_stat_mean_and_stddev( self.seat.mean = mean self.seat.stddev = stddev - def get_stat_mean_and_stddev(self) -> Tuple[torch.Tensor, torch.Tensor]: + def get_stat_mean_and_stddev(self) -> tuple[torch.Tensor, torch.Tensor]: """Get mean and stddev for descriptor.""" return self.seat.mean, self.seat.stddev @@ -367,9 +364,9 @@ def t_cvt(xx): def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -404,16 +401,16 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], resnet_dt: bool = False, set_davg_zero: bool = False, activation_function: str = "tanh", env_protection: float = 0.0, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], precision: str = "float64", trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): r"""Construct an embedding net of type `se_e3`. @@ -438,7 +435,7 @@ def __init__( The activation function in the embedding net. Supported options are |ACTIVATION_FN| env_protection : float Protection parameter to prevent division by zero errors during environment matrix calculations. - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. precision : str @@ -511,7 +508,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return sum(self.sel) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sel @@ -575,7 +572,7 @@ def __getitem__(self, key): def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -583,11 +580,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -612,7 +609,7 @@ def compute_input_stats( self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - def get_stats(self) -> Dict[str, StatItem]: + def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" if self.stats is None: raise RuntimeError( @@ -622,7 +619,7 @@ def get_stats(self) -> Dict[str, StatItem]: def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) diff --git a/deepmd/pt/model/descriptor/se_t_tebd.py b/deepmd/pt/model/descriptor/se_t_tebd.py index 774a9154de..437a464709 100644 --- a/deepmd/pt/model/descriptor/se_t_tebd.py +++ b/deepmd/pt/model/descriptor/se_t_tebd.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Callable, - Dict, - List, Optional, - Tuple, Union, ) @@ -79,7 +76,7 @@ class DescrptSeTTebd(BaseDescriptor, torch.nn.Module): The cut-off radius rcut_smth From where the environment matrix should be smoothed - sel : Union[List[int], int] + sel : Union[list[int], int] list[int]: sel[i] specifies the maxmum number of type i atoms in the cut-off radius int: the total maxmum number of atoms in the cut-off radius ntypes : int @@ -101,7 +98,7 @@ class DescrptSeTTebd(BaseDescriptor, torch.nn.Module): The activation function in the embedding net. Supported options are |ACTIVATION_FN| env_protection: float Protection parameter to prevent division by zero errors during environment matrix calculations. - exclude_types : List[Tuple[int, int]] + exclude_types : list[tuple[int, int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. precision @@ -110,7 +107,7 @@ class DescrptSeTTebd(BaseDescriptor, torch.nn.Module): If the weights of embedding net are trainable. seed Random seed for initializing the network parameters. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. concat_output_tebd: bool Whether to concat type embedding at the output of the descriptor. @@ -127,7 +124,7 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, neuron: list = [2, 4, 8], tebd_dim: int = 8, @@ -136,11 +133,11 @@ def __init__( set_davg_zero: bool = True, activation_function: str = "tanh", env_protection: float = 0.0, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], precision: str = "float64", trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, - type_map: Optional[List[str]] = None, + seed: Optional[Union[int, list[int]]] = None, + type_map: Optional[list[str]] = None, concat_output_tebd: bool = True, use_econf_tebd: bool = False, use_tebd_bias=False, @@ -195,7 +192,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return self.se_ttebd.get_nsel() - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.se_ttebd.get_sel() @@ -203,7 +200,7 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.se_ttebd.get_ntypes() - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map @@ -274,7 +271,7 @@ def dim_emb(self): def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -282,11 +279,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -304,12 +301,12 @@ def set_stat_mean_and_stddev( self.se_ttebd.mean = mean self.se_ttebd.stddev = stddev - def get_stat_mean_and_stddev(self) -> Tuple[torch.Tensor, torch.Tensor]: + def get_stat_mean_and_stddev(self) -> tuple[torch.Tensor, torch.Tensor]: """Get mean and stddev for descriptor.""" return self.se_ttebd.mean, self.se_ttebd.stddev def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -410,7 +407,7 @@ def forward( extended_atype: torch.Tensor, nlist: torch.Tensor, mapping: Optional[torch.Tensor] = None, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, ): """Compute the descriptor. @@ -465,9 +462,9 @@ def forward( def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -500,7 +497,7 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, neuron: list = [25, 50, 100], tebd_dim: int = 8, @@ -509,10 +506,10 @@ def __init__( activation_function="tanh", precision: str = "float64", resnet_dt: bool = False, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, smooth: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() self.rcut = rcut @@ -598,7 +595,7 @@ def get_nsel(self) -> int: """Returns the number of selected atoms in the cut-off radius.""" return sum(self.sel) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.sel @@ -667,7 +664,7 @@ def dim_emb(self): def compute_input_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, ): """ @@ -675,11 +672,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -704,7 +701,7 @@ def compute_input_stats( self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - def get_stats(self) -> Dict[str, StatItem]: + def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" if self.stats is None: raise RuntimeError( @@ -714,7 +711,7 @@ def get_stats(self) -> Dict[str, StatItem]: def reinit_exclude( self, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) diff --git a/deepmd/pt/model/model/dipole_model.py b/deepmd/pt/model/model/dipole_model.py index 0d4a53a850..c83d1f0bf7 100644 --- a/deepmd/pt/model/model/dipole_model.py +++ b/deepmd/pt/model/model/dipole_model.py @@ -3,7 +3,6 @@ deepcopy, ) from typing import ( - Dict, Optional, ) @@ -64,7 +63,7 @@ def forward( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: model_ret = self.forward_common( coord, atype, diff --git a/deepmd/pt/model/model/dos_model.py b/deepmd/pt/model/model/dos_model.py index 27d62fa882..abfcd4a2b4 100644 --- a/deepmd/pt/model/model/dos_model.py +++ b/deepmd/pt/model/model/dos_model.py @@ -3,7 +3,6 @@ deepcopy, ) from typing import ( - Dict, Optional, ) @@ -56,7 +55,7 @@ def forward( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: model_ret = self.forward_common( coord, atype, diff --git a/deepmd/pt/model/model/dp_model.py b/deepmd/pt/model/model/dp_model.py index d3a65db287..8659526c49 100644 --- a/deepmd/pt/model/model/dp_model.py +++ b/deepmd/pt/model/model/dp_model.py @@ -1,8 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, - Tuple, ) from deepmd.pt.model.descriptor.base_descriptor import ( @@ -20,9 +18,9 @@ class DPModelCommon: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/pt/model/model/dp_zbl_model.py b/deepmd/pt/model/model/dp_zbl_model.py index 4016f0eb35..59147e1d4c 100644 --- a/deepmd/pt/model/model/dp_zbl_model.py +++ b/deepmd/pt/model/model/dp_zbl_model.py @@ -3,10 +3,7 @@ deepcopy, ) from typing import ( - Dict, - List, Optional, - Tuple, ) import torch @@ -68,7 +65,7 @@ def forward( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: model_ret = self.forward_common( coord, atype, @@ -135,9 +132,9 @@ def forward_lower( def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/pt/model/model/ener_model.py b/deepmd/pt/model/model/ener_model.py index e58ba1df62..82f429c4ab 100644 --- a/deepmd/pt/model/model/ener_model.py +++ b/deepmd/pt/model/model/ener_model.py @@ -3,7 +3,6 @@ deepcopy, ) from typing import ( - Dict, Optional, ) @@ -64,7 +63,7 @@ def forward( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: model_ret = self.forward_common( coord, atype, @@ -104,7 +103,7 @@ def forward_lower( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, ): model_ret = self.forward_common_lower( extended_coord, diff --git a/deepmd/pt/model/model/frozen.py b/deepmd/pt/model/model/frozen.py index 395d81c217..431c035339 100644 --- a/deepmd/pt/model/model/frozen.py +++ b/deepmd/pt/model/model/frozen.py @@ -2,10 +2,7 @@ import json import tempfile from typing import ( - Dict, - List, Optional, - Tuple, ) import torch @@ -56,12 +53,12 @@ def get_rcut(self) -> float: return self.model.get_rcut() @torch.jit.export - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" return self.model.get_type_map() @torch.jit.export - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.model.get_sel() @@ -76,7 +73,7 @@ def get_dim_aparam(self) -> int: return self.model.get_dim_aparam() @torch.jit.export - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -124,7 +121,7 @@ def forward( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: return self.model.forward( coord, atype, @@ -177,9 +174,9 @@ def get_nsel(self) -> int: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/pt/model/model/make_hessian_model.py b/deepmd/pt/model/model/make_hessian_model.py index 9588348f53..d2541a815e 100644 --- a/deepmd/pt/model/model/make_hessian_model.py +++ b/deepmd/pt/model/model/make_hessian_model.py @@ -2,8 +2,6 @@ import copy import math from typing import ( - Dict, - List, Optional, Union, ) @@ -47,7 +45,7 @@ def __init__( def requires_hessian( self, - keys: Union[str, List[str]], + keys: Union[str, list[str]], ): """Set which output variable(s) requires hessian.""" if isinstance(keys, str): @@ -68,7 +66,7 @@ def forward_common( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: """Return model prediction. Parameters @@ -90,7 +88,7 @@ def forward_common( Returns ------- ret_dict - The result dict of type Dict[str,torch.Tensor]. + The result dict of type dict[str,torch.Tensor]. The keys are defined by the `ModelOutputDef`. """ @@ -122,7 +120,7 @@ def _cal_hessian_all( box: Optional[torch.Tensor] = None, fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: nf, nloc = atype.shape coord = coord.view([nf, (nloc * 3)]) box = box.view([nf, 9]) if box is not None else None @@ -130,7 +128,7 @@ def _cal_hessian_all( aparam = aparam.view([nf, nloc, -1]) if aparam is not None else None fdef = self.atomic_output_def() # keys of values that require hessian - hess_keys: List[str] = [] + hess_keys: list[str] = [] for kk in fdef.keys(): if fdef[kk].r_hessian: hess_keys.append(kk) diff --git a/deepmd/pt/model/model/make_model.py b/deepmd/pt/model/model/make_model.py index 8207f4961e..46b7e51109 100644 --- a/deepmd/pt/model/model/make_model.py +++ b/deepmd/pt/model/model/make_model.py @@ -1,10 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - Dict, - List, Optional, - Tuple, - Type, ) import torch @@ -43,7 +39,7 @@ ) -def make_model(T_AtomicModel: Type[BaseAtomicModel]): +def make_model(T_AtomicModel: type[BaseAtomicModel]): """Make a model as a derived class of an atomic model. The model provide two interfaces. @@ -89,13 +85,13 @@ def model_output_def(self): return ModelOutputDef(self.atomic_output_def()) @torch.jit.export - def model_output_type(self) -> List[str]: + def model_output_type(self) -> list[str]: """Get the output type for the model.""" output_def = self.model_output_def() var_defs = output_def.var_defs # jit: Comprehension ifs are not supported yet # type hint is critical for JIT - vars: List[str] = [] + vars: list[str] = [] for kk, vv in var_defs.items(): # .value is critical for JIT if vv.category == OutputVariableCategory.OUT.value: @@ -111,7 +107,7 @@ def forward_common( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: """Return model prediction. Parameters @@ -133,7 +129,7 @@ def forward_common( Returns ------- ret_dict - The result dict of type Dict[str,torch.Tensor]. + The result dict of type dict[str,torch.Tensor]. The keys are defined by the `ModelOutputDef`. """ @@ -187,11 +183,11 @@ def change_out_bias( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. bias_adjust_mode : str @@ -214,7 +210,7 @@ def forward_common_lower( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, extra_nlist_sort: bool = False, ): """Return model prediction. Lower interface that takes @@ -283,7 +279,7 @@ def input_type_cast( box: Optional[torch.Tensor] = None, fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, - ) -> Tuple[ + ) -> tuple[ torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor], @@ -302,7 +298,7 @@ def input_type_cast( # " does not match" # f" that of the coordinate {input_prec}" # ) - _lst: List[Optional[torch.Tensor]] = [ + _lst: list[Optional[torch.Tensor]] = [ vv.to(coord.dtype) if vv is not None else None for vv in [box, fparam, aparam] ] @@ -324,9 +320,9 @@ def input_type_cast( def output_type_cast( self, - model_ret: Dict[str, torch.Tensor], + model_ret: dict[str, torch.Tensor], input_prec: str, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: """Convert the model output to the input prec.""" do_cast = ( input_prec @@ -469,7 +465,7 @@ def do_grad_c( return self.atomic_model.do_grad_c(var_name) def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -499,7 +495,7 @@ def get_dim_aparam(self) -> int: return self.atomic_model.get_dim_aparam() @torch.jit.export - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -522,7 +518,7 @@ def get_rcut(self) -> float: return self.atomic_model.get_rcut() @torch.jit.export - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" return self.atomic_model.get_type_map() @@ -548,7 +544,7 @@ def compute_or_load_stat( """Compute or load the statistics.""" return self.atomic_model.compute_or_load_stat(sampled_func, stat_file_path) - def get_sel(self) -> List[int]: + def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" return self.atomic_model.get_sel() @@ -581,7 +577,7 @@ def forward( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: # directly call the forward_common method when no specific transform rule return self.forward_common( coord, diff --git a/deepmd/pt/model/model/polar_model.py b/deepmd/pt/model/model/polar_model.py index 7fbb7bdcf4..57379ba372 100644 --- a/deepmd/pt/model/model/polar_model.py +++ b/deepmd/pt/model/model/polar_model.py @@ -3,7 +3,6 @@ deepcopy, ) from typing import ( - Dict, Optional, ) @@ -56,7 +55,7 @@ def forward( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: model_ret = self.forward_common( coord, atype, diff --git a/deepmd/pt/model/model/property_model.py b/deepmd/pt/model/model/property_model.py index a5b52139fe..164331f44c 100644 --- a/deepmd/pt/model/model/property_model.py +++ b/deepmd/pt/model/model/property_model.py @@ -3,7 +3,6 @@ deepcopy, ) from typing import ( - Dict, Optional, ) @@ -56,7 +55,7 @@ def forward( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: model_ret = self.forward_common( coord, atype, @@ -92,7 +91,7 @@ def forward_lower( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, ): model_ret = self.forward_common_lower( extended_coord, diff --git a/deepmd/pt/model/model/spin_model.py b/deepmd/pt/model/model/spin_model.py index 717a7ee7c8..a9f6e4d75a 100644 --- a/deepmd/pt/model/model/spin_model.py +++ b/deepmd/pt/model/model/spin_model.py @@ -4,8 +4,6 @@ deepcopy, ) from typing import ( - Dict, - List, Optional, ) @@ -258,7 +256,7 @@ def expand_aparam(aparam, nloc: int): return aparam @torch.jit.export - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" tmap = self.backbone_model.get_type_map() ntypes = len(tmap) // 2 # ignore the virtual type @@ -285,7 +283,7 @@ def get_dim_aparam(self): return self.backbone_model.get_dim_aparam() @torch.jit.export - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution to the result of the model. @@ -301,7 +299,7 @@ def is_aparam_nall(self) -> bool: return self.backbone_model.is_aparam_nall() @torch.jit.export - def model_output_type(self) -> List[str]: + def model_output_type(self) -> list[str]: """Get the output type for the model.""" return self.backbone_model.model_output_type() @@ -422,7 +420,7 @@ def forward_common( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: nframes, nloc = atype.shape coord_updated, atype_updated = self.process_spin_input(coord, atype, spin) if aparam is not None: @@ -576,7 +574,7 @@ def forward( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - ) -> Dict[str, torch.Tensor]: + ) -> dict[str, torch.Tensor]: model_ret = self.forward_common( coord, atype, diff --git a/deepmd/pt/model/model/transform_output.py b/deepmd/pt/model/model/transform_output.py index e8afab15c4..e15eda6a1d 100644 --- a/deepmd/pt/model/model/transform_output.py +++ b/deepmd/pt/model/model/transform_output.py @@ -1,7 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - Dict, - List, Optional, ) @@ -31,7 +29,7 @@ def atomic_virial_corr( ce = coord * atom_energy sumce0, sumce1, sumce2 = torch.split(torch.sum(ce, dim=1), [1, 1, 1], dim=-1) faked_grad = torch.ones_like(sumce0) - lst = torch.jit.annotate(List[Optional[torch.Tensor]], [faked_grad]) + lst = torch.jit.annotate(list[Optional[torch.Tensor]], [faked_grad]) extended_virial_corr0 = torch.autograd.grad( [sumce0], [extended_coord], @@ -76,7 +74,7 @@ def task_deriv_one( create_graph: bool = True, ): faked_grad = torch.ones_like(energy) - lst = torch.jit.annotate(List[Optional[torch.Tensor]], [faked_grad]) + lst = torch.jit.annotate(list[Optional[torch.Tensor]], [faked_grad]) extended_force = torch.autograd.grad( [energy], [extended_coord], @@ -153,12 +151,12 @@ def take_deriv( def fit_output_to_model_output( - fit_ret: Dict[str, torch.Tensor], + fit_ret: dict[str, torch.Tensor], fit_output_def: FittingOutputDef, coord_ext: torch.Tensor, do_atomic_virial: bool = False, create_graph: bool = True, -) -> Dict[str, torch.Tensor]: +) -> dict[str, torch.Tensor]: """Transform the output of the fitting network to the model output. @@ -197,11 +195,11 @@ def fit_output_to_model_output( def communicate_extended_output( - model_ret: Dict[str, torch.Tensor], + model_ret: dict[str, torch.Tensor], model_output_def: ModelOutputDef, mapping: torch.Tensor, # nf x nloc do_atomic_virial: bool = False, -) -> Dict[str, torch.Tensor]: +) -> dict[str, torch.Tensor]: """Transform the output of the model network defined on local and ghost (extended) atoms to local atoms. diff --git a/deepmd/pt/model/network/layernorm.py b/deepmd/pt/model/network/layernorm.py index c1c2c29c87..76ce90b627 100644 --- a/deepmd/pt/model/network/layernorm.py +++ b/deepmd/pt/model/network/layernorm.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, Union, ) @@ -45,7 +44,7 @@ def __init__( stddev: float = 1.0, precision: str = DEFAULT_PRECISION, trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() self.eps = eps diff --git a/deepmd/pt/model/network/mlp.py b/deepmd/pt/model/network/mlp.py index 090d64fbcf..f2137bd004 100644 --- a/deepmd/pt/model/network/mlp.py +++ b/deepmd/pt/model/network/mlp.py @@ -1,8 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( ClassVar, - Dict, - List, Optional, Union, ) @@ -83,7 +81,7 @@ def __init__( stddev: float = 1.0, precision: str = DEFAULT_PRECISION, init: str = "default", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ): super().__init__() # only use_timestep when skip connection is established. @@ -297,7 +295,7 @@ def __init__(self, *args, **kwargs): class NetworkCollection(DPNetworkCollection, nn.Module): """PyTorch implementation of NetworkCollection.""" - NETWORK_TYPE_MAP: ClassVar[Dict[str, type]] = { + NETWORK_TYPE_MAP: ClassVar[dict[str, type]] = { "network": MLP, "embedding_network": EmbeddingNet, "fitting_network": FittingNet, diff --git a/deepmd/pt/model/network/network.py b/deepmd/pt/model/network/network.py index 0c21a9814b..ef50274b03 100644 --- a/deepmd/pt/model/network/network.py +++ b/deepmd/pt/model/network/network.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, Union, ) @@ -571,7 +570,7 @@ def __init__( bavg=0.0, stddev=1.0, precision="default", - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, use_econf_tebd=False, use_tebd_bias: bool = False, type_map=None, @@ -627,7 +626,7 @@ def share_params(self, base_class, shared_level, resume=False): raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -660,7 +659,7 @@ class TypeEmbedNetConsistent(nn.Module): Whether to use electronic configuration type embedding. use_tebd_bias : bool, Optional Whether to use bias in the type embedding layer. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -668,16 +667,16 @@ def __init__( self, *, ntypes: int, - neuron: List[int], + neuron: list[int], resnet_dt: bool = False, activation_function: str = "tanh", precision: str = "default", trainable: bool = True, - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, padding: bool = False, use_econf_tebd: bool = False, use_tebd_bias: bool = False, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, ): """Construct a type embedding net.""" super().__init__() @@ -734,7 +733,7 @@ def forward(self, device: torch.device): return embed def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. diff --git a/deepmd/pt/model/task/dipole.py b/deepmd/pt/model/task/dipole.py index 30c5a341a7..56b14677b9 100644 --- a/deepmd/pt/model/task/dipole.py +++ b/deepmd/pt/model/task/dipole.py @@ -3,7 +3,6 @@ import logging from typing import ( Callable, - List, Optional, Union, ) @@ -45,7 +44,7 @@ class DipoleFittingNet(GeneralFitting): Embedding width per atom. embedding_width : int The dimension of rotation matrix, m1. - neuron : List[int] + neuron : list[int] Number of neurons in each hidden layers of the fitting net. resnet_dt : bool Using time-step in the ResNet construction. @@ -70,7 +69,7 @@ class DipoleFittingNet(GeneralFitting): c_differentiable If the variable is differentiated with respect to the cell tensor (pbc case). Only reducible variable are differentiable. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -79,7 +78,7 @@ def __init__( ntypes: int, dim_descrpt: int, embedding_width: int, - neuron: List[int] = [128, 128, 128], + neuron: list[int] = [128, 128, 128], resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, @@ -87,11 +86,11 @@ def __init__( precision: str = DEFAULT_PRECISION, mixed_types: bool = True, rcond: Optional[float] = None, - seed: Optional[Union[int, List[int]]] = None, - exclude_types: List[int] = [], + seed: Optional[Union[int, list[int]]] = None, + exclude_types: list[int] = [], r_differentiable: bool = True, c_differentiable: bool = True, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, **kwargs, ): self.embedding_width = embedding_width @@ -151,7 +150,7 @@ def output_def(self) -> FittingOutputDef: def compute_output_stats( self, - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], stat_file_path: Optional[DPPath] = None, ): """ @@ -159,11 +158,11 @@ def compute_output_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. stat_file_path : Optional[DPPath] @@ -197,4 +196,4 @@ def forward( return {self.var_name: out.to(env.GLOBAL_PT_FLOAT_PRECISION)} # make jit happy with torch 2.0.0 - exclude_types: List[int] + exclude_types: list[int] diff --git a/deepmd/pt/model/task/dos.py b/deepmd/pt/model/task/dos.py index c27e287728..4f69094b0d 100644 --- a/deepmd/pt/model/task/dos.py +++ b/deepmd/pt/model/task/dos.py @@ -2,7 +2,6 @@ import copy import logging from typing import ( - List, Optional, Union, ) @@ -45,19 +44,19 @@ def __init__( ntypes: int, dim_descrpt: int, numb_dos: int = 300, - neuron: List[int] = [128, 128, 128], + neuron: list[int] = [128, 128, 128], resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, rcond: Optional[float] = None, bias_dos: Optional[torch.Tensor] = None, - trainable: Union[bool, List[bool]] = True, - seed: Optional[Union[int, List[int]]] = None, + trainable: Union[bool, list[bool]] = True, + seed: Optional[Union[int, list[int]]] = None, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, - exclude_types: List[int] = [], + exclude_types: list[int] = [], mixed_types: bool = True, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, ): if bias_dos is not None: self.bias_dos = bias_dos @@ -127,4 +126,4 @@ def serialize(self) -> dict: return dd # make jit happy with torch 2.0.0 - exclude_types: List[int] + exclude_types: list[int] diff --git a/deepmd/pt/model/task/ener.py b/deepmd/pt/model/task/ener.py index 1737e401fb..2048c05ba9 100644 --- a/deepmd/pt/model/task/ener.py +++ b/deepmd/pt/model/task/ener.py @@ -2,9 +2,7 @@ import copy import logging from typing import ( - List, Optional, - Tuple, Union, ) @@ -48,7 +46,7 @@ def __init__( self, ntypes: int, dim_descrpt: int, - neuron: List[int] = [128, 128, 128], + neuron: list[int] = [128, 128, 128], bias_atom_e: Optional[torch.Tensor] = None, resnet_dt: bool = True, numb_fparam: int = 0, @@ -56,8 +54,8 @@ def __init__( activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, - seed: Optional[Union[int, List[int]]] = None, - type_map: Optional[List[str]] = None, + seed: Optional[Union[int, list[int]]] = None, + type_map: Optional[list[str]] = None, **kwargs, ): super().__init__( @@ -94,7 +92,7 @@ def serialize(self) -> dict: } # make jit happy with torch 2.0.0 - exclude_types: List[int] + exclude_types: list[int] @Fitting.register("direct_force") @@ -185,11 +183,11 @@ def deserialize(self) -> "EnergyFittingNetDirect": raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: raise NotImplementedError - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: raise NotImplementedError def forward( @@ -201,7 +199,7 @@ def forward( h2: Optional[torch.Tensor] = None, fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, - ) -> Tuple[torch.Tensor, None]: + ) -> tuple[torch.Tensor, None]: """Based on embedding net output, alculate total energy. Args: diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py index 95242eb67c..1827569a17 100644 --- a/deepmd/pt/model/task/fitting.py +++ b/deepmd/pt/model/task/fitting.py @@ -5,7 +5,6 @@ abstractmethod, ) from typing import ( - List, Optional, Union, ) @@ -97,7 +96,7 @@ class GeneralFitting(Fitting): Embedding width per atom. dim_out : int The output dimension of the fitting net. - neuron : List[int] + neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_e : torch.Tensor, optional Average enery per atom for each element. @@ -118,17 +117,17 @@ class GeneralFitting(Fitting): The condition number for the regression of atomic energy. seed : int, optional Random seed. - exclude_types: List[int] + exclude_types: list[int] Atomic contributions of the excluded atom types are set zero. - trainable : Union[List[bool], bool] + trainable : Union[list[bool], bool] If the parameters in the fitting net are trainable. Now this only supports setting all the parameters in the fitting net at one state. - When in List[bool], the trainable will be True only if all the boolean parameters are True. - remove_vaccum_contribution: List[bool], optional + When in list[bool], the trainable will be True only if all the boolean parameters are True. + remove_vaccum_contribution: list[bool], optional Remove vaccum contribution before the bias is added. The list assigned each type. For `mixed_types` provide `[True]`, otherwise it should be a list of the same length as `ntypes` signaling if or not removing the vaccum contribution for the atom types in the list. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -137,7 +136,7 @@ def __init__( var_name: str, ntypes: int, dim_descrpt: int, - neuron: List[int] = [128, 128, 128], + neuron: list[int] = [128, 128, 128], bias_atom_e: Optional[torch.Tensor] = None, resnet_dt: bool = True, numb_fparam: int = 0, @@ -146,11 +145,11 @@ def __init__( precision: str = DEFAULT_PRECISION, mixed_types: bool = True, rcond: Optional[float] = None, - seed: Optional[Union[int, List[int]]] = None, - exclude_types: List[int] = [], - trainable: Union[bool, List[bool]] = True, - remove_vaccum_contribution: Optional[List[bool]] = None, - type_map: Optional[List[str]] = None, + seed: Optional[Union[int, list[int]]] = None, + exclude_types: list[int] = [], + trainable: Union[bool, list[bool]] = True, + remove_vaccum_contribution: Optional[list[bool]] = None, + type_map: Optional[list[str]] = None, **kwargs, ): super().__init__() @@ -253,13 +252,13 @@ def __init__( def reinit_exclude( self, - exclude_types: List[int] = [], + exclude_types: list[int] = [], ): self.exclude_types = exclude_types self.emask = AtomExcludeMask(self.ntypes, self.exclude_types) def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -342,9 +341,9 @@ def get_dim_aparam(self) -> int: return self.numb_aparam # make jit happy - exclude_types: List[int] + exclude_types: list[int] - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model. Only atoms with selected atom types have atomic contribution @@ -352,13 +351,13 @@ def get_sel_type(self) -> List[int]: If returning an empty list, all atom types are selected. """ # make jit happy - sel_type: List[int] = [] + sel_type: list[int] = [] for ii in range(self.ntypes): if ii not in self.exclude_types: sel_type.append(ii) return sel_type - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map diff --git a/deepmd/pt/model/task/invar_fitting.py b/deepmd/pt/model/task/invar_fitting.py index 36c416d6e5..230046b74b 100644 --- a/deepmd/pt/model/task/invar_fitting.py +++ b/deepmd/pt/model/task/invar_fitting.py @@ -2,7 +2,6 @@ import copy import logging from typing import ( - List, Optional, Union, ) @@ -48,7 +47,7 @@ class InvarFitting(GeneralFitting): Embedding width per atom. dim_out : int The output dimension of the fitting net. - neuron : List[int] + neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_e : torch.Tensor, optional Average enery per atom for each element. @@ -69,14 +68,14 @@ class InvarFitting(GeneralFitting): The condition number for the regression of atomic energy. seed : int, optional Random seed. - exclude_types: List[int] + exclude_types: list[int] Atomic contributions of the excluded atom types are set zero. - atom_ener: List[Optional[torch.Tensor]], optional + atom_ener: list[Optional[torch.Tensor]], optional Specifying atomic energy contribution in vacuum. The value is a list specifying the bias. the elements can be None or np.array of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] The `set_davg_zero` key in the descrptor should be set. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -87,7 +86,7 @@ def __init__( ntypes: int, dim_descrpt: int, dim_out: int, - neuron: List[int] = [128, 128, 128], + neuron: list[int] = [128, 128, 128], bias_atom_e: Optional[torch.Tensor] = None, resnet_dt: bool = True, numb_fparam: int = 0, @@ -96,10 +95,10 @@ def __init__( precision: str = DEFAULT_PRECISION, mixed_types: bool = True, rcond: Optional[float] = None, - seed: Optional[Union[int, List[int]]] = None, - exclude_types: List[int] = [], - atom_ener: Optional[List[Optional[torch.Tensor]]] = None, - type_map: Optional[List[str]] = None, + seed: Optional[Union[int, list[int]]] = None, + exclude_types: list[int] = [], + atom_ener: Optional[list[Optional[torch.Tensor]]] = None, + type_map: Optional[list[str]] = None, **kwargs, ): self.dim_out = dim_out @@ -179,4 +178,4 @@ def forward( return self._forward_common(descriptor, atype, gr, g2, h2, fparam, aparam) # make jit happy with torch 2.0.0 - exclude_types: List[int] + exclude_types: list[int] diff --git a/deepmd/pt/model/task/polarizability.py b/deepmd/pt/model/task/polarizability.py index 7345fa296c..a16ab886d4 100644 --- a/deepmd/pt/model/task/polarizability.py +++ b/deepmd/pt/model/task/polarizability.py @@ -2,7 +2,6 @@ import copy import logging from typing import ( - List, Optional, Union, ) @@ -47,7 +46,7 @@ class PolarFittingNet(GeneralFitting): Embedding width per atom. embedding_width : int The dimension of rotation matrix, m1. - neuron : List[int] + neuron : list[int] Number of neurons in each hidden layers of the fitting net. resnet_dt : bool Using time-step in the ResNet construction. @@ -69,11 +68,11 @@ class PolarFittingNet(GeneralFitting): fit_diag : bool Fit the diagonal part of the rotational invariant polarizability matrix, which will be converted to normal polarizability matrix by contracting with the rotation matrix. - scale : List[float] + scale : list[float] The output of the fitting net (polarizability matrix) for type i atom will be scaled by scale[i] shift_diag : bool Whether to shift the diagonal part of the polarizability matrix. The shift operation is carried out after scale. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -83,7 +82,7 @@ def __init__( ntypes: int, dim_descrpt: int, embedding_width: int, - neuron: List[int] = [128, 128, 128], + neuron: list[int] = [128, 128, 128], resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, @@ -91,12 +90,12 @@ def __init__( precision: str = DEFAULT_PRECISION, mixed_types: bool = True, rcond: Optional[float] = None, - seed: Optional[Union[int, List[int]]] = None, - exclude_types: List[int] = [], + seed: Optional[Union[int, list[int]]] = None, + exclude_types: list[int] = [], fit_diag: bool = True, - scale: Optional[Union[List[float], float]] = None, + scale: Optional[Union[list[float], float]] = None, shift_diag: bool = True, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, **kwargs, ): self.embedding_width = embedding_width @@ -162,7 +161,7 @@ def __getitem__(self, key): return super().__getitem__(key) def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: """Change the type related params to new ones, according to `type_map` and the original one in the model. If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. @@ -258,4 +257,4 @@ def forward( return {"polarizability": out.to(env.GLOBAL_PT_FLOAT_PRECISION)} # make jit happy with torch 2.0.0 - exclude_types: List[int] + exclude_types: list[int] diff --git a/deepmd/pt/model/task/property.py b/deepmd/pt/model/task/property.py index 804383c57f..cc6a4e8745 100644 --- a/deepmd/pt/model/task/property.py +++ b/deepmd/pt/model/task/property.py @@ -2,7 +2,6 @@ import copy import logging from typing import ( - List, Optional, ) @@ -46,7 +45,7 @@ class PropertyFittingNet(InvarFitting): Embedding width per atom. task_dim : int The dimension of outputs of fitting net. - neuron : List[int] + neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_p : torch.Tensor, optional Average property per atom for each element. @@ -78,7 +77,7 @@ def __init__( ntypes: int, dim_descrpt: int, task_dim: int = 1, - neuron: List[int] = [128, 128, 128], + neuron: list[int] = [128, 128, 128], bias_atom_p: Optional[torch.Tensor] = None, intensive: bool = False, bias_method: str = "normal", @@ -149,4 +148,4 @@ def serialize(self) -> dict: return dd # make jit happy with torch 2.0.0 - exclude_types: List[int] + exclude_types: list[int] diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index 9bdc80195f..95c73bd83c 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -10,7 +10,6 @@ ) from typing import ( Any, - Dict, ) import numpy as np @@ -88,7 +87,7 @@ class Trainer: def __init__( self, - config: Dict[str, Any], + config: dict[str, Any], training_data, stat_file_path=None, validation_data=None, diff --git a/deepmd/pt/train/wrapper.py b/deepmd/pt/train/wrapper.py index 6bc7cdc87a..922ac296ea 100644 --- a/deepmd/pt/train/wrapper.py +++ b/deepmd/pt/train/wrapper.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging from typing import ( - Dict, Optional, Union, ) @@ -18,8 +17,8 @@ class ModelWrapper(torch.nn.Module): def __init__( self, - model: Union[torch.nn.Module, Dict], - loss: Union[torch.nn.Module, Dict] = None, + model: Union[torch.nn.Module, dict], + loss: Union[torch.nn.Module, dict] = None, model_params=None, shared_links=None, ): @@ -183,12 +182,12 @@ def forward( ) return model_pred, loss, more_loss - def set_extra_state(self, state: Dict): + def set_extra_state(self, state: dict): self.model_params = state["model_params"] self.train_infos = state["train_infos"] return None - def get_extra_state(self) -> Dict: + def get_extra_state(self) -> dict: state = { "model_params": self.model_params, "train_infos": self.train_infos, diff --git a/deepmd/pt/utils/dataloader.py b/deepmd/pt/utils/dataloader.py index 6a37a4a843..c7f44cfb70 100644 --- a/deepmd/pt/utils/dataloader.py +++ b/deepmd/pt/utils/dataloader.py @@ -9,9 +9,6 @@ from threading import ( Thread, ) -from typing import ( - List, -) import h5py import numpy as np @@ -86,7 +83,7 @@ def __init__( with h5py.File(systems) as file: systems = [os.path.join(systems, item) for item in file.keys()] - self.systems: List[DeepmdDataSetForLoader] = [] + self.systems: list[DeepmdDataSetForLoader] = [] if len(systems) >= 100: log.info(f"Constructing DataLoaders from {len(systems)} systems") @@ -106,7 +103,7 @@ def construct_dataset(system): ) as pool: self.systems = pool.map(construct_dataset, systems) - self.sampler_list: List[DistributedSampler] = [] + self.sampler_list: list[DistributedSampler] = [] self.index = [] self.total_batch = 0 @@ -178,7 +175,7 @@ def __getitem__(self, idx): batch["sid"] = idx return batch - def add_data_requirement(self, data_requirement: List[DataRequirementItem]): + def add_data_requirement(self, data_requirement: list[DataRequirementItem]): """Add data requirement for each system in multiple systems.""" for system in self.systems: system.add_data_requirement(data_requirement) @@ -186,7 +183,7 @@ def add_data_requirement(self, data_requirement: List[DataRequirementItem]): def print_summary( self, name: str, - prob: List[float], + prob: list[float], ): print_summary( name, diff --git a/deepmd/pt/utils/dataset.py b/deepmd/pt/utils/dataset.py index dbe4d92a0f..4a29f3f045 100644 --- a/deepmd/pt/utils/dataset.py +++ b/deepmd/pt/utils/dataset.py @@ -2,7 +2,6 @@ from typing import ( - List, Optional, ) @@ -17,7 +16,7 @@ class DeepmdDataSetForLoader(Dataset): - def __init__(self, system: str, type_map: Optional[List[str]] = None): + def __init__(self, system: str, type_map: Optional[list[str]] = None): """Construct DeePMD-style dataset containing frames cross different systems. Args: @@ -41,7 +40,7 @@ def __getitem__(self, index): b_data["natoms"] = self._natoms_vec return b_data - def add_data_requirement(self, data_requirement: List[DataRequirementItem]): + def add_data_requirement(self, data_requirement: list[DataRequirementItem]): """Add data requirement for this data system.""" for data_item in data_requirement: self._data_system.add( diff --git a/deepmd/pt/utils/env_mat_stat.py b/deepmd/pt/utils/env_mat_stat.py index 9eaea16c3e..cc30bd5155 100644 --- a/deepmd/pt/utils/env_mat_stat.py +++ b/deepmd/pt/utils/env_mat_stat.py @@ -1,10 +1,9 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from collections.abc import ( + Iterator, +) from typing import ( TYPE_CHECKING, - Dict, - Iterator, - List, - Tuple, Union, ) @@ -38,7 +37,7 @@ class EnvMatStat(BaseEnvMatStat): - def compute_stat(self, env_mat: Dict[str, torch.Tensor]) -> Dict[str, StatItem]: + def compute_stat(self, env_mat: dict[str, torch.Tensor]) -> dict[str, StatItem]: """Compute the statistics of the environment matrix for a single system. Parameters @@ -48,7 +47,7 @@ def compute_stat(self, env_mat: Dict[str, torch.Tensor]) -> Dict[str, StatItem]: Returns ------- - Dict[str, StatItem] + dict[str, StatItem] The statistics of the environment matrix. """ stats = {} @@ -78,18 +77,18 @@ def __init__(self, descriptor: "DescriptorBlock"): ) # se_r=1, se_a=4 def iter( - self, data: List[Dict[str, Union[torch.Tensor, List[Tuple[int, int]]]]] - ) -> Iterator[Dict[str, StatItem]]: + self, data: list[dict[str, Union[torch.Tensor, list[tuple[int, int]]]]] + ) -> Iterator[dict[str, StatItem]]: """Get the iterator of the environment matrix. Parameters ---------- - data : List[Dict[str, Union[torch.Tensor, List[Tuple[int, int]]]]] + data : list[dict[str, Union[torch.Tensor, list[tuple[int, int]]]]] The data. Yields ------ - Dict[str, StatItem] + dict[str, StatItem] The statistics of the environment matrix. """ zero_mean = torch.zeros( diff --git a/deepmd/pt/utils/exclude_mask.py b/deepmd/pt/utils/exclude_mask.py index c3f3f8eb2f..a5de969c07 100644 --- a/deepmd/pt/utils/exclude_mask.py +++ b/deepmd/pt/utils/exclude_mask.py @@ -1,9 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - List, - Set, - Tuple, -) import numpy as np import torch @@ -19,7 +14,7 @@ class AtomExcludeMask(torch.nn.Module): def __init__( self, ntypes: int, - exclude_types: List[int] = [], + exclude_types: list[int] = [], ): super().__init__() self.reinit(ntypes, exclude_types) @@ -27,7 +22,7 @@ def __init__( def reinit( self, ntypes: int, - exclude_types: List[int] = [], + exclude_types: list[int] = [], ): self.ntypes = ntypes self.exclude_types = exclude_types @@ -72,7 +67,7 @@ class PairExcludeMask(torch.nn.Module): def __init__( self, ntypes: int, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): super().__init__() self.reinit(ntypes, exclude_types) @@ -80,10 +75,10 @@ def __init__( def reinit( self, ntypes: int, - exclude_types: List[Tuple[int, int]] = [], + exclude_types: list[tuple[int, int]] = [], ): self.ntypes = ntypes - self._exclude_types: Set[Tuple[int, int]] = set() + self._exclude_types: set[tuple[int, int]] = set() for tt in exclude_types: assert len(tt) == 2 self._exclude_types.add((tt[0], tt[1])) diff --git a/deepmd/pt/utils/neighbor_stat.py b/deepmd/pt/utils/neighbor_stat.py index d5b5c74bdc..d427dc758a 100644 --- a/deepmd/pt/utils/neighbor_stat.py +++ b/deepmd/pt/utils/neighbor_stat.py @@ -1,8 +1,9 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( +from collections.abc import ( Iterator, +) +from typing import ( Optional, - Tuple, ) import numpy as np @@ -52,7 +53,7 @@ def forward( coord: torch.Tensor, atype: torch.Tensor, cell: Optional[torch.Tensor], - ) -> Tuple[torch.Tensor, torch.Tensor]: + ) -> tuple[torch.Tensor, torch.Tensor]: """Calculate the neareest neighbor distance between atoms, maximum nbor size of atoms and the output data range of the environment matrix. @@ -139,7 +140,7 @@ def __init__( def iterator( self, data: DeepmdDataSystem - ) -> Iterator[Tuple[np.ndarray, float, str]]: + ) -> Iterator[tuple[np.ndarray, float, str]]: """Abstract method for producing data. Yields diff --git a/deepmd/pt/utils/nlist.py b/deepmd/pt/utils/nlist.py index b34c43378c..a4f81a23a5 100644 --- a/deepmd/pt/utils/nlist.py +++ b/deepmd/pt/utils/nlist.py @@ -1,7 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - Dict, - List, Optional, Union, ) @@ -21,7 +19,7 @@ def extend_input_and_build_neighbor_list( coord, atype, rcut: float, - sel: List[int], + sel: list[int], mixed_types: bool = False, box: Optional[torch.Tensor] = None, ): @@ -55,7 +53,7 @@ def build_neighbor_list( atype: torch.Tensor, nloc: int, rcut: float, - sel: Union[int, List[int]], + sel: Union[int, list[int]], distinguish_types: bool = True, ) -> torch.Tensor: """Build neightbor list for a single frame. keeps nsel neighbors. @@ -71,7 +69,7 @@ def build_neighbor_list( number of local atoms. rcut : float cut-off radius - sel : int or List[int] + sel : int or list[int] maximal number of neighbors (of each type). if distinguish_types==True, nsel should be list and the length of nsel should be equal to number of @@ -137,7 +135,7 @@ def _trim_mask_distinguish_nlist( rr: torch.Tensor, nlist: torch.Tensor, rcut: float, - sel: List[int], + sel: list[int], distinguish_types: bool, ) -> torch.Tensor: """Trim the size of nlist, mask if any central atom is virtual, distinguish types if necessary.""" @@ -178,7 +176,7 @@ def build_directional_neighbor_list( coord_neig: torch.Tensor, atype_neig: torch.Tensor, rcut: float, - sel: Union[int, List[int]], + sel: Union[int, list[int]], distinguish_types: bool = True, ) -> torch.Tensor: """Build directional neighbor list. @@ -205,7 +203,7 @@ def build_directional_neighbor_list( if type < 0 the atom is treated as virtual atoms. rcut : float cut-off radius - sel : int or List[int] + sel : int or list[int] maximal number of neighbors (of each type). if distinguish_types==True, nsel should be list and the length of nsel should be equal to number of @@ -277,7 +275,7 @@ def build_directional_neighbor_list( def nlist_distinguish_types( nlist: torch.Tensor, atype: torch.Tensor, - sel: List[int], + sel: list[int], ): """Given a nlist that does not distinguish atom types, return a nlist that distinguish atom types. @@ -327,9 +325,9 @@ def get_multiple_nlist_key( def build_multiple_neighbor_list( coord: torch.Tensor, nlist: torch.Tensor, - rcuts: List[float], - nsels: List[int], -) -> Dict[str, torch.Tensor]: + rcuts: list[float], + nsels: list[int], +) -> dict[str, torch.Tensor]: """Input one neighbor list, and produce multiple neighbor lists with different cutoff radius and numbers of selection out of it. The required rcuts and nsels should be smaller or equal to the input nlist. @@ -341,14 +339,14 @@ def build_multiple_neighbor_list( nlist : torch.Tensor Neighbor list of shape [batch_size, nloc, nsel], the neighbors should be stored in an ascending order. - rcuts : List[float] + rcuts : list[float] list of cut-off radius in ascending order. - nsels : List[int] + nsels : list[int] maximal number of neighbors in ascending order. Returns ------- - nlist_dict : Dict[str, torch.Tensor] + nlist_dict : dict[str, torch.Tensor] A dict of nlists, key given by get_multiple_nlist_key(rc, nsel) value being the corresponding nlist. diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py index 58e02f436d..23fb12f2a4 100644 --- a/deepmd/pt/utils/stat.py +++ b/deepmd/pt/utils/stat.py @@ -5,8 +5,6 @@ ) from typing import ( Callable, - Dict, - List, Optional, Union, ) @@ -89,7 +87,7 @@ def make_stat_input(datasets, dataloaders, nbatches): def _restore_from_file( stat_file_path: DPPath, - keys: List[str] = ["energy"], + keys: list[str] = ["energy"], ) -> Optional[dict]: if stat_file_path is None: return None, None @@ -147,8 +145,8 @@ def _post_process_stat( def _compute_model_predict( - sampled: Union[Callable[[], List[dict]], List[dict]], - keys: List[str], + sampled: Union[Callable[[], list[dict]], list[dict]], + keys: list[str], model_forward: Callable[..., torch.Tensor], ): auto_batch_size = AutoBatchSize() @@ -187,7 +185,7 @@ def model_forward_auto_batch_size(*args, **kwargs): def _make_preset_out_bias( ntypes: int, - ibias: List[Optional[np.ndarray]], + ibias: list[Optional[np.ndarray]], ) -> Optional[np.ndarray]: """Make preset out bias. @@ -237,12 +235,12 @@ def _fill_stat_with_global( def compute_output_stats( - merged: Union[Callable[[], List[dict]], List[dict]], + merged: Union[Callable[[], list[dict]], list[dict]], ntypes: int, - keys: Union[str, List[str]] = ["energy"], + keys: Union[str, list[str]] = ["energy"], stat_file_path: Optional[DPPath] = None, rcond: Optional[float] = None, - preset_bias: Optional[Dict[str, List[Optional[np.ndarray]]]] = None, + preset_bias: Optional[dict[str, list[Optional[np.ndarray]]]] = None, model_forward: Optional[Callable[..., torch.Tensor]] = None, atomic_output: Optional[FittingOutputDef] = None, ): @@ -251,11 +249,11 @@ def compute_output_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. ntypes : int @@ -264,7 +262,7 @@ def compute_output_stats( The path to the stat file. rcond : float, optional The condition number for the regression of atomic energy. - preset_bias : Dict[str, List[Optional[np.ndarray]]], optional + preset_bias : dict[str, list[Optional[np.ndarray]]], optional Specifying atomic energy contribution in vacuum. Given by key:value pairs. The value is a list specifying the bias. the elements can be None or np.ndarray of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] @@ -401,12 +399,12 @@ def compute_output_stats( def compute_output_stats_global( - sampled: List[dict], + sampled: list[dict], ntypes: int, - keys: List[str], + keys: list[str], rcond: Optional[float] = None, - preset_bias: Optional[Dict[str, List[Optional[np.ndarray]]]] = None, - model_pred: Optional[Dict[str, np.ndarray]] = None, + preset_bias: Optional[dict[str, list[Optional[np.ndarray]]]] = None, + model_pred: Optional[dict[str, np.ndarray]] = None, atomic_output: Optional[FittingOutputDef] = None, ): """This function only handle stat computation from reduced global labels.""" @@ -526,10 +524,10 @@ def rmse(x): def compute_output_stats_atomic( - sampled: List[dict], + sampled: list[dict], ntypes: int, - keys: List[str], - model_pred: Optional[Dict[str, np.ndarray]] = None, + keys: list[str], + model_pred: Optional[dict[str, np.ndarray]] = None, ): # get label dict from sample; for each key, only picking the system with atomic labels. outputs = { diff --git a/deepmd/pt/utils/update_sel.py b/deepmd/pt/utils/update_sel.py index 7f42a9f91c..e8c40e2626 100644 --- a/deepmd/pt/utils/update_sel.py +++ b/deepmd/pt/utils/update_sel.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Type, -) from deepmd.pt.utils.neighbor_stat import ( NeighborStat, @@ -13,5 +10,5 @@ class UpdateSel(BaseUpdateSel): @property - def neighbor_stat(self) -> Type[NeighborStat]: + def neighbor_stat(self) -> type[NeighborStat]: return NeighborStat diff --git a/deepmd/pt/utils/utils.py b/deepmd/pt/utils/utils.py index 9ccdbfdb5d..43b82efcc1 100644 --- a/deepmd/pt/utils/utils.py +++ b/deepmd/pt/utils/utils.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, Union, overload, @@ -123,7 +122,7 @@ def dict_to_device(sample_dict): XSHIFT = 16 -def hashmix(value: int, hash_const: List[int]): +def hashmix(value: int, hash_const: list[int]): value ^= INIT_A hash_const[0] *= MULT_A value *= INIT_A @@ -142,7 +141,7 @@ def mix(x: int, y: int): return result -def mix_entropy(entropy_array: List[int]) -> int: +def mix_entropy(entropy_array: list[int]) -> int: # https://github.com/numpy/numpy/blob/a4cddb60489f821a1a4dffc16cd5c69755d43bdb/numpy/random/bit_generator.pyx#L341-L374 hash_const = [INIT_A] mixer = hashmix(entropy_array[0], hash_const) @@ -152,7 +151,7 @@ def mix_entropy(entropy_array: List[int]) -> int: def get_generator( - seed: Optional[Union[int, List[int]]] = None, + seed: Optional[Union[int, list[int]]] = None, ) -> Optional[torch.Generator]: if seed is not None: if isinstance(seed, list): diff --git a/deepmd/tf/cluster/__init__.py b/deepmd/tf/cluster/__init__.py index 6735ce92f4..0f8916038d 100644 --- a/deepmd/tf/cluster/__init__.py +++ b/deepmd/tf/cluster/__init__.py @@ -2,9 +2,7 @@ """Module that reads node resources, auto detects if running local or on SLURM.""" from typing import ( - List, Optional, - Tuple, ) from .local import get_resource as get_local_res @@ -12,12 +10,12 @@ __all__ = ["get_resource"] -def get_resource() -> Tuple[str, List[str], Optional[List[int]]]: +def get_resource() -> tuple[str, list[str], Optional[list[int]]]: """Get local or slurm resources: nodename, nodelist, and gpus. Returns ------- - Tuple[str, List[str], Optional[List[int]]] + tuple[str, list[str], Optional[list[int]]] nodename, nodelist, and gpus """ return get_local_res() diff --git a/deepmd/tf/cluster/local.py b/deepmd/tf/cluster/local.py index 009a182e55..a9392bd326 100644 --- a/deepmd/tf/cluster/local.py +++ b/deepmd/tf/cluster/local.py @@ -4,9 +4,7 @@ import subprocess as sp import sys from typing import ( - List, Optional, - Tuple, ) from deepmd.tf.env import ( @@ -25,7 +23,7 @@ def get_gpus(): Returns ------- - Optional[List[int]] + Optional[list[int]] List of available GPU IDs. Otherwise, None. """ if not tf.test.is_built_with_cuda() and not ( @@ -51,12 +49,12 @@ def get_gpus(): return list(range(num_gpus)) if num_gpus > 0 else None -def get_resource() -> Tuple[str, List[str], Optional[List[int]]]: +def get_resource() -> tuple[str, list[str], Optional[list[int]]]: """Get local resources: nodename, nodelist, and gpus. Returns ------- - Tuple[str, List[str], Optional[List[int]]] + tuple[str, list[str], Optional[list[int]]] nodename, nodelist, and gpus """ nodename, nodelist = get_host_names() diff --git a/deepmd/tf/descriptor/descriptor.py b/deepmd/tf/descriptor/descriptor.py index 2bef63fa5e..ba54ca1309 100644 --- a/deepmd/tf/descriptor/descriptor.py +++ b/deepmd/tf/descriptor/descriptor.py @@ -4,11 +4,7 @@ ) from typing import ( Any, - Dict, - List, Optional, - Set, - Tuple, ) import numpy as np @@ -111,7 +107,7 @@ def get_dim_rot_mat_1(self) -> int: """ raise NotImplementedError - def get_nlist(self) -> Tuple[tf.Tensor, tf.Tensor, List[int], List[int]]: + def get_nlist(self) -> tuple[tf.Tensor, tf.Tensor, list[int], list[int]]: """Returns neighbor information. Returns @@ -130,12 +126,12 @@ def get_nlist(self) -> Tuple[tf.Tensor, tf.Tensor, List[int], List[int]]: @abstractmethod def compute_input_stats( self, - data_coord: List[np.ndarray], - data_box: List[np.ndarray], - data_atype: List[np.ndarray], - natoms_vec: List[np.ndarray], - mesh: List[np.ndarray], - input_dict: Dict[str, List[np.ndarray]], + data_coord: list[np.ndarray], + data_box: list[np.ndarray], + data_atype: list[np.ndarray], + natoms_vec: list[np.ndarray], + mesh: list[np.ndarray], + input_dict: dict[str, list[np.ndarray]], **kwargs, ) -> None: """Compute the statisitcs (avg and std) of the training data. The input will be @@ -175,7 +171,7 @@ def build( natoms: tf.Tensor, box_: tf.Tensor, mesh: tf.Tensor, - input_dict: Dict[str, Any], + input_dict: dict[str, Any], reuse: Optional[bool] = None, suffix: str = "", ) -> tf.Tensor: @@ -275,7 +271,7 @@ def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: @abstractmethod def prod_force_virial( self, atom_ener: tf.Tensor, natoms: tf.Tensor - ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + ) -> tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """Compute force and virial. Parameters @@ -323,7 +319,7 @@ def init_variables( f"Descriptor {type(self).__name__} doesn't support initialization from the given variables!" ) - def get_tensor_names(self, suffix: str = "") -> Tuple[str]: + def get_tensor_names(self, suffix: str = "") -> tuple[str]: """Get names of tensors. Parameters @@ -333,7 +329,7 @@ def get_tensor_names(self, suffix: str = "") -> Tuple[str]: Returns ------- - Tuple[str] + tuple[str] Names of tensors """ raise NotImplementedError( @@ -362,9 +358,9 @@ def pass_tensors_from_frz_model( def build_type_exclude_mask( self, - exclude_types: Set[Tuple[int, int]], + exclude_types: set[tuple[int, int]], ntypes: int, - sel: List[int], + sel: list[int], ndescrpt: int, atype: tf.Tensor, shape0: tf.Tensor, @@ -391,12 +387,12 @@ def build_type_exclude_mask( Parameters ---------- - exclude_types : List[Tuple[int, int]] + exclude_types : list[tuple[int, int]] The list of excluded types, e.g. [(0, 1), (1, 0)] means the interaction between type 0 and type 1 is excluded. ntypes : int The number of types. - sel : List[int] + sel : list[int] The list of the number of selected neighbors for each type. ndescrpt : int The number of descriptors for each atom. @@ -469,9 +465,9 @@ def explicit_ntypes(self) -> bool: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -535,6 +531,6 @@ def serialize(self, suffix: str = "") -> dict: raise NotImplementedError(f"Not implemented in class {self.__name__}") @property - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" return [] diff --git a/deepmd/tf/descriptor/hybrid.py b/deepmd/tf/descriptor/hybrid.py index fe4fc2ae6a..e4458476c8 100644 --- a/deepmd/tf/descriptor/hybrid.py +++ b/deepmd/tf/descriptor/hybrid.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Any, - Dict, - List, Optional, - Tuple, Union, ) @@ -41,14 +38,14 @@ class DescrptHybrid(Descriptor): Parameters ---------- - list : list : List[Union[Descriptor, Dict[str, Any]]] + list : list : list[Union[Descriptor, dict[str, Any]]] Build a descriptor from the concatenation of the list of descriptors. The descriptor can be either an object or a dictionary. """ def __init__( self, - list: List[Union[Descriptor, Dict[str, Any]]], + list: list[Union[Descriptor, dict[str, Any]]], ntypes: Optional[int] = None, spin: Optional[Spin] = None, **kwargs, @@ -93,7 +90,7 @@ def get_dim_out(self) -> int: def get_nlist( self, - ) -> Tuple[tf.Tensor, tf.Tensor, List[int], List[int]]: + ) -> tuple[tf.Tensor, tf.Tensor, list[int], list[int]]: """Get the neighbor information of the descriptor, returns the nlist of the descriptor with the largest cut-off radius. @@ -111,7 +108,7 @@ def get_nlist( maxr_idx = np.argmax([ii.get_rcut() for ii in self.descrpt_list]) return self.get_nlist_i(maxr_idx) - def get_nlist_i(self, ii: int) -> Tuple[tf.Tensor, tf.Tensor, List[int], List[int]]: + def get_nlist_i(self, ii: int) -> tuple[tf.Tensor, tf.Tensor, list[int], list[int]]: """Get the neighbor information of the ii-th descriptor. Parameters @@ -275,7 +272,7 @@ def build( def prod_force_virial( self, atom_ener: tf.Tensor, natoms: tf.Tensor - ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + ) -> tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """Compute force and virial. Parameters @@ -385,7 +382,7 @@ def init_variables( for idx, ii in enumerate(self.descrpt_list): ii.init_variables(graph, graph_def, suffix=f"{suffix}_{idx}") - def get_tensor_names(self, suffix: str = "") -> Tuple[str]: + def get_tensor_names(self, suffix: str = "") -> tuple[str]: """Get names of tensors. Parameters @@ -395,7 +392,7 @@ def get_tensor_names(self, suffix: str = "") -> Tuple[str]: Returns ------- - Tuple[str] + tuple[str] Names of tensors """ tensor_names = [] @@ -429,9 +426,9 @@ def explicit_ntypes(self) -> bool: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/tf/descriptor/loc_frame.py b/deepmd/tf/descriptor/loc_frame.py index 4891c5a55f..74ba755b4c 100644 --- a/deepmd/tf/descriptor/loc_frame.py +++ b/deepmd/tf/descriptor/loc_frame.py @@ -1,8 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, - Tuple, ) import numpy as np @@ -60,9 +58,9 @@ class DescrptLocFrame(Descriptor): def __init__( self, rcut: float, - sel_a: List[int], - sel_r: List[int], - axis_rule: List[int], + sel_a: list[int], + sel_r: list[int], + axis_rule: list[int], **kwargs, ) -> None: """Constructor.""" @@ -142,7 +140,7 @@ def get_dim_out(self) -> int: """Returns the output dimension of this descriptor.""" return self.ndescrpt - def get_nlist(self) -> Tuple[tf.Tensor, tf.Tensor, List[int], List[int]]: + def get_nlist(self) -> tuple[tf.Tensor, tf.Tensor, list[int], list[int]]: """Returns ------- nlist @@ -320,7 +318,7 @@ def get_rot_mat(self) -> tf.Tensor: def prod_force_virial( self, atom_ener: tf.Tensor, natoms: tf.Tensor - ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + ) -> tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """Compute force and virial. Parameters @@ -437,9 +435,9 @@ def init_variables( def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/tf/descriptor/se.py b/deepmd/tf/descriptor/se.py index f5f54550f2..319a65f6da 100644 --- a/deepmd/tf/descriptor/se.py +++ b/deepmd/tf/descriptor/se.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import re from typing import ( - List, Optional, - Set, - Tuple, ) from deepmd.dpmodel.utils.network import ( @@ -80,7 +77,7 @@ def _identity_tensors(self, suffix: str = "") -> None: self.rij = tf.identity(self.rij, name="o_rij" + suffix) self.nlist = tf.identity(self.nlist, name="o_nlist" + suffix) - def get_tensor_names(self, suffix: str = "") -> Tuple[str]: + def get_tensor_names(self, suffix: str = "") -> tuple[str]: """Get names of tensors. Parameters @@ -90,7 +87,7 @@ def get_tensor_names(self, suffix: str = "") -> Tuple[str]: Returns ------- - Tuple[str] + tuple[str] Names of tensors """ return ( @@ -157,9 +154,9 @@ def precision(self) -> tf.DType: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -190,11 +187,11 @@ def serialize_network( ntypes: int, ndim: int, in_dim: int, - neuron: List[int], + neuron: list[int], activation_function: str, resnet_dt: bool, variables: dict, - excluded_types: Set[Tuple[int, int]] = set(), + excluded_types: set[tuple[int, int]] = set(), suffix: str = "", ) -> dict: """Serialize network. @@ -207,7 +204,7 @@ def serialize_network( The dimension of elements in_dim : int The input dimension - neuron : List[int] + neuron : list[int] The neuron list activation_function : str The activation function @@ -215,7 +212,7 @@ def serialize_network( Whether to use resnet variables : dict The input variables - excluded_types : Set[Tuple[int, int]], optional + excluded_types : set[tuple[int, int]], optional The excluded types suffix : str, optional The suffix of the scope diff --git a/deepmd/tf/descriptor/se_a.py b/deepmd/tf/descriptor/se_a.py index 721e8e71d1..d5a8ed6815 100644 --- a/deepmd/tf/descriptor/se_a.py +++ b/deepmd/tf/descriptor/se_a.py @@ -1,8 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, - Tuple, ) import numpy as np @@ -141,7 +139,7 @@ class DescrptSeA(DescrptSe): Random seed for initializing the network parameters. type_one_side Try to build N_types embedding nets. Otherwise, building N_types^2 embedding nets - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. set_davg_zero @@ -154,7 +152,7 @@ class DescrptSeA(DescrptSe): Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed env_protection: float Protection parameter to prevent division by zero errors during environment matrix calculations. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. References @@ -169,21 +167,21 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, seed: Optional[int] = None, type_one_side: bool = True, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], set_davg_zero: bool = False, activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, spin: Optional[Spin] = None, tebd_input_mode: str = "concat", - type_map: Optional[List[str]] = None, # to be compat with input + type_map: Optional[list[str]] = None, # to be compat with input env_protection: float = 0.0, # not implement!! **kwargs, ) -> None: @@ -327,7 +325,7 @@ def get_dim_rot_mat_1(self) -> int: """Returns the first dimension of the rotation matrix. The rotation is of shape dim_1 x 3.""" return self.filter_neuron[-1] - def get_nlist(self) -> Tuple[tf.Tensor, tf.Tensor, List[int], List[int]]: + def get_nlist(self) -> tuple[tf.Tensor, tf.Tensor, list[int], list[int]]: """Returns neighbor information. Returns @@ -696,7 +694,7 @@ def get_rot_mat(self) -> tf.Tensor: def prod_force_virial( self, atom_ener: tf.Tensor, natoms: tf.Tensor - ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + ) -> tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """Compute force and virial. Parameters diff --git a/deepmd/tf/descriptor/se_a_ebd.py b/deepmd/tf/descriptor/se_a_ebd.py index c558cd285e..ae76308e69 100644 --- a/deepmd/tf/descriptor/se_a_ebd.py +++ b/deepmd/tf/descriptor/se_a_ebd.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, ) @@ -65,7 +64,7 @@ class DescrptSeAEbd(DescrptSeA): The activation function in the embedding net. Supported options are {0} precision The precision of the embedding net parameters. Supported options are {1} - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. """ @@ -74,8 +73,8 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, @@ -87,7 +86,7 @@ def __init__( set_davg_zero: bool = False, activation_function: str = "tanh", precision: str = "default", - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], **kwargs, ) -> None: """Constructor.""" @@ -600,7 +599,7 @@ def _ebd_filter( return result, qmat @property - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" data_requirement = super().input_requirement if self.numb_aparam > 0: diff --git a/deepmd/tf/descriptor/se_a_ebd_v2.py b/deepmd/tf/descriptor/se_a_ebd_v2.py index 9afa6598d1..af43eedbbc 100644 --- a/deepmd/tf/descriptor/se_a_ebd_v2.py +++ b/deepmd/tf/descriptor/se_a_ebd_v2.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging from typing import ( - List, Optional, ) @@ -31,14 +30,14 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, seed: Optional[int] = None, type_one_side: bool = True, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], set_davg_zero: bool = False, activation_function: str = "tanh", precision: str = "default", diff --git a/deepmd/tf/descriptor/se_a_ef.py b/deepmd/tf/descriptor/se_a_ef.py index 81f4c8955a..9f70464c56 100644 --- a/deepmd/tf/descriptor/se_a_ef.py +++ b/deepmd/tf/descriptor/se_a_ef.py @@ -1,8 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, - Tuple, ) import numpy as np @@ -57,7 +55,7 @@ class DescrptSeAEf(DescrptSe): Random seed for initializing the network parameters. type_one_side Try to build N_types embedding nets. Otherwise, building N_types^2 embedding nets - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. set_davg_zero @@ -74,14 +72,14 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, seed: Optional[int] = None, type_one_side: bool = True, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], set_davg_zero: bool = False, activation_function: str = "tanh", precision: str = "default", @@ -144,7 +142,7 @@ def get_rot_mat(self) -> tf.Tensor: """Get rotational matrix.""" return self.qmat - def get_nlist(self) -> Tuple[tf.Tensor, tf.Tensor, List[int], List[int]]: + def get_nlist(self) -> tuple[tf.Tensor, tf.Tensor, list[int], list[int]]: """Returns neighbor information. Returns @@ -267,7 +265,7 @@ def build( def prod_force_virial( self, atom_ener: tf.Tensor, natoms: tf.Tensor - ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + ) -> tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """Compute force and virial. Parameters @@ -305,14 +303,14 @@ def __init__( op, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, seed: Optional[int] = None, type_one_side: bool = True, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], set_davg_zero: bool = False, activation_function: str = "tanh", precision: str = "default", @@ -586,7 +584,7 @@ def _compute_dstats_sys_smth( return sysr, sysr2, sysa, sysa2, sysn @property - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" data_requirement = super().input_requirement data_requirement.append( diff --git a/deepmd/tf/descriptor/se_a_mask.py b/deepmd/tf/descriptor/se_a_mask.py index 316a909be1..e12f6a0fff 100644 --- a/deepmd/tf/descriptor/se_a_mask.py +++ b/deepmd/tf/descriptor/se_a_mask.py @@ -2,10 +2,7 @@ import warnings from typing import ( Any, - Dict, - List, Optional, - Tuple, ) import numpy as np @@ -100,7 +97,7 @@ class DescrptSeAMask(DescrptSeA): Random seed for initializing the network parameters. type_one_side Try to build N_types embedding nets. Otherwise, building N_types^2 embedding nets - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. activation_function @@ -120,13 +117,13 @@ class DescrptSeAMask(DescrptSeA): def __init__( self, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, type_one_side: bool = False, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], seed: Optional[int] = None, activation_function: str = "tanh", precision: str = "default", @@ -271,7 +268,7 @@ def build( natoms: tf.Tensor, box_: tf.Tensor, mesh: tf.Tensor, - input_dict: Dict[str, Any], + input_dict: dict[str, Any], reuse: Optional[bool] = None, suffix: str = "", ) -> tf.Tensor: @@ -384,7 +381,7 @@ def prod_force_virial( self, atom_ener: tf.Tensor, natoms: tf.Tensor, - ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + ) -> tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """Compute force and virial. Parameters @@ -430,9 +427,9 @@ def prod_force_virial( def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters diff --git a/deepmd/tf/descriptor/se_atten.py b/deepmd/tf/descriptor/se_atten.py index 37bcd7eea0..963e81ecf0 100644 --- a/deepmd/tf/descriptor/se_atten.py +++ b/deepmd/tf/descriptor/se_atten.py @@ -4,10 +4,7 @@ import warnings from typing import ( Any, - List, Optional, - Set, - Tuple, Union, ) @@ -125,7 +122,7 @@ class DescrptSeAtten(DescrptSeA): If 'False', type embeddings of both neighbor and central atoms are considered. If 'True', only type embeddings of neighbor atoms are considered. Default is 'False'. - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. set_davg_zero: bool @@ -162,7 +159,7 @@ class DescrptSeAtten(DescrptSeA): Setting this parameter to `True` is equivalent to setting `tebd_input_mode` to 'strip'. Setting it to `False` is equivalent to setting `tebd_input_mode` to 'concat'. The default value is `None`, which means the `tebd_input_mode` setting will be used instead. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. Raises @@ -175,16 +172,16 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, - neuron: List[int] = [25, 50, 100], + neuron: list[int] = [25, 50, 100], axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, seed: Optional[int] = None, type_one_side: bool = True, set_davg_zero: bool = True, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, @@ -203,7 +200,7 @@ def __init__( concat_output_tebd: bool = True, env_protection: float = 0.0, # not implement!! stripped_type_embedding: Optional[bool] = None, - type_map: Optional[List[str]] = None, # to be compat with input + type_map: Optional[list[str]] = None, # to be compat with input **kwargs, ) -> None: # Ensure compatibility with the deprecated stripped_type_embedding option. @@ -1420,9 +1417,9 @@ def compat_ln_pattern(old_key): def build_type_exclude_mask_mixed( self, - exclude_types: Set[Tuple[int, int]], + exclude_types: set[tuple[int, int]], ntypes: int, - sel: List[int], + sel: list[int], ndescrpt: int, atype: tf.Tensor, shape0: tf.Tensor, @@ -1441,12 +1438,12 @@ def build_type_exclude_mask_mixed( Parameters ---------- - exclude_types : List[Tuple[int, int]] + exclude_types : list[tuple[int, int]] The list of excluded types, e.g. [(0, 1), (1, 0)] means the interaction between type 0 and type 1 is excluded. ntypes : int The number of types. - sel : List[int] + sel : list[int] The list of the number of selected neighbors for each type. ndescrpt : int The number of descriptors for each atom. @@ -1511,9 +1508,9 @@ def explicit_ntypes(self) -> bool: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -1646,7 +1643,7 @@ def serialize_network_strip( ntypes: int, ndim: int, in_dim: int, - neuron: List[int], + neuron: list[int], activation_function: str, resnet_dt: bool, variables: dict, @@ -1663,7 +1660,7 @@ def serialize_network_strip( The dimension of elements in_dim : int The input dimension - neuron : List[int] + neuron : list[int] The neuron list activation_function : str The activation function @@ -2055,7 +2052,7 @@ class DescrptDPA1Compat(DescrptSeAtten): attn_mask: bool (Only support False to keep consistent with other backend references.) If mask the diagonal of attention weights - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. env_protection: float @@ -2088,7 +2085,7 @@ class DescrptDPA1Compat(DescrptSeAtten): Whether to use electronic configuration type embedding. use_tebd_bias : bool, Optional Whether to use bias in the type embedding layer. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. spin (Only support None to keep consistent with old implementation.) @@ -2099,9 +2096,9 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: Union[List[int], int], + sel: Union[list[int], int], ntypes: int, - neuron: List[int] = [25, 50, 100], + neuron: list[int] = [25, 50, 100], axis_neuron: int = 8, tebd_dim: int = 8, tebd_input_mode: str = "concat", @@ -2112,7 +2109,7 @@ def __init__( attn_layer: int = 2, attn_dotr: bool = True, attn_mask: bool = False, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], env_protection: float = 0.0, set_davg_zero: bool = False, activation_function: str = "tanh", @@ -2126,7 +2123,7 @@ def __init__( concat_output_tebd: bool = True, use_econf_tebd: bool = False, use_tebd_bias: bool = False, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, spin: Optional[Any] = None, # consistent with argcheck, not used though seed: Optional[int] = None, diff --git a/deepmd/tf/descriptor/se_atten_v2.py b/deepmd/tf/descriptor/se_atten_v2.py index a4fdf24a55..dc71f87523 100644 --- a/deepmd/tf/descriptor/se_atten_v2.py +++ b/deepmd/tf/descriptor/se_atten_v2.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging from typing import ( - List, Optional, ) @@ -44,7 +43,7 @@ class DescrptSeAttenV2(DescrptSeAtten): Random seed for initializing the network parameters. type_one_side Try to build N_types embedding nets. Otherwise, building N_types^2 embedding nets - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. set_davg_zero @@ -71,14 +70,14 @@ def __init__( rcut_smth: float, sel: int, ntypes: int, - neuron: List[int] = [24, 48, 96], + neuron: list[int] = [24, 48, 96], axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, seed: Optional[int] = None, type_one_side: bool = True, set_davg_zero: bool = False, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, diff --git a/deepmd/tf/descriptor/se_r.py b/deepmd/tf/descriptor/se_r.py index cd99651314..8096ef7c96 100644 --- a/deepmd/tf/descriptor/se_r.py +++ b/deepmd/tf/descriptor/se_r.py @@ -1,8 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, - Tuple, ) import numpy as np @@ -76,7 +74,7 @@ class DescrptSeR(DescrptSe): Random seed for initializing the network parameters. type_one_side Try to build N_types embedding nets. Otherwise, building N_types^2 embedding nets - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. activation_function @@ -85,7 +83,7 @@ class DescrptSeR(DescrptSe): The precision of the embedding net parameters. Supported options are |PRECISION| uniform_seed Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -93,19 +91,19 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], resnet_dt: bool = False, trainable: bool = True, seed: Optional[int] = None, type_one_side: bool = True, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], set_davg_zero: bool = False, activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, spin: Optional[Spin] = None, - type_map: Optional[List[str]] = None, # to be compat with input + type_map: Optional[list[str]] = None, # to be compat with input env_protection: float = 0.0, # not implement!! **kwargs, ) -> None: @@ -488,7 +486,7 @@ def build( def prod_force_virial( self, atom_ener: tf.Tensor, natoms: tf.Tensor - ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + ) -> tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """Compute force and virial. Parameters diff --git a/deepmd/tf/descriptor/se_t.py b/deepmd/tf/descriptor/se_t.py index d5f5e2ab8a..f96b1ba778 100644 --- a/deepmd/tf/descriptor/se_t.py +++ b/deepmd/tf/descriptor/se_t.py @@ -1,10 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import re from typing import ( - List, Optional, - Set, - Tuple, ) import numpy as np @@ -90,7 +87,7 @@ class DescrptSeT(DescrptSe): Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed env_protection: float Protection parameter to prevent division by zero errors during environment matrix calculations. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -98,17 +95,17 @@ def __init__( self, rcut: float, rcut_smth: float, - sel: List[int], - neuron: List[int] = [24, 48, 96], + sel: list[int], + neuron: list[int] = [24, 48, 96], resnet_dt: bool = False, trainable: bool = True, seed: Optional[int] = None, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], set_davg_zero: bool = False, activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, - type_map: Optional[List[str]] = None, # to be compat with input + type_map: Optional[list[str]] = None, # to be compat with input env_protection: float = 0.0, # not implement!! **kwargs, ) -> None: @@ -212,7 +209,7 @@ def get_dim_out(self) -> int: """Returns the output dimension of this descriptor.""" return self.filter_neuron[-1] - def get_nlist(self) -> Tuple[tf.Tensor, tf.Tensor, List[int], List[int]]: + def get_nlist(self) -> tuple[tf.Tensor, tf.Tensor, list[int], list[int]]: """Returns neighbor information. Returns @@ -495,7 +492,7 @@ def build( def prod_force_virial( self, atom_ener: tf.Tensor, natoms: tf.Tensor - ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + ) -> tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """Compute force and virial. Parameters @@ -724,11 +721,11 @@ def serialize_network( ntypes: int, ndim: int, in_dim: int, - neuron: List[int], + neuron: list[int], activation_function: str, resnet_dt: bool, variables: dict, - excluded_types: Set[Tuple[int, int]] = set(), + excluded_types: set[tuple[int, int]] = set(), suffix: str = "", ) -> dict: """Serialize network. @@ -741,7 +738,7 @@ def serialize_network( The dimension of elements in_dim : int The input dimension - neuron : List[int] + neuron : list[int] The neuron list activation_function : str The activation function @@ -749,7 +746,7 @@ def serialize_network( Whether to use resnet variables : dict The input variables - excluded_types : Set[Tuple[int, int]], optional + excluded_types : set[tuple[int, int]], optional The excluded types suffix : str, optional The suffix of the scope diff --git a/deepmd/tf/entrypoints/freeze.py b/deepmd/tf/entrypoints/freeze.py index 787d26e9a4..cee6615abc 100755 --- a/deepmd/tf/entrypoints/freeze.py +++ b/deepmd/tf/entrypoints/freeze.py @@ -15,7 +15,6 @@ Path, ) from typing import ( - List, Optional, Union, ) @@ -80,7 +79,7 @@ def _make_node_names( modifier_type: Optional[str] = None, out_suffix: str = "", node_names: Optional[Union[str, list]] = None, -) -> List[str]: +) -> list[str]: """Get node names based on model type. Parameters @@ -96,7 +95,7 @@ def _make_node_names( Returns ------- - List[str] + list[str] list with all node names to freeze Raises @@ -238,7 +237,7 @@ def freeze_graph( The default session. input_graph : tf.GraphDef The input graph_def stored from the checkpoint. - input_node : List[str] + input_node : list[str] The expected nodes to freeze. freeze_type : str The model type to freeze. diff --git a/deepmd/tf/entrypoints/ipi.py b/deepmd/tf/entrypoints/ipi.py index 1631a35c2e..1183375119 100644 --- a/deepmd/tf/entrypoints/ipi.py +++ b/deepmd/tf/entrypoints/ipi.py @@ -4,9 +4,6 @@ import os import subprocess import sys -from typing import ( - List, -) from deepmd.tf.lmp import ( get_op_dir, @@ -15,7 +12,7 @@ ROOT_DIR = get_op_dir() -def _program(name: str, args: List[str]): +def _program(name: str, args: list[str]): """Execuate a program. Parameters diff --git a/deepmd/tf/entrypoints/main.py b/deepmd/tf/entrypoints/main.py index 493e5b7aa4..d9dff4eb4a 100644 --- a/deepmd/tf/entrypoints/main.py +++ b/deepmd/tf/entrypoints/main.py @@ -6,7 +6,6 @@ Path, ) from typing import ( - List, Optional, Union, ) @@ -39,12 +38,12 @@ __all__ = ["main", "parse_args", "get_ll", "main_parser"] -def main(args: Optional[Union[List[str], argparse.Namespace]] = None): +def main(args: Optional[Union[list[str], argparse.Namespace]] = None): """DeePMD-Kit entry point. Parameters ---------- - args : List[str] or argparse.Namespace, optional + args : list[str] or argparse.Namespace, optional list of command line arguments, used to avoid calling from the subprocess, as it is quite slow to import tensorflow; if Namespace is given, it will be used directly diff --git a/deepmd/tf/entrypoints/train.py b/deepmd/tf/entrypoints/train.py index 12a3c59d70..66622b3182 100755 --- a/deepmd/tf/entrypoints/train.py +++ b/deepmd/tf/entrypoints/train.py @@ -9,7 +9,6 @@ import time from typing import ( Any, - Dict, Optional, ) @@ -186,12 +185,12 @@ def train( _do_work(jdata, run_opt, is_compress) -def _do_work(jdata: Dict[str, Any], run_opt: RunOptions, is_compress: bool = False): +def _do_work(jdata: dict[str, Any], run_opt: RunOptions, is_compress: bool = False): """Run serial model training. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] arguments read form json/yaml control file run_opt : RunOptions object with run configuration diff --git a/deepmd/tf/entrypoints/transfer.py b/deepmd/tf/entrypoints/transfer.py index 7c90c77de8..b93caf3cac 100644 --- a/deepmd/tf/entrypoints/transfer.py +++ b/deepmd/tf/entrypoints/transfer.py @@ -3,10 +3,11 @@ import logging import re +from collections.abc import ( + Sequence, +) from typing import ( - Dict, Optional, - Sequence, ) import numpy as np @@ -234,7 +235,7 @@ def check_dim(raw_graph_node: tf.Tensor, old_graph_node: tf.Tensor, node_name: s ) -def load_transform_node(graph: tf.Graph) -> Dict[str, tf.Tensor]: +def load_transform_node(graph: tf.Graph) -> dict[str, tf.Tensor]: """Load nodes and their names from graph to dict. Parameters @@ -244,7 +245,7 @@ def load_transform_node(graph: tf.Graph) -> Dict[str, tf.Tensor]: Returns ------- - Dict[str, tf.Tensor] + dict[str, tf.Tensor] mapping on graph node names and corresponding tensors """ transform_node_pattern = re.compile(TRANSFER_PATTERN) diff --git a/deepmd/tf/fit/dipole.py b/deepmd/tf/fit/dipole.py index fd37b63720..0e5b860fa2 100644 --- a/deepmd/tf/fit/dipole.py +++ b/deepmd/tf/fit/dipole.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, ) @@ -47,12 +46,12 @@ class DipoleFittingSeA(Fitting): The dimension of the descrptor :math:`\mathcal{D}` embedding_width The rotation matrix dimension of the descrptor :math:`\mathcal{D}` - neuron : List[int] + neuron : list[int] Number of neurons in each hidden layer of the fitting net resnet_dt : bool Time-step `dt` in the resnet construction: y = x + dt * \phi (Wx + b) - sel_type : List[int] + sel_type : list[int] The atom types selected to have an atomic dipole prediction. If is None, all atoms are selected. seed : int Random seed for initializing the network parameters. @@ -65,7 +64,7 @@ class DipoleFittingSeA(Fitting): mixed_types : bool If true, use a uniform fitting net for all atom types, otherwise use different fitting nets for different atom types. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -74,15 +73,15 @@ def __init__( ntypes: int, dim_descrpt: int, embedding_width: int, - neuron: List[int] = [120, 120, 120], + neuron: list[int] = [120, 120, 120], resnet_dt: bool = True, - sel_type: Optional[List[int]] = None, + sel_type: Optional[list[int]] = None, seed: Optional[int] = None, activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, mixed_types: bool = False, - type_map: Optional[List[str]] = None, # to be compat with input + type_map: Optional[list[str]] = None, # to be compat with input **kwargs, ) -> None: """Constructor.""" diff --git a/deepmd/tf/fit/dos.py b/deepmd/tf/fit/dos.py index 382d11f45e..ebc347c2fd 100644 --- a/deepmd/tf/fit/dos.py +++ b/deepmd/tf/fit/dos.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging from typing import ( - List, Optional, ) @@ -100,7 +99,7 @@ class DOSFitting(Fitting): mixed_types : bool If true, use a uniform fitting net for all atom types, otherwise use different fitting nets for different atom types. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -108,21 +107,21 @@ def __init__( self, ntypes: int, dim_descrpt: int, - neuron: List[int] = [120, 120, 120], + neuron: list[int] = [120, 120, 120], resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, numb_dos: int = 300, rcond: Optional[float] = None, - trainable: Optional[List[bool]] = None, + trainable: Optional[list[bool]] = None, seed: Optional[int] = None, activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, - layer_name: Optional[List[Optional[str]]] = None, + layer_name: Optional[list[Optional[str]]] = None, use_aparam_as_mask: bool = False, mixed_types: bool = False, - type_map: Optional[List[str]] = None, # to be compat with input + type_map: Optional[list[str]] = None, # to be compat with input **kwargs, ) -> None: """Constructor.""" @@ -738,7 +737,7 @@ def serialize(self, suffix: str = "") -> dict: return data @property - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" data_requirement = [] if self.numb_fparam > 0: diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py index c2aef0610a..b01574cf87 100644 --- a/deepmd/tf/fit/ener.py +++ b/deepmd/tf/fit/ener.py @@ -2,7 +2,6 @@ import logging from typing import ( TYPE_CHECKING, - List, Optional, ) @@ -149,7 +148,7 @@ class EnerFitting(Fitting): mixed_types : bool If true, use a uniform fitting net for all atom types, otherwise use different fitting nets for different atom types. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -157,23 +156,23 @@ def __init__( self, ntypes: int, dim_descrpt: int, - neuron: List[int] = [120, 120, 120], + neuron: list[int] = [120, 120, 120], resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, rcond: Optional[float] = None, tot_ener_zero: bool = False, - trainable: Optional[List[bool]] = None, + trainable: Optional[list[bool]] = None, seed: Optional[int] = None, - atom_ener: List[float] = [], + atom_ener: list[float] = [], activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, - layer_name: Optional[List[Optional[str]]] = None, + layer_name: Optional[list[Optional[str]]] = None, use_aparam_as_mask: bool = False, spin: Optional[Spin] = None, mixed_types: bool = False, - type_map: Optional[List[str]] = None, # to be compat with input + type_map: Optional[list[str]] = None, # to be compat with input **kwargs, ) -> None: """Constructor.""" @@ -942,7 +941,7 @@ def serialize(self, suffix: str = "") -> dict: return data @property - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" data_requirement = [] if self.numb_fparam > 0: @@ -963,8 +962,8 @@ def input_requirement(self) -> List[DataRequirementItem]: def change_energy_bias_lower( data: DeepmdDataSystem, dp: DeepEval, - origin_type_map: List[str], - full_type_map: List[str], + origin_type_map: list[str], + full_type_map: list[str], bias_atom_e: np.ndarray, bias_adjust_mode="change-by-statistic", ntest=10, diff --git a/deepmd/tf/fit/fitting.py b/deepmd/tf/fit/fitting.py index 9190261187..f159de1628 100644 --- a/deepmd/tf/fit/fitting.py +++ b/deepmd/tf/fit/fitting.py @@ -4,7 +4,6 @@ abstractmethod, ) from typing import ( - List, Optional, ) @@ -131,7 +130,7 @@ def serialize_network( ntypes: int, ndim: int, in_dim: int, - neuron: List[int], + neuron: list[int], activation_function: str, resnet_dt: bool, variables: dict, @@ -148,7 +147,7 @@ def serialize_network( The dimension of elements in_dim : int The input dimension - neuron : List[int] + neuron : list[int] The neuron list activation_function : str The activation function @@ -257,6 +256,6 @@ def deserialize_network(cls, data: dict, suffix: str = "") -> dict: return fitting_net_variables @property - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" return [] diff --git a/deepmd/tf/fit/polar.py b/deepmd/tf/fit/polar.py index 14902a4d96..cc79e3402a 100644 --- a/deepmd/tf/fit/polar.py +++ b/deepmd/tf/fit/polar.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import warnings from typing import ( - List, Optional, ) @@ -52,18 +51,18 @@ class PolarFittingSeA(Fitting): The dimension of the descrptor :math:`\mathcal{D}` embedding_width The rotation matrix dimension of the descrptor :math:`\mathcal{D}` - neuron : List[int] + neuron : list[int] Number of neurons in each hidden layer of the fitting net resnet_dt : bool Time-step `dt` in the resnet construction: y = x + dt * \phi (Wx + b) - sel_type : List[int] + sel_type : list[int] The atom types selected to have an atomic polarizability prediction. If is None, all atoms are selected. fit_diag : bool Fit the diagonal part of the rotational invariant polarizability matrix, which will be converted to normal polarizability matrix by contracting with the rotation matrix. - scale : List[float] + scale : list[float] The output of the fitting net (polarizability matrix) for type i atom will be scaled by scale[i] - diag_shift : List[float] + diag_shift : list[float] The diagonal part of the polarizability matrix of type i will be shifted by diag_shift[i]. The shift operation is carried out after scale. seed : int Random seed for initializing the network parameters. @@ -76,7 +75,7 @@ class PolarFittingSeA(Fitting): mixed_types : bool If true, use a uniform fitting net for all atom types, otherwise use different fitting nets for different atom types. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -85,19 +84,19 @@ def __init__( ntypes: int, dim_descrpt: int, embedding_width: int, - neuron: List[int] = [120, 120, 120], + neuron: list[int] = [120, 120, 120], resnet_dt: bool = True, - sel_type: Optional[List[int]] = None, + sel_type: Optional[list[int]] = None, fit_diag: bool = True, - scale: Optional[List[float]] = None, + scale: Optional[list[float]] = None, shift_diag: bool = True, # YWolfeee: will support the user to decide whether to use this function - # diag_shift : List[float] = None, YWolfeee: will not support the user to assign a shift + # diag_shift : list[float] = None, YWolfeee: will not support the user to assign a shift seed: Optional[int] = None, activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, mixed_types: bool = False, - type_map: Optional[List[str]] = None, # to be compat with input + type_map: Optional[list[str]] = None, # to be compat with input **kwargs, ) -> None: """Constructor.""" @@ -153,7 +152,7 @@ def __init__( self.mixed_types = mixed_types self.type_map = type_map - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get selected atom types.""" return self.sel_type @@ -620,18 +619,18 @@ class GlobalPolarFittingSeA: ---------- descrpt : tf.Tensor The descrptor - neuron : List[int] + neuron : list[int] Number of neurons in each hidden layer of the fitting net resnet_dt : bool Time-step `dt` in the resnet construction: y = x + dt * \phi (Wx + b) - sel_type : List[int] + sel_type : list[int] The atom types selected to have an atomic polarizability prediction fit_diag : bool Fit the diagonal part of the rotational invariant polarizability matrix, which will be converted to normal polarizability matrix by contracting with the rotation matrix. - scale : List[float] + scale : list[float] The output of the fitting net (polarizability matrix) for type i atom will be scaled by scale[i] - diag_shift : List[float] + diag_shift : list[float] The diagonal part of the polarizability matrix of type i will be shifted by diag_shift[i]. The shift operation is carried out after scale. seed : int Random seed for initializing the network parameters. @@ -644,12 +643,12 @@ class GlobalPolarFittingSeA: def __init__( self, descrpt: tf.Tensor, - neuron: List[int] = [120, 120, 120], + neuron: list[int] = [120, 120, 120], resnet_dt: bool = True, - sel_type: Optional[List[int]] = None, + sel_type: Optional[list[int]] = None, fit_diag: bool = True, - scale: Optional[List[float]] = None, - diag_shift: Optional[List[float]] = None, + scale: Optional[list[float]] = None, + diag_shift: Optional[list[float]] = None, seed: Optional[int] = None, activation_function: str = "tanh", precision: str = "default", diff --git a/deepmd/tf/infer/data_modifier.py b/deepmd/tf/infer/data_modifier.py index 08966c3498..ddb1af68d7 100644 --- a/deepmd/tf/infer/data_modifier.py +++ b/deepmd/tf/infer/data_modifier.py @@ -1,9 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import os -from typing import ( - List, - Tuple, -) import numpy as np @@ -47,8 +43,8 @@ class DipoleChargeModifier(DeepDipole): def __init__( self, model_name: str, - model_charge_map: List[float], - sys_charge_map: List[float], + model_charge_map: list[float], + sys_charge_map: list[float], ewald_h: float = 1, ewald_beta: float = 1, ) -> None: @@ -219,7 +215,7 @@ def eval( box: np.ndarray, atype: np.ndarray, eval_fv: bool = True, - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """Evaluate the modification. Parameters diff --git a/deepmd/tf/infer/deep_eval.py b/deepmd/tf/infer/deep_eval.py index 0f317bd21f..33725007f3 100644 --- a/deepmd/tf/infer/deep_eval.py +++ b/deepmd/tf/infer/deep_eval.py @@ -1,17 +1,13 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import json from functools import ( - lru_cache, + cache, ) from typing import ( TYPE_CHECKING, Any, Callable, - Dict, - List, Optional, - Tuple, - Type, Union, ) @@ -268,8 +264,8 @@ def _init_attr(self): self.modifier_type = None @property - @lru_cache(maxsize=None) - def model_type(self) -> Type["DeepEvalWrapper"]: + @cache + def model_type(self) -> type["DeepEvalWrapper"]: """Get type of model. :type:str @@ -293,7 +289,7 @@ def model_type(self) -> Type["DeepEvalWrapper"]: raise RuntimeError(f"unknown model type {model_type}") @property - @lru_cache(maxsize=None) + @cache def model_version(self) -> str: """Get version of model. @@ -312,7 +308,7 @@ def model_version(self) -> str: return mt.decode("utf-8") @property - @lru_cache(maxsize=None) + @cache def sess(self) -> tf.Session: """Get TF session.""" # start a tf session associated to the graph @@ -398,7 +394,7 @@ def _load_graph( def sort_input( coord: np.ndarray, atom_type: np.ndarray, - sel_atoms: Optional[List[int]] = None, + sel_atoms: Optional[list[int]] = None, ): """Sort atoms in the system according their types. @@ -451,7 +447,7 @@ def sort_input( return coord, atom_type, idx_map, atom_type, idx_map @staticmethod - def reverse_map(vec: np.ndarray, imap: List[int]) -> np.ndarray: + def reverse_map(vec: np.ndarray, imap: list[int]) -> np.ndarray: """Reverse mapping of a vector according to the index map. Parameters @@ -635,7 +631,7 @@ def get_rcut(self) -> float: """Get the cut-off radius of this model.""" return self.rcut - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map (element name of the atom types) of this model.""" return self.tmap @@ -687,8 +683,8 @@ def eval_func(*args, **kwargs): def _get_natoms_and_nframes( self, coords: np.ndarray, - atom_types: Union[List[int], np.ndarray], - ) -> Tuple[int, int]: + atom_types: Union[list[int], np.ndarray], + ) -> tuple[int, int]: natoms = len(atom_types[0]) if natoms == 0: assert coords.size == 0 @@ -707,7 +703,7 @@ def eval( aparam: Optional[np.ndarray] = None, efield: Optional[np.ndarray] = None, **kwargs: Any, - ) -> Dict[str, np.ndarray]: + ) -> dict[str, np.ndarray]: """Evaluate the energy, force and virial by using this DP. Parameters @@ -1197,7 +1193,7 @@ def __init__( self.neighbor_list = neighbor_list @property - @lru_cache(maxsize=None) + @cache def model_type(self) -> str: """Get type of model. @@ -1208,7 +1204,7 @@ def model_type(self) -> str: return mt.decode("utf-8") @property - @lru_cache(maxsize=None) + @cache def model_version(self) -> str: """Get version of model. @@ -1227,7 +1223,7 @@ def model_version(self) -> str: return mt.decode("utf-8") @property - @lru_cache(maxsize=None) + @cache def sess(self) -> tf.Session: """Get TF session.""" # start a tf session associated to the graph @@ -1319,7 +1315,7 @@ def _load_graph( def sort_input( coord: np.ndarray, atom_type: np.ndarray, - sel_atoms: Optional[List[int]] = None, + sel_atoms: Optional[list[int]] = None, mixed_type: bool = False, ): """Sort atoms in the system according their types. @@ -1382,7 +1378,7 @@ def sort_input( return coord, atom_type, idx_map @staticmethod - def reverse_map(vec: np.ndarray, imap: List[int]) -> np.ndarray: + def reverse_map(vec: np.ndarray, imap: list[int]) -> np.ndarray: """Reverse mapping of a vector according to the index map. Parameters diff --git a/deepmd/tf/infer/deep_tensor.py b/deepmd/tf/infer/deep_tensor.py index b0f2f244e1..a20bbfe513 100644 --- a/deepmd/tf/infer/deep_tensor.py +++ b/deepmd/tf/infer/deep_tensor.py @@ -2,10 +2,7 @@ from typing import ( TYPE_CHECKING, ClassVar, - Dict, - List, Optional, - Tuple, ) import numpy as np @@ -41,7 +38,7 @@ class DeepTensor(DeepEval): The neighbor list object. If None, then build the native neighbor list. """ - tensors: ClassVar[Dict[str, str]] = { + tensors: ClassVar[dict[str, str]] = { # descriptor attrs "t_ntypes": "descrpt_attr/ntypes:0", "t_rcut": "descrpt_attr/rcut:0", @@ -127,11 +124,11 @@ def get_rcut(self) -> float: """Get the cut-off radius of this model.""" return self.rcut - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map (element name of the atom types) of this model.""" return self.tmap - def get_sel_type(self) -> List[int]: + def get_sel_type(self) -> list[int]: """Get the selected atom types of this model.""" return self.tselt @@ -147,7 +144,7 @@ def eval( self, coords: np.ndarray, cells: Optional[np.ndarray], - atom_types: List[int], + atom_types: list[int], atomic: bool = True, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, @@ -277,13 +274,13 @@ def eval_full( self, coords: np.ndarray, cells: Optional[np.ndarray], - atom_types: List[int], + atom_types: list[int], atomic: bool = False, fparam: Optional[np.array] = None, aparam: Optional[np.array] = None, efield: Optional[np.array] = None, mixed_type: bool = False, - ) -> Tuple[np.ndarray, ...]: + ) -> tuple[np.ndarray, ...]: """Evaluate the model with interface similar to the energy model. Will return global tensor, component-wise force and virial and optionally atomic tensor and atomic virial. diff --git a/deepmd/tf/infer/ewald_recp.py b/deepmd/tf/infer/ewald_recp.py index 110188c34f..f4b7d86588 100644 --- a/deepmd/tf/infer/ewald_recp.py +++ b/deepmd/tf/infer/ewald_recp.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Tuple, -) import numpy as np @@ -54,7 +51,7 @@ def __init__(self, hh, beta): def eval( self, coord: np.ndarray, charge: np.ndarray, box: np.ndarray - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """Evaluate. Parameters diff --git a/deepmd/tf/lmp.py b/deepmd/tf/lmp.py index b2e47308ed..f3679847fc 100644 --- a/deepmd/tf/lmp.py +++ b/deepmd/tf/lmp.py @@ -10,7 +10,6 @@ Path, ) from typing import ( - List, Optional, ) @@ -32,12 +31,12 @@ find_libpython = None -def get_env(paths: List[Optional[str]]) -> str: +def get_env(paths: list[Optional[str]]) -> str: """Get the environment variable from given paths.""" return ":".join(p for p in paths if p is not None) -def get_library_path(module: str, filename: str) -> List[str]: +def get_library_path(module: str, filename: str) -> list[str]: """Get library path from a module. Parameters diff --git a/deepmd/tf/loss/dos.py b/deepmd/tf/loss/dos.py index 385d2484a8..0b8efe26e0 100644 --- a/deepmd/tf/loss/dos.py +++ b/deepmd/tf/loss/dos.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - List, -) import numpy as np @@ -211,7 +208,7 @@ def eval(self, sess, feed_dict, natoms): return results @property - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" data_requirements = [] # data required diff --git a/deepmd/tf/loss/ener.py b/deepmd/tf/loss/ener.py index 7ecb185818..337046836b 100644 --- a/deepmd/tf/loss/ener.py +++ b/deepmd/tf/loss/ener.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, ) @@ -356,7 +355,7 @@ def eval(self, sess, feed_dict, natoms): return results @property - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" data_requirements = [] # data required @@ -726,7 +725,7 @@ def print_on_training( return print_str @property - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" data_requirements = [] # data required @@ -872,7 +871,7 @@ def eval(self, sess, feed_dict, natoms): return results @property - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" data_requirements = [] # data required diff --git a/deepmd/tf/loss/loss.py b/deepmd/tf/loss/loss.py index ca90c2eb64..351da7b748 100644 --- a/deepmd/tf/loss/loss.py +++ b/deepmd/tf/loss/loss.py @@ -3,11 +3,6 @@ ABCMeta, abstractmethod, ) -from typing import ( - Dict, - List, - Tuple, -) import numpy as np @@ -27,10 +22,10 @@ def build( self, learning_rate: tf.Tensor, natoms: tf.Tensor, - model_dict: Dict[str, tf.Tensor], - label_dict: Dict[str, tf.Tensor], + model_dict: dict[str, tf.Tensor], + label_dict: dict[str, tf.Tensor], suffix: str, - ) -> Tuple[tf.Tensor, Dict[str, tf.Tensor]]: + ) -> tuple[tf.Tensor, dict[str, tf.Tensor]]: """Build the loss function graph. Parameters @@ -58,7 +53,7 @@ def build( def eval( self, sess: tf.Session, - feed_dict: Dict[tf.placeholder, tf.Tensor], + feed_dict: dict[tf.placeholder, tf.Tensor], natoms: tf.Tensor, ) -> dict: """Eval the loss function. @@ -98,5 +93,5 @@ def display_if_exist(loss: tf.Tensor, find_property: float) -> tf.Tensor: @property @abstractmethod - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" diff --git a/deepmd/tf/loss/tensor.py b/deepmd/tf/loss/tensor.py index 4a70ae2a96..a5bcbbe025 100644 --- a/deepmd/tf/loss/tensor.py +++ b/deepmd/tf/loss/tensor.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - List, -) import numpy as np @@ -142,7 +139,7 @@ def eval(self, sess, feed_dict, natoms): return results @property - def label_requirement(self) -> List[DataRequirementItem]: + def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" data_requirements = [] # data required diff --git a/deepmd/tf/model/dos.py b/deepmd/tf/model/dos.py index 61809eff30..7ab068da63 100644 --- a/deepmd/tf/model/dos.py +++ b/deepmd/tf/model/dos.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, Union, ) @@ -51,7 +50,7 @@ def __init__( descriptor: dict, fitting_net: dict, type_embedding: Optional[Union[dict, TypeEmbedNet]] = None, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, data_stat_nbatch: int = 10, data_stat_protect: float = 1e-2, **kwargs, diff --git a/deepmd/tf/model/ener.py b/deepmd/tf/model/ener.py index 66aaff8189..b21c920d9c 100644 --- a/deepmd/tf/model/ener.py +++ b/deepmd/tf/model/ener.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, Union, ) @@ -77,7 +76,7 @@ def __init__( descriptor: dict, fitting_net: dict, type_embedding: Optional[Union[dict, TypeEmbedNet]] = None, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, data_stat_nbatch: int = 10, data_stat_protect: float = 1e-2, use_srtab: Optional[str] = None, diff --git a/deepmd/tf/model/frozen.py b/deepmd/tf/model/frozen.py index 3e296c00f2..05700dc64e 100644 --- a/deepmd/tf/model/frozen.py +++ b/deepmd/tf/model/frozen.py @@ -6,9 +6,7 @@ Enum, ) from typing import ( - List, Optional, - Tuple, Union, ) @@ -244,9 +242,9 @@ def get_type_map(self) -> list: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -285,7 +283,7 @@ def deserialize(cls, data: dict, suffix: str = ""): raise RuntimeError("Should not touch here.") @property - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" data_requirement = [] numb_fparam = self.model.get_dim_fparam() diff --git a/deepmd/tf/model/linear.py b/deepmd/tf/model/linear.py index 1bd1644e54..4c75c2a1d5 100644 --- a/deepmd/tf/model/linear.py +++ b/deepmd/tf/model/linear.py @@ -8,9 +8,7 @@ reduce, ) from typing import ( - List, Optional, - Tuple, Union, ) @@ -50,7 +48,7 @@ class LinearModel(Model): If "sum", the weights are set to be 1. """ - def __init__(self, models: List[dict], weights: List[float], **kwargs): + def __init__(self, models: list[dict], weights: list[float], **kwargs): super().__init__(**kwargs) self.models = [Model(**model) for model in models] if isinstance(weights, list): @@ -140,9 +138,9 @@ def get_type_map(self) -> list: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -175,7 +173,7 @@ def update_sel( return local_jdata_cpy, min_nbor_dist @property - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" return reduce( operator.iadd, [model.input_requirement for model in self.models], [] diff --git a/deepmd/tf/model/model.py b/deepmd/tf/model/model.py index 5224fde473..833f8364ae 100644 --- a/deepmd/tf/model/model.py +++ b/deepmd/tf/model/model.py @@ -8,10 +8,7 @@ Enum, ) from typing import ( - Dict, - List, Optional, - Tuple, Union, ) @@ -113,7 +110,7 @@ def __new__(cls, *args, **kwargs): def __init__( self, type_embedding: Optional[Union[dict, TypeEmbedNet]] = None, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, data_stat_nbatch: int = 10, data_bias_nsample: int = 10, data_stat_protect: float = 1e-2, @@ -360,7 +357,7 @@ def build_type_embedding( return dout def _import_graph_def_from_frz_model( - self, frz_model: str, feed_dict: dict, return_elements: List[str] + self, frz_model: str, feed_dict: dict, return_elements: list[str] ): return_nodes = [x[:-2] for x in return_elements] graph, graph_def = load_graph_def(frz_model) @@ -370,7 +367,7 @@ def _import_graph_def_from_frz_model( ) def _import_graph_def_from_ckpt_meta( - self, ckpt_meta: str, feed_dict: dict, return_elements: List[str] + self, ckpt_meta: str, feed_dict: dict, return_elements: list[str] ): return_nodes = [x[:-2] for x in return_elements] with tf.Graph().as_default() as graph: @@ -469,7 +466,7 @@ def get_feed_dict( box: tf.Tensor, mesh: tf.Tensor, **kwargs, - ) -> Dict[str, tf.Tensor]: + ) -> dict[str, tf.Tensor]: """Generate the feed_dict for current descriptor. Parameters @@ -515,9 +512,9 @@ def get_feed_dict( def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Notes @@ -586,7 +583,7 @@ def serialize(self, suffix: str = "") -> dict: @property @abstractmethod - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" @@ -647,7 +644,7 @@ def __init__( descriptor: Union[dict, Descriptor], fitting_net: Union[dict, Fitting], type_embedding: Optional[Union[dict, TypeEmbedNet]] = None, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, **kwargs, ) -> None: super().__init__( @@ -761,9 +758,9 @@ def get_ntypes(self) -> int: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -863,6 +860,6 @@ def serialize(self, suffix: str = "") -> dict: } @property - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" return self.descrpt.input_requirement + self.fitting.input_requirement diff --git a/deepmd/tf/model/pairtab.py b/deepmd/tf/model/pairtab.py index 29ddfe9499..d54940fec6 100644 --- a/deepmd/tf/model/pairtab.py +++ b/deepmd/tf/model/pairtab.py @@ -3,9 +3,7 @@ Enum, ) from typing import ( - List, Optional, - Tuple, Union, ) @@ -69,7 +67,7 @@ class PairTabModel(Model): model_type = "ener" def __init__( - self, tab_file: str, rcut: float, sel: Union[int, List[int]], **kwargs + self, tab_file: str, rcut: float, sel: Union[int, list[int]], **kwargs ): super().__init__() self.tab_file = tab_file @@ -275,9 +273,9 @@ def enable_compression(self, suffix: str = "") -> None: def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Notes @@ -308,6 +306,6 @@ def update_sel( return local_jdata_cpy, min_nbor_dist @property - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" return [] diff --git a/deepmd/tf/model/pairwise_dprc.py b/deepmd/tf/model/pairwise_dprc.py index 6fd8e82f7e..c8a57d90b3 100644 --- a/deepmd/tf/model/pairwise_dprc.py +++ b/deepmd/tf/model/pairwise_dprc.py @@ -1,9 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - Dict, - List, Optional, - Tuple, Union, ) @@ -53,7 +50,7 @@ def __init__( qm_model: dict, qmmm_model: dict, type_embedding: Union[dict, TypeEmbedNet], - type_map: List[str], + type_map: list[str], data_stat_nbatch: int = 10, data_stat_nsample: int = 10, data_stat_protect: float = 1e-2, @@ -373,7 +370,7 @@ def get_feed_dict( box: tf.Tensor, mesh: tf.Tensor, **kwargs, - ) -> Dict[str, tf.Tensor]: + ) -> dict[str, tf.Tensor]: """Generate the feed_dict for current descriptor. Parameters @@ -416,9 +413,9 @@ def get_feed_dict( def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, - ) -> Tuple[dict, Optional[float]]: + ) -> tuple[dict, Optional[float]]: """Update the selection and perform neighbor statistics. Parameters @@ -442,7 +439,7 @@ def update_sel( return local_jdata, min_nbor_dist @property - def input_requirement(self) -> List[DataRequirementItem]: + def input_requirement(self) -> list[DataRequirementItem]: """Return data requirements needed for the model input.""" data_requirement = [] data_requirement.append( diff --git a/deepmd/tf/model/tensor.py b/deepmd/tf/model/tensor.py index b2afe0d71f..8514844e03 100644 --- a/deepmd/tf/model/tensor.py +++ b/deepmd/tf/model/tensor.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, Union, ) @@ -50,7 +49,7 @@ def __init__( descriptor: dict, fitting_net: dict, type_embedding: Optional[Union[dict, TypeEmbedNet]] = None, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, data_stat_nbatch: int = 10, data_stat_protect: float = 1e-2, **kwargs, diff --git a/deepmd/tf/nvnmd/utils/fio.py b/deepmd/tf/nvnmd/utils/fio.py index 3efd7520dd..9daff62183 100644 --- a/deepmd/tf/nvnmd/utils/fio.py +++ b/deepmd/tf/nvnmd/utils/fio.py @@ -3,9 +3,6 @@ import logging import os import struct -from typing import ( - List, -) import numpy as np @@ -168,7 +165,7 @@ def load(self, file_name="", default_value=""): log.warning(f"can not find {file_name}") return default_value - def save(self, file_name: str, data: List[str]): + def save(self, file_name: str, data: list[str]): r"""Save hex string into binary file.""" log.info(f"write binary to {file_name}") Fio().create_file_path(file_name) diff --git a/deepmd/tf/train/run_options.py b/deepmd/tf/train/run_options.py index b835d63852..c36b42e194 100644 --- a/deepmd/tf/train/run_options.py +++ b/deepmd/tf/train/run_options.py @@ -8,7 +8,6 @@ ) from typing import ( TYPE_CHECKING, - List, Optional, ) @@ -80,7 +79,7 @@ class RunOptions: Attributes ---------- - gpus: Optional[List[int]] + gpus: Optional[list[int]] list of GPUs if any are present else None is_chief: bool in distribured training it is true for tha main MPI process in serail it is @@ -91,17 +90,17 @@ class RunOptions: index of the MPI task nodename: str name of the node - node_list_ : List[str] + node_list_ : list[str] the list of nodes of the current mpirun my_device: str deviice type - gpu or cpu """ - gpus: Optional[List[int]] + gpus: Optional[list[int]] world_size: int my_rank: int nodename: str - nodelist: List[int] + nodelist: list[int] my_device: str _HVD: Optional["HVD"] diff --git a/deepmd/tf/train/trainer.py b/deepmd/tf/train/trainer.py index 474af1da90..7f9aeb27d2 100644 --- a/deepmd/tf/train/trainer.py +++ b/deepmd/tf/train/trainer.py @@ -4,10 +4,6 @@ import os import shutil import time -from typing import ( - Dict, - List, -) import google.protobuf.message import numpy as np @@ -891,7 +887,7 @@ def _change_energy_bias( ) @property - def data_requirements(self) -> List[DataRequirementItem]: + def data_requirements(self) -> list[DataRequirementItem]: return self.model.input_requirement + self.loss.label_requirement @@ -922,17 +918,17 @@ def __init__(self, train_data: DeepmdDataSystem): self.data_keys = batch_data.keys() self.data_types = [tf.as_dtype(x.dtype) for x in batch_data.values()] - def build(self) -> List[tf.Tensor]: + def build(self) -> list[tf.Tensor]: """Build the OP that loads the training data. Returns ------- - List[tf.Tensor] + list[tf.Tensor] Tensor of the loaded data. """ train_data = self.train_data - def get_train_batch() -> List[np.ndarray]: + def get_train_batch() -> list[np.ndarray]: batch_data = train_data.get_batch() # convert dict to list of arryas batch_data = tuple([batch_data[kk] for kk in self.data_keys]) @@ -940,17 +936,17 @@ def get_train_batch() -> List[np.ndarray]: return tf.py_func(get_train_batch, [], self.data_types, name="train_data") - def get_data_dict(self, batch_list: List[np.ndarray]) -> Dict[str, np.ndarray]: + def get_data_dict(self, batch_list: list[np.ndarray]) -> dict[str, np.ndarray]: """Generate a dict of the loaded data. Parameters ---------- - batch_list : List[np.ndarray] + batch_list : list[np.ndarray] The loaded data. Returns ------- - Dict[str, np.ndarray] + dict[str, np.ndarray] The dict of the loaded data. """ return dict(zip(self.data_keys, batch_list)) diff --git a/deepmd/tf/utils/finetune.py b/deepmd/tf/utils/finetune.py index 4e55b9f5bb..4c57246ffd 100644 --- a/deepmd/tf/utils/finetune.py +++ b/deepmd/tf/utils/finetune.py @@ -3,7 +3,6 @@ import logging from typing import ( Any, - Dict, ) from deepmd.tf.utils.errors import ( @@ -17,13 +16,13 @@ def replace_model_params_with_pretrained_model( - jdata: Dict[str, Any], pretrained_model: str + jdata: dict[str, Any], pretrained_model: str ): """Replace the model params in input script according to pretrained model. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] input script pretrained_model : str filename of the pretrained model diff --git a/deepmd/tf/utils/graph.py b/deepmd/tf/utils/graph.py index a891506e95..4fccaac0e8 100644 --- a/deepmd/tf/utils/graph.py +++ b/deepmd/tf/utils/graph.py @@ -1,9 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import re -from typing import ( - Dict, - Tuple, -) import numpy as np @@ -22,7 +18,7 @@ ) -def load_graph_def(model_file: str) -> Tuple[tf.Graph, tf.GraphDef]: +def load_graph_def(model_file: str) -> tuple[tf.Graph, tf.GraphDef]: """Load graph as well as the graph_def from the frozen model(model_file). Parameters @@ -98,7 +94,7 @@ def get_tensor_by_name(model_file: str, tensor_name: str) -> tf.Tensor: return get_tensor_by_name_from_graph(graph, tensor_name) -def get_pattern_nodes_from_graph_def(graph_def: tf.GraphDef, pattern: str) -> Dict: +def get_pattern_nodes_from_graph_def(graph_def: tf.GraphDef, pattern: str) -> dict: """Get the pattern nodes with the given tf.GraphDef object. Parameters @@ -123,7 +119,7 @@ def get_pattern_nodes_from_graph_def(graph_def: tf.GraphDef, pattern: str) -> Di def get_embedding_net_nodes_from_graph_def( graph_def: tf.GraphDef, suffix: str = "" -) -> Dict: +) -> dict: """Get the embedding net nodes with the given tf.GraphDef object. Parameters @@ -154,7 +150,7 @@ def get_embedding_net_nodes_from_graph_def( return embedding_net_nodes -def get_embedding_net_nodes(model_file: str, suffix: str = "") -> Dict: +def get_embedding_net_nodes(model_file: str, suffix: str = "") -> dict: """Get the embedding net nodes with the given frozen model(model_file). Parameters @@ -175,7 +171,7 @@ def get_embedding_net_nodes(model_file: str, suffix: str = "") -> Dict: def get_embedding_net_variables_from_graph_def( graph_def: tf.GraphDef, suffix: str = "" -) -> Dict: +) -> dict: """Get the embedding net variables with the given tf.GraphDef object. Parameters @@ -220,7 +216,7 @@ def get_extra_embedding_net_nodes_from_graph_def( graph_def: tf.GraphDef, suffix: str = "", extra_suffix: str = "", -) -> Dict: +) -> dict: """Get the extra embedding net nodes with the given tf.GraphDef object. Parameters @@ -259,7 +255,7 @@ def get_extra_embedding_net_variables_from_graph_def( graph_def: tf.GraphDef, suffix: str = "", extra_suffix: str = "", -) -> Dict: +) -> dict: """Get the embedding net variables with the given tf.GraphDef object. Parameters @@ -282,7 +278,7 @@ def get_extra_embedding_net_variables_from_graph_def( return convert_tensor_to_ndarray_in_dict(extra_embedding_net_nodes) -def get_embedding_net_variables(model_file: str, suffix: str = "") -> Dict: +def get_embedding_net_variables(model_file: str, suffix: str = "") -> dict: """Get the embedding net variables with the given frozen model(model_file). Parameters @@ -303,7 +299,7 @@ def get_embedding_net_variables(model_file: str, suffix: str = "") -> Dict: def get_fitting_net_nodes_from_graph_def( graph_def: tf.GraphDef, suffix: str = "" -) -> Dict: +) -> dict: """Get the fitting net nodes with the given tf.GraphDef object. Parameters @@ -334,7 +330,7 @@ def get_fitting_net_nodes_from_graph_def( return fitting_net_nodes -def get_fitting_net_nodes(model_file: str) -> Dict: +def get_fitting_net_nodes(model_file: str) -> dict: """Get the fitting net nodes with the given frozen model(model_file). Parameters @@ -353,7 +349,7 @@ def get_fitting_net_nodes(model_file: str) -> Dict: def get_fitting_net_variables_from_graph_def( graph_def: tf.GraphDef, suffix: str = "" -) -> Dict: +) -> dict: """Get the fitting net variables with the given tf.GraphDef object. Parameters @@ -372,7 +368,7 @@ def get_fitting_net_variables_from_graph_def( return convert_tensor_to_ndarray_in_dict(fitting_net_nodes) -def get_fitting_net_variables(model_file: str, suffix: str = "") -> Dict: +def get_fitting_net_variables(model_file: str, suffix: str = "") -> dict: """Get the fitting net variables with the given frozen model(model_file). Parameters @@ -393,7 +389,7 @@ def get_fitting_net_variables(model_file: str, suffix: str = "") -> Dict: def get_type_embedding_net_nodes_from_graph_def( graph_def: tf.GraphDef, suffix: str = "" -) -> Dict: +) -> dict: """Get the type embedding net nodes with the given tf.GraphDef object. Parameters @@ -425,7 +421,7 @@ def get_type_embedding_net_nodes_from_graph_def( def get_type_embedding_net_variables_from_graph_def( graph_def: tf.GraphDef, suffix: str = "" -) -> Dict: +) -> dict: """Get the type embedding net variables with the given tf.GraphDef object. Parameters @@ -448,7 +444,7 @@ def get_type_embedding_net_variables_from_graph_def( def get_attention_layer_nodes_from_graph_def( graph_def: tf.GraphDef, suffix: str = "" -) -> Dict: +) -> dict: """Get the attention layer nodes with the given tf.GraphDef object. Parameters @@ -482,7 +478,7 @@ def get_attention_layer_nodes_from_graph_def( def get_attention_layer_variables_from_graph_def( graph_def: tf.GraphDef, suffix: str = "" -) -> Dict: +) -> dict: """Get the attention layer variables with the given tf.GraphDef object. Parameters @@ -504,18 +500,18 @@ def get_attention_layer_variables_from_graph_def( def convert_tensor_to_ndarray_in_dict( - tensor_dict: Dict[str, tf.Tensor], -) -> Dict[str, np.ndarray]: + tensor_dict: dict[str, tf.Tensor], +) -> dict[str, np.ndarray]: """Convert tensor to ndarray in dict. Parameters ---------- - tensor_dict : Dict[str, tf.Tensor] + tensor_dict : dict[str, tf.Tensor] The input tensor dict Returns ------- - Dict[str, np.ndarray] + dict[str, np.ndarray] The converted tensor dict """ for key in tensor_dict: diff --git a/deepmd/tf/utils/neighbor_stat.py b/deepmd/tf/utils/neighbor_stat.py index f668d4a4da..4052c89821 100644 --- a/deepmd/tf/utils/neighbor_stat.py +++ b/deepmd/tf/utils/neighbor_stat.py @@ -1,9 +1,10 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging -from typing import ( +from collections.abc import ( Iterator, +) +from typing import ( Optional, - Tuple, ) import numpy as np @@ -61,7 +62,7 @@ def build( atype: tf.Tensor, cell: tf.Tensor, pbc: tf.Tensor, - ) -> Tuple[tf.Tensor, tf.Tensor]: + ) -> tuple[tf.Tensor, tf.Tensor]: """Calculate the nearest neighbor distance between atoms, maximum nbor size of atoms and the output data range of the environment matrix. @@ -187,7 +188,7 @@ def __init__( self.op = self.build() self.sub_sess = tf.Session(graph=sub_graph, config=default_tf_session_config) - def build(self) -> Tuple[tf.Tensor, tf.Tensor]: + def build(self) -> tuple[tf.Tensor, tf.Tensor]: """Build the graph. Returns @@ -215,7 +216,7 @@ def build(self) -> Tuple[tf.Tensor, tf.Tensor]: def iterator( self, data: DeepmdDataSystem - ) -> Iterator[Tuple[np.ndarray, float, str]]: + ) -> Iterator[tuple[np.ndarray, float, str]]: """Produce data. Parameters diff --git a/deepmd/tf/utils/parallel_op.py b/deepmd/tf/utils/parallel_op.py index 5eeb1fab7f..ce43ea8c15 100644 --- a/deepmd/tf/utils/parallel_op.py +++ b/deepmd/tf/utils/parallel_op.py @@ -1,11 +1,11 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from collections.abc import ( + Generator, +) from typing import ( Any, Callable, - Dict, - Generator, Optional, - Tuple, ) from deepmd.tf.env import ( @@ -21,7 +21,7 @@ class ParallelOp: Parameters ---------- - builder : Callable[..., Tuple[Dict[str, tf.Tensor], Tuple[tf.Tensor]]] + builder : Callable[..., tuple[dict[str, tf.Tensor], tuple[tf.Tensor]]] returns two objects: a dict which stores placeholders by key, and a tuple with the final op(s) nthreads : int, optional the number of threads @@ -45,7 +45,7 @@ class ParallelOp: def __init__( self, - builder: Callable[..., Tuple[Dict[str, tf.Tensor], Tuple[tf.Tensor]]], + builder: Callable[..., tuple[dict[str, tf.Tensor], tuple[tf.Tensor]]], nthreads: Optional[int] = None, config: Optional[tf.ConfigProto] = None, ) -> None: @@ -65,8 +65,8 @@ def __init__( self.ops.append(op) def generate( - self, sess: tf.Session, feed: Generator[Dict[str, Any], None, None] - ) -> Generator[Tuple, None, None]: + self, sess: tf.Session, feed: Generator[dict[str, Any], None, None] + ) -> Generator[tuple, None, None]: """Returns a generator. Parameters diff --git a/deepmd/tf/utils/spin.py b/deepmd/tf/utils/spin.py index c20d4dcc7b..ab70bdf319 100644 --- a/deepmd/tf/utils/spin.py +++ b/deepmd/tf/utils/spin.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, ) @@ -25,9 +24,9 @@ class Spin: def __init__( self, - use_spin: Optional[List[bool]] = None, - spin_norm: Optional[List[float]] = None, - virtual_len: Optional[List[float]] = None, + use_spin: Optional[list[bool]] = None, + spin_norm: Optional[list[float]] = None, + virtual_len: Optional[list[float]] = None, ) -> None: """Constructor.""" self.use_spin = use_spin @@ -74,14 +73,14 @@ def get_ntypes_spin(self) -> int: """Returns the number of atom types which contain spin.""" return self.ntypes_spin - def get_use_spin(self) -> List[bool]: + def get_use_spin(self) -> list[bool]: """Returns the list of whether to use spin for each atom type.""" return self.use_spin - def get_spin_norm(self) -> List[float]: + def get_spin_norm(self) -> list[float]: """Returns the list of magnitude of atomic spin for each atom type.""" return self.spin_norm - def get_virtual_len(self) -> List[float]: + def get_virtual_len(self) -> list[float]: """Returns the list of distance between real atom and virtual atom for each atom type.""" return self.virtual_len diff --git a/deepmd/tf/utils/tabulate.py b/deepmd/tf/utils/tabulate.py index e1ab45c44f..afb94bb050 100644 --- a/deepmd/tf/utils/tabulate.py +++ b/deepmd/tf/utils/tabulate.py @@ -5,9 +5,6 @@ ) from typing import ( Callable, - Dict, - List, - Tuple, ) import numpy as np @@ -53,7 +50,7 @@ class DPTabulate: The graph_def of the original model type_one_side Try to build N_types tables. Otherwise, building N_types^2 tables - exclude_types : List[List[int]] + exclude_types : list[list[int]] The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1. activation_function @@ -65,11 +62,11 @@ class DPTabulate: def __init__( self, descrpt: Descriptor, - neuron: List[int], + neuron: list[int], graph: tf.Graph, graph_def: tf.GraphDef, type_one_side: bool = False, - exclude_types: List[List[int]] = [], + exclude_types: list[list[int]] = [], activation_fn: Callable[[tf.Tensor], tf.Tensor] = tf.nn.tanh, suffix: str = "", ) -> None: @@ -160,7 +157,7 @@ def __init__( def build( self, min_nbor_dist: float, extrapolate: float, stride0: float, stride1: float - ) -> Tuple[Dict[str, int], Dict[str, int]]: + ) -> tuple[dict[str, int], dict[str, int]]: r"""Build the tables for model compression. Parameters diff --git a/deepmd/tf/utils/type_embed.py b/deepmd/tf/utils/type_embed.py index 7d74b0a856..13d02a858c 100644 --- a/deepmd/tf/utils/type_embed.py +++ b/deepmd/tf/utils/type_embed.py @@ -2,7 +2,6 @@ import logging import re from typing import ( - List, Optional, Union, ) @@ -105,7 +104,7 @@ class TypeEmbedNet: Whether to use electronic configuration type embedding. use_tebd_bias : bool, Optional Whether to use bias in the type embedding layer. - type_map: List[str], Optional + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -113,7 +112,7 @@ def __init__( self, *, ntypes: int, - neuron: List[int], + neuron: list[int], resnet_dt: bool = False, activation_function: Union[str, None] = "tanh", precision: str = "default", @@ -123,7 +122,7 @@ def __init__( padding: bool = False, use_econf_tebd: bool = False, use_tebd_bias: bool = False, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, **kwargs, ) -> None: """Constructor.""" diff --git a/deepmd/tf/utils/update_sel.py b/deepmd/tf/utils/update_sel.py index 726aec4d41..8915eb0147 100644 --- a/deepmd/tf/utils/update_sel.py +++ b/deepmd/tf/utils/update_sel.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Type, -) from deepmd.tf.utils.neighbor_stat import ( NeighborStat, @@ -13,5 +10,5 @@ class UpdateSel(BaseUpdateSel): @property - def neighbor_stat(self) -> Type[NeighborStat]: + def neighbor_stat(self) -> type[NeighborStat]: return NeighborStat diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index a799b6b0c4..1a5e1cc3b2 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -4,8 +4,6 @@ import warnings from typing import ( Callable, - Dict, - List, Optional, Union, ) @@ -92,7 +90,7 @@ def type_embedding_args(): doc_use_tebd_bias = "Whether to use bias in the type embedding layer." return [ - Argument("neuron", List[int], optional=True, default=[8], doc=doc_neuron), + Argument("neuron", list[int], optional=True, default=[8], doc=doc_neuron), Argument( "activation_function", str, @@ -136,22 +134,22 @@ def spin_args(): ) return [ - Argument("use_spin", [List[bool], List[int]], doc=doc_use_spin), + Argument("use_spin", [list[bool], list[int]], doc=doc_use_spin), Argument( "spin_norm", - List[float], + list[float], optional=True, doc=doc_only_tf_supported + doc_spin_norm, ), Argument( "virtual_len", - List[float], + list[float], optional=True, doc=doc_only_tf_supported + doc_virtual_len, ), Argument( "virtual_scale", - [List[float], float], + [list[float], float], optional=True, doc=doc_only_pt_supported + doc_virtual_scale, ), @@ -166,10 +164,10 @@ def __init__(self) -> None: self.__plugin = Plugin() def register( - self, name: str, alias: Optional[List[str]] = None, doc: str = "" + self, name: str, alias: Optional[list[str]] = None, doc: str = "" ) -> Callable[ - [Union[Callable[[], Argument], Callable[[], List[Argument]]]], - Union[Callable[[], Argument], Callable[[], List[Argument]]], + [Union[Callable[[], Argument], Callable[[], list[Argument]]]], + Union[Callable[[], Argument], Callable[[], list[Argument]]], ]: """Register a descriptor argument plugin. @@ -177,12 +175,12 @@ def register( ---------- name : str the name of a descriptor - alias : List[str], optional + alias : list[str], optional the list of aliases of this descriptor Returns ------- - Callable[[Union[Callable[[], Argument], Callable[[], List[Argument]]]], Union[Callable[[], Argument], Callable[[], List[Argument]]]] + Callable[[Union[Callable[[], Argument], Callable[[], list[Argument]]]], Union[Callable[[], Argument], Callable[[], list[Argument]]]] decorator to return the registered descriptor argument method Examples @@ -197,7 +195,7 @@ def descrpt_some_descrpt_args(): alias = tuple(alias) return self.__plugin.register((name, alias, doc)) - def get_all_argument(self, exclude_hybrid: bool = False) -> List[Argument]: + def get_all_argument(self, exclude_hybrid: bool = False) -> list[Argument]: """Get all arguments. Parameters @@ -207,7 +205,7 @@ def get_all_argument(self, exclude_hybrid: bool = False) -> List[Argument]: Returns ------- - List[Argument] + list[Argument] all arguments """ arguments = [] @@ -245,17 +243,17 @@ def descrpt_local_frame_args(): - axis_rule[i*6+5]: index of the axis atom defining the second axis. Note that the neighbors with the same class and type are sorted according to their relative distance." return [ - Argument("sel_a", List[int], optional=False, doc=doc_sel_a), - Argument("sel_r", List[int], optional=False, doc=doc_sel_r), + Argument("sel_a", list[int], optional=False, doc=doc_sel_a), + Argument("sel_r", list[int], optional=False, doc=doc_sel_r), Argument("rcut", float, optional=True, default=6.0, doc=doc_rcut), - Argument("axis_rule", List[int], optional=False, doc=doc_axis_rule), + Argument("axis_rule", list[int], optional=False, doc=doc_axis_rule), ] @descrpt_args_plugin.register("se_e2_a", alias=["se_a"]) def descrpt_se_a_args(): doc_sel = 'This parameter set the number of selected neighbors for each type of atom. It can be:\n\n\ - - `List[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ + - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" @@ -272,11 +270,11 @@ def descrpt_se_a_args(): doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `atom_ener` in the energy fitting is used" return [ - Argument("sel", [List[int], str], optional=True, default="auto", doc=doc_sel), + Argument("sel", [list[int], str], optional=True, default="auto", doc=doc_sel), Argument("rcut", float, optional=True, default=6.0, doc=doc_rcut), Argument("rcut_smth", float, optional=True, default=0.5, doc=doc_rcut_smth), Argument( - "neuron", List[int], optional=True, default=[10, 20, 40], doc=doc_neuron + "neuron", list[int], optional=True, default=[10, 20, 40], doc=doc_neuron ), Argument( "axis_neuron", @@ -302,7 +300,7 @@ def descrpt_se_a_args(): Argument("seed", [int, None], optional=True, doc=doc_seed), Argument( "exclude_types", - List[List[int]], + list[list[int]], optional=True, default=[], doc=doc_exclude_types, @@ -323,7 +321,7 @@ def descrpt_se_a_args(): @descrpt_args_plugin.register("se_e3", alias=["se_at", "se_a_3be", "se_t"]) def descrpt_se_t_args(): doc_sel = 'This parameter set the number of selected neighbors for each type of atom. It can be:\n\n\ - - `List[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ + - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" @@ -338,11 +336,11 @@ def descrpt_se_t_args(): doc_env_protection = "Protection parameter to prevent division by zero errors during environment matrix calculations. For example, when using paddings, there may be zero distances of neighbors, which may make division by zero error during environment matrix calculations without protection." return [ - Argument("sel", [List[int], str], optional=True, default="auto", doc=doc_sel), + Argument("sel", [list[int], str], optional=True, default="auto", doc=doc_sel), Argument("rcut", float, optional=True, default=6.0, doc=doc_rcut), Argument("rcut_smth", float, optional=True, default=0.5, doc=doc_rcut_smth), Argument( - "neuron", List[int], optional=True, default=[10, 20, 40], doc=doc_neuron + "neuron", list[int], optional=True, default=[10, 20, 40], doc=doc_neuron ), Argument( "activation_function", @@ -360,7 +358,7 @@ def descrpt_se_t_args(): ), Argument( "exclude_types", - List[List[int]], + list[list[int]], optional=True, default=[], doc=doc_exclude_types, @@ -392,7 +390,7 @@ def descrpt_se_a_tpe_args(): @descrpt_args_plugin.register("se_e2_r", alias=["se_r"]) def descrpt_se_r_args(): doc_sel = 'This parameter set the number of selected neighbors for each type of atom. It can be:\n\n\ - - `List[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ + - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" @@ -408,11 +406,11 @@ def descrpt_se_r_args(): doc_env_protection = "Protection parameter to prevent division by zero errors during environment matrix calculations. For example, when using paddings, there may be zero distances of neighbors, which may make division by zero error during environment matrix calculations without protection." return [ - Argument("sel", [List[int], str], optional=True, default="auto", doc=doc_sel), + Argument("sel", [list[int], str], optional=True, default="auto", doc=doc_sel), Argument("rcut", float, optional=True, default=6.0, doc=doc_rcut), Argument("rcut_smth", float, optional=True, default=0.5, doc=doc_rcut_smth), Argument( - "neuron", List[int], optional=True, default=[10, 20, 40], doc=doc_neuron + "neuron", list[int], optional=True, default=[10, 20, 40], doc=doc_neuron ), Argument( "activation_function", @@ -430,7 +428,7 @@ def descrpt_se_r_args(): Argument("seed", [int, None], optional=True, doc=doc_seed), Argument( "exclude_types", - List[List[int]], + list[list[int]], optional=True, default=[], doc=doc_exclude_types, @@ -469,7 +467,7 @@ def descrpt_hybrid_args(): def descrpt_se_atten_common_args(): doc_sel = 'This parameter set the number of selected neighbors. Note that this parameter is a little different from that in other descriptors. Instead of separating each type of atoms, only the summation matters. And this number is highly related with the efficiency, thus one should not make it too large. Usually 200 or less is enough, far away from the GPU limitation 4096. It can be:\n\n\ - `int`. The maximum number of neighbor atoms to be considered. We recommend it to be less than 200. \n\n\ - - `List[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ + - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" @@ -490,12 +488,12 @@ def descrpt_se_atten_common_args(): return [ Argument( - "sel", [int, List[int], str], optional=True, default="auto", doc=doc_sel + "sel", [int, list[int], str], optional=True, default="auto", doc=doc_sel ), Argument("rcut", float, optional=True, default=6.0, doc=doc_rcut), Argument("rcut_smth", float, optional=True, default=0.5, doc=doc_rcut_smth), Argument( - "neuron", List[int], optional=True, default=[10, 20, 40], doc=doc_neuron + "neuron", list[int], optional=True, default=[10, 20, 40], doc=doc_neuron ), Argument( "axis_neuron", @@ -521,7 +519,7 @@ def descrpt_se_atten_common_args(): Argument("seed", [int, None], optional=True, doc=doc_seed), Argument( "exclude_types", - List[List[int]], + list[list[int]], optional=True, default=[], doc=doc_exclude_types, @@ -666,7 +664,7 @@ def descrpt_se_atten_args(): def descrpt_se_e3_tebd_args(): doc_sel = 'This parameter set the number of selected neighbors. Note that this parameter is a little different from that in other descriptors. Instead of separating each type of atoms, only the summation matters. And this number is highly related with the efficiency, thus one should not make it too large. Usually 200 or less is enough, far away from the GPU limitation 4096. It can be:\n\n\ - `int`. The maximum number of neighbor atoms to be considered. We recommend it to be less than 200. \n\n\ - - `List[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ + - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" @@ -697,12 +695,12 @@ def descrpt_se_e3_tebd_args(): return [ Argument( - "sel", [int, List[int], str], optional=True, default="auto", doc=doc_sel + "sel", [int, list[int], str], optional=True, default="auto", doc=doc_sel ), Argument("rcut", float, optional=True, default=6.0, doc=doc_rcut), Argument("rcut_smth", float, optional=True, default=0.5, doc=doc_rcut_smth), Argument( - "neuron", List[int], optional=True, default=[10, 20, 40], doc=doc_neuron + "neuron", list[int], optional=True, default=[10, 20, 40], doc=doc_neuron ), Argument( "tebd_dim", @@ -745,7 +743,7 @@ def descrpt_se_e3_tebd_args(): ), Argument( "exclude_types", - List[List[int]], + list[list[int]], optional=True, default=[], doc=doc_exclude_types, @@ -898,7 +896,7 @@ def descrpt_dpa2_args(): Argument("smooth", bool, optional=True, default=True, doc=doc_smooth), Argument( "exclude_types", - List[List[int]], + list[list[int]], optional=True, default=[], doc=doc_exclude_types, @@ -1338,7 +1336,7 @@ def descrpt_se_a_ebd_v2_args(): @descrpt_args_plugin.register("se_a_mask", doc=doc_only_tf_supported) def descrpt_se_a_mask_args(): doc_sel = 'This parameter sets the number of selected neighbors for each type of atom. It can be:\n\n\ - - `List[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ + - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." @@ -1352,9 +1350,9 @@ def descrpt_se_a_mask_args(): doc_seed = "Random seed for parameter initialization" return [ - Argument("sel", [List[int], str], optional=True, default="auto", doc=doc_sel), + Argument("sel", [list[int], str], optional=True, default="auto", doc=doc_sel), Argument( - "neuron", List[int], optional=True, default=[10, 20, 40], doc=doc_neuron + "neuron", list[int], optional=True, default=[10, 20, 40], doc=doc_neuron ), Argument( "axis_neuron", @@ -1377,7 +1375,7 @@ def descrpt_se_a_mask_args(): ), Argument( "exclude_types", - List[List[int]], + list[list[int]], optional=True, default=[], doc=doc_exclude_types, @@ -1451,7 +1449,7 @@ def fitting_ener(): Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam), Argument( "neuron", - List[int], + list[int], optional=True, default=[120, 120, 120], alias=["n_neuron"], @@ -1468,7 +1466,7 @@ def fitting_ener(): Argument("resnet_dt", bool, optional=True, default=True, doc=doc_resnet_dt), Argument( "trainable", - [List[bool], bool], + [list[bool], bool], optional=True, default=True, doc=doc_trainable, @@ -1479,12 +1477,12 @@ def fitting_ener(): Argument("seed", [int, None], optional=True, doc=doc_seed), Argument( "atom_ener", - List[Optional[float]], + list[Optional[float]], optional=True, default=[], doc=doc_atom_ener, ), - Argument("layer_name", List[str], optional=True, doc=doc_layer_name), + Argument("layer_name", list[str], optional=True, doc=doc_layer_name), Argument( "use_aparam_as_mask", bool, @@ -1516,7 +1514,7 @@ def fitting_dos(): Argument("numb_fparam", int, optional=True, default=0, doc=doc_numb_fparam), Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam), Argument( - "neuron", List[int], optional=True, default=[120, 120, 120], doc=doc_neuron + "neuron", list[int], optional=True, default=[120, 120, 120], doc=doc_neuron ), Argument( "activation_function", @@ -1529,7 +1527,7 @@ def fitting_dos(): Argument("resnet_dt", bool, optional=True, default=True, doc=doc_resnet_dt), Argument( "trainable", - [List[bool], bool], + [list[bool], bool], optional=True, default=True, doc=doc_trainable, @@ -1559,7 +1557,7 @@ def fitting_property(): Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam), Argument( "neuron", - List[int], + list[int], optional=True, default=[120, 120, 120], alias=["n_neuron"], @@ -1601,7 +1599,7 @@ def fitting_polar(): return [ Argument( "neuron", - List[int], + list[int], optional=True, default=[120, 120, 120], alias=["n_neuron"], @@ -1618,13 +1616,13 @@ def fitting_polar(): Argument("precision", str, optional=True, default="default", doc=doc_precision), Argument("fit_diag", bool, optional=True, default=True, doc=doc_fit_diag), Argument( - "scale", [List[float], float], optional=True, default=1.0, doc=doc_scale + "scale", [list[float], float], optional=True, default=1.0, doc=doc_scale ), # Argument("diag_shift", [list,float], optional = True, default = 0.0, doc = doc_diag_shift), Argument("shift_diag", bool, optional=True, default=True, doc=doc_shift_diag), Argument( "sel_type", - [List[int], int, None], + [list[int], int, None], optional=True, alias=["pol_type"], doc=doc_sel_type + doc_only_tf_supported, @@ -1648,7 +1646,7 @@ def fitting_dipole(): return [ Argument( "neuron", - List[int], + list[int], optional=True, default=[120, 120, 120], alias=["n_neuron"], @@ -1665,7 +1663,7 @@ def fitting_dipole(): Argument("precision", str, optional=True, default="default", doc=doc_precision), Argument( "sel_type", - [List[int], int, None], + [list[int], int, None], optional=True, alias=["dipole_type"], doc=doc_sel_type + doc_only_tf_supported, @@ -1702,9 +1700,9 @@ def modifier_dipole_charge(): return [ Argument("model_name", str, optional=False, doc=doc_model_name), Argument( - "model_charge_map", List[float], optional=False, doc=doc_model_charge_map + "model_charge_map", list[float], optional=False, doc=doc_model_charge_map ), - Argument("sys_charge_map", List[float], optional=False, doc=doc_sys_charge_map), + Argument("sys_charge_map", list[float], optional=False, doc=doc_sys_charge_map), Argument("ewald_beta", float, optional=True, default=0.4, doc=doc_ewald_beta), Argument("ewald_h", float, optional=True, default=1.0, doc=doc_ewald_h), ] @@ -1733,7 +1731,7 @@ def model_compression(): return [ Argument("model_file", str, optional=False, doc=doc_model_file), - Argument("table_config", List[float], optional=False, doc=doc_table_config), + Argument("table_config", list[float], optional=False, doc=doc_table_config), Argument("min_nbor_dist", float, optional=False, doc=doc_min_nbor_dist), ] @@ -1785,7 +1783,7 @@ def model_args(exclude_hybrid=False): "model", dict, [ - Argument("type_map", List[str], optional=True, doc=doc_type_map), + Argument("type_map", list[str], optional=True, doc=doc_type_map), Argument( "data_stat_nbatch", int, @@ -1837,7 +1835,7 @@ def model_args(exclude_hybrid=False): ), Argument( "preset_out_bias", - Dict[str, List[Optional[Union[float, List[float]]]]], + dict[str, list[Optional[Union[float, list[float]]]]], optional=True, default=None, doc=doc_only_pt_supported + doc_preset_out_bias, @@ -1960,7 +1958,7 @@ def pairtab_model_args() -> Argument: doc_rcut = "The cut-off radius." doc_sel = 'This parameter set the number of selected neighbors. Note that this parameter is a little different from that in other descriptors. Instead of separating each type of atoms, only the summation matters. And this number is highly related with the efficiency, thus one should not make it too large. Usually 200 or less is enough, far away from the GPU limitation 4096. It can be:\n\n\ - `int`. The maximum number of neighbor atoms to be considered. We recommend it to be less than 200. \n\n\ - - `List[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ + - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' ca = Argument( "pairtab", @@ -1968,7 +1966,7 @@ def pairtab_model_args() -> Argument: [ Argument("tab_file", str, optional=False, doc=doc_tab_file), Argument("rcut", float, optional=False, doc=doc_rcut), - Argument("sel", [int, List[int], str], optional=False, doc=doc_sel), + Argument("sel", [int, list[int], str], optional=False, doc=doc_sel), ], doc=doc_only_tf_supported + "Pairwise tabulation energy model.", ) @@ -2494,11 +2492,11 @@ def training_data_args(): # ! added by Ziyao: new specification style for data args = [ Argument( - "systems", [List[str], str], optional=False, default=".", doc=doc_systems + "systems", [list[str], str], optional=False, default=".", doc=doc_systems ), Argument( "batch_size", - [List[int], int, str], + [list[int], int, str], optional=True, default="auto", doc=doc_batch_size, @@ -2515,7 +2513,7 @@ def training_data_args(): # ! added by Ziyao: new specification style for data ), Argument( "sys_probs", - List[float], + list[float], optional=True, default=None, doc=doc_sys_probs, @@ -2560,11 +2558,11 @@ def validation_data_args(): # ! added by Ziyao: new specification style for dat args = [ Argument( - "systems", [List[str], str], optional=False, default=".", doc=doc_systems + "systems", [list[str], str], optional=False, default=".", doc=doc_systems ), Argument( "batch_size", - [List[int], int, str], + [list[int], int, str], optional=True, default="auto", doc=doc_batch_size, @@ -2581,7 +2579,7 @@ def validation_data_args(): # ! added by Ziyao: new specification style for dat ), Argument( "sys_probs", - List[float], + list[float], optional=True, default=None, doc=doc_sys_probs, @@ -2877,7 +2875,7 @@ def gen_json(multi_task: bool = False, **kwargs) -> str: ) -def gen_args(multi_task: bool = False) -> List[Argument]: +def gen_args(multi_task: bool = False) -> list[Argument]: if not multi_task: return [ model_args(), diff --git a/deepmd/utils/batch_size.py b/deepmd/utils/batch_size.py index 8fe67ad6fc..0394993854 100644 --- a/deepmd/utils/batch_size.py +++ b/deepmd/utils/batch_size.py @@ -7,7 +7,6 @@ ) from typing import ( Callable, - Tuple, ) import array_api_compat @@ -81,7 +80,7 @@ def __init__(self, initial_batch_size: int = 1024, factor: float = 2.0) -> None: def execute( self, callable: Callable, start_index: int, natoms: int - ) -> Tuple[int, tuple]: + ) -> tuple[int, tuple]: """Excuate a method with given batch size. Parameters @@ -153,7 +152,7 @@ def _adjust_batch_size(self, factor: float): def execute_all( self, callable: Callable, total_size: int, natoms: int, *args, **kwargs - ) -> Tuple[np.ndarray]: + ) -> tuple[np.ndarray]: """Excuate a method with all given data. This method is compatible with Array API. @@ -174,7 +173,7 @@ def execute_all( def execute_with_batch_size( batch_size: int, start_index: int - ) -> Tuple[int, Tuple[np.ndarray]]: + ) -> tuple[int, tuple[np.ndarray]]: end_index = start_index + batch_size end_index = min(end_index, total_size) return (end_index - start_index), callable( diff --git a/deepmd/utils/compat.py b/deepmd/utils/compat.py index edd01b8291..83cbe46fad 100644 --- a/deepmd/utils/compat.py +++ b/deepmd/utils/compat.py @@ -3,14 +3,15 @@ import json import warnings +from collections.abc import ( + Sequence, +) from pathlib import ( Path, ) from typing import ( Any, - Dict, Optional, - Sequence, Union, ) @@ -22,13 +23,13 @@ def convert_input_v0_v1( - jdata: Dict[str, Any], warning: bool = True, dump: Optional[Union[str, Path]] = None -) -> Dict[str, Any]: + jdata: dict[str, Any], warning: bool = True, dump: Optional[Union[str, Path]] = None +) -> dict[str, Any]: """Convert input from v0 format to v1. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] loaded json/yaml file warning : bool, optional whether to show deprecation warning, by default True @@ -37,7 +38,7 @@ def convert_input_v0_v1( Returns ------- - Dict[str, Any] + dict[str, Any] converted output """ output = {} @@ -63,19 +64,19 @@ def _warning_input_v0_v1(fname: Optional[Union[str, Path]]): warnings.warn(msg) -def _model(jdata: Dict[str, Any], smooth: bool) -> Dict[str, Dict[str, Any]]: +def _model(jdata: dict[str, Any], smooth: bool) -> dict[str, dict[str, Any]]: """Convert data to v1 input for non-smooth model. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] parsed input json/yaml data smooth : bool whether to use smooth or non-smooth descriptor version Returns ------- - Dict[str, Dict[str, Any]] + dict[str, dict[str, Any]] dictionary with model input parameters and sub-dictionaries for descriptor and fitting net """ @@ -87,17 +88,17 @@ def _model(jdata: Dict[str, Any], smooth: bool) -> Dict[str, Dict[str, Any]]: return model -def _nonsmth_descriptor(jdata: Dict[str, Any]) -> Dict[str, Any]: +def _nonsmth_descriptor(jdata: dict[str, Any]) -> dict[str, Any]: """Convert data to v1 input for non-smooth descriptor. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] parsed input json/yaml data Returns ------- - Dict[str, Any] + dict[str, Any] dict with descriptor parameters """ descriptor = {} @@ -106,17 +107,17 @@ def _nonsmth_descriptor(jdata: Dict[str, Any]) -> Dict[str, Any]: return descriptor -def _smth_descriptor(jdata: Dict[str, Any]) -> Dict[str, Any]: +def _smth_descriptor(jdata: dict[str, Any]) -> dict[str, Any]: """Convert data to v1 input for smooth descriptor. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] parsed input json/yaml data Returns ------- - Dict[str, Any] + dict[str, Any] dict with descriptor parameters """ descriptor = {} @@ -136,17 +137,17 @@ def _smth_descriptor(jdata: Dict[str, Any]) -> Dict[str, Any]: return descriptor -def _fitting_net(jdata: Dict[str, Any]) -> Dict[str, Any]: +def _fitting_net(jdata: dict[str, Any]) -> dict[str, Any]: """Convert data to v1 input for fitting net. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] parsed input json/yaml data Returns ------- - Dict[str, Any] + dict[str, Any] dict with fitting net parameters """ fitting_net = {} @@ -163,17 +164,17 @@ def _fitting_net(jdata: Dict[str, Any]) -> Dict[str, Any]: return fitting_net -def _learning_rate(jdata: Dict[str, Any]) -> Dict[str, Any]: +def _learning_rate(jdata: dict[str, Any]) -> dict[str, Any]: """Convert data to v1 input for learning rate section. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] parsed input json/yaml data Returns ------- - Dict[str, Any] + dict[str, Any] dict with learning rate parameters """ learning_rate = {} @@ -182,20 +183,20 @@ def _learning_rate(jdata: Dict[str, Any]) -> Dict[str, Any]: return learning_rate -def _loss(jdata: Dict[str, Any]) -> Dict[str, Any]: +def _loss(jdata: dict[str, Any]) -> dict[str, Any]: """Convert data to v1 input for loss function. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] parsed input json/yaml data Returns ------- - Dict[str, Any] + dict[str, Any] dict with loss function parameters """ - loss: Dict[str, Any] = {} + loss: dict[str, Any] = {} _jcopy( jdata, loss, @@ -215,17 +216,17 @@ def _loss(jdata: Dict[str, Any]) -> Dict[str, Any]: return loss -def _training(jdata: Dict[str, Any]) -> Dict[str, Any]: +def _training(jdata: dict[str, Any]) -> dict[str, Any]: """Convert data to v1 input for training. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] parsed input json/yaml data Returns ------- - Dict[str, Any] + dict[str, Any] dict with training parameters """ training = {} @@ -250,14 +251,14 @@ def _training(jdata: Dict[str, Any]) -> Dict[str, Any]: return training -def _jcopy(src: Dict[str, Any], dst: Dict[str, Any], keys: Sequence[str]): +def _jcopy(src: dict[str, Any], dst: dict[str, Any], keys: Sequence[str]): """Copy specified keys from one dict to another. Parameters ---------- - src : Dict[str, Any] + src : dict[str, Any] source dictionary - dst : Dict[str, Any] + dst : dict[str, Any] destination dictionary, will be modified in place keys : Sequence[str] list of keys to copy @@ -267,12 +268,12 @@ def _jcopy(src: Dict[str, Any], dst: Dict[str, Any], keys: Sequence[str]): dst[k] = src[k] -def remove_decay_rate(jdata: Dict[str, Any]): +def remove_decay_rate(jdata: dict[str, Any]): """Convert decay_rate to stop_lr. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] input data """ lr = jdata["learning_rate"] @@ -287,8 +288,8 @@ def remove_decay_rate(jdata: Dict[str, Any]): def convert_input_v1_v2( - jdata: Dict[str, Any], warning: bool = True, dump: Optional[Union[str, Path]] = None -) -> Dict[str, Any]: + jdata: dict[str, Any], warning: bool = True, dump: Optional[Union[str, Path]] = None +) -> dict[str, Any]: tr_cfg = jdata["training"] tr_data_keys = { "systems", @@ -334,15 +335,15 @@ def _warning_input_v1_v2(fname: Optional[Union[str, Path]]): def deprecate_numb_test( - jdata: Dict[str, Any], warning: bool = True, dump: Optional[Union[str, Path]] = None -) -> Dict[str, Any]: + jdata: dict[str, Any], warning: bool = True, dump: Optional[Union[str, Path]] = None +) -> dict[str, Any]: """Deprecate `numb_test` since v2.1. It has taken no effect since v2.0. See `#1243 `_. Parameters ---------- - jdata : Dict[str, Any] + jdata : dict[str, Any] loaded json/yaml file warning : bool, optional whether to show deprecation warning, by default True @@ -351,7 +352,7 @@ def deprecate_numb_test( Returns ------- - Dict[str, Any] + dict[str, Any] converted output """ try: @@ -372,8 +373,8 @@ def deprecate_numb_test( def update_deepmd_input( - jdata: Dict[str, Any], warning: bool = True, dump: Optional[Union[str, Path]] = None -) -> Dict[str, Any]: + jdata: dict[str, Any], warning: bool = True, dump: Optional[Union[str, Path]] = None +) -> dict[str, Any]: def is_deepmd_v0_input(jdata): return "model" not in jdata.keys() diff --git a/deepmd/utils/data.py b/deepmd/utils/data.py index 5d324afb95..4c77bcf59a 100644 --- a/deepmd/utils/data.py +++ b/deepmd/utils/data.py @@ -4,7 +4,6 @@ import bisect import logging from typing import ( - List, Optional, ) @@ -53,7 +52,7 @@ def __init__( sys_path: str, set_prefix: str = "set", shuffle_test: bool = True, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, optional_type_map: bool = True, modifier=None, trn_all_set: bool = False, @@ -134,7 +133,7 @@ def add( atomic: bool = False, must: bool = False, high_prec: bool = False, - type_sel: Optional[List[int]] = None, + type_sel: Optional[list[int]] = None, repeat: int = 1, default: float = 0.0, dtype: Optional[np.dtype] = None, @@ -304,11 +303,11 @@ def get_ntypes(self) -> int: else: return max(self.get_atom_type()) + 1 - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" return self.type_map - def get_atom_type(self) -> List[int]: + def get_atom_type(self) -> list[int]: """Get atom types.""" return self.atom_type @@ -738,7 +737,7 @@ def __init__( atomic: bool = False, must: bool = False, high_prec: bool = False, - type_sel: Optional[List[int]] = None, + type_sel: Optional[list[int]] = None, repeat: int = 1, default: float = 0.0, dtype: Optional[np.dtype] = None, diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py index 235930527b..e499163e6a 100644 --- a/deepmd/utils/data_system.py +++ b/deepmd/utils/data_system.py @@ -3,12 +3,10 @@ import logging import warnings from functools import ( - lru_cache, + cache, ) from typing import ( Any, - Dict, - List, Optional, Union, ) @@ -45,13 +43,13 @@ class DeepmdDataSystem: def __init__( self, - systems: List[str], + systems: list[str], batch_size: int, test_size: int, rcut: Optional[float] = None, set_prefix: str = "set", shuffle_test: bool = True, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, optional_type_map: bool = True, modifier=None, trn_all_set=False, @@ -241,8 +239,8 @@ def _load_test(self, ntests=-1): self.test_data[nn].append(test_system_data[nn]) @property - @lru_cache(maxsize=None) - def default_mesh(self) -> List[np.ndarray]: + @cache + def default_mesh(self) -> list[np.ndarray]: """Mesh for each system.""" return [ make_default_mesh( @@ -266,7 +264,7 @@ def compute_energy_shift(self, rcond=None, key="energy"): ) return energy_shift.ravel() - def add_dict(self, adict: Dict[str, Dict[str, Any]]) -> None: + def add_dict(self, adict: dict[str, dict[str, Any]]) -> None: """Add items to the data system by a `dict`. `adict` should have items like .. code-block:: python. @@ -299,7 +297,7 @@ def add_dict(self, adict: Dict[str, Dict[str, Any]]) -> None: ) def add_data_requirements( - self, data_requirements: List[DataRequirementItem] + self, data_requirements: list[DataRequirementItem] ) -> None: """Add items to the data system by a list of `DataRequirementItem`.""" self.add_dict({rr.key: rr.dict for rr in data_requirements}) @@ -311,7 +309,7 @@ def add( atomic: bool = False, must: bool = False, high_prec: bool = False, - type_sel: Optional[List[int]] = None, + type_sel: Optional[list[int]] = None, repeat: int = 1, default: float = 0.0, dtype: Optional[np.dtype] = None, @@ -468,7 +466,7 @@ def get_batch_mixed(self) -> dict: b_data = self._merge_batch_data(batch_data) return b_data - def _merge_batch_data(self, batch_data: List[dict]) -> dict: + def _merge_batch_data(self, batch_data: list[dict]) -> dict: """Merge batch data from different systems. Parameters @@ -550,7 +548,7 @@ def get_sys_ntest(self, sys_idx=None): else: return self.test_size[self.pick_idx] - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: """Get the type map.""" return self.type_map @@ -635,12 +633,12 @@ def _format_name_length(name, width): def print_summary( name: str, nsystems: int, - system_dirs: List[str], - natoms: List[int], - batch_size: List[int], - nbatches: List[int], - sys_probs: List[float], - pbc: List[bool], + system_dirs: list[str], + natoms: list[int], + batch_size: list[int], + nbatches: list[int], + sys_probs: list[float], + pbc: list[bool], ): """Print summary of systems. @@ -732,7 +730,7 @@ def prob_sys_size_ext(keywords, nsystems, nbatch): return sys_probs -def process_systems(systems: Union[str, List[str]]) -> List[str]: +def process_systems(systems: Union[str, list[str]]) -> list[str]: """Process the user-input systems. If it is a single directory, search for all the systems in the directory. @@ -773,7 +771,7 @@ def process_systems(systems: Union[str, List[str]]) -> List[str]: def get_data( - jdata: Dict[str, Any], rcut, type_map, modifier, multi_task_mode=False + jdata: dict[str, Any], rcut, type_map, modifier, multi_task_mode=False ) -> DeepmdDataSystem: """Get the data system. diff --git a/deepmd/utils/econf_embd.py b/deepmd/utils/econf_embd.py index 7f12206ae3..99c7edf284 100644 --- a/deepmd/utils/econf_embd.py +++ b/deepmd/utils/econf_embd.py @@ -1,8 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Dict, - List, -) import numpy as np from mendeleev import ( @@ -228,8 +224,8 @@ def make_element_embedding_list_vec( def make_econf_embedding( - types: List[str], flatten: bool = True -) -> Dict[str, np.ndarray]: + types: list[str], flatten: bool = True +) -> dict[str, np.ndarray]: """Make the electronic configuration embedding.""" all_ret = {} for ii in types: @@ -240,7 +236,7 @@ def make_econf_embedding( return all_ret -def transform_to_spin_rep(res: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: +def transform_to_spin_rep(res: dict[str, np.ndarray]) -> dict[str, np.ndarray]: """Tranform electron occupation of 0/1/2 to -1,-1/-1,1/1,1.""" ret = {} @@ -261,7 +257,7 @@ def transform(ii): return ret -def print_econf_embedding(res: Dict[str, np.ndarray]): +def print_econf_embedding(res: dict[str, np.ndarray]): """Print electron configuration embedding.""" for kk, vv in res.items(): vvstr = ",".join([str(ii) for ii in vv]) diff --git a/deepmd/utils/env_mat_stat.py b/deepmd/utils/env_mat_stat.py index bbb43fd703..ecc0b7b62f 100644 --- a/deepmd/utils/env_mat_stat.py +++ b/deepmd/utils/env_mat_stat.py @@ -7,10 +7,10 @@ from collections import ( defaultdict, ) -from typing import ( - Dict, +from collections.abc import ( Iterator, - List, +) +from typing import ( Optional, ) @@ -98,12 +98,12 @@ def __init__(self) -> None: super().__init__() self.stats = defaultdict(StatItem) - def compute_stats(self, data: List[Dict[str, np.ndarray]]) -> None: + def compute_stats(self, data: list[dict[str, np.ndarray]]) -> None: """Compute the statistics of the environment matrix. Parameters ---------- - data : List[Dict[str, np.ndarray]] + data : list[dict[str, np.ndarray]] The environment matrix. """ if len(self.stats) > 0: @@ -113,17 +113,17 @@ def compute_stats(self, data: List[Dict[str, np.ndarray]]) -> None: self.stats[kk] += iter_stats[kk] @abstractmethod - def iter(self, data: List[Dict[str, np.ndarray]]) -> Iterator[Dict[str, StatItem]]: + def iter(self, data: list[dict[str, np.ndarray]]) -> Iterator[dict[str, StatItem]]: """Get the iterator of the environment matrix. Parameters ---------- - data : List[Dict[str, np.ndarray]] + data : list[dict[str, np.ndarray]] The environment matrix. Yields ------ - Dict[str, StatItem] + dict[str, StatItem] The statistics of the environment matrix. """ @@ -160,7 +160,7 @@ def load_stats(self, path: DPPath) -> None: ) def load_or_compute_stats( - self, data: List[Dict[str, np.ndarray]], path: Optional[DPPath] = None + self, data: list[dict[str, np.ndarray]], path: Optional[DPPath] = None ) -> None: """Load the statistics of the environment matrix if it exists, otherwise compute and save it. @@ -168,7 +168,7 @@ def load_or_compute_stats( ---------- path : DPPath The path to load the statistics of the environment matrix. - data : List[Dict[str, np.ndarray]] + data : list[dict[str, np.ndarray]] The environment matrix. """ if path is not None and path.is_dir(): @@ -180,7 +180,7 @@ def load_or_compute_stats( self.save_stats(path) log.info(f"Save stats to {path}.") - def get_avg(self, default: float = 0) -> Dict[str, float]: + def get_avg(self, default: float = 0) -> dict[str, float]: """Get the average of the environment matrix. Parameters @@ -190,14 +190,14 @@ def get_avg(self, default: float = 0) -> Dict[str, float]: Returns ------- - Dict[str, float] + dict[str, float] The average of the environment matrix. """ return {kk: vv.compute_avg(default=default) for kk, vv in self.stats.items()} def get_std( self, default: float = 1e-1, protection: float = 1e-2 - ) -> Dict[str, float]: + ) -> dict[str, float]: """Get the standard deviation of the environment matrix. Parameters @@ -209,7 +209,7 @@ def get_std( Returns ------- - Dict[str, float] + dict[str, float] The standard deviation of the environment matrix. """ return { diff --git a/deepmd/utils/finetune.py b/deepmd/utils/finetune.py index 9baa1b5aa8..d8d035a853 100644 --- a/deepmd/utils/finetune.py +++ b/deepmd/utils/finetune.py @@ -1,9 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging -from typing import ( - List, - Tuple, -) log = logging.getLogger(__name__) @@ -11,8 +7,8 @@ class FinetuneRuleItem: def __init__( self, - p_type_map: List[str], - type_map: List[str], + p_type_map: list[str], + type_map: list[str], model_branch: str = "Default", random_fitting: bool = False, resuming: bool = False, @@ -74,21 +70,21 @@ def get_finetune_tmap(self): def get_index_between_two_maps( - old_map: List[str], - new_map: List[str], + old_map: list[str], + new_map: list[str], ): """Returns the mapping index of types in new_map to those in the old_map. Parameters ---------- - old_map : List[str] + old_map : list[str] The old list of atom type names. - new_map : List[str] + new_map : list[str] The new list of atom type names. Returns ------- - index_map: List[int] + index_map: list[int] List contains `len(new_map)` indices, where `index_map[i]` is the index of `new_map[i]` in `old_map`. If `new_map[i]` is not in the `old_map`, the index will be `i - len(new_map)`. has_new_type: bool @@ -112,21 +108,21 @@ def get_index_between_two_maps( def map_atom_exclude_types( - atom_exclude_types: List[int], - remap_index: List[int], + atom_exclude_types: list[int], + remap_index: list[int], ): """Return the remapped atom_exclude_types according to remap_index. Parameters ---------- - atom_exclude_types : List[int] + atom_exclude_types : list[int] Exclude the atomic contribution of the given types. - remap_index : List[int] + remap_index : list[int] The indices in the old type list that correspond to the types in the new type list. Returns ------- - remapped_atom_exclude_types: List[int] + remapped_atom_exclude_types: list[int] Remapped atom_exclude_types that only keeps the types in the new type list. """ @@ -137,22 +133,22 @@ def map_atom_exclude_types( def map_pair_exclude_types( - pair_exclude_types: List[Tuple[int, int]], - remap_index: List[int], + pair_exclude_types: list[tuple[int, int]], + remap_index: list[int], ): """Return the remapped atom_exclude_types according to remap_index. Parameters ---------- - pair_exclude_types : List[Tuple[int, int]] + pair_exclude_types : list[tuple[int, int]] Exclude the pair of atoms of the given types from computing the output of the atomic model. - remap_index : List[int] + remap_index : list[int] The indices in the old type list that correspond to the types in the new type list. Returns ------- - remapped_pair_exclude_typess: List[Tuple[int, int]] + remapped_pair_exclude_typess: list[tuple[int, int]] Remapped pair_exclude_types that only keeps the types in the new type list. """ diff --git a/deepmd/utils/hostlist.py b/deepmd/utils/hostlist.py index c184b04031..4dac08af19 100644 --- a/deepmd/utils/hostlist.py +++ b/deepmd/utils/hostlist.py @@ -1,12 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import socket -from typing import ( - List, - Tuple, -) -def get_host_names() -> Tuple[str, List[str]]: +def get_host_names() -> tuple[str, list[str]]: """Get host names of all nodes in the cluster. If mpi4py is not installed or MPI is not used, then the @@ -16,7 +12,7 @@ def get_host_names() -> Tuple[str, List[str]]: ------- str Host name of the current node - List[str] + list[str] List of host names of all nodes in the cluster """ host_name = socket.gethostname() diff --git a/deepmd/utils/neighbor_stat.py b/deepmd/utils/neighbor_stat.py index 54a4c16b24..40e629d9db 100644 --- a/deepmd/utils/neighbor_stat.py +++ b/deepmd/utils/neighbor_stat.py @@ -5,9 +5,8 @@ ABC, abstractmethod, ) -from typing import ( +from collections.abc import ( Iterator, - Tuple, ) import numpy as np @@ -46,7 +45,7 @@ def __init__( self.ntypes = ntypes self.mixed_type = mixed_type - def get_stat(self, data: DeepmdDataSystem) -> Tuple[float, np.ndarray]: + def get_stat(self, data: DeepmdDataSystem) -> tuple[float, np.ndarray]: """Get the data statistics of the training data, including nearest nbor distance between atoms, max nbor size of atoms. Parameters @@ -89,7 +88,7 @@ def get_stat(self, data: DeepmdDataSystem) -> Tuple[float, np.ndarray]: @abstractmethod def iterator( self, data: DeepmdDataSystem - ) -> Iterator[Tuple[np.ndarray, float, str]]: + ) -> Iterator[tuple[np.ndarray, float, str]]: """Abstract method for producing data. Yields diff --git a/deepmd/utils/out_stat.py b/deepmd/utils/out_stat.py index fd09e6815b..43af191e62 100644 --- a/deepmd/utils/out_stat.py +++ b/deepmd/utils/out_stat.py @@ -3,7 +3,6 @@ from typing import ( Optional, - Tuple, ) import numpy as np @@ -14,7 +13,7 @@ def compute_stats_from_redu( natoms: np.ndarray, assigned_bias: Optional[np.ndarray] = None, rcond: Optional[float] = None, -) -> Tuple[np.ndarray, np.ndarray]: +) -> tuple[np.ndarray, np.ndarray]: """Compute the output statistics. Given the reduced output value and the number of atoms for each atom, @@ -86,7 +85,7 @@ def compute_stats_from_redu( def compute_stats_from_atomic( output: np.ndarray, atype: np.ndarray, -) -> Tuple[np.ndarray, np.ndarray]: +) -> tuple[np.ndarray, np.ndarray]: """Compute the output statistics. Given the output value and the type of atoms, diff --git a/deepmd/utils/pair_tab.py b/deepmd/utils/pair_tab.py index 73980a2fd6..cddc358f27 100644 --- a/deepmd/utils/pair_tab.py +++ b/deepmd/utils/pair_tab.py @@ -4,7 +4,6 @@ import logging from typing import ( Optional, - Tuple, ) import numpy as np @@ -199,7 +198,7 @@ def _check_table_upper_boundary(self) -> None: self.vdata = np.concatenate((self.vdata, pad_extrapolation), axis=0) - def get(self) -> Tuple[np.array, np.array]: + def get(self) -> tuple[np.array, np.array]: """Get the serialized table.""" return self.tab_info, self.tab_data diff --git a/deepmd/utils/path.py b/deepmd/utils/path.py index e794a36cab..6c52caac1d 100644 --- a/deepmd/utils/path.py +++ b/deepmd/utils/path.py @@ -13,8 +13,6 @@ ) from typing import ( ClassVar, - Dict, - List, Optional, ) @@ -77,7 +75,7 @@ def save_numpy(self, arr: np.ndarray) -> None: """ @abstractmethod - def glob(self, pattern: str) -> List["DPPath"]: + def glob(self, pattern: str) -> list["DPPath"]: """Search path using the glob pattern. Parameters @@ -87,12 +85,12 @@ def glob(self, pattern: str) -> List["DPPath"]: Returns ------- - List[DPPath] + list[DPPath] list of paths """ @abstractmethod - def rglob(self, pattern: str) -> List["DPPath"]: + def rglob(self, pattern: str) -> list["DPPath"]: """This is like calling :meth:`DPPath.glob()` with `**/` added in front of the given relative pattern. @@ -103,7 +101,7 @@ def rglob(self, pattern: str) -> List["DPPath"]: Returns ------- - List[DPPath] + list[DPPath] list of paths """ @@ -206,7 +204,7 @@ def save_numpy(self, arr: np.ndarray) -> None: with self.path.open("wb") as f: np.save(f, arr) - def glob(self, pattern: str) -> List["DPPath"]: + def glob(self, pattern: str) -> list["DPPath"]: """Search path using the glob pattern. Parameters @@ -216,13 +214,13 @@ def glob(self, pattern: str) -> List["DPPath"]: Returns ------- - List[DPPath] + list[DPPath] list of paths """ # currently DPOSPath will only derivative DPOSPath return [type(self)(p, mode=self.mode) for p in self.path.glob(pattern)] - def rglob(self, pattern: str) -> List["DPPath"]: + def rglob(self, pattern: str) -> list["DPPath"]: """This is like calling :meth:`DPPath.glob()` with `**/` added in front of the given relative pattern. @@ -233,7 +231,7 @@ def rglob(self, pattern: str) -> List["DPPath"]: Returns ------- - List[DPPath] + list[DPPath] list of paths """ return [type(self)(p, mode=self.mode) for p in self.path.rglob(pattern)] @@ -360,7 +358,7 @@ def save_numpy(self, arr: np.ndarray) -> None: self.root.flush() self._new_keys.append(self._name) - def glob(self, pattern: str) -> List["DPPath"]: + def glob(self, pattern: str) -> list["DPPath"]: """Search path using the glob pattern. Parameters @@ -370,7 +368,7 @@ def glob(self, pattern: str) -> List["DPPath"]: Returns ------- - List[DPPath] + list[DPPath] list of paths """ # got paths starts with current path first, which is faster @@ -384,7 +382,7 @@ def glob(self, pattern: str) -> List["DPPath"]: for pp in globfilter(subpaths, self._connect_path(pattern)) ] - def rglob(self, pattern: str) -> List["DPPath"]: + def rglob(self, pattern: str) -> list["DPPath"]: """This is like calling :meth:`DPPath.glob()` with `**/` added in front of the given relative pattern. @@ -395,17 +393,17 @@ def rglob(self, pattern: str) -> List["DPPath"]: Returns ------- - List[DPPath] + list[DPPath] list of paths """ return self.glob("**" + pattern) @property - def _keys(self) -> List[str]: + def _keys(self) -> list[str]: """Walk all groups and dataset.""" return self._file_keys(self.root) - __file_new_keys: ClassVar[Dict[h5py.File, List[str]]] = {} + __file_new_keys: ClassVar[dict[h5py.File, list[str]]] = {} @property def _new_keys(self): @@ -415,7 +413,7 @@ def _new_keys(self): @classmethod @lru_cache(None) - def _file_keys(cls, file: h5py.File) -> List[str]: + def _file_keys(cls, file: h5py.File) -> list[str]: """Walk all groups and dataset.""" l = [] file.visit(lambda x: l.append("/" + x)) diff --git a/deepmd/utils/plugin.py b/deepmd/utils/plugin.py index b5c89eb4d3..ce8b015ddf 100644 --- a/deepmd/utils/plugin.py +++ b/deepmd/utils/plugin.py @@ -8,9 +8,7 @@ ) from typing import ( Callable, - Dict, Optional, - Type, ) @@ -19,7 +17,7 @@ class Plugin: Attributes ---------- - plugins : Dict[str, object] + plugins : dict[str, object] plugins Examples @@ -99,7 +97,7 @@ class PluginVariant(metaclass=VariantABCMeta): pass -def make_plugin_registry(name: Optional[str] = None) -> Type[object]: +def make_plugin_registry(name: Optional[str] = None) -> type[object]: """Make a plugin registry. Parameters @@ -141,7 +139,7 @@ class SomeClass(BaseClass): return PR.__plugins.register(key) @classmethod - def get_class_by_type(cls, class_type: str) -> Type[object]: + def get_class_by_type(cls, class_type: str) -> type[object]: """Get the class by the plugin type.""" if class_type in PR.__plugins.plugins: return PR.__plugins.plugins[class_type] @@ -154,7 +152,7 @@ def get_class_by_type(cls, class_type: str) -> Type[object]: raise RuntimeError(f"Unknown {name} type: {class_type}. {dym_message}") @classmethod - def get_plugins(cls) -> Dict[str, Type[object]]: + def get_plugins(cls) -> dict[str, type[object]]: """Get all the registered plugins.""" return PR.__plugins.plugins diff --git a/deepmd/utils/random.py b/deepmd/utils/random.py index 44ea6a1dac..440faca177 100644 --- a/deepmd/utils/random.py +++ b/deepmd/utils/random.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Optional, - Tuple, Union, ) @@ -12,7 +11,7 @@ def choice( a: Union[np.ndarray, int], - size: Optional[Union[int, Tuple[int, ...]]] = None, + size: Optional[Union[int, tuple[int, ...]]] = None, replace: bool = True, p: Optional[np.ndarray] = None, ): diff --git a/deepmd/utils/spin.py b/deepmd/utils/spin.py index 101867d3e4..41ea52df88 100644 --- a/deepmd/utils/spin.py +++ b/deepmd/utils/spin.py @@ -1,8 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import copy from typing import ( - List, - Tuple, Union, ) @@ -20,10 +18,10 @@ class Spin: Parameters ---------- - use_spin: List[bool] + use_spin: list[bool] A list of boolean values indicating whether to use atomic spin for each atom type. True for spin and False for not. List of bool values with shape of [ntypes]. - virtual_scale: List[float], float + virtual_scale: list[float], float The scaling factor to determine the virtual distance between a virtual atom representing spin and its corresponding real atom for each atom type with spin. This factor is defined as the virtual distance @@ -35,8 +33,8 @@ class Spin: def __init__( self, - use_spin: List[bool], - virtual_scale: Union[List[float], float], + use_spin: list[bool], + virtual_scale: Union[list[float], float], ) -> None: self.ntypes_real = len(use_spin) self.ntypes_spin = use_spin.count(True) @@ -93,7 +91,7 @@ def get_ntypes_input(self) -> int: """Returns the number of double real atom types for input placeholder.""" return self.ntypes_input - def get_use_spin(self) -> List[bool]: + def get_use_spin(self) -> list[bool]: """Returns the list of whether to use spin for each atom type.""" return self.use_spin @@ -127,7 +125,7 @@ def init_atom_exclude_types_placeholder(self) -> None: """ self.atom_exclude_types_p = self.placeholder_type.tolist() - def get_pair_exclude_types(self, exclude_types=None) -> List[Tuple[int, int]]: + def get_pair_exclude_types(self, exclude_types=None) -> list[tuple[int, int]]: """ Return the pair-wise exclusion types for descriptor. The placeholder types for those without spin are excluded. @@ -135,7 +133,7 @@ def get_pair_exclude_types(self, exclude_types=None) -> List[Tuple[int, int]]: if exclude_types is None: return self.pair_exclude_types else: - _exclude_types: List[Tuple[int, int]] = copy.deepcopy( + _exclude_types: list[tuple[int, int]] = copy.deepcopy( self.pair_exclude_types ) for tt in exclude_types: @@ -143,7 +141,7 @@ def get_pair_exclude_types(self, exclude_types=None) -> List[Tuple[int, int]]: _exclude_types.append((tt[0], tt[1])) return _exclude_types - def get_atom_exclude_types(self, exclude_types=None) -> List[int]: + def get_atom_exclude_types(self, exclude_types=None) -> list[int]: """ Return the atom-wise exclusion types for fitting before out_def. Both the placeholder types and spin types are excluded. @@ -151,12 +149,12 @@ def get_atom_exclude_types(self, exclude_types=None) -> List[int]: if exclude_types is None: return self.atom_exclude_types_ps else: - _exclude_types: List[int] = copy.deepcopy(self.atom_exclude_types_ps) + _exclude_types: list[int] = copy.deepcopy(self.atom_exclude_types_ps) _exclude_types += exclude_types _exclude_types = list(set(_exclude_types)) return _exclude_types - def get_atom_exclude_types_placeholder(self, exclude_types=None) -> List[int]: + def get_atom_exclude_types_placeholder(self, exclude_types=None) -> list[int]: """ Return the atom-wise exclusion types for fitting after out_def. The placeholder types for those without spin are excluded. @@ -164,7 +162,7 @@ def get_atom_exclude_types_placeholder(self, exclude_types=None) -> List[int]: if exclude_types is None: return self.atom_exclude_types_p else: - _exclude_types: List[int] = copy.deepcopy(self.atom_exclude_types_p) + _exclude_types: list[int] = copy.deepcopy(self.atom_exclude_types_p) _exclude_types += exclude_types _exclude_types = list(set(_exclude_types)) return _exclude_types diff --git a/deepmd/utils/update_sel.py b/deepmd/utils/update_sel.py index 6feed525e5..ba1457b19c 100644 --- a/deepmd/utils/update_sel.py +++ b/deepmd/utils/update_sel.py @@ -5,10 +5,7 @@ abstractmethod, ) from typing import ( - List, Optional, - Tuple, - Type, Union, ) @@ -28,11 +25,11 @@ class BaseUpdateSel(ABC): def update_one_sel( self, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], rcut: float, - sel: Union[int, List[int], str], + sel: Union[int, list[int], str], mixed_type: bool = False, - ) -> Tuple[float, List[int]]: + ) -> tuple[float, list[int]]: min_nbor_dist, tmp_sel = self.get_nbor_stat( train_data, type_map, @@ -86,17 +83,17 @@ def wrap_up_4(self, xx): def get_nbor_stat( self, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], rcut: float, mixed_type: bool = False, - ) -> Tuple[float, Union[int, List[int]]]: + ) -> tuple[float, Union[int, list[int]]]: """Get the neighbor statistics of the data. Parameters ---------- train_data : DeepmdDataSystem The training data. - type_map : Optional[List[str]] + type_map : Optional[list[str]] The type map. rcut : float The cutoff radius. @@ -107,7 +104,7 @@ def get_nbor_stat( ------- min_nbor_dist : float The minimum neighbor distance. - max_nbor_size : List[int] + max_nbor_size : list[int] The maximum neighbor size. """ if type_map and len(type_map) == 0: @@ -128,7 +125,7 @@ def get_nbor_stat( @property @abstractmethod - def neighbor_stat(self) -> Type[NeighborStat]: + def neighbor_stat(self) -> type[NeighborStat]: pass def get_min_nbor_dist( diff --git a/deepmd/utils/weight_avg.py b/deepmd/utils/weight_avg.py index b344d3bb75..7c75d18e68 100644 --- a/deepmd/utils/weight_avg.py +++ b/deepmd/utils/weight_avg.py @@ -2,21 +2,16 @@ from collections import ( defaultdict, ) -from typing import ( - Dict, - List, - Tuple, -) import numpy as np -def weighted_average(errors: List[Dict[str, Tuple[float, float]]]) -> Dict: +def weighted_average(errors: list[dict[str, tuple[float, float]]]) -> dict: """Compute wighted average of prediction errors (MAE or RMSE) for model. Parameters ---------- - errors : List[Dict[str, Tuple[float, float]]] + errors : list[dict[str, tuple[float, float]]] List: the error of systems Dict: the error of quantities, name given by the key str: the name of the quantity, must starts with 'mae' or 'rmse' diff --git a/doc/development/coding-conventions.rst b/doc/development/coding-conventions.rst index 137b0d0d51..bf186d1231 100644 --- a/doc/development/coding-conventions.rst +++ b/doc/development/coding-conventions.rst @@ -30,7 +30,7 @@ Rules ----- The code must be compatible with the oldest supported version of python -which is 3.8. +which is 3.9. The project follows the generic coding conventions as specified in the `Style Guide for Python Code`_, `Docstring diff --git a/doc/development/create-a-model-pt.md b/doc/development/create-a-model-pt.md index 2ab0dee18f..257dd8a25d 100644 --- a/doc/development/create-a-model-pt.md +++ b/doc/development/create-a-model-pt.md @@ -73,7 +73,7 @@ class SomeDescript(BaseDescriptor, torch.nn.Module): def update_sel( cls, train_data: DeepmdDataSystem, - type_map: Optional[List[str]], + type_map: Optional[list[str]], local_jdata: dict, ): pass @@ -149,7 +149,7 @@ from deepmd.utils.argcheck import descrpt_args_plugin @descrpt_args_plugin.register("some_descrpt") -def descrpt_some_args() -> List[Argument]: +def descrpt_some_args() -> list[Argument]: return [ Argument("arg1", bool, optional=False, doc="balabala"), Argument("arg2", float, optional=True, default=6.0, doc="haha"), diff --git a/doc/development/create-a-model-tf.md b/doc/development/create-a-model-tf.md index 9ab3525bb5..95a2f66f23 100644 --- a/doc/development/create-a-model-tf.md +++ b/doc/development/create-a-model-tf.md @@ -37,7 +37,7 @@ from deepmd.utils.argcheck import descrpt_args_plugin @descrpt_args_plugin.register("some_descrpt") -def descrpt_some_args() -> List[Argument]: +def descrpt_some_args() -> list[Argument]: return [ Argument("arg1", bool, optional=False, doc="balabala"), Argument("arg2", float, optional=True, default=6.0, doc="haha"), diff --git a/doc/getting-started/quick_start.ipynb b/doc/getting-started/quick_start.ipynb index d1c45ad0b8..0c9563b9e9 100644 --- a/doc/getting-started/quick_start.ipynb +++ b/doc/getting-started/quick_start.ipynb @@ -523,7 +523,7 @@ " color: #bbbbff;\n", "}\n", "\n", - "
{
  \"_comment\": \"that's all\",
  \"model\"model:
type: dict
: {
    \"type_map\"type_map:
type: typing.List[str], optional
A list of strings. Give the name to each type of atoms. It is noted that the number of atom type of training system must be less than 128 in a GPU environment. If not given, type.raw in each system should use the same type indexes, and type_map.raw will take no effect.
: [
     \"H\",
     \"C\"
    ],

    \"descriptor\"descriptor:
type: dict
The descriptor of atomic environment.
: {
      \"type\"type:
type: str
The type of the descritpor. See explanation below.
- loc_frame: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.
- se_e2_a: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.
- se_e2_r: Used by the smooth edition of Deep Potential. Only the distance between atoms is used to construct the descriptor.
- se_e3: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Three-body embedding will be used by this descriptor.
- se_a_tpe: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Type embedding will be used by this descriptor.
- se_atten: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism will be used by this descriptor.
- se_atten_v2: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism with new modifications will be used by this descriptor.
- se_a_mask: Used by the smooth edition of Deep Potential. It can accept a variable number of atoms in a frame (Non-PBC system). aparam are required as an indicator matrix for the real/virtual sign of input atoms.
- hybrid: Concatenate of a list of descriptors as a new descriptor.
: \"se_e2_a\",
      \"sel\"sel:
type: str | typing.List[int], optional, default: auto
This parameter set the number of selected neighbors for each type of atom. It can be:
- List[int]. The length of the list should be the same as the number of atom types in the system. sel[i] gives the selected number of type-i neighbors. sel[i] is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.
- str. Can be \"auto:factor\" or \"auto\". \"factor\" is a float number larger than 1. This option will automatically determine the sel. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the \"factor\". Finally the number is wraped up to 4 divisible. The option \"auto\" is equivalent to \"auto:1.1\".
: \"auto\",
      \"rcut_smth\"rcut_smth:
type: float, optional, default: 0.5
Where to start smoothing. For example the 1/r term is smoothed from rcut to rcut_smth
: 0.5,
      \"rcut\"rcut:
type: float, optional, default: 6.0
The cut-off radius.
: 6.0,
      \"neuron\"neuron:
type: typing.List[int], optional, default: [10, 20, 40]
Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built.
: [
       25,
       50,
       100
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: False
Whether to use a \"Timestep\" in the skip connection
: false,
      \"axis_neuron\"axis_neuron:
type: int, optional, default: 4, alias: n_axis_neuron
Size of the submatrix of G (embedding matrix).
: 16,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization
: 1,
      \"_comment\": \" that's all\"
    },
    \"fitting_net\"fitting_net:
type: dict
The fitting of physical properties.
: {
      \"neuron\"neuron:
type: typing.List[int], optional, default: [120, 120, 120], alias: n_neuron
The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built.
: [
       240,
       240,
       240
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: True
Whether to use a \"Timestep\" in the skip connection
: true,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization of the fitting net
: 1,
      \"_comment\": \" that's all\"
    },
    \"_comment\": \" that's all\"
  },
  \"learning_rate\"learning_rate:
type: dict, optional
The definitio of learning rate
: {
    \"type\"type:
type: str, default: exp
The type of the learning rate.
: \"exp\",
    \"decay_steps\"decay_steps:
type: int, optional, default: 5000
The learning rate is decaying every this number of training steps.
: 50,
    \"start_lr\"start_lr:
type: float, optional, default: 0.001
The learning rate at the start of the training.
: 0.001,
    \"stop_lr\"stop_lr:
type: float, optional, default: 1e-08
The desired learning rate at the end of the training.
: 3.51e-08,
    \"_comment\": \"that's all\"
  },
  \"loss\"loss:
type: dict, optional
The definition of loss function. The loss type should be set to tensor, ener or left unset.
: {
    \"type\"type:
type: str, default: ener
The type of the loss. When the fitting type is ener, the loss type should be set to ener or left unset. When the fitting type is dipole or polar, the loss type should be set to tensor.
: \"ener\",
    \"start_pref_e\"start_pref_e:
type: float | int, optional, default: 0.02
The prefactor of energy loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the energy label should be provided by file energy.npy in each data system. If both start_pref_e and limit_pref_e are set to 0, then the energy will be ignored.
: 0.02,
    \"limit_pref_e\"limit_pref_e:
type: float | int, optional, default: 1.0
The prefactor of energy loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_f\"start_pref_f:
type: float | int, optional, default: 1000
The prefactor of force loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the force label should be provided by file force.npy in each data system. If both start_pref_f and limit_pref_f are set to 0, then the force will be ignored.
: 1000,
    \"limit_pref_f\"limit_pref_f:
type: float | int, optional, default: 1.0
The prefactor of force loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_v\"start_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the virial label should be provided by file virial.npy in each data system. If both start_pref_v and limit_pref_v are set to 0, then the virial will be ignored.
: 0,
    \"limit_pref_v\"limit_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 0,
    \"_comment\": \" that's all\"
  },
  \"training\"training:
type: dict
The training options.
: {
    \"training_data\"training_data:
type: dict, optional
Configurations of training data.
: {
      \"systems\"systems:
type: str | typing.List[str]
The data systems for training. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/training_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.List[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
- string \"mixed:N\": the batch data will be sampled from all systems and merged into a mixed system with the batch size N. Only support the se_atten descriptor.
If MPI is used, the value should be considered as the batch size per task.
: \"auto\",
      \"_comment\": \"that's all\"
    },
    \"validation_data\"validation_data:
type: NoneType | dict, optional, default: None
Configurations of validation data. Similar to that of training data, except that a numb_btch argument may be configured
: {
      \"systems\"systems:
type: str | typing.List[str]
The data systems for validation. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/validation_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.List[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
: \"auto\",
      \"numb_btch\"numb_btch:
type: int, optional, default: 1, alias: numb_batch
An integer that specifies the number of batches to be sampled for each validation period.
: 1,
      \"_comment\": \"that's all\"
    },
    \"numb_steps\"numb_steps:
type: int, alias: stop_batch
Number of training batch. Each training uses one batch of data.
: 10000,
    \"seed\"seed:
type: NoneType | int, optional
The random seed for getting frames from the training data set.
: 10,
    \"disp_file\"disp_file:
type: str, optional, default: lcurve.out
The file for printing learning curve.
: \"lcurve.out\",
    \"disp_freq\"disp_freq:
type: int, optional, default: 1000
The frequency of printing learning curve.
: 200,
    \"save_freq\"save_freq:
type: int, optional, default: 1000
The frequency of saving check point.
: 1000,
    \"_comment\": \"that's all\"
  }
}
" + "
{
  \"_comment\": \"that's all\",
  \"model\"model:
type: dict
: {
    \"type_map\"type_map:
type: typing.list[str], optional
A list of strings. Give the name to each type of atoms. It is noted that the number of atom type of training system must be less than 128 in a GPU environment. If not given, type.raw in each system should use the same type indexes, and type_map.raw will take no effect.
: [
     \"H\",
     \"C\"
    ],

    \"descriptor\"descriptor:
type: dict
The descriptor of atomic environment.
: {
      \"type\"type:
type: str
The type of the descritpor. See explanation below.
- loc_frame: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.
- se_e2_a: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.
- se_e2_r: Used by the smooth edition of Deep Potential. Only the distance between atoms is used to construct the descriptor.
- se_e3: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Three-body embedding will be used by this descriptor.
- se_a_tpe: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Type embedding will be used by this descriptor.
- se_atten: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism will be used by this descriptor.
- se_atten_v2: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism with new modifications will be used by this descriptor.
- se_a_mask: Used by the smooth edition of Deep Potential. It can accept a variable number of atoms in a frame (Non-PBC system). aparam are required as an indicator matrix for the real/virtual sign of input atoms.
- hybrid: Concatenate of a list of descriptors as a new descriptor.
: \"se_e2_a\",
      \"sel\"sel:
type: str | typing.list[int], optional, default: auto
This parameter set the number of selected neighbors for each type of atom. It can be:
- list[int]. The length of the list should be the same as the number of atom types in the system. sel[i] gives the selected number of type-i neighbors. sel[i] is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.
- str. Can be \"auto:factor\" or \"auto\". \"factor\" is a float number larger than 1. This option will automatically determine the sel. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the \"factor\". Finally the number is wraped up to 4 divisible. The option \"auto\" is equivalent to \"auto:1.1\".
: \"auto\",
      \"rcut_smth\"rcut_smth:
type: float, optional, default: 0.5
Where to start smoothing. For example the 1/r term is smoothed from rcut to rcut_smth
: 0.5,
      \"rcut\"rcut:
type: float, optional, default: 6.0
The cut-off radius.
: 6.0,
      \"neuron\"neuron:
type: typing.list[int], optional, default: [10, 20, 40]
Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built.
: [
       25,
       50,
       100
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: False
Whether to use a \"Timestep\" in the skip connection
: false,
      \"axis_neuron\"axis_neuron:
type: int, optional, default: 4, alias: n_axis_neuron
Size of the submatrix of G (embedding matrix).
: 16,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization
: 1,
      \"_comment\": \" that's all\"
    },
    \"fitting_net\"fitting_net:
type: dict
The fitting of physical properties.
: {
      \"neuron\"neuron:
type: typing.list[int], optional, default: [120, 120, 120], alias: n_neuron
The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built.
: [
       240,
       240,
       240
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: True
Whether to use a \"Timestep\" in the skip connection
: true,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization of the fitting net
: 1,
      \"_comment\": \" that's all\"
    },
    \"_comment\": \" that's all\"
  },
  \"learning_rate\"learning_rate:
type: dict, optional
The definitio of learning rate
: {
    \"type\"type:
type: str, default: exp
The type of the learning rate.
: \"exp\",
    \"decay_steps\"decay_steps:
type: int, optional, default: 5000
The learning rate is decaying every this number of training steps.
: 50,
    \"start_lr\"start_lr:
type: float, optional, default: 0.001
The learning rate at the start of the training.
: 0.001,
    \"stop_lr\"stop_lr:
type: float, optional, default: 1e-08
The desired learning rate at the end of the training.
: 3.51e-08,
    \"_comment\": \"that's all\"
  },
  \"loss\"loss:
type: dict, optional
The definition of loss function. The loss type should be set to tensor, ener or left unset.
: {
    \"type\"type:
type: str, default: ener
The type of the loss. When the fitting type is ener, the loss type should be set to ener or left unset. When the fitting type is dipole or polar, the loss type should be set to tensor.
: \"ener\",
    \"start_pref_e\"start_pref_e:
type: float | int, optional, default: 0.02
The prefactor of energy loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the energy label should be provided by file energy.npy in each data system. If both start_pref_e and limit_pref_e are set to 0, then the energy will be ignored.
: 0.02,
    \"limit_pref_e\"limit_pref_e:
type: float | int, optional, default: 1.0
The prefactor of energy loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_f\"start_pref_f:
type: float | int, optional, default: 1000
The prefactor of force loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the force label should be provided by file force.npy in each data system. If both start_pref_f and limit_pref_f are set to 0, then the force will be ignored.
: 1000,
    \"limit_pref_f\"limit_pref_f:
type: float | int, optional, default: 1.0
The prefactor of force loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_v\"start_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the virial label should be provided by file virial.npy in each data system. If both start_pref_v and limit_pref_v are set to 0, then the virial will be ignored.
: 0,
    \"limit_pref_v\"limit_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 0,
    \"_comment\": \" that's all\"
  },
  \"training\"training:
type: dict
The training options.
: {
    \"training_data\"training_data:
type: dict, optional
Configurations of training data.
: {
      \"systems\"systems:
type: str | typing.list[str]
The data systems for training. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/training_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.list[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
- string \"mixed:N\": the batch data will be sampled from all systems and merged into a mixed system with the batch size N. Only support the se_atten descriptor.
If MPI is used, the value should be considered as the batch size per task.
: \"auto\",
      \"_comment\": \"that's all\"
    },
    \"validation_data\"validation_data:
type: NoneType | dict, optional, default: None
Configurations of validation data. Similar to that of training data, except that a numb_btch argument may be configured
: {
      \"systems\"systems:
type: str | typing.list[str]
The data systems for validation. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/validation_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.list[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
: \"auto\",
      \"numb_btch\"numb_btch:
type: int, optional, default: 1, alias: numb_batch
An integer that specifies the number of batches to be sampled for each validation period.
: 1,
      \"_comment\": \"that's all\"
    },
    \"numb_steps\"numb_steps:
type: int, alias: stop_batch
Number of training batch. Each training uses one batch of data.
: 10000,
    \"seed\"seed:
type: NoneType | int, optional
The random seed for getting frames from the training data set.
: 10,
    \"disp_file\"disp_file:
type: str, optional, default: lcurve.out
The file for printing learning curve.
: \"lcurve.out\",
    \"disp_freq\"disp_freq:
type: int, optional, default: 1000
The frequency of printing learning curve.
: 200,
    \"save_freq\"save_freq:
type: int, optional, default: 1000
The frequency of saving check point.
: 1000,
    \"_comment\": \"that's all\"
  }
}
" ], "text/plain": [ "" diff --git a/doc/install/easy-install.md b/doc/install/easy-install.md index a0c6270287..99962d08b8 100644 --- a/doc/install/easy-install.md +++ b/doc/install/easy-install.md @@ -10,7 +10,7 @@ You can refer to [DeepModeling conda FAQ](https://docs.deepmodeling.com/faq/cond ::: :::{note} -Python 3.8 or above is required for Python interface. +Python 3.9 or above is required for Python interface. ::: - [Install off-line packages](#install-off-line-packages) diff --git a/doc/install/install-from-source.md b/doc/install/install-from-source.md index a725be0133..4079a8d424 100644 --- a/doc/install/install-from-source.md +++ b/doc/install/install-from-source.md @@ -21,7 +21,7 @@ deepmd_source_dir=`pwd` ### Install Backend's Python interface First, check the Python version on your machine. -Python 3.8 or above is required. +Python 3.9 or above is required. ```bash python --version @@ -95,7 +95,7 @@ deactivate If one has multiple python interpreters named something like python3.x, it can be specified by, for example ```bash -virtualenv -p python3.8 $deepmd_venv +virtualenv -p python3.9 $deepmd_venv ``` One should remember to activate the virtual environment every time he/she uses DeePMD-kit. diff --git a/pyproject.toml b/pyproject.toml index 1b825ef441..6932960ace 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,10 @@ classifiers = [ "Programming Language :: Python :: 3 :: Only", "Environment :: GPU :: NVIDIA CUDA :: 12 :: 12.2", "Intended Audience :: Science/Research", - "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Scientific/Engineering :: Physics", @@ -51,7 +54,7 @@ dependencies = [ 'mendeleev', 'array-api-compat', ] -requires-python = ">=3.8" +requires-python = ">=3.9" keywords = ["deepmd"] [project.entry-points."lammps.plugins"] diff --git a/source/install/build_tf.py b/source/install/build_tf.py index a65e922098..a9e1e247cd 100755 --- a/source/install/build_tf.py +++ b/source/install/build_tf.py @@ -56,8 +56,6 @@ ignore_patterns, ) from typing import ( - Dict, - List, Optional, ) @@ -225,11 +223,11 @@ class Build(metaclass=ABCMeta): """Build process.""" @abstractproperty - def resources(self) -> Dict[str, OnlineResource]: + def resources(self) -> dict[str, OnlineResource]: """Required resources.""" @abstractproperty - def dependencies(self) -> Dict[str, "Build"]: + def dependencies(self) -> dict[str, "Build"]: """Required dependencies.""" def download_all_resources(self): @@ -364,7 +362,7 @@ def _ignore_patterns(path, names): return _ignore_patterns -def call(commands: List[str], env={}, **kwargs): +def call(commands: list[str], env={}, **kwargs): """Call commands and print to screen for debug. Raises @@ -423,14 +421,14 @@ def __init__(self, version="1.11.0") -> None: @property @lru_cache - def resources(self) -> Dict[str, OnlineResource]: + def resources(self) -> dict[str, OnlineResource]: return { "bazelisk": RESOURCES["bazelisk-" + self.version], } @property @lru_cache - def dependencies(self) -> Dict[str, Build]: + def dependencies(self) -> dict[str, Build]: return {} def build(self): @@ -449,12 +447,12 @@ class BuildNumPy(Build): @property @lru_cache - def resources(self) -> Dict[str, OnlineResource]: + def resources(self) -> dict[str, OnlineResource]: return {} @property @lru_cache - def dependencies(self) -> Dict[str, Build]: + def dependencies(self) -> dict[str, Build]: return {} @property @@ -481,12 +479,12 @@ class BuildCUDA(Build): @property @lru_cache - def resources(self) -> Dict[str, OnlineResource]: + def resources(self) -> dict[str, OnlineResource]: return {} @property @lru_cache - def dependencies(self) -> Dict[str, Build]: + def dependencies(self) -> dict[str, Build]: return {} def build(self): @@ -554,12 +552,12 @@ class BuildROCM(Build): @property @lru_cache - def resources(self) -> Dict[str, OnlineResource]: + def resources(self) -> dict[str, OnlineResource]: return {} @property @lru_cache - def dependencies(self) -> Dict[str, Build]: + def dependencies(self) -> dict[str, Build]: return {} def build(self): @@ -599,14 +597,14 @@ def __init__( @property @lru_cache - def resources(self) -> Dict[str, OnlineResource]: + def resources(self) -> dict[str, OnlineResource]: return { "tensorflow": RESOURCES["tensorflow-" + self.version], } @property @lru_cache - def dependencies(self) -> Dict[str, Build]: + def dependencies(self) -> dict[str, Build]: optional_dep = {} if self.enable_cuda: optional_dep["cuda"] = BuildCUDA() @@ -778,12 +776,12 @@ def _environments(self) -> dict: } @property - def _build_targets(self) -> List[str]: + def _build_targets(self) -> list[str]: # C++ interface return ["//tensorflow:libtensorflow_cc" + get_shlib_ext()] @property - def _build_opts(self) -> List[str]: + def _build_opts(self) -> list[str]: opts = [ "--logging=6", "--verbose_failures", @@ -798,7 +796,7 @@ def _build_opts(self) -> List[str]: return opts @property - def _bazel_opts(self) -> List[str]: + def _bazel_opts(self) -> list[str]: return [] @property @@ -826,7 +824,7 @@ def clean_package(): # interface -def env() -> Dict[str, str]: +def env() -> dict[str, str]: return { "Python": sys.executable, "CUDA": CUDA_PATH, @@ -855,12 +853,12 @@ class RawTextArgumentDefaultsHelpFormatter( pass -def parse_args(args: Optional[List[str]] = None): +def parse_args(args: Optional[list[str]] = None): """TensorFlow C++ Library Installer commandline options argument parser. Parameters ---------- - args : List[str] + args : list[str] list of command line arguments, main purpose is testing default option None takes arguments from sys.argv """ diff --git a/source/tests/common/dpmodel/array_api/test_env_mat.py b/source/tests/common/dpmodel/array_api/test_env_mat.py index d5bc7b6c18..8dfa199d53 100644 --- a/source/tests/common/dpmodel/array_api/test_env_mat.py +++ b/source/tests/common/dpmodel/array_api/test_env_mat.py @@ -1,11 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -import sys import unittest -if sys.version_info >= (3, 9): - import array_api_strict as xp -else: - raise unittest.SkipTest("array_api_strict doesn't support Python<=3.8") +import array_api_strict as xp from deepmd.dpmodel.utils.env_mat import ( compute_smooth_weight, diff --git a/source/tests/common/dpmodel/test_output_def.py b/source/tests/common/dpmodel/test_output_def.py index 9e8ef2940f..03ceb67d01 100644 --- a/source/tests/common/dpmodel/test_output_def.py +++ b/source/tests/common/dpmodel/test_output_def.py @@ -1,8 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import unittest -from typing import ( - List, -) import numpy as np @@ -26,7 +23,7 @@ class VariableDef: def __init__( self, name: str, - shape: List[int], + shape: list[int], atomic: bool = True, ): self.name = name diff --git a/source/tests/common/test_argument_parser.py b/source/tests/common/test_argument_parser.py index 36a2f07be5..1404185607 100644 --- a/source/tests/common/test_argument_parser.py +++ b/source/tests/common/test_argument_parser.py @@ -15,9 +15,6 @@ from typing import ( TYPE_CHECKING, Any, - Dict, - List, - Tuple, Union, ) @@ -33,13 +30,13 @@ from typing_extensions import TypedDict # python<=3.7 class DATA(TypedDict): - type: Union[type, Tuple[type]] + type: Union[type, tuple[type]] value: Any - TEST_DICT = Dict[str, DATA] + TEST_DICT = dict[str, DATA] -def build_args(args: "TEST_DICT", command: str) -> List[str]: +def build_args(args: "TEST_DICT", command: str) -> list[str]: """Build list of arguments similar to one generated by `sys.argv` used by argparse. Parameters @@ -51,7 +48,7 @@ def build_args(args: "TEST_DICT", command: str) -> List[str]: Returns ------- - List[str] + list[str] arguments with options as list of strings, goal is to emulate `sys.argv` """ args_list = [command] diff --git a/source/tests/common/test_auto_batch_size.py b/source/tests/common/test_auto_batch_size.py index 0369bbb70c..cc1e6bf25a 100644 --- a/source/tests/common/test_auto_batch_size.py +++ b/source/tests/common/test_auto_batch_size.py @@ -1,8 +1,9 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import os -import sys import unittest +import array_api_strict as xp + from deepmd.utils.batch_size import ( AutoBatchSize, ) @@ -10,11 +11,6 @@ OutOfMemoryError, ) -if sys.version_info >= (3, 9): - import array_api_strict as xp -else: - raise unittest.SkipTest("array_api_strict doesn't support Python<=3.8") - class CustomizedAutoBatchSizeCPU(AutoBatchSize): def is_gpu_available(self): diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index e8873e528a..c64b14c273 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -14,10 +14,7 @@ Any, Callable, ClassVar, - Dict, - List, Optional, - Tuple, Union, ) from uuid import ( @@ -75,7 +72,7 @@ class CommonTest(ABC): """PyTorch model class.""" jax_class: ClassVar[Optional[type]] """JAX model class.""" - args: ClassVar[Optional[Union[Argument, List[Argument]]]] + args: ClassVar[Optional[Union[Argument, list[Argument]]]] """Arguments that maps to the `data`.""" skip_dp: ClassVar[bool] = False """Whether to skip the native DP model.""" @@ -118,7 +115,7 @@ def pass_data_to_cls(self, cls, data) -> Any: return cls(**data, **self.addtional_data) @abstractmethod - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: """Build the TF graph. Parameters @@ -175,7 +172,7 @@ class RefBackend(Enum): JAX = 5 @abstractmethod - def extract_ret(self, ret: Any, backend: RefBackend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend: RefBackend) -> tuple[np.ndarray, ...]: """Extract the return value when comparing with other backends. Parameters @@ -193,7 +190,7 @@ def extract_ret(self, ret: Any, backend: RefBackend) -> Tuple[np.ndarray, ...]: def build_eval_tf( self, sess: "tf.Session", obj: Any, suffix: str - ) -> List[np.ndarray]: + ) -> list[np.ndarray]: """Build and evaluate the TF graph.""" t_out, feed_dict = self.build_tf(obj, suffix) @@ -489,7 +486,7 @@ class TestClass(base_class): def parameterize_func( func: Callable, - param_dict_list: Dict[str, Tuple], + param_dict_list: dict[str, tuple], ): """Parameterize functions with different default values. @@ -497,7 +494,7 @@ def parameterize_func( ---------- func : Callable The base function. - param_dict_list : Dict[str, Tuple] + param_dict_list : dict[str, Tuple] Dictionary of parameters with default values to be changed in base function, each of which is a tuple of choices. Returns diff --git a/source/tests/consistent/descriptor/test_dpa1.py b/source/tests/consistent/descriptor/test_dpa1.py index 0f44ecaae1..59d7369753 100644 --- a/source/tests/consistent/descriptor/test_dpa1.py +++ b/source/tests/consistent/descriptor/test_dpa1.py @@ -3,7 +3,6 @@ from typing import ( Any, Optional, - Tuple, ) import numpy as np @@ -284,7 +283,7 @@ def setUp(self): use_tebd_bias, ) = self.param - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: return self.build_tf_descriptor( obj, self.natoms, @@ -314,7 +313,7 @@ def eval_pt(self, pt_obj: Any) -> Any: mixed_types=True, ) - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) @property diff --git a/source/tests/consistent/descriptor/test_dpa2.py b/source/tests/consistent/descriptor/test_dpa2.py index 144567ae58..53f9ce4200 100644 --- a/source/tests/consistent/descriptor/test_dpa2.py +++ b/source/tests/consistent/descriptor/test_dpa2.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -338,7 +337,7 @@ def setUp(self): use_tebd_bias, ) = self.param - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: return self.build_tf_descriptor( obj, self.natoms, @@ -368,7 +367,7 @@ def eval_pt(self, pt_obj: Any) -> Any: mixed_types=True, ) - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) @property diff --git a/source/tests/consistent/descriptor/test_hybrid.py b/source/tests/consistent/descriptor/test_hybrid.py index 7cfb627d54..cd52eea5be 100644 --- a/source/tests/consistent/descriptor/test_hybrid.py +++ b/source/tests/consistent/descriptor/test_hybrid.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -105,7 +104,7 @@ def setUp(self): ) self.natoms = np.array([6, 6, 2, 4], dtype=np.int32) - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: return self.build_tf_descriptor( obj, self.natoms, @@ -133,5 +132,5 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) diff --git a/source/tests/consistent/descriptor/test_se_atten_v2.py b/source/tests/consistent/descriptor/test_se_atten_v2.py index 989fdc16e7..a3fe4e98b4 100644 --- a/source/tests/consistent/descriptor/test_se_atten_v2.py +++ b/source/tests/consistent/descriptor/test_se_atten_v2.py @@ -3,7 +3,6 @@ from typing import ( Any, Optional, - Tuple, ) import numpy as np @@ -215,7 +214,7 @@ def setUp(self): ) self.natoms = np.array([6, 6, 2, 4], dtype=np.int32) - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: return self.build_tf_descriptor( obj, self.natoms, @@ -245,7 +244,7 @@ def eval_pt(self, pt_obj: Any) -> Any: mixed_types=True, ) - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) @property diff --git a/source/tests/consistent/descriptor/test_se_e2_a.py b/source/tests/consistent/descriptor/test_se_e2_a.py index 1e3e5ae86d..2563ee1d6d 100644 --- a/source/tests/consistent/descriptor/test_se_e2_a.py +++ b/source/tests/consistent/descriptor/test_se_e2_a.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -150,7 +149,7 @@ def setUp(self): self.atype = self.atype[idx] self.coords = self.coords.reshape(-1, 3)[idx].ravel() - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: return self.build_tf_descriptor( obj, self.natoms, @@ -178,7 +177,7 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) @property diff --git a/source/tests/consistent/descriptor/test_se_r.py b/source/tests/consistent/descriptor/test_se_r.py index 8b835f3b5c..7103f60aa7 100644 --- a/source/tests/consistent/descriptor/test_se_r.py +++ b/source/tests/consistent/descriptor/test_se_r.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -121,7 +120,7 @@ def setUp(self): ) self.natoms = np.array([6, 6, 2, 4], dtype=np.int32) - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: return self.build_tf_descriptor( obj, self.natoms, @@ -149,7 +148,7 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) @property diff --git a/source/tests/consistent/descriptor/test_se_t.py b/source/tests/consistent/descriptor/test_se_t.py index 7579344012..833b76f6e1 100644 --- a/source/tests/consistent/descriptor/test_se_t.py +++ b/source/tests/consistent/descriptor/test_se_t.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -141,7 +140,7 @@ def setUp(self): self.atype = self.atype[idx] self.coords = self.coords.reshape(-1, 3)[idx].ravel() - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: return self.build_tf_descriptor( obj, self.natoms, @@ -169,7 +168,7 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) @property diff --git a/source/tests/consistent/descriptor/test_se_t_tebd.py b/source/tests/consistent/descriptor/test_se_t_tebd.py index d9bd00aad3..3299a04c78 100644 --- a/source/tests/consistent/descriptor/test_se_t_tebd.py +++ b/source/tests/consistent/descriptor/test_se_t_tebd.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -187,7 +186,7 @@ def setUp(self): use_tebd_bias, ) = self.param - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: return self.build_tf_descriptor( obj, self.natoms, @@ -217,7 +216,7 @@ def eval_pt(self, pt_obj: Any) -> Any: mixed_types=True, ) - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) @property diff --git a/source/tests/consistent/fitting/test_dipole.py b/source/tests/consistent/fitting/test_dipole.py index 4f33d58c10..5d7be1b0e5 100644 --- a/source/tests/consistent/fitting/test_dipole.py +++ b/source/tests/consistent/fitting/test_dipole.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -97,7 +96,7 @@ def addtional_data(self) -> dict: "embedding_width": 30, } - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: ( resnet_dt, precision, @@ -144,7 +143,7 @@ def eval_dp(self, dp_obj: Any) -> Any: None, )["dipole"] - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: if backend == self.RefBackend.TF: # shape is not same ret = ret[0].reshape(-1, self.natoms[0], 1) diff --git a/source/tests/consistent/fitting/test_dos.py b/source/tests/consistent/fitting/test_dos.py index bfdf76c8ff..ada65c8ac5 100644 --- a/source/tests/consistent/fitting/test_dos.py +++ b/source/tests/consistent/fitting/test_dos.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -106,7 +105,7 @@ def addtional_data(self) -> dict: "mixed_types": mixed_types, } - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: ( resnet_dt, precision, @@ -158,7 +157,7 @@ def eval_dp(self, dp_obj: Any) -> Any: fparam=self.fparam if numb_fparam else None, )["dos"] - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: if backend == self.RefBackend.TF: # shape is not same ret = ret[0].reshape(-1, self.natoms[0], 1) diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index 157b1bab8a..ac4f7ae543 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -106,7 +105,7 @@ def addtional_data(self) -> dict: "mixed_types": mixed_types, } - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: ( resnet_dt, precision, @@ -158,7 +157,7 @@ def eval_dp(self, dp_obj: Any) -> Any: fparam=self.fparam if numb_fparam else None, )["energy"] - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: if backend == self.RefBackend.TF: # shape is not same ret = ret[0].reshape(-1, self.natoms[0], 1) diff --git a/source/tests/consistent/fitting/test_polar.py b/source/tests/consistent/fitting/test_polar.py index 808514ade4..6a3465ba24 100644 --- a/source/tests/consistent/fitting/test_polar.py +++ b/source/tests/consistent/fitting/test_polar.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -97,7 +96,7 @@ def addtional_data(self) -> dict: "embedding_width": 30, } - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: ( resnet_dt, precision, @@ -144,7 +143,7 @@ def eval_dp(self, dp_obj: Any) -> Any: None, )["polarizability"] - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: if backend == self.RefBackend.TF: # shape is not same ret = ret[0].reshape(-1, self.natoms[0], 1) diff --git a/source/tests/consistent/fitting/test_property.py b/source/tests/consistent/fitting/test_property.py index 3f406d3a6b..a9fb6b694a 100644 --- a/source/tests/consistent/fitting/test_property.py +++ b/source/tests/consistent/fitting/test_property.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -113,7 +112,7 @@ def addtional_data(self) -> dict: "mixed_types": mixed_types, } - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: ( resnet_dt, precision, @@ -168,7 +167,7 @@ def eval_dp(self, dp_obj: Any) -> Any: fparam=self.fparam if numb_fparam else None, )["property"] - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: if backend == self.RefBackend.TF: # shape is not same ret = ret[0].reshape(-1, self.natoms[0], 1) diff --git a/source/tests/consistent/model/test_ener.py b/source/tests/consistent/model/test_ener.py index c8ff9e4dcf..692e1287dc 100644 --- a/source/tests/consistent/model/test_ener.py +++ b/source/tests/consistent/model/test_ener.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -141,7 +140,7 @@ def setUp(self): self.atype = self.atype[:, idx_map] self.coords = self.coords[:, idx_map] - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: return self.build_tf_model( obj, self.natoms, @@ -169,7 +168,7 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: # shape not matched. ravel... if backend is self.RefBackend.DP: return (ret["energy_redu"].ravel(), ret["energy"].ravel()) diff --git a/source/tests/consistent/model/test_frozen.py b/source/tests/consistent/model/test_frozen.py index e362aed511..f11a11914b 100644 --- a/source/tests/consistent/model/test_frozen.py +++ b/source/tests/consistent/model/test_frozen.py @@ -3,7 +3,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -121,7 +120,7 @@ def setUp(self): self.atype = self.atype[:, idx_map] self.coords = self.coords[:, idx_map] - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: return self.build_tf_model( obj, self.natoms, @@ -149,7 +148,7 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: # shape not matched. ravel... if backend is self.RefBackend.DP: return (ret["energy_redu"].ravel(), ret["energy"].ravel()) diff --git a/source/tests/consistent/test_type_embedding.py b/source/tests/consistent/test_type_embedding.py index c66ef0fbaa..1464517581 100644 --- a/source/tests/consistent/test_type_embedding.py +++ b/source/tests/consistent/test_type_embedding.py @@ -2,7 +2,6 @@ import unittest from typing import ( Any, - Tuple, ) import numpy as np @@ -96,7 +95,7 @@ def setUp(self): self.ntypes = 2 - def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: return [ obj.build( obj.ntypes, @@ -121,7 +120,7 @@ def eval_jax(self, jax_obj: Any) -> Any: raise ValueError("Output is numpy array") return [np.array(x) if isinstance(x, jnp.ndarray) else x for x in (out,)] - def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) @property diff --git a/source/tests/infer/case.py b/source/tests/infer/case.py index c1bce424c4..4a5ce638d6 100644 --- a/source/tests/infer/case.py +++ b/source/tests/infer/case.py @@ -27,7 +27,6 @@ Path, ) from typing import ( - Dict, Optional, ) @@ -175,12 +174,12 @@ def get_model(self, suffix: str, out_file: Optional[str] = None) -> str: @lru_cache -def get_cases() -> Dict[str, Case]: +def get_cases() -> dict[str, Case]: """Get all test cases. Returns ------- - Dict[str, Case] + dict[str, Case] A dictionary containing all test cases. Examples diff --git a/source/tests/pt/common.py b/source/tests/pt/common.py index 16b343be8a..173e9d52dc 100644 --- a/source/tests/pt/common.py +++ b/source/tests/pt/common.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - List, Optional, Union, ) @@ -46,7 +45,7 @@ def eval_model( model, coords: Union[np.ndarray, torch.Tensor], cells: Optional[Union[np.ndarray, torch.Tensor]], - atom_types: Union[np.ndarray, torch.Tensor, List[int]], + atom_types: Union[np.ndarray, torch.Tensor, list[int]], spins: Optional[Union[np.ndarray, torch.Tensor]] = None, atomic: bool = False, infer_batch_size: int = 2, diff --git a/source/tests/pt/model/test_atomic_model_atomic_stat.py b/source/tests/pt/model/test_atomic_model_atomic_stat.py index 470b01b507..6a21fc6e5a 100644 --- a/source/tests/pt/model/test_atomic_model_atomic_stat.py +++ b/source/tests/pt/model/test_atomic_model_atomic_stat.py @@ -5,7 +5,6 @@ Path, ) from typing import ( - List, Optional, ) @@ -70,11 +69,11 @@ def serialize(self) -> dict: raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: raise NotImplementedError - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: raise NotImplementedError def forward( diff --git a/source/tests/pt/model/test_atomic_model_global_stat.py b/source/tests/pt/model/test_atomic_model_global_stat.py index 11752278e4..9ce5784bfa 100644 --- a/source/tests/pt/model/test_atomic_model_global_stat.py +++ b/source/tests/pt/model/test_atomic_model_global_stat.py @@ -5,7 +5,6 @@ Path, ) from typing import ( - List, Optional, ) @@ -82,11 +81,11 @@ def serialize(self) -> dict: raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: raise NotImplementedError - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: raise NotImplementedError def forward( diff --git a/source/tests/pt/model/test_force_grad.py b/source/tests/pt/model/test_force_grad.py index ddc3c0bccf..d3cd11f71d 100644 --- a/source/tests/pt/model/test_force_grad.py +++ b/source/tests/pt/model/test_force_grad.py @@ -6,7 +6,6 @@ Path, ) from typing import ( - List, Optional, ) @@ -32,7 +31,7 @@ class CheckSymmetry(DeepmdData): def __init__( self, sys_path: str, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, ): super().__init__(sys_path=sys_path, type_map=type_map) self.add("energy", 1, atomic=False, must=False, high_prec=True) diff --git a/source/tests/pt/model/test_linear_atomic_model_stat.py b/source/tests/pt/model/test_linear_atomic_model_stat.py index 604c82f513..49b7a3821f 100644 --- a/source/tests/pt/model/test_linear_atomic_model_stat.py +++ b/source/tests/pt/model/test_linear_atomic_model_stat.py @@ -5,7 +5,6 @@ Path, ) from typing import ( - List, Optional, ) @@ -63,11 +62,11 @@ def serialize(self) -> dict: raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: raise NotImplementedError - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: raise NotImplementedError def forward( @@ -115,11 +114,11 @@ def serialize(self) -> dict: raise NotImplementedError def change_type_map( - self, type_map: List[str], model_with_new_type_stat=None + self, type_map: list[str], model_with_new_type_stat=None ) -> None: raise NotImplementedError - def get_type_map(self) -> List[str]: + def get_type_map(self) -> list[str]: raise NotImplementedError def forward( diff --git a/source/tests/pt/model/test_rotation.py b/source/tests/pt/model/test_rotation.py index caa6385c80..cf947c30b2 100644 --- a/source/tests/pt/model/test_rotation.py +++ b/source/tests/pt/model/test_rotation.py @@ -5,7 +5,6 @@ Path, ) from typing import ( - List, Optional, ) @@ -30,7 +29,7 @@ class CheckSymmetry(DeepmdData): def __init__( self, sys_path: str, - type_map: Optional[List[str]] = None, + type_map: Optional[list[str]] = None, ): super().__init__(sys_path=sys_path, type_map=type_map) self.add("energy", 1, atomic=False, must=False, high_prec=True) diff --git a/source/tests/universal/common/cases/atomic_model/utils.py b/source/tests/universal/common/cases/atomic_model/utils.py index b63563e237..bfd2e2cd5f 100644 --- a/source/tests/universal/common/cases/atomic_model/utils.py +++ b/source/tests/universal/common/cases/atomic_model/utils.py @@ -2,8 +2,6 @@ from typing import ( Any, Callable, - Dict, - List, Optional, ) @@ -21,7 +19,7 @@ class AtomicModelTestCase: """Common test case for atomic model.""" - expected_type_map: List[str] + expected_type_map: list[str] """Expected type map.""" expected_rcut: float """Expected cut-off radius.""" @@ -29,25 +27,25 @@ class AtomicModelTestCase: """Expected number (dimension) of frame parameters.""" expected_dim_aparam: int """Expected number (dimension) of atomic parameters.""" - expected_sel_type: List[int] + expected_sel_type: list[int] """Expected selected atom types.""" expected_aparam_nall: bool """Expected shape of atomic parameters.""" - expected_model_output_type: List[str] + expected_model_output_type: list[str] """Expected output type for the model.""" - model_output_equivariant: List[str] + model_output_equivariant: list[str] """Outputs that are equivariant to the input rotation.""" - expected_sel: List[int] + expected_sel: list[int] """Expected number of neighbors.""" expected_has_message_passing: bool """Expected whether having message passing.""" forward_wrapper: Callable[[Any], Any] """Calss wrapper for forward method.""" - aprec_dict: Dict[str, Optional[float]] + aprec_dict: dict[str, Optional[float]] """Dictionary of absolute precision in each test.""" - rprec_dict: Dict[str, Optional[float]] + rprec_dict: dict[str, Optional[float]] """Dictionary of relative precision in each test.""" - epsilon_dict: Dict[str, Optional[float]] + epsilon_dict: dict[str, Optional[float]] """Dictionary of epsilons in each test.""" def test_get_type_map(self): diff --git a/source/tests/universal/common/cases/model/utils.py b/source/tests/universal/common/cases/model/utils.py index 66b2e64fd3..d583d06b05 100644 --- a/source/tests/universal/common/cases/model/utils.py +++ b/source/tests/universal/common/cases/model/utils.py @@ -6,8 +6,6 @@ from typing import ( Any, Callable, - Dict, - List, Optional, ) @@ -31,7 +29,7 @@ class ModelTestCase: """Common test case for model.""" - expected_type_map: List[str] + expected_type_map: list[str] """Expected type map.""" expected_rcut: float """Expected cut-off radius.""" @@ -39,15 +37,15 @@ class ModelTestCase: """Expected number (dimension) of frame parameters.""" expected_dim_aparam: int """Expected number (dimension) of atomic parameters.""" - expected_sel_type: List[int] + expected_sel_type: list[int] """Expected selected atom types.""" expected_aparam_nall: bool """Expected shape of atomic parameters.""" - expected_model_output_type: List[str] + expected_model_output_type: list[str] """Expected output type for the model.""" - model_output_equivariant: List[str] + model_output_equivariant: list[str] """Outputs that are equivariant to the input rotation.""" - expected_sel: List[int] + expected_sel: list[int] """Expected number of neighbors.""" expected_has_message_passing: bool """Expected whether having message passing.""" @@ -55,11 +53,11 @@ class ModelTestCase: """Class wrapper for forward method.""" forward_wrapper_cpu_ref: Callable[[Any], Any] """Convert model to CPU method.""" - aprec_dict: Dict[str, Optional[float]] + aprec_dict: dict[str, Optional[float]] """Dictionary of absolute precision in each test.""" - rprec_dict: Dict[str, Optional[float]] + rprec_dict: dict[str, Optional[float]] """Dictionary of relative precision in each test.""" - epsilon_dict: Dict[str, Optional[float]] + epsilon_dict: dict[str, Optional[float]] """Dictionary of epsilons in each test.""" def test_get_type_map(self): From c90c4e165b6246076f9048e33c43321617df0e1c Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 6 Oct 2024 13:25:07 -0400 Subject: [PATCH 023/193] fix(tf): set visible_device_list for TF C++ (#4172) Fix #4171. ## Summary by CodeRabbit - **New Features** - Enhanced GPU selection logic for improved resource management. - Added support for single-frame and multi-frame computations with new parameters for atom energy and virial calculations. - Extended functionality for mixed-type computations in the model. - **Bug Fixes** - Improved error handling during initialization and model execution. - Added output tensor dimension validations to ensure expected structures are maintained. - **Documentation** - Clarified output tensor validation to ensure expected dimensions are maintained. --------- Signed-off-by: Jinzhe Zeng --- source/api_cc/src/DataModifierTF.cc | 7 +++++-- source/api_cc/src/DeepPotTF.cc | 7 +++++-- source/api_cc/src/DeepTensorTF.cc | 7 +++++-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/source/api_cc/src/DataModifierTF.cc b/source/api_cc/src/DataModifierTF.cc index 324cb14098..aaa2252955 100644 --- a/source/api_cc/src/DataModifierTF.cc +++ b/source/api_cc/src/DataModifierTF.cc @@ -49,8 +49,11 @@ void DipoleChargeModifierTF::init(const std::string& model, 0.9); options.config.mutable_gpu_options()->set_allow_growth(true); DPErrcheck(DPSetDevice(gpu_rank % gpu_num)); - std::string str = "/gpu:"; - str += std::to_string(gpu_rank % gpu_num); + std::string str = "/gpu:0"; + // See + // https://github.com/tensorflow/tensorflow/blame/8fac27b486939f40bc8e362b94a16a4a8bb51869/tensorflow/core/protobuf/config.proto#L80 + options.config.mutable_gpu_options()->set_visible_device_list( + std::to_string(gpu_rank % gpu_num)); graph::SetDefaultDevice(str, graph_def); } #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index 2c09c17a69..d7a7edfb60 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -447,8 +447,11 @@ void DeepPotTF::init(const std::string& model, 0.9); options.config.mutable_gpu_options()->set_allow_growth(true); DPErrcheck(DPSetDevice(gpu_rank % gpu_num)); - std::string str = "/gpu:"; - str += std::to_string(gpu_rank % gpu_num); + std::string str = "/gpu:0"; + // See + // https://github.com/tensorflow/tensorflow/blame/8fac27b486939f40bc8e362b94a16a4a8bb51869/tensorflow/core/protobuf/config.proto#L80 + options.config.mutable_gpu_options()->set_visible_device_list( + std::to_string(gpu_rank % gpu_num)); graph::SetDefaultDevice(str, graph_def); } #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM diff --git a/source/api_cc/src/DeepTensorTF.cc b/source/api_cc/src/DeepTensorTF.cc index 34a47bc6f3..c69b7c018e 100644 --- a/source/api_cc/src/DeepTensorTF.cc +++ b/source/api_cc/src/DeepTensorTF.cc @@ -46,8 +46,11 @@ void DeepTensorTF::init(const std::string &model, 0.9); options.config.mutable_gpu_options()->set_allow_growth(true); DPErrcheck(DPSetDevice(gpu_rank % gpu_num)); - std::string str = "/gpu:"; - str += std::to_string(gpu_rank % gpu_num); + std::string str = "/gpu:0"; + // See + // https://github.com/tensorflow/tensorflow/blame/8fac27b486939f40bc8e362b94a16a4a8bb51869/tensorflow/core/protobuf/config.proto#L80 + options.config.mutable_gpu_options()->set_visible_device_list( + std::to_string(gpu_rank % gpu_num)); graph::SetDefaultDevice(str, graph_def); } #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM From a0747b92d764aeb383f6d2eef872ee918e0316c0 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 6 Oct 2024 19:49:12 -0400 Subject: [PATCH 024/193] fix(tf): throw errors when loc_cellnum is 0 (#4180) Fix #4122. ## Summary by CodeRabbit - **New Features** - Enhanced error handling in the computation process, providing clearer error messages for invalid local cell numbers. - **Bug Fixes** - Improved robustness of the `compute_cell_info` function to prevent failures due to invalid input conditions. Signed-off-by: Jinzhe Zeng --- source/lib/src/coord.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/source/lib/src/coord.cc b/source/lib/src/coord.cc index b1456bc7f1..8e759f372f 100644 --- a/source/lib/src/coord.cc +++ b/source/lib/src/coord.cc @@ -4,6 +4,7 @@ #include #include "SimulationRegion.h" +#include "errors.h" #include "neighbor_list.h" using namespace deepmd; @@ -95,6 +96,12 @@ void deepmd::compute_cell_info( } cell_info[21] = (cell_info[3 + 0]) * (cell_info[3 + 1]) * (cell_info[3 + 2]); // loc_cellnum + if (cell_info[21] <= 0) { + throw deepmd::deepmd_exception( + "loc_cellnum should be positive but is " + + std::to_string(cell_info[21]) + + ". You may give a PBC box with zero volume."); + } cell_info[22] = (2 * cell_info[12 + 0] + cell_info[3 + 0]) * (2 * cell_info[12 + 1] + cell_info[3 + 1]) * (2 * cell_info[12 + 2] + cell_info[3 + 2]); // total_cellnum From 2feb21c6a06420a71919fcd8253d8e7762d90c0a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 02:47:24 +0000 Subject: [PATCH 025/193] [pre-commit.ci] pre-commit autoupdate (#4173) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.7 → v0.6.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.7...v0.6.8) - [github.com/pre-commit/mirrors-clang-format: v18.1.8 → v19.1.0](https://github.com/pre-commit/mirrors-clang-format/compare/v18.1.8...v19.1.0) - https://github.com/pylint-dev/pylint/: v3.3.0 → v3.3.1 --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinzhe Zeng --- .pre-commit-config.yaml | 8 ++++---- source/lib/include/gpu_cuda.h | 12 ++++++++---- source/lib/include/gpu_rocm.h | 12 ++++++++---- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5d34f39752..486b5e94fd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.6.7 + rev: v0.6.8 hooks: - id: ruff args: ["--fix"] @@ -52,10 +52,10 @@ repos: - id: blacken-docs # C++ - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v18.1.8 + rev: v19.1.0 hooks: - id: clang-format - exclude: ^source/3rdparty|source/lib/src/gpu/cudart/.+\.inc + exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) # markdown, yaml, CSS, javascript - repo: https://github.com/pre-commit/mirrors-prettier rev: v4.0.0-alpha.8 @@ -146,7 +146,7 @@ repos: exclude: .pre-commit-config.yaml|source/lmp # customized pylint rules - repo: https://github.com/pylint-dev/pylint/ - rev: v3.3.0 + rev: v3.3.1 hooks: - id: pylint entry: env PYTHONPATH=source/checker pylint diff --git a/source/lib/include/gpu_cuda.h b/source/lib/include/gpu_cuda.h index fb467674cb..9504a95b7a 100644 --- a/source/lib/include/gpu_cuda.h +++ b/source/lib/include/gpu_cuda.h @@ -18,8 +18,10 @@ #define gpuMemset cudaMemset #define GPU_MAX_NBOR_SIZE 4096 -#define DPErrcheck(res) \ - { DPAssert((res), __FILE__, __LINE__); } +#define DPErrcheck(res) \ + { \ + DPAssert((res), __FILE__, __LINE__); \ + } inline void DPAssert(cudaError_t code, const char *file, int line, @@ -54,8 +56,10 @@ inline void DPAssert(cudaError_t code, } } -#define nborErrcheck(res) \ - { nborAssert((res), __FILE__, __LINE__); } +#define nborErrcheck(res) \ + { \ + nborAssert((res), __FILE__, __LINE__); \ + } inline void nborAssert(cudaError_t code, const char *file, int line, diff --git a/source/lib/include/gpu_rocm.h b/source/lib/include/gpu_rocm.h index fbd5e1ce3f..abb7ddfa62 100644 --- a/source/lib/include/gpu_rocm.h +++ b/source/lib/include/gpu_rocm.h @@ -20,8 +20,10 @@ #define gpuMemcpyDeviceToDevice hipMemcpyDeviceToDevice #define gpuMemset hipMemset -#define DPErrcheck(res) \ - { DPAssert((res), __FILE__, __LINE__); } +#define DPErrcheck(res) \ + { \ + DPAssert((res), __FILE__, __LINE__); \ + } inline void DPAssert(hipError_t code, const char *file, int line, @@ -39,8 +41,10 @@ inline void DPAssert(hipError_t code, } } -#define nborErrcheck(res) \ - { nborAssert((res), __FILE__, __LINE__); } +#define nborErrcheck(res) \ + { \ + nborAssert((res), __FILE__, __LINE__); \ + } inline void nborAssert(hipError_t code, const char *file, int line, From d667929bc4ec1b9721dee2c194e39d15cdf7725d Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 7 Oct 2024 13:32:31 -0400 Subject: [PATCH 026/193] docs: add documentation for installation requirements of DPA-2 (#4178) Fix #4161. ## Summary by CodeRabbit - **New Features** - Added installation requirements for the DPA-2 model in the documentation, including customized OP library instructions. - **Improvements** - Enhanced error messaging in the `border_op` function for better user guidance. - Clarified parameter handling and documentation in the `DescrptBlockRepformers` class. - Improved logic for processing input tensors and neighbor lists in the `forward` method. - Strengthened input statistics handling in the `compute_input_stats` method. --------- Signed-off-by: Jinzhe Zeng --- deepmd/pt/model/descriptor/repformers.py | 3 ++- doc/model/dpa2.md | 10 ++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/deepmd/pt/model/descriptor/repformers.py b/deepmd/pt/model/descriptor/repformers.py index 406758faa6..64965825a0 100644 --- a/deepmd/pt/model/descriptor/repformers.py +++ b/deepmd/pt/model/descriptor/repformers.py @@ -57,7 +57,8 @@ def border_op( argument8, ) -> torch.Tensor: raise NotImplementedError( - "border_op is not available since customized PyTorch OP library is not built when freezing the model." + "border_op is not available since customized PyTorch OP library is not built when freezing the model. " + "See documentation for DPA-2 for details." ) # Note: this hack cannot actually save a model that can be runned using LAMMPS. diff --git a/doc/model/dpa2.md b/doc/model/dpa2.md index 5de30ee6b2..24ce5222e9 100644 --- a/doc/model/dpa2.md +++ b/doc/model/dpa2.md @@ -8,6 +8,16 @@ The DPA-2 model implementation. See https://arxiv.org/abs/2312.15492 for more de Training example: `examples/water/dpa2/input_torch_medium.json`, see [README](../../examples/water/dpa2/README.md) for inputs in different levels. +## Requirements of installation {{ pytorch_icon }} + +If one wants to run the DPA-2 model on LAMMPS, the customized OP library for the Python interface must be installed when [freezing the model](../freeze/freeze.md). + +The customized OP library for the Python interface can be installed by setting environment variable {envvar}`DP_ENABLE_PYTORCH` to `1` during installation. + +If one runs LAMMPS with MPI, the customized OP library for the C++ interface should be compiled against the same MPI library as the runtime MPI. +If one runs LAMMPS with MPI and CUDA devices, it is recommended to compile the customized OP library for the C++ interface with a [CUDA-Aware MPI](https://developer.nvidia.com/mpi-solutions-gpus) library and CUDA, +otherwise the communication between GPU cards falls back to the slower CPU implementation. + ## Data format DPA-2 supports both the [standard data format](../data/system.md) and the [mixed type data format](../data/system.md#mixed-type). From dcdd804a6eb20867d27eff24a9c300f1bc6fe370 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 7 Oct 2024 20:40:40 -0400 Subject: [PATCH 027/193] chore: use `functools.cached_property` for cached properties (#4187) `functools.cached_property` (new in Python 3.8) is more suitable for cached properties. ## Summary by CodeRabbit - **New Features** - Introduced a new parameter `neighbor_list` for enhanced neighbor list handling in model evaluation. - Added support for percentage strings in the `test_size` parameter for flexible test size configuration. - New method `_make_auto_ts` to facilitate test size calculations based on specified percentages. - **Bug Fixes** - Improved caching mechanisms for properties, enhancing performance and memory management. - **Documentation** - Added comments and clarifications in the code to improve understanding of batch and test size handling. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/tf/infer/deep_eval.py | 20 +++++++------------- deepmd/tf/utils/tabulate.py | 4 ++-- deepmd/utils/data_system.py | 5 ++--- 3 files changed, 11 insertions(+), 18 deletions(-) diff --git a/deepmd/tf/infer/deep_eval.py b/deepmd/tf/infer/deep_eval.py index 33725007f3..56df7f782f 100644 --- a/deepmd/tf/infer/deep_eval.py +++ b/deepmd/tf/infer/deep_eval.py @@ -1,7 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import json from functools import ( - cache, + cached_property, ) from typing import ( TYPE_CHECKING, @@ -263,8 +263,7 @@ def _init_attr(self): else: self.modifier_type = None - @property - @cache + @cached_property def model_type(self) -> type["DeepEvalWrapper"]: """Get type of model. @@ -288,8 +287,7 @@ def model_type(self) -> type["DeepEvalWrapper"]: else: raise RuntimeError(f"unknown model type {model_type}") - @property - @cache + @cached_property def model_version(self) -> str: """Get version of model. @@ -307,8 +305,7 @@ def model_version(self) -> str: [mt] = run_sess(self.sess, [t_mt], feed_dict={}) return mt.decode("utf-8") - @property - @cache + @cached_property def sess(self) -> tf.Session: """Get TF session.""" # start a tf session associated to the graph @@ -1192,8 +1189,7 @@ def __init__( self.neighbor_list = neighbor_list - @property - @cache + @cached_property def model_type(self) -> str: """Get type of model. @@ -1203,8 +1199,7 @@ def model_type(self) -> str: [mt] = run_sess(self.sess, [t_mt], feed_dict={}) return mt.decode("utf-8") - @property - @cache + @cached_property def model_version(self) -> str: """Get version of model. @@ -1222,8 +1217,7 @@ def model_version(self) -> str: [mt] = run_sess(self.sess, [t_mt], feed_dict={}) return mt.decode("utf-8") - @property - @cache + @cached_property def sess(self) -> tf.Session: """Get TF session.""" # start a tf session associated to the graph diff --git a/deepmd/tf/utils/tabulate.py b/deepmd/tf/utils/tabulate.py index afb94bb050..1dc6128f62 100644 --- a/deepmd/tf/utils/tabulate.py +++ b/deepmd/tf/utils/tabulate.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging from functools import ( + cached_property, lru_cache, ) from typing import ( @@ -770,8 +771,7 @@ def _get_layer_size(self): raise RuntimeError("Unsupported descriptor") return layer_size - @property - @lru_cache + @cached_property def _n_all_excluded(self) -> int: """Then number of types excluding all types.""" return sum(int(self._all_excluded(ii)) for ii in range(0, self.ntypes)) diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py index e499163e6a..7bec0b16f4 100644 --- a/deepmd/utils/data_system.py +++ b/deepmd/utils/data_system.py @@ -3,7 +3,7 @@ import logging import warnings from functools import ( - cache, + cached_property, ) from typing import ( Any, @@ -238,8 +238,7 @@ def _load_test(self, ntests=-1): for nn in test_system_data: self.test_data[nn].append(test_system_data[nn]) - @property - @cache + @cached_property def default_mesh(self) -> list[np.ndarray]: """Mesh for each system.""" return [ From b807bb4d3ac572b5903ec58c906bacffbfef9a9e Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 7 Oct 2024 20:43:06 -0400 Subject: [PATCH 028/193] docs: document more for multiprocessing (#4190) Fix #4182. ## Summary by CodeRabbit - **Documentation** - Updated `lammps-command.md` to clarify GPU usage and unit handling in LAMMPS. - Enhanced `howtoset_num_nodes.md` with new sections on MPI and multiprocessing for TensorFlow and PyTorch, improving clarity and usability. - Added guidance on GPU resource allocation for parallel processes. Signed-off-by: Jinzhe Zeng --- doc/third-party/lammps-command.md | 5 +++++ doc/troubleshooting/howtoset_num_nodes.md | 21 +++++++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/doc/third-party/lammps-command.md b/doc/third-party/lammps-command.md index 4baba00e05..6a16605bfc 100644 --- a/doc/third-party/lammps-command.md +++ b/doc/third-party/lammps-command.md @@ -4,6 +4,11 @@ See [Environment variables](../env.md) for the runtime environment variables. ::: +:::{note} +Each MPI rank can only use at most one GPU card. +See [How to control the parallelism of a job](../troubleshooting/howtoset_num_nodes.md) for details. +::: + ## units All units in LAMMPS except `lj` are supported. `lj` is not supported. diff --git a/doc/troubleshooting/howtoset_num_nodes.md b/doc/troubleshooting/howtoset_num_nodes.md index 0c547650fb..b09fb80cb6 100644 --- a/doc/troubleshooting/howtoset_num_nodes.md +++ b/doc/troubleshooting/howtoset_num_nodes.md @@ -4,11 +4,26 @@ DeePMD-kit has three levels of parallelism. To get the best performance, one should control the number of threads used by DeePMD-kit. One should make sure the product of the parallel numbers is less than or equal to the number of cores available. -## MPI (optional) +## MPI or multiprocessing (optional) Parallelism for MPI is optional and used for multiple nodes, multiple GPU cards, or sometimes multiple CPU cores. -To enable MPI support for training, one should [install horovod](../install/install-from-source.md#install-horovod-and-mpi4py) in advance. Note that the parallelism mode is data parallelism, so it is not expected to see the training time per batch decreases. +::::{tab-set} + +:::{tab-item} TensorFlow {{ tensorflow_icon }} + +To enable MPI support for training in the TensorFlow interface, one should [install horovod](../install/install-from-source.md#install-horovod-and-mpi4py) in advance. + +::: +:::{tab-item} PyTorch {{ pytorch_icon }} + +Multiprocessing support for training in the PyTorch backend is implemented with [torchrun](https://pytorch.org/docs/stable/elastic/run.html). + +::: +:::: + +Note that the parallelism mode is data parallelism, so it is not expected to see the training time per batch decreases. +See [Parallel training](../train/parallel-training.md) for details. MPI support for inference is not directly supported by DeePMD-kit, but indirectly supported by the third-party software. For example, [LAMMPS enables running simulations in parallel](https://docs.lammps.org/Developer_parallel.html) using the MPI parallel communication standard with distributed data. That software has to build against MPI. @@ -22,6 +37,8 @@ Note that `mpirun` here should be the same as the MPI used to build software. Fo Sometimes, `$num_nodes` and the nodes information can be directly given by the HPC scheduler system, if the MPI used here is the same as the MPI used to build the scheduler system. Otherwise, one have to manually assign these information. +Each process can use at most one GPU card. + ## Parallelism between independent operators For CPU devices, TensorFlow and PyTorch use multiple streams to run independent operators (OP). From 9a15bc0ff804e2921bf85fb921e6fe78f6c9bff6 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 13:50:00 -0400 Subject: [PATCH 029/193] [pre-commit.ci] pre-commit autoupdate (#4192) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.6.0 → v5.0.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.6.0...v5.0.0) - [github.com/astral-sh/ruff-pre-commit: v0.6.8 → v0.6.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.8...v0.6.9) - [github.com/asottile/blacken-docs: 1.18.0 → 1.19.0](https://github.com/asottile/blacken-docs/compare/1.18.0...1.19.0) - [github.com/pre-commit/mirrors-clang-format: v19.1.0 → v19.1.1](https://github.com/pre-commit/mirrors-clang-format/compare/v19.1.0...v19.1.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 486b5e94fd..6a1d303f64 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: trailing-whitespace exclude: "^.+\\.pbtxt$" @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.6.8 + rev: v0.6.9 hooks: - id: ruff args: ["--fix"] @@ -47,12 +47,12 @@ repos: exclude: ^source/3rdparty # Python inside docs - repo: https://github.com/asottile/blacken-docs - rev: 1.18.0 + rev: 1.19.0 hooks: - id: blacken-docs # C++ - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v19.1.0 + rev: v19.1.1 hooks: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) From 3939786f1dab77c18f501c42340535f2a3708141 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 9 Oct 2024 07:19:47 -0400 Subject: [PATCH 030/193] feat(jax/array-api): dpa1 (#4160) ## Summary by CodeRabbit - **New Features** - Updated method for converting input to NumPy arrays, enhancing performance and compatibility with array-like structures. - Simplified handling of weight, bias, and identity variables for improved compatibility with array backends. - Introduced new network classes and enhanced network management functionalities. - Added support for the new `array_api_strict` backend in testing. - **Bug Fixes** - Fixed serialization process to ensure accurate conversion of weights and biases. - **Tests** - Added tests to validate the new functionalities and ensure compatibility across various backends, including JAX and Array API Strict. - **Chores** - Continued improvements to project structure and dependencies for better maintainability. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/dpmodel/array_api.py | 40 +++ deepmd/dpmodel/descriptor/dpa1.py | 236 +++++++++++++----- deepmd/dpmodel/utils/env_mat.py | 29 ++- deepmd/dpmodel/utils/exclude_mask.py | 34 ++- deepmd/dpmodel/utils/network.py | 38 +-- deepmd/dpmodel/utils/nlist.py | 118 +++++---- deepmd/dpmodel/utils/region.py | 34 +-- deepmd/dpmodel/utils/type_embed.py | 2 +- deepmd/jax/common.py | 50 +++- deepmd/jax/descriptor/__init__.py | 1 + deepmd/jax/descriptor/dpa1.py | 86 +++++++ deepmd/jax/env.py | 4 + deepmd/jax/utils/exclude_mask.py | 18 ++ deepmd/jax/utils/network.py | 51 +++- deepmd/jax/utils/type_embed.py | 2 + pyproject.toml | 1 + source/tests/array_api_strict/__init__.py | 2 + source/tests/array_api_strict/common.py | 25 ++ .../array_api_strict/descriptor/__init__.py | 1 + .../tests/array_api_strict/descriptor/dpa1.py | 81 ++++++ .../tests/array_api_strict/utils/__init__.py | 1 + .../array_api_strict/utils/exclude_mask.py | 17 ++ .../tests/array_api_strict/utils/network.py | 45 ++++ .../array_api_strict/utils/type_embed.py | 22 ++ .../common/dpmodel/test_descriptor_dpa1.py | 19 ++ source/tests/consistent/common.py | 66 +++++ source/tests/consistent/descriptor/common.py | 63 +++++ .../tests/consistent/descriptor/test_dpa1.py | 96 +++++++ .../tests/consistent/test_type_embedding.py | 13 + 29 files changed, 1022 insertions(+), 173 deletions(-) create mode 100644 deepmd/jax/descriptor/__init__.py create mode 100644 deepmd/jax/descriptor/dpa1.py create mode 100644 deepmd/jax/utils/exclude_mask.py create mode 100644 source/tests/array_api_strict/__init__.py create mode 100644 source/tests/array_api_strict/common.py create mode 100644 source/tests/array_api_strict/descriptor/__init__.py create mode 100644 source/tests/array_api_strict/descriptor/dpa1.py create mode 100644 source/tests/array_api_strict/utils/__init__.py create mode 100644 source/tests/array_api_strict/utils/exclude_mask.py create mode 100644 source/tests/array_api_strict/utils/network.py create mode 100644 source/tests/array_api_strict/utils/type_embed.py diff --git a/deepmd/dpmodel/array_api.py b/deepmd/dpmodel/array_api.py index e4af2ad627..360df78a7b 100644 --- a/deepmd/dpmodel/array_api.py +++ b/deepmd/dpmodel/array_api.py @@ -1,6 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later """Utilities for the array API.""" +import array_api_compat + def support_array_api(version: str) -> callable: """Mark a function as supporting the specific version of the array API. @@ -27,3 +29,41 @@ def set_version(func: callable) -> callable: return func return set_version + + +# array api adds take_along_axis in https://github.com/data-apis/array-api/pull/816 +# but it hasn't been released yet +# below is a pure Python implementation of take_along_axis +# https://github.com/data-apis/array-api/issues/177#issuecomment-2093630595 +def xp_swapaxes(a, axis1, axis2): + xp = array_api_compat.array_namespace(a) + axes = list(range(a.ndim)) + axes[axis1], axes[axis2] = axes[axis2], axes[axis1] + a = xp.permute_dims(a, axes) + return a + + +def xp_take_along_axis(arr, indices, axis): + xp = array_api_compat.array_namespace(arr) + arr = xp_swapaxes(arr, axis, -1) + indices = xp_swapaxes(indices, axis, -1) + + m = arr.shape[-1] + n = indices.shape[-1] + + shape = list(arr.shape) + shape.pop(-1) + shape = [*shape, n] + + arr = xp.reshape(arr, (-1,)) + if n != 0: + indices = xp.reshape(indices, (-1, n)) + else: + indices = xp.reshape(indices, (0, 0)) + + offset = (xp.arange(indices.shape[0]) * m)[:, xp.newaxis] + indices = xp.reshape(offset + indices, (-1,)) + + out = xp.take(arr, indices) + out = xp.reshape(out, shape) + return xp_swapaxes(out, axis, -1) diff --git a/deepmd/dpmodel/descriptor/dpa1.py b/deepmd/dpmodel/descriptor/dpa1.py index 5ba3fc11b2..add9cb9f71 100644 --- a/deepmd/dpmodel/descriptor/dpa1.py +++ b/deepmd/dpmodel/descriptor/dpa1.py @@ -6,6 +6,7 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel import ( @@ -13,6 +14,9 @@ PRECISION_DICT, NativeOP, ) +from deepmd.dpmodel.array_api import ( + xp_take_along_axis, +) from deepmd.dpmodel.utils import ( EmbeddingNet, EnvMat, @@ -32,9 +36,6 @@ from deepmd.dpmodel.utils.update_sel import ( UpdateSel, ) -from deepmd.env import ( - GLOBAL_NP_FLOAT_PRECISION, -) from deepmd.utils.data_system import ( DeepmdDataSystem, ) @@ -59,13 +60,16 @@ def np_softmax(x, axis=-1): - x = np.nan_to_num(x) # to avoid value warning - e_x = np.exp(x - np.max(x, axis=axis, keepdims=True)) - return e_x / np.sum(e_x, axis=axis, keepdims=True) + xp = array_api_compat.array_namespace(x) + # x = xp.nan_to_num(x) # to avoid value warning + x = xp.where(xp.isnan(x), xp.zeros_like(x), x) + e_x = xp.exp(x - xp.max(x, axis=axis, keepdims=True)) + return e_x / xp.sum(e_x, axis=axis, keepdims=True) def np_normalize(x, axis=-1): - return x / np.linalg.norm(x, axis=axis, keepdims=True) + xp = array_api_compat.array_namespace(x) + return x / xp.linalg.vector_norm(x, axis=axis, keepdims=True) @BaseDescriptor.register("se_atten") @@ -474,10 +478,14 @@ def call( The smooth switch function. """ del mapping + xp = array_api_compat.array_namespace(coord_ext, atype_ext, nlist) nf, nloc, nnei = nlist.shape - nall = coord_ext.reshape(nf, -1).shape[1] // 3 + nall = xp.reshape(coord_ext, (nf, -1)).shape[1] // 3 # nf x nall x tebd_dim - atype_embd_ext = self.type_embedding.call()[atype_ext] + atype_embd_ext = xp.reshape( + xp.take(self.type_embedding.call(), xp.reshape(atype_ext, [-1]), axis=0), + (nf, nall, self.tebd_dim), + ) # nfnl x tebd_dim atype_embd = atype_embd_ext[:, :nloc, :] grrg, g2, h2, rot_mat, sw = self.se_atten( @@ -489,8 +497,8 @@ def call( ) # nf x nloc x (ng x ng1 + tebd_dim) if self.concat_output_tebd: - grrg = np.concatenate( - [grrg, atype_embd.reshape(nf, nloc, self.tebd_dim)], axis=-1 + grrg = xp.concat( + [grrg, xp.reshape(atype_embd, (nf, nloc, self.tebd_dim))], axis=-1 ) return grrg, rot_mat, None, None, sw @@ -536,8 +544,8 @@ def serialize(self) -> dict: "exclude_types": obj.exclude_types, "env_protection": obj.env_protection, "@variables": { - "davg": obj["davg"], - "dstd": obj["dstd"], + "davg": np.array(obj["davg"]), + "dstd": np.array(obj["dstd"]), }, ## to be updated when the options are supported. "trainable": self.trainable, @@ -683,12 +691,12 @@ def __init__( self.embd_input_dim = 1 + self.tebd_dim_input else: self.embd_input_dim = 1 - self.embeddings = NetworkCollection( + embeddings = NetworkCollection( ndim=0, ntypes=self.ntypes, network_type="embedding_network", ) - self.embeddings[0] = EmbeddingNet( + embeddings[0] = EmbeddingNet( self.embd_input_dim, self.neuron, self.activation_function, @@ -696,13 +704,14 @@ def __init__( self.precision, seed=child_seed(seed, 0), ) + self.embeddings = embeddings if self.tebd_input_mode in ["strip"]: - self.embeddings_strip = NetworkCollection( + embeddings_strip = NetworkCollection( ndim=0, ntypes=self.ntypes, network_type="embedding_network", ) - self.embeddings_strip[0] = EmbeddingNet( + embeddings_strip[0] = EmbeddingNet( self.tebd_dim_input, self.neuron, self.activation_function, @@ -710,6 +719,7 @@ def __init__( self.precision, seed=child_seed(seed, 1), ) + self.embeddings_strip = embeddings_strip else: self.embeddings_strip = None self.dpa1_attention = NeighborGatedAttention( @@ -837,9 +847,10 @@ def cal_g( ss, embedding_idx, ): + xp = array_api_compat.array_namespace(ss) nfnl, nnei = ss.shape[0:2] - shape2 = np.prod(ss.shape[2:]) - ss = ss.reshape(nfnl, nnei, shape2) + shape2 = xp.prod(xp.asarray(ss.shape[2:])) + ss = xp.reshape(ss, (nfnl, nnei, shape2)) # nfnl x nnei x ng gg = self.embeddings[embedding_idx].call(ss) return gg @@ -850,9 +861,10 @@ def cal_g_strip( embedding_idx, ): assert self.embeddings_strip is not None + xp = array_api_compat.array_namespace(ss) nfnl, nnei = ss.shape[0:2] - shape2 = np.prod(ss.shape[2:]) - ss = ss.reshape(nfnl, nnei, shape2) + shape2 = xp.prod(xp.asarray(ss.shape[2:])) + ss = xp.reshape(ss, (nfnl, nnei, shape2)) # nfnl x nnei x ng gg = self.embeddings_strip[embedding_idx].call(ss) return gg @@ -865,6 +877,7 @@ def call( atype_embd_ext: Optional[np.ndarray] = None, mapping: Optional[np.ndarray] = None, ): + xp = array_api_compat.array_namespace(nlist, coord_ext, atype_ext) # nf x nloc x nnei x 4 dmatrix, diff, sw = self.env_mat.call( coord_ext, atype_ext, nlist, self.mean, self.stddev @@ -872,41 +885,42 @@ def call( nf, nloc, nnei, _ = dmatrix.shape exclude_mask = self.emask.build_type_exclude_mask(nlist, atype_ext) # nfnl x nnei - exclude_mask = exclude_mask.reshape(nf * nloc, nnei) + exclude_mask = xp.reshape(exclude_mask, (nf * nloc, nnei)) # nfnl x nnei - nlist = nlist.reshape(nf * nloc, nnei) - nlist = np.where(exclude_mask, nlist, -1) + nlist = xp.reshape(nlist, (nf * nloc, nnei)) + nlist = xp.where(exclude_mask, nlist, xp.full_like(nlist, -1)) # nfnl x nnei x 4 - dmatrix = dmatrix.reshape(nf * nloc, nnei, 4) + dmatrix = xp.reshape(dmatrix, (nf * nloc, nnei, 4)) # nfnl x nnei x 1 - sw = sw.reshape(nf * nloc, nnei, 1) + sw = xp.reshape(sw, (nf * nloc, nnei, 1)) # nfnl x tebd_dim - atype_embd = atype_embd_ext[:, :nloc, :].reshape(nf * nloc, self.tebd_dim) + atype_embd = xp.reshape(atype_embd_ext[:, :nloc, :], (nf * nloc, self.tebd_dim)) # nfnl x nnei x tebd_dim - atype_embd_nnei = np.tile(atype_embd[:, np.newaxis, :], (1, nnei, 1)) + atype_embd_nnei = xp.tile(atype_embd[:, xp.newaxis, :], (1, nnei, 1)) # nfnl x nnei nlist_mask = nlist != -1 # nfnl x nnei x 1 - sw = np.where(nlist_mask[:, :, None], sw, 0.0) - nlist_masked = np.where(nlist_mask, nlist, 0) - index = np.tile(nlist_masked.reshape(nf, -1, 1), (1, 1, self.tebd_dim)) + sw = xp.where(nlist_mask[:, :, None], sw, xp.full_like(sw, 0.0)) + nlist_masked = xp.where(nlist_mask, nlist, xp.zeros_like(nlist)) + index = xp.tile(xp.reshape(nlist_masked, (nf, -1, 1)), (1, 1, self.tebd_dim)) # nfnl x nnei x tebd_dim - atype_embd_nlist = np.take_along_axis(atype_embd_ext, index, axis=1).reshape( - nf * nloc, nnei, self.tebd_dim + atype_embd_nlist = xp_take_along_axis(atype_embd_ext, index, axis=1) + atype_embd_nlist = xp.reshape( + atype_embd_nlist, (nf * nloc, nnei, self.tebd_dim) ) ng = self.neuron[-1] # nfnl x nnei x 4 - rr = dmatrix.reshape(nf * nloc, nnei, 4) - rr = rr * exclude_mask[:, :, None] + rr = xp.reshape(dmatrix, (nf * nloc, nnei, 4)) + rr = rr * xp.astype(exclude_mask[:, :, None], rr.dtype) # nfnl x nnei x 1 ss = rr[..., 0:1] if self.tebd_input_mode in ["concat"]: if not self.type_one_side: # nfnl x nnei x (1 + 2 * tebd_dim) - ss = np.concatenate([ss, atype_embd_nlist, atype_embd_nnei], axis=-1) + ss = xp.concat([ss, atype_embd_nlist, atype_embd_nnei], axis=-1) else: # nfnl x nnei x (1 + tebd_dim) - ss = np.concatenate([ss, atype_embd_nlist], axis=-1) + ss = xp.concat([ss, atype_embd_nlist], axis=-1) # calculate gg # nfnl x nnei x ng gg = self.cal_g(ss, 0) @@ -916,42 +930,47 @@ def call( assert self.embeddings_strip is not None if not self.type_one_side: # nfnl x nnei x (tebd_dim * 2) - tt = np.concatenate([atype_embd_nlist, atype_embd_nnei], axis=-1) + tt = xp.concat([atype_embd_nlist, atype_embd_nnei], axis=-1) else: # nfnl x nnei x tebd_dim tt = atype_embd_nlist # nfnl x nnei x ng gg_t = self.cal_g_strip(tt, 0) if self.smooth: - gg_t = gg_t * sw.reshape(-1, self.nnei, 1) + gg_t = gg_t * xp.reshape(sw, (-1, self.nnei, 1)) # nfnl x nnei x ng gg = gg_s * gg_t + gg_s else: raise NotImplementedError - input_r = rr.reshape(-1, nnei, 4)[:, :, 1:4] / np.maximum( - np.linalg.norm(rr.reshape(-1, nnei, 4)[:, :, 1:4], axis=-1, keepdims=True), - 1e-12, + normed = xp.linalg.vector_norm( + xp.reshape(rr, (-1, nnei, 4))[:, :, 1:4], axis=-1, keepdims=True + ) + input_r = xp.reshape(rr, (-1, nnei, 4))[:, :, 1:4] / xp.maximum( + normed, + xp.full_like(normed, 1e-12), ) gg = self.dpa1_attention( gg, nlist_mask, input_r=input_r, sw=sw ) # shape is [nframes*nloc, self.neei, out_size] # nfnl x ng x 4 - gr = np.einsum("lni,lnj->lij", gg, rr) + # gr = xp.einsum("lni,lnj->lij", gg, rr) + gr = xp.sum(gg[:, :, :, None] * rr[:, :, None, :], axis=1) gr /= self.nnei gr1 = gr[:, : self.axis_neuron, :] # nfnl x ng x ng1 - grrg = np.einsum("lid,ljd->lij", gr, gr1) + # grrg = xp.einsum("lid,ljd->lij", gr, gr1) + grrg = xp.sum(gr[:, :, None, :] * gr1[:, None, :, :], axis=3) # nf x nloc x (ng x ng1) - grrg = grrg.reshape(nf, nloc, ng * self.axis_neuron).astype( - GLOBAL_NP_FLOAT_PRECISION + grrg = xp.astype( + xp.reshape(grrg, (nf, nloc, ng * self.axis_neuron)), coord_ext.dtype ) return ( - grrg.reshape(nf, nloc, self.filter_neuron[-1] * self.axis_neuron), - gg.reshape(nf, nloc, self.nnei, self.filter_neuron[-1]), - dmatrix.reshape(nf, nloc, self.nnei, 4)[..., 1:], - gr[..., 1:].reshape(nf, nloc, self.filter_neuron[-1], 3), - sw, + xp.reshape(grrg, (nf, nloc, self.filter_neuron[-1] * self.axis_neuron)), + xp.reshape(gg, (nf, nloc, self.nnei, self.filter_neuron[-1])), + xp.reshape(dmatrix, (nf, nloc, self.nnei, 4))[..., 1:], + xp.reshape(gr[..., 1:], (nf, nloc, self.filter_neuron[-1], 3)), + xp.reshape(sw, (nf, nloc, nnei, 1)), ) def has_message_passing(self) -> bool: @@ -962,6 +981,77 @@ def need_sorted_nlist_for_lower(self) -> bool: """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" return False + def serialize(self) -> dict: + """Serialize the descriptor to dict.""" + obj = self + data = { + "@class": "DescriptorBlock", + "type": "dpa1", + "@version": 1, + "rcut": obj.rcut, + "rcut_smth": obj.rcut_smth, + "sel": obj.sel, + "ntypes": obj.ntypes, + "neuron": obj.neuron, + "axis_neuron": obj.axis_neuron, + "tebd_dim": obj.tebd_dim, + "tebd_input_mode": obj.tebd_input_mode, + "set_davg_zero": obj.set_davg_zero, + "attn": obj.attn, + "attn_layer": obj.attn_layer, + "attn_dotr": obj.attn_dotr, + "attn_mask": obj.attn_mask, + "activation_function": obj.activation_function, + "resnet_dt": obj.resnet_dt, + "scaling_factor": obj.scaling_factor, + "normalize": obj.normalize, + "temperature": obj.temperature, + "trainable_ln": obj.trainable_ln, + "ln_eps": obj.ln_eps, + "smooth": obj.smooth, + "type_one_side": obj.type_one_side, + # make deterministic + "precision": np.dtype(PRECISION_DICT[obj.precision]).name, + "embeddings": obj.embeddings.serialize(), + "attention_layers": obj.dpa1_attention.serialize(), + "env_mat": obj.env_mat.serialize(), + "exclude_types": obj.exclude_types, + "env_protection": obj.env_protection, + "@variables": { + "davg": np.array(obj["davg"]), + "dstd": np.array(obj["dstd"]), + }, + } + if obj.tebd_input_mode in ["strip"]: + data.update({"embeddings_strip": obj.embeddings_strip.serialize()}) + return data + + @classmethod + def deserialize(cls, data: dict) -> "DescrptDPA1": + """Deserialize from dict.""" + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + data.pop("type") + variables = data.pop("@variables") + embeddings = data.pop("embeddings") + attention_layers = data.pop("attention_layers") + env_mat = data.pop("env_mat") + tebd_input_mode = data["tebd_input_mode"] + if tebd_input_mode in ["strip"]: + embeddings_strip = data.pop("embeddings_strip") + else: + embeddings_strip = None + obj = cls(**data) + + obj["davg"] = variables["davg"] + obj["dstd"] = variables["dstd"] + obj.embeddings = NetworkCollection.deserialize(embeddings) + if tebd_input_mode in ["strip"]: + obj.embeddings_strip = NetworkCollection.deserialize(embeddings_strip) + obj.dpa1_attention = NeighborGatedAttention.deserialize(attention_layers) + return obj + class NeighborGatedAttention(NativeOP): def __init__( @@ -1254,18 +1344,23 @@ def __init__( ) def call(self, query, nei_mask, input_r=None, sw=None, attnw_shift=20.0): + xp = array_api_compat.array_namespace(query, nei_mask) # Linear projection - q, k, v = np.split(self.in_proj(query), 3, axis=-1) + # q, k, v = xp.split(self.in_proj(query), 3, axis=-1) + _query = self.in_proj(query) + q = _query[..., 0 : self.head_dim] + k = _query[..., self.head_dim : self.head_dim * 2] + v = _query[..., self.head_dim * 2 : self.head_dim * 3] # Reshape and normalize # (nf x nloc) x num_heads x nnei x head_dim - q = q.reshape(-1, self.nnei, self.num_heads, self.head_dim).transpose( - 0, 2, 1, 3 + q = xp.permute_dims( + xp.reshape(q, (-1, self.nnei, self.num_heads, self.head_dim)), (0, 2, 1, 3) ) - k = k.reshape(-1, self.nnei, self.num_heads, self.head_dim).transpose( - 0, 2, 1, 3 + k = xp.permute_dims( + xp.reshape(k, (-1, self.nnei, self.num_heads, self.head_dim)), (0, 2, 1, 3) ) - v = v.reshape(-1, self.nnei, self.num_heads, self.head_dim).transpose( - 0, 2, 1, 3 + v = xp.permute_dims( + xp.reshape(v, (-1, self.nnei, self.num_heads, self.head_dim)), (0, 2, 1, 3) ) if self.normalize: q = np_normalize(q, axis=-1) @@ -1274,29 +1369,38 @@ def call(self, query, nei_mask, input_r=None, sw=None, attnw_shift=20.0): q = q * self.scaling # Attention weights # (nf x nloc) x num_heads x nnei x nnei - attn_weights = q @ k.transpose(0, 1, 3, 2) - nei_mask = nei_mask.reshape(-1, self.nnei) + attn_weights = q @ xp.permute_dims(k, (0, 1, 3, 2)) + nei_mask = xp.reshape(nei_mask, (-1, self.nnei)) if self.smooth: - sw = sw.reshape(-1, 1, self.nnei) + sw = xp.reshape(sw, (-1, 1, self.nnei)) attn_weights = (attn_weights + attnw_shift) * sw[:, :, :, None] * sw[ :, :, None, : ] - attnw_shift else: - attn_weights = np.where(nei_mask[:, None, None, :], attn_weights, -np.inf) + attn_weights = xp.where( + nei_mask[:, None, None, :], + attn_weights, + xp.full_like(attn_weights, -xp.inf), + ) attn_weights = np_softmax(attn_weights, axis=-1) - attn_weights = np.where(nei_mask[:, None, :, None], attn_weights, 0.0) + attn_weights = xp.where( + nei_mask[:, None, :, None], attn_weights, xp.zeros_like(attn_weights) + ) if self.smooth: attn_weights = attn_weights * sw[:, :, :, None] * sw[:, :, None, :] if self.dotr: - angular_weight = (input_r @ input_r.transpose(0, 2, 1)).reshape( - -1, 1, self.nnei, self.nnei + angular_weight = xp.reshape( + input_r @ xp.permute_dims(input_r, (0, 2, 1)), + (-1, 1, self.nnei, self.nnei), ) attn_weights = attn_weights * angular_weight # Output projection # (nf x nloc) x num_heads x nnei x head_dim o = attn_weights @ v # (nf x nloc) x nnei x (num_heads x head_dim) - o = o.transpose(0, 2, 1, 3).reshape(-1, self.nnei, self.hidden_dim) + o = xp.reshape( + xp.permute_dims(o, (0, 2, 1, 3)), (-1, self.nnei, self.hidden_dim) + ) output = self.out_proj(o) return output, attn_weights diff --git a/deepmd/dpmodel/utils/env_mat.py b/deepmd/dpmodel/utils/env_mat.py index 41f2591279..f4bc333a03 100644 --- a/deepmd/dpmodel/utils/env_mat.py +++ b/deepmd/dpmodel/utils/env_mat.py @@ -12,6 +12,7 @@ ) from deepmd.dpmodel.array_api import ( support_array_api, + xp_take_along_axis, ) @@ -44,33 +45,34 @@ def _make_env_mat( protection: float = 0.0, ): """Make smooth environment matrix.""" + xp = array_api_compat.array_namespace(nlist) nf, nloc, nnei = nlist.shape # nf x nall x 3 - coord = coord.reshape(nf, -1, 3) + coord = xp.reshape(coord, (nf, -1, 3)) mask = nlist >= 0 - nlist = nlist * mask + nlist = nlist * xp.astype(mask, nlist.dtype) # nf x (nloc x nnei) x 3 - index = np.tile(nlist.reshape(nf, -1, 1), (1, 1, 3)) - coord_r = np.take_along_axis(coord, index, 1) + index = xp.tile(xp.reshape(nlist, (nf, -1, 1)), (1, 1, 3)) + coord_r = xp_take_along_axis(coord, index, 1) # nf x nloc x nnei x 3 - coord_r = coord_r.reshape(nf, nloc, nnei, 3) + coord_r = xp.reshape(coord_r, (nf, nloc, nnei, 3)) # nf x nloc x 1 x 3 - coord_l = coord[:, :nloc].reshape(nf, -1, 1, 3) + coord_l = xp.reshape(coord[:, :nloc, ...], (nf, -1, 1, 3)) # nf x nloc x nnei x 3 diff = coord_r - coord_l # nf x nloc x nnei - length = np.linalg.norm(diff, axis=-1, keepdims=True) + length = xp.linalg.vector_norm(diff, axis=-1, keepdims=True) # for index 0 nloc atom - length = length + ~np.expand_dims(mask, -1) + length = length + xp.astype(~xp.expand_dims(mask, axis=-1), length.dtype) t0 = 1 / (length + protection) t1 = diff / (length + protection) ** 2 weight = compute_smooth_weight(length, ruct_smth, rcut) - weight = weight * np.expand_dims(mask, -1) + weight = weight * xp.astype(xp.expand_dims(mask, axis=-1), weight.dtype) if radial_only: env_mat = t0 * weight else: - env_mat = np.concatenate([t0, t1], axis=-1) * weight - return env_mat, diff * np.expand_dims(mask, -1), weight + env_mat = xp.concat([t0, t1], axis=-1) * weight + return env_mat, diff * xp.astype(xp.expand_dims(mask, axis=-1), diff.dtype), weight class EnvMat(NativeOP): @@ -122,13 +124,14 @@ def call( switch The value of switch function. shape: nf x nloc x nnei """ + xp = array_api_compat.array_namespace(coord_ext, atype_ext, nlist) em, diff, sw = self._call(nlist, coord_ext, radial_only) nf, nloc, nnei = nlist.shape atype = atype_ext[:, :nloc] if davg is not None: - em -= davg[atype] + em -= xp.reshape(xp.take(davg, xp.reshape(atype, (-1,)), axis=0), em.shape) if dstd is not None: - em /= dstd[atype] + em /= xp.reshape(xp.take(dstd, xp.reshape(atype, (-1,)), axis=0), em.shape) return em, diff, sw def _call(self, nlist, coord_ext, radial_only): diff --git a/deepmd/dpmodel/utils/exclude_mask.py b/deepmd/dpmodel/utils/exclude_mask.py index d0a739b9d4..5469e66d97 100644 --- a/deepmd/dpmodel/utils/exclude_mask.py +++ b/deepmd/dpmodel/utils/exclude_mask.py @@ -1,7 +1,12 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import array_api_compat import numpy as np +from deepmd.dpmodel.array_api import ( + xp_take_along_axis, +) + class AtomExcludeMask: """Computes the type exclusion mask for atoms.""" @@ -45,8 +50,9 @@ def build_type_exclude_mask( otherwise being 1. """ + xp = array_api_compat.array_namespace(atype) nf, natom = atype.shape - return self.type_mask[atype].reshape(nf, natom) + return xp.reshape(self.type_mask[atype], (nf, natom)) class PairExcludeMask: @@ -64,7 +70,7 @@ def __init__( self.exclude_types.add((tt[0], tt[1])) self.exclude_types.add((tt[1], tt[0])) # ntypes + 1 for nlist masks - self.type_mask = np.array( + type_mask = np.array( [ [ 1 if (tt_i, tt_j) not in self.exclude_types else 0 @@ -75,7 +81,7 @@ def __init__( dtype=np.int32, ) # (ntypes+1 x ntypes+1) - self.type_mask = self.type_mask.reshape([-1]) + self.type_mask = type_mask.reshape([-1]) def get_exclude_types(self): return self.exclude_types @@ -102,23 +108,29 @@ def build_type_exclude_mask( otherwise being 1. """ + xp = array_api_compat.array_namespace(nlist, atype_ext) if len(self.exclude_types) == 0: # safely return 1 if nothing is excluded. - return np.ones_like(nlist, dtype=np.int32) + return xp.ones_like(nlist, dtype=xp.int32) nf, nloc, nnei = nlist.shape nall = atype_ext.shape[1] # add virtual atom of type ntypes. nf x nall+1 - ae = np.concatenate( - [atype_ext, self.ntypes * np.ones([nf, 1], dtype=atype_ext.dtype)], axis=-1 + ae = xp.concat( + [atype_ext, self.ntypes * xp.ones([nf, 1], dtype=atype_ext.dtype)], axis=-1 ) - type_i = atype_ext[:, :nloc].reshape(nf, nloc) * (self.ntypes + 1) + type_i = xp.reshape(atype_ext[:, :nloc], (nf, nloc)) * (self.ntypes + 1) # nf x nloc x nnei - index = np.where(nlist == -1, nall, nlist).reshape(nf, nloc * nnei) - type_j = np.take_along_axis(ae, index, axis=1).reshape(nf, nloc, nnei) + index = xp.reshape( + xp.where(nlist == -1, xp.full_like(nlist, nall), nlist), (nf, nloc * nnei) + ) + type_j = xp_take_along_axis(ae, index, axis=1) + type_j = xp.reshape(type_j, (nf, nloc, nnei)) type_ij = type_i[:, :, None] + type_j # nf x (nloc x nnei) - type_ij = type_ij.reshape(nf, nloc * nnei) - mask = self.type_mask[type_ij].reshape(nf, nloc, nnei) + type_ij = xp.reshape(type_ij, (nf, nloc * nnei)) + mask = xp.reshape( + xp.take(self.type_mask, xp.reshape(type_ij, (-1,))), (nf, nloc, nnei) + ) return mask def __contains__(self, item): diff --git a/deepmd/dpmodel/utils/network.py b/deepmd/dpmodel/utils/network.py index e1242c3669..339035ff4e 100644 --- a/deepmd/dpmodel/utils/network.py +++ b/deepmd/dpmodel/utils/network.py @@ -146,15 +146,18 @@ def deserialize(cls, data: dict) -> "NativeLayer": num_out, **data, ) - obj.w, obj.b, obj.idt = ( + w, b, idt = ( variables["w"], variables.get("b", None), variables.get("idt", None), ) - if obj.b is not None: - obj.b = obj.b.ravel() - if obj.idt is not None: - obj.idt = obj.idt.ravel() + if b is not None: + b = b.ravel() + if idt is not None: + idt = idt.ravel() + obj.w = w + obj.b = b + obj.idt = idt obj.check_shape_consistency() return obj @@ -175,8 +178,11 @@ def check_type_consistency(self): def check_var(var): if var is not None: + # array api standard doesn't provide a API to get the dtype name + # this is really hacked + dtype_name = str(var.dtype).split(".")[-1] # assertion "float64" == "double" would fail - assert PRECISION_DICT[var.dtype.name] is PRECISION_DICT[precision] + assert PRECISION_DICT[dtype_name] is PRECISION_DICT[precision] check_var(self.w) check_var(self.b) @@ -249,7 +255,7 @@ def call(self, x: np.ndarray) -> np.ndarray: if self.resnet and self.w.shape[1] == self.w.shape[0]: y += x elif self.resnet and self.w.shape[1] == 2 * self.w.shape[0]: - y += xp.concatenate([x, x], axis=-1) + y += xp.concat([x, x], axis=-1) return y @@ -360,10 +366,11 @@ def __init__( precision=precision, seed=seed, ) - self.w = self.w.squeeze(0) # keep the weight shape to be [num_in] + xp = array_api_compat.array_namespace(self.w, self.b) + self.w = xp.squeeze(self.w, 0) # keep the weight shape to be [num_in] if self.uni_init: - self.w = np.ones_like(self.w) - self.b = np.zeros_like(self.b) + self.w = xp.ones_like(self.w) + self.b = xp.zeros_like(self.b) # only to keep consistent with other backends self.trainable = trainable @@ -376,8 +383,8 @@ def serialize(self) -> dict: The serialized layer. """ data = { - "w": self.w, - "b": self.b, + "w": to_numpy_array(self.w), + "b": to_numpy_array(self.b), } return { "@class": "LayerNorm", @@ -471,11 +478,12 @@ def call(self, x: np.ndarray) -> np.ndarray: @staticmethod def layer_norm_numpy(x, shape, weight=None, bias=None, eps=1e-5): + xp = array_api_compat.array_namespace(x) # mean and variance - mean = np.mean(x, axis=tuple(range(-len(shape), 0)), keepdims=True) - var = np.var(x, axis=tuple(range(-len(shape), 0)), keepdims=True) + mean = xp.mean(x, axis=tuple(range(-len(shape), 0)), keepdims=True) + var = xp.var(x, axis=tuple(range(-len(shape), 0)), keepdims=True) # normalize - x_normalized = (x - mean) / np.sqrt(var + eps) + x_normalized = (x - mean) / xp.sqrt(var + eps) # shift and scale if weight is not None and bias is not None: x_normalized = x_normalized * weight + bias diff --git a/deepmd/dpmodel/utils/nlist.py b/deepmd/dpmodel/utils/nlist.py index 4d0b3e3286..4806fa4cd8 100644 --- a/deepmd/dpmodel/utils/nlist.py +++ b/deepmd/dpmodel/utils/nlist.py @@ -4,8 +4,13 @@ Union, ) +import array_api_compat import numpy as np +from deepmd.dpmodel.array_api import ( + xp_take_along_axis, +) + from .region import ( normalize_coord, to_face_distance, @@ -88,34 +93,36 @@ def build_neighbor_list( For virtual atoms all neighboring positions are filled with -1. """ + xp = array_api_compat.array_namespace(coord, atype) batch_size = coord.shape[0] - coord = coord.reshape(batch_size, -1) + coord = xp.reshape(coord, (batch_size, -1)) nall = coord.shape[1] // 3 # fill virtual atoms with large coords so they are not neighbors of any # real atom. if coord.size > 0: - xmax = np.max(coord) + 2.0 * rcut + xmax = xp.max(coord) + 2.0 * rcut else: xmax = 2.0 * rcut # nf x nall is_vir = atype < 0 - coord1 = np.where( - is_vir[:, :, None], xmax, coord.reshape(batch_size, nall, 3) - ).reshape(batch_size, nall * 3) + coord1 = xp.where( + is_vir[:, :, None], xmax, xp.reshape(coord, (batch_size, nall, 3)) + ) + coord1 = xp.reshape(coord1, (batch_size, nall * 3)) if isinstance(sel, int): sel = [sel] nsel = sum(sel) coord0 = coord1[:, : nloc * 3] diff = ( - coord1.reshape([batch_size, -1, 3])[:, None, :, :] - - coord0.reshape([batch_size, -1, 3])[:, :, None, :] + xp.reshape(coord1, [batch_size, -1, 3])[:, None, :, :] + - xp.reshape(coord0, [batch_size, -1, 3])[:, :, None, :] ) assert list(diff.shape) == [batch_size, nloc, nall, 3] - rr = np.linalg.norm(diff, axis=-1) + rr = xp.linalg.vector_norm(diff, axis=-1) # if central atom has two zero distances, sorting sometimes can not exclude itself - rr -= np.eye(nloc, nall, dtype=diff.dtype)[np.newaxis, :, :] - nlist = np.argsort(rr, axis=-1) - rr = np.sort(rr, axis=-1) + rr -= xp.eye(nloc, nall, dtype=diff.dtype)[xp.newaxis, :, :] + nlist = xp.argsort(rr, axis=-1) + rr = xp.sort(rr, axis=-1) rr = rr[:, :, 1:] nlist = nlist[:, :, 1:] nnei = rr.shape[2] @@ -123,16 +130,20 @@ def build_neighbor_list( rr = rr[:, :, :nsel] nlist = nlist[:, :, :nsel] else: - rr = np.concatenate( - [rr, np.ones([batch_size, nloc, nsel - nnei]) + rcut], # pylint: disable=no-explicit-dtype + rr = xp.concatenate( + [rr, xp.ones([batch_size, nloc, nsel - nnei]) + rcut], # pylint: disable=no-explicit-dtype axis=-1, ) - nlist = np.concatenate( - [nlist, np.ones([batch_size, nloc, nsel - nnei], dtype=nlist.dtype)], + nlist = xp.concatenate( + [nlist, xp.ones([batch_size, nloc, nsel - nnei], dtype=nlist.dtype)], axis=-1, ) assert list(nlist.shape) == [batch_size, nloc, nsel] - nlist = np.where(np.logical_or((rr > rcut), is_vir[:, :nloc, None]), -1, nlist) + nlist = xp.where( + xp.logical_or((rr > rcut), is_vir[:, :nloc, None]), + xp.full_like(nlist, -1), + nlist, + ) if distinguish_types: return nlist_distinguish_types(nlist, atype, sel) @@ -149,23 +160,24 @@ def nlist_distinguish_types( distinguish atom types. """ + xp = array_api_compat.array_namespace(nlist, atype) nf, nloc, _ = nlist.shape ret_nlist = [] - tmp_atype = np.tile(atype[:, None], [1, nloc, 1]) + tmp_atype = xp.tile(atype[:, None], [1, nloc, 1]) mask = nlist == -1 tnlist_0 = nlist.copy() tnlist_0[mask] = 0 - tnlist = np.take_along_axis(tmp_atype, tnlist_0, axis=2).squeeze() - tnlist = np.where(mask, -1, tnlist) + tnlist = xp_take_along_axis(tmp_atype, tnlist_0, axis=2).squeeze() + tnlist = xp.where(mask, -1, tnlist) snsel = tnlist.shape[2] for ii, ss in enumerate(sel): - pick_mask = (tnlist == ii).astype(np.int32) - sorted_indices = np.argsort(-pick_mask, kind="stable", axis=-1) - pick_mask_sorted = -np.sort(-pick_mask, axis=-1) - inlist = np.take_along_axis(nlist, sorted_indices, axis=2) - inlist = np.where(~pick_mask_sorted.astype(bool), -1, inlist) - ret_nlist.append(np.split(inlist, [ss, snsel - ss], axis=-1)[0]) - ret = np.concatenate(ret_nlist, axis=-1) + pick_mask = (tnlist == ii).astype(xp.int32) + sorted_indices = xp.argsort(-pick_mask, kind="stable", axis=-1) + pick_mask_sorted = -xp.sort(-pick_mask, axis=-1) + inlist = xp_take_along_axis(nlist, sorted_indices, axis=2) + inlist = xp.where(~pick_mask_sorted.astype(bool), -1, inlist) + ret_nlist.append(xp.split(inlist, [ss, snsel - ss], axis=-1)[0]) + ret = xp.concat(ret_nlist, axis=-1) return ret @@ -263,36 +275,46 @@ def extend_coord_with_ghosts( maping extended index to the local index """ + xp = array_api_compat.array_namespace(coord, atype) nf, nloc = atype.shape - aidx = np.tile(np.arange(nloc)[np.newaxis, :], (nf, 1)) # pylint: disable=no-explicit-dtype + aidx = xp.tile(xp.arange(nloc)[xp.newaxis, :], (nf, 1)) # pylint: disable=no-explicit-dtype if cell is None: nall = nloc - extend_coord = coord.copy() - extend_atype = atype.copy() - extend_aidx = aidx.copy() + extend_coord = coord + extend_atype = atype + extend_aidx = aidx else: - coord = coord.reshape((nf, nloc, 3)) - cell = cell.reshape((nf, 3, 3)) + coord = xp.reshape(coord, (nf, nloc, 3)) + cell = xp.reshape(cell, (nf, 3, 3)) to_face = to_face_distance(cell) - nbuff = np.ceil(rcut / to_face).astype(int) - nbuff = np.max(nbuff, axis=0) - xi = np.arange(-nbuff[0], nbuff[0] + 1, 1) # pylint: disable=no-explicit-dtype - yi = np.arange(-nbuff[1], nbuff[1] + 1, 1) # pylint: disable=no-explicit-dtype - zi = np.arange(-nbuff[2], nbuff[2] + 1, 1) # pylint: disable=no-explicit-dtype - xyz = np.outer(xi, np.array([1, 0, 0]))[:, np.newaxis, np.newaxis, :] - xyz = xyz + np.outer(yi, np.array([0, 1, 0]))[np.newaxis, :, np.newaxis, :] - xyz = xyz + np.outer(zi, np.array([0, 0, 1]))[np.newaxis, np.newaxis, :, :] - xyz = xyz.reshape(-1, 3) - shift_idx = xyz[np.argsort(np.linalg.norm(xyz, axis=1))] + nbuff = xp.astype(xp.ceil(rcut / to_face), xp.int64) + nbuff = xp.max(nbuff, axis=0) + xi = xp.arange(-int(nbuff[0]), int(nbuff[0]) + 1, 1) # pylint: disable=no-explicit-dtype + yi = xp.arange(-int(nbuff[1]), int(nbuff[1]) + 1, 1) # pylint: disable=no-explicit-dtype + zi = xp.arange(-int(nbuff[2]), int(nbuff[2]) + 1, 1) # pylint: disable=no-explicit-dtype + xyz = xp.linalg.outer(xi, xp.asarray([1, 0, 0]))[:, xp.newaxis, xp.newaxis, :] + xyz = ( + xyz + + xp.linalg.outer(yi, xp.asarray([0, 1, 0]))[xp.newaxis, :, xp.newaxis, :] + ) + xyz = ( + xyz + + xp.linalg.outer(zi, xp.asarray([0, 0, 1]))[xp.newaxis, xp.newaxis, :, :] + ) + xyz = xp.reshape(xyz, (-1, 3)) + xyz = xp.astype(xyz, coord.dtype) + shift_idx = xp.take(xyz, xp.argsort(xp.linalg.vector_norm(xyz, axis=1)), axis=0) ns, _ = shift_idx.shape nall = ns * nloc - shift_vec = np.einsum("sd,fdk->fsk", shift_idx, cell) + # shift_vec = xp.einsum("sd,fdk->fsk", shift_idx, cell) + shift_vec = xp.tensordot(shift_idx, cell, axes=([1], [1])) + shift_vec = xp.permute_dims(shift_vec, (1, 0, 2)) extend_coord = coord[:, None, :, :] + shift_vec[:, :, None, :] - extend_atype = np.tile(atype[:, :, np.newaxis], (1, ns, 1)) - extend_aidx = np.tile(aidx[:, :, np.newaxis], (1, ns, 1)) + extend_atype = xp.tile(atype[:, :, xp.newaxis], (1, ns, 1)) + extend_aidx = xp.tile(aidx[:, :, xp.newaxis], (1, ns, 1)) return ( - extend_coord.reshape((nf, nall * 3)), - extend_atype.reshape((nf, nall)), - extend_aidx.reshape((nf, nall)), + xp.reshape(extend_coord, (nf, nall * 3)), + xp.reshape(extend_atype, (nf, nall)), + xp.reshape(extend_aidx, (nf, nall)), ) diff --git a/deepmd/dpmodel/utils/region.py b/deepmd/dpmodel/utils/region.py index ddbc4b29b8..8102020827 100644 --- a/deepmd/dpmodel/utils/region.py +++ b/deepmd/dpmodel/utils/region.py @@ -1,4 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import array_api_compat import numpy as np @@ -21,8 +22,9 @@ def phys2inter( the internal coordinates """ - rec_cell = np.linalg.inv(cell) - return np.matmul(coord, rec_cell) + xp = array_api_compat.array_namespace(coord, cell) + rec_cell = xp.linalg.inv(cell) + return xp.matmul(coord, rec_cell) def inter2phys( @@ -44,7 +46,8 @@ def inter2phys( the physical coordinates """ - return np.matmul(coord, cell) + xp = array_api_compat.array_namespace(coord, cell) + return xp.matmul(coord, cell) def normalize_coord( @@ -66,8 +69,9 @@ def normalize_coord( wrapped coordinates of shape [*, na, 3]. """ + xp = array_api_compat.array_namespace(coord, cell) icoord = phys2inter(coord, cell) - icoord = np.remainder(icoord, 1.0) + icoord = xp.remainder(icoord, 1.0) return inter2phys(icoord, cell) @@ -87,17 +91,19 @@ def to_face_distance( the to face distances of shape [*, 3] """ + xp = array_api_compat.array_namespace(cell) cshape = cell.shape - dist = b_to_face_distance(cell.reshape([-1, 3, 3])) - return dist.reshape(list(cshape[:-2]) + [3]) # noqa:RUF005 + dist = b_to_face_distance(xp.reshape(cell, [-1, 3, 3])) + return xp.reshape(dist, list(cshape[:-2]) + [3]) # noqa:RUF005 def b_to_face_distance(cell): - volume = np.linalg.det(cell) - c_yz = np.cross(cell[:, 1], cell[:, 2], axis=-1) - _h2yz = volume / np.linalg.norm(c_yz, axis=-1) - c_zx = np.cross(cell[:, 2], cell[:, 0], axis=-1) - _h2zx = volume / np.linalg.norm(c_zx, axis=-1) - c_xy = np.cross(cell[:, 0], cell[:, 1], axis=-1) - _h2xy = volume / np.linalg.norm(c_xy, axis=-1) - return np.stack([_h2yz, _h2zx, _h2xy], axis=1) + xp = array_api_compat.array_namespace(cell) + volume = xp.linalg.det(cell) + c_yz = xp.linalg.cross(cell[:, 1, ...], cell[:, 2, ...], axis=-1) + _h2yz = volume / xp.linalg.vector_norm(c_yz, axis=-1) + c_zx = xp.linalg.cross(cell[:, 2, ...], cell[:, 0, ...], axis=-1) + _h2zx = volume / xp.linalg.vector_norm(c_zx, axis=-1) + c_xy = xp.linalg.cross(cell[:, 0, ...], cell[:, 1, ...], axis=-1) + _h2xy = volume / xp.linalg.vector_norm(c_xy, axis=-1) + return xp.stack([_h2yz, _h2zx, _h2xy], axis=1) diff --git a/deepmd/dpmodel/utils/type_embed.py b/deepmd/dpmodel/utils/type_embed.py index d67d8e50fd..e28b6abb31 100644 --- a/deepmd/dpmodel/utils/type_embed.py +++ b/deepmd/dpmodel/utils/type_embed.py @@ -106,7 +106,7 @@ def call(self) -> np.ndarray: embed = self.embedding_net(self.econf_tebd) if self.padding: embed_pad = xp.zeros((1, embed.shape[-1]), dtype=embed.dtype) - embed = xp.concatenate([embed, embed_pad], axis=0) + embed = xp.concat([embed, embed_pad], axis=0) return embed @classmethod diff --git a/deepmd/jax/common.py b/deepmd/jax/common.py index 550b168b29..9c144a41d1 100644 --- a/deepmd/jax/common.py +++ b/deepmd/jax/common.py @@ -1,13 +1,18 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( - Union, + Any, + Optional, overload, ) import numpy as np +from deepmd.dpmodel.common import ( + NativeOP, +) from deepmd.jax.env import ( jnp, + nnx, ) @@ -19,7 +24,7 @@ def to_jax_array(array: np.ndarray) -> jnp.ndarray: ... def to_jax_array(array: None) -> None: ... -def to_jax_array(array: Union[np.ndarray]) -> Union[jnp.ndarray]: +def to_jax_array(array: Optional[np.ndarray]) -> Optional[jnp.ndarray]: """Convert a numpy array to a JAX array. Parameters @@ -35,3 +40,44 @@ def to_jax_array(array: Union[np.ndarray]) -> Union[jnp.ndarray]: if array is None: return None return jnp.array(array) + + +def flax_module( + module: NativeOP, +) -> nnx.Module: + """Convert a NativeOP to a Flax module. + + Parameters + ---------- + module : NativeOP + The NativeOP to convert. + + Returns + ------- + flax.nnx.Module + The Flax module. + + Examples + -------- + >>> @flax_module + ... class MyModule(NativeOP): + ... pass + """ + metas = set() + if not issubclass(type(nnx.Module), type(module)): + metas.add(type(module)) + if not issubclass(type(module), type(nnx.Module)): + metas.add(type(nnx.Module)) + + class MixedMetaClass(*metas): + def __call__(self, *args, **kwargs): + return type(nnx.Module).__call__(self, *args, **kwargs) + + class FlaxModule(module, nnx.Module, metaclass=MixedMetaClass): + def __init_subclass__(cls, **kwargs) -> None: + return super().__init_subclass__(**kwargs) + + def __setattr__(self, name: str, value: Any) -> None: + return super().__setattr__(name, value) + + return FlaxModule diff --git a/deepmd/jax/descriptor/__init__.py b/deepmd/jax/descriptor/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/jax/descriptor/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/jax/descriptor/dpa1.py b/deepmd/jax/descriptor/dpa1.py new file mode 100644 index 0000000000..a9b0404970 --- /dev/null +++ b/deepmd/jax/descriptor/dpa1.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.dpa1 import DescrptBlockSeAtten as DescrptBlockSeAttenDP +from deepmd.dpmodel.descriptor.dpa1 import DescrptDPA1 as DescrptDPA1DP +from deepmd.dpmodel.descriptor.dpa1 import GatedAttentionLayer as GatedAttentionLayerDP +from deepmd.dpmodel.descriptor.dpa1 import ( + NeighborGatedAttention as NeighborGatedAttentionDP, +) +from deepmd.dpmodel.descriptor.dpa1 import ( + NeighborGatedAttentionLayer as NeighborGatedAttentionLayerDP, +) +from deepmd.jax.common import ( + flax_module, + to_jax_array, +) +from deepmd.jax.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.jax.utils.network import ( + LayerNorm, + NativeLayer, + NetworkCollection, +) +from deepmd.jax.utils.type_embed import ( + TypeEmbedNet, +) + + +@flax_module +class GatedAttentionLayer(GatedAttentionLayerDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"in_proj", "out_proj"}: + value = NativeLayer.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +@flax_module +class NeighborGatedAttentionLayer(NeighborGatedAttentionLayerDP): + def __setattr__(self, name: str, value: Any) -> None: + if name == "attention_layer": + value = GatedAttentionLayer.deserialize(value.serialize()) + elif name == "attn_layer_norm": + value = LayerNorm.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +@flax_module +class NeighborGatedAttention(NeighborGatedAttentionDP): + def __setattr__(self, name: str, value: Any) -> None: + if name == "attention_layers": + value = [ + NeighborGatedAttentionLayer.deserialize(ii.serialize()) for ii in value + ] + return super().__setattr__(name, value) + + +@flax_module +class DescrptBlockSeAtten(DescrptBlockSeAttenDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mean", "stddev"}: + value = to_jax_array(value) + elif name in {"embeddings", "embeddings_strip"}: + if value is not None: + value = NetworkCollection.deserialize(value.serialize()) + elif name == "dpa1_attention": + value = NeighborGatedAttention.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) + + +@flax_module +class DescrptDPA1(DescrptDPA1DP): + def __setattr__(self, name: str, value: Any) -> None: + if name == "se_atten": + value = DescrptBlockSeAtten.deserialize(value.serialize()) + elif name == "type_embedding": + value = TypeEmbedNet.deserialize(value.serialize()) + return super().__setattr__(name, value) diff --git a/deepmd/jax/env.py b/deepmd/jax/env.py index 34e4aa6240..5a5a7f6bf0 100644 --- a/deepmd/jax/env.py +++ b/deepmd/jax/env.py @@ -5,10 +5,14 @@ import jax import jax.numpy as jnp +from flax import ( + nnx, +) jax.config.update("jax_enable_x64", True) __all__ = [ "jax", "jnp", + "nnx", ] diff --git a/deepmd/jax/utils/exclude_mask.py b/deepmd/jax/utils/exclude_mask.py new file mode 100644 index 0000000000..cac4cee092 --- /dev/null +++ b/deepmd/jax/utils/exclude_mask.py @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.utils.exclude_mask import PairExcludeMask as PairExcludeMaskDP +from deepmd.jax.common import ( + flax_module, + to_jax_array, +) + + +@flax_module +class PairExcludeMask(PairExcludeMaskDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"type_mask"}: + value = to_jax_array(value) + return super().__setattr__(name, value) diff --git a/deepmd/jax/utils/network.py b/deepmd/jax/utils/network.py index 629b51b8cd..2c406095cd 100644 --- a/deepmd/jax/utils/network.py +++ b/deepmd/jax/utils/network.py @@ -1,29 +1,74 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Any, + ClassVar, ) from deepmd.dpmodel.common import ( NativeOP, ) +from deepmd.dpmodel.utils.network import LayerNorm as LayerNormDP from deepmd.dpmodel.utils.network import NativeLayer as NativeLayerDP +from deepmd.dpmodel.utils.network import NetworkCollection as NetworkCollectionDP from deepmd.dpmodel.utils.network import ( make_embedding_network, make_fitting_network, make_multilayer_network, ) from deepmd.jax.common import ( + flax_module, to_jax_array, ) +from deepmd.jax.env import ( + nnx, +) + + +class ArrayAPIParam(nnx.Param): + def __array__(self, *args, **kwargs): + return self.value.__array__(*args, **kwargs) + + def __array_namespace__(self, *args, **kwargs): + return self.value.__array_namespace__(*args, **kwargs) + def __dlpack__(self, *args, **kwargs): + return self.value.__dlpack__(*args, **kwargs) + def __dlpack_device__(self, *args, **kwargs): + return self.value.__dlpack_device__(*args, **kwargs) + + +@flax_module class NativeLayer(NativeLayerDP): def __setattr__(self, name: str, value: Any) -> None: if name in {"w", "b", "idt"}: value = to_jax_array(value) + if value is not None: + value = ArrayAPIParam(value) return super().__setattr__(name, value) -NativeNet = make_multilayer_network(NativeLayer, NativeOP) -EmbeddingNet = make_embedding_network(NativeNet, NativeLayer) -FittingNet = make_fitting_network(EmbeddingNet, NativeNet, NativeLayer) +@flax_module +class NativeNet(make_multilayer_network(NativeLayer, NativeOP)): + pass + + +class EmbeddingNet(make_embedding_network(NativeNet, NativeLayer)): + pass + + +class FittingNet(make_fitting_network(EmbeddingNet, NativeNet, NativeLayer)): + pass + + +@flax_module +class NetworkCollection(NetworkCollectionDP): + NETWORK_TYPE_MAP: ClassVar[dict[str, type]] = { + "network": NativeNet, + "embedding_network": EmbeddingNet, + "fitting_network": FittingNet, + } + + +class LayerNorm(LayerNormDP, NativeLayer): + pass diff --git a/deepmd/jax/utils/type_embed.py b/deepmd/jax/utils/type_embed.py index bc7c469524..3143460244 100644 --- a/deepmd/jax/utils/type_embed.py +++ b/deepmd/jax/utils/type_embed.py @@ -5,6 +5,7 @@ from deepmd.dpmodel.utils.type_embed import TypeEmbedNet as TypeEmbedNetDP from deepmd.jax.common import ( + flax_module, to_jax_array, ) from deepmd.jax.utils.network import ( @@ -12,6 +13,7 @@ ) +@flax_module class TypeEmbedNet(TypeEmbedNetDP): def __setattr__(self, name: str, value: Any) -> None: if name in {"econf_tebd"}: diff --git a/pyproject.toml b/pyproject.toml index 6932960ace..b13dceeb07 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -137,6 +137,7 @@ cu12 = [ ] jax = [ 'jax>=0.4.33;python_version>="3.10"', + 'flax>=0.8.0;python_version>="3.10"', ] [tool.deepmd_build_backend.scripts] diff --git a/source/tests/array_api_strict/__init__.py b/source/tests/array_api_strict/__init__.py new file mode 100644 index 0000000000..27785c2fd5 --- /dev/null +++ b/source/tests/array_api_strict/__init__.py @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +"""Synchronize with deepmd.jax for test purpose only.""" diff --git a/source/tests/array_api_strict/common.py b/source/tests/array_api_strict/common.py new file mode 100644 index 0000000000..28f67a97f6 --- /dev/null +++ b/source/tests/array_api_strict/common.py @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import array_api_strict +import numpy as np + + +def to_array_api_strict_array(array: Optional[np.ndarray]): + """Convert a numpy array to a JAX array. + + Parameters + ---------- + array : np.ndarray + The numpy array to convert. + + Returns + ------- + jnp.ndarray + The JAX tensor. + """ + if array is None: + return None + return array_api_strict.asarray(array) diff --git a/source/tests/array_api_strict/descriptor/__init__.py b/source/tests/array_api_strict/descriptor/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/array_api_strict/descriptor/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/array_api_strict/descriptor/dpa1.py b/source/tests/array_api_strict/descriptor/dpa1.py new file mode 100644 index 0000000000..ebd688e303 --- /dev/null +++ b/source/tests/array_api_strict/descriptor/dpa1.py @@ -0,0 +1,81 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.dpa1 import DescrptBlockSeAtten as DescrptBlockSeAttenDP +from deepmd.dpmodel.descriptor.dpa1 import DescrptDPA1 as DescrptDPA1DP +from deepmd.dpmodel.descriptor.dpa1 import GatedAttentionLayer as GatedAttentionLayerDP +from deepmd.dpmodel.descriptor.dpa1 import ( + NeighborGatedAttention as NeighborGatedAttentionDP, +) +from deepmd.dpmodel.descriptor.dpa1 import ( + NeighborGatedAttentionLayer as NeighborGatedAttentionLayerDP, +) + +from ..common import ( + to_array_api_strict_array, +) +from ..utils.exclude_mask import ( + PairExcludeMask, +) +from ..utils.network import ( + LayerNorm, + NativeLayer, + NetworkCollection, +) +from ..utils.type_embed import ( + TypeEmbedNet, +) + + +class GatedAttentionLayer(GatedAttentionLayerDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"in_proj", "out_proj"}: + value = NativeLayer.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +class NeighborGatedAttentionLayer(NeighborGatedAttentionLayerDP): + def __setattr__(self, name: str, value: Any) -> None: + if name == "attention_layer": + value = GatedAttentionLayer.deserialize(value.serialize()) + elif name == "attn_layer_norm": + value = LayerNorm.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +class NeighborGatedAttention(NeighborGatedAttentionDP): + def __setattr__(self, name: str, value: Any) -> None: + if name == "attention_layers": + value = [ + NeighborGatedAttentionLayer.deserialize(ii.serialize()) for ii in value + ] + return super().__setattr__(name, value) + + +class DescrptBlockSeAtten(DescrptBlockSeAttenDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mean", "stddev"}: + value = to_array_api_strict_array(value) + elif name in {"embeddings", "embeddings_strip"}: + if value is not None: + value = NetworkCollection.deserialize(value.serialize()) + elif name == "dpa1_attention": + value = NeighborGatedAttention.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) + + +class DescrptDPA1(DescrptDPA1DP): + def __setattr__(self, name: str, value: Any) -> None: + if name == "se_atten": + value = DescrptBlockSeAtten.deserialize(value.serialize()) + elif name == "type_embedding": + value = TypeEmbedNet.deserialize(value.serialize()) + return super().__setattr__(name, value) diff --git a/source/tests/array_api_strict/utils/__init__.py b/source/tests/array_api_strict/utils/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/array_api_strict/utils/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/array_api_strict/utils/exclude_mask.py b/source/tests/array_api_strict/utils/exclude_mask.py new file mode 100644 index 0000000000..06f2e94b52 --- /dev/null +++ b/source/tests/array_api_strict/utils/exclude_mask.py @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.utils.exclude_mask import PairExcludeMask as PairExcludeMaskDP + +from ..common import ( + to_array_api_strict_array, +) + + +class PairExcludeMask(PairExcludeMaskDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"type_mask"}: + value = to_array_api_strict_array(value) + return super().__setattr__(name, value) diff --git a/source/tests/array_api_strict/utils/network.py b/source/tests/array_api_strict/utils/network.py new file mode 100644 index 0000000000..42b0bb5c61 --- /dev/null +++ b/source/tests/array_api_strict/utils/network.py @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, + ClassVar, +) + +from deepmd.dpmodel.common import ( + NativeOP, +) +from deepmd.dpmodel.utils.network import LayerNorm as LayerNormDP +from deepmd.dpmodel.utils.network import NativeLayer as NativeLayerDP +from deepmd.dpmodel.utils.network import NetworkCollection as NetworkCollectionDP +from deepmd.dpmodel.utils.network import ( + make_embedding_network, + make_fitting_network, + make_multilayer_network, +) + +from ..common import ( + to_array_api_strict_array, +) + + +class NativeLayer(NativeLayerDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"w", "b", "idt"}: + value = to_array_api_strict_array(value) + return super().__setattr__(name, value) + + +NativeNet = make_multilayer_network(NativeLayer, NativeOP) +EmbeddingNet = make_embedding_network(NativeNet, NativeLayer) +FittingNet = make_fitting_network(EmbeddingNet, NativeNet, NativeLayer) + + +class NetworkCollection(NetworkCollectionDP): + NETWORK_TYPE_MAP: ClassVar[dict[str, type]] = { + "network": NativeNet, + "embedding_network": EmbeddingNet, + "fitting_network": FittingNet, + } + + +class LayerNorm(LayerNormDP, NativeLayer): + pass diff --git a/source/tests/array_api_strict/utils/type_embed.py b/source/tests/array_api_strict/utils/type_embed.py new file mode 100644 index 0000000000..7551279002 --- /dev/null +++ b/source/tests/array_api_strict/utils/type_embed.py @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.utils.type_embed import TypeEmbedNet as TypeEmbedNetDP + +from ..common import ( + to_array_api_strict_array, +) +from ..utils.network import ( + EmbeddingNet, +) + + +class TypeEmbedNet(TypeEmbedNetDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"econf_tebd"}: + value = to_array_api_strict_array(value) + if name in {"embedding_net"}: + value = EmbeddingNet.deserialize(value.serialize()) + return super().__setattr__(name, value) diff --git a/source/tests/common/dpmodel/test_descriptor_dpa1.py b/source/tests/common/dpmodel/test_descriptor_dpa1.py index 317f4c3d3d..f441895f15 100644 --- a/source/tests/common/dpmodel/test_descriptor_dpa1.py +++ b/source/tests/common/dpmodel/test_descriptor_dpa1.py @@ -36,3 +36,22 @@ def test_self_consistency( mm1 = em1.call(self.coord_ext, self.atype_ext, self.nlist) for ii in [0, 1, 4]: np.testing.assert_allclose(mm0[ii], mm1[ii]) + + def test_multiple_frames(self): + rng = np.random.default_rng(GLOBAL_SEED) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 0.1 + np.abs(dstd) + + em0 = DescrptDPA1(self.rcut, self.rcut_smth, self.sel, ntypes=2) + em0.davg = davg + em0.dstd = dstd + two_coord_ext = np.concatenate([self.coord_ext, self.coord_ext], axis=0) + two_atype_ext = np.concatenate([self.atype_ext, self.atype_ext], axis=0) + two_nlist = np.concatenate([self.nlist, self.nlist], axis=0) + + mm0 = em0.call(two_coord_ext, two_atype_ext, two_nlist) + for ii in [0, 1, 4]: + np.testing.assert_allclose(mm0[ii][0], mm0[ii][2], err_msg=f"{ii} 0~2") + np.testing.assert_allclose(mm0[ii][1], mm0[ii][3], err_msg=f"{ii} 1~3") diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index c64b14c273..1070fe0f79 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -10,6 +10,9 @@ from enum import ( Enum, ) +from importlib.util import ( + find_spec, +) from typing import ( Any, Callable, @@ -33,6 +36,7 @@ INSTALLED_TF = Backend.get_backend("tensorflow")().is_available() INSTALLED_PT = Backend.get_backend("pytorch")().is_available() INSTALLED_JAX = Backend.get_backend("jax")().is_available() +INSTALLED_ARRAY_API_STRICT = find_spec("array_api_strict") is not None if os.environ.get("CI") and not (INSTALLED_TF and INSTALLED_PT): raise ImportError("TensorFlow or PyTorch should be tested in the CI") @@ -56,6 +60,7 @@ "INSTALLED_TF", "INSTALLED_PT", "INSTALLED_JAX", + "INSTALLED_ARRAY_API_STRICT", ] @@ -72,6 +77,7 @@ class CommonTest(ABC): """PyTorch model class.""" jax_class: ClassVar[Optional[type]] """JAX model class.""" + array_api_strict_class: ClassVar[Optional[type]] args: ClassVar[Optional[Union[Argument, list[Argument]]]] """Arguments that maps to the `data`.""" skip_dp: ClassVar[bool] = False @@ -83,6 +89,8 @@ class CommonTest(ABC): # we may usually skip jax before jax is fully supported skip_jax: ClassVar[bool] = True """Whether to skip the JAX model.""" + skip_array_api_strict: ClassVar[bool] = True + """Whether to skip the array_api_strict model.""" rtol = 1e-10 """Relative tolerance for comparing the return value. Override for float32.""" atol = 1e-10 @@ -163,6 +171,16 @@ def eval_jax(self, jax_obj: Any) -> Any: """ raise NotImplementedError("Not implemented") + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + """Evaluate the return value of array_api_strict. + + Parameters + ---------- + array_api_strict_obj : Any + The object of array_api_strict + """ + raise NotImplementedError("Not implemented") + class RefBackend(Enum): """Reference backend.""" @@ -170,6 +188,7 @@ class RefBackend(Enum): DP = 2 PT = 3 JAX = 5 + ARRAY_API_STRICT = 6 @abstractmethod def extract_ret(self, ret: Any, backend: RefBackend) -> tuple[np.ndarray, ...]: @@ -235,6 +254,11 @@ def get_jax_ret_serialization_from_cls(self, obj): data = obj.serialize() return ret, data + def get_array_api_strict_ret_serialization_from_cls(self, obj): + ret = self.eval_array_api_strict(obj) + data = obj.serialize() + return ret, data + def get_reference_backend(self): """Get the reference backend. @@ -248,6 +272,8 @@ def get_reference_backend(self): return self.RefBackend.PT if not self.skip_jax: return self.RefBackend.JAX + if not self.skip_array_api_strict: + return self.RefBackend.ARRAY_API_STRICT raise ValueError("No available reference") def get_reference_ret_serialization(self, ref: RefBackend): @@ -261,6 +287,12 @@ def get_reference_ret_serialization(self, ref: RefBackend): if ref == self.RefBackend.PT: obj = self.init_backend_cls(self.pt_class) return self.get_pt_ret_serialization_from_cls(obj) + if ref == self.RefBackend.JAX: + obj = self.init_backend_cls(self.jax_class) + return self.get_jax_ret_serialization_from_cls(obj) + if ref == self.RefBackend.ARRAY_API_STRICT: + obj = self.init_backend_cls(self.array_api_strict_class) + return self.get_array_api_strict_ret_serialization_from_cls(obj) raise ValueError("No available reference") def test_tf_consistent_with_ref(self): @@ -415,6 +447,40 @@ def test_jax_self_consistent(self): else: self.assertEqual(rr1, rr2) + def test_array_api_strict_consistent_with_ref(self): + """Test whether array_api_strict and reference are consistent.""" + if self.skip_array_api_strict: + self.skipTest("Unsupported backend") + ref_backend = self.get_reference_backend() + if ref_backend == self.RefBackend.ARRAY_API_STRICT: + self.skipTest("Reference is self") + ret1, data1 = self.get_reference_ret_serialization(ref_backend) + ret1 = self.extract_ret(ret1, ref_backend) + array_api_strict_obj = self.array_api_strict_class.deserialize(data1) + ret2 = self.eval_array_api_strict(array_api_strict_obj) + ret2 = self.extract_ret(ret2, self.RefBackend.ARRAY_API_STRICT) + data2 = array_api_strict_obj.serialize() + np.testing.assert_equal(data1, data2) + for rr1, rr2 in zip(ret1, ret2): + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) + assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + + def test_array_api_strict_self_consistent(self): + """Test whether array_api_strict is self consistent.""" + if self.skip_array_api_strict: + self.skipTest("Unsupported backend") + obj1 = self.init_backend_cls(self.array_api_strict_class) + ret1, data1 = self.get_array_api_strict_ret_serialization_from_cls(obj1) + obj1 = self.array_api_strict_class.deserialize(data1) + ret2, data2 = self.get_array_api_strict_ret_serialization_from_cls(obj1) + np.testing.assert_equal(data1, data2) + for rr1, rr2 in zip(ret1, ret2): + if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray): + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) + assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + else: + self.assertEqual(rr1, rr2) + def tearDown(self) -> None: """Clear the TF session.""" if not self.skip_tf: diff --git a/source/tests/consistent/descriptor/common.py b/source/tests/consistent/descriptor/common.py index 74fc3d9b07..e0ca30c799 100644 --- a/source/tests/consistent/descriptor/common.py +++ b/source/tests/consistent/descriptor/common.py @@ -3,6 +3,8 @@ Any, ) +import numpy as np + from deepmd.common import ( make_default_mesh, ) @@ -12,6 +14,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, ) @@ -29,6 +33,12 @@ GLOBAL_TF_FLOAT_PRECISION, tf, ) +if INSTALLED_JAX: + from deepmd.jax.env import ( + jnp, + ) +if INSTALLED_ARRAY_API_STRICT: + import array_api_strict class DescriptorTest: @@ -99,3 +109,56 @@ def eval_pt_descriptor( x.detach().cpu().numpy() if torch.is_tensor(x) else x for x in pt_obj(ext_coords, ext_atype, nlist=nlist, mapping=mapping) ] + + def eval_jax_descriptor( + self, jax_obj: Any, natoms, coords, atype, box, mixed_types: bool = False + ) -> Any: + ext_coords, ext_atype, mapping = extend_coord_with_ghosts( + jnp.array(coords).reshape(1, -1, 3), + jnp.array(atype).reshape(1, -1), + jnp.array(box).reshape(1, 3, 3), + jax_obj.get_rcut(), + ) + nlist = build_neighbor_list( + ext_coords, + ext_atype, + natoms[0], + jax_obj.get_rcut(), + jax_obj.get_sel(), + distinguish_types=(not mixed_types), + ) + return [ + np.asarray(x) if isinstance(x, jnp.ndarray) else x + for x in jax_obj(ext_coords, ext_atype, nlist=nlist, mapping=mapping) + ] + + def eval_array_api_strict_descriptor( + self, + array_api_strict_obj: Any, + natoms, + coords, + atype, + box, + mixed_types: bool = False, + ) -> Any: + array_api_strict.set_array_api_strict_flags(api_version="2023.12") + ext_coords, ext_atype, mapping = extend_coord_with_ghosts( + array_api_strict.asarray(coords.reshape(1, -1, 3)), + array_api_strict.asarray(atype.reshape(1, -1)), + array_api_strict.asarray(box.reshape(1, 3, 3)), + array_api_strict_obj.get_rcut(), + ) + nlist = build_neighbor_list( + ext_coords, + ext_atype, + natoms[0], + array_api_strict_obj.get_rcut(), + array_api_strict_obj.get_sel(), + distinguish_types=(not mixed_types), + ) + return [ + np.asarray(x) if hasattr(x, "__array_namespace__") else x + for x in array_api_strict_obj( + ext_coords, ext_atype, nlist=nlist, mapping=mapping + ) + ] diff --git a/source/tests/consistent/descriptor/test_dpa1.py b/source/tests/consistent/descriptor/test_dpa1.py index 59d7369753..ed7884adb9 100644 --- a/source/tests/consistent/descriptor/test_dpa1.py +++ b/source/tests/consistent/descriptor/test_dpa1.py @@ -16,6 +16,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -33,6 +35,14 @@ from deepmd.tf.descriptor.se_atten import DescrptDPA1Compat as DescrptDPA1TF else: DescrptDPA1TF = None +if INSTALLED_JAX: + from deepmd.jax.descriptor.dpa1 import DescrptDPA1 as DescriptorDPA1JAX +else: + DescriptorDPA1JAX = None +if INSTALLED_ARRAY_API_STRICT: + from ...array_api_strict.descriptor.dpa1 import DescrptDPA1 as DescriptorDPA1Strict +else: + DescriptorDPA1Strict = None from deepmd.utils.argcheck import ( descrpt_se_atten_args, ) @@ -183,6 +193,69 @@ def skip_dp(self) -> bool: temperature, ) + @property + def skip_jax(self) -> bool: + ( + tebd_dim, + tebd_input_mode, + resnet_dt, + type_one_side, + attn, + attn_layer, + attn_dotr, + excluded_types, + env_protection, + set_davg_zero, + scaling_factor, + normalize, + temperature, + ln_eps, + smooth_type_embedding, + concat_output_tebd, + precision, + use_econf_tebd, + use_tebd_bias, + ) = self.param + return not INSTALLED_JAX or self.is_meaningless_zero_attention_layer_tests( + attn_layer, + attn_dotr, + normalize, + temperature, + ) + + @property + def skip_array_api_strict(self) -> bool: + ( + tebd_dim, + tebd_input_mode, + resnet_dt, + type_one_side, + attn, + attn_layer, + attn_dotr, + excluded_types, + env_protection, + set_davg_zero, + scaling_factor, + normalize, + temperature, + ln_eps, + smooth_type_embedding, + concat_output_tebd, + precision, + use_econf_tebd, + use_tebd_bias, + ) = self.param + return ( + not INSTALLED_ARRAY_API_STRICT + or self.is_meaningless_zero_attention_layer_tests( + attn_layer, + attn_dotr, + normalize, + temperature, + ) + ) + @property def skip_tf(self) -> bool: ( @@ -226,6 +299,9 @@ def skip_tf(self) -> bool: tf_class = DescrptDPA1TF dp_class = DescrptDPA1DP pt_class = DescrptDPA1PT + jax_class = DescriptorDPA1JAX + array_api_strict_class = DescriptorDPA1Strict + args = descrpt_se_atten_args().append(Argument("ntypes", int, optional=False)) def setUp(self): @@ -313,6 +389,26 @@ def eval_pt(self, pt_obj: Any) -> Any: mixed_types=True, ) + def eval_jax(self, jax_obj: Any) -> Any: + return self.eval_jax_descriptor( + jax_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + return self.eval_array_api_strict_descriptor( + array_api_strict_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) diff --git a/source/tests/consistent/test_type_embedding.py b/source/tests/consistent/test_type_embedding.py index 1464517581..e2836c7a6c 100644 --- a/source/tests/consistent/test_type_embedding.py +++ b/source/tests/consistent/test_type_embedding.py @@ -12,6 +12,7 @@ ) from .common import ( + INSTALLED_ARRAY_API_STRICT, INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, @@ -37,6 +38,10 @@ from deepmd.jax.utils.type_embed import TypeEmbedNet as TypeEmbedNetJAX else: TypeEmbedNetJAX = object +if INSTALLED_ARRAY_API_STRICT: + from ..array_api_strict.utils.type_embed import TypeEmbedNet as TypeEmbedNetStrict +else: + TypeEmbedNetStrict = None @parameterized( @@ -71,8 +76,10 @@ def data(self) -> dict: dp_class = TypeEmbedNetDP pt_class = TypeEmbedNetPT jax_class = TypeEmbedNetJAX + array_api_strict_class = TypeEmbedNetStrict args = type_embedding_args() skip_jax = not INSTALLED_JAX + skip_array_api_strict = not INSTALLED_ARRAY_API_STRICT @property def addtional_data(self) -> dict: @@ -120,6 +127,12 @@ def eval_jax(self, jax_obj: Any) -> Any: raise ValueError("Output is numpy array") return [np.array(x) if isinstance(x, jnp.ndarray) else x for x in (out,)] + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + out = array_api_strict_obj() + return [ + np.asarray(x) if hasattr(x, "__array_namespace__") else x for x in (out,) + ] + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) From 61f1681b48d2dc8f5a892ac782e5d48369ff7bd0 Mon Sep 17 00:00:00 2001 From: Anyang Peng <137014849+anyangml@users.noreply.github.com> Date: Fri, 11 Oct 2024 10:24:13 +0800 Subject: [PATCH 031/193] Feat (pt): Expose Linear Ener Model (#4194) ## Summary by CodeRabbit - **New Features** - Introduced two new JSON configuration files for linear energy calculations in water simulations. - Launched the `LinearEnergyModel` class for advanced energy and force calculations. - Added a parameter for customizable model weighting in the linear energy model. - Expanded test suite with new test classes for validating linear energy models. - Added new model configurations and test classes to enhance testing capabilities. - **Bug Fixes** - Corrected input handling in the deserialization method for version compatibility. - Adjusted numerical values in data files for accurate testing. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../model/atomic_model/linear_atomic_model.py | 57 +++++- deepmd/pt/model/model/__init__.py | 62 +++++++ deepmd/pt/model/model/dp_linear_model.py | 166 ++++++++++++++++++ doc/model/linear.md | 4 +- examples/water/d3/dftd3.txt | 2 +- examples/water/d3/input_pt.json | 96 ++++++++++ examples/water/linear/input_pt.json | 124 +++++++++++++ examples/water/zbl/input.json | 2 +- source/tests/common/test_examples.py | 2 + source/tests/pt/model/test_permutation.py | 1 + .../universal/common/cases/model/model.py | 19 ++ source/tests/universal/pt/model/test_model.py | 99 +++++++++++ 12 files changed, 622 insertions(+), 12 deletions(-) create mode 100644 deepmd/pt/model/model/dp_linear_model.py create mode 100644 examples/water/d3/input_pt.json create mode 100644 examples/water/linear/input_pt.json diff --git a/deepmd/pt/model/atomic_model/linear_atomic_model.py b/deepmd/pt/model/atomic_model/linear_atomic_model.py index d88c4c3af5..8d27fbcac4 100644 --- a/deepmd/pt/model/atomic_model/linear_atomic_model.py +++ b/deepmd/pt/model/atomic_model/linear_atomic_model.py @@ -48,12 +48,15 @@ class LinearEnergyAtomicModel(BaseAtomicModel): type_map : list[str] Mapping atom type to the name (str) of the type. For example `type_map[1]` gives the name of the type 1. + weights : Optional[Union[str,list[float]]] + Weights of the models. If str, must be `sum` or `mean`. If list, must be a list of float. """ def __init__( self, models: list[BaseAtomicModel], type_map: list[str], + weights: Optional[Union[str, list[float]]] = "mean", **kwargs, ): super().__init__(type_map, **kwargs) @@ -89,6 +92,16 @@ def __init__( ) self.nsels = torch.tensor(self.get_model_nsels(), device=env.DEVICE) # pylint: disable=no-explicit-dtype + if isinstance(weights, str): + assert weights in ["sum", "mean"] + elif isinstance(weights, list): + assert len(weights) == len(models) + else: + raise ValueError( + f"'weights' must be a string ('sum' or 'mean') or a list of float of length {len(models)}." + ) + self.weights = weights + def mixed_types(self) -> bool: """If true, the model 1. assumes total number of atoms aligned across frames; @@ -320,7 +333,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "LinearEnergyAtomicModel": data = copy.deepcopy(data) - check_version_compatibility(data.get("@version", 2), 2, 1) + check_version_compatibility(data.pop("@version", 2), 2, 1) data.pop("@class", None) data.pop("type", None) models = [ @@ -331,16 +344,42 @@ def deserialize(cls, data: dict) -> "LinearEnergyAtomicModel": return super().deserialize(data) def _compute_weight( - self, extended_coord, extended_atype, nlists_ + self, + extended_coord: torch.Tensor, + extended_atype: torch.Tensor, + nlists_: list[torch.Tensor], ) -> list[torch.Tensor]: """This should be a list of user defined weights that matches the number of models to be combined.""" nmodels = len(self.models) nframes, nloc, _ = nlists_[0].shape - return [ - torch.ones((nframes, nloc, 1), dtype=torch.float64, device=env.DEVICE) - / nmodels - for _ in range(nmodels) - ] + if isinstance(self.weights, str): + if self.weights == "sum": + return [ + torch.ones( + (nframes, nloc, 1), dtype=torch.float64, device=env.DEVICE + ) + for _ in range(nmodels) + ] + elif self.weights == "mean": + return [ + torch.ones( + (nframes, nloc, 1), dtype=torch.float64, device=env.DEVICE + ) + / nmodels + for _ in range(nmodels) + ] + else: + raise ValueError( + "`weights` must be 'sum' or 'mean' when provided as a string." + ) + elif isinstance(self.weights, list): + return [ + torch.ones((nframes, nloc, 1), dtype=torch.float64, device=env.DEVICE) + * w + for w in self.weights + ] + else: + raise NotImplementedError def get_dim_fparam(self) -> int: """Get the number (dimension) of frame parameters of this atomic model.""" @@ -365,7 +404,9 @@ def get_sel_type(self) -> list[int]: return torch.unique( torch.cat( [ - torch.as_tensor(model.get_sel_type(), dtype=torch.int32) + torch.as_tensor( + model.get_sel_type(), dtype=torch.int64, device=env.DEVICE + ) for model in self.models ] ) diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index 1c81d42013..26aefa6201 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -36,6 +36,9 @@ from .dos_model import ( DOSModel, ) +from .dp_linear_model import ( + LinearEnergyModel, +) from .dp_model import ( DPModelCommon, ) @@ -105,6 +108,62 @@ def get_spin_model(model_params): return SpinEnergyModel(backbone_model=backbone_model, spin=spin) +def get_linear_model(model_params): + model_params = copy.deepcopy(model_params) + weights = model_params.get("weights", "mean") + list_of_models = [] + ntypes = len(model_params["type_map"]) + for sub_model_params in model_params["models"]: + if "descriptor" in sub_model_params: + # descriptor + sub_model_params["descriptor"]["ntypes"] = ntypes + sub_model_params["descriptor"]["type_map"] = copy.deepcopy( + model_params["type_map"] + ) + descriptor = BaseDescriptor(**sub_model_params["descriptor"]) + # fitting + fitting_net = sub_model_params.get("fitting_net", {}) + fitting_net["type"] = fitting_net.get("type", "ener") + fitting_net["ntypes"] = descriptor.get_ntypes() + fitting_net["type_map"] = copy.deepcopy(model_params["type_map"]) + fitting_net["mixed_types"] = descriptor.mixed_types() + if fitting_net["type"] in ["dipole", "polar"]: + fitting_net["embedding_width"] = descriptor.get_dim_emb() + fitting_net["dim_descrpt"] = descriptor.get_dim_out() + grad_force = "direct" not in fitting_net["type"] + if not grad_force: + fitting_net["out_dim"] = descriptor.get_dim_emb() + if "ener" in fitting_net["type"]: + fitting_net["return_energy"] = True + fitting = BaseFitting(**fitting_net) + list_of_models.append( + DPAtomicModel(descriptor, fitting, type_map=model_params["type_map"]) + ) + + else: # must be pairtab + assert ( + "type" in sub_model_params and sub_model_params["type"] == "pairtab" + ), "Sub-models in LinearEnergyModel must be a DPModel or a PairTable Model" + list_of_models.append( + PairTabAtomicModel( + sub_model_params["tab_file"], + sub_model_params["rcut"], + sub_model_params["sel"], + type_map=model_params["type_map"], + ) + ) + + atom_exclude_types = model_params.get("atom_exclude_types", []) + pair_exclude_types = model_params.get("pair_exclude_types", []) + return LinearEnergyModel( + models=list_of_models, + type_map=model_params["type_map"], + weights=weights, + atom_exclude_types=atom_exclude_types, + pair_exclude_types=pair_exclude_types, + ) + + def get_zbl_model(model_params): model_params = copy.deepcopy(model_params) ntypes = len(model_params["type_map"]) @@ -247,6 +306,8 @@ def get_model(model_params): return get_zbl_model(model_params) else: return get_standard_model(model_params) + elif model_type == "linear_ener": + return get_linear_model(model_params) else: return BaseModel.get_class_by_type(model_type).get_model(model_params) @@ -265,4 +326,5 @@ def get_model(model_params): "DPZBLModel", "make_model", "make_hessian_model", + "LinearEnergyModel", ] diff --git a/deepmd/pt/model/model/dp_linear_model.py b/deepmd/pt/model/model/dp_linear_model.py new file mode 100644 index 0000000000..ef2e84bd19 --- /dev/null +++ b/deepmd/pt/model/model/dp_linear_model.py @@ -0,0 +1,166 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from copy import ( + deepcopy, +) +from typing import ( + Optional, +) + +import torch + +from deepmd.pt.model.atomic_model import ( + LinearEnergyAtomicModel, +) +from deepmd.pt.model.model.model import ( + BaseModel, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) + +from .dp_model import ( + DPModelCommon, +) +from .make_model import ( + make_model, +) + +DPLinearModel_ = make_model(LinearEnergyAtomicModel) + + +@BaseModel.register("linear_ener") +class LinearEnergyModel(DPLinearModel_): + model_type = "ener" + + def __init__( + self, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + + def translated_output_def(self): + out_def_data = self.model_output_def().get_data() + output_def = { + "atom_energy": deepcopy(out_def_data["energy"]), + "energy": deepcopy(out_def_data["energy_redu"]), + } + if self.do_grad_r("energy"): + output_def["force"] = deepcopy(out_def_data["energy_derv_r"]) + output_def["force"].squeeze(-2) + if self.do_grad_c("energy"): + output_def["virial"] = deepcopy(out_def_data["energy_derv_c_redu"]) + output_def["virial"].squeeze(-2) + output_def["atom_virial"] = deepcopy(out_def_data["energy_derv_c"]) + output_def["atom_virial"].squeeze(-3) + if "mask" in out_def_data: + output_def["mask"] = deepcopy(out_def_data["mask"]) + return output_def + + def forward( + self, + coord, + atype, + box: Optional[torch.Tensor] = None, + fparam: Optional[torch.Tensor] = None, + aparam: Optional[torch.Tensor] = None, + do_atomic_virial: bool = False, + ) -> dict[str, torch.Tensor]: + model_ret = self.forward_common( + coord, + atype, + box, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) + + model_predict = {} + model_predict["atom_energy"] = model_ret["energy"] + model_predict["energy"] = model_ret["energy_redu"] + if self.do_grad_r("energy"): + model_predict["force"] = model_ret["energy_derv_r"].squeeze(-2) + if self.do_grad_c("energy"): + model_predict["virial"] = model_ret["energy_derv_c_redu"].squeeze(-2) + if do_atomic_virial: + model_predict["atom_virial"] = model_ret["energy_derv_c"].squeeze(-3) + else: + model_predict["force"] = model_ret["dforce"] + if "mask" in model_ret: + model_predict["mask"] = model_ret["mask"] + return model_predict + + @torch.jit.export + def forward_lower( + self, + extended_coord, + extended_atype, + nlist, + mapping: Optional[torch.Tensor] = None, + fparam: Optional[torch.Tensor] = None, + aparam: Optional[torch.Tensor] = None, + do_atomic_virial: bool = False, + ): + model_ret = self.forward_common_lower( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + extra_nlist_sort=self.need_sorted_nlist_for_lower(), + ) + + model_predict = {} + model_predict["atom_energy"] = model_ret["energy"] + model_predict["energy"] = model_ret["energy_redu"] + if self.do_grad_r("energy"): + model_predict["extended_force"] = model_ret["energy_derv_r"].squeeze(-2) + if self.do_grad_c("energy"): + model_predict["virial"] = model_ret["energy_derv_c_redu"].squeeze(-2) + if do_atomic_virial: + model_predict["extended_virial"] = model_ret["energy_derv_c"].squeeze( + -3 + ) + else: + assert model_ret["dforce"] is not None + model_predict["dforce"] = model_ret["dforce"] + return model_predict + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. + + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statictics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + local_jdata_cpy = local_jdata.copy() + type_map = local_jdata_cpy["type_map"] + min_nbor_dist = None + for idx, sub_model in enumerate(local_jdata_cpy["models"]): + if "tab_file" not in sub_model: + sub_model, temp_min = DPModelCommon.update_sel( + train_data, type_map, local_jdata["models"][idx] + ) + if min_nbor_dist is None or temp_min <= min_nbor_dist: + min_nbor_dist = temp_min + return local_jdata_cpy, min_nbor_dist diff --git a/doc/model/linear.md b/doc/model/linear.md index 3891559d90..47fdd1750b 100644 --- a/doc/model/linear.md +++ b/doc/model/linear.md @@ -1,7 +1,7 @@ -## Linear model {{ tensorflow_icon }} +## Linear model {{ tensorflow_icon }} {{ pytorch_icon }} :::{note} -**Supported backends**: TensorFlow {{ tensorflow_icon }} +**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }} ::: One can linearly combine existing models with arbitrary coefficients: diff --git a/examples/water/d3/dftd3.txt b/examples/water/d3/dftd3.txt index bbc9726134..09e5fb697a 100644 --- a/examples/water/d3/dftd3.txt +++ b/examples/water/d3/dftd3.txt @@ -97,4 +97,4 @@ 9.700000000000001066e+00 -1.186747936398473687e-05 -7.637113677130612127e-06 -5.528293849956352819e-06 9.800000000000000711e+00 -1.114523618469756001e-05 -7.174288601187318493e-06 -5.194401230658985063e-06 9.900000000000000355e+00 -1.047381249252528874e-05 -6.743886368019750717e-06 -4.883815978498405921e-06 -1.000000000000000000e+01 0.000000000000000e00e+00 0.000000000000000e00e+00 0.000000000000000e00e+00 +1.000000000000000000e+01 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 diff --git a/examples/water/d3/input_pt.json b/examples/water/d3/input_pt.json new file mode 100644 index 0000000000..c2d9304a7e --- /dev/null +++ b/examples/water/d3/input_pt.json @@ -0,0 +1,96 @@ +{ + "_comment1": " model parameters", + "model": { + "type": "linear_ener", + "weights": "sum", + "type_map": [ + "O", + "H" + ], + "models": [ + { + "descriptor": { + "type": "se_atten", + "sel": [ + 46, + 92 + ], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 25, + 50, + 100 + ], + "resnet_dt": false, + "axis_neuron": 16, + "type_one_side": true, + "precision": "float64", + "seed": 1, + "_comment2": " that's all" + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "precision": "float64", + "seed": 1, + "_comment3": " that's all" + }, + "_comment4": " that's all" + }, + { + "type": "pairtab", + "tab_file": "dftd3.txt", + "rcut": 10.0, + "sel": 534 + } + ] + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-8, + "_comment5": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0, + "_comment6": " that's all" + }, + "training": { + "training_data": { + "systems": [ + "../data/data_0/", + "../data/data_1/", + "../data/data_2/" + ], + "batch_size": "auto", + "_comment7": "that's all" + }, + "validation_data": { + "systems": [ + "../data/data_3" + ], + "batch_size": 1, + "numb_btch": 3, + "_comment8": "that's all" + }, + "numb_steps": 1000000, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 1000, + "_comment9": "that's all" + }, + "_comment10": "that's all" +} diff --git a/examples/water/linear/input_pt.json b/examples/water/linear/input_pt.json new file mode 100644 index 0000000000..e8d8e07136 --- /dev/null +++ b/examples/water/linear/input_pt.json @@ -0,0 +1,124 @@ +{ + "_comment1": " model parameters", + "model": { + "type": "linear_ener", + "weights": "sum", + "type_map": [ + "O", + "H" + ], + "models": [ + { + "descriptor": { + "type": "se_atten", + "sel": [ + 46, + 92 + ], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 25, + 50, + 100 + ], + "resnet_dt": false, + "axis_neuron": 16, + "type_one_side": true, + "precision": "float64", + "seed": 1, + "_comment2": " that's all" + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "precision": "float64", + "seed": 1, + "_comment3": " that's all" + }, + "_comment4": " that's all" + }, + { + "descriptor": { + "type": "se_atten", + "sel": [ + 46, + 92 + ], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 25, + 50, + 100 + ], + "resnet_dt": false, + "axis_neuron": 16, + "type_one_side": true, + "precision": "float64", + "seed": 1, + "_comment2": " that's all" + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "precision": "float64", + "seed": 1, + "_comment3": " that's all" + }, + "_comment4": " that's all" + } + ] + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-8, + "_comment5": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0, + "_comment6": " that's all" + }, + "training": { + "training_data": { + "systems": [ + "../data/data_0/", + "../data/data_1/", + "../data/data_2/" + ], + "batch_size": "auto", + "_comment7": "that's all" + }, + "validation_data": { + "systems": [ + "../data/data_3" + ], + "batch_size": 1, + "numb_btch": 3, + "_comment8": "that's all" + }, + "numb_steps": 1000000, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 1000, + "_comment9": "that's all" + }, + "_comment10": "that's all" +} diff --git a/examples/water/zbl/input.json b/examples/water/zbl/input.json index cb5602d92d..54586ca0cf 100644 --- a/examples/water/zbl/input.json +++ b/examples/water/zbl/input.json @@ -10,7 +10,7 @@ "H" ], "descriptor": { - "type": "se_e2_a", + "type": "se_atten_v2", "sel": [ 46, 92 diff --git a/source/tests/common/test_examples.py b/source/tests/common/test_examples.py index 6abb482824..246e767f01 100644 --- a/source/tests/common/test_examples.py +++ b/source/tests/common/test_examples.py @@ -34,7 +34,9 @@ p_examples / "water" / "hybrid" / "input.json", p_examples / "water" / "dplr" / "train" / "dw.json", p_examples / "water" / "dplr" / "train" / "ener.json", + p_examples / "water" / "d3" / "input_pt.json", p_examples / "water" / "linear" / "input.json", + p_examples / "water" / "linear" / "input_pt.json", p_examples / "nopbc" / "train" / "input.json", p_examples / "water_tensor" / "dipole" / "dipole_input.json", p_examples / "water_tensor" / "polar" / "polar_input.json", diff --git a/source/tests/pt/model/test_permutation.py b/source/tests/pt/model/test_permutation.py index 6aec895041..2d391c7115 100644 --- a/source/tests/pt/model/test_permutation.py +++ b/source/tests/pt/model/test_permutation.py @@ -98,6 +98,7 @@ "data_stat_nbatch": 20, } + model_spin = { "type_map": ["O", "H", "B"], "descriptor": { diff --git a/source/tests/universal/common/cases/model/model.py b/source/tests/universal/common/cases/model/model.py index c31f5cd889..cee69d9d6c 100644 --- a/source/tests/universal/common/cases/model/model.py +++ b/source/tests/universal/common/cases/model/model.py @@ -28,6 +28,25 @@ def setUpClass(cls) -> None: cls.epsilon_dict = {} +class LinearEnerModelTest(ModelTestCase): + @classmethod + def setUpClass(cls) -> None: + cls.expected_rcut = 5.0 + cls.expected_type_map = ["O", "H"] + cls.expected_dim_fparam = 0 + cls.expected_dim_aparam = 0 + cls.expected_sel_type = [0, 1] + cls.expected_aparam_nall = False + cls.expected_model_output_type = ["energy", "mask"] + cls.model_output_equivariant = [] + cls.expected_sel = [46, 92] + cls.expected_sel_mix = sum(cls.expected_sel) + cls.expected_has_message_passing = False + cls.aprec_dict = {} + cls.rprec_dict = {} + cls.epsilon_dict = {} + + class DipoleModelTest(ModelTestCase): @classmethod def setUpClass(cls) -> None: diff --git a/source/tests/universal/pt/model/test_model.py b/source/tests/universal/pt/model/test_model.py index 41df0cf762..81c32eb94c 100644 --- a/source/tests/universal/pt/model/test_model.py +++ b/source/tests/universal/pt/model/test_model.py @@ -21,6 +21,7 @@ DOSModel, DPZBLModel, EnergyModel, + LinearEnergyModel, PolarModel, PropertyModel, SpinEnergyModel, @@ -43,6 +44,7 @@ DipoleModelTest, DosModelTest, EnerModelTest, + LinearEnerModelTest, PolarModelTest, PropertyModelTest, SpinEnerModelTest, @@ -803,3 +805,100 @@ def setUpClass(cls): cls.expected_sel_type = ft.get_sel_type() cls.expected_dim_fparam = ft.get_dim_fparam() cls.expected_dim_aparam = ft.get_dim_aparam() + + +@parameterized( + des_parameterized=( + ( + *[(param_func, DescrptDPA1) for param_func in DescriptorParamDPA1List], + *[(param_func, DescrptDPA2) for param_func in DescriptorParamDPA2List], + (DescriptorParamHybridMixed, DescrptHybrid), + (DescriptorParamHybridMixedTTebd, DescrptHybrid), + ), # descrpt_class_param & class + ((FittingParamEnergy, EnergyFittingNet),), # fitting_class_param & class + ), + fit_parameterized=( + ( + (DescriptorParamDPA1, DescrptDPA1), + (DescriptorParamDPA2, DescrptDPA2), + ), # descrpt_class_param & class + ( + *[(param_func, EnergyFittingNet) for param_func in FittingParamEnergyList], + ), # fitting_class_param & class + ), +) +class TestLinearEnergyModelPT(unittest.TestCase, LinearEnerModelTest, PTTestCase): + @property + def modules_to_test(self): + skip_test_jit = getattr(self, "skip_test_jit", False) + modules = PTTestCase.modules_to_test.fget(self) + if not skip_test_jit: + # for Model, we can test script module API + modules += [ + self._script_module + if hasattr(self, "_script_module") + else self.script_module + ] + return modules + + @classmethod + def setUpClass(cls): + LinearEnerModelTest.setUpClass() + (DescriptorParam, Descrpt) = cls.param[0] + (FittingParam, Fitting) = cls.param[1] + # set special precision + cls.aprec_dict["test_smooth"] = 1e-5 + cls.input_dict_ds = DescriptorParam( + len(cls.expected_type_map), + cls.expected_rcut, + cls.expected_rcut / 2, + cls.expected_sel, + cls.expected_type_map, + ) + + # set skip tests + skiptest, skip_reason = skip_model_tests(cls) + if skiptest: + raise cls.skipTest(cls, skip_reason) + + ds1, ds2 = Descrpt(**cls.input_dict_ds), Descrpt(**cls.input_dict_ds) + cls.input_dict_ft = FittingParam( + ntypes=len(cls.expected_type_map), + dim_descrpt=ds1.get_dim_out(), + mixed_types=ds1.mixed_types(), + type_map=cls.expected_type_map, + ) + ft1 = Fitting( + **cls.input_dict_ft, + ) + ft2 = Fitting( + **cls.input_dict_ft, + ) + dp_model1 = DPAtomicModel( + ds1, + ft1, + type_map=cls.expected_type_map, + ) + dp_model2 = DPAtomicModel( + ds2, + ft2, + type_map=cls.expected_type_map, + ) + cls.module = LinearEnergyModel( + [dp_model1, dp_model2], + type_map=cls.expected_type_map, + ) + # only test jit API once for different models + if ( + DescriptorParam not in defalut_des_param + or FittingParam not in defalut_fit_param + ): + cls.skip_test_jit = True + else: + with torch.jit.optimized_execution(False): + cls._script_module = torch.jit.script(cls.module) + cls.output_def = cls.module.translated_output_def() + cls.expected_has_message_passing = ds1.has_message_passing() + cls.expected_dim_fparam = ft1.get_dim_fparam() + cls.expected_dim_aparam = ft1.get_dim_aparam() + cls.expected_sel_type = ft1.get_sel_type() From 2ca1c06c6e24f2742ac6984f7036fddbde617a93 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 10 Oct 2024 22:25:40 -0400 Subject: [PATCH 032/193] chore: cache `deserialized_module` and `script_module` (#4196) ## Summary by CodeRabbit - **New Features** - Enhanced performance of module serialization and deserialization by converting instance properties to class methods for improved access and efficiency. - **Bug Fixes** - Resolved issues related to instance-level access, now allowing direct class-level method access for better functionality. - **Refactor** - Updated property signatures to utilize class methods for caching results, optimizing performance and resource management. --------- Signed-off-by: Jinzhe Zeng --- source/tests/universal/dpmodel/backend.py | 18 +++++++++++++++ source/tests/universal/pt/backend.py | 27 +++++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/source/tests/universal/dpmodel/backend.py b/source/tests/universal/dpmodel/backend.py index 99170c20e1..4f624ae501 100644 --- a/source/tests/universal/dpmodel/backend.py +++ b/source/tests/universal/dpmodel/backend.py @@ -1,4 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from functools import ( + lru_cache, +) + import numpy as np from deepmd.dpmodel.common import ( @@ -30,8 +34,15 @@ def convert_to_numpy(cls, xx: np.ndarray) -> np.ndarray: def convert_from_numpy(cls, xx: np.ndarray) -> np.ndarray: return xx + @classmethod + @lru_cache(maxsize=1) + def _get_deserialized_module(cls): + return cls.module.deserialize(cls.module.serialize()) + @property def deserialized_module(self): + if hasattr(self.__class__, "module"): + return self._get_deserialized_module() return self.module.deserialize(self.module.serialize()) @property @@ -41,3 +52,10 @@ def modules_to_test(self): self.deserialized_module, ] return modules + + @classmethod + def tearDownClass(cls): + super().tearDownClass() + if hasattr(cls, "module"): + del cls.module + cls._get_deserialized_module.cache_clear() diff --git a/source/tests/universal/pt/backend.py b/source/tests/universal/pt/backend.py index 951bf18262..5146fdc79b 100644 --- a/source/tests/universal/pt/backend.py +++ b/source/tests/universal/pt/backend.py @@ -1,4 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from functools import ( + lru_cache, +) + import numpy as np import torch @@ -18,13 +22,28 @@ class PTTestCase(BackendTestCase): module: "torch.nn.Module" """PT module to test.""" + @classmethod + @lru_cache(maxsize=1) + def _get_script_module(cls): + with torch.jit.optimized_execution(False): + return torch.jit.script(cls.module) + @property def script_module(self): + if hasattr(self.__class__, "module"): + return self._get_script_module() with torch.jit.optimized_execution(False): return torch.jit.script(self.module) + @classmethod + @lru_cache(maxsize=1) + def _get_deserialized_module(cls): + return cls.module.deserialize(cls.module.serialize()) + @property def deserialized_module(self): + if hasattr(self.__class__, "module"): + return self._get_deserialized_module() return self.module.deserialize(self.module.serialize()) @property @@ -35,6 +54,14 @@ def modules_to_test(self): ] return modules + @classmethod + def tearDownClass(cls): + super().tearDownClass() + if hasattr(cls, "module"): + del cls.module + cls._get_deserialized_module.cache_clear() + cls._get_script_module.cache_clear() + def test_jit(self): if getattr(self, "skip_test_jit", False): self.skipTest("Skip test jit.") From 8174cf113625885ed6b58f17149e6a212175945a Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 10 Oct 2024 23:20:55 -0400 Subject: [PATCH 033/193] chore(ci): skip more tests on GPU CI (#4200) Also, only skip these GPU tests on the CI. When we test locally, it's expected to run the tests. ## Summary by CodeRabbit - **New Features** - Introduced a global variable `CI` to enhance test execution control based on the continuous integration environment. - **Bug Fixes** - Updated test skipping conditions across multiple test classes to ensure tests are only executed on CPU when the CI environment is active. - **Documentation** - Enhanced clarity on test conditions by including the `CI` variable in relevant test decorators. --------- Signed-off-by: Jinzhe Zeng --- source/tests/consistent/common.py | 10 ++++++++++ source/tests/universal/common/cases/model/utils.py | 13 +++++++------ .../dpmodel/atomc_model/test_atomic_model.py | 13 +++++++------ .../universal/dpmodel/descriptor/test_descriptor.py | 3 ++- .../tests/universal/dpmodel/fitting/test_fitting.py | 3 ++- source/tests/universal/dpmodel/model/test_model.py | 5 +++-- .../universal/dpmodel/utils/test_type_embed.py | 3 ++- source/tests/utils.py | 3 +++ 8 files changed, 36 insertions(+), 17 deletions(-) diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index 1070fe0f79..e3bf808978 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -3,6 +3,7 @@ import itertools import os import sys +import unittest from abc import ( ABC, abstractmethod, @@ -33,6 +34,11 @@ Backend, ) +from ..utils import ( + CI, + TEST_DEVICE, +) + INSTALLED_TF = Backend.get_backend("tensorflow")().is_available() INSTALLED_PT = Backend.get_backend("pytorch")().is_available() INSTALLED_JAX = Backend.get_backend("jax")().is_available() @@ -340,6 +346,7 @@ def test_tf_self_consistent(self): np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_dp_consistent_with_ref(self): """Test whether DP and reference are consistent.""" if self.skip_dp: @@ -358,6 +365,7 @@ def test_dp_consistent_with_ref(self): np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_dp_self_consistent(self): """Test whether DP is self consistent.""" if self.skip_dp: @@ -447,6 +455,7 @@ def test_jax_self_consistent(self): else: self.assertEqual(rr1, rr2) + @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_array_api_strict_consistent_with_ref(self): """Test whether array_api_strict and reference are consistent.""" if self.skip_array_api_strict: @@ -465,6 +474,7 @@ def test_array_api_strict_consistent_with_ref(self): np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_array_api_strict_self_consistent(self): """Test whether array_api_strict is self consistent.""" if self.skip_array_api_strict: diff --git a/source/tests/universal/common/cases/model/utils.py b/source/tests/universal/common/cases/model/utils.py index d583d06b05..628c415eb2 100644 --- a/source/tests/universal/common/cases/model/utils.py +++ b/source/tests/universal/common/cases/model/utils.py @@ -22,6 +22,7 @@ GLOBAL_SEED, ) from .....utils import ( + CI, TEST_DEVICE, ) @@ -327,7 +328,7 @@ def test_zero_forward(self): continue np.testing.assert_allclose(rr1, rr2, atol=aprec) - @unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") + @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_permutation(self): """Test permutation.""" if getattr(self, "skip_test_permutation", False): @@ -413,7 +414,7 @@ def test_permutation(self): else: raise RuntimeError(f"Unknown output key: {kk}") - @unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") + @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_trans(self): """Test translation.""" if getattr(self, "skip_test_trans", False): @@ -482,7 +483,7 @@ def test_trans(self): else: raise RuntimeError(f"Unknown output key: {kk}") - @unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") + @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_rot(self): """Test rotation.""" if getattr(self, "skip_test_rot", False): @@ -672,7 +673,7 @@ def test_rot(self): else: raise RuntimeError(f"Unknown output key: {kk}") - @unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") + @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_smooth(self): """Test smooth.""" if getattr(self, "skip_test_smooth", False): @@ -779,7 +780,7 @@ def test_smooth(self): else: raise RuntimeError(f"Unknown output key: {kk}") - @unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") + @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_autodiff(self): """Test autodiff.""" if getattr(self, "skip_test_autodiff", False): @@ -919,7 +920,7 @@ def ff_cell(bb): # not support virial by far pass - @unittest.skipIf(TEST_DEVICE == "cpu", "Skip test on CPU.") + @unittest.skipIf(TEST_DEVICE == "cpu" and CI, "Skip test on CPU.") def test_device_consistence(self): """Test forward consistency between devices.""" test_spin = getattr(self, "test_spin", False) diff --git a/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py b/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py index 4c5a2b291b..8e7324e2bc 100644 --- a/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py +++ b/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py @@ -26,6 +26,7 @@ parameterized, ) from ....utils import ( + CI, TEST_DEVICE, ) from ...common.cases.atomic_model.atomic_model import ( @@ -98,7 +99,7 @@ ), # fitting_class_param & class ), ) -@unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") class TestEnergyAtomicModelDP(unittest.TestCase, EnerAtomicModelTest, DPTestCase): @classmethod def setUpClass(cls): @@ -165,7 +166,7 @@ def setUpClass(cls): ), # fitting_class_param & class ), ) -@unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") class TestDosAtomicModelDP(unittest.TestCase, DosAtomicModelTest, DPTestCase): @classmethod def setUpClass(cls): @@ -227,7 +228,7 @@ def setUpClass(cls): ), # fitting_class_param & class ), ) -@unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") class TestDipoleAtomicModelDP(unittest.TestCase, DipoleAtomicModelTest, DPTestCase): @classmethod def setUpClass(cls): @@ -290,7 +291,7 @@ def setUpClass(cls): ), # fitting_class_param & class ), ) -@unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") class TestPolarAtomicModelDP(unittest.TestCase, PolarAtomicModelTest, DPTestCase): @classmethod def setUpClass(cls): @@ -351,7 +352,7 @@ def setUpClass(cls): ), # fitting_class_param & class ), ) -@unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") class TestZBLAtomicModelDP(unittest.TestCase, ZBLAtomicModelTest, DPTestCase): @classmethod def setUpClass(cls): @@ -429,7 +430,7 @@ def setUpClass(cls): ), # fitting_class_param & class ), ) -@unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") class TestPropertyAtomicModelDP(unittest.TestCase, PropertyAtomicModelTest, DPTestCase): @classmethod def setUpClass(cls): diff --git a/source/tests/universal/dpmodel/descriptor/test_descriptor.py b/source/tests/universal/dpmodel/descriptor/test_descriptor.py index 256bea74f8..fc7ee8b075 100644 --- a/source/tests/universal/dpmodel/descriptor/test_descriptor.py +++ b/source/tests/universal/dpmodel/descriptor/test_descriptor.py @@ -26,6 +26,7 @@ GLOBAL_SEED, ) from ....utils import ( + CI, TEST_DEVICE, ) from ...common.cases.descriptor.descriptor import ( @@ -519,7 +520,7 @@ def DescriptorParamHybridMixedTTebd(ntypes, rcut, rcut_smth, sel, type_map, **kw (DescriptorParamHybridMixedTTebd, DescrptHybrid), ) # class_param & class ) -@unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") class TestDescriptorDP(unittest.TestCase, DescriptorTest, DPTestCase): def setUp(self): DescriptorTest.setUp(self) diff --git a/source/tests/universal/dpmodel/fitting/test_fitting.py b/source/tests/universal/dpmodel/fitting/test_fitting.py index 393bab1707..f64faee76f 100644 --- a/source/tests/universal/dpmodel/fitting/test_fitting.py +++ b/source/tests/universal/dpmodel/fitting/test_fitting.py @@ -20,6 +20,7 @@ GLOBAL_SEED, ) from ....utils import ( + CI, TEST_DEVICE, ) from ...common.cases.fitting.fitting import ( @@ -236,7 +237,7 @@ def FittingParamProperty( ), # class_param & class (True, False), # mixed_types ) -@unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") class TestFittingDP(unittest.TestCase, FittingTest, DPTestCase): def setUp(self): ((FittingParam, Fitting), self.mixed_types) = self.param diff --git a/source/tests/universal/dpmodel/model/test_model.py b/source/tests/universal/dpmodel/model/test_model.py index 66edc2d50e..265dc43c6c 100644 --- a/source/tests/universal/dpmodel/model/test_model.py +++ b/source/tests/universal/dpmodel/model/test_model.py @@ -25,6 +25,7 @@ parameterized, ) from ....utils import ( + CI, TEST_DEVICE, ) from ...common.cases.model.model import ( @@ -112,7 +113,7 @@ def skip_model_tests(test_obj): ), # fitting_class_param & class ), ) -@unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") class TestEnergyModelDP(unittest.TestCase, EnerModelTest, DPTestCase): @classmethod def setUpClass(cls): @@ -200,7 +201,7 @@ def setUpClass(cls): ), # fitting_class_param & class ), ) -@unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") class TestSpinEnergyModelDP(unittest.TestCase, SpinEnerModelTest, DPTestCase): @classmethod def setUpClass(cls): diff --git a/source/tests/universal/dpmodel/utils/test_type_embed.py b/source/tests/universal/dpmodel/utils/test_type_embed.py index 67faef0a8d..ee3063af7d 100644 --- a/source/tests/universal/dpmodel/utils/test_type_embed.py +++ b/source/tests/universal/dpmodel/utils/test_type_embed.py @@ -6,6 +6,7 @@ ) from ....utils import ( + CI, TEST_DEVICE, ) from ...common.cases.utils.type_embed import ( @@ -16,7 +17,7 @@ ) -@unittest.skipIf(TEST_DEVICE != "cpu", "Only test on CPU.") +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") class TestTypeEmbd(unittest.TestCase, TypeEmbdTest, DPTestCase): def setUp(self): TypeEmbdTest.setUp(self) diff --git a/source/tests/utils.py b/source/tests/utils.py index 694f55186e..bfb3d445af 100644 --- a/source/tests/utils.py +++ b/source/tests/utils.py @@ -5,3 +5,6 @@ TEST_DEVICE = "cpu" else: TEST_DEVICE = "cuda" + +# see https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/store-information-in-variables#default-environment-variables +CI = os.environ.get("CI") == "true" From c10bc3c7bcf91c7d12b080df3a39181ffaf5bd93 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 13 Oct 2024 00:58:53 -0400 Subject: [PATCH 034/193] chore(tf): filter TF deprecation warnings (#4199) Fix #2367. Fix #3039. These warnings are not true - these deprecated APIs have existed for several years and never been removed. ## Summary by CodeRabbit - **New Features** - Enhanced logging capabilities for TensorFlow warnings. - Introduced a new filter to manage specific warning messages from the TensorFlow logger. - **Bug Fixes** - Improved the configuration sequence for the TensorFlow logger to ensure proper functionality. Signed-off-by: Jinzhe Zeng --- deepmd/tf/env.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/deepmd/tf/env.py b/deepmd/tf/env.py index 03f36fb675..5a66498dba 100644 --- a/deepmd/tf/env.py +++ b/deepmd/tf/env.py @@ -2,6 +2,7 @@ """Module that sets tensorflow working environment and exports inportant constants.""" import ctypes +import logging import os import platform from importlib import ( @@ -75,17 +76,27 @@ def dlopen_library(module: str, filename: str): dlopen_library("nvidia.cusparse.lib", "libcusparse.so*") dlopen_library("nvidia.cudnn.lib", "libcudnn.so*") + +FILTER_MSGS = [ + "is deprecated and will be removed in a future version.", + "disable_mixed_precision_graph_rewrite() called when mixed precision is already disabled.", +] + + +class TFWarningFilter(logging.Filter): + def filter(self, record): + return not any(msg in record.getMessage().strip() for msg in FILTER_MSGS) + + # keras 3 is incompatible with tf.compat.v1 # https://keras.io/getting_started/#tensorflow--keras-2-backwards-compatibility # 2024/04/24: deepmd.tf doesn't import tf.keras any more # import tensorflow v1 compatability -try: - import tensorflow.compat.v1 as tf +import tensorflow.compat.v1 as tf - tf.disable_v2_behavior() -except ImportError: - import tensorflow as tf +tf.get_logger().addFilter(TFWarningFilter()) +tf.disable_v2_behavior() try: import tensorflow.compat.v2 as tfv2 except ImportError: From 8279ccaaf4fa94d2919865128de57f942c30562e Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 13 Oct 2024 01:02:54 -0400 Subject: [PATCH 035/193] feat(jax/array-api): energy fitting (#4204) ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced a fitting module for energy models using JAX, enhancing compatibility with different array backends. - Added `AtomExcludeMask` class for improved attribute handling in exclusion masks. - **Improvements** - Updated serialization and array handling methods for better integration with array APIs. - Enhanced testing capabilities for energy fitting with support for different backends. - **Documentation** - Added SPDX license identifier to relevant files for licensing clarity. Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/fitting/general_fitting.py | 48 +++++++------ deepmd/dpmodel/utils/exclude_mask.py | 8 ++- deepmd/jax/fitting/__init__.py | 1 + deepmd/jax/fitting/fitting.py | 39 +++++++++++ deepmd/jax/utils/exclude_mask.py | 9 +++ .../array_api_strict/fitting/__init__.py | 1 + .../tests/array_api_strict/fitting/fitting.py | 38 +++++++++++ .../array_api_strict/utils/exclude_mask.py | 8 +++ source/tests/consistent/fitting/test_ener.py | 67 +++++++++++++++++++ 9 files changed, 197 insertions(+), 22 deletions(-) create mode 100644 deepmd/jax/fitting/__init__.py create mode 100644 deepmd/jax/fitting/fitting.py create mode 100644 source/tests/array_api_strict/fitting/__init__.py create mode 100644 source/tests/array_api_strict/fitting/fitting.py diff --git a/deepmd/dpmodel/fitting/general_fitting.py b/deepmd/dpmodel/fitting/general_fitting.py index a587f69449..fd80ccb4aa 100644 --- a/deepmd/dpmodel/fitting/general_fitting.py +++ b/deepmd/dpmodel/fitting/general_fitting.py @@ -9,12 +9,16 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel import ( DEFAULT_PRECISION, NativeOP, ) +from deepmd.dpmodel.common import ( + to_numpy_array, +) from deepmd.dpmodel.utils import ( AtomExcludeMask, FittingNet, @@ -283,11 +287,11 @@ def serialize(self) -> dict: "exclude_types": self.exclude_types, "nets": self.nets.serialize(), "@variables": { - "bias_atom_e": self.bias_atom_e, - "fparam_avg": self.fparam_avg, - "fparam_inv_std": self.fparam_inv_std, - "aparam_avg": self.aparam_avg, - "aparam_inv_std": self.aparam_inv_std, + "bias_atom_e": to_numpy_array(self.bias_atom_e), + "fparam_avg": to_numpy_array(self.fparam_avg), + "fparam_inv_std": to_numpy_array(self.fparam_inv_std), + "aparam_avg": to_numpy_array(self.aparam_avg), + "aparam_inv_std": to_numpy_array(self.aparam_inv_std), }, "type_map": self.type_map, # not supported @@ -344,6 +348,7 @@ def _call_common( The atomic parameter. shape: nf x nloc x nap. nap being `numb_aparam` """ + xp = array_api_compat.array_namespace(descriptor, atype) nf, nloc, nd = descriptor.shape net_dim_out = self._net_out_dim() # check input dim @@ -359,7 +364,7 @@ def _call_common( # we consider it as always zero for convenience. # Needs a compute_input_stats for vaccum passed from the # descriptor. - xx_zeros = np.zeros_like(xx) + xx_zeros = xp.zeros_like(xx) else: xx_zeros = None # check fparam dim, concate to input descriptor @@ -371,13 +376,15 @@ def _call_common( "which is not consistent with {self.numb_fparam}.", ) fparam = (fparam - self.fparam_avg) * self.fparam_inv_std - fparam = np.tile(fparam.reshape([nf, 1, self.numb_fparam]), [1, nloc, 1]) - xx = np.concatenate( + fparam = xp.tile( + xp.reshape(fparam, [nf, 1, self.numb_fparam]), (1, nloc, 1) + ) + xx = xp.concat( [xx, fparam], axis=-1, ) if xx_zeros is not None: - xx_zeros = np.concatenate( + xx_zeros = xp.concat( [xx_zeros, fparam], axis=-1, ) @@ -389,24 +396,24 @@ def _call_common( "get an input aparam of dim {aparam.shape[-1]}, ", "which is not consistent with {self.numb_aparam}.", ) - aparam = aparam.reshape([nf, nloc, self.numb_aparam]) + aparam = xp.reshape(aparam, [nf, nloc, self.numb_aparam]) aparam = (aparam - self.aparam_avg) * self.aparam_inv_std - xx = np.concatenate( + xx = xp.concat( [xx, aparam], axis=-1, ) if xx_zeros is not None: - xx_zeros = np.concatenate( + xx_zeros = xp.concat( [xx_zeros, aparam], axis=-1, ) # calcualte the prediction if not self.mixed_types: - outs = np.zeros([nf, nloc, net_dim_out]) # pylint: disable=no-explicit-dtype + outs = xp.zeros([nf, nloc, net_dim_out]) # pylint: disable=no-explicit-dtype for type_i in range(self.ntypes): - mask = np.tile( - (atype == type_i).reshape([nf, nloc, 1]), [1, 1, net_dim_out] + mask = xp.tile( + xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, net_dim_out) ) atom_property = self.nets[(type_i,)](xx) if self.remove_vaccum_contribution is not None and not ( @@ -415,15 +422,18 @@ def _call_common( ): assert xx_zeros is not None atom_property -= self.nets[(type_i,)](xx_zeros) - atom_property = atom_property + self.bias_atom_e[type_i] - atom_property = atom_property * mask + atom_property = atom_property + self.bias_atom_e[type_i, ...] + atom_property = atom_property * xp.astype(mask, atom_property.dtype) outs = outs + atom_property # Shape is [nframes, natoms[0], 1] else: - outs = self.nets[()](xx) + self.bias_atom_e[atype] + outs = self.nets[()](xx) + xp.reshape( + xp.take(self.bias_atom_e, xp.reshape(atype, [-1]), axis=0), + [nf, nloc, net_dim_out], + ) if xx_zeros is not None: outs -= self.nets[()](xx_zeros) # nf x nloc exclude_mask = self.emask.build_type_exclude_mask(atype) # nf x nloc x nod - outs = outs * exclude_mask[:, :, None] + outs = outs * xp.astype(exclude_mask[:, :, None], outs.dtype) return {self.var_name: outs} diff --git a/deepmd/dpmodel/utils/exclude_mask.py b/deepmd/dpmodel/utils/exclude_mask.py index 5469e66d97..b09a9b3e47 100644 --- a/deepmd/dpmodel/utils/exclude_mask.py +++ b/deepmd/dpmodel/utils/exclude_mask.py @@ -18,12 +18,12 @@ def __init__( ): self.ntypes = ntypes self.exclude_types = exclude_types - self.type_mask = np.array( + type_mask = np.array( [1 if tt_i not in self.exclude_types else 0 for tt_i in range(ntypes)], dtype=np.int32, ) # (ntypes) - self.type_mask = self.type_mask.reshape([-1]) + self.type_mask = type_mask.reshape([-1]) def get_exclude_types(self): return self.exclude_types @@ -52,7 +52,9 @@ def build_type_exclude_mask( """ xp = array_api_compat.array_namespace(atype) nf, natom = atype.shape - return xp.reshape(self.type_mask[atype], (nf, natom)) + return xp.reshape( + xp.take(self.type_mask, xp.reshape(atype, [-1]), axis=0), (nf, natom) + ) class PairExcludeMask: diff --git a/deepmd/jax/fitting/__init__.py b/deepmd/jax/fitting/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/jax/fitting/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/jax/fitting/fitting.py b/deepmd/jax/fitting/fitting.py new file mode 100644 index 0000000000..27ad791db9 --- /dev/null +++ b/deepmd/jax/fitting/fitting.py @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.fitting.ener_fitting import EnergyFittingNet as EnergyFittingNetDP +from deepmd.jax.common import ( + flax_module, + to_jax_array, +) +from deepmd.jax.utils.exclude_mask import ( + AtomExcludeMask, +) +from deepmd.jax.utils.network import ( + NetworkCollection, +) + + +def setattr_for_general_fitting(name: str, value: Any) -> Any: + if name in { + "bias_atom_e", + "fparam_avg", + "fparam_inv_std", + "aparam_avg", + "aparam_inv_std", + }: + value = to_jax_array(value) + elif name == "emask": + value = AtomExcludeMask(value.ntypes, value.exclude_types) + elif name == "nets": + value = NetworkCollection.deserialize(value.serialize()) + return value + + +@flax_module +class EnergyFittingNet(EnergyFittingNetDP): + def __setattr__(self, name: str, value: Any) -> None: + value = setattr_for_general_fitting(name, value) + return super().__setattr__(name, value) diff --git a/deepmd/jax/utils/exclude_mask.py b/deepmd/jax/utils/exclude_mask.py index cac4cee092..a6cf210f94 100644 --- a/deepmd/jax/utils/exclude_mask.py +++ b/deepmd/jax/utils/exclude_mask.py @@ -3,6 +3,7 @@ Any, ) +from deepmd.dpmodel.utils.exclude_mask import AtomExcludeMask as AtomExcludeMaskDP from deepmd.dpmodel.utils.exclude_mask import PairExcludeMask as PairExcludeMaskDP from deepmd.jax.common import ( flax_module, @@ -10,6 +11,14 @@ ) +@flax_module +class AtomExcludeMask(AtomExcludeMaskDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"type_mask"}: + value = to_jax_array(value) + return super().__setattr__(name, value) + + @flax_module class PairExcludeMask(PairExcludeMaskDP): def __setattr__(self, name: str, value: Any) -> None: diff --git a/source/tests/array_api_strict/fitting/__init__.py b/source/tests/array_api_strict/fitting/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/array_api_strict/fitting/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/array_api_strict/fitting/fitting.py b/source/tests/array_api_strict/fitting/fitting.py new file mode 100644 index 0000000000..2e6bd9fe25 --- /dev/null +++ b/source/tests/array_api_strict/fitting/fitting.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.fitting.ener_fitting import EnergyFittingNet as EnergyFittingNetDP + +from ..common import ( + to_array_api_strict_array, +) +from ..utils.exclude_mask import ( + AtomExcludeMask, +) +from ..utils.network import ( + NetworkCollection, +) + + +def setattr_for_general_fitting(name: str, value: Any) -> Any: + if name in { + "bias_atom_e", + "fparam_avg", + "fparam_inv_std", + "aparam_avg", + "aparam_inv_std", + }: + value = to_array_api_strict_array(value) + elif name == "emask": + value = AtomExcludeMask(value.ntypes, value.exclude_types) + elif name == "nets": + value = NetworkCollection.deserialize(value.serialize()) + return value + + +class EnergyFittingNet(EnergyFittingNetDP): + def __setattr__(self, name: str, value: Any) -> None: + value = setattr_for_general_fitting(name, value) + return super().__setattr__(name, value) diff --git a/source/tests/array_api_strict/utils/exclude_mask.py b/source/tests/array_api_strict/utils/exclude_mask.py index 06f2e94b52..7f5c29e0a8 100644 --- a/source/tests/array_api_strict/utils/exclude_mask.py +++ b/source/tests/array_api_strict/utils/exclude_mask.py @@ -3,6 +3,7 @@ Any, ) +from deepmd.dpmodel.utils.exclude_mask import AtomExcludeMask as AtomExcludeMaskDP from deepmd.dpmodel.utils.exclude_mask import PairExcludeMask as PairExcludeMaskDP from ..common import ( @@ -10,6 +11,13 @@ ) +class AtomExcludeMask(AtomExcludeMaskDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"type_mask"}: + value = to_array_api_strict_array(value) + return super().__setattr__(name, value) + + class PairExcludeMask(PairExcludeMaskDP): def __setattr__(self, name: str, value: Any) -> None: if name in {"type_mask"}: diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index ac4f7ae543..ba2be1d86b 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -12,6 +12,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -36,6 +38,22 @@ fitting_ener, ) +if INSTALLED_JAX: + from deepmd.jax.env import ( + jnp, + ) + from deepmd.jax.fitting.fitting import EnergyFittingNet as EnerFittingJAX +else: + EnerFittingJAX = object +if INSTALLED_ARRAY_API_STRICT: + import array_api_strict + + from ...array_api_strict.fitting.fitting import ( + EnergyFittingNet as EnerFittingStrict, + ) +else: + EnerFittingStrict = None + @parameterized( (True, False), # resnet_dt @@ -74,9 +92,25 @@ def skip_pt(self) -> bool: ) = self.param return CommonTest.skip_pt + skip_jax = not INSTALLED_JAX + + @property + def skip_array_api_strict(self) -> bool: + ( + resnet_dt, + precision, + mixed_types, + numb_fparam, + atom_ener, + ) = self.param + # TypeError: The array_api_strict namespace does not support the dtype 'bfloat16' + return not INSTALLED_ARRAY_API_STRICT or precision == "bfloat16" + tf_class = EnerFittingTF dp_class = EnerFittingDP pt_class = EnerFittingPT + jax_class = EnerFittingJAX + array_api_strict_class = EnerFittingStrict args = fitting_ener() def setUp(self): @@ -157,6 +191,39 @@ def eval_dp(self, dp_obj: Any) -> Any: fparam=self.fparam if numb_fparam else None, )["energy"] + def eval_jax(self, jax_obj: Any) -> Any: + ( + resnet_dt, + precision, + mixed_types, + numb_fparam, + atom_ener, + ) = self.param + return np.asarray( + jax_obj( + jnp.asarray(self.inputs), + jnp.asarray(self.atype.reshape(1, -1)), + fparam=jnp.asarray(self.fparam) if numb_fparam else None, + )["energy"] + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + array_api_strict.set_array_api_strict_flags(api_version="2023.12") + ( + resnet_dt, + precision, + mixed_types, + numb_fparam, + atom_ener, + ) = self.param + return np.asarray( + array_api_strict_obj( + array_api_strict.asarray(self.inputs), + array_api_strict.asarray(self.atype.reshape(1, -1)), + fparam=array_api_strict.asarray(self.fparam) if numb_fparam else None, + )["energy"] + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: if backend == self.RefBackend.TF: # shape is not same From a1f867217e3d06a9f5921cd5b2b76e42649b0882 Mon Sep 17 00:00:00 2001 From: Anyang Peng <137014849+anyangml@users.noreply.github.com> Date: Mon, 14 Oct 2024 15:44:02 +0800 Subject: [PATCH 036/193] Chore: refactor get standard model (#4205) ## Summary by CodeRabbit - **Refactor** - Simplified model component creation by introducing a new function for better code clarity and reusability. - Updated model-building functions to utilize the new component creation logic, enhancing maintainability. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/pt/model/model/__init__.py | 96 +++++++++++-------------------- 1 file changed, 35 insertions(+), 61 deletions(-) diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index 26aefa6201..613baf440e 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -72,6 +72,29 @@ ) +def _get_standard_model_components(model_params, ntypes): + # descriptor + model_params["descriptor"]["ntypes"] = ntypes + model_params["descriptor"]["type_map"] = copy.deepcopy(model_params["type_map"]) + descriptor = BaseDescriptor(**model_params["descriptor"]) + # fitting + fitting_net = model_params.get("fitting_net", {}) + fitting_net["type"] = fitting_net.get("type", "ener") + fitting_net["ntypes"] = descriptor.get_ntypes() + fitting_net["type_map"] = copy.deepcopy(model_params["type_map"]) + fitting_net["mixed_types"] = descriptor.mixed_types() + if fitting_net["type"] in ["dipole", "polar"]: + fitting_net["embedding_width"] = descriptor.get_dim_emb() + fitting_net["dim_descrpt"] = descriptor.get_dim_out() + grad_force = "direct" not in fitting_net["type"] + if not grad_force: + fitting_net["out_dim"] = descriptor.get_dim_emb() + if "ener" in fitting_net["type"]: + fitting_net["return_energy"] = True + fitting = BaseFitting(**fitting_net) + return descriptor, fitting, fitting_net["type"] + + def get_spin_model(model_params): model_params = copy.deepcopy(model_params) if not model_params["spin"]["use_spin"] or isinstance( @@ -117,25 +140,9 @@ def get_linear_model(model_params): if "descriptor" in sub_model_params: # descriptor sub_model_params["descriptor"]["ntypes"] = ntypes - sub_model_params["descriptor"]["type_map"] = copy.deepcopy( - model_params["type_map"] + descriptor, fitting, _ = _get_standard_model_components( + sub_model_params, ntypes ) - descriptor = BaseDescriptor(**sub_model_params["descriptor"]) - # fitting - fitting_net = sub_model_params.get("fitting_net", {}) - fitting_net["type"] = fitting_net.get("type", "ener") - fitting_net["ntypes"] = descriptor.get_ntypes() - fitting_net["type_map"] = copy.deepcopy(model_params["type_map"]) - fitting_net["mixed_types"] = descriptor.mixed_types() - if fitting_net["type"] in ["dipole", "polar"]: - fitting_net["embedding_width"] = descriptor.get_dim_emb() - fitting_net["dim_descrpt"] = descriptor.get_dim_out() - grad_force = "direct" not in fitting_net["type"] - if not grad_force: - fitting_net["out_dim"] = descriptor.get_dim_emb() - if "ener" in fitting_net["type"]: - fitting_net["return_energy"] = True - fitting = BaseFitting(**fitting_net) list_of_models.append( DPAtomicModel(descriptor, fitting, type_map=model_params["type_map"]) ) @@ -167,24 +174,7 @@ def get_linear_model(model_params): def get_zbl_model(model_params): model_params = copy.deepcopy(model_params) ntypes = len(model_params["type_map"]) - # descriptor - model_params["descriptor"]["ntypes"] = ntypes - model_params["descriptor"]["type_map"] = copy.deepcopy(model_params["type_map"]) - descriptor = BaseDescriptor(**model_params["descriptor"]) - # fitting - fitting_net = model_params.get("fitting_net", None) - fitting_net["type"] = fitting_net.get("type", "ener") - fitting_net["ntypes"] = descriptor.get_ntypes() - fitting_net["type_map"] = copy.deepcopy(model_params["type_map"]) - fitting_net["mixed_types"] = descriptor.mixed_types() - fitting_net["embedding_width"] = descriptor.get_dim_out() - fitting_net["dim_descrpt"] = descriptor.get_dim_out() - grad_force = "direct" not in fitting_net["type"] - if not grad_force: - fitting_net["out_dim"] = descriptor.get_dim_emb() - if "ener" in fitting_net["type"]: - fitting_net["return_energy"] = True - fitting = BaseFitting(**fitting_net) + descriptor, fitting, _ = _get_standard_model_components(model_params, ntypes) dp_model = DPAtomicModel(descriptor, fitting, type_map=model_params["type_map"]) # pairtab filepath = model_params["use_srtab"] @@ -246,25 +236,9 @@ def get_standard_model(model_params): model_params_old = model_params model_params = copy.deepcopy(model_params) ntypes = len(model_params["type_map"]) - # descriptor - model_params["descriptor"]["ntypes"] = ntypes - model_params["descriptor"]["type_map"] = copy.deepcopy(model_params["type_map"]) - descriptor = BaseDescriptor(**model_params["descriptor"]) - # fitting - fitting_net = model_params.get("fitting_net", {}) - fitting_net["type"] = fitting_net.get("type", "ener") - fitting_net["ntypes"] = descriptor.get_ntypes() - fitting_net["type_map"] = copy.deepcopy(model_params["type_map"]) - fitting_net["mixed_types"] = descriptor.mixed_types() - if fitting_net["type"] in ["dipole", "polar"]: - fitting_net["embedding_width"] = descriptor.get_dim_emb() - fitting_net["dim_descrpt"] = descriptor.get_dim_out() - grad_force = "direct" not in fitting_net["type"] - if not grad_force: - fitting_net["out_dim"] = descriptor.get_dim_emb() - if "ener" in fitting_net["type"]: - fitting_net["return_energy"] = True - fitting = BaseFitting(**fitting_net) + descriptor, fitting, fitting_net_type = _get_standard_model_components( + model_params, ntypes + ) atom_exclude_types = model_params.get("atom_exclude_types", []) pair_exclude_types = model_params.get("pair_exclude_types", []) preset_out_bias = model_params.get("preset_out_bias") @@ -272,18 +246,18 @@ def get_standard_model(model_params): preset_out_bias, model_params["type_map"] ) - if fitting_net["type"] == "dipole": + if fitting_net_type == "dipole": modelcls = DipoleModel - elif fitting_net["type"] == "polar": + elif fitting_net_type == "polar": modelcls = PolarModel - elif fitting_net["type"] == "dos": + elif fitting_net_type == "dos": modelcls = DOSModel - elif fitting_net["type"] in ["ener", "direct_force_ener"]: + elif fitting_net_type in ["ener", "direct_force_ener"]: modelcls = EnergyModel - elif fitting_net["type"] == "property": + elif fitting_net_type == "property": modelcls = PropertyModel else: - raise RuntimeError(f"Unknown fitting type: {fitting_net['type']}") + raise RuntimeError(f"Unknown fitting type: {fitting_net_type}") model = modelcls( descriptor=descriptor, From 2c664438eccab4dfee702f383a1563d8a2ed81f5 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Mon, 14 Oct 2024 16:51:40 +0800 Subject: [PATCH 037/193] fix tensorflow bug --- source/api_cc/src/DeepPotTF.cc | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index 271a33d8d1..1e1f2d2039 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -857,8 +857,6 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, extend_firstneigh, extend_dcoord, extend_dtype, extend_nghost, new_idx_map, old_idx_map, lmp_list, dcoord_, datype_, nghost, dspin_, ntypes, ntypes_spin, virtual_len, spin_norm); - // extend_lmp_list = InputNlist(extend_inum, &extend_ilist[0], - // &extend_numneigh[0], &extend_firstneigh[0]); InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], &extend_numneigh[0], &extend_firstneigh[0]); @@ -916,23 +914,27 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, } // bkw map - std::vector dforce_tmp; + std::vector dforce_tmp, datom_energy_tmp, datom_virial_tmp; dforce_tmp.resize(static_cast(nframes) * fwd_map.size() * 3); - datom_energy_.resize(static_cast(nframes) * fwd_map.size()); - datom_virial_.resize(static_cast(nframes) * fwd_map.size() * 9); + datom_energy_tmp.resize(static_cast(nframes) * fwd_map.size()); + datom_virial_tmp.resize(static_cast(nframes) * fwd_map.size() * 9); select_map(dforce_tmp, dforce, bkw_map, 3, nframes, fwd_map.size(), nall_real); - select_map(datom_energy_, datom_energy, bkw_map, 1, nframes, + select_map(datom_energy_tmp, datom_energy, bkw_map, 1, nframes, fwd_map.size(), nall_real); - select_map(datom_virial_, datom_virial, bkw_map, 9, nframes, + select_map(datom_virial_tmp, datom_virial, bkw_map, 9, nframes, fwd_map.size(), nall_real); // backward force and mag. dforce_.resize(static_cast(nframes) * nall * 3); dforce_mag_.resize(static_cast(nframes) * nall * 3); + datom_energy_.resize(static_cast(nframes) * nall); + datom_virial_.resize(static_cast(nframes) * nall * 9); for (int ii = 0; ii < nall; ++ii) { for (int dd = 0; dd < 3; ++dd) { int new_idx = new_idx_map[ii]; dforce_[3*ii + dd] = dforce_tmp[3 * new_idx + dd]; + datom_energy_[ii] = datom_energy_tmp[new_idx]; + datom_virial_[ii] = datom_virial_tmp[new_idx]; if (datype[ii] < ntypes_spin && ii < nloc) { dforce_mag_[3*ii + dd] = dforce_tmp[3 * (new_idx + nloc) + dd]; } else if (datype[ii] < ntypes_spin) { From 4f3d9d436f2f6ba11b80ef882271940daf498ed2 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Mon, 14 Oct 2024 16:59:39 +0800 Subject: [PATCH 038/193] fix mag force bug --- source/api_cc/src/DeepPotTF.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index 1e1f2d2039..4531f6a6ce 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -935,9 +935,9 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, dforce_[3*ii + dd] = dforce_tmp[3 * new_idx + dd]; datom_energy_[ii] = datom_energy_tmp[new_idx]; datom_virial_[ii] = datom_virial_tmp[new_idx]; - if (datype[ii] < ntypes_spin && ii < nloc) { + if (datype_[ii] < ntypes_spin && ii < nloc) { dforce_mag_[3*ii + dd] = dforce_tmp[3 * (new_idx + nloc) + dd]; - } else if (datype[ii] < ntypes_spin) { + } else if (datype_[ii] < ntypes_spin) { dforce_mag_[3*ii + dd] = dforce_tmp[3 * (new_idx + nghost) + dd]; } else { dforce_mag_[3*ii + dd] = 0.0; From 6fe8dde1b6649b2a12d46a868da31aad69b8012c Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 14 Oct 2024 20:54:22 -0400 Subject: [PATCH 039/193] ci: pin ubuntu to 22.04 (#4213) It seems that GitHub starts to point ubuntu-latest to ubuntu-24.04 (xref: https://github.com/actions/runner-images/issues/10636), which brings some breaking changes. For example, cuda 11.8 doesn't support the default compiler in ubuntu-24.04. ## Summary by CodeRabbit - **New Features** - Updated build, test, and analysis workflows to run on Ubuntu 22.04, enhancing compatibility and performance for C++ projects. - **Bug Fixes** - Corrected indentation in the permissions section of the CodeQL workflow. - **Chores** - Adjusted timeout settings for the CodeQL analysis job based on the programming language. --- .github/workflows/build_cc.yml | 2 +- .github/workflows/codeql.yml | 2 +- .github/workflows/test_cc.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build_cc.yml b/.github/workflows/build_cc.yml index 775b88cfd3..a1ac032891 100644 --- a/.github/workflows/build_cc.yml +++ b/.github/workflows/build_cc.yml @@ -11,7 +11,7 @@ name: Build C++ jobs: buildcc: name: Build C++ - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: matrix: include: diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index c912ece8d5..583e7785d9 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -13,7 +13,7 @@ concurrency: jobs: analyze: name: Analyze - runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-22.04' }} timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} permissions: actions: read diff --git a/.github/workflows/test_cc.yml b/.github/workflows/test_cc.yml index ebbfc4d960..768590980f 100644 --- a/.github/workflows/test_cc.yml +++ b/.github/workflows/test_cc.yml @@ -11,7 +11,7 @@ name: Test C++ jobs: testcc: name: Test C++ - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: matrix: check_memleak: [true, false] From 48f8a1ef0815e471b39de4376edf9fae829e2565 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 15 Oct 2024 11:20:48 -0400 Subject: [PATCH 040/193] feat(pt): support `DeepEval.eval_descriptor` (#4214) Fix #4112. ## Summary by CodeRabbit - **New Features** - Introduced a method for evaluating descriptors using the deep potential model. - Added functionality to control descriptor evaluation during model operations. - **Bug Fixes** - Removed conditional skip for descriptor evaluation tests, enhancing test coverage for PyTorch models. - **Tests** - Added a new test class for neighbor list setups in descriptor evaluation. --------- Signed-off-by: Jinzhe Zeng --- deepmd/pt/infer/deep_eval.py | 55 +++++++++++++++++++ .../pt/model/atomic_model/dp_atomic_model.py | 15 +++++ deepmd/pt/model/model/dp_model.py | 12 ++++ source/tests/infer/test_models.py | 2 - 4 files changed, 82 insertions(+), 2 deletions(-) diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index 538dc65371..0a77a38135 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -598,3 +598,58 @@ def eval_typeebd(self) -> np.ndarray: def get_model_def_script(self) -> str: """Get model defination script.""" return self.model_def_script + + def eval_descriptor( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + fparam: Optional[np.ndarray] = None, + aparam: Optional[np.ndarray] = None, + **kwargs: Any, + ) -> np.ndarray: + """Evaluate descriptors by using this DP. + + Parameters + ---------- + coords + The coordinates of atoms. + The array should be of size nframes x natoms x 3 + cells + The cell of the region. + If None then non-PBC is assumed, otherwise using PBC. + The array should be of size nframes x 9 + atom_types + The atom types + The list should contain natoms ints + fparam + The frame parameter. + The array can be of size : + - nframes x dim_fparam. + - dim_fparam. Then all frames are assumed to be provided with the same fparam. + aparam + The atomic parameter + The array can be of size : + - nframes x natoms x dim_aparam. + - natoms x dim_aparam. Then all frames are assumed to be provided with the same aparam. + - dim_aparam. Then all frames and atoms are provided with the same aparam. + + Returns + ------- + descriptor + Descriptors. + """ + model = self.dp.model["Default"] + model.set_eval_descriptor_hook(True) + self.eval( + coords, + cells, + atom_types, + atomic=False, + fparam=fparam, + aparam=aparam, + **kwargs, + ) + descriptor = model.eval_descriptor() + model.set_eval_descriptor_hook(False) + return to_numpy_array(descriptor) diff --git a/deepmd/pt/model/atomic_model/dp_atomic_model.py b/deepmd/pt/model/atomic_model/dp_atomic_model.py index 936a1fead3..edb1253234 100644 --- a/deepmd/pt/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pt/model/atomic_model/dp_atomic_model.py @@ -62,6 +62,19 @@ def __init__( self.sel = self.descriptor.get_sel() self.fitting_net = fitting super().init_out_stat() + self.enable_eval_descriptor_hook = False + self.eval_descriptor_list = [] + + eval_descriptor_list: list[torch.Tensor] + + def set_eval_descriptor_hook(self, enable: bool) -> None: + """Set the hook for evaluating descriptor and clear the cache for descriptor list.""" + self.enable_eval_descriptor_hook = enable + self.eval_descriptor_list = [] + + def eval_descriptor(self) -> torch.Tensor: + """Evaluate the descriptor.""" + return torch.concat(self.eval_descriptor_list) @torch.jit.export def fitting_output_def(self) -> FittingOutputDef: @@ -192,6 +205,8 @@ def forward_atomic( comm_dict=comm_dict, ) assert descriptor is not None + if self.enable_eval_descriptor_hook: + self.eval_descriptor_list.append(descriptor) # energy, force fit_ret = self.fitting_net( descriptor, diff --git a/deepmd/pt/model/model/dp_model.py b/deepmd/pt/model/model/dp_model.py index 8659526c49..bd278ed787 100644 --- a/deepmd/pt/model/model/dp_model.py +++ b/deepmd/pt/model/model/dp_model.py @@ -3,6 +3,8 @@ Optional, ) +import torch + from deepmd.pt.model.descriptor.base_descriptor import ( BaseDescriptor, ) @@ -52,3 +54,13 @@ def get_fitting_net(self): def get_descriptor(self): """Get the descriptor.""" return self.atomic_model.descriptor + + @torch.jit.export + def set_eval_descriptor_hook(self, enable: bool) -> None: + """Set the hook for evaluating descriptor and clear the cache for descriptor list.""" + self.atomic_model.set_eval_descriptor_hook(enable) + + @torch.jit.export + def eval_descriptor(self) -> torch.Tensor: + """Evaluate the descriptor.""" + return self.atomic_model.eval_descriptor() diff --git a/source/tests/infer/test_models.py b/source/tests/infer/test_models.py index 6b62e994aa..2b0f292046 100644 --- a/source/tests/infer/test_models.py +++ b/source/tests/infer/test_models.py @@ -153,8 +153,6 @@ def test_1frame_atm(self): def test_descriptor(self): _, extension = self.param - if extension == ".pth": - self.skipTest("eval_descriptor not supported for PyTorch models") for ii, result in enumerate(self.case.results): if result.descriptor is None: continue From 5c092e673e61611e3116696374ad51b22ec37357 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 15 Oct 2024 11:24:34 -0400 Subject: [PATCH 041/193] fix: fix average training time for restart (#4212) Fix #4208. ## Summary by CodeRabbit - **New Features** - Enhanced training step management and logging for improved tracking. - Updated average training time calculations for more accurate reporting. - Refined model and checkpoint saving logic based on new tracking metrics. - Improved logging clarity for learning rates and losses. - **Bug Fixes** - Resolved issues related to inaccurate training time and logging conditions. - **Chores** - General code cleanup for better readability and organization. Signed-off-by: Jinzhe Zeng --- deepmd/pt/train/training.py | 14 ++++++++------ deepmd/tf/train/trainer.py | 20 +++++++++++++++----- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index 95c73bd83c..4d746e84c0 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -889,8 +889,9 @@ def log_loss_valid(_task_key="Default"): ) # the first training time is not accurate if ( - _step_id + 1 - ) > self.disp_freq or self.num_steps < 2 * self.disp_freq: + (_step_id + 1 - self.start_step) > self.disp_freq + or self.num_steps - self.start_step < 2 * self.disp_freq + ): self.total_train_time += train_time if fout: @@ -981,13 +982,14 @@ def log_loss_valid(_task_key="Default"): with open("checkpoint", "w") as f: f.write(str(self.latest_model)) - if self.timing_in_training and self.num_steps // self.disp_freq > 0: - if self.num_steps >= 2 * self.disp_freq: + elapsed_batch = self.num_steps - self.start_step + if self.timing_in_training and elapsed_batch // self.disp_freq > 0: + if self.start_step >= 2 * self.disp_freq: log.info( "average training time: %.4f s/batch (exclude first %d batches)", self.total_train_time / ( - self.num_steps // self.disp_freq * self.disp_freq + elapsed_batch // self.disp_freq * self.disp_freq - self.disp_freq ), self.disp_freq, @@ -996,7 +998,7 @@ def log_loss_valid(_task_key="Default"): log.info( "average training time: %.4f s/batch", self.total_train_time - / (self.num_steps // self.disp_freq * self.disp_freq), + / (elapsed_batch // self.disp_freq * self.disp_freq), ) if JIT: diff --git a/deepmd/tf/train/trainer.py b/deepmd/tf/train/trainer.py index 7f9aeb27d2..9f353f2e32 100644 --- a/deepmd/tf/train/trainer.py +++ b/deepmd/tf/train/trainer.py @@ -416,6 +416,8 @@ def train(self, train_data=None, valid_data=None): fp = open(self.disp_file, "a") cur_batch = run_sess(self.sess, self.global_step) + start_batch = cur_batch + elapsed_batch = stop_batch - start_batch is_first_step = True self.cur_batch = cur_batch log.info( @@ -552,7 +554,10 @@ def train(self, train_data=None, valid_data=None): ) ) # the first training time is not accurate - if cur_batch > self.disp_freq or stop_batch < 2 * self.disp_freq: + if ( + cur_batch - start_batch > self.disp_freq + or elapsed_batch < 2 * self.disp_freq + ): total_train_time += train_time train_time = 0 wall_time_tic = toc @@ -594,18 +599,23 @@ def train(self, train_data=None, valid_data=None): self.save_checkpoint(cur_batch) if self.run_opt.is_chief: fp.close() - if self.timing_in_training and stop_batch // self.disp_freq > 0: - if stop_batch >= 2 * self.disp_freq: + elapsed_batch = stop_batch - start_batch + if self.timing_in_training and elapsed_batch // self.disp_freq > 0: + if elapsed_batch >= 2 * self.disp_freq: log.info( "average training time: %.4f s/batch (exclude first %d batches)", total_train_time - / (stop_batch // self.disp_freq * self.disp_freq - self.disp_freq), + / ( + elapsed_batch // self.disp_freq * self.disp_freq + - self.disp_freq + ), self.disp_freq, ) else: log.info( "average training time: %.4f s/batch", - total_train_time / (stop_batch // self.disp_freq * self.disp_freq), + total_train_time + / (elapsed_batch // self.disp_freq * self.disp_freq), ) if self.profiling and self.run_opt.is_chief: From 16172e6cbdc881da02779da230d636d41e86c6e9 Mon Sep 17 00:00:00 2001 From: Lysithea <52808607+CaRoLZhangxy@users.noreply.github.com> Date: Tue, 15 Oct 2024 23:25:30 +0800 Subject: [PATCH 042/193] fix(pt): keep mapping not none during lmp steps when nghost == 0 (#4209) enhancement on https://github.com/deepmodeling/deepmd-kit/pull/4144 ## Summary by CodeRabbit - **New Features** - Enhanced tensor mapping capabilities with the addition of a new `mapping_tensor` variable. - Updated `compute` method to handle ghost atoms and support improved tensor creation logic. - Overloaded `computew` methods to support both double and float types. - **Bug Fixes** - Improved error handling in the `translate_error` method for better exception management. --- source/api_cc/include/DeepPotPT.h | 1 + source/api_cc/src/DeepPotPT.cc | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/source/api_cc/include/DeepPotPT.h b/source/api_cc/include/DeepPotPT.h index 973c02c434..4144249367 100644 --- a/source/api_cc/include/DeepPotPT.h +++ b/source/api_cc/include/DeepPotPT.h @@ -338,6 +338,7 @@ class DeepPotPT : public DeepPotBase { int do_message_passing; // 1:dpa2 model 0:others bool gpu_enabled; at::Tensor firstneigh_tensor; + c10::optional mapping_tensor; torch::Dict comm_dict; /** * @brief Translate PyTorch exceptions to the DeePMD-kit exception. diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index c03576635a..84629042f4 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -164,7 +164,6 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, std::vector atype_64(datype.begin(), datype.end()); at::Tensor atype_Tensor = torch::from_blob(atype_64.data(), {1, nall_real}, int_option).to(device); - c10::optional mapping_tensor; if (ago == 0) { nlist_data.copy_from_nlist(lmp_list); nlist_data.shuffle_exclude_empty(fwd_map); From d24d7e7a4e67c4e04dce6c3a4ba32e295ef5677f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 16:01:42 +0000 Subject: [PATCH 043/193] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/tf/entrypoints/freeze.py | 2 +- source/api_c/include/c_api.h | 90 ++++++++++++++-------------- source/api_c/src/c_api.cc | 8 +-- source/api_cc/include/DeepPotTF.h | 2 +- source/api_cc/src/DeepPot.cc | 40 +++++++------ source/api_cc/src/DeepPotTF.cc | 99 ++++++++++++++++--------------- 6 files changed, 122 insertions(+), 119 deletions(-) diff --git a/deepmd/tf/entrypoints/freeze.py b/deepmd/tf/entrypoints/freeze.py index 6ca45773b5..71485e62b2 100755 --- a/deepmd/tf/entrypoints/freeze.py +++ b/deepmd/tf/entrypoints/freeze.py @@ -262,7 +262,7 @@ def freeze_graph( "fitting_attr/aparam_nall", "spin_attr/ntypes_spin", "spin_attr/virtual_len", - "spin_attr/spin_norm" + "spin_attr/spin_norm", ] different_set = set(output_node) - set(input_node) if different_set: diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index f62f438304..7b00c3aa1a 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -508,45 +508,45 @@ extern void DP_DeepPotComputeNListf2SP(DP_DeepPot* dp, float* atomic_energy, float* atomic_virial); - /** - * @brief Evaluate the energy, force and virial by using a DP with the mixed - *type. (double version) - * @param[in] dp The DP to use. - * @param[in] nframes The number of frames. - * @param[in] natoms The number of atoms. - * @param[in] coord The coordinates of atoms. The array should be of size - *natoms x 3. - * @param[in] atype The atom types. The array should contain nframes x - *natoms ints. - * @param[in] box The cell of the region. The array should be of size 9. - *Pass NULL if pbc is not used. - * @param[in] fparam The frame parameters. The array can be of size nframes - *x dim_fparam. - * @param[in] aparam The atom parameters. The array can be of size nframes x - *dim_aparam. - * @param[out] energy Output energy. - * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] virial Output virial. The array should be of size 9. - * @param[out] atomic_energy Output atomic energy. The array should be of - *size natoms. - * @param[out] atomic_virial Output atomic virial. The array should be of - *size natoms x 9. - * @warning The output arrays should be allocated before calling this - *function. Pass NULL if not required. - **/ - extern void DP_DeepPotComputeMixedType(DP_DeepPot* dp, - const int nframes, - const int natoms, - const double* coord, - const int* atype, - const double* cell, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* virial, - double* atomic_energy, - double* atomic_virial); +/** + * @brief Evaluate the energy, force and virial by using a DP with the mixed + *type. (double version) + * @param[in] dp The DP to use. + * @param[in] nframes The number of frames. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size + *natoms x 3. + * @param[in] atype The atom types. The array should contain nframes x + *natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. + *Pass NULL if pbc is not used. + * @param[in] fparam The frame parameters. The array can be of size nframes + *x dim_fparam. + * @param[in] aparam The atom parameters. The array can be of size nframes x + *dim_aparam. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of + *size natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of + *size natoms x 9. + * @warning The output arrays should be allocated before calling this + *function. Pass NULL if not required. + **/ +extern void DP_DeepPotComputeMixedType(DP_DeepPot* dp, + const int nframes, + const int natoms, + const double* coord, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* virial, + double* atomic_energy, + double* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP with the mixed *type. (float version) @@ -1003,12 +1003,12 @@ void DP_DeepPotModelDeviComputeNListf2SP(DP_DeepPotModelDevi* dp, float* atomic_energy, float* atomic_virial); - /** - * @brief Get the type map of a DP model deviation. - * @param[in] dp The DP model deviation to use. - * @return The cutoff radius. - */ - double DP_DeepPotModelDeviGetCutoff(DP_DeepPotModelDevi* dp); +/** + * @brief Get the type map of a DP model deviation. + * @param[in] dp The DP model deviation to use. + * @return The cutoff radius. + */ +double DP_DeepPotModelDeviGetCutoff(DP_DeepPotModelDevi* dp); /** * @brief Get the number of types of a DP model deviation. diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index 3241c3e63e..cdc5c0698d 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -802,12 +802,12 @@ void DP_DeepPotModelDeviComputeNList_variant_sp(DP_DeepPotModelDevi* dp, std::vector> f, fm, v, ae, av; if (atomic_energy || atomic_virial) { DP_REQUIRES_OK( - dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, nghost, - nlist->nl, ago, fparam_, aparam_)); + dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, + nghost, nlist->nl, ago, fparam_, aparam_)); } else { DP_REQUIRES_OK( - dp, dp->dp.compute(e, f, fm, v, coord_, spin_, atype_, cell_, - nghost, nlist->nl, ago, fparam_, aparam_)); + dp, dp->dp.compute(e, f, fm, v, coord_, spin_, atype_, cell_, nghost, + nlist->nl, ago, fparam_, aparam_)); } // 2D vector to 2D array, flatten first if (energy) { diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index dd42a2ae3b..4fe53d58c2 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -356,7 +356,7 @@ class DeepPotTF : public DeepPotBase { const int numb_types_spin, const std::vector& virtual_len, const std::vector& spin_norm); - void cum_sum(std::map &, std::map &); + void cum_sum(std::map&, std::map&); private: tensorflow::Session* session; diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc index 284ef784f5..03c90efc67 100644 --- a/source/api_cc/src/DeepPot.cc +++ b/source/api_cc/src/DeepPot.cc @@ -952,19 +952,20 @@ template void DeepPotModelDevi::compute( const std::vector& aparam); template -void DeepPotModelDevi::compute(std::vector& all_energy, - std::vector>& all_force, - std::vector>& all_force_mag, - std::vector>& all_virial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_) { +void DeepPotModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_) { if (numb_models == 0) { return; } @@ -973,8 +974,9 @@ void DeepPotModelDevi::compute(std::vector& all_energy, all_force_mag.resize(numb_models); all_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii].compute(all_energy[ii], all_force[ii], all_force_mag[ii], all_virial[ii], dcoord_, dspin_, - datype_, dbox, nghost, lmp_list, ago, fparam, aparam_); + dps[ii].compute(all_energy[ii], all_force[ii], all_force_mag[ii], + all_virial[ii], dcoord_, dspin_, datype_, dbox, nghost, + lmp_list, ago, fparam, aparam_); } } @@ -1008,7 +1010,6 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); - template void DeepPotModelDevi::compute( std::vector& all_energy, @@ -1096,9 +1097,10 @@ void DeepPotModelDevi::compute( all_atom_energy.resize(numb_models); all_atom_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii].compute(all_energy[ii], all_force[ii], all_force_mag[ii], all_virial[ii], - all_atom_energy[ii], all_atom_virial[ii], dcoord_, dspin_, datype_, - dbox, nghost, lmp_list, ago, fparam, aparam_); + dps[ii].compute(all_energy[ii], all_force[ii], all_force_mag[ii], + all_virial[ii], all_atom_energy[ii], all_atom_virial[ii], + dcoord_, dspin_, datype_, dbox, nghost, lmp_list, ago, + fparam, aparam_); } } diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index 4531f6a6ce..9e0caf6ff8 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -509,8 +509,8 @@ VT DeepPotTF::get_scalar(const std::string& name) const { } template -void DeepPotTF::get_vector(std::vector &vec, - const std::string &name) const { +void DeepPotTF::get_vector(std::vector& vec, + const std::string& name) const { session_get_vector(vec, session, name); } @@ -857,9 +857,8 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, extend_firstneigh, extend_dcoord, extend_dtype, extend_nghost, new_idx_map, old_idx_map, lmp_list, dcoord_, datype_, nghost, dspin_, ntypes, ntypes_spin, virtual_len, spin_norm); - InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], - &extend_numneigh[0], - &extend_firstneigh[0]); + InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], &extend_numneigh[0], + &extend_firstneigh[0]); std::vector fparam; std::vector aparam_; validate_fparam_aparam(nframes, (aparam_nall ? nall : nloc), fparam_, @@ -932,15 +931,15 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, for (int ii = 0; ii < nall; ++ii) { for (int dd = 0; dd < 3; ++dd) { int new_idx = new_idx_map[ii]; - dforce_[3*ii + dd] = dforce_tmp[3 * new_idx + dd]; + dforce_[3 * ii + dd] = dforce_tmp[3 * new_idx + dd]; datom_energy_[ii] = datom_energy_tmp[new_idx]; datom_virial_[ii] = datom_virial_tmp[new_idx]; if (datype_[ii] < ntypes_spin && ii < nloc) { - dforce_mag_[3*ii + dd] = dforce_tmp[3 * (new_idx + nloc) + dd]; + dforce_mag_[3 * ii + dd] = dforce_tmp[3 * (new_idx + nloc) + dd]; } else if (datype_[ii] < ntypes_spin) { - dforce_mag_[3*ii + dd] = dforce_tmp[3 * (new_idx + nghost) + dd]; + dforce_mag_[3 * ii + dd] = dforce_tmp[3 * (new_idx + nghost) + dd]; } else { - dforce_mag_[3*ii + dd] = 0.0; + dforce_mag_[3 * ii + dd] = 0.0; } } } @@ -1264,7 +1263,7 @@ void DeepPotTF::computew_mixed_type(std::vector& ener, coord, atype, box, fparam, aparam, atomic); } -void DeepPotTF::cum_sum(std::map &sum, std::map &vec) { +void DeepPotTF::cum_sum(std::map& sum, std::map& vec) { sum[0] = 0; for (int ii = 1; ii < vec.size(); ++ii) { sum[ii] = sum[ii - 1] + vec[ii - 1]; @@ -1441,43 +1440,45 @@ void DeepPotTF::extend(int& extend_inum, } } -template void DeepPotTF::extend(int& extend_inum, - std::vector& extend_ilist, - std::vector& extend_numneigh, - std::vector>& extend_neigh, - std::vector& extend_firstneigh, - std::vector& extend_dcoord, - std::vector& extend_atype, - int& extend_nghost, - std::map& new_idx_map, - std::map& old_idx_map, - const InputNlist& lmp_list, - const std::vector& dcoord, - const std::vector& atype, - const int nghost, - const std::vector& spin, - const int numb_types, - const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm); - -template void DeepPotTF::extend(int& extend_inum, - std::vector& extend_ilist, - std::vector& extend_numneigh, - std::vector>& extend_neigh, - std::vector& extend_firstneigh, - std::vector& extend_dcoord, - std::vector& extend_atype, - int& extend_nghost, - std::map& new_idx_map, - std::map& old_idx_map, - const InputNlist& lmp_list, - const std::vector& dcoord, - const std::vector& atype, - const int nghost, - const std::vector& spin, - const int numb_types, - const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm); +template void DeepPotTF::extend( + int& extend_inum, + std::vector& extend_ilist, + std::vector& extend_numneigh, + std::vector>& extend_neigh, + std::vector& extend_firstneigh, + std::vector& extend_dcoord, + std::vector& extend_atype, + int& extend_nghost, + std::map& new_idx_map, + std::map& old_idx_map, + const InputNlist& lmp_list, + const std::vector& dcoord, + const std::vector& atype, + const int nghost, + const std::vector& spin, + const int numb_types, + const int numb_types_spin, + const std::vector& virtual_len, + const std::vector& spin_norm); + +template void DeepPotTF::extend( + int& extend_inum, + std::vector& extend_ilist, + std::vector& extend_numneigh, + std::vector>& extend_neigh, + std::vector& extend_firstneigh, + std::vector& extend_dcoord, + std::vector& extend_atype, + int& extend_nghost, + std::map& new_idx_map, + std::map& old_idx_map, + const InputNlist& lmp_list, + const std::vector& dcoord, + const std::vector& atype, + const int nghost, + const std::vector& spin, + const int numb_types, + const int numb_types_spin, + const std::vector& virtual_len, + const std::vector& spin_norm); #endif From 593bf81379a7afb64f2d77dd3060d5292feae351 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Wed, 16 Oct 2024 00:10:24 +0800 Subject: [PATCH 044/193] Update c_api.h --- source/api_c/include/c_api.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index 7b00c3aa1a..7794e553d3 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -514,14 +514,14 @@ extern void DP_DeepPotComputeNListf2SP(DP_DeepPot* dp, * @param[in] dp The DP to use. * @param[in] nframes The number of frames. * @param[in] natoms The number of atoms. - * @param[in] coord The coordinates of atoms. The array should be of size - *natoms x 3. - * @param[in] atype The atom types. The array should contain nframes x - *natoms ints. - * @param[in] box The cell of the region. The array should be of size 9. - *Pass NULL if pbc is not used. - * @param[in] fparam The frame parameters. The array can be of size nframes - *x dim_fparam. + * @param[in] coord The coordinates of atoms. The array should be of size natoms + *x 3. + * @param[in] atype The atom types. The array should contain nframes x natoms + *ints. + * @param[in] box The cell of the region. The array should be of size 9. Pass + *NULL if pbc is not used. + * @param[in] fparam The frame parameters. The array can be of size nframes x + *dim_fparam. * @param[in] aparam The atom parameters. The array can be of size nframes x *dim_aparam. * @param[out] energy Output energy. From 3466e34b5dacafb47c071d3affde9747f52c503b Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Wed, 16 Oct 2024 00:11:50 +0800 Subject: [PATCH 045/193] Update c_api.h --- source/api_c/include/c_api.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index 7794e553d3..9e4631f2ac 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -527,12 +527,12 @@ extern void DP_DeepPotComputeNListf2SP(DP_DeepPot* dp, * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. - * @param[out] atomic_energy Output atomic energy. The array should be of - *size natoms. - * @param[out] atomic_virial Output atomic virial. The array should be of - *size natoms x 9. - * @warning The output arrays should be allocated before calling this - *function. Pass NULL if not required. + * @param[out] atomic_energy Output atomic energy. The array should be of size + *natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of size + *natoms x 9. + * @warning The output arrays should be allocated before calling this function. + *Pass NULL if not required. **/ extern void DP_DeepPotComputeMixedType(DP_DeepPot* dp, const int nframes, From cfb47310e1d070824347f48883222d3605907ee4 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 15 Oct 2024 23:39:16 -0400 Subject: [PATCH 046/193] feat(jax/array-api): se_e2_a (#4217) ## Summary by CodeRabbit - **New Features** - Introduced a new class `DescrptSeAArrayAPI` for enhanced array compatibility. - Added a new class `DescrptSeA` integrated with the Flax library for neural network modules. - Improved handling of atomic types and neighbor lists for better performance and clarity. - **Tests** - Enhanced test suite to support additional backends and configurations, including JAX and strict array API. - Added new evaluation methods for testing across different frameworks. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- deepmd/dpmodel/descriptor/se_e2_a.py | 107 ++++++++++++++++-- deepmd/dpmodel/utils/nlist.py | 20 ++-- deepmd/jax/descriptor/se_e2_a.py | 33 ++++++ .../array_api_strict/descriptor/se_e2_a.py | 32 ++++++ .../consistent/descriptor/test_se_e2_a.py | 55 +++++++++ 5 files changed, 230 insertions(+), 17 deletions(-) create mode 100644 deepmd/jax/descriptor/se_e2_a.py create mode 100644 source/tests/array_api_strict/descriptor/se_e2_a.py diff --git a/deepmd/dpmodel/descriptor/se_e2_a.py b/deepmd/dpmodel/descriptor/se_e2_a.py index 29577ef79e..d29ce8862e 100644 --- a/deepmd/dpmodel/descriptor/se_e2_a.py +++ b/deepmd/dpmodel/descriptor/se_e2_a.py @@ -7,6 +7,7 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel import ( @@ -14,6 +15,9 @@ PRECISION_DICT, NativeOP, ) +from deepmd.dpmodel.common import ( + to_numpy_array, +) from deepmd.dpmodel.utils import ( EmbeddingNet, EnvMat, @@ -186,15 +190,15 @@ def __init__( self.reinit_exclude(exclude_types) in_dim = 1 # not considiering type embedding - self.embeddings = NetworkCollection( + embeddings = NetworkCollection( ntypes=self.ntypes, ndim=(1 if self.type_one_side else 2), network_type="embedding_network", ) for ii, embedding_idx in enumerate( - itertools.product(range(self.ntypes), repeat=self.embeddings.ndim) + itertools.product(range(self.ntypes), repeat=embeddings.ndim) ): - self.embeddings[embedding_idx] = EmbeddingNet( + embeddings[embedding_idx] = EmbeddingNet( in_dim, self.neuron, self.activation_function, @@ -202,8 +206,9 @@ def __init__( self.precision, seed=child_seed(seed, ii), ) + self.embeddings = embeddings self.env_mat = EnvMat(self.rcut, self.rcut_smth, protection=self.env_protection) - self.nnei = np.sum(self.sel) + self.nnei = np.sum(self.sel).item() self.davg = np.zeros( [self.ntypes, self.nnei, 4], dtype=PRECISION_DICT[self.precision] ) @@ -211,6 +216,7 @@ def __init__( [self.ntypes, self.nnei, 4], dtype=PRECISION_DICT[self.precision] ) self.orig_sel = self.sel + self.sel_cumsum = [0, *np.cumsum(self.sel).tolist()] def __setitem__(self, key, value): if key in ("avg", "data_avg", "davg"): @@ -321,8 +327,9 @@ def cal_g( ss, embedding_idx, ): + xp = array_api_compat.array_namespace(ss) nf_times_nloc, nnei = ss.shape[0:2] - ss = ss.reshape(nf_times_nloc, nnei, 1) + ss = xp.reshape(ss, (nf_times_nloc, nnei, 1)) # (nf x nloc) x nnei x ng gg = self.embeddings[embedding_idx].call(ss) return gg @@ -444,8 +451,8 @@ def serialize(self) -> dict: "env_mat": self.env_mat.serialize(), "embeddings": self.embeddings.serialize(), "@variables": { - "davg": self.davg, - "dstd": self.dstd, + "davg": to_numpy_array(self.davg), + "dstd": to_numpy_array(self.dstd), }, "type_map": self.type_map, } @@ -497,3 +504,89 @@ def update_sel( train_data, type_map, local_jdata_cpy["rcut"], local_jdata_cpy["sel"], False ) return local_jdata_cpy, min_nbor_dist + + +class DescrptSeAArrayAPI(DescrptSeA): + def call( + self, + coord_ext, + atype_ext, + nlist, + mapping: Optional[np.ndarray] = None, + ): + """Compute the descriptor. + + Parameters + ---------- + coord_ext + The extended coordinates of atoms. shape: nf x (nallx3) + atype_ext + The extended aotm types. shape: nf x nall + nlist + The neighbor list. shape: nf x nloc x nnei + mapping + The index mapping from extended to lcoal region. not used by this descriptor. + + Returns + ------- + descriptor + The descriptor. shape: nf x nloc x (ng x axis_neuron) + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + g2 + The rotationally invariant pair-partical representation. + this descriptor returns None + h2 + The rotationally equivariant pair-partical representation. + this descriptor returns None + sw + The smooth switch function. + """ + if not self.type_one_side: + raise NotImplementedError( + "type_one_side == False is not supported in DescrptSeAArrayAPI" + ) + del mapping + xp = array_api_compat.array_namespace(coord_ext, atype_ext, nlist) + input_dtype = coord_ext.dtype + # nf x nloc x nnei x 4 + rr, diff, ww = self.env_mat.call( + coord_ext, atype_ext, nlist, self.davg, self.dstd + ) + nf, nloc, nnei, _ = rr.shape + sec = xp.asarray(self.sel_cumsum) + + ng = self.neuron[-1] + gr = xp.zeros([nf * nloc, ng, 4], dtype=self.dstd.dtype) + exclude_mask = self.emask.build_type_exclude_mask(nlist, atype_ext) + # merge nf and nloc axis, so for type_one_side == False, + # we don't require atype is the same in all frames + exclude_mask = xp.reshape(exclude_mask, (nf * nloc, nnei)) + rr = xp.reshape(rr, (nf * nloc, nnei, 4)) + rr = xp.astype(rr, self.dstd.dtype) + + for embedding_idx in itertools.product( + range(self.ntypes), repeat=self.embeddings.ndim + ): + (tt,) = embedding_idx + mm = exclude_mask[:, sec[tt] : sec[tt + 1]] + tr = rr[:, sec[tt] : sec[tt + 1], :] + tr = tr * xp.astype(mm[:, :, None], tr.dtype) + ss = tr[..., 0:1] + gg = self.cal_g(ss, embedding_idx) + # gr_tmp = xp.einsum("lni,lnj->lij", gg, tr) + gr_tmp = xp.sum(gg[:, :, :, None] * tr[:, :, None, :], axis=1) + gr += gr_tmp + gr = xp.reshape(gr, (nf, nloc, ng, 4)) + # nf x nloc x ng x 4 + gr /= self.nnei + gr1 = gr[:, :, : self.axis_neuron, :] + # nf x nloc x ng x ng1 + # grrg = xp.einsum("flid,fljd->flij", gr, gr1) + grrg = xp.sum(gr[:, :, :, None, :] * gr1[:, :, None, :, :], axis=4) + # nf x nloc x (ng x ng1) + grrg = xp.astype( + xp.reshape(grrg, (nf, nloc, ng * self.axis_neuron)), input_dtype + ) + return grrg, gr[..., 1:], None, None, ww diff --git a/deepmd/dpmodel/utils/nlist.py b/deepmd/dpmodel/utils/nlist.py index 4806fa4cd8..c56f1bc061 100644 --- a/deepmd/dpmodel/utils/nlist.py +++ b/deepmd/dpmodel/utils/nlist.py @@ -163,20 +163,20 @@ def nlist_distinguish_types( xp = array_api_compat.array_namespace(nlist, atype) nf, nloc, _ = nlist.shape ret_nlist = [] - tmp_atype = xp.tile(atype[:, None], [1, nloc, 1]) + tmp_atype = xp.tile(atype[:, None, :], (1, nloc, 1)) mask = nlist == -1 - tnlist_0 = nlist.copy() - tnlist_0[mask] = 0 - tnlist = xp_take_along_axis(tmp_atype, tnlist_0, axis=2).squeeze() - tnlist = xp.where(mask, -1, tnlist) - snsel = tnlist.shape[2] + tnlist_0 = xp.where(mask, xp.zeros_like(nlist), nlist) + tnlist = xp_take_along_axis(tmp_atype, tnlist_0, axis=2) + tnlist = xp.where(mask, xp.full_like(tnlist, -1), tnlist) for ii, ss in enumerate(sel): - pick_mask = (tnlist == ii).astype(xp.int32) - sorted_indices = xp.argsort(-pick_mask, kind="stable", axis=-1) + pick_mask = xp.astype(tnlist == ii, xp.int32) + sorted_indices = xp.argsort(-pick_mask, stable=True, axis=-1) pick_mask_sorted = -xp.sort(-pick_mask, axis=-1) inlist = xp_take_along_axis(nlist, sorted_indices, axis=2) - inlist = xp.where(~pick_mask_sorted.astype(bool), -1, inlist) - ret_nlist.append(xp.split(inlist, [ss, snsel - ss], axis=-1)[0]) + inlist = xp.where( + ~xp.astype(pick_mask_sorted, xp.bool), xp.full_like(inlist, -1), inlist + ) + ret_nlist.append(inlist[..., :ss]) ret = xp.concat(ret_nlist, axis=-1) return ret diff --git a/deepmd/jax/descriptor/se_e2_a.py b/deepmd/jax/descriptor/se_e2_a.py new file mode 100644 index 0000000000..a60a4e9af1 --- /dev/null +++ b/deepmd/jax/descriptor/se_e2_a.py @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.se_e2_a import DescrptSeAArrayAPI as DescrptSeADP +from deepmd.jax.common import ( + flax_module, + to_jax_array, +) +from deepmd.jax.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.jax.utils.network import ( + NetworkCollection, +) + + +@flax_module +class DescrptSeA(DescrptSeADP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"dstd", "davg"}: + value = to_jax_array(value) + elif name in {"embeddings"}: + if value is not None: + value = NetworkCollection.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) diff --git a/source/tests/array_api_strict/descriptor/se_e2_a.py b/source/tests/array_api_strict/descriptor/se_e2_a.py new file mode 100644 index 0000000000..654b9f8925 --- /dev/null +++ b/source/tests/array_api_strict/descriptor/se_e2_a.py @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.se_e2_a import DescrptSeAArrayAPI as DescrptSeADP + +from ..common import ( + to_array_api_strict_array, +) +from ..utils.exclude_mask import ( + PairExcludeMask, +) +from ..utils.network import ( + NetworkCollection, +) + + +class DescrptSeA(DescrptSeADP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"dstd", "davg"}: + value = to_array_api_strict_array(value) + elif name in {"embeddings"}: + if value is not None: + value = NetworkCollection.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) diff --git a/source/tests/consistent/descriptor/test_se_e2_a.py b/source/tests/consistent/descriptor/test_se_e2_a.py index 2563ee1d6d..286703e21d 100644 --- a/source/tests/consistent/descriptor/test_se_e2_a.py +++ b/source/tests/consistent/descriptor/test_se_e2_a.py @@ -12,6 +12,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -33,6 +35,17 @@ descrpt_se_a_args, ) +if INSTALLED_JAX: + from deepmd.jax.descriptor.se_e2_a import DescrptSeA as DescrptSeAJAX +else: + DescrptSeAJAX = None +if INSTALLED_ARRAY_API_STRICT: + from ...array_api_strict.descriptor.se_e2_a import ( + DescrptSeA as DescrptSeAArrayAPIStrict, + ) +else: + DescrptSeAArrayAPIStrict = None + @parameterized( (True, False), # resnet_dt @@ -98,9 +111,33 @@ def skip_tf(self) -> bool: ) = self.param return env_protection != 0.0 + @property + def skip_jax(self) -> bool: + ( + resnet_dt, + type_one_side, + excluded_types, + precision, + env_protection, + ) = self.param + return not type_one_side or not INSTALLED_JAX + + @property + def skip_array_api_strict(self) -> bool: + ( + resnet_dt, + type_one_side, + excluded_types, + precision, + env_protection, + ) = self.param + return not type_one_side or not INSTALLED_ARRAY_API_STRICT + tf_class = DescrptSeATF dp_class = DescrptSeADP pt_class = DescrptSeAPT + jax_class = DescrptSeAJAX + array_api_strict_class = DescrptSeAArrayAPIStrict args = descrpt_se_a_args() def setUp(self): @@ -177,6 +214,24 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) + def eval_jax(self, jax_obj: Any) -> Any: + return self.eval_jax_descriptor( + jax_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + return self.eval_array_api_strict_descriptor( + array_api_strict_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) From 5050f611133665580fb44cd62cbe6d84d4864ac8 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 15 Oct 2024 23:40:01 -0400 Subject: [PATCH 047/193] feat(jax/array-api): DOS fitting (#4218) ## Summary by CodeRabbit - **New Features** - Introduced the `DOSFittingNet` class for enhanced fitting capabilities. - Added methods to evaluate different backends (JAX and Array API Strict) for computing density of states. - Enhanced testing framework to conditionally include tests based on library availability. - **Bug Fixes** - Improved serialization of the `bias_atom_e` variable to ensure consistent data representation. - **Tests** - Expanded the `TestDOS` class with new attributes and methods for better backend evaluation. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- deepmd/dpmodel/fitting/dos_fitting.py | 3 +- deepmd/jax/fitting/fitting.py | 8 +++ .../tests/array_api_strict/fitting/fitting.py | 7 +++ source/tests/consistent/fitting/test_dos.py | 59 +++++++++++++++++++ 4 files changed, 76 insertions(+), 1 deletion(-) diff --git a/deepmd/dpmodel/fitting/dos_fitting.py b/deepmd/dpmodel/fitting/dos_fitting.py index e9cd4a17ae..32225ac6c0 100644 --- a/deepmd/dpmodel/fitting/dos_fitting.py +++ b/deepmd/dpmodel/fitting/dos_fitting.py @@ -10,6 +10,7 @@ from deepmd.dpmodel.common import ( DEFAULT_PRECISION, + to_numpy_array, ) from deepmd.dpmodel.fitting.invar_fitting import ( InvarFitting, @@ -89,6 +90,6 @@ def serialize(self) -> dict: **super().serialize(), "type": "dos", } - dd["@variables"]["bias_atom_e"] = self.bias_atom_e + dd["@variables"]["bias_atom_e"] = to_numpy_array(self.bias_atom_e) return dd diff --git a/deepmd/jax/fitting/fitting.py b/deepmd/jax/fitting/fitting.py index 27ad791db9..284213c70a 100644 --- a/deepmd/jax/fitting/fitting.py +++ b/deepmd/jax/fitting/fitting.py @@ -3,6 +3,7 @@ Any, ) +from deepmd.dpmodel.fitting.dos_fitting import DOSFittingNet as DOSFittingNetDP from deepmd.dpmodel.fitting.ener_fitting import EnergyFittingNet as EnergyFittingNetDP from deepmd.jax.common import ( flax_module, @@ -37,3 +38,10 @@ class EnergyFittingNet(EnergyFittingNetDP): def __setattr__(self, name: str, value: Any) -> None: value = setattr_for_general_fitting(name, value) return super().__setattr__(name, value) + + +@flax_module +class DOSFittingNet(DOSFittingNetDP): + def __setattr__(self, name: str, value: Any) -> None: + value = setattr_for_general_fitting(name, value) + return super().__setattr__(name, value) diff --git a/source/tests/array_api_strict/fitting/fitting.py b/source/tests/array_api_strict/fitting/fitting.py index 2e6bd9fe25..8b65320203 100644 --- a/source/tests/array_api_strict/fitting/fitting.py +++ b/source/tests/array_api_strict/fitting/fitting.py @@ -3,6 +3,7 @@ Any, ) +from deepmd.dpmodel.fitting.dos_fitting import DOSFittingNet as DOSFittingNetDP from deepmd.dpmodel.fitting.ener_fitting import EnergyFittingNet as EnergyFittingNetDP from ..common import ( @@ -36,3 +37,9 @@ class EnergyFittingNet(EnergyFittingNetDP): def __setattr__(self, name: str, value: Any) -> None: value = setattr_for_general_fitting(name, value) return super().__setattr__(name, value) + + +class DOSFittingNet(DOSFittingNetDP): + def __setattr__(self, name: str, value: Any) -> None: + value = setattr_for_general_fitting(name, value) + return super().__setattr__(name, value) diff --git a/source/tests/consistent/fitting/test_dos.py b/source/tests/consistent/fitting/test_dos.py index ada65c8ac5..4a78b69341 100644 --- a/source/tests/consistent/fitting/test_dos.py +++ b/source/tests/consistent/fitting/test_dos.py @@ -12,6 +12,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -36,6 +38,20 @@ fitting_dos, ) +if INSTALLED_JAX: + from deepmd.jax.env import ( + jnp, + ) + from deepmd.jax.fitting.fitting import DOSFittingNet as DOSFittingJAX +else: + DOSFittingJAX = object +if INSTALLED_ARRAY_API_STRICT: + import array_api_strict + + from ...array_api_strict.fitting.fitting import DOSFittingNet as DOSFittingStrict +else: + DOSFittingStrict = object + @parameterized( (True, False), # resnet_dt @@ -74,9 +90,19 @@ def skip_pt(self) -> bool: ) = self.param return CommonTest.skip_pt + @property + def skip_jax(self) -> bool: + return not INSTALLED_JAX + + @property + def skip_array_api_strict(self) -> bool: + return not INSTALLED_ARRAY_API_STRICT + tf_class = DOSFittingTF dp_class = DOSFittingDP pt_class = DOSFittingPT + jax_class = DOSFittingJAX + array_api_strict_class = DOSFittingStrict args = fitting_dos() def setUp(self): @@ -157,6 +183,39 @@ def eval_dp(self, dp_obj: Any) -> Any: fparam=self.fparam if numb_fparam else None, )["dos"] + def eval_jax(self, jax_obj: Any) -> Any: + ( + resnet_dt, + precision, + mixed_types, + numb_fparam, + numb_dos, + ) = self.param + return np.asarray( + jax_obj( + jnp.asarray(self.inputs), + jnp.asarray(self.atype.reshape(1, -1)), + fparam=jnp.asarray(self.fparam) if numb_fparam else None, + )["dos"] + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + array_api_strict.set_array_api_strict_flags(api_version="2023.12") + ( + resnet_dt, + precision, + mixed_types, + numb_fparam, + numb_dos, + ) = self.param + return np.asarray( + array_api_strict_obj( + array_api_strict.asarray(self.inputs), + array_api_strict.asarray(self.atype.reshape(1, -1)), + fparam=array_api_strict.asarray(self.fparam) if numb_fparam else None, + )["dos"] + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: if backend == self.RefBackend.TF: # shape is not same From d7d221059ea39802354cca8ea2e3d800b62e7563 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 16 Oct 2024 09:50:28 -0400 Subject: [PATCH 048/193] fix(pt): make PT training step idx consistent with TF (#4221) Fix #4206. Currently, the training step index displayed in TF and PT has different meanings: - In TF, step 0 means no training; step 1 means a training step has been performed. The maximum training step is equal to the number of steps. - In PT, step 0 means a training step has been performed. The maximum training step is the number of steps minus 1. This PR corrects the definition of the step-index in PT and makes them consistent. There is still a difference after this PR: TF shows step 0, but PT shows step 1. Showing the loss of step 0 in PT needs heavy refactoring and is thus not included in this PR. ## Summary by CodeRabbit - **New Features** - Improved logging for training progress, starting step count from 1 for better clarity. - Enhanced TensorBoard logging for consistent step tracking. - **Bug Fixes** - Adjusted logging conditions to ensure the first step's results are included in the output. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/pt/train/training.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index 4d746e84c0..10e841682a 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -769,7 +769,10 @@ def fake_model(): raise ValueError(f"Not supported optimizer type '{self.opt_type}'") # Log and persist - if self.display_in_training and _step_id % self.disp_freq == 0: + display_step_id = _step_id + 1 + if self.display_in_training and ( + display_step_id % self.disp_freq == 0 or display_step_id == 1 + ): self.wrapper.eval() def log_loss_train(_loss, _more_loss, _task_key="Default"): @@ -821,7 +824,7 @@ def log_loss_valid(_task_key="Default"): if self.rank == 0: log.info( format_training_message_per_task( - batch=_step_id, + batch=display_step_id, task_name="trn", rmse=train_results, learning_rate=cur_lr, @@ -830,7 +833,7 @@ def log_loss_valid(_task_key="Default"): if valid_results: log.info( format_training_message_per_task( - batch=_step_id, + batch=display_step_id, task_name="val", rmse=valid_results, learning_rate=None, @@ -861,7 +864,7 @@ def log_loss_valid(_task_key="Default"): if self.rank == 0: log.info( format_training_message_per_task( - batch=_step_id, + batch=display_step_id, task_name=_key + "_trn", rmse=train_results[_key], learning_rate=cur_lr, @@ -870,7 +873,7 @@ def log_loss_valid(_task_key="Default"): if valid_results[_key]: log.info( format_training_message_per_task( - batch=_step_id, + batch=display_step_id, task_name=_key + "_val", rmse=valid_results[_key], learning_rate=None, @@ -883,7 +886,7 @@ def log_loss_valid(_task_key="Default"): if self.rank == 0 and self.timing_in_training: log.info( format_training_message( - batch=_step_id, + batch=display_step_id, wall_time=train_time, ) ) @@ -899,7 +902,7 @@ def log_loss_valid(_task_key="Default"): self.print_header(fout, train_results, valid_results) self.lcurve_should_print_header = False self.print_on_training( - fout, _step_id, cur_lr, train_results, valid_results + fout, display_step_id, cur_lr, train_results, valid_results ) if ( @@ -921,11 +924,15 @@ def log_loss_valid(_task_key="Default"): f.write(str(self.latest_model)) # tensorboard - if self.enable_tensorboard and _step_id % self.tensorboard_freq == 0: - writer.add_scalar(f"{task_key}/lr", cur_lr, _step_id) - writer.add_scalar(f"{task_key}/loss", loss, _step_id) + if self.enable_tensorboard and ( + display_step_id % self.tensorboard_freq == 0 or display_step_id == 1 + ): + writer.add_scalar(f"{task_key}/lr", cur_lr, display_step_id) + writer.add_scalar(f"{task_key}/loss", loss, display_step_id) for item in more_loss: - writer.add_scalar(f"{task_key}/{item}", more_loss[item], _step_id) + writer.add_scalar( + f"{task_key}/{item}", more_loss[item], display_step_id + ) self.t0 = time.time() self.total_train_time = 0.0 From af86b577089393c519e0c478ae0a50c1766708ab Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 17 Oct 2024 01:03:06 +0800 Subject: [PATCH 049/193] Chore(pt): refactor the command function interface (#4225) Fix #3934. ## Summary by CodeRabbit - **New Features** - Enhanced clarity and usability of the training process with explicit parameters for model training, freezing, and bias changing functions. - Improved function interfaces streamline user interactions and understanding. - **Bug Fixes** - Resolved issues related to parameter handling by transitioning from a flags-based system to a more structured approach. - **Refactor** - Updated function signatures for better readability and maintainability, improving the overall structure of the code. - Simplified the freezing mechanism in tests by removing the use of a `Namespace` object. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/pt/entrypoints/main.py | 149 +++++++++++++++---------- source/tests/pt/model/test_deeppot.py | 6 +- source/tests/pt/test_init_frz_model.py | 6 +- 3 files changed, 94 insertions(+), 67 deletions(-) diff --git a/deepmd/pt/entrypoints/main.py b/deepmd/pt/entrypoints/main.py index a0694c41c5..7c8a95c5e7 100644 --- a/deepmd/pt/entrypoints/main.py +++ b/deepmd/pt/entrypoints/main.py @@ -239,16 +239,27 @@ def get_backend_info(self) -> dict: } -def train(FLAGS): - log.info("Configuration path: %s", FLAGS.INPUT) +def train( + input_file: str, + init_model: Optional[str], + restart: Optional[str], + finetune: Optional[str], + init_frz_model: Optional[str], + model_branch: str, + skip_neighbor_stat: bool = False, + use_pretrain_script: bool = False, + force_load: bool = False, + output: str = "out.json", +): + log.info("Configuration path: %s", input_file) SummaryPrinter()() - with open(FLAGS.INPUT) as fin: + with open(input_file) as fin: config = json.load(fin) # ensure suffix, as in the command line help, we say "path prefix of checkpoint files" - if FLAGS.init_model is not None and not FLAGS.init_model.endswith(".pt"): - FLAGS.init_model += ".pt" - if FLAGS.restart is not None and not FLAGS.restart.endswith(".pt"): - FLAGS.restart += ".pt" + if init_model is not None and not init_model.endswith(".pt"): + init_model += ".pt" + if restart is not None and not restart.endswith(".pt"): + restart += ".pt" # update multitask config multi_task = "model_dict" in config["model"] @@ -262,26 +273,24 @@ def train(FLAGS): # update fine-tuning config finetune_links = None - if FLAGS.finetune is not None: + if finetune is not None: config["model"], finetune_links = get_finetune_rules( - FLAGS.finetune, + finetune, config["model"], - model_branch=FLAGS.model_branch, - change_model_params=FLAGS.use_pretrain_script, + model_branch=model_branch, + change_model_params=use_pretrain_script, ) # update init_model or init_frz_model config if necessary - if ( - FLAGS.init_model is not None or FLAGS.init_frz_model is not None - ) and FLAGS.use_pretrain_script: - if FLAGS.init_model is not None: - init_state_dict = torch.load(FLAGS.init_model, map_location=DEVICE) + if (init_model is not None or init_frz_model is not None) and use_pretrain_script: + if init_model is not None: + init_state_dict = torch.load(init_model, map_location=DEVICE) if "model" in init_state_dict: init_state_dict = init_state_dict["model"] config["model"] = init_state_dict["_extra_state"]["model_params"] else: config["model"] = json.loads( torch.jit.load( - FLAGS.init_frz_model, map_location=DEVICE + init_frz_model, map_location=DEVICE ).get_model_def_script() ) @@ -291,7 +300,7 @@ def train(FLAGS): # do neighbor stat min_nbor_dist = None - if not FLAGS.skip_neighbor_stat: + if not skip_neighbor_stat: log.info( "Calculate neighbor statistics... (add --skip-neighbor-stat to skip this step)" ) @@ -320,16 +329,16 @@ def train(FLAGS): ) ) - with open(FLAGS.output, "w") as fp: + with open(output, "w") as fp: json.dump(config, fp, indent=4) trainer = get_trainer( config, - FLAGS.init_model, - FLAGS.restart, - FLAGS.finetune, - FLAGS.force_load, - FLAGS.init_frz_model, + init_model, + restart, + finetune, + force_load, + init_frz_model, shared_links=shared_links, finetune_links=finetune_links, ) @@ -343,26 +352,39 @@ def train(FLAGS): trainer.run() -def freeze(FLAGS): - model = inference.Tester(FLAGS.model, head=FLAGS.head).model +def freeze( + model: str, + output: str = "frozen_model.pth", + head: Optional[str] = None, +): + model = inference.Tester(model, head=head).model model.eval() model = torch.jit.script(model) extra_files = {} torch.jit.save( model, - FLAGS.output, + output, extra_files, ) - log.info(f"Saved frozen model to {FLAGS.output}") - - -def change_bias(FLAGS): - if FLAGS.INPUT.endswith(".pt"): - old_state_dict = torch.load(FLAGS.INPUT, map_location=env.DEVICE) + log.info(f"Saved frozen model to {output}") + + +def change_bias( + input_file: str, + mode: str = "change", + bias_value: Optional[list] = None, + datafile: Optional[str] = None, + system: str = ".", + numb_batch: int = 0, + model_branch: Optional[str] = None, + output: Optional[str] = None, +): + if input_file.endswith(".pt"): + old_state_dict = torch.load(input_file, map_location=env.DEVICE) model_state_dict = copy.deepcopy(old_state_dict.get("model", old_state_dict)) model_params = model_state_dict["_extra_state"]["model_params"] - elif FLAGS.INPUT.endswith(".pth"): - old_model = torch.jit.load(FLAGS.INPUT, map_location=env.DEVICE) + elif input_file.endswith(".pth"): + old_model = torch.jit.load(input_file, map_location=env.DEVICE) model_params_string = old_model.get_model_def_script() model_params = json.loads(model_params_string) old_state_dict = old_model.state_dict() @@ -373,10 +395,7 @@ def change_bias(FLAGS): "or a frozen model with a .pth extension" ) multi_task = "model_dict" in model_params - model_branch = FLAGS.model_branch - bias_adjust_mode = ( - "change-by-statistic" if FLAGS.mode == "change" else "set-by-statistic" - ) + bias_adjust_mode = "change-by-statistic" if mode == "change" else "set-by-statistic" if multi_task: assert ( model_branch is not None @@ -393,24 +412,24 @@ def change_bias(FLAGS): else model_params["model_dict"][model_branch]["type_map"] ) model_to_change = model if not multi_task else model[model_branch] - if FLAGS.INPUT.endswith(".pt"): + if input_file.endswith(".pt"): wrapper = ModelWrapper(model) wrapper.load_state_dict(old_state_dict["model"]) else: # for .pth model.load_state_dict(old_state_dict) - if FLAGS.bias_value is not None: + if bias_value is not None: # use user-defined bias assert model_to_change.model_type in [ "ener" ], "User-defined bias is only available for energy model!" assert ( - len(FLAGS.bias_value) == len(type_map) + len(bias_value) == len(type_map) ), f"The number of elements in the bias should be the same as that in the type_map: {type_map}." old_bias = model_to_change.get_out_bias() bias_to_set = torch.tensor( - FLAGS.bias_value, dtype=old_bias.dtype, device=old_bias.device + bias_value, dtype=old_bias.dtype, device=old_bias.device ).view(old_bias.shape) model_to_change.set_out_bias(bias_to_set) log.info( @@ -421,11 +440,11 @@ def change_bias(FLAGS): updated_model = model_to_change else: # calculate bias on given systems - if FLAGS.datafile is not None: - with open(FLAGS.datafile) as datalist: + if datafile is not None: + with open(datafile) as datalist: all_sys = datalist.read().splitlines() else: - all_sys = expand_sys_str(FLAGS.system) + all_sys = expand_sys_str(system) data_systems = process_systems(all_sys) data_single = DpLoaderSet( data_systems, @@ -438,7 +457,7 @@ def change_bias(FLAGS): data_requirement = mock_loss.label_requirement data_requirement += training.get_additional_data_requirement(model_to_change) data_single.add_data_requirement(data_requirement) - nbatches = FLAGS.numb_batch if FLAGS.numb_batch != 0 else float("inf") + nbatches = numb_batch if numb_batch != 0 else float("inf") sampled_data = make_stat_input( data_single.systems, data_single.dataloaders, @@ -453,11 +472,9 @@ def change_bias(FLAGS): else: model[model_branch] = updated_model - if FLAGS.INPUT.endswith(".pt"): + if input_file.endswith(".pt"): output_path = ( - FLAGS.output - if FLAGS.output is not None - else FLAGS.INPUT.replace(".pt", "_updated.pt") + output if output is not None else input_file.replace(".pt", "_updated.pt") ) wrapper = ModelWrapper(model) if "model" in old_state_dict: @@ -470,9 +487,7 @@ def change_bias(FLAGS): else: # for .pth output_path = ( - FLAGS.output - if FLAGS.output is not None - else FLAGS.INPUT.replace(".pth", "_updated.pth") + output if output is not None else input_file.replace(".pth", "_updated.pth") ) model = torch.jit.script(model) torch.jit.save( @@ -499,7 +514,18 @@ def main(args: Optional[Union[list[str], argparse.Namespace]] = None): log.info("DeePMD version: %s", __version__) if FLAGS.command == "train": - train(FLAGS) + train( + input_file=FLAGS.INPUT, + init_model=FLAGS.init_model, + restart=FLAGS.restart, + finetune=FLAGS.finetune, + init_frz_model=FLAGS.init_frz_model, + model_branch=FLAGS.model_branch, + skip_neighbor_stat=FLAGS.skip_neighbor_stat, + use_pretrain_script=FLAGS.use_pretrain_script, + force_load=FLAGS.force_load, + output=FLAGS.output, + ) elif FLAGS.command == "freeze": if Path(FLAGS.checkpoint_folder).is_dir(): checkpoint_path = Path(FLAGS.checkpoint_folder) @@ -508,9 +534,18 @@ def main(args: Optional[Union[list[str], argparse.Namespace]] = None): else: FLAGS.model = FLAGS.checkpoint_folder FLAGS.output = str(Path(FLAGS.output).with_suffix(".pth")) - freeze(FLAGS) + freeze(model=FLAGS.model, output=FLAGS.output, head=FLAGS.head) elif FLAGS.command == "change-bias": - change_bias(FLAGS) + change_bias( + input_file=FLAGS.INPUT, + mode=FLAGS.mode, + bias_value=FLAGS.bias_value, + datafile=FLAGS.datafile, + system=FLAGS.system, + numb_batch=FLAGS.numb_batch, + model_branch=FLAGS.model_branch, + output=FLAGS.output, + ) else: raise RuntimeError(f"Invalid command {FLAGS.command}!") diff --git a/source/tests/pt/model/test_deeppot.py b/source/tests/pt/model/test_deeppot.py index 8917c62cce..7f530b0a5e 100644 --- a/source/tests/pt/model/test_deeppot.py +++ b/source/tests/pt/model/test_deeppot.py @@ -2,9 +2,6 @@ import json import os import unittest -from argparse import ( - Namespace, -) from copy import ( deepcopy, ) @@ -123,12 +120,11 @@ class TestDeepPotFrozen(TestDeepPot): def setUp(self): super().setUp() frozen_model = "frozen_model.pth" - ns = Namespace( + freeze( model=self.model, output=frozen_model, head=None, ) - freeze(ns) self.model = frozen_model # Note: this can not actually disable cuda device to be used diff --git a/source/tests/pt/test_init_frz_model.py b/source/tests/pt/test_init_frz_model.py index 1cbc1b29b6..69c738d6bd 100644 --- a/source/tests/pt/test_init_frz_model.py +++ b/source/tests/pt/test_init_frz_model.py @@ -4,9 +4,6 @@ import shutil import tempfile import unittest -from argparse import ( - Namespace, -) from copy import ( deepcopy, ) @@ -70,12 +67,11 @@ def setUp(self): if imodel in [0, 1]: trainer.run() - ns = Namespace( + freeze( model="model.pt", output=frozen_model, head=None, ) - freeze(ns) self.models.append(frozen_model) def test_dp_test(self): From 2871fec8771c887010d9a660bb40425e603622d4 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 17 Oct 2024 10:06:06 +0800 Subject: [PATCH 050/193] Chore(pt):rm old pt implementation (#4223) Fix #3913. ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced `exclude_types` parameter in `DipoleFittingNet` and `PolarFittingNet` constructors for improved flexibility. - Added `SimpleLinear` class to enhance network functionality. - **Bug Fixes** - Removed `old_impl` parameter across various classes, streamlining interfaces and ensuring consistent behavior. - **Documentation** - Updated test cases to reflect the removal of `old_impl`, focusing on new implementations. - **Chores** - Deleted obsolete files and classes to simplify the codebase and improve maintainability. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/dpmodel/fitting/dipole_fitting.py | 3 - .../dpmodel/fitting/polarizability_fitting.py | 3 - deepmd/pt/model/backbone/__init__.py | 12 - deepmd/pt/model/backbone/backbone.py | 12 - deepmd/pt/model/backbone/evoformer2b.py | 103 -- deepmd/pt/model/descriptor/__init__.py | 4 - deepmd/pt/model/descriptor/dpa1.py | 2 - deepmd/pt/model/descriptor/dpa2.py | 2 - deepmd/pt/model/descriptor/gaussian_lcc.py | 319 ---- .../descriptor/repformer_layer_old_impl.py | 744 -------- deepmd/pt/model/descriptor/repformers.py | 102 +- deepmd/pt/model/descriptor/se_a.py | 158 +- deepmd/pt/model/descriptor/se_atten.py | 218 +-- deepmd/pt/model/descriptor/se_atten_v2.py | 2 - deepmd/pt/model/descriptor/se_r.py | 2 - deepmd/pt/model/network/network.py | 1637 ----------------- deepmd/pt/model/task/__init__.py | 4 - deepmd/pt/model/task/atten_lcc.py | 55 - deepmd/pt/model/task/dipole.py | 2 - deepmd/pt/model/task/fitting.py | 106 +- deepmd/pt/model/task/polarizability.py | 2 - .../tests/pt/model/test_descriptor_hybrid.py | 1 - source/tests/pt/model/test_descriptor_se_r.py | 3 - source/tests/pt/model/test_dpa1.py | 65 - source/tests/pt/model/test_dpa2.py | 41 - source/tests/pt/model/test_embedding_net.py | 7 +- source/tests/pt/model/test_ener_fitting.py | 48 - source/tests/pt/model/test_se_atten_v2.py | 2 - source/tests/pt/model/test_se_e2_a.py | 42 - 29 files changed, 206 insertions(+), 3495 deletions(-) delete mode 100644 deepmd/pt/model/backbone/__init__.py delete mode 100644 deepmd/pt/model/backbone/backbone.py delete mode 100644 deepmd/pt/model/backbone/evoformer2b.py delete mode 100644 deepmd/pt/model/descriptor/gaussian_lcc.py delete mode 100644 deepmd/pt/model/descriptor/repformer_layer_old_impl.py delete mode 100644 deepmd/pt/model/task/atten_lcc.py diff --git a/deepmd/dpmodel/fitting/dipole_fitting.py b/deepmd/dpmodel/fitting/dipole_fitting.py index f67bbc93a4..01bd60c777 100644 --- a/deepmd/dpmodel/fitting/dipole_fitting.py +++ b/deepmd/dpmodel/fitting/dipole_fitting.py @@ -105,7 +105,6 @@ def __init__( r_differentiable: bool = True, c_differentiable: bool = True, type_map: Optional[list[str]] = None, - old_impl=False, seed: Optional[Union[int, list[int]]] = None, ): if tot_ener_zero: @@ -141,7 +140,6 @@ def __init__( type_map=type_map, seed=seed, ) - self.old_impl = False def _net_out_dim(self): """Set the FittingNet output dim.""" @@ -151,7 +149,6 @@ def serialize(self) -> dict: data = super().serialize() data["type"] = "dipole" data["embedding_width"] = self.embedding_width - data["old_impl"] = self.old_impl data["r_differentiable"] = self.r_differentiable data["c_differentiable"] = self.c_differentiable return data diff --git a/deepmd/dpmodel/fitting/polarizability_fitting.py b/deepmd/dpmodel/fitting/polarizability_fitting.py index 2ff5052a83..73a691f482 100644 --- a/deepmd/dpmodel/fitting/polarizability_fitting.py +++ b/deepmd/dpmodel/fitting/polarizability_fitting.py @@ -107,7 +107,6 @@ def __init__( spin: Any = None, mixed_types: bool = False, exclude_types: list[int] = [], - old_impl: bool = False, fit_diag: bool = True, scale: Optional[list[float]] = None, shift_diag: bool = True, @@ -165,7 +164,6 @@ def __init__( type_map=type_map, seed=seed, ) - self.old_impl = False def _net_out_dim(self): """Set the FittingNet output dim.""" @@ -192,7 +190,6 @@ def serialize(self) -> dict: data["type"] = "polar" data["@version"] = 3 data["embedding_width"] = self.embedding_width - data["old_impl"] = self.old_impl data["fit_diag"] = self.fit_diag data["shift_diag"] = self.shift_diag data["@variables"]["scale"] = self.scale diff --git a/deepmd/pt/model/backbone/__init__.py b/deepmd/pt/model/backbone/__init__.py deleted file mode 100644 index a76bdb2a2d..0000000000 --- a/deepmd/pt/model/backbone/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -from .backbone import ( - BackBone, -) -from .evoformer2b import ( - Evoformer2bBackBone, -) - -__all__ = [ - "BackBone", - "Evoformer2bBackBone", -] diff --git a/deepmd/pt/model/backbone/backbone.py b/deepmd/pt/model/backbone/backbone.py deleted file mode 100644 index ddeedfeff5..0000000000 --- a/deepmd/pt/model/backbone/backbone.py +++ /dev/null @@ -1,12 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -import torch - - -class BackBone(torch.nn.Module): - def __init__(self, **kwargs): - """BackBone base method.""" - super().__init__() - - def forward(self, **kwargs): - """Calculate backBone.""" - raise NotImplementedError diff --git a/deepmd/pt/model/backbone/evoformer2b.py b/deepmd/pt/model/backbone/evoformer2b.py deleted file mode 100644 index 1146b3a298..0000000000 --- a/deepmd/pt/model/backbone/evoformer2b.py +++ /dev/null @@ -1,103 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -from deepmd.pt.model.backbone import ( - BackBone, -) -from deepmd.pt.model.network.network import ( - Evoformer2bEncoder, -) - - -class Evoformer2bBackBone(BackBone): - def __init__( - self, - nnei, - layer_num=6, - attn_head=8, - atomic_dim=1024, - pair_dim=100, - feature_dim=1024, - ffn_dim=2048, - post_ln=False, - final_layer_norm=True, - final_head_layer_norm=False, - emb_layer_norm=False, - atomic_residual=False, - evo_residual=False, - residual_factor=1.0, - activation_function="gelu", - **kwargs, - ): - """Construct an evoformer backBone.""" - super().__init__() - self.nnei = nnei - self.layer_num = layer_num - self.attn_head = attn_head - self.atomic_dim = atomic_dim - self.pair_dim = pair_dim - self.feature_dim = feature_dim - self.head_dim = feature_dim // attn_head - assert ( - feature_dim % attn_head == 0 - ), f"feature_dim {feature_dim} must be divided by attn_head {attn_head}!" - self.ffn_dim = ffn_dim - self.post_ln = post_ln - self.final_layer_norm = final_layer_norm - self.final_head_layer_norm = final_head_layer_norm - self.emb_layer_norm = emb_layer_norm - self.activation_function = activation_function - self.atomic_residual = atomic_residual - self.evo_residual = evo_residual - self.residual_factor = float(residual_factor) - self.encoder = Evoformer2bEncoder( - nnei=self.nnei, - layer_num=self.layer_num, - attn_head=self.attn_head, - atomic_dim=self.atomic_dim, - pair_dim=self.pair_dim, - feature_dim=self.feature_dim, - ffn_dim=self.ffn_dim, - post_ln=self.post_ln, - final_layer_norm=self.final_layer_norm, - final_head_layer_norm=self.final_head_layer_norm, - emb_layer_norm=self.emb_layer_norm, - atomic_residual=self.atomic_residual, - evo_residual=self.evo_residual, - residual_factor=self.residual_factor, - activation_function=self.activation_function, - ) - - def forward(self, atomic_rep, pair_rep, nlist, nlist_type, nlist_mask): - """Encoder the atomic and pair representations. - - Args: - - atomic_rep: Atomic representation with shape [nframes, nloc, atomic_dim]. - - pair_rep: Pair representation with shape [nframes, nloc, nnei, pair_dim]. - - nlist: Neighbor list with shape [nframes, nloc, nnei]. - - nlist_type: Neighbor types with shape [nframes, nloc, nnei]. - - nlist_mask: Neighbor mask with shape [nframes, nloc, nnei], `False` if blank. - - Returns - ------- - - atomic_rep: Atomic representation after encoder with shape [nframes, nloc, feature_dim]. - - transformed_atomic_rep: Transformed atomic representation after encoder with shape [nframes, nloc, atomic_dim]. - - pair_rep: Pair representation after encoder with shape [nframes, nloc, nnei, attn_head]. - - delta_pair_rep: Delta pair representation after encoder with shape [nframes, nloc, nnei, attn_head]. - - norm_x: Normalization loss of atomic_rep. - - norm_delta_pair_rep: Normalization loss of delta_pair_rep. - """ - ( - atomic_rep, - transformed_atomic_rep, - pair_rep, - delta_pair_rep, - norm_x, - norm_delta_pair_rep, - ) = self.encoder(atomic_rep, pair_rep, nlist, nlist_type, nlist_mask) - return ( - atomic_rep, - transformed_atomic_rep, - pair_rep, - delta_pair_rep, - norm_x, - norm_delta_pair_rep, - ) diff --git a/deepmd/pt/model/descriptor/__init__.py b/deepmd/pt/model/descriptor/__init__.py index 779e7a562c..4ffa937bcb 100644 --- a/deepmd/pt/model/descriptor/__init__.py +++ b/deepmd/pt/model/descriptor/__init__.py @@ -16,9 +16,6 @@ from .env_mat import ( prod_env_mat, ) -from .gaussian_lcc import ( - DescrptGaussianLcc, -) from .hybrid import ( DescrptHybrid, ) @@ -59,6 +56,5 @@ "DescrptDPA2", "DescrptHybrid", "prod_env_mat", - "DescrptGaussianLcc", "DescrptBlockRepformers", ] diff --git a/deepmd/pt/model/descriptor/dpa1.py b/deepmd/pt/model/descriptor/dpa1.py index 617e8b49b6..322fa3a12d 100644 --- a/deepmd/pt/model/descriptor/dpa1.py +++ b/deepmd/pt/model/descriptor/dpa1.py @@ -245,7 +245,6 @@ def __init__( # not implemented spin=None, type: Optional[str] = None, - old_impl: bool = False, ): super().__init__() # Ensure compatibility with the deprecated stripped_type_embedding option. @@ -290,7 +289,6 @@ def __init__( trainable_ln=trainable_ln, ln_eps=ln_eps, seed=child_seed(seed, 1), - old_impl=old_impl, ) self.use_econf_tebd = use_econf_tebd self.use_tebd_bias = use_tebd_bias diff --git a/deepmd/pt/model/descriptor/dpa2.py b/deepmd/pt/model/descriptor/dpa2.py index f1ef200b09..632efe5dbf 100644 --- a/deepmd/pt/model/descriptor/dpa2.py +++ b/deepmd/pt/model/descriptor/dpa2.py @@ -92,7 +92,6 @@ def __init__( use_econf_tebd: bool = False, use_tebd_bias: bool = False, type_map: Optional[list[str]] = None, - old_impl: bool = False, ): r"""The DPA-2 descriptor. see https://arxiv.org/abs/2312.15492. @@ -235,7 +234,6 @@ def init_subclass_params(sub_data, sub_class): g1_out_conv=self.repformer_args.g1_out_conv, g1_out_mlp=self.repformer_args.g1_out_mlp, seed=child_seed(seed, 1), - old_impl=old_impl, ) self.rcsl_list = [ (self.repformers.get_rcut(), self.repformers.get_nsel()), diff --git a/deepmd/pt/model/descriptor/gaussian_lcc.py b/deepmd/pt/model/descriptor/gaussian_lcc.py deleted file mode 100644 index 8ac52215c0..0000000000 --- a/deepmd/pt/model/descriptor/gaussian_lcc.py +++ /dev/null @@ -1,319 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Optional, -) - -import torch -import torch.nn as nn - -from deepmd.pt.model.descriptor.base_descriptor import ( - BaseDescriptor, -) -from deepmd.pt.model.network.network import ( - Evoformer3bEncoder, - GaussianEmbedding, - TypeEmbedNet, -) -from deepmd.pt.utils import ( - env, -) -from deepmd.utils.path import ( - DPPath, -) - - -class DescrptGaussianLcc(torch.nn.Module, BaseDescriptor): - def __init__( - self, - rcut, - rcut_smth, - sel: int, - ntypes: int, - num_pair: int, - embed_dim: int = 768, - kernel_num: int = 128, - pair_embed_dim: int = 64, - num_block: int = 1, - layer_num: int = 12, - attn_head: int = 48, - pair_hidden_dim: int = 16, - ffn_embedding_dim: int = 768, - dropout: float = 0.0, - droppath_prob: float = 0.1, - pair_dropout: float = 0.25, - attention_dropout: float = 0.1, - activation_dropout: float = 0.1, - pre_ln: bool = True, - do_tag_embedding: bool = False, - tag_ener_pref: bool = False, - atomic_sum_gbf: bool = False, - pre_add_seq: bool = True, - tri_update: bool = True, - **kwargs, - ): - """Construct a descriptor of Gaussian Based Local Cluster. - - Args: - - rcut: Cut-off radius. - - rcut_smth: Smooth hyper-parameter for pair force & energy. **Not used in this descriptor**. - - sel: For each element type, how many atoms is selected as neighbors. - - ntypes: Number of atom types. - - num_pair: Number of atom type pairs. Default is 2 * ntypes. - - kernel_num: Number of gaussian kernels. - - embed_dim: Dimension of atomic representation. - - pair_embed_dim: Dimension of pair representation. - - num_block: Number of evoformer blocks. - - layer_num: Number of attention layers. - - attn_head: Number of attention heads. - - pair_hidden_dim: Hidden dimension of pair representation during attention process. - - ffn_embedding_dim: Dimension during feed forward network. - - dropout: Dropout probability of atomic representation. - - droppath_prob: If not zero, it will use drop paths (Stochastic Depth) per sample and ignore `dropout`. - - pair_dropout: Dropout probability of pair representation during triangular update. - - attention_dropout: Dropout probability during attetion process. - - activation_dropout: Dropout probability of pair feed forward network. - - pre_ln: Do previous layer norm or not. - - do_tag_embedding: Add tag embedding to atomic and pair representations. (`tags`, `tags2`, `tags3` must exist) - - atomic_sum_gbf: Add sum of gaussian outputs to atomic representation or not. - - pre_add_seq: Add output of other descriptor (if has) to the atomic representation before attention. - """ - super().__init__() - self.rcut = rcut - self.rcut_smth = rcut_smth - self.embed_dim = embed_dim - self.num_pair = num_pair - self.kernel_num = kernel_num - self.pair_embed_dim = pair_embed_dim - self.num_block = num_block - self.layer_num = layer_num - self.attention_heads = attn_head - self.pair_hidden_dim = pair_hidden_dim - self.ffn_embedding_dim = ffn_embedding_dim - self.dropout = dropout - self.droppath_prob = droppath_prob - self.pair_dropout = pair_dropout - self.attention_dropout = attention_dropout - self.activation_dropout = activation_dropout - self.pre_ln = pre_ln - self.do_tag_embedding = do_tag_embedding - self.tag_ener_pref = tag_ener_pref - self.atomic_sum_gbf = atomic_sum_gbf - self.local_cluster = True - self.pre_add_seq = pre_add_seq - self.tri_update = tri_update - - if isinstance(sel, int): - sel = [sel] - - self.ntypes = ntypes - self.sec = torch.tensor(sel) # pylint: disable=no-explicit-dtype,no-explicit-device - self.nnei = sum(sel) - - if self.do_tag_embedding: - self.tag_encoder = nn.Embedding(3, self.embed_dim) - self.tag_encoder2 = nn.Embedding(2, self.embed_dim) - self.tag_type_embedding = TypeEmbedNet(10, pair_embed_dim) - self.edge_type_embedding = nn.Embedding( - (ntypes + 1) * (ntypes + 1), - pair_embed_dim, - padding_idx=(ntypes + 1) * (ntypes + 1) - 1, - dtype=env.GLOBAL_PT_FLOAT_PRECISION, - ) - self.gaussian_encoder = GaussianEmbedding( - rcut, - kernel_num, - num_pair, - embed_dim, - pair_embed_dim, - sel, - ntypes, - atomic_sum_gbf, - ) - self.backbone = Evoformer3bEncoder( - self.nnei, - layer_num=self.layer_num, - attn_head=self.attention_heads, - atomic_dim=self.embed_dim, - pair_dim=self.pair_embed_dim, - pair_hidden_dim=self.pair_hidden_dim, - ffn_embedding_dim=self.ffn_embedding_dim, - dropout=self.dropout, - droppath_prob=self.droppath_prob, - pair_dropout=self.pair_dropout, - attention_dropout=self.attention_dropout, - activation_dropout=self.activation_dropout, - pre_ln=self.pre_ln, - tri_update=self.tri_update, - ) - - @property - def dim_out(self): - """Returns the output dimension of atomic representation.""" - return self.embed_dim - - @property - def dim_in(self): - """Returns the atomic input dimension of this descriptor.""" - return self.embed_dim - - @property - def dim_emb(self): - """Returns the output dimension of pair representation.""" - return self.pair_embed_dim - - def compute_input_stats(self, merged: list[dict], path: Optional[DPPath] = None): - """Update mean and stddev for descriptor elements.""" - pass - - def forward( - self, - extended_coord, - nlist, - atype, - nlist_type, - nlist_loc=None, - atype_tebd=None, - nlist_tebd=None, - seq_input=None, - ): - """Calculate the atomic and pair representations of this descriptor. - - Args: - - extended_coord: Copied atom coordinates with shape [nframes, nall, 3]. - - nlist: Neighbor list with shape [nframes, nloc, nnei]. - - atype: Atom type with shape [nframes, nloc]. - - nlist_type: Atom type of neighbors with shape [nframes, nloc, nnei]. - - nlist_loc: Local index of neighbor list with shape [nframes, nloc, nnei]. - - atype_tebd: Atomic type embedding with shape [nframes, nloc, tebd_dim]. - - nlist_tebd: Type embeddings of neighbor with shape [nframes, nloc, nnei, tebd_dim]. - - seq_input: The sequential input from other descriptor with - shape [nframes, nloc, tebd_dim] or [nframes * nloc, 1 + nnei, tebd_dim] - - Returns - ------- - - result: descriptor with shape [nframes, nloc, self.filter_neuron[-1] * self.axis_neuron]. - - ret: environment matrix with shape [nframes, nloc, self.neei, out_size] - """ - nframes, nloc = nlist.shape[:2] - nall = extended_coord.shape[1] - nlist2 = torch.cat( - [ - torch.arange(0, nloc, device=nlist.device) # pylint: disable=no-explicit-dtype - .reshape(1, nloc, 1) - .expand(nframes, -1, -1), - nlist, - ], - dim=-1, - ) - nlist_loc2 = torch.cat( - [ - torch.arange(0, nloc, device=nlist_loc.device) # pylint: disable=no-explicit-dtype - .reshape(1, nloc, 1) - .expand(nframes, -1, -1), - nlist_loc, - ], - dim=-1, - ) - nlist_type2 = torch.cat([atype.reshape(nframes, nloc, 1), nlist_type], dim=-1) - nnei2_mask = nlist2 != -1 - padding_mask = nlist2 == -1 - nlist2 = nlist2 * nnei2_mask - nlist_loc2 = nlist_loc2 * nnei2_mask - - # nframes x nloc x (1 + nnei2) x (1 + nnei2) - pair_mask = nnei2_mask.unsqueeze(-1) * nnei2_mask.unsqueeze(-2) - # nframes x nloc x (1 + nnei2) x (1 + nnei2) x head - attn_mask = torch.zeros( - [nframes, nloc, 1 + self.nnei, 1 + self.nnei, self.attention_heads], - device=nlist.device, - dtype=extended_coord.dtype, - ) - attn_mask.masked_fill_(padding_mask.unsqueeze(2).unsqueeze(-1), float("-inf")) - # (nframes x nloc) x head x (1 + nnei2) x (1 + nnei2) - attn_mask = ( - attn_mask.reshape( - nframes * nloc, 1 + self.nnei, 1 + self.nnei, self.attention_heads - ) - .permute(0, 3, 1, 2) - .contiguous() - ) - - # Atomic feature - # [(nframes x nloc) x (1 + nnei2) x tebd_dim] - atom_feature = torch.gather( - atype_tebd, - dim=1, - index=nlist_loc2.reshape(nframes, -1) - .unsqueeze(-1) - .expand(-1, -1, self.embed_dim), - ).reshape(nframes * nloc, 1 + self.nnei, self.embed_dim) - if self.pre_add_seq and seq_input is not None: - first_dim = seq_input.shape[0] - if first_dim == nframes * nloc: - atom_feature += seq_input - elif first_dim == nframes: - atom_feature_seq = torch.gather( - seq_input, - dim=1, - index=nlist_loc2.reshape(nframes, -1) - .unsqueeze(-1) - .expand(-1, -1, self.embed_dim), - ).reshape(nframes * nloc, 1 + self.nnei, self.embed_dim) - atom_feature += atom_feature_seq - else: - raise RuntimeError - atom_feature = atom_feature * nnei2_mask.reshape( - nframes * nloc, 1 + self.nnei, 1 - ) - - # Pair feature - # [(nframes x nloc) x (1 + nnei2)] - nlist_type2_reshape = nlist_type2.reshape(nframes * nloc, 1 + self.nnei) - # [(nframes x nloc) x (1 + nnei2) x (1 + nnei2)] - edge_type = nlist_type2_reshape.unsqueeze(-1) * ( - self.ntypes + 1 - ) + nlist_type2_reshape.unsqueeze(-2) - # [(nframes x nloc) x (1 + nnei2) x (1 + nnei2) x pair_dim] - edge_feature = self.edge_type_embedding(edge_type) - - # [(nframes x nloc) x (1 + nnei2) x (1 + nnei2) x 2] - edge_type_2dim = torch.cat( - [ - nlist_type2_reshape.view(nframes * nloc, 1 + self.nnei, 1, 1).expand( - -1, -1, 1 + self.nnei, -1 - ), - nlist_type2_reshape.view(nframes * nloc, 1, 1 + self.nnei, 1).expand( - -1, 1 + self.nnei, -1, -1 - ) - + self.ntypes, - ], - dim=-1, - ) - # [(nframes x nloc) x (1 + nnei2) x 3] - coord_selected = torch.gather( - extended_coord.unsqueeze(1) - .expand(-1, nloc, -1, -1) - .reshape(nframes * nloc, nall, 3), - dim=1, - index=nlist2.reshape(nframes * nloc, 1 + self.nnei, 1).expand(-1, -1, 3), - ) - - # Update pair features (or and atomic features) with gbf features - # delta_pos: [(nframes x nloc) x (1 + nnei2) x (1 + nnei2) x 3]. - atomic_feature, pair_feature, delta_pos = self.gaussian_encoder( - coord_selected, atom_feature, edge_type_2dim, edge_feature - ) - # [(nframes x nloc) x (1 + nnei2) x (1 + nnei2) x pair_dim] - attn_bias = pair_feature - - # output: [(nframes x nloc) x (1 + nnei2) x tebd_dim] - # pair: [(nframes x nloc) x (1 + nnei2) x (1 + nnei2) x pair_dim] - output, pair = self.backbone( - atomic_feature, - pair=attn_bias, - attn_mask=attn_mask, - pair_mask=pair_mask, - atom_mask=nnei2_mask.reshape(nframes * nloc, 1 + self.nnei), - ) - - return output, pair, delta_pos, None diff --git a/deepmd/pt/model/descriptor/repformer_layer_old_impl.py b/deepmd/pt/model/descriptor/repformer_layer_old_impl.py deleted file mode 100644 index 47b20f7b03..0000000000 --- a/deepmd/pt/model/descriptor/repformer_layer_old_impl.py +++ /dev/null @@ -1,744 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Callable, -) - -import torch - -from deepmd.pt.model.network.network import ( - SimpleLinear, -) -from deepmd.pt.utils import ( - env, -) -from deepmd.pt.utils.utils import ( - ActivationFn, -) - - -def _make_nei_g1( - g1_ext: torch.Tensor, - nlist: torch.Tensor, -) -> torch.Tensor: - # nlist: nb x nloc x nnei - nb, nloc, nnei = nlist.shape - # g1_ext: nb x nall x ng1 - ng1 = g1_ext.shape[-1] - # index: nb x (nloc x nnei) x ng1 - index = nlist.reshape(nb, nloc * nnei).unsqueeze(-1).expand(-1, -1, ng1) - # gg1 : nb x (nloc x nnei) x ng1 - gg1 = torch.gather(g1_ext, dim=1, index=index) - # gg1 : nb x nloc x nnei x ng1 - gg1 = gg1.view(nb, nloc, nnei, ng1) - return gg1 - - -def _apply_nlist_mask( - gg: torch.Tensor, - nlist_mask: torch.Tensor, -) -> torch.Tensor: - # gg: nf x nloc x nnei x ng - # msk: nf x nloc x nnei - return gg.masked_fill(~nlist_mask.unsqueeze(-1), 0.0) - - -def _apply_switch(gg: torch.Tensor, sw: torch.Tensor) -> torch.Tensor: - # gg: nf x nloc x nnei x ng - # sw: nf x nloc x nnei - return gg * sw.unsqueeze(-1) - - -def _apply_h_norm( - hh: torch.Tensor, # nf x nloc x nnei x 3 -) -> torch.Tensor: - """Normalize h by the std of vector length. - do not have an idea if this is a good way. - """ - nf, nl, nnei, _ = hh.shape - # nf x nloc x nnei - normh = torch.linalg.norm(hh, dim=-1) - # nf x nloc - std = torch.std(normh, dim=-1) - # nf x nloc x nnei x 3 - hh = hh[:, :, :, :] / (1.0 + std[:, :, None, None]) - return hh - - -class Atten2Map(torch.nn.Module): - def __init__( - self, - ni: int, - nd: int, - nh: int, - has_gate: bool = False, # apply gate to attn map - smooth: bool = True, - attnw_shift: float = 20.0, - ): - super().__init__() - self.ni = ni - self.nd = nd - self.nh = nh - self.mapqk = SimpleLinear(ni, nd * 2 * nh, bias=False) # todo - self.has_gate = has_gate - self.smooth = smooth - self.attnw_shift = attnw_shift - - def forward( - self, - g2: torch.Tensor, # nb x nloc x nnei x ng2 - h2: torch.Tensor, # nb x nloc x nnei x 3 - nlist_mask: torch.Tensor, # nb x nloc x nnei - sw: torch.Tensor, # nb x nloc x nnei - ) -> torch.Tensor: - ( - nb, - nloc, - nnei, - _, - ) = g2.shape - nd, nh = self.nd, self.nh - # nb x nloc x nnei x nd x (nh x 2) - g2qk = self.mapqk(g2).view(nb, nloc, nnei, nd, nh * 2) - # nb x nloc x (nh x 2) x nnei x nd - g2qk = torch.permute(g2qk, (0, 1, 4, 2, 3)) - # nb x nloc x nh x nnei x nd - g2q, g2k = torch.split(g2qk, nh, dim=2) - # g2q = torch.nn.functional.normalize(g2q, dim=-1) - # g2k = torch.nn.functional.normalize(g2k, dim=-1) - # nb x nloc x nh x nnei x nnei - attnw = torch.matmul(g2q, torch.transpose(g2k, -1, -2)) / nd**0.5 - if self.has_gate: - gate = torch.matmul(h2, torch.transpose(h2, -1, -2)).unsqueeze(-3) - attnw = attnw * gate - # mask the attenmap, nb x nloc x 1 x 1 x nnei - attnw_mask = ~nlist_mask.unsqueeze(2).unsqueeze(2) - # mask the attenmap, nb x nloc x 1 x nnei x 1 - attnw_mask_c = ~nlist_mask.unsqueeze(2).unsqueeze(-1) - if self.smooth: - attnw = (attnw + self.attnw_shift) * sw[:, :, None, :, None] * sw[ - :, :, None, None, : - ] - self.attnw_shift - else: - attnw = attnw.masked_fill( - attnw_mask, - float("-inf"), - ) - attnw = torch.softmax(attnw, dim=-1) - attnw = attnw.masked_fill( - attnw_mask, - 0.0, - ) - # nb x nloc x nh x nnei x nnei - attnw = attnw.masked_fill( - attnw_mask_c, - 0.0, - ) - if self.smooth: - attnw = attnw * sw[:, :, None, :, None] * sw[:, :, None, None, :] - # nb x nloc x nnei x nnei - h2h2t = torch.matmul(h2, torch.transpose(h2, -1, -2)) / 3.0**0.5 - # nb x nloc x nh x nnei x nnei - ret = attnw * h2h2t[:, :, None, :, :] - # ret = torch.softmax(g2qk, dim=-1) - # nb x nloc x nnei x nnei x nh - ret = torch.permute(ret, (0, 1, 3, 4, 2)) - return ret - - -class Atten2MultiHeadApply(torch.nn.Module): - def __init__( - self, - ni: int, - nh: int, - ): - super().__init__() - self.ni = ni - self.nh = nh - self.mapv = SimpleLinear(ni, ni * nh, bias=False) - self.head_map = SimpleLinear(ni * nh, ni) - - def forward( - self, - AA: torch.Tensor, # nf x nloc x nnei x nnei x nh - g2: torch.Tensor, # nf x nloc x nnei x ng2 - ) -> torch.Tensor: - nf, nloc, nnei, ng2 = g2.shape - nh = self.nh - # nf x nloc x nnei x ng2 x nh - g2v = self.mapv(g2).view(nf, nloc, nnei, ng2, nh) - # nf x nloc x nh x nnei x ng2 - g2v = torch.permute(g2v, (0, 1, 4, 2, 3)) - # g2v = torch.nn.functional.normalize(g2v, dim=-1) - # nf x nloc x nh x nnei x nnei - AA = torch.permute(AA, (0, 1, 4, 2, 3)) - # nf x nloc x nh x nnei x ng2 - ret = torch.matmul(AA, g2v) - # nf x nloc x nnei x ng2 x nh - ret = torch.permute(ret, (0, 1, 3, 4, 2)).reshape(nf, nloc, nnei, (ng2 * nh)) - # nf x nloc x nnei x ng2 - return self.head_map(ret) - - -class Atten2EquiVarApply(torch.nn.Module): - def __init__( - self, - ni: int, - nh: int, - ): - super().__init__() - self.ni = ni - self.nh = nh - self.head_map = SimpleLinear(nh, 1, bias=False) - - def forward( - self, - AA: torch.Tensor, # nf x nloc x nnei x nnei x nh - h2: torch.Tensor, # nf x nloc x nnei x 3 - ) -> torch.Tensor: - nf, nloc, nnei, _ = h2.shape - nh = self.nh - # nf x nloc x nh x nnei x nnei - AA = torch.permute(AA, (0, 1, 4, 2, 3)) - h2m = torch.unsqueeze(h2, dim=2) - # nf x nloc x nh x nnei x 3 - h2m = torch.tile(h2m, [1, 1, nh, 1, 1]) - # nf x nloc x nh x nnei x 3 - ret = torch.matmul(AA, h2m) - # nf x nloc x nnei x 3 x nh - ret = torch.permute(ret, (0, 1, 3, 4, 2)).view(nf, nloc, nnei, 3, nh) - # nf x nloc x nnei x 3 - return torch.squeeze(self.head_map(ret), dim=-1) - - -class LocalAtten(torch.nn.Module): - def __init__( - self, - ni: int, - nd: int, - nh: int, - smooth: bool = True, - attnw_shift: float = 20.0, - ): - super().__init__() - self.ni = ni - self.nd = nd - self.nh = nh - self.mapq = SimpleLinear(ni, nd * 1 * nh, bias=False) - self.mapkv = SimpleLinear(ni, (nd + ni) * nh, bias=False) - self.head_map = SimpleLinear(ni * nh, ni) - self.smooth = smooth - self.attnw_shift = attnw_shift - - def forward( - self, - g1: torch.Tensor, # nb x nloc x ng1 - gg1: torch.Tensor, # nb x nloc x nnei x ng1 - nlist_mask: torch.Tensor, # nb x nloc x nnei - sw: torch.Tensor, # nb x nloc x nnei - ) -> torch.Tensor: - nb, nloc, nnei = nlist_mask.shape - ni, nd, nh = self.ni, self.nd, self.nh - assert ni == g1.shape[-1] - assert ni == gg1.shape[-1] - # nb x nloc x nd x nh - g1q = self.mapq(g1).view(nb, nloc, nd, nh) - # nb x nloc x nh x nd - g1q = torch.permute(g1q, (0, 1, 3, 2)) - # nb x nloc x nnei x (nd+ni) x nh - gg1kv = self.mapkv(gg1).view(nb, nloc, nnei, nd + ni, nh) - gg1kv = torch.permute(gg1kv, (0, 1, 4, 2, 3)) - # nb x nloc x nh x nnei x nd, nb x nloc x nh x nnei x ng1 - gg1k, gg1v = torch.split(gg1kv, [nd, ni], dim=-1) - - # nb x nloc x nh x 1 x nnei - attnw = torch.matmul(g1q.unsqueeze(-2), torch.transpose(gg1k, -1, -2)) / nd**0.5 - # nb x nloc x nh x nnei - attnw = attnw.squeeze(-2) - # mask the attenmap, nb x nloc x 1 x nnei - attnw_mask = ~nlist_mask.unsqueeze(-2) - # nb x nloc x nh x nnei - if self.smooth: - attnw = (attnw + self.attnw_shift) * sw.unsqueeze(-2) - self.attnw_shift - else: - attnw = attnw.masked_fill( - attnw_mask, - float("-inf"), - ) - attnw = torch.softmax(attnw, dim=-1) - attnw = attnw.masked_fill( - attnw_mask, - 0.0, - ) - if self.smooth: - attnw = attnw * sw.unsqueeze(-2) - - # nb x nloc x nh x ng1 - ret = ( - torch.matmul(attnw.unsqueeze(-2), gg1v).squeeze(-2).view(nb, nloc, nh * ni) - ) - # nb x nloc x ng1 - ret = self.head_map(ret) - return ret - - -class RepformerLayer(torch.nn.Module): - def __init__( - self, - rcut, - rcut_smth, - sel: int, - ntypes: int, - g1_dim=128, - g2_dim=16, - axis_neuron: int = 4, - update_chnnl_2: bool = True, - do_bn_mode: str = "no", - bn_momentum: float = 0.1, - update_g1_has_conv: bool = True, - update_g1_has_drrd: bool = True, - update_g1_has_grrg: bool = True, - update_g1_has_attn: bool = True, - update_g2_has_g1g1: bool = True, - update_g2_has_attn: bool = True, - update_h2: bool = False, - attn1_hidden: int = 64, - attn1_nhead: int = 4, - attn2_hidden: int = 16, - attn2_nhead: int = 4, - attn2_has_gate: bool = False, - activation_function: str = "tanh", - update_style: str = "res_avg", - set_davg_zero: bool = True, # TODO - smooth: bool = True, - ): - super().__init__() - self.epsilon = 1e-4 # protection of 1./nnei - self.rcut = rcut - self.rcut_smth = rcut_smth - self.ntypes = ntypes - sel = [sel] if isinstance(sel, int) else sel - self.nnei = sum(sel) - assert len(sel) == 1 - self.sel = torch.tensor(sel, device=env.DEVICE) # pylint: disable=no-explicit-dtype - self.sec = self.sel - self.axis_neuron = axis_neuron - self.set_davg_zero = set_davg_zero - self.do_bn_mode = do_bn_mode - self.bn_momentum = bn_momentum - self.act = ActivationFn(activation_function) - self.update_g1_has_grrg = update_g1_has_grrg - self.update_g1_has_drrd = update_g1_has_drrd - self.update_g1_has_conv = update_g1_has_conv - self.update_g1_has_attn = update_g1_has_attn - self.update_chnnl_2 = update_chnnl_2 - self.update_g2_has_g1g1 = update_g2_has_g1g1 if self.update_chnnl_2 else False - self.update_g2_has_attn = update_g2_has_attn if self.update_chnnl_2 else False - self.update_h2 = update_h2 if self.update_chnnl_2 else False - del update_g2_has_g1g1, update_g2_has_attn, update_h2 - self.update_style = update_style - self.smooth = smooth - self.g1_dim = g1_dim - self.g2_dim = g2_dim - - g1_in_dim = self.cal_1_dim(g1_dim, g2_dim, self.axis_neuron) - self.linear1 = SimpleLinear(g1_in_dim, g1_dim) - self.linear2 = None - self.proj_g1g2 = None - self.proj_g1g1g2 = None - self.attn2g_map = None - self.attn2_mh_apply = None - self.attn2_lm = None - self.attn2h_map = None - self.attn2_ev_apply = None - self.loc_attn = None - - if self.update_chnnl_2: - self.linear2 = SimpleLinear(g2_dim, g2_dim) - if self.update_g1_has_conv: - self.proj_g1g2 = SimpleLinear(g1_dim, g2_dim, bias=False) - if self.update_g2_has_g1g1: - self.proj_g1g1g2 = SimpleLinear(g1_dim, g2_dim, bias=False) - if self.update_g2_has_attn: - self.attn2g_map = Atten2Map( - g2_dim, attn2_hidden, attn2_nhead, attn2_has_gate, self.smooth - ) - self.attn2_mh_apply = Atten2MultiHeadApply(g2_dim, attn2_nhead) - self.attn2_lm = torch.nn.LayerNorm( - g2_dim, - elementwise_affine=True, - device=env.DEVICE, - dtype=env.GLOBAL_PT_FLOAT_PRECISION, - ) - if self.update_h2: - self.attn2h_map = Atten2Map( - g2_dim, attn2_hidden, attn2_nhead, attn2_has_gate, self.smooth - ) - self.attn2_ev_apply = Atten2EquiVarApply(g2_dim, attn2_nhead) - if self.update_g1_has_attn: - self.loc_attn = LocalAtten(g1_dim, attn1_hidden, attn1_nhead, self.smooth) - - if self.do_bn_mode == "uniform": - self.bn1 = self._bn_layer() - self.bn2 = self._bn_layer() - elif self.do_bn_mode == "component": - self.bn1 = self._bn_layer(nf=g1_dim) - self.bn2 = self._bn_layer(nf=g2_dim) - elif self.do_bn_mode == "no": - self.bn1, self.bn2 = None, None - else: - raise RuntimeError(f"unknown bn_mode {self.do_bn_mode}") - - def cal_1_dim(self, g1d: int, g2d: int, ax: int) -> int: - ret = g1d - if self.update_g1_has_grrg: - ret += g2d * ax - if self.update_g1_has_drrd: - ret += g1d * ax - if self.update_g1_has_conv: - ret += g2d - return ret - - def _update_h2( - self, - g2: torch.Tensor, - h2: torch.Tensor, - nlist_mask: torch.Tensor, - sw: torch.Tensor, - ) -> torch.Tensor: - assert self.attn2h_map is not None - assert self.attn2_ev_apply is not None - nb, nloc, nnei, _ = g2.shape - # # nb x nloc x nnei x nh2 - # h2_1 = self.attn2_ev_apply(AA, h2) - # h2_update.append(h2_1) - # nb x nloc x nnei x nnei x nh - AAh = self.attn2h_map(g2, h2, nlist_mask, sw) - # nb x nloc x nnei x nh2 - h2_1 = self.attn2_ev_apply(AAh, h2) - return h2_1 - - def _update_g1_conv( - self, - gg1: torch.Tensor, - g2: torch.Tensor, - nlist_mask: torch.Tensor, - sw: torch.Tensor, - ) -> torch.Tensor: - assert self.proj_g1g2 is not None - nb, nloc, nnei, _ = g2.shape - ng1 = gg1.shape[-1] - ng2 = g2.shape[-1] - # gg1 : nb x nloc x nnei x ng2 - gg1 = self.proj_g1g2(gg1).view(nb, nloc, nnei, ng2) - # nb x nloc x nnei x ng2 - gg1 = _apply_nlist_mask(gg1, nlist_mask) - if not self.smooth: - # normalized by number of neighbors, not smooth - # nb x nloc x 1 - invnnei = 1.0 / ( - self.epsilon + torch.sum(nlist_mask.type_as(gg1), dim=-1) - ).unsqueeze(-1) - else: - gg1 = _apply_switch(gg1, sw) - invnnei = (1.0 / float(nnei)) * torch.ones( - (nb, nloc, 1), dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=gg1.device - ) - # nb x nloc x ng2 - g1_11 = torch.sum(g2 * gg1, dim=2) * invnnei - return g1_11 - - def _cal_h2g2( - self, - g2: torch.Tensor, - h2: torch.Tensor, - nlist_mask: torch.Tensor, - sw: torch.Tensor, - ) -> torch.Tensor: - # g2: nf x nloc x nnei x ng2 - # h2: nf x nloc x nnei x 3 - # msk: nf x nloc x nnei - nb, nloc, nnei, _ = g2.shape - ng2 = g2.shape[-1] - # nb x nloc x nnei x ng2 - g2 = _apply_nlist_mask(g2, nlist_mask) - if not self.smooth: - # nb x nloc - invnnei = 1.0 / (self.epsilon + torch.sum(nlist_mask.type_as(g2), dim=-1)) - # nb x nloc x 1 x 1 - invnnei = invnnei.unsqueeze(-1).unsqueeze(-1) - else: - g2 = _apply_switch(g2, sw) - invnnei = (1.0 / float(nnei)) * torch.ones( - (nb, nloc, 1, 1), dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=g2.device - ) - # nb x nloc x 3 x ng2 - h2g2 = torch.matmul(torch.transpose(h2, -1, -2), g2) * invnnei - return h2g2 - - def _cal_grrg(self, h2g2: torch.Tensor) -> torch.Tensor: - # nb x nloc x 3 x ng2 - nb, nloc, _, ng2 = h2g2.shape - # nb x nloc x 3 x axis - h2g2m = torch.split(h2g2, self.axis_neuron, dim=-1)[0] - # nb x nloc x axis x ng2 - g1_13 = torch.matmul(torch.transpose(h2g2m, -1, -2), h2g2) / (3.0**1) - # nb x nloc x (axisxng2) - g1_13 = g1_13.view(nb, nloc, self.axis_neuron * ng2) - return g1_13 - - def _update_g1_grrg( - self, - g2: torch.Tensor, - h2: torch.Tensor, - nlist_mask: torch.Tensor, - sw: torch.Tensor, - ) -> torch.Tensor: - # g2: nf x nloc x nnei x ng2 - # h2: nf x nloc x nnei x 3 - # msk: nf x nloc x nnei - nb, nloc, nnei, _ = g2.shape - ng2 = g2.shape[-1] - # nb x nloc x 3 x ng2 - h2g2 = self._cal_h2g2(g2, h2, nlist_mask, sw) - # nb x nloc x (axisxng2) - g1_13 = self._cal_grrg(h2g2) - return g1_13 - - def _update_g2_g1g1( - self, - g1: torch.Tensor, # nb x nloc x ng1 - gg1: torch.Tensor, # nb x nloc x nnei x ng1 - nlist_mask: torch.Tensor, # nb x nloc x nnei - sw: torch.Tensor, # nb x nloc x nnei - ) -> torch.Tensor: - ret = g1.unsqueeze(-2) * gg1 - # nb x nloc x nnei x ng1 - ret = _apply_nlist_mask(ret, nlist_mask) - if self.smooth: - ret = _apply_switch(ret, sw) - return ret - - def _apply_bn( - self, - bn_number: int, - gg: torch.Tensor, - ): - if self.do_bn_mode == "uniform": - return self._apply_bn_uni(bn_number, gg) - elif self.do_bn_mode == "component": - return self._apply_bn_comp(bn_number, gg) - else: - return gg - - def _apply_nb_1(self, bn_number: int, gg: torch.Tensor) -> torch.Tensor: - nb, nl, nf = gg.shape - gg = gg.view([nb, 1, nl * nf]) - if bn_number == 1: - assert self.bn1 is not None - gg = self.bn1(gg) - else: - assert self.bn2 is not None - gg = self.bn2(gg) - return gg.view([nb, nl, nf]) - - def _apply_nb_2( - self, - bn_number: int, - gg: torch.Tensor, - ) -> torch.Tensor: - nb, nl, nnei, nf = gg.shape - gg = gg.view([nb, 1, nl * nnei * nf]) - if bn_number == 1: - assert self.bn1 is not None - gg = self.bn1(gg) - else: - assert self.bn2 is not None - gg = self.bn2(gg) - return gg.view([nb, nl, nnei, nf]) - - def _apply_bn_uni( - self, - bn_number: int, - gg: torch.Tensor, - mode: str = "1", - ) -> torch.Tensor: - if len(gg.shape) == 3: - return self._apply_nb_1(bn_number, gg) - elif len(gg.shape) == 4: - return self._apply_nb_2(bn_number, gg) - else: - raise RuntimeError(f"unsupported input shape {gg.shape}") - - def _apply_bn_comp( - self, - bn_number: int, - gg: torch.Tensor, - ) -> torch.Tensor: - ss = gg.shape - nf = ss[-1] - gg = gg.view([-1, nf]) - if bn_number == 1: - assert self.bn1 is not None - gg = self.bn1(gg).view(ss) - else: - assert self.bn2 is not None - gg = self.bn2(gg).view(ss) - return gg - - def forward( - self, - g1_ext: torch.Tensor, # nf x nall x ng1 - g2: torch.Tensor, # nf x nloc x nnei x ng2 - h2: torch.Tensor, # nf x nloc x nnei x 3 - nlist: torch.Tensor, # nf x nloc x nnei - nlist_mask: torch.Tensor, # nf x nloc x nnei - sw: torch.Tensor, # switch func, nf x nloc x nnei - ): - """ - Parameters - ---------- - g1_ext : nf x nall x ng1 extended single-atom chanel - g2 : nf x nloc x nnei x ng2 pair-atom channel, invariant - h2 : nf x nloc x nnei x 3 pair-atom channel, equivariant - nlist : nf x nloc x nnei neighbor list (padded neis are set to 0) - nlist_mask : nf x nloc x nnei masks of the neighbor list. real nei 1 otherwise 0 - sw : nf x nloc x nnei switch function - - Returns - ------- - g1: nf x nloc x ng1 updated single-atom chanel - g2: nf x nloc x nnei x ng2 updated pair-atom channel, invariant - h2: nf x nloc x nnei x 3 updated pair-atom channel, equivariant - """ - cal_gg1 = ( - self.update_g1_has_drrd - or self.update_g1_has_conv - or self.update_g1_has_attn - or self.update_g2_has_g1g1 - ) - - nb, nloc, nnei, _ = g2.shape - nall = g1_ext.shape[1] - g1, _ = torch.split(g1_ext, [nloc, nall - nloc], dim=1) - assert (nb, nloc) == g1.shape[:2] - assert (nb, nloc, nnei) == h2.shape[:3] - ng1 = g1.shape[-1] - ng2 = g2.shape[-1] - nh2 = h2.shape[-1] - - if self.bn1 is not None: - g1 = self._apply_bn(1, g1) - if self.bn2 is not None: - g2 = self._apply_bn(2, g2) - if self.update_h2: - h2 = _apply_h_norm(h2) - - g2_update: list[torch.Tensor] = [g2] - h2_update: list[torch.Tensor] = [h2] - g1_update: list[torch.Tensor] = [g1] - g1_mlp: list[torch.Tensor] = [g1] - - if cal_gg1: - gg1 = _make_nei_g1(g1_ext, nlist) - else: - gg1 = None - - if self.update_chnnl_2: - # nb x nloc x nnei x ng2 - assert self.linear2 is not None - g2_1 = self.act(self.linear2(g2)) - g2_update.append(g2_1) - - if self.update_g2_has_g1g1: - assert gg1 is not None - assert self.proj_g1g1g2 is not None - g2_update.append( - self.proj_g1g1g2(self._update_g2_g1g1(g1, gg1, nlist_mask, sw)) - ) - - if self.update_g2_has_attn: - assert self.attn2g_map is not None - assert self.attn2_mh_apply is not None - assert self.attn2_lm is not None - # nb x nloc x nnei x nnei x nh - AAg = self.attn2g_map(g2, h2, nlist_mask, sw) - # nb x nloc x nnei x ng2 - g2_2 = self.attn2_mh_apply(AAg, g2) - g2_2 = self.attn2_lm(g2_2) - g2_update.append(g2_2) - - if self.update_h2: - h2_update.append(self._update_h2(g2, h2, nlist_mask, sw)) - - if self.update_g1_has_conv: - assert gg1 is not None - g1_mlp.append(self._update_g1_conv(gg1, g2, nlist_mask, sw)) - - if self.update_g1_has_grrg: - g1_mlp.append(self._update_g1_grrg(g2, h2, nlist_mask, sw)) - - if self.update_g1_has_drrd: - assert gg1 is not None - g1_mlp.append(self._update_g1_grrg(gg1, h2, nlist_mask, sw)) - - # nb x nloc x [ng1+ng2+(axisxng2)+(axisxng1)] - # conv grrg drrd - g1_1 = self.act(self.linear1(torch.cat(g1_mlp, dim=-1))) - g1_update.append(g1_1) - - if self.update_g1_has_attn: - assert gg1 is not None - assert self.loc_attn is not None - g1_update.append(self.loc_attn(g1, gg1, nlist_mask, sw)) - - # update - if self.update_chnnl_2: - g2_new = self.list_update(g2_update) - h2_new = self.list_update(h2_update) - else: - g2_new, h2_new = g2, h2 - g1_new = self.list_update(g1_update) - return g1_new, g2_new, h2_new - - @torch.jit.export - def list_update_res_avg( - self, - update_list: list[torch.Tensor], - ) -> torch.Tensor: - nitem = len(update_list) - uu = update_list[0] - for ii in range(1, nitem): - uu = uu + update_list[ii] - return uu / (float(nitem) ** 0.5) - - @torch.jit.export - def list_update_res_incr(self, update_list: list[torch.Tensor]) -> torch.Tensor: - nitem = len(update_list) - uu = update_list[0] - scale = 1.0 / (float(nitem - 1) ** 0.5) if nitem > 1 else 0.0 - for ii in range(1, nitem): - uu = uu + scale * update_list[ii] - return uu - - @torch.jit.export - def list_update(self, update_list: list[torch.Tensor]) -> torch.Tensor: - if self.update_style == "res_avg": - return self.list_update_res_avg(update_list) - elif self.update_style == "res_incr": - return self.list_update_res_incr(update_list) - else: - raise RuntimeError(f"unknown update style {self.update_style}") - - def _bn_layer( - self, - nf: int = 1, - ) -> Callable: - return torch.nn.BatchNorm1d( - nf, - eps=1e-5, - momentum=self.bn_momentum, - affine=False, - track_running_stats=True, - device=env.DEVICE, - dtype=env.GLOBAL_PT_FLOAT_PRECISION, - ) diff --git a/deepmd/pt/model/descriptor/repformers.py b/deepmd/pt/model/descriptor/repformers.py index 64965825a0..ad4ead4d74 100644 --- a/deepmd/pt/model/descriptor/repformers.py +++ b/deepmd/pt/model/descriptor/repformers.py @@ -41,7 +41,6 @@ from .repformer_layer import ( RepformerLayer, ) -from .repformer_layer_old_impl import RepformerLayer as RepformerLayerOld if not hasattr(torch.ops.deepmd, "border_op"): @@ -106,7 +105,6 @@ def __init__( use_sqrt_nnei: bool = True, g1_out_conv: bool = True, g1_out_mlp: bool = True, - old_impl: bool = False, ): r""" The repformer descriptor block. @@ -240,78 +238,48 @@ def __init__( self.ln_eps = ln_eps self.epsilon = 1e-4 self.seed = seed - self.old_impl = old_impl self.g2_embd = MLPLayer( 1, self.g2_dim, precision=precision, seed=child_seed(seed, 0) ) layers = [] for ii in range(nlayers): - if self.old_impl: - layers.append( - RepformerLayerOld( - self.rcut, - self.rcut_smth, - self.sel, - self.ntypes, - self.g1_dim, - self.g2_dim, - axis_neuron=self.axis_neuron, - update_chnnl_2=(ii != nlayers - 1), - update_g1_has_conv=self.update_g1_has_conv, - update_g1_has_drrd=self.update_g1_has_drrd, - update_g1_has_grrg=self.update_g1_has_grrg, - update_g1_has_attn=self.update_g1_has_attn, - update_g2_has_g1g1=self.update_g2_has_g1g1, - update_g2_has_attn=self.update_g2_has_attn, - update_h2=self.update_h2, - attn1_hidden=self.attn1_hidden, - attn1_nhead=self.attn1_nhead, - attn2_has_gate=self.attn2_has_gate, - attn2_hidden=self.attn2_hidden, - attn2_nhead=self.attn2_nhead, - activation_function=self.activation_function, - update_style=self.update_style, - smooth=self.smooth, - ) - ) - else: - layers.append( - RepformerLayer( - self.rcut, - self.rcut_smth, - self.sel, - self.ntypes, - self.g1_dim, - self.g2_dim, - axis_neuron=self.axis_neuron, - update_chnnl_2=(ii != nlayers - 1), - update_g1_has_conv=self.update_g1_has_conv, - update_g1_has_drrd=self.update_g1_has_drrd, - update_g1_has_grrg=self.update_g1_has_grrg, - update_g1_has_attn=self.update_g1_has_attn, - update_g2_has_g1g1=self.update_g2_has_g1g1, - update_g2_has_attn=self.update_g2_has_attn, - update_h2=self.update_h2, - attn1_hidden=self.attn1_hidden, - attn1_nhead=self.attn1_nhead, - attn2_has_gate=self.attn2_has_gate, - attn2_hidden=self.attn2_hidden, - attn2_nhead=self.attn2_nhead, - activation_function=self.activation_function, - update_style=self.update_style, - update_residual=self.update_residual, - update_residual_init=self.update_residual_init, - smooth=self.smooth, - trainable_ln=self.trainable_ln, - ln_eps=self.ln_eps, - precision=precision, - use_sqrt_nnei=self.use_sqrt_nnei, - g1_out_conv=self.g1_out_conv, - g1_out_mlp=self.g1_out_mlp, - seed=child_seed(child_seed(seed, 1), ii), - ) + layers.append( + RepformerLayer( + self.rcut, + self.rcut_smth, + self.sel, + self.ntypes, + self.g1_dim, + self.g2_dim, + axis_neuron=self.axis_neuron, + update_chnnl_2=(ii != nlayers - 1), + update_g1_has_conv=self.update_g1_has_conv, + update_g1_has_drrd=self.update_g1_has_drrd, + update_g1_has_grrg=self.update_g1_has_grrg, + update_g1_has_attn=self.update_g1_has_attn, + update_g2_has_g1g1=self.update_g2_has_g1g1, + update_g2_has_attn=self.update_g2_has_attn, + update_h2=self.update_h2, + attn1_hidden=self.attn1_hidden, + attn1_nhead=self.attn1_nhead, + attn2_has_gate=self.attn2_has_gate, + attn2_hidden=self.attn2_hidden, + attn2_nhead=self.attn2_nhead, + activation_function=self.activation_function, + update_style=self.update_style, + update_residual=self.update_residual, + update_residual_init=self.update_residual_init, + smooth=self.smooth, + trainable_ln=self.trainable_ln, + ln_eps=self.ln_eps, + precision=precision, + use_sqrt_nnei=self.use_sqrt_nnei, + g1_out_conv=self.g1_out_conv, + g1_out_mlp=self.g1_out_mlp, + seed=child_seed(child_seed(seed, 1), ii), ) + ) self.layers = torch.nn.ModuleList(layers) wanted_shape = (self.ntypes, self.nnei, 4) diff --git a/deepmd/pt/model/descriptor/se_a.py b/deepmd/pt/model/descriptor/se_a.py index 1b51acfa21..e939a2541b 100644 --- a/deepmd/pt/model/descriptor/se_a.py +++ b/deepmd/pt/model/descriptor/se_a.py @@ -55,9 +55,6 @@ EmbeddingNet, NetworkCollection, ) -from deepmd.pt.model.network.network import ( - TypeFilter, -) from deepmd.pt.utils.exclude_mask import ( PairExcludeMask, ) @@ -83,7 +80,6 @@ def __init__( resnet_dt: bool = False, exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, - old_impl: bool = False, type_one_side: bool = True, trainable: bool = True, seed: Optional[Union[int, list[int]]] = None, @@ -109,7 +105,6 @@ def __init__( resnet_dt=resnet_dt, exclude_types=exclude_types, env_protection=env_protection, - old_impl=old_impl, type_one_side=type_one_side, trainable=trainable, seed=seed, @@ -385,7 +380,6 @@ def __init__( resnet_dt: bool = False, exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, - old_impl: bool = False, type_one_side: bool = True, trainable: bool = True, seed: Optional[Union[int, list[int]]] = None, @@ -411,7 +405,6 @@ def __init__( self.precision = precision self.prec = PRECISION_DICT[self.precision] self.resnet_dt = resnet_dt - self.old_impl = old_impl self.env_protection = env_protection self.ntypes = len(sel) self.type_one_side = type_one_side @@ -431,39 +424,23 @@ def __init__( stddev = torch.ones(wanted_shape, dtype=self.prec, device=env.DEVICE) self.register_buffer("mean", mean) self.register_buffer("stddev", stddev) - self.filter_layers_old = None - self.filter_layers = None - - if self.old_impl: - if not self.type_one_side: - raise ValueError( - "The old implementation does not support type_one_side=False." - ) - filter_layers = [] - # TODO: remove - start_index = 0 - for type_i in range(self.ntypes): - one = TypeFilter(start_index, sel[type_i], self.filter_neuron) - filter_layers.append(one) - start_index += sel[type_i] - self.filter_layers_old = torch.nn.ModuleList(filter_layers) - else: - ndim = 1 if self.type_one_side else 2 - filter_layers = NetworkCollection( - ndim=ndim, ntypes=len(sel), network_type="embedding_network" + + ndim = 1 if self.type_one_side else 2 + filter_layers = NetworkCollection( + ndim=ndim, ntypes=len(sel), network_type="embedding_network" + ) + for ii, embedding_idx in enumerate( + itertools.product(range(self.ntypes), repeat=ndim) + ): + filter_layers[embedding_idx] = EmbeddingNet( + 1, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + seed=child_seed(self.seed, ii), ) - for ii, embedding_idx in enumerate( - itertools.product(range(self.ntypes), repeat=ndim) - ): - filter_layers[embedding_idx] = EmbeddingNet( - 1, - self.filter_neuron, - activation_function=self.activation_function, - precision=self.precision, - resnet_dt=self.resnet_dt, - seed=child_seed(self.seed, ii), - ) - self.filter_layers = filter_layers + self.filter_layers = filter_layers self.stats = None # set trainable for param in self.parameters(): @@ -632,66 +609,49 @@ def forward( protection=self.env_protection, ) - if self.old_impl: - assert self.filter_layers_old is not None - dmatrix = dmatrix.view( - -1, self.ndescrpt - ) # shape is [nframes*nall, self.ndescrpt] - xyz_scatter = torch.empty( # pylint: disable=no-explicit-dtype - 1, - device=env.DEVICE, - ) - ret = self.filter_layers_old[0](dmatrix) - xyz_scatter = ret - for ii, transform in enumerate(self.filter_layers_old[1:]): - # shape is [nframes*nall, 4, self.filter_neuron[-1]] - ret = transform.forward(dmatrix) - xyz_scatter = xyz_scatter + ret - else: - assert self.filter_layers is not None - dmatrix = dmatrix.view(-1, self.nnei, 4) - dmatrix = dmatrix.to(dtype=self.prec) - nfnl = dmatrix.shape[0] - # pre-allocate a shape to pass jit - xyz_scatter = torch.zeros( - [nfnl, 4, self.filter_neuron[-1]], - dtype=self.prec, - device=extended_coord.device, - ) - # nfnl x nnei - exclude_mask = self.emask(nlist, extended_atype).view(nfnl, self.nnei) - for embedding_idx, ll in enumerate(self.filter_layers.networks): - if self.type_one_side: - ii = embedding_idx - # torch.jit is not happy with slice(None) - # ti_mask = torch.ones(nfnl, dtype=torch.bool, device=dmatrix.device) - # applying a mask seems to cause performance degradation - ti_mask = None - else: - # ti: center atom type, ii: neighbor type... - ii = embedding_idx // self.ntypes - ti = embedding_idx % self.ntypes - ti_mask = atype.ravel().eq(ti) - # nfnl x nt - if ti_mask is not None: - mm = exclude_mask[ti_mask, self.sec[ii] : self.sec[ii + 1]] - else: - mm = exclude_mask[:, self.sec[ii] : self.sec[ii + 1]] - # nfnl x nt x 4 - if ti_mask is not None: - rr = dmatrix[ti_mask, self.sec[ii] : self.sec[ii + 1], :] - else: - rr = dmatrix[:, self.sec[ii] : self.sec[ii + 1], :] - rr = rr * mm[:, :, None] - ss = rr[:, :, :1] - # nfnl x nt x ng - gg = ll.forward(ss) - # nfnl x 4 x ng - gr = torch.matmul(rr.permute(0, 2, 1), gg) - if ti_mask is not None: - xyz_scatter[ti_mask] += gr - else: - xyz_scatter += gr + dmatrix = dmatrix.view(-1, self.nnei, 4) + dmatrix = dmatrix.to(dtype=self.prec) + nfnl = dmatrix.shape[0] + # pre-allocate a shape to pass jit + xyz_scatter = torch.zeros( + [nfnl, 4, self.filter_neuron[-1]], + dtype=self.prec, + device=extended_coord.device, + ) + # nfnl x nnei + exclude_mask = self.emask(nlist, extended_atype).view(nfnl, self.nnei) + for embedding_idx, ll in enumerate(self.filter_layers.networks): + if self.type_one_side: + ii = embedding_idx + # torch.jit is not happy with slice(None) + # ti_mask = torch.ones(nfnl, dtype=torch.bool, device=dmatrix.device) + # applying a mask seems to cause performance degradation + ti_mask = None + else: + # ti: center atom type, ii: neighbor type... + ii = embedding_idx // self.ntypes + ti = embedding_idx % self.ntypes + ti_mask = atype.ravel().eq(ti) + # nfnl x nt + if ti_mask is not None: + mm = exclude_mask[ti_mask, self.sec[ii] : self.sec[ii + 1]] + else: + mm = exclude_mask[:, self.sec[ii] : self.sec[ii + 1]] + # nfnl x nt x 4 + if ti_mask is not None: + rr = dmatrix[ti_mask, self.sec[ii] : self.sec[ii + 1], :] + else: + rr = dmatrix[:, self.sec[ii] : self.sec[ii + 1], :] + rr = rr * mm[:, :, None] + ss = rr[:, :, :1] + # nfnl x nt x ng + gg = ll.forward(ss) + # nfnl x 4 x ng + gr = torch.matmul(rr.permute(0, 2, 1), gg) + if ti_mask is not None: + xyz_scatter[ti_mask] += gr + else: + xyz_scatter += gr xyz_scatter /= self.nnei xyz_scatter_1 = xyz_scatter.permute(0, 2, 1) diff --git a/deepmd/pt/model/descriptor/se_atten.py b/deepmd/pt/model/descriptor/se_atten.py index c760f7330b..c028230e9b 100644 --- a/deepmd/pt/model/descriptor/se_atten.py +++ b/deepmd/pt/model/descriptor/se_atten.py @@ -26,10 +26,6 @@ MLPLayer, NetworkCollection, ) -from deepmd.pt.model.network.network import ( - NeighborWiseAttention, - TypeFilter, -) from deepmd.pt.utils import ( env, ) @@ -85,7 +81,6 @@ def __init__( ln_eps: Optional[float] = 1e-5, seed: Optional[Union[int, list[int]]] = None, type: Optional[str] = None, - old_impl: bool = False, ): r"""Construct an embedding net of type `se_atten`. @@ -182,7 +177,6 @@ def __init__( if ln_eps is None: ln_eps = 1e-5 self.ln_eps = ln_eps - self.old_impl = old_impl if isinstance(sel, int): sel = [sel] @@ -195,40 +189,22 @@ def __init__( self.ndescrpt = self.nnei * 4 # order matters, placed after the assignment of self.ntypes self.reinit_exclude(exclude_types) - if self.old_impl: - assert self.tebd_input_mode in [ - "concat" - ], "Old implementation does not support tebd_input_mode != 'concat'." - self.dpa1_attention = NeighborWiseAttention( - self.attn_layer, - self.nnei, - self.filter_neuron[-1], - self.attn_dim, - dotr=self.attn_dotr, - do_mask=self.attn_mask, - activation=self.activation_function, - scaling_factor=self.scaling_factor, - normalize=self.normalize, - temperature=self.temperature, - smooth=self.smooth, - ) - else: - self.dpa1_attention = NeighborGatedAttention( - self.attn_layer, - self.nnei, - self.filter_neuron[-1], - self.attn_dim, - dotr=self.attn_dotr, - do_mask=self.attn_mask, - scaling_factor=self.scaling_factor, - normalize=self.normalize, - temperature=self.temperature, - trainable_ln=self.trainable_ln, - ln_eps=self.ln_eps, - smooth=self.smooth, - precision=self.precision, - seed=child_seed(self.seed, 0), - ) + self.dpa1_attention = NeighborGatedAttention( + self.attn_layer, + self.nnei, + self.filter_neuron[-1], + self.attn_dim, + dotr=self.attn_dotr, + do_mask=self.attn_mask, + scaling_factor=self.scaling_factor, + normalize=self.normalize, + temperature=self.temperature, + trainable_ln=self.trainable_ln, + ln_eps=self.ln_eps, + smooth=self.smooth, + precision=self.precision, + seed=child_seed(self.seed, 0), + ) wanted_shape = (self.ntypes, self.nnei, 4) mean = torch.zeros( @@ -245,48 +221,32 @@ def __init__( else: self.embd_input_dim = 1 - self.filter_layers_old = None - self.filter_layers = None self.filter_layers_strip = None - if self.old_impl: - filter_layers = [] - one = TypeFilter( - 0, - self.nnei, - self.filter_neuron, - return_G=True, - tebd_dim=self.tebd_dim, - use_tebd=True, - tebd_mode=self.tebd_input_mode, - ) - filter_layers.append(one) - self.filter_layers_old = torch.nn.ModuleList(filter_layers) - else: - filter_layers = NetworkCollection( + filter_layers = NetworkCollection( + ndim=0, ntypes=self.ntypes, network_type="embedding_network" + ) + filter_layers[0] = EmbeddingNet( + self.embd_input_dim, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + seed=child_seed(self.seed, 1), + ) + self.filter_layers = filter_layers + if self.tebd_input_mode in ["strip"]: + filter_layers_strip = NetworkCollection( ndim=0, ntypes=self.ntypes, network_type="embedding_network" ) - filter_layers[0] = EmbeddingNet( - self.embd_input_dim, + filter_layers_strip[0] = EmbeddingNet( + self.tebd_dim_input, self.filter_neuron, activation_function=self.activation_function, precision=self.precision, resnet_dt=self.resnet_dt, - seed=child_seed(self.seed, 1), + seed=child_seed(self.seed, 2), ) - self.filter_layers = filter_layers - if self.tebd_input_mode in ["strip"]: - filter_layers_strip = NetworkCollection( - ndim=0, ntypes=self.ntypes, network_type="embedding_network" - ) - filter_layers_strip[0] = EmbeddingNet( - self.tebd_dim_input, - self.filter_neuron, - activation_function=self.activation_function, - precision=self.precision, - resnet_dt=self.resnet_dt, - seed=child_seed(self.seed, 2), - ) - self.filter_layers_strip = filter_layers_strip + self.filter_layers_strip = filter_layers_strip self.stats = None def get_rcut(self) -> float: @@ -500,75 +460,51 @@ def forward( sw = sw.masked_fill(~nlist_mask, 0.0) # (nb x nloc) x nnei exclude_mask = exclude_mask.view(nb * nloc, nnei) - if self.old_impl: - assert self.filter_layers_old is not None - dmatrix = dmatrix.view( - -1, self.ndescrpt - ) # shape is [nframes*nall, self.ndescrpt] - gg = self.filter_layers_old[0]( - dmatrix, - atype_tebd=atype_tebd_nnei, - nlist_tebd=atype_tebd_nlist, - ) # shape is [nframes*nall, self.neei, out_size] - input_r = torch.nn.functional.normalize( - dmatrix.reshape(-1, self.nnei, 4)[:, :, 1:4], dim=-1 - ) - gg = self.dpa1_attention( - gg, nlist_mask, input_r=input_r, sw=sw - ) # shape is [nframes*nloc, self.neei, out_size] - inputs_reshape = dmatrix.view(-1, self.nnei, 4).permute( - 0, 2, 1 - ) # shape is [nframes*natoms[0], 4, self.neei] - xyz_scatter = torch.matmul( - inputs_reshape, gg - ) # shape is [nframes*natoms[0], 4, out_size] - else: - assert self.filter_layers is not None - # nfnl x nnei x 4 - dmatrix = dmatrix.view(-1, self.nnei, 4) - nfnl = dmatrix.shape[0] - # nfnl x nnei x 4 - rr = dmatrix - rr = rr * exclude_mask[:, :, None] - ss = rr[:, :, :1] - nlist_tebd = atype_tebd_nlist.reshape(nfnl, nnei, self.tebd_dim) - atype_tebd = atype_tebd_nnei.reshape(nfnl, nnei, self.tebd_dim) - if self.tebd_input_mode in ["concat"]: - if not self.type_one_side: - # nfnl x nnei x (1 + tebd_dim * 2) - ss = torch.concat([ss, nlist_tebd, atype_tebd], dim=2) - else: - # nfnl x nnei x (1 + tebd_dim) - ss = torch.concat([ss, nlist_tebd], dim=2) - # nfnl x nnei x ng - gg = self.filter_layers.networks[0](ss) - elif self.tebd_input_mode in ["strip"]: - # nfnl x nnei x ng - gg_s = self.filter_layers.networks[0](ss) - assert self.filter_layers_strip is not None - if not self.type_one_side: - # nfnl x nnei x (tebd_dim * 2) - tt = torch.concat([nlist_tebd, atype_tebd], dim=2) - else: - # nfnl x nnei x tebd_dim - tt = nlist_tebd - # nfnl x nnei x ng - gg_t = self.filter_layers_strip.networks[0](tt) - if self.smooth: - gg_t = gg_t * sw.reshape(-1, self.nnei, 1) - # nfnl x nnei x ng - gg = gg_s * gg_t + gg_s + # nfnl x nnei x 4 + dmatrix = dmatrix.view(-1, self.nnei, 4) + nfnl = dmatrix.shape[0] + # nfnl x nnei x 4 + rr = dmatrix + rr = rr * exclude_mask[:, :, None] + ss = rr[:, :, :1] + nlist_tebd = atype_tebd_nlist.reshape(nfnl, nnei, self.tebd_dim) + atype_tebd = atype_tebd_nnei.reshape(nfnl, nnei, self.tebd_dim) + if self.tebd_input_mode in ["concat"]: + if not self.type_one_side: + # nfnl x nnei x (1 + tebd_dim * 2) + ss = torch.concat([ss, nlist_tebd, atype_tebd], dim=2) + else: + # nfnl x nnei x (1 + tebd_dim) + ss = torch.concat([ss, nlist_tebd], dim=2) + # nfnl x nnei x ng + gg = self.filter_layers.networks[0](ss) + elif self.tebd_input_mode in ["strip"]: + # nfnl x nnei x ng + gg_s = self.filter_layers.networks[0](ss) + assert self.filter_layers_strip is not None + if not self.type_one_side: + # nfnl x nnei x (tebd_dim * 2) + tt = torch.concat([nlist_tebd, atype_tebd], dim=2) else: - raise NotImplementedError + # nfnl x nnei x tebd_dim + tt = nlist_tebd + # nfnl x nnei x ng + gg_t = self.filter_layers_strip.networks[0](tt) + if self.smooth: + gg_t = gg_t * sw.reshape(-1, self.nnei, 1) + # nfnl x nnei x ng + gg = gg_s * gg_t + gg_s + else: + raise NotImplementedError - input_r = torch.nn.functional.normalize( - rr.reshape(-1, self.nnei, 4)[:, :, 1:4], dim=-1 - ) - gg = self.dpa1_attention( - gg, nlist_mask, input_r=input_r, sw=sw - ) # shape is [nframes*nloc, self.neei, out_size] - # nfnl x 4 x ng - xyz_scatter = torch.matmul(rr.permute(0, 2, 1), gg) + input_r = torch.nn.functional.normalize( + rr.reshape(-1, self.nnei, 4)[:, :, 1:4], dim=-1 + ) + gg = self.dpa1_attention( + gg, nlist_mask, input_r=input_r, sw=sw + ) # shape is [nframes*nloc, self.neei, out_size] + # nfnl x 4 x ng + xyz_scatter = torch.matmul(rr.permute(0, 2, 1), gg) xyz_scatter = xyz_scatter / self.nnei xyz_scatter_1 = xyz_scatter.permute(0, 2, 1) rot_mat = xyz_scatter_1[:, :, 1:4] diff --git a/deepmd/pt/model/descriptor/se_atten_v2.py b/deepmd/pt/model/descriptor/se_atten_v2.py index f73ff255e6..11d783261e 100644 --- a/deepmd/pt/model/descriptor/se_atten_v2.py +++ b/deepmd/pt/model/descriptor/se_atten_v2.py @@ -71,7 +71,6 @@ def __init__( # not implemented spin=None, type: Optional[str] = None, - old_impl: bool = False, ) -> None: r"""Construct smooth version of embedding net of type `se_atten_v2`. @@ -191,7 +190,6 @@ def __init__( # not implemented spin=spin, type=type, - old_impl=old_impl, ) def serialize(self) -> dict: diff --git a/deepmd/pt/model/descriptor/se_r.py b/deepmd/pt/model/descriptor/se_r.py index b873ee20b8..e82bb23dac 100644 --- a/deepmd/pt/model/descriptor/se_r.py +++ b/deepmd/pt/model/descriptor/se_r.py @@ -68,7 +68,6 @@ def __init__( resnet_dt: bool = False, exclude_types: list[tuple[int, int]] = [], env_protection: float = 0.0, - old_impl: bool = False, trainable: bool = True, seed: Optional[Union[int, list[int]]] = None, type_map: Optional[list[str]] = None, @@ -84,7 +83,6 @@ def __init__( self.precision = precision self.prec = PRECISION_DICT[self.precision] self.resnet_dt = resnet_dt - self.old_impl = False # this does not support old implementation. self.exclude_types = exclude_types self.ntypes = len(sel) self.type_map = type_map diff --git a/deepmd/pt/model/network/network.py b/deepmd/pt/model/network/network.py index ef50274b03..12e1eabf22 100644 --- a/deepmd/pt/model/network/network.py +++ b/deepmd/pt/model/network/network.py @@ -26,10 +26,6 @@ except ImportError: from torch.jit import Final -from functools import ( - partial, -) - import torch.utils.checkpoint from deepmd.dpmodel.utils.type_embed import ( @@ -48,247 +44,6 @@ def Tensor(*shape): return torch.empty(shape, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) -class Dropout(nn.Module): - def __init__(self, p): - super().__init__() - self.p = p - - def forward(self, x, inplace: bool = False): - if self.p > 0 and self.training: - return F.dropout(x, p=self.p, training=True, inplace=inplace) - else: - return x - - -class Identity(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - return x - - -class DropPath(torch.nn.Module): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" - - def __init__(self, prob=None): - super().__init__() - self.drop_prob = prob - - def forward(self, x): - if self.drop_prob == 0.0 or not self.training: - return x - keep_prob = 1 - self.drop_prob - shape = (x.shape[0],) + (1,) * ( - x.ndim - 1 - ) # work with diff dim tensors, not just 2D ConvNets - random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) - random_tensor.floor_() # binarize - output = x.div(keep_prob) * random_tensor - return output - - def extra_repr(self) -> str: - return f"prob={self.drop_prob}" - - -def softmax_dropout( - input_x, dropout_prob, is_training=True, mask=None, bias=None, inplace=True -): - input_x = input_x.contiguous() - if not inplace: - input_x = input_x.clone() - if mask is not None: - input_x += mask - if bias is not None: - input_x += bias - return F.dropout(F.softmax(input_x, dim=-1), p=dropout_prob, training=is_training) - - -def checkpoint_sequential( - functions, - input_x, - enabled=True, -): - def wrap_tuple(a): - return (a,) if type(a) is not tuple else a - - def exec(func, a): - return wrap_tuple(func(*a)) - - def get_wrap_exec(func): - def wrap_exec(*a): - return exec(func, a) - - return wrap_exec - - input_x = wrap_tuple(input_x) - - is_grad_enabled = torch.is_grad_enabled() - - if enabled and is_grad_enabled: - for func in functions: - input_x = torch.utils.checkpoint.checkpoint(get_wrap_exec(func), *input_x) - else: - for func in functions: - input_x = exec(func, input_x) - return input_x - - -class ResidualLinear(nn.Module): - resnet: Final[int] - - def __init__(self, num_in, num_out, bavg=0.0, stddev=1.0, resnet_dt=False): - """Construct a residual linear layer. - - Args: - - num_in: Width of input tensor. - - num_out: Width of output tensor. - - resnet_dt: Using time-step in the ResNet construction. - """ - super().__init__() - self.num_in = num_in - self.num_out = num_out - self.resnet = resnet_dt - - self.matrix = nn.Parameter(data=Tensor(num_in, num_out)) - nn.init.normal_(self.matrix.data, std=stddev / np.sqrt(num_out + num_in)) - self.bias = nn.Parameter(data=Tensor(1, num_out)) - nn.init.normal_(self.bias.data, mean=bavg, std=stddev) - if self.resnet: - self.idt = nn.Parameter(data=Tensor(1, num_out)) - nn.init.normal_(self.idt.data, mean=1.0, std=0.001) - - def forward(self, inputs): - """Return X ?+ X*W+b.""" - xw_plus_b = torch.matmul(inputs, self.matrix) + self.bias - hidden = torch.tanh(xw_plus_b) - if self.resnet: - hidden = hidden * self.idt - if self.num_in == self.num_out: - return inputs + hidden - elif self.num_in * 2 == self.num_out: - return torch.cat([inputs, inputs], dim=1) + hidden - else: - return hidden - - -class TypeFilter(nn.Module): - use_tebd: Final[bool] - tebd_mode: Final[str] - - def __init__( - self, - offset, - length, - neuron, - return_G=False, - tebd_dim=0, - use_tebd=False, - tebd_mode="concat", - ): - """Construct a filter on the given element as neighbor. - - Args: - - offset: Element offset in the descriptor matrix. - - length: Atom count of this element. - - neuron: Number of neurons in each hidden layers of the embedding net. - """ - super().__init__() - self.offset = offset - self.length = length - self.tebd_dim = tebd_dim - self.use_tebd = use_tebd - self.tebd_mode = tebd_mode - supported_tebd_mode = ["concat", "dot", "dot_residual_s", "dot_residual_t"] - assert ( - tebd_mode in supported_tebd_mode - ), f"Unknown tebd_mode {tebd_mode}! Supported are {supported_tebd_mode}." - if use_tebd and tebd_mode == "concat": - self.neuron = [1 + tebd_dim * 2, *neuron] - else: - self.neuron = [1, *neuron] - - deep_layers = [] - for ii in range(1, len(self.neuron)): - one = ResidualLinear(self.neuron[ii - 1], self.neuron[ii]) - deep_layers.append(one) - self.deep_layers = nn.ModuleList(deep_layers) - - deep_layers_t = [] - if use_tebd and tebd_mode in ["dot", "dot_residual_s", "dot_residual_t"]: - self.neuron_t = [tebd_dim * 2, *neuron] - for ii in range(1, len(self.neuron_t)): - one = ResidualLinear(self.neuron_t[ii - 1], self.neuron_t[ii]) - deep_layers_t.append(one) - self.deep_layers_t = nn.ModuleList(deep_layers_t) - - self.return_G = return_G - - def forward( - self, - inputs, - atype_tebd: Optional[torch.Tensor] = None, - nlist_tebd: Optional[torch.Tensor] = None, - ): - """Calculate decoded embedding for each atom. - - Args: - - inputs: Descriptor matrix. Its shape is [nframes*natoms[0], len_descriptor]. - - Returns - ------- - - `torch.Tensor`: Embedding contributed by me. Its shape is [nframes*natoms[0], 4, self.neuron[-1]]. - """ - inputs_i = inputs[:, self.offset * 4 : (self.offset + self.length) * 4] - inputs_reshape = inputs_i.reshape( - -1, 4 - ) # shape is [nframes*natoms[0]*self.length, 4] - xyz_scatter = inputs_reshape[:, 0:1] - - # concat the tebd as input - if self.use_tebd and self.tebd_mode == "concat": - assert nlist_tebd is not None and atype_tebd is not None - nlist_tebd = nlist_tebd.reshape(-1, self.tebd_dim) - atype_tebd = atype_tebd.reshape(-1, self.tebd_dim) - # [nframes * nloc * nnei, 1 + tebd_dim * 2] - xyz_scatter = torch.concat([xyz_scatter, nlist_tebd, atype_tebd], dim=1) - - for linear in self.deep_layers: - xyz_scatter = linear(xyz_scatter) - # [nframes * nloc * nnei, out_size] - - # dot the tebd output - if self.use_tebd and self.tebd_mode in [ - "dot", - "dot_residual_s", - "dot_residual_t", - ]: - assert nlist_tebd is not None and atype_tebd is not None - nlist_tebd = nlist_tebd.reshape(-1, self.tebd_dim) - atype_tebd = atype_tebd.reshape(-1, self.tebd_dim) - # [nframes * nloc * nnei, tebd_dim * 2] - two_side_tebd = torch.concat([nlist_tebd, atype_tebd], dim=1) - for linear in self.deep_layers_t: - two_side_tebd = linear(two_side_tebd) - # [nframes * nloc * nnei, out_size] - if self.tebd_mode == "dot": - xyz_scatter = xyz_scatter * two_side_tebd - elif self.tebd_mode == "dot_residual_s": - xyz_scatter = xyz_scatter * two_side_tebd + xyz_scatter - elif self.tebd_mode == "dot_residual_t": - xyz_scatter = xyz_scatter * two_side_tebd + two_side_tebd - - xyz_scatter = xyz_scatter.view( - -1, self.length, self.neuron[-1] - ) # shape is [nframes*natoms[0], self.length, self.neuron[-1]] - if self.return_G: - return xyz_scatter - else: - # shape is [nframes*natoms[0], 4, self.length] - inputs_reshape = inputs_i.view(-1, self.length, 4).permute(0, 2, 1) - return torch.matmul(inputs_reshape, xyz_scatter) - - class SimpleLinear(nn.Module): use_timestep: Final[bool] @@ -396,53 +151,6 @@ def _normal_init(self): nn.init.kaiming_normal_(self.weight, nonlinearity="linear") -class Transition(nn.Module): - def __init__(self, d_in, n, dropout=0.0): - super().__init__() - - self.d_in = d_in - self.n = n - - self.linear_1 = Linear(self.d_in, self.n * self.d_in, init="relu") - self.act = nn.GELU() - self.linear_2 = Linear(self.n * self.d_in, d_in, init="final") - self.dropout = dropout - - def _transition(self, x): - x = self.linear_1(x) - x = self.act(x) - x = F.dropout(x, p=self.dropout, training=self.training) - x = self.linear_2(x) - return x - - def forward( - self, - x: torch.Tensor, - ) -> torch.Tensor: - x = self._transition(x=x) - return x - - -class Embedding(nn.Embedding): - def __init__( - self, - num_embeddings: int, - embedding_dim: int, - padding_idx: Optional[int] = None, - dtype=torch.float64, - ): - super().__init__( - num_embeddings, embedding_dim, padding_idx=padding_idx, dtype=dtype - ) - self._normal_init() - - if padding_idx is not None: - self.weight.data[self.padding_idx].zero_() - - def _normal_init(self, std=0.02): - nn.init.normal_(self.weight, mean=0.0, std=std) - - class NonLinearHead(nn.Module): def __init__(self, input_dim, out_dim, activation_fn, hidden=None): super().__init__() @@ -456,27 +164,6 @@ def forward(self, x): return x -class NonLinear(nn.Module): - def __init__(self, input, output_size, hidden=None): - super().__init__() - - if hidden is None: - hidden = input - self.layer1 = Linear(input, hidden, init="relu") - self.layer2 = Linear(hidden, output_size, init="final") - - def forward(self, x): - x = F.linear(x, self.layer1.weight) - # x = fused_ops.bias_torch_gelu(x, self.layer1.bias) - x = nn.GELU()(x) + self.layer1.bias - x = self.layer2(x) - return x - - def zero_init(self): - nn.init.zeros_(self.layer2.weight) - nn.init.zeros_(self.layer2.bias) - - class MaskLMHead(nn.Module): """Head for masked language modeling.""" @@ -844,1327 +531,3 @@ def serialize(self) -> dict: "type_map": self.type_map, "embedding": self.embedding_net.serialize(), } - - -@torch.jit.script -def gaussian(x, mean, std: float): - pi = 3.14159 - a = (2 * pi) ** 0.5 - return torch.exp(-0.5 * (((x - mean) / std) ** 2)) / (a * std) - - -class GaussianKernel(nn.Module): - def __init__(self, K=128, num_pair=512, std_width=1.0, start=0.0, stop=9.0): - super().__init__() - self.K = K - std_width = std_width - start = start - stop = stop - mean = torch.linspace(start, stop, K, dtype=env.GLOBAL_PT_FLOAT_PRECISION) # pylint: disable=no-explicit-device - self.std = (std_width * (mean[1] - mean[0])).item() - self.register_buffer("mean", mean) - self.mul = Embedding( - num_pair + 1, 1, padding_idx=num_pair, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - self.bias = Embedding( - num_pair + 1, 1, padding_idx=num_pair, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - nn.init.constant_(self.bias.weight, 0) - nn.init.constant_(self.mul.weight, 1.0) - - def forward(self, x, atom_pair): - mul = self.mul(atom_pair).abs().sum(dim=-2) - bias = self.bias(atom_pair).sum(dim=-2) - x = mul * x.unsqueeze(-1) + bias - # [nframes, nloc, nnei, K] - x = x.expand(-1, -1, -1, self.K) - mean = self.mean.view(-1) - return gaussian(x, mean, self.std) - - -class GaussianEmbedding(nn.Module): - def __init__( - self, - rcut, - kernel_num, - num_pair, - embed_dim, - pair_embed_dim, - sel, - ntypes, - atomic_sum_gbf, - ): - """Construct a gaussian kernel based embedding of pair representation. - - Args: - rcut: Radial cutoff. - kernel_num: Number of gaussian kernels. - num_pair: Number of different pairs. - embed_dim: Dimension of atomic representation. - pair_embed_dim: Dimension of pair representation. - sel: Number of neighbors. - ntypes: Number of atom types. - """ - super().__init__() - self.gbf = GaussianKernel(K=kernel_num, num_pair=num_pair, stop=rcut) - self.gbf_proj = NonLinear(kernel_num, pair_embed_dim) - self.embed_dim = embed_dim - self.pair_embed_dim = pair_embed_dim - self.atomic_sum_gbf = atomic_sum_gbf - if self.atomic_sum_gbf: - if kernel_num != self.embed_dim: - self.edge_proj = torch.nn.Linear( - kernel_num, self.embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - else: - self.edge_proj = None - self.ntypes = ntypes - self.nnei = sel - - def forward(self, coord_selected, atom_feature, edge_type_2dim, edge_feature): - ## local cluster forward - """Calculate decoded embedding for each atom. - Args: - coord_selected: Clustered atom coordinates with shape [nframes*nloc, natoms, 3]. - atom_feature: Previous calculated atomic features with shape [nframes*nloc, natoms, embed_dim]. - edge_type_2dim: Edge index for gbf calculation with shape [nframes*nloc, natoms, natoms, 2]. - edge_feature: Previous calculated edge features with shape [nframes*nloc, natoms, natoms, pair_dim]. - - Returns - ------- - atom_feature: Updated atomic features with shape [nframes*nloc, natoms, embed_dim]. - attn_bias: Updated edge features as attention bias with shape [nframes*nloc, natoms, natoms, pair_dim]. - delta_pos: Delta position for force/vector prediction with shape [nframes*nloc, natoms, natoms, 3]. - """ - ncluster, natoms, _ = coord_selected.shape - # ncluster x natoms x natoms x 3 - delta_pos = coord_selected.unsqueeze(1) - coord_selected.unsqueeze(2) - # (ncluster x natoms x natoms - dist = delta_pos.norm(dim=-1).view(-1, natoms, natoms) - # [ncluster, natoms, natoms, K] - gbf_feature = self.gbf(dist, edge_type_2dim) - if self.atomic_sum_gbf: - edge_features = gbf_feature - # [ncluster, natoms, K] - sum_edge_features = edge_features.sum(dim=-2) - if self.edge_proj is not None: - sum_edge_features = self.edge_proj(sum_edge_features) - # [ncluster, natoms, embed_dim] - atom_feature = atom_feature + sum_edge_features - - # [ncluster, natoms, natoms, pair_dim] - gbf_result = self.gbf_proj(gbf_feature) - - attn_bias = gbf_result + edge_feature - return atom_feature, attn_bias, delta_pos - - -class NeighborWiseAttention(nn.Module): - def __init__( - self, - layer_num, - nnei, - embed_dim, - hidden_dim, - dotr=False, - do_mask=False, - post_ln=True, - ffn=False, - ffn_embed_dim=1024, - activation="tanh", - scaling_factor=1.0, - head_num=1, - normalize=True, - temperature=None, - smooth=True, - ): - """Construct a neighbor-wise attention net.""" - super().__init__() - self.layer_num = layer_num - attention_layers = [] - for i in range(self.layer_num): - attention_layers.append( - NeighborWiseAttentionLayer( - nnei, - embed_dim, - hidden_dim, - dotr=dotr, - do_mask=do_mask, - post_ln=post_ln, - ffn=ffn, - ffn_embed_dim=ffn_embed_dim, - activation=activation, - scaling_factor=scaling_factor, - head_num=head_num, - normalize=normalize, - temperature=temperature, - smooth=smooth, - ) - ) - self.attention_layers = nn.ModuleList(attention_layers) - - def forward( - self, - input_G, - nei_mask, - input_r: Optional[torch.Tensor] = None, - sw: Optional[torch.Tensor] = None, - ): - """ - Args: - input_G: Input G, [nframes * nloc, nnei, embed_dim]. - nei_mask: neighbor mask, [nframes * nloc, nnei]. - input_r: normalized radial, [nframes, nloc, nei, 3]. - - Returns - ------- - out: Output G, [nframes * nloc, nnei, embed_dim] - - """ - out = input_G - # https://github.com/pytorch/pytorch/issues/39165#issuecomment-635472592 - for layer in self.attention_layers: - out = layer(out, nei_mask, input_r=input_r, sw=sw) - return out - - -class NeighborWiseAttentionLayer(nn.Module): - ffn: Final[bool] - - def __init__( - self, - nnei, - embed_dim, - hidden_dim, - dotr=False, - do_mask=False, - post_ln=True, - ffn=False, - ffn_embed_dim=1024, - activation="tanh", - scaling_factor=1.0, - head_num=1, - normalize=True, - temperature=None, - smooth=True, - ): - """Construct a neighbor-wise attention layer.""" - super().__init__() - self.nnei = nnei - self.embed_dim = embed_dim - self.hidden_dim = hidden_dim - self.dotr = dotr - self.do_mask = do_mask - self.post_ln = post_ln - self.ffn = ffn - self.smooth = smooth - self.attention_layer = GatedSelfAttetion( - nnei, - embed_dim, - hidden_dim, - dotr=dotr, - do_mask=do_mask, - scaling_factor=scaling_factor, - head_num=head_num, - normalize=normalize, - temperature=temperature, - smooth=smooth, - ) - self.attn_layer_norm = nn.LayerNorm( - self.embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE - ) - if self.ffn: - self.ffn_embed_dim = ffn_embed_dim - self.fc1 = nn.Linear( - self.embed_dim, self.ffn_embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - self.activation_fn = ActivationFn(activation) - self.fc2 = nn.Linear( - self.ffn_embed_dim, self.embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - self.final_layer_norm = nn.LayerNorm( - self.embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - - def forward( - self, - x, - nei_mask, - input_r: Optional[torch.Tensor] = None, - sw: Optional[torch.Tensor] = None, - ): - residual = x - if not self.post_ln: - x = self.attn_layer_norm(x) - x = self.attention_layer(x, nei_mask, input_r=input_r, sw=sw) - x = residual + x - if self.post_ln: - x = self.attn_layer_norm(x) - if self.ffn: - residual = x - if not self.post_ln: - x = self.final_layer_norm(x) - x = self.fc1(x) - x = self.activation_fn(x) - x = self.fc2(x) - x = residual + x - if self.post_ln: - x = self.final_layer_norm(x) - return x - - -class GatedSelfAttetion(nn.Module): - def __init__( - self, - nnei, - embed_dim, - hidden_dim, - dotr=False, - do_mask=False, - scaling_factor=1.0, - head_num=1, - normalize=True, - temperature=None, - bias=True, - smooth=True, - ): - """Construct a neighbor-wise attention net.""" - super().__init__() - self.nnei = nnei - self.embed_dim = embed_dim - self.hidden_dim = hidden_dim - self.head_num = head_num - self.dotr = dotr - self.do_mask = do_mask - if temperature is None: - self.scaling = (self.hidden_dim * scaling_factor) ** -0.5 - else: - self.scaling = temperature - self.normalize = normalize - self.in_proj = SimpleLinear( - embed_dim, - hidden_dim * 3, - bavg=0.0, - stddev=1.0, - use_timestep=False, - bias=bias, - ) - self.out_proj = SimpleLinear( - hidden_dim, embed_dim, bavg=0.0, stddev=1.0, use_timestep=False, bias=bias - ) - self.smooth = smooth - - def forward( - self, - query, - nei_mask, - input_r: Optional[torch.Tensor] = None, - sw: Optional[torch.Tensor] = None, - attnw_shift: float = 20.0, - ): - """ - Args: - query: input G, [nframes * nloc, nnei, embed_dim]. - nei_mask: neighbor mask, [nframes * nloc, nnei]. - input_r: normalized radial, [nframes, nloc, nei, 3]. - - Returns - ------- - type_embedding: - - """ - q, k, v = self.in_proj(query).chunk(3, dim=-1) - # [nframes * nloc, nnei, hidden_dim] - q = q.view(-1, self.nnei, self.hidden_dim) - k = k.view(-1, self.nnei, self.hidden_dim) - v = v.view(-1, self.nnei, self.hidden_dim) - if self.normalize: - q = F.normalize(q, dim=-1) - k = F.normalize(k, dim=-1) - v = F.normalize(v, dim=-1) - q = q * self.scaling - k = k.transpose(1, 2) - # [nframes * nloc, nnei, nnei] - attn_weights = torch.bmm(q, k) - # [nframes * nloc, nnei] - nei_mask = nei_mask.view(-1, self.nnei) - if self.smooth: - # [nframes * nloc, nnei] - assert sw is not None - sw = sw.view([-1, self.nnei]) - attn_weights = (attn_weights + attnw_shift) * sw[:, :, None] * sw[ - :, None, : - ] - attnw_shift - else: - attn_weights = attn_weights.masked_fill( - ~nei_mask.unsqueeze(1), float("-inf") - ) - attn_weights = F.softmax(attn_weights, dim=-1) - attn_weights = attn_weights.masked_fill(~nei_mask.unsqueeze(-1), 0.0) - if self.smooth: - assert sw is not None - attn_weights = attn_weights * sw[:, :, None] * sw[:, None, :] - if self.dotr: - assert input_r is not None, "input_r must be provided when dotr is True!" - angular_weight = torch.bmm(input_r, input_r.transpose(1, 2)) - attn_weights = attn_weights * angular_weight - o = torch.bmm(attn_weights, v) - output = self.out_proj(o) - return output - - -class LocalSelfMultiheadAttention(nn.Module): - def __init__(self, feature_dim, attn_head, scaling_factor=1.0): - super().__init__() - self.feature_dim = feature_dim - self.attn_head = attn_head - self.head_dim = feature_dim // attn_head - assert ( - feature_dim % attn_head == 0 - ), f"feature_dim {feature_dim} must be divided by attn_head {attn_head}!" - self.scaling = (self.head_dim * scaling_factor) ** -0.5 - self.in_proj = SimpleLinear(self.feature_dim, self.feature_dim * 3) - # TODO debug - # self.out_proj = SimpleLinear(self.feature_dim, self.feature_dim) - - def forward( - self, - query, - attn_bias: Optional[torch.Tensor] = None, - nlist_mask: Optional[torch.Tensor] = None, - nlist: Optional[torch.Tensor] = None, - return_attn=True, - ): - nframes, nloc, feature_dim = query.size() - _, _, nnei = nlist.size() - assert feature_dim == self.feature_dim - # [nframes, nloc, feature_dim] - q, k, v = self.in_proj(query).chunk(3, dim=-1) - # [nframes * attn_head * nloc, 1, head_dim] - q = ( - q.view(nframes, nloc, self.attn_head, self.head_dim) - .transpose(1, 2) - .contiguous() - .view(nframes * self.attn_head * nloc, 1, self.head_dim) - * self.scaling - ) - # [nframes, nloc, feature_dim] --> [nframes, nloc + 1, feature_dim] - # with nlist [nframes, nloc, nnei] --> [nframes, nloc, nnei, feature_dim] - # padding = torch.zeros(feature_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION).to(k.device) - # k = torch.concat([k, padding.unsqueeze(0).unsqueeze(1)], dim=1) - # v = torch.concat([v, padding.unsqueeze(0).unsqueeze(1)], dim=1) - - # [nframes, nloc * nnei, feature_dim] - index = nlist.view(nframes, -1).unsqueeze(-1).expand(-1, -1, feature_dim) - k = torch.gather(k, dim=1, index=index) - # [nframes, nloc * nnei, feature_dim] - v = torch.gather(v, dim=1, index=index) - # [nframes * attn_head * nloc, nnei, head_dim] - k = ( - k.view(nframes, nloc, nnei, self.attn_head, self.head_dim) - .permute(0, 3, 1, 2, 4) - .contiguous() - .view(nframes * self.attn_head * nloc, nnei, self.head_dim) - ) - v = ( - v.view(nframes, nloc, nnei, self.attn_head, self.head_dim) - .permute(0, 3, 1, 2, 4) - .contiguous() - .view(nframes * self.attn_head * nloc, nnei, self.head_dim) - ) - # [nframes * attn_head * nloc, 1, nnei] - attn_weights = torch.bmm(q, k.transpose(1, 2)) - # maskfill - # [nframes, attn_head, nloc, nnei] - attn_weights = attn_weights.view( - nframes, self.attn_head, nloc, nnei - ).masked_fill(~nlist_mask.unsqueeze(1), float("-inf")) - # add bias - if return_attn: - attn_weights = attn_weights + attn_bias - # softmax - # [nframes * attn_head * nloc, 1, nnei] - attn = F.softmax(attn_weights, dim=-1).view( - nframes * self.attn_head * nloc, 1, nnei - ) - # bmm - # [nframes * attn_head * nloc, 1, head_dim] - o = torch.bmm(attn, v) - assert list(o.size()) == [nframes * self.attn_head * nloc, 1, self.head_dim] - # [nframes, nloc, feature_dim] - o = ( - o.view(nframes, self.attn_head, nloc, self.head_dim) - .transpose(1, 2) - .contiguous() - .view(nframes, nloc, self.feature_dim) - ) - # out - ## TODO debug: - # o = self.out_proj(o) - if not return_attn: - return o - else: - return o, attn_weights, attn - - -class NodeTaskHead(nn.Module): - def __init__( - self, - embed_dim: int, - pair_dim: int, - num_head: int, - ): - super().__init__() - self.layer_norm = nn.LayerNorm(embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) - self.pair_norm = nn.LayerNorm(pair_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) - self.embed_dim = embed_dim - self.q_proj = Linear(embed_dim, embed_dim, bias=False, init="glorot") - self.k_proj = Linear(embed_dim, embed_dim, bias=False, init="glorot") - self.v_proj = Linear(embed_dim, embed_dim, bias=False, init="glorot") - self.num_heads = num_head - self.head_dim = embed_dim // num_head - self.scaling = self.head_dim**-0.5 - self.force_proj = Linear(embed_dim, 1, init="final", bias=False) - self.linear_bias = Linear(pair_dim, num_head) - self.dropout = 0.1 - - def zero_init(self): - nn.init.zeros_(self.force_proj.weight) - - def forward( - self, - query: Tensor, - pair: Tensor, - delta_pos: Tensor, - attn_mask: Tensor = None, - ) -> Tensor: - ncluster, natoms, _ = query.size() - query = self.layer_norm(query) - # [ncluster, natoms, natoms, pair_dim] - pair = self.pair_norm(pair) - - # [ncluster, attn_head, natoms, head_dim] - q = ( - self.q_proj(query) - .view(ncluster, natoms, self.num_heads, -1) - .transpose(1, 2) - * self.scaling - ) - # [ncluster, attn_head, natoms, head_dim] - k = ( - self.k_proj(query) - .view(ncluster, natoms, self.num_heads, -1) - .transpose(1, 2) - ) - v = ( - self.v_proj(query) - .view(ncluster, natoms, self.num_heads, -1) - .transpose(1, 2) - ) - # [ncluster, attn_head, natoms, natoms] - attn = q @ k.transpose(-1, -2) - del q, k - # [ncluster, attn_head, natoms, natoms] - bias = self.linear_bias(pair).permute(0, 3, 1, 2).contiguous() - - # [ncluster, attn_head, natoms, natoms] - attn_probs = softmax_dropout( - attn, - self.dropout, - self.training, - mask=attn_mask, - bias=bias.contiguous(), - ).view(ncluster, self.num_heads, natoms, natoms) - - # delta_pos: [ncluster, natoms, natoms, 3] - # [ncluster, attn_head, natoms, natoms, 3] - rot_attn_probs = attn_probs.unsqueeze(-1) * delta_pos.unsqueeze(1).type_as( - attn_probs - ) - # [ncluster, attn_head, 3, natoms, natoms] - rot_attn_probs = rot_attn_probs.permute(0, 1, 4, 2, 3) - # [ncluster, attn_head, 3, natoms, head_dim] - x = rot_attn_probs @ v.unsqueeze(2) - # [ncluster, natoms, 3, embed_dim] - x = x.permute(0, 3, 2, 1, 4).contiguous().view(ncluster, natoms, 3, -1) - cur_force = self.force_proj(x).view(ncluster, natoms, 3) - return cur_force - - -class EnergyHead(nn.Module): - def __init__( - self, - input_dim, - output_dim, - ): - super().__init__() - self.layer_norm = nn.LayerNorm(input_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) - self.linear_in = Linear(input_dim, input_dim, init="relu") - - self.linear_out = Linear(input_dim, output_dim, bias=True, init="final") - - def forward(self, x): - x = x.type(self.linear_in.weight.dtype) - x = F.gelu(self.layer_norm(self.linear_in(x))) - x = self.linear_out(x) - return x - - -class OuterProduct(nn.Module): - def __init__(self, d_atom, d_pair, d_hid=32): - super().__init__() - - self.d_atom = d_atom - self.d_pair = d_pair - self.d_hid = d_hid - - self.linear_in = nn.Linear( - d_atom, d_hid * 2, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - self.linear_out = nn.Linear( - d_hid**2, d_pair, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - self.act = nn.GELU() - - def _opm(self, a, b): - # [nframes, nloc, d] - nframes, nloc, d = a.shape - a = a.view(nframes, nloc, 1, d, 1) - b = b.view(nframes, 1, nloc, 1, d) - # [nframes, nloc, nloc, d, d] - outer = a * b - outer = outer.view(outer.shape[:-2] + (-1,)) - outer = self.linear_out(outer) - return outer - - def forward( - self, - m: torch.Tensor, - nlist: torch.Tensor, - op_mask: float, - op_norm: float, - ) -> torch.Tensor: - ab = self.linear_in(m) - ab = ab * op_mask - a, b = ab.chunk(2, dim=-1) - # [ncluster, natoms, natoms, d_pair] - z = self._opm(a, b) - z *= op_norm - return z - - -class Attention(nn.Module): - def __init__( - self, - q_dim: int, - k_dim: int, - v_dim: int, - head_dim: int, - num_heads: int, - gating: bool = False, - dropout: float = 0.0, - ): - super().__init__() - - self.num_heads = num_heads - self.head_dim = head_dim - total_dim = head_dim * self.num_heads - self.total_dim = total_dim - self.q_dim = q_dim - self.gating = gating - self.linear_q = Linear(q_dim, total_dim, bias=False, init="glorot") - self.linear_k = Linear(k_dim, total_dim, bias=False, init="glorot") - self.linear_v = Linear(v_dim, total_dim, bias=False, init="glorot") - self.linear_o = Linear(total_dim, q_dim, init="final") - self.linear_g = None - if self.gating: - self.linear_g = Linear(q_dim, total_dim, init="gating") - # precompute the 1/sqrt(head_dim) - self.norm = head_dim**-0.5 - self.dropout = dropout - - def forward( - self, - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - bias: torch.Tensor, - mask: torch.Tensor = None, - ) -> torch.Tensor: - nframes, nloc, embed_dim = q.size() - g = None - if self.linear_g is not None: - # gating, use raw query input - # [nframes, nloc, total_dim] - g = self.linear_g(q) - # [nframes, nloc, total_dim] - q = self.linear_q(q) - q *= self.norm - # [nframes, nloc, total_dim] - k = self.linear_k(k) - # [nframes, nloc, total_dim] - v = self.linear_v(v) - # global - # q [nframes, h, nloc, d] - # k [nframes, h, nloc, d] - # v [nframes, h, nloc, d] - # attn [nframes, h, nloc, nloc] - # o [nframes, h, nloc, d] - - # [nframes, h, nloc, d] - q = q.view(q.shape[:-1] + (self.num_heads, -1)).transpose(-2, -3).contiguous() - k = k.view(k.shape[:-1] + (self.num_heads, -1)).transpose(-2, -3).contiguous() - v = v.view(v.shape[:-1] + (self.num_heads, -1)).transpose(-2, -3) - # [nframes, h, nloc, nloc] - attn = torch.matmul(q, k.transpose(-1, -2)) - del q, k - # [nframes, h, nloc, nloc] - attn = softmax_dropout(attn, self.dropout, self.training, mask=mask, bias=bias) - # [nframes, h, nloc, d] - o = torch.matmul(attn, v) - del attn, v - - # local - # q [nframes, h, nloc, 1, d] - # k [nframes, h, nloc, nnei, d] - # v [nframes, h, nloc, nnei, d] - # attn [nframes, h, nloc, nnei] - # o [nframes, h, nloc, d] - - assert list(o.size()) == [nframes, self.num_heads, nloc, self.head_dim] - # [nframes, nloc, total_dim] - o = o.transpose(-2, -3).contiguous() - o = o.view(*o.shape[:-2], -1) - - if g is not None: - o = torch.sigmoid(g) * o - - # merge heads - o = self.linear_o(o) - return o - - -class AtomAttention(nn.Module): - def __init__( - self, - q_dim: int, - k_dim: int, - v_dim: int, - pair_dim: int, - head_dim: int, - num_heads: int, - gating: bool = False, - dropout: float = 0.0, - ): - super().__init__() - - self.mha = Attention( - q_dim, k_dim, v_dim, head_dim, num_heads, gating=gating, dropout=dropout - ) - self.layer_norm = nn.LayerNorm(pair_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) - self.linear_bias = Linear(pair_dim, num_heads) - - def forward( - self, - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - nlist: torch.Tensor, - pair: torch.Tensor, - mask: torch.Tensor = None, - ) -> torch.Tensor: - pair = self.layer_norm(pair) - bias = self.linear_bias(pair).permute(0, 3, 1, 2).contiguous() - return self.mha(q, k, v, bias=bias, mask=mask) - - -class TriangleMultiplication(nn.Module): - def __init__(self, d_pair, d_hid): - super().__init__() - - self.linear_ab_p = Linear(d_pair, d_hid * 2) - self.linear_ab_g = Linear(d_pair, d_hid * 2, init="gating") - - self.linear_g = Linear(d_pair, d_pair, init="gating") - self.linear_z = Linear(d_hid, d_pair, init="final") - - self.layer_norm_out = nn.LayerNorm(d_hid, dtype=env.GLOBAL_PT_FLOAT_PRECISION) - - def forward( - self, - z: torch.Tensor, - mask: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - # z : [nframes, nloc, nloc, pair_dim] - - # [nframes, nloc, nloc, pair_dim] - g = self.linear_g(z) - if self.training: - ab = self.linear_ab_p(z) * torch.sigmoid(self.linear_ab_g(z)) - else: - ab = self.linear_ab_p(z) - ab *= torch.sigmoid(self.linear_ab_g(z)) - # [nframes, nloc, nloc, d] - a, b = torch.chunk(ab, 2, dim=-1) - del z, ab - - # [nframes, d, nloc_i, nloc_k] row not trans - a1 = a.permute(0, 3, 1, 2) - # [nframes, d, nloc_k, nloc_j(i)] trans - b1 = b.transpose(-1, -3) - # [nframes, d, nloc_i, nloc_j] - x = torch.matmul(a1, b1) - del a1, b1 - - # [nframes, d, nloc_k, nloc_j(i)] not trans - b2 = b.permute(0, 3, 1, 2) - # [nframes, d, nloc_i, nloc_k] col trans # check TODO - a2 = a.transpose(-1, -3) - - # [nframes, d, nloc_i, nloc_j] - x = x + torch.matmul(a2, b2) - del a, b, a2, b2 - - # [nframes, nloc_i, nloc_j, d] - x = x.permute(0, 2, 3, 1) - - x = self.layer_norm_out(x) - x = self.linear_z(x) - return g * x - - -class EvoformerEncoderLayer(nn.Module): - def __init__( - self, - feature_dim: int = 768, - ffn_dim: int = 2048, - attn_head: int = 8, - activation_fn: str = "gelu", - post_ln: bool = False, - ): - super().__init__() - self.feature_dim = feature_dim - self.ffn_dim = ffn_dim - self.attn_head = attn_head - self.activation_fn = ( - ActivationFn(activation_fn) if activation_fn is not None else None - ) - self.post_ln = post_ln - self.self_attn_layer_norm = nn.LayerNorm( - self.feature_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - - self.self_attn = LocalSelfMultiheadAttention( - self.feature_dim, - self.attn_head, - ) - self.final_layer_norm = nn.LayerNorm( - self.feature_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - self.fc1 = SimpleLinear(self.feature_dim, self.ffn_dim) - self.fc2 = SimpleLinear(self.ffn_dim, self.feature_dim) - - def forward( - self, - x, - attn_bias: Optional[torch.Tensor] = None, - nlist_mask: Optional[torch.Tensor] = None, - nlist: Optional[torch.Tensor] = None, - return_attn=True, - ): - residual = x - if not self.post_ln: - x = self.self_attn_layer_norm(x) - x = self.self_attn( - query=x, - attn_bias=attn_bias, - nlist_mask=nlist_mask, - nlist=nlist, - return_attn=return_attn, - ) - if return_attn: - x, attn_weights, attn_probs = x - x = residual + x - if self.post_ln: - x = self.self_attn_layer_norm(x) - - residual = x - if not self.post_ln: - x = self.final_layer_norm(x) - x = self.fc1(x) - x = self.activation_fn(x) - x = self.fc2(x) - x = residual + x - if self.post_ln: - x = self.final_layer_norm(x) - if not return_attn: - return x - else: - return x, attn_weights, attn_probs - - -# output: atomic_rep, transformed_atomic_rep, pair_rep, delta_pair_rep, norm_x, norm_delta_pair_rep, -class Evoformer2bEncoder(nn.Module): - def __init__( - self, - nnei: int, - layer_num: int = 6, - attn_head: int = 8, - atomic_dim: int = 1024, - pair_dim: int = 100, - feature_dim: int = 1024, - ffn_dim: int = 2048, - post_ln: bool = False, - final_layer_norm: bool = True, - final_head_layer_norm: bool = False, - emb_layer_norm: bool = False, - atomic_residual: bool = False, - evo_residual: bool = False, - residual_factor: float = 1.0, - activation_function: str = "gelu", - ): - super().__init__() - self.nnei = nnei - self.layer_num = layer_num - self.attn_head = attn_head - self.atomic_dim = atomic_dim - self.pair_dim = pair_dim - self.feature_dim = feature_dim - self.ffn_dim = ffn_dim - self.post_ln = post_ln - self._final_layer_norm = final_layer_norm - self._final_head_layer_norm = final_head_layer_norm - self._emb_layer_norm = emb_layer_norm - self.activation_function = activation_function - self.evo_residual = evo_residual - self.residual_factor = residual_factor - if atomic_residual and atomic_dim == feature_dim: - self.atomic_residual = True - else: - self.atomic_residual = False - self.in_proj = SimpleLinear( - self.atomic_dim, - self.feature_dim, - bavg=0.0, - stddev=1.0, - use_timestep=False, - activate="tanh", - ) # TODO - self.out_proj = SimpleLinear( - self.feature_dim, - self.atomic_dim, - bavg=0.0, - stddev=1.0, - use_timestep=False, - activate="tanh", - ) - if self._emb_layer_norm: - self.emb_layer_norm = nn.LayerNorm( - self.feature_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - - ## TODO debug : self.in_proj_pair = NonLinearHead(self.pair_dim, self.attn_head, activation_fn=None) - self.in_proj_pair = SimpleLinear(self.pair_dim, self.attn_head, activate=None) - evoformer_encoder_layers = [] - for i in range(self.layer_num): - evoformer_encoder_layers.append( - EvoformerEncoderLayer( - feature_dim=self.feature_dim, - ffn_dim=self.ffn_dim, - attn_head=self.attn_head, - activation_fn=self.activation_function, - post_ln=self.post_ln, - ) - ) - self.evoformer_encoder_layers = nn.ModuleList(evoformer_encoder_layers) - if self._final_layer_norm: - self.final_layer_norm = nn.LayerNorm( - self.feature_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - if self._final_head_layer_norm: - self.final_head_layer_norm = nn.LayerNorm( - self.attn_head, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - - def forward(self, atomic_rep, pair_rep, nlist, nlist_type, nlist_mask): - """Encoder the atomic and pair representations. - - Args: - - atomic_rep: Atomic representation with shape [nframes, nloc, atomic_dim]. - - pair_rep: Pair representation with shape [nframes, nloc, nnei, pair_dim]. - - nlist: Neighbor list with shape [nframes, nloc, nnei]. - - nlist_type: Neighbor types with shape [nframes, nloc, nnei]. - - nlist_mask: Neighbor mask with shape [nframes, nloc, nnei], `False` if blank. - - Returns - ------- - - atomic_rep: Atomic representation after encoder with shape [nframes, nloc, feature_dim]. - - transformed_atomic_rep: Transformed atomic representation after encoder with shape [nframes, nloc, atomic_dim]. - - pair_rep: Pair representation after encoder with shape [nframes, nloc, nnei, attn_head]. - - delta_pair_rep: Delta pair representation after encoder with shape [nframes, nloc, nnei, attn_head]. - - norm_x: Normalization loss of atomic_rep. - - norm_delta_pair_rep: Normalization loss of delta_pair_rep. - """ - # Global branch - nframes, nloc, _ = atomic_rep.size() - nnei = pair_rep.shape[2] - input_atomic_rep = atomic_rep - # [nframes, nloc, feature_dim] - if self.atomic_residual: - atomic_rep = atomic_rep + self.in_proj(atomic_rep) - else: - atomic_rep = self.in_proj(atomic_rep) - - if self._emb_layer_norm: - atomic_rep = self.emb_layer_norm(atomic_rep) - - # Local branch - # [nframes, nloc, nnei, attn_head] - pair_rep = self.in_proj_pair(pair_rep) - # [nframes, attn_head, nloc, nnei] - pair_rep = pair_rep.permute(0, 3, 1, 2).contiguous() - input_pair_rep = pair_rep - pair_rep = pair_rep.masked_fill(~nlist_mask.unsqueeze(1), float("-inf")) - - for i in range(self.layer_num): - atomic_rep, pair_rep, _ = self.evoformer_encoder_layers[i]( - atomic_rep, - attn_bias=pair_rep, - nlist_mask=nlist_mask, - nlist=nlist, - return_attn=True, - ) - - def norm_loss(x, eps=1e-10, tolerance=1.0): - # x = x.float() - max_norm = x.shape[-1] ** 0.5 - norm = torch.sqrt(torch.sum(x**2, dim=-1) + eps) - error = F.relu((norm - max_norm).abs() - tolerance) - return error - - def masked_mean(mask, value, dim=-1, eps=1e-10): - return ( - torch.sum(mask * value, dim=dim) / (eps + torch.sum(mask, dim=dim)) - ).mean() - - # atomic_rep shape: [nframes, nloc, feature_dim] - # pair_rep shape: [nframes, attn_head, nloc, nnei] - - norm_x = torch.mean(norm_loss(atomic_rep)) - if self._final_layer_norm: - atomic_rep = self.final_layer_norm(atomic_rep) - - delta_pair_rep = pair_rep - input_pair_rep - delta_pair_rep = delta_pair_rep.masked_fill(~nlist_mask.unsqueeze(1), 0) - # [nframes, nloc, nnei, attn_head] - delta_pair_rep = ( - delta_pair_rep.view(nframes, self.attn_head, nloc, nnei) - .permute(0, 2, 3, 1) - .contiguous() - ) - - # [nframes, nloc, nnei] - norm_delta_pair_rep = norm_loss(delta_pair_rep) - norm_delta_pair_rep = masked_mean(mask=nlist_mask, value=norm_delta_pair_rep) - if self._final_head_layer_norm: - delta_pair_rep = self.final_head_layer_norm(delta_pair_rep) - - if self.atomic_residual: - transformed_atomic_rep = atomic_rep + self.out_proj(atomic_rep) - else: - transformed_atomic_rep = self.out_proj(atomic_rep) - - if self.evo_residual: - transformed_atomic_rep = ( - self.residual_factor * transformed_atomic_rep + input_atomic_rep - ) * (1 / np.sqrt(2)) - - return ( - atomic_rep, - transformed_atomic_rep, - pair_rep, - delta_pair_rep, - norm_x, - norm_delta_pair_rep, - ) - - -class Evoformer3bEncoderLayer(nn.Module): - def __init__( - self, - nnei, - embedding_dim: int = 768, - pair_dim: int = 64, - pair_hidden_dim: int = 32, - ffn_embedding_dim: int = 3072, - num_attention_heads: int = 8, - dropout: float = 0.1, - droppath_prob: float = 0.0, - pair_dropout: float = 0.25, - attention_dropout: float = 0.1, - activation_dropout: float = 0.1, - pre_ln: bool = True, - tri_update: bool = True, - ): - super().__init__() - # Initialize parameters - self.nnei = nnei - self.embedding_dim = embedding_dim - self.num_attention_heads = num_attention_heads - self.attention_dropout = attention_dropout - - # self.dropout = dropout - self.activation_dropout = activation_dropout - - if droppath_prob > 0.0: - self.dropout_module = DropPath(droppath_prob) - else: - self.dropout_module = Dropout(dropout) - - # self.self_attn = AtomAttentionLocal(embedding_dim, embedding_dim, embedding_dim, pair_dim, - # embedding_dim // num_attention_heads, num_attention_heads, - # gating=False, dropout=attention_dropout) - self.self_attn = AtomAttention( - embedding_dim, - embedding_dim, - embedding_dim, - pair_dim, - embedding_dim // num_attention_heads, - num_attention_heads, - gating=False, - dropout=attention_dropout, - ) - # layer norm associated with the self attention layer - self.pre_ln = pre_ln - self.self_attn_layer_norm = nn.LayerNorm( - self.embedding_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - self.fc1 = nn.Linear( - self.embedding_dim, ffn_embedding_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - self.fc2 = nn.Linear( - ffn_embedding_dim, self.embedding_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - self.final_layer_norm = nn.LayerNorm( - self.embedding_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - - self.x_layer_norm_opm = nn.LayerNorm( - self.embedding_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - # self.opm = OuterProductLocal(self.embedding_dim, pair_dim, d_hid=pair_hidden_dim) - self.opm = OuterProduct(self.embedding_dim, pair_dim, d_hid=pair_hidden_dim) - # self.pair_layer_norm_opm = nn.LayerNorm(pair_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) - self.pair_layer_norm_ffn = nn.LayerNorm( - pair_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - self.pair_ffn = Transition( - pair_dim, - 1, - dropout=activation_dropout, - ) - self.pair_dropout = pair_dropout - self.tri_update = tri_update - if self.tri_update: - self.pair_layer_norm_trimul = nn.LayerNorm( - pair_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION - ) - self.pair_tri_mul = TriangleMultiplication(pair_dim, pair_hidden_dim) - - def update_pair( - self, - x, - pair, - nlist, - op_mask, - op_norm, - ): - # local: - # [nframes, nloc, nnei, pair_dim] - # global: - # [nframes, nloc, nloc, pair_dim] - pair = pair + self.dropout_module( - self.opm(self.x_layer_norm_opm(x), nlist, op_mask, op_norm) - ) - if not self.pre_ln: - pair = self.pair_layer_norm_opm(pair) - return x, pair - - def shared_dropout(self, x, shared_dim, dropout): - shape = list(x.shape) - shape[shared_dim] = 1 - with torch.no_grad(): - mask = x.new_ones(shape) - return F.dropout(mask, p=dropout, training=self.training) * x - - def forward( - self, - x: torch.Tensor, - pair: torch.Tensor, - nlist: torch.Tensor = None, - attn_mask: Optional[torch.Tensor] = None, - pair_mask: Optional[torch.Tensor] = None, - op_mask: float = 1.0, - op_norm: float = 1.0, - ): - """Encoder the atomic and pair representations. - - Args: - - x: Atomic representation with shape [ncluster, natoms, embed_dim]. - - pair: Pair representation with shape [ncluster, natoms, natoms, pair_dim]. - - attn_mask: Attention mask with shape [ncluster, head, natoms, natoms]. - - pair_mask: Neighbor mask with shape [ncluster, natoms, natoms]. - - """ - # [ncluster, natoms, embed_dim] - residual = x - if self.pre_ln: - x = self.self_attn_layer_norm(x) - x = self.self_attn( - x, - x, - x, - nlist=nlist, - pair=pair, - mask=attn_mask, - ) - # x = F.dropout(x, p=self.dropout, training=self.training) - x = self.dropout_module(x) - x = residual + x - if not self.pre_ln: - x = self.self_attn_layer_norm(x) - - residual = x - if self.pre_ln: - x = self.final_layer_norm(x) - x = F.linear(x, self.fc1.weight) - # x = fused_ops.bias_torch_gelu(x, self.fc1.bias) - x = nn.GELU()(x) + self.fc1.bias - x = F.dropout(x, p=self.activation_dropout, training=self.training) - x = self.fc2(x) - # x = F.dropout(x, p=self.dropout, training=self.training) - x = self.dropout_module(x) - - x = residual + x - if not self.pre_ln: - x = self.final_layer_norm(x) - - block = [ - partial( - self.update_pair, - nlist=nlist, - op_mask=op_mask, - op_norm=op_norm, - ) - ] - - x, pair = checkpoint_sequential( - block, - input_x=(x, pair), - ) - - if self.tri_update: - residual_pair = pair - if self.pre_ln: - pair = self.pair_layer_norm_trimul(pair) - - pair = self.shared_dropout( - self.pair_tri_mul(pair, pair_mask), -3, self.pair_dropout - ) - pair = residual_pair + pair - if not self.pre_ln: - pair = self.pair_layer_norm_trimul(pair) - - residual_pair = pair - if self.pre_ln: - pair = self.pair_layer_norm_ffn(pair) - pair = self.dropout_module(self.pair_ffn(pair)) - pair = residual_pair + pair - if not self.pre_ln: - pair = self.pair_layer_norm_ffn(pair) - return x, pair - - -class Evoformer3bEncoder(nn.Module): - def __init__( - self, - nnei, - layer_num=6, - attn_head=8, - atomic_dim=768, - pair_dim=64, - pair_hidden_dim=32, - ffn_embedding_dim=3072, - dropout: float = 0.1, - droppath_prob: float = 0.0, - pair_dropout: float = 0.25, - attention_dropout: float = 0.1, - activation_dropout: float = 0.1, - pre_ln: bool = True, - tri_update: bool = True, - **kwargs, - ): - super().__init__() - self.nnei = nnei - if droppath_prob > 0: - droppath_probs = [ - x.item() - for x in torch.linspace(0, droppath_prob, layer_num) # pylint: disable=no-explicit-dtype,no-explicit-device - ] - else: - droppath_probs = None - - self.layers = nn.ModuleList( - [ - Evoformer3bEncoderLayer( - nnei, - atomic_dim, - pair_dim, - pair_hidden_dim, - ffn_embedding_dim, - num_attention_heads=attn_head, - dropout=dropout, - droppath_prob=droppath_probs[_], - pair_dropout=pair_dropout, - attention_dropout=attention_dropout, - activation_dropout=activation_dropout, - pre_ln=pre_ln, - tri_update=tri_update, - ) - for _ in range(layer_num) - ] - ) - - def forward(self, x, pair, attn_mask=None, pair_mask=None, atom_mask=None): - """Encoder the atomic and pair representations. - - Args: - x: Atomic representation with shape [ncluster, natoms, atomic_dim]. - pair: Pair representation with shape [ncluster, natoms, natoms, pair_dim]. - attn_mask: Attention mask (with -inf for softmax) with shape [ncluster, head, natoms, natoms]. - pair_mask: Pair mask (with 1 for real atom pair and 0 for padding) with shape [ncluster, natoms, natoms]. - atom_mask: Atom mask (with 1 for real atom and 0 for padding) with shape [ncluster, natoms]. - - Returns - ------- - x: Atomic representation with shape [ncluster, natoms, atomic_dim]. - pair: Pair representation with shape [ncluster, natoms, natoms, pair_dim]. - - """ - # [ncluster, natoms, 1] - op_mask = atom_mask.unsqueeze(-1) - op_mask = op_mask * (op_mask.size(-2) ** -0.5) - eps = 1e-3 - # [ncluster, natoms, natoms, 1] - op_norm = 1.0 / (eps + torch.einsum("...bc,...dc->...bdc", op_mask, op_mask)) - for layer in self.layers: - x, pair = layer( - x, - pair, - nlist=None, - attn_mask=attn_mask, - pair_mask=pair_mask, - op_mask=op_mask, - op_norm=op_norm, - ) - return x, pair diff --git a/deepmd/pt/model/task/__init__.py b/deepmd/pt/model/task/__init__.py index 572dc60d56..02d852eab7 100644 --- a/deepmd/pt/model/task/__init__.py +++ b/deepmd/pt/model/task/__init__.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from .atten_lcc import ( - FittingNetAttenLcc, -) from .base_fitting import ( BaseFitting, ) @@ -32,7 +29,6 @@ ) __all__ = [ - "FittingNetAttenLcc", "DenoiseNet", "DipoleFittingNet", "EnergyFittingNet", diff --git a/deepmd/pt/model/task/atten_lcc.py b/deepmd/pt/model/task/atten_lcc.py deleted file mode 100644 index 4f54038548..0000000000 --- a/deepmd/pt/model/task/atten_lcc.py +++ /dev/null @@ -1,55 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -import torch -import torch.nn as nn - -from deepmd.pt.model.network.network import ( - EnergyHead, - NodeTaskHead, -) -from deepmd.pt.model.task.fitting import ( - Fitting, -) -from deepmd.pt.utils import ( - env, -) - - -class FittingNetAttenLcc(Fitting): - def __init__( - self, embedding_width, bias_atom_e, pair_embed_dim, attention_heads, **kwargs - ): - super().__init__() - self.embedding_width = embedding_width - self.engergy_proj = EnergyHead(self.embedding_width, 1) - self.energe_agg_factor = nn.Embedding(4, 1, dtype=env.GLOBAL_PT_FLOAT_PRECISION) - nn.init.normal_(self.energe_agg_factor.weight, 0, 0.01) - bias_atom_e = torch.tensor(bias_atom_e) # pylint: disable=no-explicit-dtype,no-explicit-device - self.register_buffer("bias_atom_e", bias_atom_e) - self.pair_embed_dim = pair_embed_dim - self.attention_heads = attention_heads - self.node_proc = NodeTaskHead( - self.embedding_width, self.pair_embed_dim, self.attention_heads - ) - self.node_proc.zero_init() - - def forward(self, output, pair, delta_pos, atype, nframes, nloc): - # [nframes x nloc x tebd_dim] - output_nloc = (output[:, 0, :]).reshape(nframes, nloc, self.embedding_width) - # Optional: GRRG or mean of gbf TODO - - # energy outut - # [nframes, nloc] - energy_out = self.engergy_proj(output_nloc).view(nframes, nloc) - # [nframes, nloc] - energy_factor = self.energe_agg_factor(torch.zeros_like(atype)).view( - nframes, nloc - ) - energy_out = (energy_out * energy_factor) + self.bias_atom_e[atype] - energy_out = energy_out.sum(dim=-1) - - # vector output - # predict_force: [(nframes x nloc) x (1 + nnei2) x 3] - predict_force = self.node_proc(output, pair, delta_pos=delta_pos) - # predict_force_nloc: [nframes x nloc x 3] - predict_force_nloc = (predict_force[:, 0, :]).reshape(nframes, nloc, 3) - return energy_out, predict_force_nloc diff --git a/deepmd/pt/model/task/dipole.py b/deepmd/pt/model/task/dipole.py index 56b14677b9..79f9a0a86c 100644 --- a/deepmd/pt/model/task/dipole.py +++ b/deepmd/pt/model/task/dipole.py @@ -113,7 +113,6 @@ def __init__( type_map=type_map, **kwargs, ) - self.old_impl = False # this only supports the new implementation. def _net_out_dim(self): """Set the FittingNet output dim.""" @@ -123,7 +122,6 @@ def serialize(self) -> dict: data = super().serialize() data["type"] = "dipole" data["embedding_width"] = self.embedding_width - data["old_impl"] = self.old_impl data["r_differentiable"] = self.r_differentiable data["c_differentiable"] = self.c_differentiable return data diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py index 1827569a17..10f88519e1 100644 --- a/deepmd/pt/model/task/fitting.py +++ b/deepmd/pt/model/task/fitting.py @@ -19,9 +19,6 @@ FittingNet, NetworkCollection, ) -from deepmd.pt.model.network.network import ( - ResidualDeep, -) from deepmd.pt.model.task.base_fitting import ( BaseFitting, ) @@ -211,41 +208,24 @@ def __init__( in_dim = self.dim_descrpt + self.numb_fparam + self.numb_aparam - self.old_impl = kwargs.get("old_impl", False) - if self.old_impl: - filter_layers = [] - for type_i in range(self.ntypes if not self.mixed_types else 1): - bias_type = 0.0 - one = ResidualDeep( - type_i, - self.dim_descrpt, + self.filter_layers = NetworkCollection( + 1 if not self.mixed_types else 0, + self.ntypes, + network_type="fitting_network", + networks=[ + FittingNet( + in_dim, + net_dim_out, self.neuron, - bias_type, - resnet_dt=self.resnet_dt, + self.activation_function, + self.resnet_dt, + self.precision, + bias_out=True, + seed=child_seed(self.seed, ii), ) - filter_layers.append(one) - self.filter_layers_old = torch.nn.ModuleList(filter_layers) - self.filter_layers = None - else: - self.filter_layers = NetworkCollection( - 1 if not self.mixed_types else 0, - self.ntypes, - network_type="fitting_network", - networks=[ - FittingNet( - in_dim, - net_dim_out, - self.neuron, - self.activation_function, - self.resnet_dt, - self.precision, - bias_out=True, - seed=child_seed(self.seed, ii), - ) - for ii in range(self.ntypes if not self.mixed_types else 1) - ], - ) - self.filter_layers_old = None + for ii in range(self.ntypes if not self.mixed_types else 1) + ], + ) # set trainable for param in self.parameters(): param.requires_grad = self.trainable @@ -488,47 +468,29 @@ def _forward_common( dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=descriptor.device, ) # jit assertion - if self.old_impl: - assert self.filter_layers_old is not None - assert xx_zeros is None - if self.mixed_types: - atom_property = self.filter_layers_old[0](xx) + self.bias_atom_e[atype] - outs = outs + atom_property # Shape is [nframes, natoms[0], 1] - else: - for type_i, filter_layer in enumerate(self.filter_layers_old): - mask = atype == type_i - atom_property = filter_layer(xx) - atom_property = atom_property + self.bias_atom_e[type_i] - atom_property = atom_property * mask.unsqueeze(-1) - outs = outs + atom_property # Shape is [nframes, natoms[0], 1] + if self.mixed_types: + atom_property = self.filter_layers.networks[0](xx) + self.bias_atom_e[atype] + if xx_zeros is not None: + atom_property -= self.filter_layers.networks[0](xx_zeros) + outs = outs + atom_property # Shape is [nframes, natoms[0], net_dim_out] else: - if self.mixed_types: - atom_property = ( - self.filter_layers.networks[0](xx) + self.bias_atom_e[atype] - ) + for type_i, ll in enumerate(self.filter_layers.networks): + mask = (atype == type_i).unsqueeze(-1) + mask = torch.tile(mask, (1, 1, net_dim_out)) + atom_property = ll(xx) if xx_zeros is not None: - atom_property -= self.filter_layers.networks[0](xx_zeros) + # must assert, otherwise jit is not happy + assert self.remove_vaccum_contribution is not None + if not ( + len(self.remove_vaccum_contribution) > type_i + and not self.remove_vaccum_contribution[type_i] + ): + atom_property -= ll(xx_zeros) + atom_property = atom_property + self.bias_atom_e[type_i] + atom_property = atom_property * mask outs = ( outs + atom_property ) # Shape is [nframes, natoms[0], net_dim_out] - else: - for type_i, ll in enumerate(self.filter_layers.networks): - mask = (atype == type_i).unsqueeze(-1) - mask = torch.tile(mask, (1, 1, net_dim_out)) - atom_property = ll(xx) - if xx_zeros is not None: - # must assert, otherwise jit is not happy - assert self.remove_vaccum_contribution is not None - if not ( - len(self.remove_vaccum_contribution) > type_i - and not self.remove_vaccum_contribution[type_i] - ): - atom_property -= ll(xx_zeros) - atom_property = atom_property + self.bias_atom_e[type_i] - atom_property = atom_property * mask - outs = ( - outs + atom_property - ) # Shape is [nframes, natoms[0], net_dim_out] # nf x nloc mask = self.emask(atype) # nf x nloc x nod diff --git a/deepmd/pt/model/task/polarizability.py b/deepmd/pt/model/task/polarizability.py index a16ab886d4..512044efbd 100644 --- a/deepmd/pt/model/task/polarizability.py +++ b/deepmd/pt/model/task/polarizability.py @@ -138,7 +138,6 @@ def __init__( type_map=type_map, **kwargs, ) - self.old_impl = False # this only supports the new implementation. def _net_out_dim(self): """Set the FittingNet output dim.""" @@ -195,7 +194,6 @@ def serialize(self) -> dict: data["type"] = "polar" data["@version"] = 3 data["embedding_width"] = self.embedding_width - data["old_impl"] = self.old_impl data["fit_diag"] = self.fit_diag data["shift_diag"] = self.shift_diag data["@variables"]["scale"] = to_numpy_array(self.scale) diff --git a/source/tests/pt/model/test_descriptor_hybrid.py b/source/tests/pt/model/test_descriptor_hybrid.py index 5d03b28399..074af4da4e 100644 --- a/source/tests/pt/model/test_descriptor_hybrid.py +++ b/source/tests/pt/model/test_descriptor_hybrid.py @@ -41,7 +41,6 @@ def test_jit( self.rcut, self.rcut_smth, self.sel, - old_impl=False, ) ddsub1 = DescrptSeR( self.rcut, diff --git a/source/tests/pt/model/test_descriptor_se_r.py b/source/tests/pt/model/test_descriptor_se_r.py index f3692101c5..e4aa405dd8 100644 --- a/source/tests/pt/model/test_descriptor_se_r.py +++ b/source/tests/pt/model/test_descriptor_se_r.py @@ -61,7 +61,6 @@ def test_consistency( self.sel, precision=prec, resnet_dt=idt, - old_impl=False, exclude_mask=em, seed=GLOBAL_SEED, ).to(env.DEVICE) @@ -130,7 +129,6 @@ def test_load_stat(self): self.sel, precision=prec, resnet_dt=idt, - old_impl=False, seed=GLOBAL_SEED, ) dd0.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) @@ -181,7 +179,6 @@ def test_jit( self.sel, precision=prec, resnet_dt=idt, - old_impl=False, seed=GLOBAL_SEED, ) dd0.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/test_dpa1.py b/source/tests/pt/model/test_dpa1.py index b825885311..d168ceb2ae 100644 --- a/source/tests/pt/model/test_dpa1.py +++ b/source/tests/pt/model/test_dpa1.py @@ -70,7 +70,6 @@ def test_consistency( tebd_input_mode=tm, use_econf_tebd=ect, type_map=["O", "H"] if ect else None, - old_impl=False, seed=GLOBAL_SEED, ).to(env.DEVICE) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) @@ -108,69 +107,6 @@ def test_consistency( atol=atol, err_msg=err_msg, ) - # old impl - if ( - idt is False - and prec == "float64" - and to is False - and tm == "concat" - and ect is False - ): - dd3 = DescrptDPA1( - self.rcut, - self.rcut_smth, - self.sel_mix, - self.nt, - attn_layer=2, - precision=prec, - resnet_dt=idt, - smooth_type_embedding=sm, - old_impl=True, - seed=GLOBAL_SEED, - ).to(env.DEVICE) - dd0_state_dict = dd0.se_atten.state_dict() - dd3_state_dict = dd3.se_atten.state_dict() - - dd0_state_dict_attn = dd0.se_atten.dpa1_attention.state_dict() - dd3_state_dict_attn = dd3.se_atten.dpa1_attention.state_dict() - for i in dd3_state_dict: - dd3_state_dict[i] = ( - dd0_state_dict[ - i.replace(".deep_layers.", ".layers.") - .replace("filter_layers_old.", "filter_layers._networks.") - .replace( - ".attn_layer_norm.weight", ".attn_layer_norm.matrix" - ) - ] - .detach() - .clone() - ) - if ".bias" in i and "attn_layer_norm" not in i: - dd3_state_dict[i] = dd3_state_dict[i].unsqueeze(0) - dd3.se_atten.load_state_dict(dd3_state_dict) - - dd0_state_dict_tebd = dd0.type_embedding.state_dict() - dd3_state_dict_tebd = dd3.type_embedding.state_dict() - for i in dd3_state_dict_tebd: - dd3_state_dict_tebd[i] = ( - dd0_state_dict_tebd[i.replace("embedding.weight", "matrix")] - .detach() - .clone() - ) - dd3.type_embedding.load_state_dict(dd3_state_dict_tebd) - - rd3, _, _, _, _ = dd3( - torch.tensor(self.coord_ext, dtype=dtype, device=env.DEVICE), - torch.tensor(self.atype_ext, dtype=int, device=env.DEVICE), - torch.tensor(self.nlist, dtype=int, device=env.DEVICE), - ) - np.testing.assert_allclose( - rd0.detach().cpu().numpy(), - rd3.detach().cpu().numpy(), - rtol=rtol, - atol=atol, - err_msg=err_msg, - ) def test_jit( self, @@ -211,7 +147,6 @@ def test_jit( tebd_input_mode=tm, use_econf_tebd=ect, type_map=["O", "H"] if ect else None, - old_impl=False, seed=GLOBAL_SEED, ) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/test_dpa2.py b/source/tests/pt/model/test_dpa2.py index 0beb34c031..2eac49d573 100644 --- a/source/tests/pt/model/test_dpa2.py +++ b/source/tests/pt/model/test_dpa2.py @@ -154,7 +154,6 @@ def test_consistency( precision=prec, use_econf_tebd=ect, type_map=["O", "H"] if ect else None, - old_impl=False, seed=GLOBAL_SEED, ).to(env.DEVICE) @@ -193,45 +192,6 @@ def test_consistency( rtol=rtol, atol=atol, ) - # old impl - if prec == "float64" and rus == "res_avg" and ect is False and ns is False: - dd3 = DescrptDPA2( - self.nt, - repinit=repinit, - repformer=repformer, - # kwargs for descriptor - smooth=sm, - exclude_types=[], - add_tebd_to_repinit_out=False, - precision=prec, - old_impl=True, - seed=GLOBAL_SEED, - ).to(env.DEVICE) - dd0_state_dict = dd0.state_dict() - dd3_state_dict = dd3.state_dict() - for i in list(dd0_state_dict.keys()): - if ".bias" in i and ( - ".linear1." in i or ".linear2." in i or ".head_map." in i - ): - dd0_state_dict[i] = dd0_state_dict[i].unsqueeze(0) - if ".attn2_lm.matrix" in i: - dd0_state_dict[ - i.replace(".attn2_lm.matrix", ".attn2_lm.weight") - ] = dd0_state_dict.pop(i) - - dd3.load_state_dict(dd0_state_dict) - rd3, _, _, _, _ = dd3( - torch.tensor(self.coord_ext, dtype=dtype, device=env.DEVICE), - torch.tensor(self.atype_ext, dtype=int, device=env.DEVICE), - torch.tensor(self.nlist, dtype=int, device=env.DEVICE), - torch.tensor(self.mapping, dtype=int, device=env.DEVICE), - ) - np.testing.assert_allclose( - rd0.detach().cpu().numpy(), - rd3.detach().cpu().numpy(), - rtol=rtol, - atol=atol, - ) def test_jit( self, @@ -350,7 +310,6 @@ def test_jit( precision=prec, use_econf_tebd=ect, type_map=["O", "H"] if ect else None, - old_impl=False, seed=GLOBAL_SEED, ).to(env.DEVICE) diff --git a/source/tests/pt/model/test_embedding_net.py b/source/tests/pt/model/test_embedding_net.py index 3605316437..1566eb2416 100644 --- a/source/tests/pt/model/test_embedding_net.py +++ b/source/tests/pt/model/test_embedding_net.py @@ -167,20 +167,15 @@ def test_consistency(self): ) # Reproduced - old_impl = False descriptor = DescrptSeA( self.rcut, self.rcut_smth, self.sel, neuron=self.filter_neuron, axis_neuron=self.axis_neuron, - old_impl=old_impl, ).to(DEVICE) for name, param in descriptor.named_parameters(): - if old_impl: - ms = re.findall(r"(\d)\.deep_layers\.(\d)\.([a-z]+)", name) - else: - ms = re.findall(r"(\d)\.layers\.(\d)\.([a-z]+)", name) + ms = re.findall(r"(\d)\.layers\.(\d)\.([a-z]+)", name) if len(ms) == 1: m = ms[0] key = gen_key(worb=m[2], depth=int(m[1]) + 1, elemid=int(m[0])) diff --git a/source/tests/pt/model/test_ener_fitting.py b/source/tests/pt/model/test_ener_fitting.py index 3255db2784..5c55766455 100644 --- a/source/tests/pt/model/test_ener_fitting.py +++ b/source/tests/pt/model/test_ener_fitting.py @@ -10,7 +10,6 @@ DescrptSeA, ) from deepmd.pt.model.task.ener import ( - EnergyFittingNet, InvarFitting, ) from deepmd.pt.utils import ( @@ -103,53 +102,6 @@ def test_consistency( ) self.assertEqual(ft0.get_sel_type(), ft1.get_sel_type()) - def test_new_old( - self, - ): - nf, nloc, nnei = self.nlist.shape - dd = DescrptSeA(self.rcut, self.rcut_smth, self.sel).to(env.DEVICE) - rd0, _, _, _, _ = dd( - torch.tensor(self.coord_ext, dtype=dtype, device=env.DEVICE), - torch.tensor(self.atype_ext, dtype=int, device=env.DEVICE), - torch.tensor(self.nlist, dtype=int, device=env.DEVICE), - ) - atype = torch.tensor(self.atype_ext[:, :nloc], dtype=int, device=env.DEVICE) - - od = 1 - for foo, mixed_types in itertools.product( - [True], - [True, False], - ): - ft0 = EnergyFittingNet( - self.nt, - dd.dim_out, - mixed_types=mixed_types, - ).to(env.DEVICE) - ft1 = EnergyFittingNet( - self.nt, - dd.dim_out, - mixed_types=mixed_types, - old_impl=True, - ).to(env.DEVICE) - dd0 = ft0.state_dict() - dd1 = ft1.state_dict() - for kk, vv in dd1.items(): - new_kk = kk - new_kk = new_kk.replace("filter_layers_old", "filter_layers.networks") - new_kk = new_kk.replace("deep_layers", "layers") - new_kk = new_kk.replace("final_layer", "layers.3") - dd1[kk] = dd0[new_kk] - if kk.split(".")[-1] in ["idt", "bias"]: - dd1[kk] = dd1[kk].unsqueeze(0) - dd1["bias_atom_e"] = dd0["bias_atom_e"] - ft1.load_state_dict(dd1) - ret0 = ft0(rd0, atype) - ret1 = ft1(rd0, atype) - np.testing.assert_allclose( - to_numpy_array(ret0["energy"]), - to_numpy_array(ret1["energy"]), - ) - def test_jit( self, ): diff --git a/source/tests/pt/model/test_se_atten_v2.py b/source/tests/pt/model/test_se_atten_v2.py index f9857fc728..462b2aca34 100644 --- a/source/tests/pt/model/test_se_atten_v2.py +++ b/source/tests/pt/model/test_se_atten_v2.py @@ -66,7 +66,6 @@ def test_consistency( type_one_side=to, use_econf_tebd=ect, type_map=["O", "H"] if ect else None, - old_impl=False, seed=GLOBAL_SEED, ).to(env.DEVICE) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) @@ -138,7 +137,6 @@ def test_jit( type_one_side=to, use_econf_tebd=ect, type_map=["O", "H"] if ect else None, - old_impl=False, seed=GLOBAL_SEED, ) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/test_se_e2_a.py b/source/tests/pt/model/test_se_e2_a.py index abe13ce86e..da9e69243c 100644 --- a/source/tests/pt/model/test_se_e2_a.py +++ b/source/tests/pt/model/test_se_e2_a.py @@ -58,7 +58,6 @@ def test_consistency( self.sel, precision=prec, resnet_dt=idt, - old_impl=False, exclude_types=em, seed=GLOBAL_SEED, ).to(env.DEVICE) @@ -105,46 +104,6 @@ def test_consistency( atol=atol, err_msg=err_msg, ) - # old impl - if idt is False and prec == "float64" and em == []: - dd3 = DescrptSeA( - self.rcut, - self.rcut_smth, - self.sel, - precision=prec, - resnet_dt=idt, - old_impl=True, - seed=GLOBAL_SEED, - ).to(env.DEVICE) - dd0_state_dict = dd0.sea.state_dict() - dd3_state_dict = dd3.sea.state_dict() - for i in dd3_state_dict: - dd3_state_dict[i] = ( - dd0_state_dict[ - i.replace(".deep_layers.", ".layers.").replace( - "filter_layers_old.", "filter_layers.networks." - ) - ] - .detach() - .clone() - ) - if ".bias" in i: - dd3_state_dict[i] = dd3_state_dict[i].unsqueeze(0) - dd3.sea.load_state_dict(dd3_state_dict) - - rd3, gr3, _, _, sw3 = dd3( - torch.tensor(self.coord_ext, dtype=dtype, device=env.DEVICE), - torch.tensor(self.atype_ext, dtype=int, device=env.DEVICE), - torch.tensor(self.nlist, dtype=int, device=env.DEVICE), - ) - for aa, bb in zip([rd1, gr1, sw1], [rd3, gr3, sw3]): - np.testing.assert_allclose( - aa.detach().cpu().numpy(), - bb.detach().cpu().numpy(), - rtol=rtol, - atol=atol, - err_msg=err_msg, - ) def test_jit( self, @@ -169,7 +128,6 @@ def test_jit( self.sel, precision=prec, resnet_dt=idt, - old_impl=False, seed=GLOBAL_SEED, ) dd0.sea.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) From 1e1090a302fa66b10a29cd255ce41881da57a9e8 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 17 Oct 2024 07:44:47 -0400 Subject: [PATCH 051/193] chore(lmp): add LAMMPS DPA-2 nopbc tests (#4220) Adding tests to see whether #4167 is resolved. The answer is no. Segfaults are thrown with MPI. ## Summary by CodeRabbit - **New Features** - Introduced a new command-line argument `--nopbc` to modify boundary conditions in LAMMPS simulations. - **Tests** - Added a comprehensive suite of unit tests for the DeepMD potential in LAMMPS, covering various configurations and scenarios to ensure accuracy and reliability. --------- Signed-off-by: Jinzhe Zeng --- source/lmp/tests/run_mpi_pair_deepmd.py | 6 +- source/lmp/tests/test_lammps_dpa_pt_nopbc.py | 728 +++++++++++++++++++ 2 files changed, 733 insertions(+), 1 deletion(-) create mode 100644 source/lmp/tests/test_lammps_dpa_pt_nopbc.py diff --git a/source/lmp/tests/run_mpi_pair_deepmd.py b/source/lmp/tests/run_mpi_pair_deepmd.py index 0c4291ab3a..7c0ff6edc0 100644 --- a/source/lmp/tests/run_mpi_pair_deepmd.py +++ b/source/lmp/tests/run_mpi_pair_deepmd.py @@ -21,6 +21,7 @@ parser.add_argument("MD_FILE", type=str) parser.add_argument("OUTPUT", type=str) parser.add_argument("--balance", action="store_true") +parser.add_argument("--nopbc", action="store_true") args = parser.parse_args() data_file = args.DATAFILE @@ -38,7 +39,10 @@ # 6 and 0 atoms lammps.processors("1 2 1") lammps.units("metal") -lammps.boundary("p p p") +if args.nopbc: + lammps.boundary("f f f") +else: + lammps.boundary("p p p") lammps.atom_style("atomic") lammps.neighbor("2.0 bin") lammps.neigh_modify("every 10 delay 0 check no") diff --git a/source/lmp/tests/test_lammps_dpa_pt_nopbc.py b/source/lmp/tests/test_lammps_dpa_pt_nopbc.py new file mode 100644 index 0000000000..15fe2c0bc2 --- /dev/null +++ b/source/lmp/tests/test_lammps_dpa_pt_nopbc.py @@ -0,0 +1,728 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import importlib +import os +import shutil +import subprocess as sp +import sys +import tempfile +from pathlib import ( + Path, +) + +import constants +import numpy as np +import pytest +from lammps import ( + PyLammps, +) +from write_lmp_data import ( + write_lmp_data, +) + +pbtxt_file2 = Path(__file__).parent.parent.parent / "tests" / "infer" / "deeppot.pbtxt" +pb_file = Path(__file__).parent.parent.parent / "tests" / "infer" / "deeppot_dpa.pth" +pb_file2 = Path(__file__).parent / "graph.pb" +system_file = Path(__file__).parent.parent.parent / "tests" +data_file = Path(__file__).parent / "data.lmp" +data_file_si = Path(__file__).parent / "data.si" +data_type_map_file = Path(__file__).parent / "data_type_map.lmp" +md_file = Path(__file__).parent / "md.out" + +# this is as the same as python and c++ tests, test_deeppot_a.py +expected_ae = np.array( + [ + -95.13216447995296, + -188.10146505781867, + -187.74742451023172, + -94.73864717001219, + -187.76956603003393, + -187.76904550434332, + ] +) +expected_e = np.sum(expected_ae) +expected_f = np.array( + [ + 0.7486830600282869, + -0.240322915088127, + -0.3943366458127905, + -0.1776248813665344, + 0.2359143394202788, + 0.4210018319063822, + -0.2368532809002255, + 0.0291156803500336, + -0.0219651427265617, + -1.407280069394403, + 0.4932116549421467, + -0.9482072853582465, + -0.1501958909452974, + -0.9720722611839484, + 1.5128172910814666, + 1.2232710625781733, + 0.4541535015596165, + -0.569310049090249, + ] +).reshape(6, 3) + +expected_f2 = np.array( + [ + -2.161037360255332107e00, + 9.052994347015581589e-01, + 1.635379623977007979e00, + 2.161037360255332107e00, + -9.052994347015581589e-01, + -1.635379623977007979e00, + -1.167128117249453811e-02, + 1.371975700096064992e-03, + -1.575265180249604477e-03, + 6.226508593971802341e-01, + -1.816734122009256991e-01, + 3.561766019664774907e-01, + -1.406075393906316626e-02, + 3.789140061530929526e-01, + -6.018777878642909140e-01, + -5.969188242856223736e-01, + -1.986125696522633155e-01, + 2.472764510780630642e-01, + ] +).reshape(6, 3) + +expected_v = -np.array( + [ + 1.4724482801774368e00, + -1.8952544175284314e-01, + -2.0502896614522359e-01, + -2.0361724110178425e-01, + 5.4221646102123211e-02, + 8.7963957026666373e-02, + -1.3233356224791937e-01, + 8.3907068051133571e-02, + 1.6072164570432412e-01, + 2.2913216241740741e00, + -6.0712170533586352e-02, + 1.2802395909429765e-01, + 6.9581050483420448e-03, + 2.0894022035588655e-02, + 4.3408316864598340e-02, + -1.4144392402206662e-03, + 3.6852652738654124e-02, + 7.7149761552687490e-02, + 5.6814285976509526e-01, + -7.0738211182030164e-02, + 5.4514470128648518e-02, + -7.1339324275474125e-02, + 9.8158535704203354e-03, + -8.3431069537701560e-03, + 5.4072790262097083e-02, + -8.1976736911977682e-03, + 7.6505804915597275e-03, + 1.6869950835783332e-01, + 2.1880432930426963e-02, + 1.0308234746703970e-01, + 9.1015395953307099e-02, + 7.1788910181538768e-02, + -1.4119552688428305e-01, + -1.4977320631771729e-01, + -1.0982955047012899e-01, + 2.3324521962640055e-01, + 8.1569862372597679e-01, + 6.2848559999917952e-02, + -4.5341405643671506e-02, + -3.9134119664198064e-01, + 4.1651372430088562e-01, + -5.8173709994663803e-01, + 6.6155672230934037e-01, + -6.4774042800560672e-01, + 9.0924772156749301e-01, + 2.0503134548416586e00, + 1.9684008914564011e-01, + -3.1711040533580070e-01, + 5.2891751962511613e-01, + 8.7385258358844808e-02, + -1.5487618319904839e-01, + -7.1396830520028809e-01, + -1.0977171171532918e-01, + 1.9792085656111236e-01, + ] +).reshape(6, 9) +expected_v2 = -np.array( + [ + -7.042445481792056761e-01, + 2.950213647777754078e-01, + 5.329418202437231633e-01, + 2.950213647777752968e-01, + -1.235900311906896754e-01, + -2.232594111831812944e-01, + 5.329418202437232743e-01, + -2.232594111831813499e-01, + -4.033073234276823849e-01, + -8.949230984097404917e-01, + 3.749002169013777030e-01, + 6.772391014992630298e-01, + 3.749002169013777586e-01, + -1.570527935667933583e-01, + -2.837082722496912512e-01, + 6.772391014992631408e-01, + -2.837082722496912512e-01, + -5.125052659994422388e-01, + 4.858210330291591605e-02, + -6.902596153269104431e-03, + 6.682612642430500391e-03, + -5.612247004554610057e-03, + 9.767795567660207592e-04, + -9.773758942738038254e-04, + 5.638322117219018645e-03, + -9.483806049779926932e-04, + 8.493873281881353637e-04, + -2.941738570564985666e-01, + -4.482529909499673171e-02, + 4.091569840186781021e-02, + -4.509020615859140463e-02, + -1.013919988807244071e-01, + 1.551440772665269030e-01, + 4.181857726606644232e-02, + 1.547200233064863484e-01, + -2.398213304685777592e-01, + -3.218625798524068354e-02, + -1.012438450438508421e-02, + 1.271639330380921855e-02, + 3.072814938490859779e-03, + -9.556241797915024372e-02, + 1.512251983492413077e-01, + -8.277872384009607454e-03, + 1.505412040827929787e-01, + -2.386150620881526407e-01, + -2.312295470054945568e-01, + -6.631490213524345034e-02, + 7.932427266386249398e-02, + -8.053754366323923053e-02, + -3.294595881137418747e-02, + 4.342495071150231922e-02, + 1.004599500126941436e-01, + 4.450400364869536163e-02, + -5.951077548033092968e-02, + ] +).reshape(6, 9) + +box = np.array([0, 13, 0, 13, 0, 13, 0, 0, 0]) +coord = np.array( + [ + [12.83, 2.56, 2.18], + [12.09, 2.87, 2.74], + [0.25, 3.32, 1.68], + [3.36, 3.00, 1.81], + [3.51, 2.51, 2.60], + [4.27, 3.22, 1.56], + ] +) +type_OH = np.array([1, 2, 2, 1, 2, 2]) +type_HO = np.array([2, 1, 1, 2, 1, 1]) + + +sp.check_output( + f"{sys.executable} -m deepmd convert-from pbtxt -i {pbtxt_file2.resolve()} -o {pb_file2.resolve()}".split() +) + + +def setup_module(): + write_lmp_data(box, coord, type_OH, data_file) + write_lmp_data(box, coord, type_HO, data_type_map_file) + write_lmp_data( + box * constants.dist_metal2si, + coord * constants.dist_metal2si, + type_OH, + data_file_si, + ) + + +def teardown_module(): + os.remove(data_file) + os.remove(data_type_map_file) + + +def _lammps(data_file, units="metal") -> PyLammps: + lammps = PyLammps() + lammps.units(units) + lammps.boundary("f f f") + lammps.atom_style("atomic") + if units == "metal" or units == "real": + lammps.neighbor("2.0 bin") + elif units == "si": + lammps.neighbor("2.0e-10 bin") + else: + raise ValueError("units should be metal, real, or si") + lammps.neigh_modify("every 10 delay 0 check no") + lammps.read_data(data_file.resolve()) + if units == "metal" or units == "real": + lammps.mass("1 16") + lammps.mass("2 2") + elif units == "si": + lammps.mass("1 %.10e" % (16 * constants.mass_metal2si)) + lammps.mass("2 %.10e" % (2 * constants.mass_metal2si)) + else: + raise ValueError("units should be metal, real, or si") + if units == "metal": + lammps.timestep(0.0005) + elif units == "real": + lammps.timestep(0.5) + elif units == "si": + lammps.timestep(5e-16) + else: + raise ValueError("units should be metal, real, or si") + lammps.fix("1 all nve") + return lammps + + +@pytest.fixture +def lammps(): + lmp = _lammps(data_file=data_file) + yield lmp + lmp.close() + + +@pytest.fixture +def lammps_type_map(): + lmp = _lammps(data_file=data_type_map_file) + yield lmp + lmp.close() + + +@pytest.fixture +def lammps_real(): + lmp = _lammps(data_file=data_file, units="real") + yield lmp + lmp.close() + + +@pytest.fixture +def lammps_si(): + lmp = _lammps(data_file=data_file_si, units="si") + yield lmp + lmp.close() + + +def test_pair_deepmd(lammps): + lammps.pair_style(f"deepmd {pb_file.resolve()}") + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(6): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + lammps.run(1) + + +def test_pair_deepmd_virial(lammps): + lammps.pair_style(f"deepmd {pb_file.resolve()}") + lammps.pair_coeff("* *") + lammps.compute("virial all centroid/stress/atom NULL pair") + for ii in range(9): + jj = [0, 4, 8, 3, 6, 7, 1, 2, 5][ii] + lammps.variable(f"virial{jj} atom c_virial[{ii+1}]") + lammps.dump( + "1 all custom 1 dump id " + " ".join([f"v_virial{ii}" for ii in range(9)]) + ) + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(6): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + idx_map = lammps.lmp.numpy.extract_atom("id") - 1 + for ii in range(9): + assert np.array( + lammps.variables[f"virial{ii}"].value + ) / constants.nktv2p == pytest.approx(expected_v[idx_map, ii]) + + +def test_pair_deepmd_model_devi(lammps): + lammps.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic" + ) + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(6): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + assert md[7:] == pytest.approx(expected_md_f) + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + expected_md_v = ( + np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) / 6 + ) + assert md[1] == pytest.approx(np.max(expected_md_v)) + assert md[2] == pytest.approx(np.min(expected_md_v)) + assert md[3] == pytest.approx(np.sqrt(np.mean(np.square(expected_md_v)))) + + +def test_pair_deepmd_model_devi_virial(lammps): + lammps.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic" + ) + lammps.pair_coeff("* *") + lammps.compute("virial all centroid/stress/atom NULL pair") + for ii in range(9): + jj = [0, 4, 8, 3, 6, 7, 1, 2, 5][ii] + lammps.variable(f"virial{jj} atom c_virial[{ii+1}]") + lammps.dump( + "1 all custom 1 dump id " + " ".join([f"v_virial{ii}" for ii in range(9)]) + ) + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(6): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + idx_map = lammps.lmp.numpy.extract_atom("id") - 1 + for ii in range(9): + assert np.array( + lammps.variables[f"virial{ii}"].value + ) / constants.nktv2p == pytest.approx(expected_v[idx_map, ii]) + # load model devi + md = np.loadtxt(md_file.resolve()) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + assert md[7:] == pytest.approx(expected_md_f) + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + expected_md_v = ( + np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) / 6 + ) + assert md[1] == pytest.approx(np.max(expected_md_v)) + assert md[2] == pytest.approx(np.min(expected_md_v)) + assert md[3] == pytest.approx(np.sqrt(np.mean(np.square(expected_md_v)))) + + +def test_pair_deepmd_model_devi_atomic_relative(lammps): + relative = 1.0 + lammps.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative {relative}" + ) + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(6): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_f /= norm + relative + assert md[7:] == pytest.approx(expected_md_f) + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + expected_md_v = ( + np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) / 6 + ) + assert md[1] == pytest.approx(np.max(expected_md_v)) + assert md[2] == pytest.approx(np.min(expected_md_v)) + assert md[3] == pytest.approx(np.sqrt(np.mean(np.square(expected_md_v)))) + + +def test_pair_deepmd_model_devi_atomic_relative_v(lammps): + relative = 1.0 + lammps.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative_v {relative}" + ) + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(6): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + md = np.loadtxt(md_file.resolve()) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + assert md[7:] == pytest.approx(expected_md_f) + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + expected_md_v = ( + np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) / 6 + ) + norm = ( + np.abs( + np.mean([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) + ) + / 6 + ) + expected_md_v /= norm + relative + assert md[1] == pytest.approx(np.max(expected_md_v)) + assert md[2] == pytest.approx(np.min(expected_md_v)) + assert md[3] == pytest.approx(np.sqrt(np.mean(np.square(expected_md_v)))) + + +def test_pair_deepmd_type_map(lammps_type_map): + lammps_type_map.pair_style(f"deepmd {pb_file.resolve()}") + lammps_type_map.pair_coeff("* * H O") + lammps_type_map.run(0) + assert lammps_type_map.eval("pe") == pytest.approx(expected_e) + for ii in range(6): + assert lammps_type_map.atoms[ii].force == pytest.approx( + expected_f[lammps_type_map.atoms[ii].id - 1] + ) + lammps_type_map.run(1) + + +def test_pair_deepmd_real(lammps_real): + lammps_real.pair_style(f"deepmd {pb_file.resolve()}") + lammps_real.pair_coeff("* *") + lammps_real.run(0) + assert lammps_real.eval("pe") == pytest.approx( + expected_e * constants.ener_metal2real + ) + for ii in range(6): + assert lammps_real.atoms[ii].force == pytest.approx( + expected_f[lammps_real.atoms[ii].id - 1] * constants.force_metal2real + ) + lammps_real.run(1) + + +def test_pair_deepmd_virial_real(lammps_real): + lammps_real.pair_style(f"deepmd {pb_file.resolve()}") + lammps_real.pair_coeff("* *") + lammps_real.compute("virial all centroid/stress/atom NULL pair") + for ii in range(9): + jj = [0, 4, 8, 3, 6, 7, 1, 2, 5][ii] + lammps_real.variable(f"virial{jj} atom c_virial[{ii+1}]") + lammps_real.dump( + "1 all custom 1 dump id " + " ".join([f"v_virial{ii}" for ii in range(9)]) + ) + lammps_real.run(0) + assert lammps_real.eval("pe") == pytest.approx( + expected_e * constants.ener_metal2real + ) + for ii in range(6): + assert lammps_real.atoms[ii].force == pytest.approx( + expected_f[lammps_real.atoms[ii].id - 1] * constants.force_metal2real + ) + idx_map = lammps_real.lmp.numpy.extract_atom("id") - 1 + for ii in range(9): + assert np.array( + lammps_real.variables[f"virial{ii}"].value + ) / constants.nktv2p_real == pytest.approx( + expected_v[idx_map, ii] * constants.ener_metal2real + ) + + +def test_pair_deepmd_model_devi_real(lammps_real): + lammps_real.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic" + ) + lammps_real.pair_coeff("* *") + lammps_real.run(0) + assert lammps_real.eval("pe") == pytest.approx( + expected_e * constants.ener_metal2real + ) + for ii in range(6): + assert lammps_real.atoms[ii].force == pytest.approx( + expected_f[lammps_real.atoms[ii].id - 1] * constants.force_metal2real + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + assert md[7:] == pytest.approx(expected_md_f * constants.force_metal2real) + assert md[4] == pytest.approx(np.max(expected_md_f) * constants.force_metal2real) + assert md[5] == pytest.approx(np.min(expected_md_f) * constants.force_metal2real) + assert md[6] == pytest.approx(np.mean(expected_md_f) * constants.force_metal2real) + expected_md_v = ( + np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) / 6 + ) + assert md[1] == pytest.approx(np.max(expected_md_v) * constants.ener_metal2real) + assert md[2] == pytest.approx(np.min(expected_md_v) * constants.ener_metal2real) + assert md[3] == pytest.approx( + np.sqrt(np.mean(np.square(expected_md_v))) * constants.ener_metal2real + ) + + +def test_pair_deepmd_model_devi_virial_real(lammps_real): + lammps_real.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic" + ) + lammps_real.pair_coeff("* *") + lammps_real.compute("virial all centroid/stress/atom NULL pair") + for ii in range(9): + jj = [0, 4, 8, 3, 6, 7, 1, 2, 5][ii] + lammps_real.variable(f"virial{jj} atom c_virial[{ii+1}]") + lammps_real.dump( + "1 all custom 1 dump id " + " ".join([f"v_virial{ii}" for ii in range(9)]) + ) + lammps_real.run(0) + assert lammps_real.eval("pe") == pytest.approx( + expected_e * constants.ener_metal2real + ) + for ii in range(6): + assert lammps_real.atoms[ii].force == pytest.approx( + expected_f[lammps_real.atoms[ii].id - 1] * constants.force_metal2real + ) + idx_map = lammps_real.lmp.numpy.extract_atom("id") - 1 + for ii in range(9): + assert np.array( + lammps_real.variables[f"virial{ii}"].value + ) / constants.nktv2p_real == pytest.approx( + expected_v[idx_map, ii] * constants.ener_metal2real + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + assert md[7:] == pytest.approx(expected_md_f * constants.force_metal2real) + assert md[4] == pytest.approx(np.max(expected_md_f) * constants.force_metal2real) + assert md[5] == pytest.approx(np.min(expected_md_f) * constants.force_metal2real) + assert md[6] == pytest.approx(np.mean(expected_md_f) * constants.force_metal2real) + expected_md_v = ( + np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) / 6 + ) + assert md[1] == pytest.approx(np.max(expected_md_v) * constants.ener_metal2real) + assert md[2] == pytest.approx(np.min(expected_md_v) * constants.ener_metal2real) + assert md[3] == pytest.approx( + np.sqrt(np.mean(np.square(expected_md_v))) * constants.ener_metal2real + ) + + +def test_pair_deepmd_model_devi_atomic_relative_real(lammps_real): + relative = 1.0 + lammps_real.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative {relative * constants.force_metal2real}" + ) + lammps_real.pair_coeff("* *") + lammps_real.run(0) + assert lammps_real.eval("pe") == pytest.approx( + expected_e * constants.ener_metal2real + ) + for ii in range(6): + assert lammps_real.atoms[ii].force == pytest.approx( + expected_f[lammps_real.atoms[ii].id - 1] * constants.force_metal2real + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_f /= norm + relative + assert md[7:] == pytest.approx(expected_md_f * constants.force_metal2real) + assert md[4] == pytest.approx(np.max(expected_md_f) * constants.force_metal2real) + assert md[5] == pytest.approx(np.min(expected_md_f) * constants.force_metal2real) + assert md[6] == pytest.approx(np.mean(expected_md_f) * constants.force_metal2real) + expected_md_v = ( + np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) / 6 + ) + assert md[1] == pytest.approx(np.max(expected_md_v) * constants.ener_metal2real) + assert md[2] == pytest.approx(np.min(expected_md_v) * constants.ener_metal2real) + assert md[3] == pytest.approx( + np.sqrt(np.mean(np.square(expected_md_v))) * constants.ener_metal2real + ) + + +def test_pair_deepmd_model_devi_atomic_relative_v_real(lammps_real): + relative = 1.0 + lammps_real.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative_v {relative * constants.ener_metal2real}" + ) + lammps_real.pair_coeff("* *") + lammps_real.run(0) + assert lammps_real.eval("pe") == pytest.approx( + expected_e * constants.ener_metal2real + ) + for ii in range(6): + assert lammps_real.atoms[ii].force == pytest.approx( + expected_f[lammps_real.atoms[ii].id - 1] * constants.force_metal2real + ) + md = np.loadtxt(md_file.resolve()) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + assert md[7:] == pytest.approx(expected_md_f * constants.force_metal2real) + assert md[4] == pytest.approx(np.max(expected_md_f) * constants.force_metal2real) + assert md[5] == pytest.approx(np.min(expected_md_f) * constants.force_metal2real) + assert md[6] == pytest.approx(np.mean(expected_md_f) * constants.force_metal2real) + expected_md_v = ( + np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) / 6 + ) + norm = ( + np.abs( + np.mean([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) + ) + / 6 + ) + expected_md_v /= norm + relative + assert md[1] == pytest.approx(np.max(expected_md_v) * constants.ener_metal2real) + assert md[2] == pytest.approx(np.min(expected_md_v) * constants.ener_metal2real) + assert md[3] == pytest.approx( + np.sqrt(np.mean(np.square(expected_md_v))) * constants.ener_metal2real + ) + + +def test_pair_deepmd_si(lammps_si): + lammps_si.pair_style(f"deepmd {pb_file.resolve()}") + lammps_si.pair_coeff("* *") + lammps_si.run(0) + assert lammps_si.eval("pe") == pytest.approx(expected_e * constants.ener_metal2si) + for ii in range(6): + assert lammps_si.atoms[ii].force == pytest.approx( + expected_f[lammps_si.atoms[ii].id - 1] * constants.force_metal2si + ) + lammps_si.run(1) + + +@pytest.mark.skipif( + shutil.which("mpirun") is None, reason="MPI is not installed on this system" +) +@pytest.mark.skipif( + importlib.util.find_spec("mpi4py") is None, reason="mpi4py is not installed" +) +@pytest.mark.parametrize( + ("balance_args",), + [(["--balance"],), ([],)], +) +def test_pair_deepmd_mpi(balance_args: list): + if balance_args == []: + # python:5331 terminated with signal 11 at PC=7f3e940e3806 SP=7ffd5787edc0. Backtrace: + # /home/runner/work/deepmd-kit/deepmd-kit/dp_test/lib/libdeepmd_op_pt.so(+0x95806)[0x7f3e940e3806] + # /home/runner/work/deepmd-kit/deepmd-kit/dp_test/lib/libdeepmd_op_pt.so(+0x8f76e)[0x7f3e940dd76e] + # /home/runner/work/deepmd-kit/deepmd-kit/dp_test/lib/libdeepmd_op_pt.so(+0x9a38a)[0x7f3e940e838a] + # /home/runner/work/deepmd-kit/deepmd-kit/dp_test/lib/libdeepmd_op_pt.so(_Z9border_opRKN2at6TensorES2_S2_S2_S2_S2_S2_S2_S2_+0x8e)[0x7f3e940dda63] + # /home/runner/work/deepmd-kit/deepmd-kit/dp_test/lib/libdeepmd_op_pt.so(+0xaeac3)[0x7f3e940fcac3] + pytest.skip(reason="Known segfault, see comments for details") + with tempfile.NamedTemporaryFile() as f: + sp.check_call( + [ + "mpirun", + "-n", + "2", + sys.executable, + Path(__file__).parent / "run_mpi_pair_deepmd.py", + data_file, + pb_file, + pb_file2, + md_file, + f.name, + *balance_args, + "--nopbc", + ] + ) + arr = np.loadtxt(f.name, ndmin=1) + pe = arr[0] + + relative = 1.0 + assert pe == pytest.approx(expected_e) + # load model devi + md = np.loadtxt(md_file.resolve()) + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_f /= norm + relative + assert md[7:] == pytest.approx(expected_md_f) + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + expected_md_v = ( + np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) / 6 + ) + assert md[1] == pytest.approx(np.max(expected_md_v)) + assert md[2] == pytest.approx(np.min(expected_md_v)) + assert md[3] == pytest.approx(np.sqrt(np.mean(np.square(expected_md_v)))) From cc4b23d40c15479c0ea47783333efa789bb9ca8f Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 17 Oct 2024 12:21:33 -0400 Subject: [PATCH 052/193] chore: move `LearningRateExp` to `deepmd.utils.learning_rate` (#4219) ## Summary by CodeRabbit - **New Features** - Introduced a new exponential decay learning rate scheduler to enhance training efficiency. - Added functionality to compute learning rates at specific training steps. - **Bug Fixes** - Removed the outdated `LearningRateExp` class from the previous module to avoid confusion. --------- Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/utils/learning_rate.py | 53 +++++++++++++++++++++++++ deepmd/pt/utils/learning_rate.py | 57 +++------------------------ 2 files changed, 59 insertions(+), 51 deletions(-) create mode 100644 deepmd/dpmodel/utils/learning_rate.py diff --git a/deepmd/dpmodel/utils/learning_rate.py b/deepmd/dpmodel/utils/learning_rate.py new file mode 100644 index 0000000000..5997b7d63a --- /dev/null +++ b/deepmd/dpmodel/utils/learning_rate.py @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import numpy as np + + +class LearningRateExp: + def __init__( + self, + start_lr, + stop_lr, + decay_steps, + stop_steps, + decay_rate=None, + **kwargs, + ): + """ + Construct an exponential-decayed learning rate. + + Parameters + ---------- + start_lr + The learning rate at the start of the training. + stop_lr + The desired learning rate at the end of the training. + When decay_rate is explicitly set, this value will serve as + the minimum learning rate during training. In other words, + if the learning rate decays below stop_lr, stop_lr will be applied instead. + decay_steps + The learning rate is decaying every this number of training steps. + stop_steps + The total training steps for learning rate scheduler. + decay_rate + The decay rate for the learning rate. + If provided, the decay rate will be set instead of + calculating it through interpolation between start_lr and stop_lr. + """ + self.start_lr = start_lr + default_ds = 100 if stop_steps // 10 > 100 else stop_steps // 100 + 1 + self.decay_steps = decay_steps + if self.decay_steps >= stop_steps: + self.decay_steps = default_ds + self.decay_rate = np.exp( + np.log(stop_lr / self.start_lr) / (stop_steps / self.decay_steps) + ) + if decay_rate is not None: + self.decay_rate = decay_rate + self.min_lr = stop_lr + + def value(self, step) -> np.float64: + """Get the learning rate at the given step.""" + step_lr = self.start_lr * np.power(self.decay_rate, step // self.decay_steps) + if step_lr < self.min_lr: + step_lr = self.min_lr + return step_lr diff --git a/deepmd/pt/utils/learning_rate.py b/deepmd/pt/utils/learning_rate.py index 94c657abd4..3502434bc0 100644 --- a/deepmd/pt/utils/learning_rate.py +++ b/deepmd/pt/utils/learning_rate.py @@ -1,53 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -import numpy as np +from deepmd.dpmodel.utils.learning_rate import ( + LearningRateExp, +) - -class LearningRateExp: - def __init__( - self, - start_lr, - stop_lr, - decay_steps, - stop_steps, - decay_rate=None, - **kwargs, - ): - """ - Construct an exponential-decayed learning rate. - - Parameters - ---------- - start_lr - The learning rate at the start of the training. - stop_lr - The desired learning rate at the end of the training. - When decay_rate is explicitly set, this value will serve as - the minimum learning rate during training. In other words, - if the learning rate decays below stop_lr, stop_lr will be applied instead. - decay_steps - The learning rate is decaying every this number of training steps. - stop_steps - The total training steps for learning rate scheduler. - decay_rate - The decay rate for the learning rate. - If provided, the decay rate will be set instead of - calculating it through interpolation between start_lr and stop_lr. - """ - self.start_lr = start_lr - default_ds = 100 if stop_steps // 10 > 100 else stop_steps // 100 + 1 - self.decay_steps = decay_steps - if self.decay_steps >= stop_steps: - self.decay_steps = default_ds - self.decay_rate = np.exp( - np.log(stop_lr / self.start_lr) / (stop_steps / self.decay_steps) - ) - if decay_rate is not None: - self.decay_rate = decay_rate - self.min_lr = stop_lr - - def value(self, step): - """Get the learning rate at the given step.""" - step_lr = self.start_lr * np.power(self.decay_rate, step // self.decay_steps) - if step_lr < self.min_lr: - step_lr = self.min_lr - return step_lr +__all__ = [ + "LearningRateExp", +] From c3a4f3eea785690ceb34fce0b6e0a72d4d8e9e60 Mon Sep 17 00:00:00 2001 From: Lysithea Date: Fri, 18 Oct 2024 18:15:35 +0800 Subject: [PATCH 053/193] extend sendlist nlist and other tensors but still bugs --- deepmd/pt/model/model/spin_model.py | 60 +++++++++++++++++++------- source/api_cc/include/DeepPotPT.h | 1 + source/api_cc/src/DeepPotPT.cc | 65 ++++++++++++++++++++++++++++- 3 files changed, 111 insertions(+), 15 deletions(-) diff --git a/deepmd/pt/model/model/spin_model.py b/deepmd/pt/model/model/spin_model.py index f5ab81e16d..0898a1d46a 100644 --- a/deepmd/pt/model/model/spin_model.py +++ b/deepmd/pt/model/model/spin_model.py @@ -66,6 +66,7 @@ def process_spin_input_lower( extended_spin, nlist, mapping: Optional[torch.Tensor] = None, + recv_num:Optional[torch.Tensor] = None ): """ Add `extended_spin` into `extended_coord` to generate virtual atoms, and extend `nlist` and `mapping`. @@ -82,18 +83,18 @@ def process_spin_input_lower( )[extended_atype].reshape([nframes, nall, 1]) virtual_extended_atype = extended_atype + self.ntypes_real extended_coord_updated = self.concat_switch_virtual( - extended_coord, virtual_extended_coord, nloc + extended_coord, virtual_extended_coord, nloc, recv_num = recv_num ) extended_atype_updated = self.concat_switch_virtual( - extended_atype, virtual_extended_atype, nloc + extended_atype, virtual_extended_atype, nloc, recv_num = recv_num ) if mapping is not None: virtual_mapping = mapping + nloc - mapping_updated = self.concat_switch_virtual(mapping, virtual_mapping, nloc) + mapping_updated = self.concat_switch_virtual(mapping, virtual_mapping, nloc, recv_num = recv_num) else: mapping_updated = None # extend the nlist - nlist_updated = self.extend_nlist(extended_atype, nlist) + nlist_updated = self.extend_nlist(extended_atype, nlist, recv_num = recv_num) return ( extended_coord_updated, extended_atype_updated, @@ -176,7 +177,7 @@ def process_spin_output_lower( return extended_out_real, extended_out_mag, atomic_mask > 0.0 @staticmethod - def extend_nlist(extended_atype, nlist): + def extend_nlist(extended_atype, nlist, recv_num:Optional[torch.Tensor] = None): nframes, nloc, nnei = nlist.shape nall = extended_atype.shape[1] nlist_mask = nlist != -1 @@ -203,10 +204,21 @@ def extend_nlist(extended_atype, nlist): second_part_index = (nall <= extended_nlist) & (extended_nlist < (nall + nloc)) extended_nlist[first_part_index] += nloc extended_nlist[second_part_index] -= nall - nloc + if recv_num is not None: + index_part = [] + origin_recv_num = torch.div(recv_num, 2).to(torch.int) + prefix_sum = torch.cumsum(origin_recv_num, dim=0) + prefix_sum = torch.cat((torch.tensor([0]), prefix_sum)) + for i in range(recv_num.size(0)): + index_part.append((nloc * 2 + prefix_sum[i] <= extended_nlist) & (extended_nlist < nloc *2 + prefix_sum[i+1])) + index_part.append((nloc + nall + prefix_sum[i] <= extended_nlist) & (extended_nlist < nloc + nall + prefix_sum[i+1])) + for i in range(recv_num.size(0)): + extended_nlist[index_part[2 * i]] += prefix_sum[i] + extended_nlist[index_part[2 * i + 1]] -= nall - nloc - prefix_sum[i + 1] return extended_nlist @staticmethod - def concat_switch_virtual(extended_tensor, extended_tensor_virtual, nloc: int): + def concat_switch_virtual(extended_tensor, extended_tensor_virtual, nloc: int, recv_num:Optional[torch.Tensor] = None): """ Concat real and virtual extended tensors, and switch all the local ones to the first nloc * 2 atoms. - [:, :nloc]: original nloc real atoms. @@ -230,6 +242,15 @@ def concat_switch_virtual(extended_tensor, extended_tensor_virtual, nloc: int): :, nloc: ] extended_tensor_updated[:, nloc + nall :] = extended_tensor_virtual[:, nloc:] + if recv_num is not None: + origin_recv_num = torch.div(recv_num, 2).to(torch.int) + prefix_sum = torch.cumsum(recv_num, dim=0) + prefix_sum = torch.cat((torch.tensor([0]), prefix_sum)) + origin_prefix_sum = torch.cumsum(origin_recv_num, dim=0) + origin_prefix_sum = torch.cat((torch.tensor([0]), origin_prefix_sum)) + for i in range(recv_num.size(0)): + extended_tensor_updated[:,nloc + nloc + prefix_sum[i]: nloc + nloc + prefix_sum[i] + origin_recv_num[i]] = extended_tensor[:, nloc+origin_prefix_sum[i]:nloc + origin_prefix_sum[i+1]] + extended_tensor_updated[:,nloc + nloc + prefix_sum[i] + origin_recv_num[i]: nloc + nloc + prefix_sum[i + 1]] = extended_tensor_virtual[:, nloc+origin_prefix_sum[i]:nloc + origin_prefix_sum[i+1]] return extended_tensor_updated.view(out_shape) @staticmethod @@ -475,14 +496,25 @@ def forward_common_lower( extra_nlist_sort: bool = False, ): nframes, nloc = nlist.shape[:2] - ( - extended_coord_updated, - extended_atype_updated, - nlist_updated, - mapping_updated, - ) = self.process_spin_input_lower( - extended_coord, extended_atype, extended_spin, nlist, mapping=mapping - ) + if comm_dict is not None: + assert "recv_num" in comm_dict + ( + extended_coord_updated, + extended_atype_updated, + nlist_updated, + mapping_updated, + ) = self.process_spin_input_lower( + extended_coord, extended_atype, extended_spin, nlist, mapping=mapping,recv_num=comm_dict["recv_num"] + ) + else: + ( + extended_coord_updated, + extended_atype_updated, + nlist_updated, + mapping_updated, + ) = self.process_spin_input_lower( + extended_coord, extended_atype, extended_spin, nlist, mapping=mapping + ) if aparam is not None: aparam = self.expand_aparam(aparam, nloc * 2) model_ret = self.backbone_model.forward_common_lower( diff --git a/source/api_cc/include/DeepPotPT.h b/source/api_cc/include/DeepPotPT.h index aa24895a54..10ca789011 100644 --- a/source/api_cc/include/DeepPotPT.h +++ b/source/api_cc/include/DeepPotPT.h @@ -428,6 +428,7 @@ class DeepPotPT : public DeepPotBase { bool gpu_enabled; at::Tensor firstneigh_tensor; torch::Dict comm_dict; + int** spin_sendlist; /** * @brief Translate PyTorch exceptions to the DeePMD-kit exception. * @param[in] f The function to run. diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index 3b62a44ef5..48a31c3102 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -373,6 +373,69 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, nlist_data.padding(); if (do_message_passing == 1 && nghost > 0) { int nswap = lmp_list.nswap; + spin_sendlist = new int*[nswap]; + std::vector prefixSum(nswap); + prefixSum[0] = 0; + prefixSum[1] = lmp_list.recvnum[0]; + for (int i = 2; i < nswap; ++i) { + prefixSum[i] = prefixSum[i - 1] + lmp_list.recvnum[i-1]; + } + for (int i = 0; i < nswap; ++i) { + spin_sendlist[i] = new int[lmp_list.sendnum[i] * 2]; + int* sendlist_part = new int[nswap]; + for (int j = 0; j < nswap; ++j) { + sendlist_part[j] = -1; + } + for(int j = 0; j < lmp_list.sendnum[i]; j++) + { + for(int ii = 0; ii < nswap; ++ii) + { + if (lmp_list.sendlist[i][j] >= nloc + prefixSum[ii] && sendlist_part[ii] == -1) + { + sendlist_part[ii] = j; + } + } + } + // std::cout< Date: Fri, 18 Oct 2024 10:16:23 +0000 Subject: [PATCH 054/193] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pt/model/model/spin_model.py | 57 +++++++++++++++++++++++------ source/api_cc/src/DeepPotPT.cc | 44 +++++++++++----------- 2 files changed, 66 insertions(+), 35 deletions(-) diff --git a/deepmd/pt/model/model/spin_model.py b/deepmd/pt/model/model/spin_model.py index 0898a1d46a..21afc7d9ec 100644 --- a/deepmd/pt/model/model/spin_model.py +++ b/deepmd/pt/model/model/spin_model.py @@ -66,7 +66,7 @@ def process_spin_input_lower( extended_spin, nlist, mapping: Optional[torch.Tensor] = None, - recv_num:Optional[torch.Tensor] = None + recv_num: Optional[torch.Tensor] = None, ): """ Add `extended_spin` into `extended_coord` to generate virtual atoms, and extend `nlist` and `mapping`. @@ -83,18 +83,20 @@ def process_spin_input_lower( )[extended_atype].reshape([nframes, nall, 1]) virtual_extended_atype = extended_atype + self.ntypes_real extended_coord_updated = self.concat_switch_virtual( - extended_coord, virtual_extended_coord, nloc, recv_num = recv_num + extended_coord, virtual_extended_coord, nloc, recv_num=recv_num ) extended_atype_updated = self.concat_switch_virtual( - extended_atype, virtual_extended_atype, nloc, recv_num = recv_num + extended_atype, virtual_extended_atype, nloc, recv_num=recv_num ) if mapping is not None: virtual_mapping = mapping + nloc - mapping_updated = self.concat_switch_virtual(mapping, virtual_mapping, nloc, recv_num = recv_num) + mapping_updated = self.concat_switch_virtual( + mapping, virtual_mapping, nloc, recv_num=recv_num + ) else: mapping_updated = None # extend the nlist - nlist_updated = self.extend_nlist(extended_atype, nlist, recv_num = recv_num) + nlist_updated = self.extend_nlist(extended_atype, nlist, recv_num=recv_num) return ( extended_coord_updated, extended_atype_updated, @@ -177,7 +179,7 @@ def process_spin_output_lower( return extended_out_real, extended_out_mag, atomic_mask > 0.0 @staticmethod - def extend_nlist(extended_atype, nlist, recv_num:Optional[torch.Tensor] = None): + def extend_nlist(extended_atype, nlist, recv_num: Optional[torch.Tensor] = None): nframes, nloc, nnei = nlist.shape nall = extended_atype.shape[1] nlist_mask = nlist != -1 @@ -210,15 +212,26 @@ def extend_nlist(extended_atype, nlist, recv_num:Optional[torch.Tensor] = None): prefix_sum = torch.cumsum(origin_recv_num, dim=0) prefix_sum = torch.cat((torch.tensor([0]), prefix_sum)) for i in range(recv_num.size(0)): - index_part.append((nloc * 2 + prefix_sum[i] <= extended_nlist) & (extended_nlist < nloc *2 + prefix_sum[i+1])) - index_part.append((nloc + nall + prefix_sum[i] <= extended_nlist) & (extended_nlist < nloc + nall + prefix_sum[i+1])) + index_part.append( + (nloc * 2 + prefix_sum[i] <= extended_nlist) + & (extended_nlist < nloc * 2 + prefix_sum[i + 1]) + ) + index_part.append( + (nloc + nall + prefix_sum[i] <= extended_nlist) + & (extended_nlist < nloc + nall + prefix_sum[i + 1]) + ) for i in range(recv_num.size(0)): extended_nlist[index_part[2 * i]] += prefix_sum[i] extended_nlist[index_part[2 * i + 1]] -= nall - nloc - prefix_sum[i + 1] return extended_nlist @staticmethod - def concat_switch_virtual(extended_tensor, extended_tensor_virtual, nloc: int, recv_num:Optional[torch.Tensor] = None): + def concat_switch_virtual( + extended_tensor, + extended_tensor_virtual, + nloc: int, + recv_num: Optional[torch.Tensor] = None, + ): """ Concat real and virtual extended tensors, and switch all the local ones to the first nloc * 2 atoms. - [:, :nloc]: original nloc real atoms. @@ -249,8 +262,23 @@ def concat_switch_virtual(extended_tensor, extended_tensor_virtual, nloc: int, r origin_prefix_sum = torch.cumsum(origin_recv_num, dim=0) origin_prefix_sum = torch.cat((torch.tensor([0]), origin_prefix_sum)) for i in range(recv_num.size(0)): - extended_tensor_updated[:,nloc + nloc + prefix_sum[i]: nloc + nloc + prefix_sum[i] + origin_recv_num[i]] = extended_tensor[:, nloc+origin_prefix_sum[i]:nloc + origin_prefix_sum[i+1]] - extended_tensor_updated[:,nloc + nloc + prefix_sum[i] + origin_recv_num[i]: nloc + nloc + prefix_sum[i + 1]] = extended_tensor_virtual[:, nloc+origin_prefix_sum[i]:nloc + origin_prefix_sum[i+1]] + extended_tensor_updated[ + :, + nloc + nloc + prefix_sum[i] : nloc + + nloc + + prefix_sum[i] + + origin_recv_num[i], + ] = extended_tensor[ + :, nloc + origin_prefix_sum[i] : nloc + origin_prefix_sum[i + 1] + ] + extended_tensor_updated[ + :, + nloc + nloc + prefix_sum[i] + origin_recv_num[i] : nloc + + nloc + + prefix_sum[i + 1], + ] = extended_tensor_virtual[ + :, nloc + origin_prefix_sum[i] : nloc + origin_prefix_sum[i + 1] + ] return extended_tensor_updated.view(out_shape) @staticmethod @@ -504,7 +532,12 @@ def forward_common_lower( nlist_updated, mapping_updated, ) = self.process_spin_input_lower( - extended_coord, extended_atype, extended_spin, nlist, mapping=mapping,recv_num=comm_dict["recv_num"] + extended_coord, + extended_atype, + extended_spin, + nlist, + mapping=mapping, + recv_num=comm_dict["recv_num"], ) else: ( diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index 48a31c3102..604b8350ba 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -376,22 +376,20 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, spin_sendlist = new int*[nswap]; std::vector prefixSum(nswap); prefixSum[0] = 0; - prefixSum[1] = lmp_list.recvnum[0]; + prefixSum[1] = lmp_list.recvnum[0]; for (int i = 2; i < nswap; ++i) { - prefixSum[i] = prefixSum[i - 1] + lmp_list.recvnum[i-1]; + prefixSum[i] = prefixSum[i - 1] + lmp_list.recvnum[i - 1]; } for (int i = 0; i < nswap; ++i) { spin_sendlist[i] = new int[lmp_list.sendnum[i] * 2]; int* sendlist_part = new int[nswap]; for (int j = 0; j < nswap; ++j) { - sendlist_part[j] = -1; + sendlist_part[j] = -1; } - for(int j = 0; j < lmp_list.sendnum[i]; j++) - { - for(int ii = 0; ii < nswap; ++ii) - { - if (lmp_list.sendlist[i][j] >= nloc + prefixSum[ii] && sendlist_part[ii] == -1) - { + for (int j = 0; j < lmp_list.sendnum[i]; j++) { + for (int ii = 0; ii < nswap; ++ii) { + if (lmp_list.sendlist[i][j] >= nloc + prefixSum[ii] && + sendlist_part[ii] == -1) { sendlist_part[ii] = j; } } @@ -403,30 +401,30 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, // std::cout< Date: Sun, 20 Oct 2024 13:50:54 +0800 Subject: [PATCH 055/193] fix(pt): remove deprecating torch.norm (#4233) ## Summary by CodeRabbit - **Chores** - Updated the normalization method for improved consistency and potential optimization. - Minor code formatting adjustments for enhanced readability. - Ensured compatibility with existing function calls by preserving parameters and return types. Co-authored-by: Han Wang --- deepmd/pt/utils/nlist.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/pt/utils/nlist.py b/deepmd/pt/utils/nlist.py index a4f81a23a5..1060b40ce1 100644 --- a/deepmd/pt/utils/nlist.py +++ b/deepmd/pt/utils/nlist.py @@ -457,7 +457,7 @@ def extend_coord_with_ghosts( xyz = xyz.view(-1, 3) xyz = xyz.to(device=device, non_blocking=True) # ns x 3 - shift_idx = xyz[torch.argsort(torch.norm(xyz, dim=1))] + shift_idx = xyz[torch.argsort(torch.linalg.norm(xyz, dim=-1))] ns, _ = shift_idx.shape nall = ns * nloc # nf x ns x 3 From cf8527586e1822e8db0518967f61b6d12e14cf28 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Mon, 21 Oct 2024 21:12:57 +0800 Subject: [PATCH 056/193] revert `extend sendlist nlist` --- deepmd/pt/model/model/spin_model.py | 93 +++++------------------------ source/api_cc/include/DeepPotPT.h | 1 - source/api_cc/src/DeepPotPT.cc | 63 +------------------ 3 files changed, 15 insertions(+), 142 deletions(-) diff --git a/deepmd/pt/model/model/spin_model.py b/deepmd/pt/model/model/spin_model.py index 21afc7d9ec..f5ab81e16d 100644 --- a/deepmd/pt/model/model/spin_model.py +++ b/deepmd/pt/model/model/spin_model.py @@ -66,7 +66,6 @@ def process_spin_input_lower( extended_spin, nlist, mapping: Optional[torch.Tensor] = None, - recv_num: Optional[torch.Tensor] = None, ): """ Add `extended_spin` into `extended_coord` to generate virtual atoms, and extend `nlist` and `mapping`. @@ -83,20 +82,18 @@ def process_spin_input_lower( )[extended_atype].reshape([nframes, nall, 1]) virtual_extended_atype = extended_atype + self.ntypes_real extended_coord_updated = self.concat_switch_virtual( - extended_coord, virtual_extended_coord, nloc, recv_num=recv_num + extended_coord, virtual_extended_coord, nloc ) extended_atype_updated = self.concat_switch_virtual( - extended_atype, virtual_extended_atype, nloc, recv_num=recv_num + extended_atype, virtual_extended_atype, nloc ) if mapping is not None: virtual_mapping = mapping + nloc - mapping_updated = self.concat_switch_virtual( - mapping, virtual_mapping, nloc, recv_num=recv_num - ) + mapping_updated = self.concat_switch_virtual(mapping, virtual_mapping, nloc) else: mapping_updated = None # extend the nlist - nlist_updated = self.extend_nlist(extended_atype, nlist, recv_num=recv_num) + nlist_updated = self.extend_nlist(extended_atype, nlist) return ( extended_coord_updated, extended_atype_updated, @@ -179,7 +176,7 @@ def process_spin_output_lower( return extended_out_real, extended_out_mag, atomic_mask > 0.0 @staticmethod - def extend_nlist(extended_atype, nlist, recv_num: Optional[torch.Tensor] = None): + def extend_nlist(extended_atype, nlist): nframes, nloc, nnei = nlist.shape nall = extended_atype.shape[1] nlist_mask = nlist != -1 @@ -206,32 +203,10 @@ def extend_nlist(extended_atype, nlist, recv_num: Optional[torch.Tensor] = None) second_part_index = (nall <= extended_nlist) & (extended_nlist < (nall + nloc)) extended_nlist[first_part_index] += nloc extended_nlist[second_part_index] -= nall - nloc - if recv_num is not None: - index_part = [] - origin_recv_num = torch.div(recv_num, 2).to(torch.int) - prefix_sum = torch.cumsum(origin_recv_num, dim=0) - prefix_sum = torch.cat((torch.tensor([0]), prefix_sum)) - for i in range(recv_num.size(0)): - index_part.append( - (nloc * 2 + prefix_sum[i] <= extended_nlist) - & (extended_nlist < nloc * 2 + prefix_sum[i + 1]) - ) - index_part.append( - (nloc + nall + prefix_sum[i] <= extended_nlist) - & (extended_nlist < nloc + nall + prefix_sum[i + 1]) - ) - for i in range(recv_num.size(0)): - extended_nlist[index_part[2 * i]] += prefix_sum[i] - extended_nlist[index_part[2 * i + 1]] -= nall - nloc - prefix_sum[i + 1] return extended_nlist @staticmethod - def concat_switch_virtual( - extended_tensor, - extended_tensor_virtual, - nloc: int, - recv_num: Optional[torch.Tensor] = None, - ): + def concat_switch_virtual(extended_tensor, extended_tensor_virtual, nloc: int): """ Concat real and virtual extended tensors, and switch all the local ones to the first nloc * 2 atoms. - [:, :nloc]: original nloc real atoms. @@ -255,30 +230,6 @@ def concat_switch_virtual( :, nloc: ] extended_tensor_updated[:, nloc + nall :] = extended_tensor_virtual[:, nloc:] - if recv_num is not None: - origin_recv_num = torch.div(recv_num, 2).to(torch.int) - prefix_sum = torch.cumsum(recv_num, dim=0) - prefix_sum = torch.cat((torch.tensor([0]), prefix_sum)) - origin_prefix_sum = torch.cumsum(origin_recv_num, dim=0) - origin_prefix_sum = torch.cat((torch.tensor([0]), origin_prefix_sum)) - for i in range(recv_num.size(0)): - extended_tensor_updated[ - :, - nloc + nloc + prefix_sum[i] : nloc - + nloc - + prefix_sum[i] - + origin_recv_num[i], - ] = extended_tensor[ - :, nloc + origin_prefix_sum[i] : nloc + origin_prefix_sum[i + 1] - ] - extended_tensor_updated[ - :, - nloc + nloc + prefix_sum[i] + origin_recv_num[i] : nloc - + nloc - + prefix_sum[i + 1], - ] = extended_tensor_virtual[ - :, nloc + origin_prefix_sum[i] : nloc + origin_prefix_sum[i + 1] - ] return extended_tensor_updated.view(out_shape) @staticmethod @@ -524,30 +475,14 @@ def forward_common_lower( extra_nlist_sort: bool = False, ): nframes, nloc = nlist.shape[:2] - if comm_dict is not None: - assert "recv_num" in comm_dict - ( - extended_coord_updated, - extended_atype_updated, - nlist_updated, - mapping_updated, - ) = self.process_spin_input_lower( - extended_coord, - extended_atype, - extended_spin, - nlist, - mapping=mapping, - recv_num=comm_dict["recv_num"], - ) - else: - ( - extended_coord_updated, - extended_atype_updated, - nlist_updated, - mapping_updated, - ) = self.process_spin_input_lower( - extended_coord, extended_atype, extended_spin, nlist, mapping=mapping - ) + ( + extended_coord_updated, + extended_atype_updated, + nlist_updated, + mapping_updated, + ) = self.process_spin_input_lower( + extended_coord, extended_atype, extended_spin, nlist, mapping=mapping + ) if aparam is not None: aparam = self.expand_aparam(aparam, nloc * 2) model_ret = self.backbone_model.forward_common_lower( diff --git a/source/api_cc/include/DeepPotPT.h b/source/api_cc/include/DeepPotPT.h index 10ca789011..aa24895a54 100644 --- a/source/api_cc/include/DeepPotPT.h +++ b/source/api_cc/include/DeepPotPT.h @@ -428,7 +428,6 @@ class DeepPotPT : public DeepPotBase { bool gpu_enabled; at::Tensor firstneigh_tensor; torch::Dict comm_dict; - int** spin_sendlist; /** * @brief Translate PyTorch exceptions to the DeePMD-kit exception. * @param[in] f The function to run. diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index 604b8350ba..3b62a44ef5 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -373,67 +373,6 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, nlist_data.padding(); if (do_message_passing == 1 && nghost > 0) { int nswap = lmp_list.nswap; - spin_sendlist = new int*[nswap]; - std::vector prefixSum(nswap); - prefixSum[0] = 0; - prefixSum[1] = lmp_list.recvnum[0]; - for (int i = 2; i < nswap; ++i) { - prefixSum[i] = prefixSum[i - 1] + lmp_list.recvnum[i - 1]; - } - for (int i = 0; i < nswap; ++i) { - spin_sendlist[i] = new int[lmp_list.sendnum[i] * 2]; - int* sendlist_part = new int[nswap]; - for (int j = 0; j < nswap; ++j) { - sendlist_part[j] = -1; - } - for (int j = 0; j < lmp_list.sendnum[i]; j++) { - for (int ii = 0; ii < nswap; ++ii) { - if (lmp_list.sendlist[i][j] >= nloc + prefixSum[ii] && - sendlist_part[ii] == -1) { - sendlist_part[ii] = j; - } - } - } - // std::cout< Date: Mon, 21 Oct 2024 21:50:55 +0800 Subject: [PATCH 057/193] fix spin communication in lammps --- deepmd/pt/model/descriptor/repformers.py | 36 +++++++++++--- deepmd/pt/model/model/spin_model.py | 36 +++----------- deepmd/pt/utils/spin.py | 62 ++++++++++++++++++++++++ source/api_cc/src/DeepPotPT.cc | 2 + 4 files changed, 100 insertions(+), 36 deletions(-) create mode 100644 deepmd/pt/utils/spin.py diff --git a/deepmd/pt/model/descriptor/repformers.py b/deepmd/pt/model/descriptor/repformers.py index a9e4ef7893..6a7bbbb95c 100644 --- a/deepmd/pt/model/descriptor/repformers.py +++ b/deepmd/pt/model/descriptor/repformers.py @@ -45,6 +45,9 @@ RepformerLayer, ) from .repformer_layer_old_impl import RepformerLayer as RepformerLayerOld +from deepmd.pt.utils.spin import ( + concat_switch_virtual, +) if not hasattr(torch.ops.deepmd, "border_op"): @@ -456,6 +459,7 @@ def forward( atype_embd = extended_atype_embd assert isinstance(atype_embd, torch.Tensor) # for jit g1 = self.act(atype_embd) + ng1 = g1.shape[-1] # nb x nloc x nnei x 1, nb x nloc x nnei x 3 if not self.direct_dist: g2, h2 = torch.split(dmatrix, [1, 3], dim=-1) @@ -482,10 +486,27 @@ def forward( assert mapping is not None g1_ext = torch.gather(g1, 1, mapping) else: - n_padding = nall - nloc - g1 = torch.nn.functional.pad( - g1.squeeze(0), (0, 0, 0, n_padding), value=0.0 - ) + has_spin = "has_spin" in comm_dict + if not has_spin: + n_padding = nall - nloc + g1 = torch.nn.functional.pad( + g1.squeeze(0), (0, 0, 0, n_padding), value=0.0 + ) + real_nloc = nloc + real_nall = nall + else: + # for spin + real_nloc = nloc // 2 + real_nall = nall // 2 + real_n_padding = real_nall - real_nloc + g1_real, g1_virtual = torch.split(g1, [real_nloc, real_nloc], dim=1) + # mix_g1: nb x real_nloc x (ng1 * 2) + mix_g1 = torch.cat([g1_real, g1_virtual], dim=2) + # nb x real_nall x (ng1 * 2) + g1 = torch.nn.functional.pad( + mix_g1.squeeze(0), (0, 0, 0, real_n_padding), value=0.0 + ) + assert "send_list" in comm_dict assert "send_proc" in comm_dict assert "recv_proc" in comm_dict @@ -500,10 +521,13 @@ def forward( comm_dict["recv_num"], g1, comm_dict["communicator"], - torch.tensor(nloc), # pylint: disable=no-explicit-dtype,no-explicit-device - torch.tensor(nall - nloc), # pylint: disable=no-explicit-dtype,no-explicit-device + torch.tensor(real_nloc), # pylint: disable=no-explicit-dtype,no-explicit-device + torch.tensor(real_nall - real_nloc), # pylint: disable=no-explicit-dtype,no-explicit-device ) g1_ext = ret[0].unsqueeze(0) + if has_spin: + g1_real_ext, g1_virtual_ext = torch.split(g1_ext, [ng1, ng1], dim=2) + g1_ext = concat_switch_virtual(g1_real_ext, g1_virtual_ext, real_nloc) g1, g2, h2 = ll.forward( g1_ext, g2, diff --git a/deepmd/pt/model/model/spin_model.py b/deepmd/pt/model/model/spin_model.py index f5ab81e16d..86c3eb1c2e 100644 --- a/deepmd/pt/model/model/spin_model.py +++ b/deepmd/pt/model/model/spin_model.py @@ -26,6 +26,9 @@ from deepmd.utils.spin import ( Spin, ) +from deepmd.pt.utils.spin import ( + concat_switch_virtual, +) from .make_model import ( make_model, @@ -81,15 +84,15 @@ def process_spin_input_lower( self.virtual_scale_mask.to(extended_atype.device) )[extended_atype].reshape([nframes, nall, 1]) virtual_extended_atype = extended_atype + self.ntypes_real - extended_coord_updated = self.concat_switch_virtual( + extended_coord_updated = concat_switch_virtual( extended_coord, virtual_extended_coord, nloc ) - extended_atype_updated = self.concat_switch_virtual( + extended_atype_updated = concat_switch_virtual( extended_atype, virtual_extended_atype, nloc ) if mapping is not None: virtual_mapping = mapping + nloc - mapping_updated = self.concat_switch_virtual(mapping, virtual_mapping, nloc) + mapping_updated = concat_switch_virtual(mapping, virtual_mapping, nloc) else: mapping_updated = None # extend the nlist @@ -205,33 +208,6 @@ def extend_nlist(extended_atype, nlist): extended_nlist[second_part_index] -= nall - nloc return extended_nlist - @staticmethod - def concat_switch_virtual(extended_tensor, extended_tensor_virtual, nloc: int): - """ - Concat real and virtual extended tensors, and switch all the local ones to the first nloc * 2 atoms. - - [:, :nloc]: original nloc real atoms. - - [:, nloc: nloc + nloc]: virtual atoms corresponding to nloc real atoms. - - [:, nloc + nloc: nloc + nall]: ghost real atoms. - - [:, nloc + nall: nall + nall]: virtual atoms corresponding to ghost real atoms. - """ - nframes, nall = extended_tensor.shape[:2] - out_shape = list(extended_tensor.shape) - out_shape[1] *= 2 - extended_tensor_updated = torch.zeros( - out_shape, - dtype=extended_tensor.dtype, - device=extended_tensor.device, - ) - extended_tensor_updated[:, :nloc] = extended_tensor[:, :nloc] - extended_tensor_updated[:, nloc : nloc + nloc] = extended_tensor_virtual[ - :, :nloc - ] - extended_tensor_updated[:, nloc + nloc : nloc + nall] = extended_tensor[ - :, nloc: - ] - extended_tensor_updated[:, nloc + nall :] = extended_tensor_virtual[:, nloc:] - return extended_tensor_updated.view(out_shape) - @staticmethod def expand_aparam(aparam, nloc: int): """Expand the atom parameters for virtual atoms if necessary.""" diff --git a/deepmd/pt/utils/spin.py b/deepmd/pt/utils/spin.py new file mode 100644 index 0000000000..d940bda909 --- /dev/null +++ b/deepmd/pt/utils/spin.py @@ -0,0 +1,62 @@ +from typing import Optional +import torch + + +def concat_switch_virtual( + extended_tensor, + extended_tensor_virtual, + nloc: int, + recv_num: Optional[torch.Tensor] = None, +): + """ + Concat real and virtual extended tensors, and switch all the local ones to the first nloc * 2 atoms. + - [:, :nloc]: original nloc real atoms. + - [:, nloc: nloc + nloc]: virtual atoms corresponding to nloc real atoms. + - [:, nloc + nloc: nloc + nall]: ghost real atoms. + - [:, nloc + nall: nall + nall]: virtual atoms corresponding to ghost real atoms. + """ + nframes, nall = extended_tensor.shape[:2] + out_shape = list(extended_tensor.shape) + out_shape[1] *= 2 + extended_tensor_updated = torch.zeros( + out_shape, + dtype=extended_tensor.dtype, + device=extended_tensor.device, + ) + extended_tensor_updated[:, :nloc] = extended_tensor[:, :nloc] + extended_tensor_updated[:, nloc : nloc + nloc] = extended_tensor_virtual[ + :, :nloc + ] + extended_tensor_updated[:, nloc + nloc : nloc + nall] = extended_tensor[ + :, nloc: + ] + extended_tensor_updated[:, nloc + nall :] = extended_tensor_virtual[:, nloc:] + # nloc + nloc + nghost + nghost + if recv_num is not None: + # recv_num : nswap * 1 + origin_recv_num = torch.div(recv_num, 2).to(torch.int) + prefix_sum = torch.cumsum(recv_num, dim=0) + prefix_sum = torch.cat((torch.tensor([0]), prefix_sum)) + # prefix_sum: (nswap+1) * 1 + origin_prefix_sum = torch.cumsum(origin_recv_num, dim=0) + origin_prefix_sum = torch.cat((torch.tensor([0]), origin_prefix_sum)) + # origin_prefix_sum: (nswap+1) * 1 + for i in range(recv_num.size(0)): + extended_tensor_updated[ + :, + nloc + nloc + prefix_sum[i] : nloc + + nloc + + prefix_sum[i] + + origin_recv_num[i], + ] = extended_tensor[ + :, nloc + origin_prefix_sum[i] : nloc + origin_prefix_sum[i + 1] + ] + extended_tensor_updated[ + :, + nloc + nloc + prefix_sum[i] + origin_recv_num[i] : nloc + + nloc + + prefix_sum[i + 1], + ] = extended_tensor_virtual[ + :, nloc + origin_prefix_sum[i] : nloc + origin_prefix_sum[i + 1] + ] + return extended_tensor_updated.view(out_shape) \ No newline at end of file diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index 3b62a44ef5..a8657afa70 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -392,12 +392,14 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, std::accumulate(lmp_list.sendnum, lmp_list.sendnum + nswap, 0); torch::Tensor sendlist_tensor = torch::from_blob(lmp_list.sendlist, {total_send}, int32_option); + torch::Tensor has_spin = torch::tensor({1}, int32_option); comm_dict.insert("send_list", sendlist_tensor); comm_dict.insert("send_proc", sendproc_tensor); comm_dict.insert("recv_proc", recvproc_tensor); comm_dict.insert("send_num", sendnum_tensor); comm_dict.insert("recv_num", recvnum_tensor); comm_dict.insert("communicator", communicator_tensor); + comm_dict.insert("has_spin", has_spin); } if (do_message_passing == 1 && nghost == 0) { // for the situation that no ghost atoms (e.g. serial nopbc) From e5c0ecf013cc4ef1bdfb4e0aee6e84678e55dd2f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 21 Oct 2024 13:54:51 +0000 Subject: [PATCH 058/193] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pt/model/descriptor/repformers.py | 10 ++++++---- deepmd/pt/model/model/spin_model.py | 6 +++--- deepmd/pt/utils/spin.py | 16 ++++++++-------- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/deepmd/pt/model/descriptor/repformers.py b/deepmd/pt/model/descriptor/repformers.py index c36194649c..fa3f9503aa 100644 --- a/deepmd/pt/model/descriptor/repformers.py +++ b/deepmd/pt/model/descriptor/repformers.py @@ -28,6 +28,9 @@ from deepmd.pt.utils.exclude_mask import ( PairExcludeMask, ) +from deepmd.pt.utils.spin import ( + concat_switch_virtual, +) from deepmd.pt.utils.utils import ( ActivationFn, ) @@ -41,9 +44,6 @@ from .repformer_layer import ( RepformerLayer, ) -from deepmd.pt.utils.spin import ( - concat_switch_virtual, -) if not hasattr(torch.ops.deepmd, "border_op"): @@ -493,7 +493,9 @@ def forward( g1_ext = ret[0].unsqueeze(0) if has_spin: g1_real_ext, g1_virtual_ext = torch.split(g1_ext, [ng1, ng1], dim=2) - g1_ext = concat_switch_virtual(g1_real_ext, g1_virtual_ext, real_nloc) + g1_ext = concat_switch_virtual( + g1_real_ext, g1_virtual_ext, real_nloc + ) g1, g2, h2 = ll.forward( g1_ext, g2, diff --git a/deepmd/pt/model/model/spin_model.py b/deepmd/pt/model/model/spin_model.py index e974e63582..4e76a41fe1 100644 --- a/deepmd/pt/model/model/spin_model.py +++ b/deepmd/pt/model/model/spin_model.py @@ -15,6 +15,9 @@ from deepmd.pt.model.atomic_model import ( DPAtomicModel, ) +from deepmd.pt.utils.spin import ( + concat_switch_virtual, +) from deepmd.pt.utils.utils import ( to_torch_tensor, ) @@ -24,9 +27,6 @@ from deepmd.utils.spin import ( Spin, ) -from deepmd.pt.utils.spin import ( - concat_switch_virtual, -) from .make_model import ( make_model, diff --git a/deepmd/pt/utils/spin.py b/deepmd/pt/utils/spin.py index d940bda909..aaff2078df 100644 --- a/deepmd/pt/utils/spin.py +++ b/deepmd/pt/utils/spin.py @@ -1,4 +1,8 @@ -from typing import Optional +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + import torch @@ -24,12 +28,8 @@ def concat_switch_virtual( device=extended_tensor.device, ) extended_tensor_updated[:, :nloc] = extended_tensor[:, :nloc] - extended_tensor_updated[:, nloc : nloc + nloc] = extended_tensor_virtual[ - :, :nloc - ] - extended_tensor_updated[:, nloc + nloc : nloc + nall] = extended_tensor[ - :, nloc: - ] + extended_tensor_updated[:, nloc : nloc + nloc] = extended_tensor_virtual[:, :nloc] + extended_tensor_updated[:, nloc + nloc : nloc + nall] = extended_tensor[:, nloc:] extended_tensor_updated[:, nloc + nall :] = extended_tensor_virtual[:, nloc:] # nloc + nloc + nghost + nghost if recv_num is not None: @@ -59,4 +59,4 @@ def concat_switch_virtual( ] = extended_tensor_virtual[ :, nloc + origin_prefix_sum[i] : nloc + origin_prefix_sum[i + 1] ] - return extended_tensor_updated.view(out_shape) \ No newline at end of file + return extended_tensor_updated.view(out_shape) From 85c934b5b6be9955a1e2aaf25f1277b55c274e48 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 22 Oct 2024 12:00:06 +0800 Subject: [PATCH 059/193] Update spin_model.py --- deepmd/pt/model/model/spin_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deepmd/pt/model/model/spin_model.py b/deepmd/pt/model/model/spin_model.py index 4e76a41fe1..982c0bf1bf 100644 --- a/deepmd/pt/model/model/spin_model.py +++ b/deepmd/pt/model/model/spin_model.py @@ -445,7 +445,7 @@ def forward_common_lower( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, extra_nlist_sort: bool = False, ): nframes, nloc = nlist.shape[:2] @@ -583,7 +583,7 @@ def forward_lower( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, do_atomic_virial: bool = False, - comm_dict: Optional[Dict[str, torch.Tensor]] = None, + comm_dict: Optional[dict[str, torch.Tensor]] = None, ): model_ret = self.forward_common_lower( extended_coord, From 35fd1c6e3eb4ad3f0b8af4341d41cab63e129a32 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 22 Oct 2024 12:41:01 +0800 Subject: [PATCH 060/193] Update spin.py --- deepmd/pt/utils/spin.py | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/deepmd/pt/utils/spin.py b/deepmd/pt/utils/spin.py index aaff2078df..b22e0a0c13 100644 --- a/deepmd/pt/utils/spin.py +++ b/deepmd/pt/utils/spin.py @@ -10,7 +10,6 @@ def concat_switch_virtual( extended_tensor, extended_tensor_virtual, nloc: int, - recv_num: Optional[torch.Tensor] = None, ): """ Concat real and virtual extended tensors, and switch all the local ones to the first nloc * 2 atoms. @@ -31,32 +30,4 @@ def concat_switch_virtual( extended_tensor_updated[:, nloc : nloc + nloc] = extended_tensor_virtual[:, :nloc] extended_tensor_updated[:, nloc + nloc : nloc + nall] = extended_tensor[:, nloc:] extended_tensor_updated[:, nloc + nall :] = extended_tensor_virtual[:, nloc:] - # nloc + nloc + nghost + nghost - if recv_num is not None: - # recv_num : nswap * 1 - origin_recv_num = torch.div(recv_num, 2).to(torch.int) - prefix_sum = torch.cumsum(recv_num, dim=0) - prefix_sum = torch.cat((torch.tensor([0]), prefix_sum)) - # prefix_sum: (nswap+1) * 1 - origin_prefix_sum = torch.cumsum(origin_recv_num, dim=0) - origin_prefix_sum = torch.cat((torch.tensor([0]), origin_prefix_sum)) - # origin_prefix_sum: (nswap+1) * 1 - for i in range(recv_num.size(0)): - extended_tensor_updated[ - :, - nloc + nloc + prefix_sum[i] : nloc - + nloc - + prefix_sum[i] - + origin_recv_num[i], - ] = extended_tensor[ - :, nloc + origin_prefix_sum[i] : nloc + origin_prefix_sum[i + 1] - ] - extended_tensor_updated[ - :, - nloc + nloc + prefix_sum[i] + origin_recv_num[i] : nloc - + nloc - + prefix_sum[i + 1], - ] = extended_tensor_virtual[ - :, nloc + origin_prefix_sum[i] : nloc + origin_prefix_sum[i + 1] - ] return extended_tensor_updated.view(out_shape) From 11aeb178d6beb8eafdeb7bec52cac6b053493a44 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 04:41:32 +0000 Subject: [PATCH 061/193] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pt/utils/spin.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/deepmd/pt/utils/spin.py b/deepmd/pt/utils/spin.py index b22e0a0c13..285dcaf93e 100644 --- a/deepmd/pt/utils/spin.py +++ b/deepmd/pt/utils/spin.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Optional, -) import torch From b4701da450ac24df0771ed40b8490698ee26ed6b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 14:29:51 +0800 Subject: [PATCH 062/193] [pre-commit.ci] pre-commit autoupdate (#4235) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.9 → v0.7.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.9...v0.7.0) - [github.com/pre-commit/mirrors-clang-format: v19.1.1 → v19.1.2](https://github.com/pre-commit/mirrors-clang-format/compare/v19.1.1...v19.1.2) - [github.com/scop/pre-commit-shfmt: v3.9.0-1 → v3.10.0-1](https://github.com/scop/pre-commit-shfmt/compare/v3.9.0-1...v3.10.0-1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6a1d303f64..30efa6b062 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.6.9 + rev: v0.7.0 hooks: - id: ruff args: ["--fix"] @@ -52,7 +52,7 @@ repos: - id: blacken-docs # C++ - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v19.1.1 + rev: v19.1.2 hooks: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) @@ -66,7 +66,7 @@ repos: exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt - rev: v3.9.0-1 + rev: v3.10.0-1 hooks: - id: shfmt # CMake From 6c5cb1df6d4bfbe79ba14b3f85b3940d74b26379 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 22 Oct 2024 22:04:00 +0800 Subject: [PATCH 063/193] add ut for spin c++ --- source/api_c/include/c_api.h | 160 +++++++++ source/api_c/include/deepmd.hpp | 198 ++++++++++++ source/api_c/src/c_api.cc | 164 ++++++++++ source/api_cc/include/DeepPot.h | 306 ++++++++++++++++++ source/api_cc/include/DeepPotPT.h | 57 ++++ source/api_cc/include/DeepPotTF.h | 40 +++ source/api_cc/src/DeepPot.cc | 168 ++++++++++ source/api_cc/src/DeepPotTF.cc | 111 +++++++ .../api_cc/tests/test_deeppot_dpa1_pt_spin.cc | 169 ++++++++++ source/lmp/pair_deepmd.cpp | 19 +- source/tests/infer/deeppot_dpa_spin.pth | Bin 0 -> 216708 bytes 11 files changed, 1387 insertions(+), 5 deletions(-) create mode 100644 source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc create mode 100644 source/tests/infer/deeppot_dpa_spin.pth diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index 9e4631f2ac..a4bdb6f422 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -161,6 +161,42 @@ extern void DP_DeepPotCompute(DP_DeepPot* dp, double* atomic_energy, double* atomic_virial); +/** + * @brief Evaluate the energy, force, magnetic force and virial by using a DP with spin input. (double version) + * @attention The number of frames is assumed to be 1. + * @param[in] dp The DP to use. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size natoms + *x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + *of size natoms x 3. + * @param[in] atype The atom types. The array should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. Pass + *NULL if pbc is not used. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] force_mag Output magnetic force. The array should be of size natoms x 3. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of size + *natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of size + *natoms x 9. + * @warning The output arrays should be allocated before calling this function. + *Pass NULL if not required. + **/ +extern void DP_DeepPotComputeSP(DP_DeepPot* dp, + const int natom, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); + /** * @brief Evaluate the energy, force and virial by using a DP. (float version) * @attention The number of frames is assumed to be 1. @@ -192,6 +228,42 @@ extern void DP_DeepPotComputef(DP_DeepPot* dp, float* atomic_energy, float* atomic_virial); +/** + * @brief Evaluate the energy, force, magnetic force and virial by using a DP with spin input. (float version) + * @attention The number of frames is assumed to be 1. + * @param[in] dp The DP to use. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size natoms + *x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + *of size natoms x 3. + * @param[in] atype The atom types. The array should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. Pass + *NULL if pbc is not used. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] force_mag Output magnetic force. The array should be of size natoms x 3. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of size + *natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of size + *natoms x 9. + * @warning The output arrays should be allocated before calling this function. + *Pass NULL if not required. + **/ +extern void DP_DeepPotComputefSP(DP_DeepPot* dp, + const int natom, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); + /** * @brief Evaluate the energy, force and virial by using a DP with the neighbor *list. (double version) @@ -339,6 +411,50 @@ extern void DP_DeepPotCompute2(DP_DeepPot* dp, double* atomic_energy, double* atomic_virial); +/** + * @brief Evaluate the energy, force, magnetic force and virial by using a DP with spin input. (double version) + * @version 2 + * @param[in] dp The DP to use. + * @param[in] nframes The number of frames. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size natoms + *x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The array should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. Pass + *NULL if pbc is not used. + * @param[in] fparam The frame parameters. The array can be of size nframes x + *dim_fparam. + * @param[in] aparam The atom parameters. The array can be of size nframes x + *dim_aparam. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of size + *natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of size + *natoms x 9. + * @warning The output arrays should be allocated before calling this function. + *Pass NULL if not required. + **/ +extern void DP_DeepPotCompute2SP(DP_DeepPot* dp, + const int nframes, + const int natom, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); + /** * @brief Evaluate the energy, force and virial by using a DP. (float version) * @version 2 @@ -378,6 +494,50 @@ extern void DP_DeepPotComputef2(DP_DeepPot* dp, float* atomic_energy, float* atomic_virial); +/** + * @brief Evaluate the energy, force, magnetic force and virial by using a DP with spin input. (float version) + * @version 2 + * @param[in] dp The DP to use. + * @param[in] nframes The number of frames. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size natoms + *x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The array should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. Pass + *NULL if pbc is not used. + * @param[in] fparam The frame parameters. The array can be of size nframes x + *dim_fparam. + * @param[in] aparam The atom parameters. The array can be of size nframes x + *dim_aparam. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of size + *natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of size + *natoms x 9. + * @warning The output arrays should be allocated before calling this function. + *Pass NULL if not required. + **/ +extern void DP_DeepPotComputef2SP(DP_DeepPot* dp, + const int nframes, + const int natom, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); + /** * @brief Evaluate the energy, force and virial by using a DP with the neighbor *list. (double version) diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index ca695b4a35..a952075789 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -95,6 +95,64 @@ inline void _DP_DeepPotCompute(DP_DeepPot *dp, energy, force, virial, atomic_energy, atomic_virial); } +// support spin +template +inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, + const int nframes, + const int natom, + const FPTYPE *coord, + const FPTYPE *spin, + const int *atype, + const FPTYPE *cell, + const FPTYPE *fparam, + const FPTYPE *aparam, + double *energy, + FPTYPE *force, + FPTYPE *force_mag, + FPTYPE *virial, + FPTYPE *atomic_energy, + FPTYPE *atomic_virial); + +template <> +inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, + const int nframes, + const int natom, + const double *coord, + const double *spin, + const int *atype, + const double *cell, + const double *fparam, + const double *aparam, + double *energy, + double *force, + double *force_mag, + double *virial, + double *atomic_energy, + double *atomic_virial) { + DP_DeepPotCompute2SP(dp, nframes, natom, coord, spin, atype, cell, fparam, aparam, + energy, force, force_mag, virial, atomic_energy, atomic_virial); +} + +template <> +inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, + const int nframes, + const int natom, + const float *coord, + const float *spin, + const int *atype, + const float *cell, + const float *fparam, + const float *aparam, + double *energy, + float *force, + float *force_mag, + float *virial, + float *atomic_energy, + float *atomic_virial) { + DP_DeepPotComputef2SP(dp, nframes, natom, coord, spin, atype, cell, fparam, aparam, + energy, force, force_mag, virial, atomic_energy, atomic_virial); +} + template inline void _DP_DeepPotComputeNList(DP_DeepPot *dp, const int nframes, @@ -881,6 +939,71 @@ class DeepPot { nullptr, nullptr); DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; + // support spin + /** + * @brief Evaluate the energy, force, magnetic force and virial by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9 (PBC) or empty (no PBC). + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @warning Natoms should not be zero when computing multiple frames. + **/ + template + void compute( + ENERGYVTYPE &ener, + std::vector &force, + std::vector &force_mag, + std::vector &virial, + const std::vector &coord, + const std::vector &spin, + const std::vector &atype, + const std::vector &box, + const std::vector &fparam = std::vector(), + const std::vector &aparam = std::vector()) { + unsigned int natoms = atype.size(); + unsigned int nframes = natoms > 0 ? coord.size() / natoms / 3 : 1; + assert(nframes * natoms * 3 == coord.size()); + if (!box.empty()) { + assert(box.size() == nframes * 9); + } + const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; + const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; + const int *atype_ = &atype[0]; + double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); + force.resize(static_cast(nframes) * natoms * 3); + force_mag.resize(static_cast(nframes) * natoms * 3); + virial.resize(static_cast(nframes) * 9); + VALUETYPE *force_ = &force[0]; + VALUETYPE *force_mag_ = &force_mag[0]; + VALUETYPE *virial_ = &virial[0]; + std::vector fparam_, aparam_; + validate_fparam_aparam(nframes, natoms, fparam, aparam); + tile_fparam_aparam(fparam_, nframes, dfparam, fparam); + tile_fparam_aparam(aparam_, nframes, natoms * daparam, aparam); + const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; + const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; + + _DP_DeepPotComputeSP(dp, nframes, natoms, coord_, spin_, atype_, box_, + fparam__, aparam__, ener_, force_, force_mag_, virial_, + nullptr, nullptr); + DP_CHECK_OK(DP_DeepPotCheckOK, dp); + }; /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -948,6 +1071,81 @@ class DeepPot { DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial + *by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9 (PBC) or empty (no PBC). + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @warning Natoms should not be zero when computing multiple frames. + **/ + template + void compute( + ENERGYVTYPE &ener, + std::vector &force, + std::vector &force_mag, + std::vector &virial, + std::vector &atom_energy, + std::vector &atom_virial, + const std::vector &coord, + const std::vector &spin, + const std::vector &atype, + const std::vector &box, + const std::vector &fparam = std::vector(), + const std::vector &aparam = std::vector()) { + unsigned int natoms = atype.size(); + unsigned int nframes = natoms > 0 ? coord.size() / natoms / 3 : 1; + assert(nframes * natoms * 3 == coord.size()); + if (!box.empty()) { + assert(box.size() == nframes * 9); + } + const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; + const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; + const int *atype_ = &atype[0]; + + double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); + force.resize(static_cast(nframes) * natoms * 3); + force_mag.resize(static_cast(nframes) * natoms * 3); + virial.resize(static_cast(nframes) * 9); + atom_energy.resize(static_cast(nframes) * natoms); + atom_virial.resize(static_cast(nframes) * natoms * 9); + VALUETYPE *force_ = &force[0]; + VALUETYPE *force_mag_ = &force_mag[0]; + VALUETYPE *virial_ = &virial[0]; + VALUETYPE *atomic_ener_ = &atom_energy[0]; + VALUETYPE *atomic_virial_ = &atom_virial[0]; + std::vector fparam_, aparam_; + validate_fparam_aparam(nframes, natoms, fparam, aparam); + tile_fparam_aparam(fparam_, nframes, dfparam, fparam); + tile_fparam_aparam(aparam_, nframes, natoms * daparam, aparam); + const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; + const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; + + _DP_DeepPotCompute(dp, nframes, natoms, coord_, spin_, atype_, box_, + fparam__, aparam__, ener_, force_, force_mag_, virial_, + atomic_ener_, atomic_virial_); + DP_CHECK_OK(DP_DeepPotCheckOK, dp); + }; + /** * @brief Evaluate the energy, force and virial by using this DP with the *neighbor list. diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index cdc5c0698d..85166cb598 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -251,6 +251,98 @@ template void DP_DeepPotCompute_variant(DP_DeepPot* dp, float* virial, float* atomic_energy, float* atomic_virial); +// support spin +template +inline void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, + const int nframes, + const int natoms, + const VALUETYPE* coord, + const VALUETYPE* spin, + const int* atype, + const VALUETYPE* cell, + const VALUETYPE* fparam, + const VALUETYPE* aparam, + double* energy, + VALUETYPE* force, + VALUETYPE* force_mag, + VALUETYPE* virial, + VALUETYPE* atomic_energy, + VALUETYPE* atomic_virial) { + // init C++ vectors from C arrays + std::vector coord_(coord, coord + nframes * natoms * 3); + std::vector spin_(spin, spin + nframes * natoms * 3); + std::vector atype_(atype, atype + natoms); + std::vector cell_; + if (cell) { + // pbc + cell_.assign(cell, cell + nframes * 9); + } + std::vector fparam_; + if (fparam) { + fparam_.assign(fparam, fparam + nframes * dp->dfparam); + } + std::vector aparam_; + if (aparam) { + aparam_.assign(aparam, aparam + nframes * natoms * dp->daparam); + } + std::vector e; + std::vector f, fm, v, ae, av; + + DP_REQUIRES_OK(dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, + fparam_, aparam_)); + // copy from C++ vectors to C arrays, if not NULL pointer + if (energy) { + std::copy(e.begin(), e.end(), energy); + } + if (force) { + std::copy(f.begin(), f.end(), force); + } + if (force_mag) { + std::copy(fm.begin(), fm.end(), force_mag); + } + if (virial) { + std::copy(v.begin(), v.end(), virial); + } + if (atomic_energy) { + std::copy(ae.begin(), ae.end(), atomic_energy); + } + if (atomic_virial) { + std::copy(av.begin(), av.end(), atomic_virial); + } +} + +template void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); + +template void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); + template inline void DP_DeepPotComputeNList_variant(DP_DeepPot* dp, @@ -1229,6 +1321,22 @@ void DP_DeepPotCompute(DP_DeepPot* dp, NULL, energy, force, virial, atomic_energy, atomic_virial); } +void DP_DeepPotComputeSP(DP_DeepPot* dp, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepPotCompute_variant_sp(dp, 1, natoms, coord, spin, atype, cell, NULL, + NULL, energy, force, force_mag, virial, atomic_energy, + atomic_virial); +} void DP_DeepPotComputef(DP_DeepPot* dp, const int natoms, @@ -1245,6 +1353,23 @@ void DP_DeepPotComputef(DP_DeepPot* dp, atomic_virial); } +void DP_DeepPotComputefSP(DP_DeepPot* dp, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepPotCompute_variant_sp(dp, 1, natoms, coord, spin, atype, cell, NULL, + NULL, energy, force, force_mag, virial, atomic_energy, + atomic_virial); +} + void DP_DeepPotComputeNList(DP_DeepPot* dp, const int natoms, const double* coord, @@ -1339,6 +1464,25 @@ void DP_DeepPotCompute2(DP_DeepPot* dp, fparam, aparam, energy, force, virial, atomic_energy, atomic_virial); } +void DP_DeepPotCompute2SP(DP_DeepPot* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepPotCompute_variant_sp(dp, nframes, natoms, coord, spin, atype, cell, + fparam, aparam, energy, force, force_mag, virial, + atomic_energy, atomic_virial); +} void DP_DeepPotComputef2(DP_DeepPot* dp, const int nframes, @@ -1358,6 +1502,26 @@ void DP_DeepPotComputef2(DP_DeepPot* dp, atomic_energy, atomic_virial); } +void DP_DeepPotComputef2SP(DP_DeepPot* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepPotCompute_variant_sp(dp, nframes, natoms, coord, spin, atype, cell, + fparam, aparam, energy, force, force_mag, virial, + atomic_energy, atomic_virial); +} + void DP_DeepPotComputeNList2(DP_DeepPot* dp, const int nframes, const int natoms, diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index d906546ee4..9173470dec 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -87,6 +87,64 @@ class DeepPotBase { const std::vector& aparam, const bool atomic) = 0; /** @} */ + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial + *by using this DP with spin input. + * @note The double precision interface is used by i-PI, GROMACS, ABACUS, and + *CP2k. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @param[in] atomic Request atomic energy and virial if atomic is true. + * @{ + **/ + virtual void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) = 0; + virtual void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) = 0; + /** @} */ + /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -143,6 +201,39 @@ class DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic) = 0; + /** @} */ + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial + *by using this DP with spin input. + * @note The double precision interface is used by LAMMPS and AMBER. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] lmp_list The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @param[in] atomic Request atomic energy and virial if atomic is true. + * @{ + **/ virtual void computew(std::vector& ener, std::vector& force, std::vector& force_mag, @@ -343,6 +434,53 @@ class DeepPot { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); /** @} */ + /** + * @brief Evaluate the energy, force, magnetic force and virial by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @{ + **/ + template + void compute(ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + template + void compute(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + /** @} */ + /** * @brief Evaluate the energy, force and virial by using this DP. * @param[out] ener The system energy. @@ -390,6 +528,33 @@ class DeepPot { const int& ago, const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); + /** @} */ + /** + * @brief Evaluate the energy, force, magnetic force and virial by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] inlist The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @{ + **/ template void compute(ENERGYTYPE& ener, std::vector& force, @@ -466,6 +631,60 @@ class DeepPot { const std::vector& aparam = std::vector()); /** @} */ + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial + *by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @{ + **/ + template + void compute(ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + template + void compute(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + /** @} */ + /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -520,6 +739,37 @@ class DeepPot { const int& ago, const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); + /** @} */ + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial + *by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] lmp_list The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @{ + **/ template void compute(ENERGYTYPE& ener, std::vector& force, @@ -824,6 +1074,33 @@ class DeepPotModelDevi { const int& ago, const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); + + /** + * @brief Evaluate the energy, force, magnetic force and virial by using these DP models with spin input. + * @param[out] all_ener The system energies of all models. + * @param[out] all_force The forces on each atom of all models. + * @param[out] all_force_mag The magnetic forces on each atom of all models. + * @param[out] all_virial The virials of all models. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] lmp_list The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. dim_aparam. Then all frames and atoms are provided with the + *same aparam. + **/ template void compute(std::vector& all_ener, std::vector >& all_force, @@ -879,6 +1156,35 @@ class DeepPotModelDevi { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial + *by using these DP models with spin input. + * @param[out] all_ener The system energies of all models. + * @param[out] all_force The forces on each atom of all models. + * @param[out] all_force_mag The magnetic forces on each atom of all models. + * @param[out] all_virial The virials of all models. + * @param[out] all_atom_energy The atomic energies of all models. + * @param[out] all_atom_virial The atomic virials of all models. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] lmp_list The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. dim_aparam. Then all frames and atoms are provided with the + *same aparam. + **/ template void compute(std::vector& all_ener, std::vector >& all_force, diff --git a/source/api_cc/include/DeepPotPT.h b/source/api_cc/include/DeepPotPT.h index 83915e60a2..39a2e43488 100644 --- a/source/api_cc/include/DeepPotPT.h +++ b/source/api_cc/include/DeepPotPT.h @@ -74,6 +74,33 @@ class DeepPotPT : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial + *by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @param[in] atomic Whether to compute the atomic energy and virial. + **/ template void compute(ENERGYVTYPE& ener, std::vector& force, @@ -129,6 +156,36 @@ class DeepPotPT : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial + *by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] lmp_list The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @param[in] atomic Whether to compute the atomic energy and virial. + **/ template void compute(ENERGYVTYPE& ener, std::vector& force, diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index 4fe53d58c2..5f4cefe05a 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -74,6 +74,20 @@ class DeepPotTF : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); + template + void compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -251,6 +265,32 @@ class DeepPotTF : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); void computew(std::vector& ener, std::vector& force, std::vector& virial, diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc index 03c90efc67..3af999b641 100644 --- a/source/api_cc/src/DeepPot.cc +++ b/source/api_cc/src/DeepPot.cc @@ -134,6 +134,86 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam); +// support spin +template +void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { + std::vector dener_; + std::vector datom_energy_, datom_virial_; + dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, + dspin_, datype_, dbox, fparam_, aparam_, false); + dener = dener_[0]; +} + +template +void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { + std::vector datom_energy_, datom_virial_; + dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, + dspin_, datype_, dbox, fparam_, aparam_, false); +} + +template void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + + template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -392,6 +472,94 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam); +// support spin +template +void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { + std::vector dener_; + dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, + dspin_, datype_, dbox, fparam_, aparam_, true); + dener = dener_[0]; +} +template +void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { + dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, + dspin_, datype_, dbox, fparam_, aparam_, true); +} +template void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepPot::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepPot::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + + template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index ba8caa3bb4..882e1a55f0 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -680,6 +680,84 @@ template void DeepPotTF::compute>( const std::vector& fparam, const std::vector& aparam, const bool atomic); +// support spin +template +void DeepPotTF::compute(ENERGYVTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_, + const bool atomic) { + std::cout<<"not support"<( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + +template void DeepPotTF::compute( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + +template void DeepPotTF::compute>( + std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + +template void DeepPotTF::compute>( + std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + template void DeepPotTF::compute(ENERGYVTYPE& dener, @@ -1162,6 +1240,39 @@ void DeepPotTF::computew(std::vector& ener, compute(ener, force, virial, atom_energy, atom_virial, coord, atype, box, fparam, aparam, atomic); } +// support spin +void DeepPotTF::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, atype, box, + fparam, aparam, atomic); +} +void DeepPotTF::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, atype, box, + fparam, aparam, atomic); +} void DeepPotTF::computew(std::vector& ener, std::vector& force, std::vector& virial, diff --git a/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc b/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc new file mode 100644 index 0000000000..538794517f --- /dev/null +++ b/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "DeepPot.h" +#include "neighbor_list.h" +#include "test_utils.h" + +// 1e-10 cannot pass; unclear bug or not +#undef EPSILON +#define EPSILON (std::is_same::value ? 1e-7 : 1e-1) + +template +class TestInferDeepPotDpaPtSpin : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; + + std::vector atype = {0, 1, 1, 0, 1, 1}; + std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; + // Generated by the following Python code: + // import numpy as np + // from deepmd.infer import DeepPot + // coord = np.array([ + // 12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + // 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, + // 3.51, 2.51, 2.60, 4.27, 3.22, 1.56 + // ]).reshape(1, -1) + // spin = np.array([ + // 0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + // 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0. + // ]).reshape(1, -1) + // atype = np.array([0, 1, 1, 0, 1, 1]) + // box = np.array([13., 0., 0., 0., 13., 0., 0., 0., 13.]).reshape(1, -1) + // dp = DeepPot("deeppot_dpa_spin.pth") + // e, f, _, ae, _, fm, _ = dp.eval(coord, box, atype, atomic=True, spin=spin) + // np.set_printoptions(precision=16) + // print(f"{e.ravel()=} {f.ravel()=} {fm.ravel()=} {ae.ravel()=}") + + std::vector expected_e = { + -5.835211567762678, -5.071189078159807, -5.044361601406714, + -5.582324154346981, -5.059906899269188, -5.074135576182056}; + std::vector expected_f = { + -0.0619881702551019, 0.0646720543680939, 0.2137632336140025, + 0.037800173877136 , -0.096327623008356 , -0.1531911892384847, + -0.112204927558682 , 0.0299145670766557, -0.0589474826303666, + 0.2278904556868233, 0.0382061907026398, 0.0888060647788163, + -0.0078898845686437, 0.0019385598635839, -0.0791616129664364, + -0.083607647181527 , -0.0384037490026167, -0.0112690135575317}; + std::vector expected_fm = { + -3.0778301386623275, -1.3135930534661662, -0.8332043979367366, + 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, + -0.5452347545527696, -0.2051506559632127, -0.4908015055951312, + 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0,}; + + int natoms; + double expected_tot_e; + // std::vector expected_tot_v; + + deepmd::DeepPot dp; + + void SetUp() override { + dp.init("../../tests/infer/deeppot_dpa_spin.pth"); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); + expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); + for (int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + // for (int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } + }; + + void TearDown() override {}; +}; + +TYPED_TEST_SUITE(TestInferDeepPotDpaPtSpin, ValueTypes); + +TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::DeepPot& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::DeepPot& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, atype, box); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + EXPECT_EQ(atom_ener.size(), natoms); + // EXPECT_EQ(atom_vir.size(), natoms * 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); + // } +} diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index d3971691a2..6df66ff8ed 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -893,11 +893,20 @@ void PairDeepMD::compute(int eflag, int vflag) { } } else { if (numb_models == 1) { - // need support for spin - try { - deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); + if (!atom->sp_flag) { + try { + deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + } else { + try { + const vector &dcoord_const = dcoord; + const vector &dspin_const = dspin; + deep_pot.compute(dener, dforce, dforce_mag, dvirial, dcoord_const, dspin_const, dtype, dbox); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } } } else { error->all(FLERR, "Serial version does not support model devi"); diff --git a/source/tests/infer/deeppot_dpa_spin.pth b/source/tests/infer/deeppot_dpa_spin.pth new file mode 100644 index 0000000000000000000000000000000000000000..21aade1fba3a006480b295c4c1f9cdde592536f7 GIT binary patch literal 216708 zcmeFZ2UJ!`vo1_fK`??Mf&@uMB!j3}2r5B9lprc%0udBJ2`V|6ksKxGoO3L44lgem z1PKx)s3b*^`!Z){UC;T>otba`b=F<$zwWh)X20^=ILOQT2%75~Fv7eqV za9T>}zkB|_YW&Yf$p29Md%3E=yt_r}-_X@M5(q^nlt@9G!+oBCJr|G5oT z^~HOeb^W{ei~K|U|Iq(O9e&>bt7D|9|EKN0-uuV?>O0ZBzx_>{boHwI8HSucwV~)~ zDYn0A!~ax)Upv1L_=UhP1b!j#3xQt<{6gRt0{_29VAZyiXShF0Mnig4lKh!%N%fO$ zDRx?la@DqEWMr4xb8HaES(I;_KGpLt<*)O<@cEzYAJS3!mle0>!T?iN+x|m!|5E-s z{|lf0$^LQHSVO#zeGnMB?|osn>iL)Q*ZE)g{7?3eH-4*IF8nHg;qZT>AFSGspLb-I z|GW0%;ot4YKRC|6;!6L!>;9{8{_{0I|Nl?eTxtQOb8Fo$JLaJT8{;0Oa3|yxP_&M0i$I(9 z#Iu)&^1vhV1-ruQQjmLaq z`VifC{ZKKG^}YXLWR-^Dyzd8u_`*?Y9}k<#`Y{O7GloEo5bV{HyKN)Tgx@vRK9tpJ zgZ}7Pk;q;BQ0&C<(XBoY_Z`6ZyWQPzq|t8`qR!*A11)DAB7nq?`xsoIm1Ya zp&!1N4toUj=B`HFM^x> z7H(}+rNFAB!bi3;gdP^XW2XXI@yu4L+>s-#$a~CCNdHw27QZq1QV`yaug~wYnUSi1 zWX^SK8BREWR9WD?lC~Ut^YQH!HqTt}RkXP|U|Wj&J!gIpc$;AZztY>+({5z;j1)D3%}{7cy^+n zo%FG+%HP&c3W)t3{Y+o@={COig9qy51^vWU5bQP4s#xcXcWPL=b&Z-K;+4P3osc@b zZT+~SxiTHT$jNTyGOvP9O^bJi#4EA6{@De)R#(_oENe`2JQY0+#|I?7w?clGqPWxaXnFByDU1F6q-SKiSof`gPhl z%2W*~zkO%sL$hw6<34)dqqYdXh++A7))0=XrLSl5F2^nx?SyXOW(YUxFn&p>`3+y? za-F-s1))fT&Y$7C?&tO;1^g&T?VHEnc8$w87YzxWbri;fFn(f7Vuj$T}_kH5dB zSg>%k=tbu+vQ%gntv4J8@;L|H%Bgg0&d}kBp6!Cw{zbf{yFrrFwyW~D^^*d8tNOP) z9yN579{4m$QY0I7* zU>DgEmnZEA=cN;0S#QlmE52)2I={By(`?()`l$?XFj7i8WZ42Ijkd7x%(UVWD}zw3 zCR6llw<^@z)P|QT*L*3zp9!~wyl#<;M}o--@4jpI${=X;bpMvMmH6<;-14K(#yIi1 z@Z9F?D)iIfbmmWN!I0+xT#nU#$Q=54^9lKSjE_I+WPh>)CEl-B_ndBq%b{zP>XRIC z-Ye{Wj8!vEue{-T{E7%s(T;KCxw+{0@S}meU=Kt~mUdlOECYYB(7sKw#lV)igH^gQ z0GSinUj&B~jbm+t%nks|24xX9v4 zQvrbn9P{cq73d$aE-gN_78KZ2B=WmEF(2o2E?laBC$6Ca_cL1H!-^X&8dZEV(moZ(4y-veVT4RtN6Y&wfD^N&}0x5{JmXCgIA+X)kl$OyKjo zFHgC-02BE+*{WJn;gS|tiH2T3M(%k`Zc#;m+&fWoGk3jE;Nq*K7AS=3ts${nDw06* zl+mHH)Ky3$eo~)t(h?Rmce~zLUxE5776LCA>QQ&eUo5G#9%`5_JVqKj*wU`gyg#5D zdEf1#Hu#Z-9*Tkuif^T$>@|02sAduj;B44F(MGV4@wu;dq71L8)E@C>t3&>c4%@5Q z{m@agt^Kq^DbTpAVR<-dg z%lVa9Y!IYKd8!7`#^Y+9bSv(#mdwp9NyF;TRTpB9xnt2~mSjOGeo3?c;8~pwcb>}_QOowj=l&h@)i-u1vF2*;Vwzc zgBu5P-$u!0!{XM+QIYeNFj?o?zOp+Cs|_~YWq6thzM1~hCOxU}w2Lfhq$3kMn(TtY zTS~EUTbCF|O%xb#opY09uf)+SyTq09t8lA{ya?N=21t`%w%u<10CLiNi?+?VAqLLjUOJ@oIS}% zSs1JGx5p1sfPQuSko!WsQSx*KMXT0*eQiC04J$@@DK#rFd&G2*qi7xzdAHfOR{bjzJO4VySa?6_Qt#Fv{4y!xqD=ZQjp^eycy%msTHJQuSBmZ`Yp`@L#Pz z>6TyHF9d!e@C$+eClOe+A9VuDzLTvby*^3)_V`IkE?ONwdpNkN!db_lDNIj1$NK%h zl)ujZIX(}bIIhJ@AiZOP7)tx0XiX%4BVVOwjv(DSw^+b9{dB z#{b`V<;Gp%8&8yC&-*xuH4=`X-GUwSX~EJIY>pfDDECj$AI zoT>(jvq44X(STW4It~~P?{Q4)#p=ckmI2Ld3{d9s>pD;m8{OwWg*^xXBL_n11Y0)x z9xXSrJJbw*c6)V(cxrz0M~XK+%-cc&e^vhM_f+&}tVv|E;9p?<-?anm@gdSpcH>}1 zY`;Y1%y0T{&X9w-_LCmIn;|Su8LQ8Ru_SmikEC9ADQHk1@HJZ+tlBlAL4X0nw|9QRr#~$ z|IEJ%lK44}ztJ8iA6L!Prxf5d=Eb@7d(y$o<{?kqp<>XfQRNT$)`}k(yPl{x=Yj+O zqNqiAAN2Z9<;tO9c>6dd|%zAPww~oj>}C|0yu2c-w#BEyaV@PhVHxFNHl^J5E~q z`hkSpmLm!7tr%wfO)ath4SHWAKNBX4xMim7>FLYikTTUo#a|f+!`G)+`4{_eo6pxd z^9%JL#i!0`V3C1gZ^*K?Tze<61Aqj^Fwl zlhya3KL)8k!}mY6-|zh6v~0il~P7 zxQ>i&&)|a;P|T7ky(;=2`k!5Yr~hy3C;98E`r}nac1?0+f=YaW;unf$Y>%XHtKmoi z@#AkB*V~5R+U+ALS|5s#Y;ZRFhMQGVbpt<%L|WI!RTfk_1z`6@-MYrow}jbA>uvYT;DR^>OD*!T8;9 zBa6y(I)s{f@J=>XK!!&SZGBH2curK1k*BsGIbAq|^0_FuXtJ-scPScX*oRKAE+!!H zTZ-cO<2iUxwt%j%sSMlBh-rN9%!6U|mCc58o>*{lcbb}9BA!i&r4A@gL%lZh4V!yX zP~Waoees1e7BOjaoy#agu}?mUlOqY3JrP^uWK;>px0y|dgNdNUb~bqXnh3b3tq`wKM8|gc`oUMN8!B!rh9g6$?(y;_qNZ)LZmjiV)L#%1-N7Af;?nW z;LT%d`5Qidn6n&Qk#;u?sdq+>OHZWXUIIf?40Qp%3K?1YAX)-KPV#Gx9t#Dn&$Hyk zUeP$Yz_R1k_F^n>88bCl7mktV;uWIoT;X6U4M)O}QW&^{G5yk8TUtO}j8tB&0zQtI1el znik&eV0&Zm&J+4J#*OoK*5QWR#fH8nm0%nA!aks~678gpZnopdhwmrOJ_tCRisuHY z9!i)Y8nVW+-e`ybpOxcG_Q7RXy)yh`Wxfz3kFhzB-%rQsg12hT?0Hbd`}T{HP8c3G z3^sGRm<#uPG}ct@?*qcvrUqW`Sg_b_qimFxhAdWGN`hZv+zVJ*t@bW@A zu*&msOFU1-y7z5sKkf|yUF`?0Z%RtBC57QwkyQpb-P?c6=0Xzot(%d!DCvoGtXIzG zI@W_IyT*q)X~d&f5`` zl8jPfVZ+Qh`}^bwf^M6pMUQy`@8c77txBQDa6m-6et#Jr6EKNB%kKo22A^3e^=L!& z$eqCg?j~S(5<1Tn(T;+T6qudb<1ro^S!@J6@ZF~5+6s+QShMNXw;p|ajHpgvsS63i zM>NMaSY9f|`tAkVB~2epQ*#X&88ak(A0bt9zL`LzdpUDVHUnHYyfO(gw8u9TUMm-4 zZDH?iGGp;IRj_wq$(%yG8858s{QkTk9lWj(D`o=_bj;ML&gR)*EO*+jQ_ziX2%&+! zMYTYiBhkC{Mhco%O?74zTB6p6(*)1#dJx;r&2#9U6MA+Osx;ZRqFbJ>3P)!dc3ZbA z>P%+ik4-~Y=v7;Q+=nqC89RWITGxe6I|&S{U#D0*k@8cbKA6Zh)Pj29hhBTV1~^-k zqcJ@m3nSgCMMbZxu_4#sg}ibur19329okU|=Z@B%DoHHBD*k<@=Sh6mi8IM=0_&Uc zb9#H;#$zqG@1Wo7aPJDdIz^Ch60e0Uj)o^n=?I%|f8t9dmV@%lTq18{9)|EuKNT#h zge@DK_LUEj`b&d=Q&Vs#sEwS~^n6(g^BU@H^6t4XcJ%1M^E2TXZtCLm z>{$vXY}@FaG2RHvx3o0nwVH6hfQ0YkP4>Y0LH%^_@pAO%O42@YHUeg+c%FU9HieG{ z$&s#3aTpZKz1hD$6_ZVE^LC5aAXirJ&8IfO(8XlT!>{HGEsw=Hg*l=z)#a;md4(mk z53;}gCKZFDvRB7nZi#@h1;gdLW0g3+MQz6>l?K>LJ~t8cqy(fSB2xw4zlNf!n=~Vv z0#M(_deLX39;-?_>dL=nfmMi!a7TPTUg?dxy2zCaRT2}k1Jm{BkZCorCCLTYWbQQX z*C;?1^4yz0hDtF*FY+|#rz8KB;166a37GQvrQpg!4m@=ZmJX$<11aCNe6m4hq<+C8 zNG4hf&r<2#2Tl`_uA=W6-6mJ0FgRJB%}|O0bFRgg*b7lYtLf3bhj~zT@|n~W7C_%* zVdoi%G`yO_Ji9%)0VslK=)TTc!jmcvj-pY2d>|h$(UMk;g3)gaB=|FsafD#Db*c*6 z&uz_hIhP3?mZ37OIhko+^3RX931y!ZiX|s!!P%&CRs>;v} zVNV@ih%e^Com{_*L+hRK^n{e>^jImbJ@l@7Q-B3h)$(hRohrsI-N5IDn|y$c(sd!W ztqdl~=qS^<-O?*se z_*Jn45^Of9RL6J1$lgdJ${!gJalF+&VP`2WKVzwTp&Jf`R&CWP0cmI@a7H6i(-mNR zOV3XJ0BBKP&>enTiaQ2`wtM$^!l2~W6bF3-Ek|{0(}YM^zBgHu#uSB8dPbJ91y%UD zaBYb4x*FW*E}Ww>-hiBqdgt;PTEUKQo4m7b3BKriXyUIN3^Z(p?+seZQ2mJelyG4Q zycm0|>(4>52bwJ>O`aBFfsDBq1y;lU?S5y^Rd%8A`J+QLjm6-))jagzxe74a)##Y_ zAPw#BNZWr%ZNkC^rusea;_*m4MaD+;05B^nJ98Bqu{KNDy2Z5}?PXf(6|N;C<#(o& zIrqz9rZn#9)4hSPZu7&H)alb0RY1{u|78UTX*MRah}L2u$6hJN4Tb2SL!5YFU4ncq zAy+PxM_u^ z<}MHDmxQ~bCFkJUm!v#Ci;rgg@(+D*7d_b+_4*drajND)9^)HewxDmyw~5AV^%45H zo)Gl7p}x#b9|~d37VCx-vv6;eQ#E&80q%0>UvutJIfi*2J|J|d3H7q}t$R?M3$H!8 zOU2n+;N98HR|0#o;qogNjdz22aCNH5_v@`l4Dd6nJ4C|oa+0=~1#2dVZl++XSQOB=SeG#R zl>*a2X__=q((mHwuqT=m1<;w2P2W6NiQfB)9a-8^(N9~o$?hr<6xg$ySt&xGsY7}1 z==&frtngiL>yZXVE`n9e<#{-AO36a%K^lx~bgT(cOhvcdi-dP!%@7}d!en7Q0bDok zzvF$s94R9;l=q2O0jFEsIZsUw5Zu^(S&%Fh?x@Le#&56v&HlF%Pp^}tCE5QZ`MdVN z5Gh`@YJXclQ&UW>8iK~!BKm~u1fZeuGs{nD!cBrDlI8X@U_Yp`?W9CB>a)bf`Gk(3 zfy<@KUT@yx$eArCcx{JJg7EYx-~B-hSc<$(<2dJ=AispDzWc>OI0@mtsNaTh=yW^Z;poh4IUC;%#8^DV2~oo`9dG9B+*kG!UJB$s@T)f9vfvx3h_s8#SMQQnWhA*lbple=6IxDyeMRZz(zpKB5 zPlKJApOiZx%xz6`S4uA~y>N`1kFAHd`+|xdnluBKE~^a9LKF^k&37Kk#Rq{iWHSmAHi zH`dOPo=zu#%MQHbs?6k-w#W7{;NLU|zOXiSv z21pmzHZ&GS0C$P%z~~U^z0>@iA9Ywl4}o^{t5GLhk)?^$45-AHZ@T6!-P^FipJ`~Y zp%MuWSKTFytD#WUe(z?DG)Uj>ALc1tgiW8-oxE(_ktgo{uIl#%KuhFTdtNyR(MofL z9h(dBJk9NB{dEM`a)DcuFE|F3d|D`X6uiR#LbH5`pC@`|covN?rNY{R&*rj^Yw_EW ztcyYO7Rb5wl2mDQKUPw9kxjny2g>rnPqHUVpo8utg-AyPtaVF^NO_(E=1yy6jhu?X zXRmtTsn4w-FRRH_vRnpgro#8Zq#6#UGqqjtDTjoQP73OVQFwl{(0-Bea*o0p})kD)`aba%GGA<9N(@v<_kqPaB8%USx{`0yyu?L0dr zDSk_mKN}x@j^7IX&G;=8*lGwnHA4dTHoR-~9=QZEJ-i{-gXRRPV4UP_QgIVi0Z zKWCbZLYC+R4%v6@C~C?c$EshB*`lhwvKoWv?AJAu9+!rUMma`OJDO4B*5yu7K~E?c z?XtRNQ37Jaf=w%IQP`<-l*?EfK~(PL4_;_Mz0SCfX4*=~i;KW}pBrKNJ50omWWu+3 z?MmwLVj#x$>ru+3;#XG{J+BkpXmtM@UlLyh`u46rxLq&~o`{sb;*s;i)MIZR_>%JM z4R4o*k2RK}B}0Mmre>^mhuB`R}rQ%gOX33%wqR5YSFbD$q*Yz5j!6sksfx8bI; zgJ)|kqu@ZGOi7}G1AKgb+b2@d4Q1DTlyJEn3unsBkB~XE;FA(s%Q>BDFjExm$TV+< z?wB#+qU$^8co!Y}gChs}p8wb>p;CcM=No838vEe0@xa0qF&*l-*8C9r7zPaLN;8Qj zx1n>!+j>(_1;%WZc-A6niQ9WJkI;yOV`KSf^HEzOT-SSgF=Zhbnw2llJDb%&`0Ka( zPrWFILvrQ@YcmNDewq+o*4Gbx%Qd2Nd*4B)c5Y6Kh8HfDjXsniJ!h3$e5t@EVM{^E zzbDC`eV;$a|Al^!zY8BS;}@t&fD>PqfRbiSi*Ja<*OA?4TW1vAW(XKK}Bz?@#^u$cKOX`y<7( z{>k?j*!{VzvMvzMpOG@Tr4a}lYNrGa&pyUPQ@69 zaQ5=RXi(EVW$V(5uV-Tv4UN??taIO?ql59N<;&MaR13ug8S?7M_7ETwV^cR0 zy8?lnR7ReM2q5}0r!j7B0K(}GY*b&W#ZIL{7Qy)^@FVs*TWLT(oTV?g%RkZqguphh zEhNUoM))3~{Pqy=^YZFPGnM_;U(EJK#rzRC_}ky#@BH}%+~^|qmVht1u6TEEYeJqV z3oE0w#ju~NV{fKK1wQN?Cick3!Q9^ORRtIgUrW71^o~}5*!Ll4$L|Bd? zSu+f`MHS#}t4duykp>L2Vzc+*FM|oLS8wE|Dg0EflFY|0Gg-Wt+zeu?(+|pxRnT0JBgR+MNOVQe${?2%A z1p3IY^>m}p!6EKoddZ#DI7jJdXnMXJb)^@^zjo&WJ9lnAy>ctiT3w3VU`B-euLjCf zUz=ch_}V}iY5tk`OX7B?M~$dFqjd7y(@f;#}CztiB}jF1`sk_sFe_k%-6a?>C-UFBD+X5!zA}7b3`r z+;A>GPzTqtIG3e$ixI4qjZJS_Idt(Bsy zHIW#v`z>N@nSkb}jYdU|knH$--UpPjnj!LQl4*TdEyzUQCL7vY40q#r)KYa)N&KW? z_nMFfsJd}O_qtXsw%!SPHk8u@YXwHeL>b%QlwrQS*E9hv1`EW!jx{6KtOJ|xN+n)S zS@7QYmBeE-CaE7{FTs@4<&pw>2~e@8U*6cS5eOSYwpa&&nWGDl+nRa~}GJS?uf&Bf_ogN8eoCU4~6` zdw5@w6X1Q~tOE6b9}rj)3(e!3(Itu8{Pl1Z9)F~L-F|HiTyjjvIcp#&nxcmMaRLr^>vc9GS8NY8J-}$)~Lr0A7 z93Dj26KtpCBHf0Wls8!BL^HvgW166Fu?E~hxxK%*0tu`+kuG!uthYISLIK=lAw z`BVC4h%zqTex#3pQT%}koCO4wQ&4PqI$elGr|0S4i#36v`M$kTeePgNd64{&Uma5N zk)H&XW@MZ?Y*ofh;>Wbw)M|nn@vg5>&Q9Ml^tcpB`Q&gF9A`g#fORnhgmK@-)5#6U zF#9oh^UFr`YpeUV)u$Fza=NNfGCRrd6mC>~}GHT`6#|f1CJ1LBy@`1~$pv zb+~Cc*~0gBJl?TaC`*{E0q?kyN1aDpp~d;|{{7|zsPq1K)q{yNzjbhOLn|Q*)tsxu z4^CFYGPil-R;4IBGU0m1Ew~vstn>Go9&aYiNBuSwBH0M>6@o%Cye%+6eyVa&wG13} zFG~!uWWsSrfz8(%>afkm?v+Yg6BZw1efiL=9CuktuT#p-gO3mR-$o1Pzyrms41Qk% zZfxFoh3;T8^wKI8(#AZ8mbd0QpK7Y``|Sl8X=nCanJjeV`=u~Bs+jPruSJ1UOqltyY*5jMrR*T zA6E*4nCFKDKQ1;vr?%$hDmopG_+~p41?^_-gdKcrffINYHrnMN$pFE*>#0!p; znxAePuK?bx^fY<(a45UvlG}bh4Sl>+Z^UTf=g~2qu-m?4caA+Mqvdw8(^dW+&&EU<>=$bxI)a1$K%xrCn8k{Xr1zr;)iE7CRdwj1)4TPQc#@%-2f3k8FpWs zwkyXSYI{w-B{gHTiL*td6alur)-T;B*aU}ysu_?_h|c=bN3!1}L%r63Y@cs4n4RJ* z4KqteLDTGef@ho1?umexI#mo) z->R;6D?<9wUIx}Lhz1cu2nh`xKldbEy+tPIbybr+hE`iDn>{CgwtVU`~5 zB{qX7neW7G?i=74UeHy$N5H4Mo^z#B5@D*W^l-}KU`#IzY&VE+gi{<^#pO>CySrz@ zV%b~p`Q2;V*)SKkU5@pXb|d{hhThEha3&XDjC8Kozn+L<_bB$gd)*8pJe;qBmJ9Hq zZ{`P!rYg9whgrn^SvgKoNgDVcA>h#D?23C%KDx}`ckpK~$Cn+p6K4`rL6$*dt7}(2 zNQf>t33?Ji;CPO%!Ckgg-L>WDqRPnK{b)d_4Jt>TpPQy<6LqG>e*+U zyOtILo$Zx7Im4>Zl}}~Rb0imzC%<$vCfNfED^nEnj|j+7rrGFux)x`9w3PKZ>oMYB z^r0Y?C@?#iO|HXF0DG48qjk6Ia83P^_2-+Rm{?M$E@^EKo)y+bt;}Uu>UMeCj^kBm zzMW+^F`@yrxdTfd>@EbwR=!kDuL3Z;;Ct%xU=m1&PL-W`Q3V4RZ0hSDWn<-8gF~A+ z<8k*&Y$K&sHMBe`w%MUTip#m3NswnQ#B^P*U2?AVD7Z5!fFdy)G#mzZNoiET3)xFD z_Y@*9UtH7axvC!+balR?j41)Vx z^Ixp)-z$Az22qGUa7;Y;qdH)ZQ`3lDY(}{|(J8#&V{qi?OQXJrc5sTD zLAaK*UP8AlL%k?uA+7J$XEK&Wa7|r3xi+RACKxHOoVpTBDuzsd_*cT!dzTXpSRznz zCOzS9R5NaNVX7*QAmY-cGy!XaYCON*t1fI;HE!#W)d? z#M|-PPm%gloZAVx$*Yy<k#mrtU zg%+?1*lnCliu>(Uq!gVPDTTZ{Yx=13OfV~`lCa}@IUKLPl^s%A0pYHF+-sDwv3jZ4 zMtrggQzT_whh0c{GqX3w$Z4xE`t|oGal+L=7L$=1lc$db*ULRbNO=xEzK6!w%h$sG z;JnMCRCO3itFwoFR}+qj9s3a_;RH{Pr6#&bIDpH|Z*pP&PtfT|WSq^#3LJGwG4o-s z#N@s=r_;cf%X1xr00C@X zzNppwLW=X=aM6A5oq-XtA?R(L1vm3{vD68cL$mf<%l({<=<_A;VY6i;d^UOp@2yDd zXk^EIFw|+l>vFuOJB}q_o@edcyOwyYf7Y4vxPufIXR6zCr?DQ!k%W3=Z%E{HC`p1= z_hpJT1OiAh>E~ILd!nEuqk(>w6W)1fx~Ff2WZ%9FdC1q12lUiJTZY0(@%UyPk) z*8q#2o2cjGdI+{n7Zz(Ez_oYWTd_O?+-|(5X8z`kIf-+olqWLaC?~U*2~Qm;tMoF2 zHpF3-EN|`i=@v{-?oGW(iW3}J=nGlC)(lGmhr=~Z8=&Dv_0(cZ9q^MK_ut#wfc$G> zyR|oDV&&$_M!NYH5D6`&n`_R7+{DqVI{bxj^xpB@EfGy9cP;lL*DV6HoZGhUbZ!fJ zW$^jMkm61XJALEl7YniR;q9m@2S?~NzsP$@mc;+aNwz%oZ9(I6_ERyGjkvEMFp0j5 z2=%*eC%e$4fUJyzK$=epm|wJ$clITKZJ1;|+u1}ADVL+)WlKZ_S+Z|kqEVP(?xkdC zmWYW9%w18W^7RsuS4y`W9PfHvazSj{6iAO_NUt_KbxpVvy+{4)vEm2^M8(? z2>;FX$NtDM{i9sfr~K(!;ueywRrxc0KgUmn|6%;pvt<8qEBYF^?y>oKBq^WDUtdhf zAuDaHUlVe-%)9h*CxBN|qQna+()csjqn#CAj50%Nd|U4O zf$BbB8*r$=u`c<%eYea}_pPS8;aDRUo%Q8k<|xO`W7;f1{!MUla+dD#(^A~Tf5Fd7 zp$Qf)a#*=uu7bW7pBZ!&Nb?e9Z&3G+rDEBMM@!cg>u`wImrT>55s4hC7pX1Yfa3lc zPGRyAV11{#|J7n1k_)|kTt#1thvq8WE}W{swuUm@TPa~EFq7|jKDPs?*@e-RH3xUi zcIm%1aD>hLN>6v5D2F4SzE_@!HpA|d?!K;@5+IpBJDK6V70P&DbIvtP#PQkoQ=;b+ z@Hk3(6;L%m@jmej<|#GsLufu$l%fE7btJR4D%!$1iy8SPnKCG2l=$lT&I9GI(Vo(~ zTLMbdYhNac#-n$dMXUGrBpCaATb5PIpS13tootdX5lb5d`sV~wfq9#JBpfB+y)6no zA8a#mdi1;^Wu7&PNO{x=E7m~%W6JYF{`M%axWS-vPcfW3LhG5ySOSS^lV7i|e}T0M zJ0x`KD}iR}(P_2#COqJG@1XF_TBu;@zD?Hh7Ibq{WbRU?pixJauiwRdVBNZ0%v%ckrN`2wr{pNt8lVv6z_azLt-YLN+M`q~Q4DVxiEQR)k>2?smOP@s5 z`VxAxMl2#-%Yn;Sb|I0}AK$9SaPZQWgZDW#v-O!pK+6*xFKM0zgEZfw#Qo5d z52m?jGbcf{|6&YEo4@k-MB=+ddK4cohNR=GtM-;j!Bvnd;$gl>jKIwO(xh&aj2S1| zcRa3-hf8J>+@+S?_~_)ifay3QHW9L~zk?E39)88rRojLaggW22#ydl6j*dtODKC#+ zqJH#IZV7xR9t3U=0=!OV`LM)t4#@2?irh)-pV{;G9i}SIfZ;Ie&1J1P znA+AF_L#Q`b+=NP+oFp-SCTN||UI@V-j=RTG-C?$uK*P69Vd|08aG#n7cW z#C#?$2gk+jI_vvuV9n95HX#R!z%emAwaPekqswd=sf>?K>5?+C*8to-8dw7~1I+bdUiZNV~x{|ob29i&obE)|WI z0((S4-DbNS(4VT*lQ&C2x$EV^F;sPM;QhMJ0}kbQM=5YA^HnpLzLk%1|5gD`G=}MH zU&dq>^e^gkge;H?}#-JrS9{gfg%OA>1RbBWlMs|^FvgRZSTUS z^>m86a&s`D>+{CX7h7@st5x3i@l@>Q&^Qq>oq|nS&kWl&!mwW~Vnnf^1*AuW+&|}) zVmQ-|^I0(!B;JnJ>@0OXc(|3!(}Z<{a^}F9YnmBALv-*rxg8HJfnTiNQI~-HcZcRr zyg8sfvySPxOBEzD-y-`IQ;7Rh^QAw^)B@F#@ja^I1Q?0w*AZ#119+J+9zY)t_Y?;B z+wvozf>@}YdaDlO>X#gjpVWkJKWG)6y-q~YBk)zQD;e{?6yBbV%D@ihC7sLbyYU_= zRa;>PLi1Puho9u5VbY6gzTcn+&Fh4X{kPR&d%(?T2EA;&^b!@+F4W-(g50qk?Cn5p zl9*NYG!@IA0#)GWDsq5_ha|F1Nh}4cPFoGF($R{B^%`}29F;nxh$Vm!NWZ(k*rBM=&E(5^Xc_$DCA#z z`a)6}>~UiKLhV6+plYZAgI4u<9aImKcW;l%-Evd7C z_>OTT&-^W3j-(9!#9s~TwFa*}tG7ox1!k7gb+usZLt1i{y#TpR)>+(8Xa*X&8XpGZ zbU5jG<-_z&Z#=_Q$VTBvgtF}+kAv0}LDk_DdG0q)!GA;aU9o{;B-^b`{=B3FZ)kR= z>S2YZ@d)kU7djg+3OctYU+f||L?sDLLu-raA;z!D3$c}wOAfgqe zmAIQ{IW8|Gj!xa}1*%;`A}S?07%m+!eNZh6mg-_uFOl*@l+%;Mbh9#G+Cx$5NySUt zZM?krUYx{_sfzT7-e|*5t%n;kr?a72$D{6TO$#~;eYNoCv4hJE16xIu6VY_q-bReV zAGjH0Do9D^=u}3QDZHTu^C!xDX}F8=zIcj0_pJ={ex>jsFQf%dX?yBKe-D9;o#ba6 zYqH^Haq9Qv(r(=I@@-?7R1P?A{X~0nt{hoDj+<%RjDVQHV> zm7Y1(g2$s1Se_?W!l}$B>CD=ZsAO}}EF&cxVt4K>tHm@$cqe z3je--EsI!?`gKx#fniWF|8-&yeu``SbawkkVC;~QSWX+phK=?Y>^r7GI#BY8Y2hNc zum$b$HJ%5i_Jco&o2J3XdXmwY#2Y`#2_8~1D+lUL*Kge7tAQ5c>}D?l39yd* z9x?x$f7LtQ$s0oQMONj{{E`3U|B&*rIR3`^ALiQ@ugfN-;8!yDQ}Hgb2tpJ+ml&(D zj@I}%b#xY1N}eZ|`|6DcCpwdA+RKoMLN5Py{42O@<{dRo;wu&gH@H1934@CTJuB=J zr8r5*oc=_cgn537{nHN!aA|w!tE@^hIBQeJszaKO(4))czj8+eCkM!96Gz&B_O93C zb8Vz~2Mi~9Dc^Ns^42YnXBjJi{mFvVm4yb-HQs#bAbk)L_EleqZOBAb+6DQ24}zd3 zcj}9!d^#!LC`2vuaXN5)*(u4eRET{E?N2YIWx>7Wp$Wd1m8fLjfN!42Q=WdndclOW?+ooK_yRUEixy-C2R;f+zQH zPin#Rt1baz4fQy#ck@c0G|6w)knjp_%|XXCzLBaX1;9f;#^Cv_0b~tQgtV38z@?aL z*Mv+cIIr1L7ATwomSUF|94L!HOW@;x-INEOQD@;%WT=6AN=JET4z$2#yDrnWb4~Do zRl3q=C=KI>k689!BtooRp$yA>9wthrYZ-n{$JUCtk^+zCr2J%A3)g}o6q{{Vts7}a z+iS)b%)DY@pLf*DR;v!YJX7WV>_QdlKI?H1POXO>O%rKXHY9^z5H;ULs}vk<6KZ}_ z_yqcD@+9(!S*YJQ#{IuY`wn=j-v4o=Lc>Udlu}6<*`v~0G?3Dcl(hcOIrmzP@Bj1p{JdV*Iro|G=l#6*^FHV9dQl8#4)~oc zMRt>LNbQ`Q8;#Jbo4tLJ;caM9SvPmirFvj=*=(B3v^)rcDkkl6_6PPn!7e8|{_O}`Dddpt5|V#d1gEzi74p=t zhIf-UKmVu`0{BIqdGNIqLkpVOz25CrU}Zb5v%4Y>NKRa7XAxTlu85c9DSU+B;*#(r z%=QkPt|Djucu^sgmR-Ta5s03jXm9NB9SDZA4qf|r?@pbz%mb`q9~A{-qGu zA;CFuJ);BiJHOkJVH^vCJGRqp(Yaj1&&O;_9dp42`8lh_JxYMag>W~klXt*i+m=4v z*aTqve&vqCU5PNM|BQV0(i+fe+awUtUIOcMmQ1mDcNO&1W%bLeo`o^;sRoai76N*X z+y=MmI{0PQTb`6v>3}-Dv+gK*UTuC(VnmqICAi*=+abg;2FMHT-f=a!5z1TEymZdY z1C08dqJqVcnC%F$vluWIJZ*Sl{3t{LQ?DZfs5k03S&;%4jX6>|9 zs)ibs0_JTS8{pyGS!-9621AADtkBiVa^T8D&5VtPB~X{wJoce(VmJIcHoc3{0*OYW6I10W0U+tYu@a;%YNqMt#lS4ZE z0-mg|7%T>7Hp#Wd^;E%I@7aDz6+tL@ysgbk>mpp}e%0>0#vM5HAyD}A&YLjM@KxnT zty<{x{9DDD$||7PxPI1Lw9g)b3BHGG>ws?5=9jmr=y#&vrJ-)GM%X!{G|xM+8Q4)h zGjXUq2BCRUfVmd@?;344`v0Y}E}IW?ZmQ;|+Q*^S;YGn$KI{*;S2`?k=nW(OO5u zga(d*#8euGDYC~;Xc_%{R@V$In}<@T+GN6q2hSWTtiJ)wG=!EpqIl4hZ}q(#E4`rp zneb05kJQ55ZE6b~W1GO&$;N$)-$cQdqo%7xzD5Frhs%W;Ja2&j$smuxidt~|M0-Pd zMH#&IAw6`hT`M?Pa;_pv^#(|Jk+#UI9pw)kQVWQ%yA5Am{i$G*mJMg$;B&vBhV0CT z;rrSoqJV6FRdBHQB}h4R{d!VY638k!S&5v{ZV*v@m$-qr&<%Lib?q zOE>F1J;-nPbigMlxf(o|OujI!I2Stly}Dw4p$QfzF6Zv|Z3f58o-Z)xxBb9!Thui3Dgci`&$7eRRG>8_agBCKB`n*hIAcCK->-7w z7ASvG1bT1YpMt&01)h9vsa0!|;h72S7o&SJK-{^~Fb;MkKZ5f^93Hm8gO3**AJ<8S zj$r}fGChq@F2J)QI4KcK%wDY`wK5IJpKTU5+U(eAo~`Ov^1+(pL9SQRv*2s7Gt+37qu{;!N7?p9 zU4zp4bMJ}=T!bbLdKR4@_2H}IvQPL$a)I6~b4AX~d$3q(I(5!KCMbNdL3w2v%3Dj= zsOh<}6ee=7Ybe!^2iq?zZ!rAY1cW2EG_x<+2+ppV8u)VkKJ=WH?e41TETF$PH}EcR zD#{l%_l0fM&?BPY8c(P%tcWS6(^Lx~r{vZj@0F6kmiA~vxx6N@HdIqtDKHnD4LaU8 zwKN${t>%;Rn{*1k+RC?;!l;4y)%z~L%?pEi`&Mo`U)2P@tluJtP~7?Mu$o{E-DW+;P&r5f=~EMf(XKU4(vbKr&T6)L2#u; z=VDbyIlOmjh1&V@5+IX+=}|=@K$-r{BpjXtDGrB+%DP)X-u0_R_pX*e-xXIY7NO@v zLo{zLy7m4VxM;hDD>0}Pc3+$N=JT37ctNDUz-xU8416T1KwmvI3O(VJOd4Wa8po=yzb65|3^O6}kY}PCWY&tXmH+uD`J)?NlXr#W zTCyI=vB-04Kb;1JYoz82_#c4eN zk&`Aa-d2;?FZW2G|<$cWwPVuML4g=?$e-M zHt4VQYzYi3267&!Q@8e~L8mX}9X2wtq<>SR!^!ZPiq4PF=YRP(WApD7S^4)I*ED%1 z2)u+>A7s*Qm^6X%_2mg0*OxliHs-eJT6VLuIy);nLobX}wl^bBu0X3`BH`Bq|=1=dj{ahIjOTmr4y4;@UWw1(vVCn0g#+RSJ0`isF;+zIaSKyzMm96IbpmyqpBXzkQsvF)%zf8XBMc_@K zKRwK+>U|~*3RxcFI~79tq4ydKyU)X~r#YMrLSo^O_~#ls9Fl-}Y|w=XyUM}AT4{eV zt1BShIl$fDzW{{m)l7bqbr$T{e`Z<#^h|gsg;RQqs3+vwZ`#_d7S$8zOx^_3(OHpxo3uLlMy9Vr2y-$;3_PRWAV zl~*+-g~~y(YFG4ooir#}d}qjZC;|Adn!iyzqYT&_)1Mb68U-`sMCDTi(DS2g*Ant} zBm$|)ZEPQt>0nOM6gvvKt;(q1JKodydnlupy`C-+fXD@=V^Du1L** z%jG+{EH*Vl?n&wuMW6_pAJJAZpANyB{ZZ;)@5jN-ak@0ew~xUjf7R39o>xIpy}}iG zGb+J^+0*P2?bASX-!!M%mMpk(_5}^g#pwKQKDA)_&a+_qtjd|LT$%8?s5&L~SR9ys zV)l{y!r5>};jJNFlYDTRZW<=$jiBK6PoyZIP+>@DNM-f;V*rFp0^QZ z)MdQS0g!QVzocpz+|)eDzW!7uI9;PtqwkarJL-<+Nqox!2lBn7ubAh+qUNf4ff;w< z^&ijpE~~V_4TmGN>phF0K+dTup*86DtLmb#n~Dh_##_?W=}H6SoPFxs8d@uSrgOZW zV@U@1a;9$E)#hlBdAZ=4F?vsNTWON*O2K;I_rxss#neQ&_H;eNbl)}bF|4NZ({WpH z=*e3B%{DlU_6?kOt*{vQMStv+eNY6j85ef$6iLkW`FC$S{=48?TkwO* zyhFfdb7?}0a3nZ!ZiQZUdKDT=Mj&V_7zmZ{H5%i!+wVs962jD|jL4zAUAqd2AK*7{GwT%e|MT`8YFino1{N!TAA59&P2B1ExUV9h?G>Kp92utq*_N$Syf z_>8wmv3*?)IBY7>u9;r~pHW*KCQL|&6c5F`y=?K|Ze;9a%B2#}y^tenm1jQuTKq$Q zQ#B44N7e*wS1$&7RtL|03dn@jJ2}~vT~ETpdRNb+gqH$8?lt_HBh+TVZHrwXV^ z-P`vL#eX>&9TU*cguxo6*gO0wZBTpRTZ+m)FMk^Lh2ExjkqGXc(DK6v)}JLJEX zz6dQt`AWrKJTDmrHG_LXlL~?q@4||StK5H7+y})sr9vmBK+tD;mHp&B^gidqGt1AR z=Pa5+tqZY7O+fAED;bsfIZ#xRU&~|Zb?6x+vGPbt7^tkb{Hl}!;VbUL*w#$+evwsS z(ar`5Ku=F&hMderm|}c6YMn|dIA5dU7pzzf=XUyk{ygvi+@Tt4sKOuq`vdS#0bAUmeI zRrAGd!$z=#>!r2*)NJrY96YkFxeedf_v?SuM!#QqEyMAgO&M4n|HOL5@(S4A$E&!` z>K1(UK|f=b-YszT9lPp8muL_!F~qm@z-_Rlx3c71cR6rgb#}JQarE3vp4gR-nMEMh zoO8dT${i@ENB=UVuomjR3R|h8hw`6aJ9CS0rvurf^0TF<>%if+at3F3bKvpf=}VWP z{8%eZn_gN{4s83hT+T9~8M;ZIq`@ntFeg=kBfCEbgbKMRd#6*__z!vynyQW!j2 zj`C#d#id`_3EqJlGA>@Lu0r`{he)a!2t+WLjZYilK?Fptg2n1=vh! zoKW2n0*V@)Tb?#n!Z$OI27t9y0Nx(3l<}y7TQ>6x1}c(SMvJnOjjcuV*>7(LLZ7ICQ@EEkTtU1V4e*(Tp)yQYy2eA30| zr&r{H>*~E43;9doy+aDGq){Hf*d>vCxs+z0+puoxb%7+nzBVQ~r6wKt-fcO{eK;Nz zx$WVdEl~mIxk;E;gxA69kB4~rq^qFB;wXc%IP@NvdLs#T-yHBmJL}cmY1uHfaM5!c zbWZWW%INjXh-=^~{`k$Yh9cPKHk1=FEeFYs!{J-s+yPxXJg65XO2DC*Rm~lnF7XJ%Z^fiBIt54e3B;1B6=_D@b# zFvnn{=4bT$Kp*F7js57k?8%-+@zqb0!LH8kh}38ZvUXU~ULI(J3Y+@(+)TX;c5f4y zI`hVD5VYgVoIRPb(BV#^Rm3s$ewKBbvToV|@Zr9+7k)i8Af_uv@#EW4DC-om{Hx$Y z;B>ul@h5+CI4ckHP_&8%Nf+a^-pXge=R@^%jthd}l$V7@^0yn|ood|-TiJBL%U97+ zS6c=pjScv;H7>(tIh)zMHFLnA@3(CR%d3H?h0lqtHVM$mw|m9Cfh^E;Ot<|?QZBst zO^h?Lx&wM{`EYd7=Nn)#cX;?~iw2N2n3(g-{vy13SF)qqq5#f4<()41JPmw~cU;=G zrUfRPIpVdot`U}qihrN$4#6DnMY3u&5ZvU__{O--Vne*>akudGK|u z*wqA|SMcVS1e5IGA7GxntAy6p*XVcqB=g&>Z^6Y{TW%*ue1v_`@;{^#*ovQo8Ldfq zRtimYZqAK5_z>{#^cG(gPyr^y?4?ZBYlcaMGa|+Lhd^ME0NaAs-(b0=XO~2J2Z;7B zT`zd<1IP`MTHxy23oo}%-_FJM5sKIj(&R-ZYBU|*@^yk%2kE~_(&Rh-wf}~m&;2L< zn?COsI8m$=q{zwaTUT=jQ>(-jKC=8<0Po zJJ+AueGBx;-t5AVK7U@jH!#dQ7<{xa;_nrS25&hImA~#l?@tq2Kcyf2PR5CXrEb!D zs>t>0x6wBL>-D4j)_=18Fp&l!4TTa&Q(RJc&pHLv^H*ifEy;(aaaKcq!I41VR%Po| zi+do~^^EN%TYq@cWnzlYTJ*fn-DN7*7sWtpty^{TFQfo>1t&uT^+veZye_)<0(x)6 zYJ1Ixk04B{uZ~XL+XH56Mjzt(Sp@I)4JI_5y8$`W7vvm#SO%AhZJe&TCK-r5)Z(#Z8Q#AMe0+x08`!~EMs9sKlG@i!zNOGR}7Pn(Ng5!9n+*etZO#d2v`!sjg1A1%pbJv82U1c1cEa~>F z|1Qopj#RRM#~9sibfn|f`*ev0JQ&+D?}HL11i-F1&BoEj(Hdj-Gns|HQ*B(RR&Mt8 zRCAhz-Ci1QNo6=W*uw`oaru9oKCaHCQvv`1W=i~4eshjv!}XP(Cq0{ z2bzl=#%V!8gD%7aZdAu^n0F)ouy=xX6+Nn0t4aZ@;44KW@Hn*_R%ps>68Sy(vP+tV#v zZJZokc=T~gI!?FTU}NFRgH2gxPGUIe|Mmda^ z!JLL~%rp$ z=~f7(xQ(+bCTMaHq3VDbfqaBuXR(zFOvUtU#H1F8=@b9-Y^cQ2$nVTMGgN9xXLfhO zQF~@*bR%ZzOp_;C;&jx-#+6w|H@Bo(+Bl#uszjBA8&R$-Bg^_ib#ZWIpb}XbB~;?* zLbpd>Wn9*BxYsuYM4v^VZFVt){IIg%TNI*GZ`92o>K zt_++`rxrQ_2EKkp$_N@?q!{Na);L=+w~$XcAZ4jZO?ktkK5N+0B*Q z17dayCr1mKD>1(d-IZ!dbGN4Mr{hjz`Va^ww%EnSl8%b4X!b6|SUBCqk?28SX}D~3 z&e4^d%1bRE@M3n5?Bs0aggelg+c9oI#Fz1_VBwfVgQGodA02l=bIGA$tmRlcMb=r% zkJ&extE;0tm0@FPNhh`h%`C<&a%9kHmaP6niQH%-V!xS1;~I_IK(q=mH)l(lE1k-a zAG2gu8G?vGbD<(^x}!=Ky2sa9MU`bfbEa{2+000AzCDw&|1z~?yr@sp5sS# zaouN6>?5&(NGPFQMS_die-^YOMFgqK64?Hw0ud0&GGdNsCy9>4G%RQkdd4gRQK1Wl zyHXt;={BRq2vtsKtH+~?Rp?+(Fg6QdOB)=CGqkrZV`=RF!$p6iVnW2zkW{7G6Ub!E zVDCt!I}=3Zh@?G1pEOHCKqDq`bwaoyg#!UBYB5aWOnxB>2`(bqjt=LH;I(l<{Edo` z=&_=?*)uuI!O4k~9js~T9v1d)mWV|OMS|Lznj-Nai-ZPom1AAySyu%!bR$N0bf-Gw zPOeP(j{3;arDc^-1*yA;g^hwia6cWvX*CSqFzlpQAXaB?LnG!T%MYSKtZZCek^VzW zODy%k7>Py*IOd0dVl2AFa2g?0Nl8hR(8iYh ze|Gb482!VlakOFr5@IXSHdgh_%m2+m{eJ_}JUxGN293L<<~IcsBwcM~DS z(GuA}Vh6FACesMjj7%y=b7aC0jET+CBMpI(jY6dsuGnmr@ndV_DnoU)v9PnJ^B@3_ zMZ=_esw>T!2b+WJFT(z@vUj4n$|++)l!>F?7348t%B0cn%8J-r%48Gbx561Tdn-%? zH74vK7eu)BbZj1KOMJB?Z6|C#YB~{pwQ+Ps1Bp^541YtzETC|bW}o@+ybCG3WePbdi+{02l*w-75(!(%)WsE-T7U*M!xo<~B8^3y?v4}_9dRd%`I#74!a%E| zOk@aACNYF5lNoa<6BwctPR0TXFJmEP3PWtnplCd)1!!b5OdJg=guu48cQQxjq_ZoP zgmH9mY$;mU{BbQvlj_%I5~vwEl4T=^!p4GSo-O+y=GoYWGQ^P9`>12fcs3YpBum$l zBeHDhL#8=p0^OB4%I|EA zts)>~kPRnL*iggad-gH+9AoZ@!RRKu2qqFr@*hErX4q;3D?#t16vWi^$VR{LJhn!U zTrbms!2n_df}04#3)9fjn1k-op9y-?B^xq8v9(NS1ks!%Vn|tKN8jgyts^l@a2u0Q zu=R8Xfh>aH9B3Y-ka4luPscV;*h#1yi6U$x`ZkyBL@HvsJEHJW1D)w(Y5JK65>0-o zBT$NI{T1mkVke7+w@1$CtWj{VOdM)1RjFCH!_8&F7X}PJnES+2$T^b zaJUDlz%dFFQLMctoGhZ8=>_VHbFwx$x{ml*x+Fy^VtOp{)+flDlj+K__`<-=95Wb%ZF@_>TiJ{C;VKN#am3}uwF$*S^Ml=y-$ymhHk)#2N(R~?||0Hr4gm@u9 z9&d(XR$6OrRDO=kdUKk@;)qp-S^JV`|BE?|G5kpRG{(Y4O49+ep{SCxFfDML=IBB= ziHZ2XJSqzA*@4jG}-k?&(B$0^7@^Ku!u5g^!?5LXKJ4*t?>5 z3^PiBdHm9X2zHH$Vqp7_34?G?Zrv}D1Z+P!6_X~h1LWZ2JTc7Em)u0cS0p#_AjOLm z9%KRO++&9*Y$O>m+c>%6*kL9?lLcf@&CAbZ1_9OYkrd3ERAxjl1v|nZ2%p#&f)%i% z$QB}rWH@4h9rGjUjEL3|ZjTij&k4tlYk~Ku5BejU6YedlM{4YZFB$lAr8_v&ahj_e zPRD%o7<6B9nWU^Aj*nozevC0pK`6u#ra+!2=I2X-ML12^Nv1Yt`k>e;tu@_)S-xG2 z55LhZQz2Vm{>=RuQOGXN_BP05LaO@om>Oc)gu{)Up$L*FcO(Tb3ZVs14F0eDPi{Nn zL1qvMJ4@kXiUo&}xDvsj=P3N-HnGTy8M;BX9SY=OfqJAxK)6~iSP=6Io!r}C#{bq( zs#pk1LxujXp~4uXLJB9rLFz@0$+twX4U71fysk&Lv1T9#M+-$WEY}jOP7K3?ogbs4 z2znydMMYjNc7ejiAka;e(jCm{OgVKCVTaI<%wQBr^_LLU6B}enQ1@kDrlaA+G!?G+ zF-9qnal)5@Xw1)-F@`XZ{6>V$K!ii+J;W7E$f!&NW07OliRjsaA>-&s#KGO{=`J#e zHC<5%gpTtd(;bx%8j%OPI=UpJ&ZE#LdaPhz4IDFO;9ukHC}gnL3Ad9$ksBU8=KcDS zI5ZCyJ0c<-3Cf5=h$6%?Gtf(z>#Y7RSp1kt{tmqibvWejKqLG1hpTT1dMM1NY!$U(>04basw&>`ZaJXi{ecfv$>pdr5Vz*5KQ7B%b!;-w8p zH>DB0#7}A_!Zq_{nV#v4q>&DU3neFuWv~p@Oi~$<_Tg~rEY^k+@{Y7$Y#flYPj|+$ z{g^7qjgS~v4w4Im5=JaTIs~~abDAa2=~PFW10Bml__burWe{SU5Pde5u2?=(Rx;yH zSOH6yFtq?3D;$IGQTk+VGV}Ch?TEtAM2viM6tzqpG*XKdF^(W6fap`q;AN0lWQ?MY z#DpAHf-tcF;jWa#1ceLXf@=)!%6`S2JXVf+aG@R*L=U34m1OA*ZD9p-K71Ok4XakcwCx>cNM4 z)Du03_M<@F9t$KF0+R0^0@;9oY($?X^l3()7E(qMfkr}VxL|h}<5-q4h7TG4oB0?? zzKrXPaa=Nj3W9oBG|3?G%8x+i{D&alMPS|=v-!k!{^w0@4cS0hz;5JEdLIfv>JqLL4U*I zm;;&7T*!`Pu&maR?}T@-AH6!w)0dRggu+CUgpk;yh8a?!45UoQI0!t%P%HzByl0tI zWBe~uhs2W#_5$IF@Wy)yJdO9pUovf-SBw}2 zsa_f5v??S1pRv>k!bbJk|6(cftQZCLNbpp#*9bf@1l}70Jbv;Bnq!Q-c>Ak#QN!M$ z9>kILd!h&P$a+|Pd>AWT2v+6zhpgI%h`)aftBwUyUKaa^rX-~906FDvI_}diK+4Nu zpHUAYSUE`aUcnnS?Hke<-g1C2bMZ@?U8HpUec0 zivVv_V-tJ(Uv~`WW)8_i3^}S_(N5U@UywV6IqFpAWg7D`oq6G9Uig?7esm!X6C~1^ z$K@`#04n=u(tNbjIB{+?G8PX$0}b{Ep)Rk83!)lCUP!LvGtn)RCjTJX@mWN}@zPyh z37vE<`jWy#OL|@dq(agjz{kk!e%o!o+aw$jf4iVpd$|e~RsW6b_^XNCT9a zFo4e`2H|0rkVyl$2vNdC>d*0~^~hgx$LFD1-#=6m$MyL9zf=O;(G3)s!KI0B#Nx(e3E(oQ(2v0i z%n**qNcsW30#z}?IAih;a9OgHaMDK;5O6u7@c*Lh#~2!f`7zFkCy5#*9oX1AIpXqY zCM_VW-1$mmy) zG$4W|xGK6M!9s~ND0*RutD%DbA#28XoWHr_@;oCxD83R6LrR(P4wSqKzKWQe)Xn&6 zbV~ro{;TH0)rm&qy-;~oe9dTA!0I|)|7j2nn39Dw{n?SKMlO{e;jojIAmLH@{pwLM z-FFlpq2c&iGy+1)I_70PG2Val=#|EL^!Nre(x~V8Kb`t9XC))zoEhff8Ij{mPaoen zZdBXxB=uL>N($1y2~k3DBF45Qg_tcV=WIz)Xv>sb*_J7|ZJE+~ zJVTNDdQ#7pWN=%Op>0VHCP^B_chQ#0<63ASX8I;d#IPmPrmW?0ZL*YLe^M4&6N!cR zCbE!NBU3;+Fi_rzv;97NGg(0dt(X<|4mjnPwsdz^Tb+@%=){1CMLOcTM3L=Z*jf)& zjsaK>*GC1mf8!uE+yGUh(*XuLUce2}J;6eYDLlw&=NUD_aU-JDcotHS#f{0nsIeiw zh1HelIO6T&TZsl^SZFa~A)fy)3)!;xW*h2_*k?PskOCS>RurSQqM`^PPMmrEZB+_# z_zq%XZi*#>Y$v)U7KKjCep!?$(dd^&$>Y12U5Q1R4R;;4sNF<^F^l5)SBs*e-e^&K z(1na(3{#OjPD4fiQCKRB5tim?m_GZn6^>cr-$R-_21K$K9i1tW;bq(gbtD(X zH7=N_porU|8nh^4yX?>{xzppe%bsY!JHDF|?l9WT@sHh{hz3&QyD8(&qusDScEgDV zB>KqH36$%Kl3-nkrV|iST+s~^De|M{j2kNaM@aciM=Jh`CU-Q{sA8}s=Os2{@9?~g zG4pz$LfgOf*oP`n$NkL90p`UMT}XqEJgOqkCoIvi7k&_xk6FQJh5Tc|R;3h*rtN=e5O+q#pGqz8f3f`nypu;o^ zj-s9nbg)f$CC=z%n?T_)ROb8l6%d+b^n@Qjj#}%H=PM(dKtw|E6R7<6jrikX#c#x< zN*rI{K4^r|-9V93&fz#2xhij45_s0ilKlRwJ)<(tp79+&=^xHp{&Yz3+X|G&NI}2h zA;+7H3M%+XRD%Qtij?4|&@CY_Pyi5#46ZSX%b#d8MhdcGAI#OW&J-9dN%&h(oot1e z=QJ7wUCuBs0nE!;=H(po5{ND)=#BY=*WkjSp=2L)%IiqD>Ua>Uvi+MdSH*)-HPYL% zC=!N;pnGB+s=rC|P@>hCb*TPZX�KKx+tRULw$ilwV`=eDU+B@OPpc*)dm?{6yvf z@-Wl>G?^*FB=289T?uL<$0XCjn2A>SMN~0f_Nw8Rh%z#&hF?av%#9;*0LNqi;#W`& z$ymhMGRhUhBZ;Q$ERjr@T<$cKr+{<|ew7$+J;Au-^h5{N0XH5+bP^b*d_k0OL7tTp zUZR7I2RfA{B$Nvtjhgu~jxoK#F-gjJ4AGy7DYBTEE{w;L#p9yM>UxZm2)y4@bCBCe zyd#TW|lOSkdyIbAz^;9 z3T;s8H_D5|6GmDOr!k|}*G5{i3Wr-Kk`+X>*1-}_LSKI0>tO+d@J-~hql8x;)-LOl zB^yuyejRls_j-5}P%{@i8I=+Vu8j5nXjtZW+i18Ak@-iCn=(8u>CqD#n@Wtio|zp$ z@DzT7C?K;ViF`NyQnqeupE#z~N$jRl^LN?3_vbW1l28#EW5L6#8#CL&?k z$_hOdg^u$HF!0QgDvMvMvPMQi>6Pe#EgO{Mn~lo+$czf~zzp5m3Ezk3pdwb%3QlFv zU0iHvj(F}!I|rII67LAGJfg_W+|5J-!7-XgJbaWr^7Pv8J0kNN>df$bG};LxH*-et z@5o2|%RKiSPEjOMD}EEXYQLp9;st2XF&cz88W~LxB*;in7B56?Sgd>#-IA zCcFrhBC!LBcKllN9zni7M&w;`eqgfDm$?)Jzk^2Wl zHoP%R+@eh-PA_>hk>)KO-#77h`9lhhW6abIlqW!#4~UG3^a8vLbw(siuofvsQ8`|Y z%FxN?e_YtObO^j+JaAki2c%=qAQa{BO4M%Ho2(*WLRv|HB8c`@aEx}nMYJ04cqz)` z)ns4P*buK_btO8EdXu$8gE7iX04X!Ue|Erw*P*_Mpz4Vsc*mgRZ?-D(F@N7|lvgpF z7lGeKBaw78-oQ{)z#E7;xOLAG9(5zSCvboi$S*i(B3k`|14X=<>`UUHWw`4&9NZxq z~dlZ|YdY+?JK$-Iv_em^n(STeU|QmieL0&V{@MSWzBKR}E>mZEH#)M5L7 z+wD)x(LXaUgUrhp=4FU^`O3U}V_v>9FF%-5$T*?SB0sP>8x(_;lwl&)}gT|v72tpCi%#EFNxWt{hEuAA|vr}W?K+7!^0 zJG1C>D&I>>_LoOK#d_B)3o@RyK&abDmMcWM;Mln(T7d-ztYm+x2dr9Vl)mek-sI|C z59ud8zaH@XCh<9CnNP`=syxTTjHDlf{nY$}ckQ3_H*a|x&wJ)*W8UjpW2J51z0US` zuXd__QTxof@kC|O_4a)Al~*o?rP#fX*{|V4X^9NOKevRXKaOrq`O1Cy=bP2{%uJOV zJj8alp8;2r*ax;gDDKLiM|QF<%VYZ`(ruD34zn#?5BC<4R5Zp+}D# zTDk2_QE4yj&V&zjMsvy)GS+44s{0Q$URzz3*|k?jr11SEJucI&S1n>>mgH@*x#x4( za>~!_((G45{67{>TK~kXXUAT%WfmE3y!&UQ?#qu~{c>Kw0ZZKnGbSCqb^6EQ#K;@% zrZrcgAYYmg-;b;#b50!z_l!Ow@o{E|9Dj)1_Y)z8&ub}`L)ilRIAy*L9b+iM0?j}U zwl|MYFKggb3_K)uX%+v_d#d!u62T!(vCI_)IiC!S-!96LsP>7Ith!+5V3m{=mynVu zSFF_a~%cg@my&WOljZu)yPH8T5N@>YXwUPKG{D_(EogNDsQUx z9vj#7A+bFP#oN^Gp(WG#on-0TbCNyzRNY`tKMSh&1KlR zrt^o;@A#nv#F@ z+TNKFPt>|sSKPu*6^2e&xwXt-zT`1eIaT$CRkDL#?k?4v?ZwmlR5x{RXqr^(<}`Co zwCGn#g(~|C?XT*b4!ZpvI?A)Z&-jp=V-wb7R@qN4-21j@A-BQVHWA0oYS`YD-g7ib=c@LO9ebkCz5U_*?!t{v59+En-agSK!uj}V@`QQi)4F3jZf+~M zB74!Obgg&ZXiCYNODPrRI$ziW3<`Cgqc+WTY83M+++d>eA5trgj9 z&Z{YJ@w^u~%k$xt$+z6LRd3}E`KYu+w!tD6esNGkO-CXB)U{jiEW32Ylo_K|<<8AxP5>G11Iq=AA*k-U zl72423ODdie``}Mw>Y}x*8HV@8~SWk+cjU3_KoVkx8S5Xm)QRH;-Ba47`>h(LNU}U zmDpS!-6>u&>-(-J0b86;KVRhf`GoArgHh$lG{I+f>}G?d=angx_cw)Anq|LxsHxml zaN^F4v!Pe5pF=tP@$UPbrH3koFFofD@imj)v$k$yrs#Ro=%SM1bR!}2&wRPXt25@r zmDhg1JUL7Ll7nofWU=RQ*;QOiBPV!(0?n+#B_;PS=!Xjm9271%tyb%j-_Q17`<3GM zD<6bHgoBmXv6x3|&h=MhmL2hr+3EP@{QVnoZERlJs*Q_U6`gL}y}S7GjPe@;38(H_ zoz3N);cu38KCc^K3Cal58hkg#2P2gg&+Y^iXVvv)aHd>e=~@@=dFwKoBFfCZGgV$y5a!4SzgGt<2#CqwxzSDUw&!T>LR|=ViC@- z9kyI&p}!Q*5WmX7Bcaa+15&1|Ex9@qANn49>E|8Fs&#jZES>H>X|>-LP2Kcy_s1zq zK6r-m`_0N(Y9tsKer|IpU;5WV_U@hLo6W&WIW;|9$s&1MVXm;nPHay*+ck2J$)223 z;W3SCwzRi%^;E9e2M_Swep&x{)w-%TQIbv`y8V^YUp{*`VW4lK!HP|fr@mZr&2!W8 z0--yUzNEP@yOjT+jp5sd9SqDvoa@l(#u$HXJg?<=-MPU zU!i`%jLVzKq*K$k#jH4;wou^Pxj^Ai%}zsim6#P3a}0zjDPPRa6)7&={E(+9rq?!R zg^4GWldenP`etWBUB2*39rgIYIh&RD!m4LiWjs;7zGBK_A=4cT%3_1#%W&@+Ft_LFa@lp%x~3zmcb*V&@mv+;v$9*;U*WijTdnQ=;*Sd!9C%vI z&sL6Y?<$|v73k(Rt8&(BkEgC{H}7%ub!ZK4-PRg-#f39M)qR@lkx=84stawZ1eISl zTrs}yzpT6Bgi;x1zGc>S>yoU#h@E)fv6vLUv`2>?Maat~_6OI@NINR!a=vMb!lV~+ zK~*5{$IW*Stok}#R`YrtzTjYCV;4T5GA;Vnip@|?@A6Vf+kW9<5!y1#npSjJ$b9i{-Q~ghKw)C)1Kw2 z@0?pdb!YCRPU)?Ns)oA@kNaP*?9+L(^Tb-qR;7%K4wq`w=O-LZHq!1Eo%zFbakPJO zAC_NYrg+(K+VYkS85~s0S>AW*3bSSXXWTcPxbK|Yv)bC=~g&QsXqVRva) z?%ZW!uR65(_1Ia@_ATBW1^TyRxvS55LDeJQcG% zMe3-Dl!Ioik(3nrMbKydKc8*1RJ@KU$>o?YO9`x$PCk(wT^z>WN4dLq0_P;bEcHhs zm%DdzJWP+h+=6}}C9HRe|N7oF>?-pPHher;s_Uy(lNeR9+S_S?+LN97s}{=(ye`z-dwezt>&S$? zU(qDaPtm_Gx?#I@viJ4LSF4=c!tCZAtbOr*yLYjC->p+K_VgL_KJ-nPdsw`1@U+mB z1e+)F9$o^v+XhyeWJ;T!dpT`-sn0|iNpbBp-1jTqZrJ^*EWv?cFaKcuGF_q06$674 z*G>MJ;JC=4GVYo26wj$|_Z%@#=9afQQcP7|yLOF>vYJWkg}zgzTk{Pwb~l~XH5Z8r z**}TBRpIj8Afr!QJNksum!uo7=uLUsuekGLP%_``Rnx8rf0PM)yKgT2Xl1(B%I=M; znpKj%e%F#OoSB)aQtRx?GhJTigUU{mkNc&#d?yr|UzKj4;*1E>`Z@Kfvy>{s zY*#tO*(ZN8ZXm|)P^q`&Ee{sv6<&X4^HYH%5@rV#&NuAiof;Rqu|!jQ@%{xfI@TFn zRF(0sy+!mVIRv@XeZp94kBkQN}<@fSwuUop9PiOL~ z;63|SEIek_^7ZHKOAiY6TDhAQJ^%8359RaCcWN#@b#)OgRx(Q`yq)>Xn*W&XVO0P{6-qy(2PqCZI@J@_Wdc<+#rdj8N)}_kZH*Cv}>+%a_ z7;jzq^p0Y1vR_t=!0k3|y5+{G@V2@6xS&-0^yM+uYv%w~x5Kew$wC zqpn?c+(oj2S9ZVf)Tg&<7$=%2n2SvO)|YHC``bhH6Yp>SWGF1~!^X}0;nnv|X;nL_ zi$3{#`X{bSfh+F`xV+2}>}&ccvTJMj+GI<3(B^)a?4eYLF5#i}nO~F?wT$N`UWgvZ zKO^P1Wb%EVA;I!j6P@|8{o<^#eysPI%%y`%O*3b9 z4}I6VVdx$!aP`84q&(h(NA^Vp>sK)f){9@K^_(^g9XwQE@*Mt6v7B^2Xv&|t%_$_67&(0cY zIY)^{oj#GJrp+5G3|2k537N%f{r3x&64JU2Ly;1qkWs(DUqk8RqLnIC9RGThSz9`a0?GTWjiZm#*c(CnT8 zxur&&LOPyOex2G;GGgutzDbKh&!{b%9yho1THRc^$VQ7L@e+o+G_UU}OO2S+65peE zQ%~vO`I|#~bW*#-MZ=%9@ZRfN+0e(MuV}GPVVzy={<`Nob?j-Y^^X*)8n*`f#_j7< zD3fXE5M}tPAHmE#0VRjt<_%_MoPL^x(o6Qs>-$wL z8k|2T0yv7OEa z2+--yE`MbzpjW3f@5}o9X&v>h&6{PmEI&O5cgf7vmK*Rg%?vzM|G~?5&3wxA{Pnx4 z+1(kvYun#0&k3w8yx;pkrgFgyQ(yIEKNV~A7ML92JNlsEc*Sdzlwi%~%W)>BlG>Dx z1o4-do;pe69*~dnUoyStqIDI|0oS)#p?2menYGhB{qpLIy~+v$B`w$%@0y#jzB;FS zx&P7tBcFC-+_YO-yvXaARw19{`K-XC*T>g94UyJ0?Z(>$^;Np>H1c)Kson76{02&! zU#e4ATE*s+ZP_)hrAnoKv2Ev@qdLM{Uke{d3CPdX8>%#WBYn=q#cuzp_NqYb%xl$; z_ur}hG-dxG+Frx2j!Oaz;+`Dxe7kSfA<+E3KxM@ixab8A+uaxEJkFb=|DarCFxFgO z*h8GXo~G|EzOy^@N|n(y z#=-0PO6NDzG}13Ux8{2BdFPU(zT|S#|U47$%xIHm2ze@##I*_;DI-qVa^jy}0y z+wfT^Y2P-7@ci4C^PG&kY+5yC{NLYi3$6T^bb4`H^Wat!;jFLWp($9~lSc!)C7x)O z8_Sy|YEEym3^%e!HPVfE%$M{aa!~3nUqyL!L<_BAR#xs8qr}Cf16|Y@wF(oCteUYGUkjTVeB*gw zRgk*;)WK_RlmTIx$DP$HswJltNGHV{P*}{nLFaVA69?a;+gd7+i7>L!f-!sudG#)d7B>WKHXnd zUo$_dZfE(LoO~G<9{vT)W6kQ1yZ4JgwDzVr;OKBj|E5=vwIQ8vU#JH73 zg1ZX%2$_0SD_ve>y9$@IK)#J!i9C$%M=T9g;-njcPuBKIkjK@A+uI;fwH5xp53Ra9 z>=&%`mmYlmJNL2x#~>%~aGrmI+d%_Vll6xAGu@@YHTXNv^FDqz@970*u@>dpaiDLo z++Tga^ANu#>PtDVex5ponxk8kjbtvT?^%8ul~wP)b*ls+++CfCU54`JN&Kj;=2Y5~ z-rD0+x2M$1>;m^IRX!c|1j$Ac#~7b2_c%V-xqRy%(_TpNunazZ>G^+y^C_UZc|3rG zA-KaXAxcmF;IPhcMn+|qlcV}zrCW_24|;WVrPNs`eqFQ@TV;~Wcd%4o9rrHH@6AXn zG2IKc=Zln&58|b1?i^$CFXMgT%xYIS3)(R;f61>HtCBuFgCxhI#c=redvG0-OiNk_ z`B}7{d#7dTKKJmw8O$k)K$?+DLTOA2G$RM})p3r1wUL-u*VR{3#$j`F`!QhNrC%6=^ybZ08eg}!Rr#6uyzeU+qK2s)-$kIC_PN z%n3Iwyj9M0^a1~F2!5N1wI#LVz5K*;Y;x-o(q3`?*Dr~8tP6y<#HcwUniTq($FMAr z3NYw*;S`qcR4BaU%4aw;w)0}xH7V%y12FJEjjd|mZe{0JrAr5K#Lz56Fdp{#zqAO? z?)DzGe!S*nXM4}SM>WZ1_R=k|WVdEp`oz`zN1aT(U+aFKx-B)|fKwPQZ`?h7=8w^E zk8L-jh+c?M32ho5$LW6Kx0GCqX7tSEM_#>~X)&6M4B(3>kXh}VH9Qr+Ji@IVHhRg(y>IOz z2cBfRX_PQ^Fo)Ou>eOD~_u<+jX40aIN`fmmaBy4Hd(l~0NwpD|c^wq?82Kl+bT|o? zE?q@0Pmbk8TMtHKbjy9i97r|jEv}0r4rr!_@-mRD`e_j2u`o!yoHYaT$HH};8*Nx52Fm2my?L70ff&3#d8QmO@{ z9iQyC+H5epUuV^_4Ro(6H606A&C!zl?+MCvA9So=e3l$>)+vzJ2lT1w9OW)Fx!LN1 z)r1UqxzUsAOvtQ=%nYwNZv5$fOdU_yELK(OI+$F@D@|e@2fLHVGIgwZn@gFfkN!6& zPwq4RGq&nsr90C5 zESn0hgU>;K{Yd%{Eob5!G4d$cKUl+wQp{Y2Ew zZC#t)$e{jsKx|L)tA0yz>uyFLvcGzZlYUA_KjHXqln7K`IkpwK*&EmIp{s!7pU2;7 zUdM>m-+aGgl<5ZU={`hX&7AsE|A)915aF$$oP*^$9j} zci)<81y~rsUP(;03&<>A<7EkJ3CsVQs}2q{^LISgit~P`RGXt?dW-$Ja5$HOcnry= zuB0-*LwrpC0OmJY?g*W}Q|(_5bWT)s$XVcEcNzI=<>0WAS2;=7c-%64-0RB96dH%IcMEgsovx{4#wRw5pBF`? zw~6nLPREITu&nD-;Z%I_Drt-2Y@W++T37Bs;w#CU{w)%Nbd$)i=SKCObf~(8fe}%$ zl&A1CN1=%%4y$~Y8#ds0ouQ%^DO-*~gR~-Y6+7nI;49fu$<#LQiY}``82gZR^ixAy zhvYDS)uOKvjpAB4*8CF#;|b&%3i*v)bUbTtxSWH}!!=@$@6Eq~`Rihv#irm}N0@ee zqAQx=YzXU-BP_&b#jWf9rF4>g$5~n1lyAG^&maGCt$*!Q*xCwAQ|GgmV;K{_+vfZt zGM6Os+sNTl^?Tn@@r>~3nA&sla`+X?wcwShPX7{I!>299V#wg~t0UiawUlc(@TM(g zCsRbO9yWWr)b@M|=kW2jZy0>^y*2!Oq-pjurG7fRed>{`(E#Wx1k~7wkFA)A!KuVP zCF<&|a17JC=yyrj4Wqcc3^vN`v za*aDKD~|Ek)pK=9km@)aGRRax32WM6yu-Wh-u8&RRIrkiOi91*>*8+Hy((~uv*3$( zTds^1I3qjI>C$Upuw=2)vMa-qk(P8g^N~V9}0lN`&1)8 z7Sn^L9I;_7aUVj@q)B8g-ll94l+}i$Gik{mJa<0BqXn9WdJrT&17=}u;=EYjM72YE^Ts0C?OtJkK__| z-GiMQ^X`taMA1QVV>hIqgaBqhpwqttzp#g9$vnhG1qpzMT12%|?w*)Sv)0#LYlC6I)MDcu}??;u&=7C#^f zO(D&?Mdz?2!WGNN9^Iq%!h>9-?U9sg3(J+sy&})bMG^3XB*=e6oH>zF;P#H}8BhbY zq1claE~)N~eg{HWU{WUxcFxSZ%gFqq+(`tGh-3IAQ zJz^!TqRtUWOaC?vu2TcEhaWLWE&tkL`{04R|7nmXM4l^%QnS7_4El`BKV+CbV8EQx zMduKE2I$P55l83Xl&yhhf^0%c-TC#qPASs(i&S*`N&{FWEr?7G3+sogI~KtcdT zvqy8K$C6p$g-Q$jOlGNxm+8!*Oq|4SJie5LYX0gZEJOb!EDM;-Oj{3J{M^Y(>m+@= zOaSSY6c)1AQ?yo+u7#)cBkFJE!(|N6FM#5IF;&`{kEZ=+gzV|l6b};+O^J+?6}a$N zOP!$YBP{7M_BXpV>6cgGFTuEl+avdr&mZ@%C>Nw2O4cnBJgqZw+fb$~*NEcKRyD95 zkgQ<~(zy$iU>h`hZkuSk(O8MU%rl4S{d7q=g+gB8j)*kHAA9UusAt@518^M9RD zb4pWiWK$f~CO_P*T*@RiaCJd8<#};857I-px+7;lUvqp!YRao#SM!8tQff#UII{Z9 zW)){^$Y@xbaQDTN4R)t!d~o@4YRIc6@b3}bB(44;uzII9mCF>GD>UTzRmyigJ=5_e z{W(fMqE$;}+m|?*KDfem!=_Nc%iZZ-2Xy7@WIbSfyCP*m5xR090 zelHIbYe3({M4%L<%Zckn%m;JL1 z?KqWVcg6!!5G+{}@KxSlbsbjIro<=TJ!65mE+0?TPd*Mj41nH&Ka%`+V&R4`Ef|-h zjQh&P2if?^bJYIt)9Z-wqc84*xIb5Emh3d?fqh$mh?;4SnhNr9jn$S*+9lX-u?akm z`7jIY69T{mAZ7+$LOvRi_75;7fpQ`EAQ-mxT5x^!TGN=*0oD+YM6B@^kHXRZlok$G z#ptgFr2Q5i<%8>%XFjVoerKYs*Q1co!w8G$n94G2zWrnW>MdFj7gRw5Rk%d%zeTAd z5=lASIE!VhfVQHmMbn(TE9jZ1;-w%E8^9^?H%5^)Sf$k?gv1;b%qe>`Art%AufK7H zuT0Sf1Vt|p(M1|0l!@IEkHn;xc*w|$e9-8x{F2?|*_C1=>9VAXVWM_u2gtJ;(LKcC z?BlY4g^UHQ+(Fw!5K73hsb4Z^7IhoC(pP$pL$7~+&w=N0i&vJYKS)8^GJ_ae8j`+f z5TX)pX%reTf_F^?#Dn&rPLyW!ihg7)7!~C@h-AmzeF;LVt zjP2!!NWSdENpL18jBP-4VF4%wg_k9mAr4sODgW~)e>MnF(AAJd=WOTYK~l!ykpZQy z&)Gyb620WYTg%fS)^oPg4+plvH z?94dqfQS=PM`rgj7OCk;Q|#*?$`i&TH!olm?s`m*!P1hYck%nz?vamlyk*O!3)33# zmg8RN=;2-P;RCA}gY%0}Y1iQTQ&1@|}5y32=+OPj6Ue>0g{BvQ`q8!^_gINxYU9tZx$p16b?U_c%(sw_if?y{2t z+5!87hZ}wcuBz3e@fRmZ!a$#+UUm0NKbg9~0DiV2;h(=gyJ?6O11}Au!#xE$vkby^ z>#5d&mlMbpg-(<_!0d^95R>31RS;PaRXdq(mk>wx;k53wKEI%>xy?J0^pwm6gXJAa zIk}cjRKMa6u$Yfi1BO?uUe$1`+H`9+1FA@7jo_&pjTcp+~T=Ob>!J| z^T+ieq**+4Jd80@5(*qr{P+dGC)luuG)WX35DD&Q{@}W~OCLM0u>vJt**JJ-;zFNP zRlj_UhqNwsCpbt7z&t^&4cUoX-VH;eRq}4%Jh)!f8qjEQg}j97c_?y#FG_xmU~q(9 z!xZM8U3UD{Xk^hvaq9Y&`&-LvU+0q8jpdafzWYo0E9CcxLf1RhbFdfadoBRe^G6a9 z?P1gCcGwZw$8&?x&UW;@6FkprA6Ni-T?ZzHPt8rv%Q&}P&X2LG?KhB#KRIw5@)4di zo$al1aJ|^JaSVR!jZ_F+{n2gf0H z*SBmTodzd8he`xKrYXxvsfO-m*;Wi&3FD(5i#uheo=w6-uWix*$8_7osisPk%;x&=&j>t&`!W0+vF1 z5G4})^u_m0f}x=T%0YX;Cla7}AkTQoA9X4g)=su0XeNac((^+@?<*|}Cv;{=KzdJGRH+R1J97xKglFZnkd^{mNa&4S>d(tNy zLUX{HzB4y(HnI>6U=4w5;%q4|E$mKP+88K1_nhKsa>SkUE^A>1%MTB_{)0~q^4TKh z`32Ytxx|RZ{8i8#ycPW*9k#?P=GZ-I(qac_8T?bjHk%62@zaBSza6SQhuYrgMw*SU zG1?jLW=liwED-vH3|pwB;0lx|WozBGxj4gAQn z6c|r!d7Qig$g|Bb)-RH++J2Pg*05cO?B64L872B#UP-ff!V^nV@2{kFvQ+o#ux}i^ z0xp84E_qoc3Byid4;z&KEkc?~Y-67F%`alrHj%s{5qV;Ygk%~}@)4cjHDx!bAGCCY zchBg9@?_|IFL;CDn~VdBJ)$bPVL1$c)x6JYSACwPyl&2HPSL*$O#BM>{zfeh%CEV# zVNrBi=4DaRVhWfs=^+D>gg(lU{wU62<8`*z>q`eXk@4d;exOPJ$Rtmc5cfb$lu@lw zT;P)^tI*Sum3IHl&PqJo$19~&HphXBW5b^}#ZC-%=t^m;d*no_!n)y9H4Cr2GTB_& z_0-EHO(G#95zbD9V$!C0DX^Hlrm_$_~?`js26V$dG5(Q14@>~Rv#GtX)T98 zdS(qE1oiLHL_abVIFs^=%$(IzvNr^$f%(9UYb-6NF|*_15a=;4Sa`~h-9zLwvEDN} z6;n2q2IoN^@kv{&thE~O8!~7jjcKBvZ|B92RK4@~)=o_4@7GqWH5NrC^MJCCnC{!U zi+1j?dT*0nem?dkq&|?`gTJ!8CVnLNOA1rxn;iScl+S^PUmt0jaE||;(nj@@8Yk6* z)-qFn;k&0zI49+e`Uer0f{-YHc?%V;ZL>d_gm+AGXo2Nnk9efruAgQu$_p(H>45ey z4;EaOy}z^b_a%etbimuNN9E~e9J~OTrOlu+laz=vbXFtrJRWsJ`&yXc%u4%S+p@GJ zxv@6XUI;)su+dfzR`UWtmfE#?oLaTYL20i2I}m1!fj4WCd>70I%T+P?RfnBFqB4T( z9^cb6?yj=XJV8<`!WM$Po>zbhqb)$b`lPhHeeh*(v&sLGgfT z8kocV)FNMkYJzN4$W__zuhMU0|IA#7zrPTn{7qSoVA3bBuW%KXbIMWa-c!4?_ zMt#NCO&H_#?dyWfp{1HdnturKA3j^bKQsz!lvsML81y#cc@&bXL?!VEiVd!(dtEU3 zU<96!Iu6qMM`^f*%)&Fy<6n@|trrr|0Jp^mgIMo{W(ly*VovdMd?NFp(cvR);2b}L zhn1oYa6R`fnn zv~Tz7k1VnX7u+BL(e6SDw`ZDrR{?c%x*`QPklh8^0tqz`rivrzIk5YJQz;*$Q?mY=ES=G*BcP?r2-P=^ z$|lJv{Y{I?Pc2Gw(1`IR+@aGAp-z1IODd~9pn@XGp#vA_(`zw#EunMep+}!%3 z*FXBCi9vCeNntt7Gb5$EWh|eYj>T3_n2XM@X};L|2<5hEIm~ImT>FElNyE|pOwExU zWeILL-L8(H5Ka0VU)nyLtqe;_0a<_CPA%%VG(jjqtS0S5-}8$WJU z?yQHj7xSl=D2+`FGY6gaS)8$@TU{C|@&bi;hY9SXm(oKu_3;+7aiU!Ebj|9W|<&fw>bt;^OUy z6(R*ZimUqQW)S#b6*Q_s9Z9mg;jU9qw|5QXg|gs8+5`Vi*B+Ua=$j0FCT6-%lKz}E ztoyk*iy+w8^bPwz(l&8?PendzFfdw=|4rJ)14jP;LE82|Z8-mT)zbfEnj-#x5wu+s zOgSC$-tQQb9Z`bYN!&Ad1~amXzT|k8;=&YyS|}vb1e2~xyx6{lHT~6#9Lld} zkLJ@ysC8AaFQxcrlDnRTaauO~-?cl7I)4Pc@FCv_dlQp&m&muZ6lyB{_PS+CbNG)_ zG18Y%O0|&Ea_AAbEo5{zNLjJyopZlWEK!-0Yb8&xe1&>2_Egfsmg^gcfa{j3vXELU zQLc7a8_4RabRlPuz5A(Uk-EbBrms6+zw3!v=82`$jQ@AQAjUm^Zx>=foP_~@L~jAo zMK;=xiV*>bP`7n-ERmFKD6#63$%zkvJ-WH3GTG5&WZo+}Tj8Lb1Q>^$iz3PQQTDlNZV$FFnn& zwdt}Vw~v2+WyWu;zI68 z_-1X>2HR!hb>hcz96-sG4-NT=DmQiA#^_IN38F$&&HC9uh|o8LayO5N>xOPyLx8Xf z6bwL;#0!;YWt7Sn=jkIw#y z*wKMzKJ_sFXdXOQ-7A?~TN+*vp_E!9du;zF*d9*Xbbbfv6`^-H8BH1p(g2AIVS_oV~EV z#VRcw0wFXd$ti-csF7DF?EY`-lU2v?j?I+SqGp8LWh@5fLwE$7iGXyS`e`YyiG^J_ zoj0u6rWtsQwiK-SUe)lU=x@qnqKU>nTWJmRea1BGU9mrnVCnwFt@-5fX= z$3BS7=Hwp#vK6ckfdE^Z#-cNLfq0`CKEXhsL`!Q_+k_Kr-mNnV6uX0CJ7WrgH~&pq zu^=`dNK)V>G?dz;DkMnJ7s8M1nT(fZ_V-HL>~LD1MBV1ki-(+_B_k1G!m2>qHx72W z@}T!H^5hMEtRkm+)FLZqj3TSkh>F)Ah^q2_S>>--!^;;IIsE9je@AMXnD(P??mNIH zzDLChN7d)eetn|o8hYurBD76)YJTNn84S51mf?02P7#DrSi zT6g2}oJeB@?-MmfZRuQ@w$#*v3JPYn<&_b4;qqi-MJ1mhlOyQ2jRzev_Kl=u03^v` zq60epITJxc`-737dwKwUpQ2T2eEl#Zw^Q9v&5qyTmAP6nQ|+Y%WG=;T#Y9cRS&p)X zdXw~#nmZHsOS9=14=^f=;4y+96j1)wUnf`Ht=neu7untOZo5~{Gh8#e`o9O_Ml@y} zR3q>TCnVJN+wrk4IztFk73?^2wMN9x!eWjDS7*=_jqHrg_8t<;2C~R_Y3k);ke^z$ zA?8JoV^mZMy?Dce5hHQH8Zj~GPA59Ux9wb`&2nVGJ#8S7zd4v7A`1zSK9S3wC2T{RQNIA%E#vjt80d_|s>37sjH z4mnDw4tqL`;t}tqUBx0H=T$6*&B3>{zdgD$XI!dafYhaGZ?2Ht^<~vF0ta++1{q*R zf^on;ymt`DIi4L44t-Vc2%=$r?60CK_%DOF{lMFNZZKS8LW)zD92qJCL(f=9ahjb- zf_dobc82!&&)b+YTzz_e9d>S_H*EKXjW+D65~RGpJ!6X(M>$L4_1)E0EpDxRk*i6q zr(u1fdi^b)6TY>-@#4c8HnraNq2S+n-s*G-?~>4DnMl0p~x%yb z1Nre5yxzn<0?SmqDHrCCq=`_vh$JH4Zi?x^H8Fja4Kqm_*IE&dJa|3Iz~Nk24#`N) zDA7CMy(RwZ7#8&cY8X+r;;?`}#57>xs?eYno27p__Kj z4OZz&G}_g|-^Ht0SO^I{PJZFCaUS!;j+INpe$dBtE%|pff#-}zoB8LIi7D$%yABi^ zvWwGwXu)YiXmGReRPKw;&2*bV6tA7N9{SxKg*d!uSg)$XBo%57-3ILinj>bn6oC|t zl!C9Mf^u}4hZI3(5mh~g;~$!h3Yiwu&D{n8f(BDT8;+h=z~8wBzZeT8!JI!t$zFdL z7fKs|pS=lL+mkZ6QuA|DlhuY=^t#oO2I2>CN9&e{Rz>#LPNsh*qFSh&N#;9RU zZ%$i<*YUP8Kioqq0^ZD=&8&-scLa>@j`b6T*Q>!cKNRlul9mA~0KZ!-o>1-=LJs()#B%ZrvhL?N#N#(oaYCwL{AnBu z-e7;%Jia-j&L2jFL*Kv9P+>@E2-5EG%stQG@Es&AfOoC^>0_i`!B-gN$Z@+rJCNZL z=5+7Ot9qU#6to^RpAnYtn*k`{<{Hus7C}m&#+HWYO^B7A+A^j3ta*!mEx!%1@$wCr zB9gzzScdbil&OSrwbIN}xC**$d#8oG!N2TQr0WK=;ngV(bA?3~W8Uz!ypb_N0WvpN z;Q>zTlny40!>uO92ewq?goKIJ1~77QrO{F+C#_f^8XB>%75%g|kCw0j&bz!g279Qk zkbe%$9rED^)47+F76m=un%OX7flY&Y(WwSKXWL>*=>Qs;$H~@+(2FPa)3*RJV=QoK z4Qf;%t7fU_J(Ai8viF1C<)u2JtOQ-mB$Jq&LL3>t86(y^*?=BPd;~Ze@WCd2a4|sC zYJI<^_E)FwFg8A--Hw@Tu8cIXq29nt#!`dHVH>y`z`vJOUvP$cS|g$WbWOG#+UVU~ zJ%__t_wfwOg#owwQ!*0&?ReUmEhqOnu$x>{mT#;!Nl)~BlHOH@75HJGiK^X`U4TJX zPOj$3F%`?OhjL~&j)Sk=-GA?uZ2SjA)795u=4lP0HKh^h6(suUynEB7Y5K>mvN?a5 zkco7hL%ZtiC%!!v&`qWqiU+6e$;(Gt8)E81k4W8ZR%ozG(Z+N5bTYz!2gb}HydijkIkxMA{FvVXL8quv`xq6#aUZ;+6>ho&Lk&r@{W{ZW@Oi145UoZw!p^Ujz9-hrk-0$csmErF4kwyx|2-nTm>n z`SXbL5P#c=o+Vm#X02ucQ64E8?)=S5Q%#?=EqJGqqrJQ^nshgpjLLyVjUEERpao=d zGdGw05=b&?+5&n_CK;Hitw=HK@OLG+C4(@|CEm?cErhN%G!r0U+6@*@a=L9s($|qk z(yRN&v|3**O7U!Ng^XTg~# zDAgsPmwukQG9R}j7)|Vx=Z)wGOD%~h{s&^^BMTaH!p=Y1n&6$X1-vl@j{v6c4ec=X zaebPZ2FkVdMpuQlN5e_(^K_=`bfujy!uOE5?Eae^kn^&DfKLzjA-){4GWVmGB{C`9aS`5R#Xg) zmH-o?&B`@Hk$-|Rna`IxB>07E2bMn1`_i|#uI{q6yX(T&xcjn|xY}>ew{|?QYy8_! zlS^pZPze6tyERUTbD!^1)y0Zmz0rz`TXf@XFgRpdjX^2NBCfEn?&0F!i!se9Eka@^ zL5l^ns8U!Byno)J{3ch?yP^MV+4RF|?#)*sm5;5%ku$v&b14RrDMb>tXYbB zOrlc;Hk6y$>>+6cTqrGkosGgB?5S~77(j1zZd<8Mi8w3r4Gecsn&yMR!hH zC8-)&?eAKcEj)Q-fa0mp_k_ zKbCK->NCxKLvEd}ZU;vRxqt5t{E;yI{c8972yfV+`J9Ib*$AxmnUhnd+Kx$Ri;8-c zUV7KcYPh!1fji9xk!IRK^A~FxJav+8A8kfYFTgc#b`+|NG1-bRx#JazskgncfkjK5 z$`*$kqikhWUrxcPUR>RhcxCbZk*1Ud{GYo5j~w2^M`|3Pv0KS_J#K)6$lO*2?UF$L(f4zG^!DJD&Z!wnUASv9IK@U`HyR zCE*?gAp%{uqQb2+;q@?6TbV#1Py&xhNRQ3yROMQcNoWjLH9RB)z!m8y%(;(T@WflZ z?^kCQE7|HXpHacAYpG4Sq>BfjLU5el_Ac)GzVTL2afVo0Ol7uahGiSfSe~t;#TEvl&+qibDn@%u@5aR!26;#-)s?3ky zf8zK3T-oGvaI9+Fd!G?isAB!XH!s1T9=zZ)x)F)yCSbs1)ZOd1G!~@6GiMmY|qW+&^cTtWH$9 zJHxFkO-EbD!UkFd>%SU79SdCI+Px^0sO@Agc@HnHY^?@_nMS{c`l_&r)q=m0V$8GC zSz3_;4gxR?B4{_^@6DrQ4hjE?glCFrPTuElFPFQ?bFBs|iL^ZA^#aN*qLb7f1*mE{ zu>^Q1qHf;+EgKw7%NctdDUw3~AK1oeoq7~AtXsNa*d!$@in|Y&B*Ar#;y-Ul)#=I51-xcBnuUcil!8OhWDaWXgDz^*F%EJ9ntN1-@g z=YE;a#r+R&Mn)OaCcVQ6IHfgub9vXzC;fqyZtngbZ9vsR#nS@3A*(=R>E!4d zEc=_X95Dp~`7IE2r(0U)dAxOma9Qmv+n6cwTay-_pZ_gosFgPT=G18cHE#5HuAXGg zPzKqhj(G+@k6xGDMAo|jk>6LNxX52avQ5ryhH$}XEt_1o5|f6qzpRnG=#5(=G^im`mC}6A|d<9ea~TdAYA6Ju&F@B96&`Vsz}ZQV63I z{vtOi2Z45C-&SkH6rbjJc&$Sq!NZsf)#C;vCG7U>1?BXE%x5^OKRg3w7#dXGi4)6E zl|MdkfQ9&JOGQ`oI)xSdd?j7bBgqS)VD6`oN!B?Tv0CPl*36O{NmsLFUCn{BqFH*` zt|qSP7_||jWKXVF{C(lq(z)feSMhJ@uU!hrdnFFj85g!|7>#GcNP1Wmg@RS30+{BR zsFH1(BnCNG;477hS39qr@}ch91Q>sgFVpV{FhD)^#zi&I>bgNu+7hxVqUjF`v+x%YV+(N?NNe z8RwR`Rs?aCmD6C3^0hJ9KEV*+hcdiE)y0ugImc(m9@7-2+T%xfc;u^n#JY!*`xDmHklxn{OI~UB@ zl-eUMou@>+AxQb#rylVbS9kS0dsR7Y)GswnK>z1%NrrpB)YE@)G8!TgPlu^zPOzwW z97l9WdP7WkCh6k$A~G9aItuv}{(AP`Hs{GH$BCsi$#IEuFgBD7AWhRGABc!CKb z)a4!Yvd<97#`Qm_^}-1DH})6q1c=mBG~Z4%VVDB4&>J5D+=D0&m8p-C6hm7d&*eQK z7#0_;J97Hf&8Sybh!W-%(U+Lq^3sIWE4|A&i0SB}1nXAAqZN7K_!5!%+Cw;6nR>xq$Y z%M<@K-u~kcF8wtL(CB2GET)gNyE}3k4m@k%C_1Kj<6O}zFB57Wu{1>{qyIYg?nXyN z8IX%>-Z1slY%&0`w3NklrMlC7eleYXKc725%*Dti&QUy_&a7Zpi{h*}U+uZO$62rM z(JP`c-&hmGd~aCEi~lx}4qLD0)^4X*b4JNnhCN8XwM7(LTt@Tb+0+gYVoDxJ?h~PMGxxVDHg+46 z0%ghmJ(!V2!(0tR21 z`TdwQZsjVDgzrr|saPh2UY*au9sTFZqe>dUThCOz^YNu;Vyf&X6<5*iI)jd6jMc># zdR}wnxz^mHYMM$;IK6e$zx~t@j#Ie?U;M;#@&JA#TdFK5DDTQWx$=72vocv($Np234|Hr6~)X9((*H%18A-}V?3U`9t z$Y2z4#M#lw3b}k+9D|X!ht!!shr&rXXK}2$8r1%GsWvUpV1nul)_j(_novvcy1BqZ zi0RfK@Pa@uTo)+E^%@?4`!$!+@#F8QJpRoR?&K0&>m$Zwd{j|GI8DHIps8BxBTLG) zln8QwvjLh$rwClRi>nNC;Xuf-|7z{GhunjyIVbrZot(3z2$TmrB|CJiP<)%#OJ zZG?etvAqN*K%KUIBKSGU?JSw#ujLCvt9b5CzvOpJW67;PMgBXvs?+HTx`sHR>)C%t zsZP+SJ^fyqapuMAku8O2VJwbGw3i(Nd*K%x;TodmVc|^ISWv;& zp%%dO*=7%1fyl_M?=Kc+-&lvG>esBp>Dw(%@=B#odufbN8}*$_g-X&7G`=GVNT~{y z`J0U7*3kFC8|LGNGSaQ4Ah=mM=pE2v68V%;m9$I3^3`cvV1R_CU+)gqh( zU2Q2iN*F1jt_%=GEEy9`cp5*0mg zyFz6wQ>@RgD)TOJ7ib}NozPc*L8xWiLvbOa@GW<@;lJAYh;ken7Ie1do7bfoxhor3 zlXG_6`re#mbSg62fkPANS4itc4l+F_7llBXSy-a!!#{CuMv!842ES52?kwq@X?e7c z2{={?&G$YFK*ubRRp06HtU`eMuU(n&W zNsVR+B{c?P`ozf>K&a}R*!Ms8b4~h{eqxu>wSj)ux2~!CF5k)Iz74a%mEYqU={?w5 z83@DPU;mjG#K#S2ZUj~Nnw>-rIhr~eBJZN^(!R3f!^*%zTO+cyx{L-r1u&jeCizU6 z`qUTqsHmr3J5Q4F;FsddDzWz1ACL9FTg4kmy)*NCNd!EgC_JXCRxp35F>JbJ|ZDUs2zIsJ&}L&M0|6fJga}(##vtL?^*oOl?)yG zR^5^8ib^21dd=){cW{UOZts>+F1AAUY-Op7I#~;>v_dBH zFLId61!|9=MVLo%rTiJ*R%35edW;|4h!Z9VpF5$r9cp4~@T9Rw$wK4bN@#_3EqgqF z>1h?U*U5M6h*I0Ql5-q;f0c^hIkGindFXNHJn7_whAS}svldQ5NiMKOPkeAe%M zGKhNesN-DP{(S&v!Atm?$L`+`Za^!K+{?|d&i=q+p5%eawFIMt(2uJmluTxXSr21k zIa*&p0dY#Ywi9@zY^?W!rhb$Uo*Yw80;ly&k@DT|6)nZmfXXl!l2r7LCmxP<|1dk5 z9|UG_BQt^lGX=6?1}hs0Bi{YM5%dXU_!<{&#;K)rB<=xPa^L}v7n ziK%t-xj%cYuDR^=qoIl|mpx-~}csugtfwU8I@$#S6B z5r4Se;b+ZqDtLsDa7B{dzX*-ZjjJZu^)0BPxyL!Fx7o;fZ*ne_Dl8eJdnP2PGSA@M z9oKj%&@aM^V?s}-+C8p^Eb$Lv=(>$oN_X!57(LPgsXDMh`OBShw1XI@zcPUMNk_f(1^Q~; z;(Fe?_A4h8^Mbp zrrd0WYO6+4LZ`-;qR2k)M1A+;B5bbpv4&Aex;6*Ppi#H^qh|1Z_!bF2oUP^ero*ME zoz9xEL^EvEp1;Bu4rlK6uETs^-l6sQE6JjNT|VCi(RdfWiEI{Ef(}Q_#wY|Eg+|2F zYBpzeUxxj9=C)g6{^6l8MkfEoTRin3H1gni7FWLB@{4^IuerXJrdUkCM|Qm9{NCw& z&bvmR_hJq7NRIl|1jxfOfqP{Q>heLN&bV9`@e`dxM*DS5lap_yXfCXBLVhTJ2FXJf7#E>h%8rHIz{vP8#(UIuj)Z=P!dEI>tyYOBz_2sv&HU(#Ji4>)+ zTR#R!J14=GjZ0@v zgLH~PWB6Z4@mBm!!-7$C^qvOi!76aQimQ8tsw(+qAU$0Uk*^cDDXYd}1|y`yf(+KS;NYK>(4$a9|j~%Ayq$AjB zL4NdsHrMeF6g0;OgD2$UYpKg-KTZdm@$@;1bY=LZr1bROaDJsER8&&hX$DfeK_QxX3$3 zKRvQMWekCDP^I4dR>#IFkmEH@Hjn0?OXjL-C4G;X*O!%YRtkKV`8)uJ_Y@rZ6%aD_ z7NU(T6Ii`^aoNQOnBxLb zrnM8}TJ0WC$QN{>zQd@09muT;cMtE7=5*x8)?bvooY{YiK%-tZs(@n(RpveIP+6qP z^UJfY`pzHG!ZHs}@7JF~dm9s?fLvq*W1mPyQ=Z%r;{}n3=I zlrICqM`LpC37qG_0TDaJ6=% zHGcM?JKgXUIBt>ro_u1f&4+0pIP{!30-Js4ZW7$7VED&Cc-fx)`cU=u;^%KMaxSl} zM7lcoR_&AZfIr#@P4hm}^~u`n!3?9KGG_MFh(_f_H+PpcxpIE@_?O^dhJ}1|EpL^N zg8&~!XIwgtUEwN!4HJ#Td;)5VDC(46_2KoK%HQD+SNZ6-0$5!{ zW-F_P0LJi;b^% zmWO(@`6F~~(CZLq4p{Z`dNyKR!D3>?#8ao`w+>Qv@X;cupwBUr$-T9$MYL=gH~Cuj z1wlpk7hP4h4B#TxASUpu*ji141FdI5Xn3e|x@w2iH3SbCaFFR-L~AvfX4#-@7qSKm zNL<(Oq_uiY;~5)D2k{~$v8e#wgj9k7qh~&-Xsvc*y~xLkl=4K|&7OvFK?>`aiNdea zh>gwQ49e6ta9YJRxM97x#EPH+E%oD}c(7D)OQf-gG~>o+PFF&c)b;8$^%Aj&OaK7| zP_~T#E*h|G>6)e(y&NbX>)ix61P452Iw$cGTa&M8(j4w$sa}KX(~@Xcc#t-Ygb=Ry zwKlK0nqJpXP1Avip}Pbzs1w=+m4{{<9ihy+!Vawo4-(9LaM4-~S$QT>ZE8`V@ieRr zrY<22rBh@uuCsxdgW*xKB4sFrS9tJ8vBfK)rUhTC=fD$$3LpbTLam|CvQ(vWhxkhP zJ4l$$QM9y`B|K()lm9^d($+gs0120_35h)^f|k@BGYX7Vm@C?fwBf0rTqX?PX2dOv8sq*))Glu=9)G(bv*+baBe7GDz+B!PEDJ$y47~s*@IlsgR@U71ckhgY_225b+dBUuH;K zLN?p{f zaZCRFj5c)*>@+-G2~Rylz-*>-2gE^EussZqdrYWz##u=8qr((6x|(jMWCkQ_nrh9r zkam($OduEHO6Do@s6d|K2!_>Ao>o>Zz&>5oCpY+uE?h}EGLSYjiE@{+G#vnD4Na;P zQm`_r2K1T5aGCJcHW<(>YyNWsE#*T9vzO}T7D0ib^<^_3vLddSC366$=MX{ErJot6 zC_uMRZ*qIdC5TAKOXgD3UNWeTqAW=(EmSQ_rBq;?`;G;SY9ath9z!s)0 zCw)gUl)aYTZ7Cr17eO7%??|k^_5w-P4nZ2QLhlD01vb!v>AV`tUtg>-v`CA1sIJXe z!|L6>W^>$WeeQ7*zFl&is!P{*LGoc9ivOIYY-h|kYP|Gvb$iQ+*~h0tIDyqH`egG; zY_mPqpD%#1y1V?6vcG?Gs4KesQoSpIb-tU_IIaB`<=NoHa{*a7O#r&j<}!z@K~$IQ zpUtbNu9M$Rf(*=r$Ih=C9I|f<8%SO19l~jI(|RYEOXk~CnLzOMn7~)yn7ghygyiFe zD|gh@DPHht5!!f?dHwp?fO)0rMPM1(eAh9Utz&R=smrXV+=<#?Y_+XCcQnatvaM<$ zqnV=-|K^Riq!@}2@s4tA+g-#B@C<1%nKaW)21Ck5a~6wJkZvpJ%I>p?{OI0++Wx?j z<|2sg#7=d_`$&Ta;Y9w_@#WFG!^v6=vS$Qh<2Q9%W!p zGiyJwL0B9d@w&|F3@_!hAX$AN)Ws={Hhj9%Kre)R-|{*RVv((<@O7y&4`u2!5n~ga{U%3Z5l17d1m4yCbtoZk^mv*_dCS-@v|*e?)%7 z2_W)&<-W+i(0e2aC>k6wwq!pd9p7=q$xh`pfd#*t-c+Zyvw%fLn!KEFUtrxYa0*Uk zeSC?9zx)=C*sy>(>fw5)U7tDaxWiJ2z271X^{4f;&JAWP;f5?^FO7%EC~wfSa#FK0 z5}0TlJ%m-@W3UOkd(1&w`#Pk`Xi^tmov6i=$FFTRM^ulpPH?^oYEEnGF%-o-_UC2r zI)vdtk;l6@BoX)}2Z9(C$Uc)|a(MXKec_=6U?l^f$?NHslA}?JBs1xVLrOD&+aXfV zo6=OPIr1tfq_1i&BZ9^%*PTUvdP_{Dj zl87!HkP1W}0kGvZu3>PJxgltRxlJa?r+%nG?3t3mfZ<%zK)BNTsR7`xEJ3af>bGcQ zE6t%RZDs~@iDfU^DP}(4XFgQ^+e(wqu$i3z^uRr?JC{s_`g5ktD`#~*J{=}&Z#M;dTR`&Qyol+r1vy~k z74ud275bI>6?=13#5%+~Vn=1Y`hxqB|5YMtZ@7iMSHz^Xd4unw8KE-7B4je;bM#~S zV;uUbTWBj7{$lq+^kP`d#q!f5dJErUHIpZTn+fcs+szCDTGoHdHKHiVh5;{z8sh>y)C&+IyYd<@bgaFJ%Ip=k=4@WOG>@xxj5+w3<_V=yl~Gm! zWRHp1Omkh)rX2t?PJ6U~^lqiv1Z9;UeJ3gJS1%C)pHVN~yDizPp}x}y+=V_OVhZiIHKvfJy+)92BPJc2skLaB!Wd~{dVc{gNVZk##ROP%2- zbBO^%d^&I!@Bo1a(ni{1I_J__EyU6{3PWO?jRb6B=^KQZvt89d4A!(6X94ALU$n)# zpv`{ zHL_rR?e5t0wa|}z<-;87CbpIMRLB6aX$K&XC1NMc-%~J|J0g|<@oF5>ws#yKmH_*R z(cGXWxsG14UtfTR`VU%vlIPQ)3^rc|69omWSP|%q*VB*Y)k0F`CM|{~hi!H1+6=hl zj1?xU#E4)VAbB~nIun~O%+s;}>3bZ6eJ?|P*`GKG7KMnKh%%8l34Hr32K2HHkSBOk zZV_c!7Yxs!RI8=Up|iPvTyS*y;ixs{p~ZPp-+=6pIqs(*5uJT@8A=8NmVmT}0b7WX z*EPILLJ4$enxNvpVHA!_Ityn<4ZL3KqL<4f`Bi9!Ai0M?QPRtU@S*&364a~;nJ5i2 zf>1!1eLLF3JI!rmDHzV@+LWpO8TR&A`fSHZbmtnG552Mv&6)s^9h5+~geW@Osas!i zQB})?k_U1gBV%{w{x3XSWmRoo23chd9wC6F9xG#Z_Nj^(V1CLT8HgGhth`#)=1wMV zqol!dN`ihQl<6K8?Y_V>er|YC>Zs<`t0^G}0J}g4LxK>!Kq_?&_%sn^kDM`bDrt(h zo_w}_Wea8$=T&Y1xq22wmY8E<^Gwt`AzE*(SS4cOof_;j0n~KU=*N6 zM=x^2E|h;>K`Wqdg?mI$6JSIJ*j~!)+`cN^s#oYWk=V^TKO(r~4&Ax!@K>n}q$sV# z?Vf@muVTgU&kNKM;+4CLSNQOH6vMB3 zSWr6fjwojzFf^yhjQs(YoU2}UR=|9O^X4>{+#~*avh`8Mbm+I8voCM@Or@hx{)G*P zVm|fEv`3n5ejm(rABvz+2)X>txCG6cLQCoDW ztaI#}H~SDMqJrD1EKy1O@{tqAH=RS>2JLAJ3W`=8}0>FeUCA4Nu#*ViXXyfFf>5Y-q3wrh!$X zwpC+pJ$RjaZ7}>~PD}en0{!Tqw@B>l`AuHyCpQ_Oo#}`=d3ou0gBS;2OdFzG=6P}Q ztiVfHjjuJ{{e>L#zCVD5=GkVv^$0{+>5mllN?~PD>>m^iH6S-U-6Mme|7{#_NR?&4 zmtw8~{e}xD28p~;bT^!zY_{>@S~{WaeL%G-w^*5R>+=a%?o(-pSPfO*Ett*-aEh&2 z&~MJ2{D|C^+A>%_=YO|g>iN<`^e;K=w%ZW&3HcuuZwXKLjXu|iuWAS7mQ2t-B zc>h&H`Ckqo_WziHZ2bOJzyGJ2@_%y$A^a~Q?^0jUWm67o=tKuYAQeIuLw?Ya0vX2j zxPu#;C43M;;yaxI7iFBK93a`~B4oEN*e5;f($dnh)kA(Zc)7lQsP0T#yUOTfJMh_X z!TS?j+ng-@Xi&KHFT7iY7PU$ApAksuca`k#r7r59uOO!`NAP3EW^Ji_(Al(8s&_ij zBE%;ME#yFf|IjGF#BOD!k>ES-vFdJ>w3cQ7L4V^pmL3l2Mj;?=t7>DP@?WX5&Ki8q z=U@|Ptvc#}?Qb&45}eGZvFCf$xxt^@aHa?m>m$Z$m^Dv#akYn@68U)Vz%c#Dncodf z?)er=i%E|^fqzQ70#iXpHoGDQJ!0KPApE)92Xhyf}bv0UFD~4<{ zcd{~ouN^e}mu+3PFMXMGSahB?ihGoLZZig};fxZdM&7bhJk=fi$4gbkMK5I+Rg3zQ z9fIE$bVTmMcK0?rK#E`BJ4QIF{<7=QR5|I_ zc1{0`u<&!0-p^0K)4Os2-Sn@`71q8&^ph?@Sq7^x$-nSc9Wh^cvCJrMLht=9>@t@M zEWNgmxrhpSaS8!N^*0oklnCK)6~m7owl8H(i|S-IvS5`V>q)BTi!|^NtzM8&+Q5{* z#R_U**?p8m73(gB@9^6k@vIRymnGcDs3Y;W$V$YtCT26C-7aO2F3rCyxys{GRd$?z zngj5k6=^2xvAPTD*Ij62wV2@-3?~lH)OKraf|Oa}A#v_vB=ooMiCaKYzKqj(a!e_2 z4T~wwS-uJA?=9pAkx!w92nyL6>1NxVj5yc?xkOxY>q7-3NJ`Od{{qA^1c#nTCZxS)$ z_6m@x>?PyU1!BcNzajp0l{H|bct2)dq|hl5JIiKBp`U$|;yi)y{f4fY{CXHDe{jXqHPRp zLx3V9F7gpWoidlqGT{92%_lo=A+)uu#sw|fiDES^(?X_`AAB%EOE7jIhr)HbTE5uW z;i)?8&9G}PxoYsm8dGz1X@GLJUpdwnHlAhwNGf7tx5g-cgLk=tFZCJsTq*ScQJVkP z^B*QD^Efk{;#=0BJCLrwig_)Rqr|OWU7riR!ahtWhrZU;{t^uF_p)l)ATdgkx~(7+ zApCj`K_3E?O_z%5ki4yLtrnmrlPZ`0t9vhtH7x_&^P39uw)^1YG9TphY;OqdCNkZo zeY8^mJ-4&g!a*B<_qzPvyhEGIQbC7^Nx4{r6vCs)Ciao6s`qk{JA!3es3k-_I4TnJ zb5|KQ6k_+tvcq(wJ}dzJv3kKZ?oWxV)OedzEx6_vgo~339H9nwq?pS&nK|C^V<LlY zyapVv>k#WUF(YEhHo+s-O5o$Zy#`DrTuNn{Fs$icHx>O^Esd9lHXU`VYZJ^P<%n29 z%QKHq<#)j*L!hXY<8OG4w7vX0+uwr{Hy`D^loTSsCTVVjiYeAu7tO5W88=6h&mp~6 zZ zAH$F|#)Yscw<0vgsE{;K$~4CL5bh7W5YsFWvDq%rDWFb>9|)rG6sZ|wkGYjMgw@}N z3JxJo&dn&F4I4kdjn@`)Oa0P@Cbw*kp)L*lMC>%1N_s3wThzSFs8WM&cRQF3NLv7x zcMA-56|IkGujtTWx@gS7eGvy`An9UHkDP2TlD!visKSevT?;J=J%v?XU?DS5lABRX z1myfNjGhiqq&V`9_~B!iZv4v+7%>5j=K`;yBp>ah_!UeVBt2uYjN*~x`1uq9_QDpS z9CGgT&C$4u$_56`@;Te4jpw|Ucqbk1Vf4&O_=~5C5;YCr=V1hA`6|5ZWy`E$t_F$x zi?uxx#z@I&8>V%6l&k55ZfiMKHhj6&K5jXfysUPgY$He_cTz1$If%L;+x)D z**y$v9|{Wwx3ZWck4cNvp7F&c0@=E3Q!54%3RqP|HwtQat-9s%4u3dw(9q z^Y%f)pUxv9bWV$eewwL>cILr+NS}4JT?>>*vt(Ky+74`~6$rP0wA$Qa*sRHQ<%_n*9L2ohy_^`kOVG{073;hWnJ=I_3)Sl||4wV<(9}6%# z{^O;D)r5m@1xJIFqd0?IlF44<2A*0Hf&G($Zpy+cV{n`Clj}Eg#qq%hx0BuyqB(S( z4j2;Sz2AE#n?)+bH#^TPje)5LntCWO5<*_`dadK2 zP1$&WCtGi8;0vEs2H{dK-9Tn2DRny9d;OXFMh`Zuv>K{vKPiYi^EK_U(fWb=mJGu2H) zyxyouTcU6H{~0dRzgG}!Awodhvj2C(r5Xh7|6jx9|8D%C{9j|{|6_$pqXA-|q1MB% z3dOv&pz?s%Jm+*?c|HJEo&@7|Pf$k(Bv}MU?(6_f`Zw$2!Q2W|gf>k$&5}^6{GOKh z5*DiXy>N;c1SF*JaHuktw(n5kpVwPHyZJu5g)Jaa(P+Be*I&ISI{{bxCq;ZkhIY!0 zH8txkUj40I{NBsQ$4afFbDMnRU7V`}k9(}=mvJ?*lepeLNs>c6-)I`q2C2_45(KAf ztab7%V3bAun77rcHza2xA%VZOTDaty2^B5dmD|+VEeoY|B-=BmZAeq1v*;(0q4^al z(YbA4r*b`+OjN&)%~?Szy9i|~S_u$8H1#SVO5(DCqgfXI2`Y!LzA1#ZO5o>|+JB9tx2QBi zxlm94i(lY&oPaU8oksUW&N0D^;`5w;KxPrn&-e2tyKnjvbs)A1Z*Rr^k>;KQLea;% zuqcqNs3nVzm3b|{;aVQ|OoN|3AW)q`(Ic{QaHAsA_>9eX_Pt!VsM}n;f|Acvh4%@o zoaB$Qnok|j^b%lI_|Nrhb%iZtQY!&DgE8}n>mC$IP{QCuunR)HYHxg7pwx!<_KoXh zG7V8unv!6qH64^wj_zv@S=$0y`JvFYsl!LLR@W70`R#|)ptK5mCA{Cf z3d8~|`AKh((w2<`M$l$jxe)8c+t26BBvjjpXOBbEj@f!_o3%gyx(8n>vm>Nn%cWkG zDz4k`%x31&3S-$>=&V-tLZeFpb(p@&M$ejLfNR^um19~}T#_idPDrfKHbH--Mggl52j|v{c;s<9FA_Axy3+XJCcSUe2k z)na#~pj)0Sdy`s<*C9X~vRIrj zE_|E?F`z6jD5aOOqMwjBJpunf*F1jpdshm~3s>Oz-eE+_j^}R$#cKD)j`dByn9yW$ zmeHZ_&B!)vW{Z^8l2bIn2d-^W0$}UwqUMauN#Ob1!@lQQ=A|P2gQg3c(}OTT^uFdyu;^u$v zQoxJQsZ}=7j&JCZ1PJd#VzMtmdzP6$OAKH+T$6faQkJlTz_!TUjcg(;|~t zQ;u2*i?UCvcLCOEtjD$BaJJFPQ{Kasr90G-aig`Nlg)JHZqI~Szcq_r3NnbFoa_xpPu;0XM7CGpK#!gMN*c% z!0(9;b$?#LAZ=YrtsCM4Pgz`lMMZ?cLG?_2mHDS5_l*GqEUMOfQBwVnN612j(?{Pj z_ytVGEY9kB&4tc6b?Vm>uK6ZWp?RDZmR6+9t%#z+Xo+FPV?wnm_n5YPWrud`Ork?u8&XFpBxq^I)wD6Gn2R!sZlmxs5qtLg4?R5Xew^*O z=m9*h!PuhCNMcFNxM1bqISBzH15C`{aZ&K~=@oSN*xzZ}Tb1dH68IPN3*DtL6mO)w z(w0571~E#vuM|gvbr=+?ky9V<)j3l;?hxrSB_ryW3LPQEv6e0j)vA5=(|1p`5NL@E zKP~OD5Bz>MNQk9wxe7?vEevev<@yHvj?;U;^=zOaJO@|@aI|a8z0Z94@xyT_#+7}R zUq764e5qnAI*-GGzYyBXGX|T(j_*6M`!63A; zCX8SqolDr4R}|?M4{7WyqxN~tBF{$Kvod;<3?c|BbHwv6uZoKrNP4vBix=#Q+43&Z zrC^q$gr~_uT{sb9P(ryR4k)~sRc{L0GJrRKVyMufErm-V3GcwUj(VMB9E0(9$lx4t zL!wENd9d+8vRlq$j%*F7vPz6)kCHj(9hjqt@WxsCgX74n*g+fK)8Wx?v2bmey|CGk ztF5Dt(kE50(nQU^hs)YxbckuYRl#6p-ibQ7jkTkzk}ZYl$dq+sciurh!KBV| zxXO@Ng4#D)7Kv@)<3nI@qPBKih%G}|;j5lCT$u!B4L&28Orn*pTWjyro`3{(Jg)oX z+J)lmWpUen!EEL!-~D3qML?Kh<)+O|%S~quG6~d2aCF9v$@iTPuN}0{=0V{<$y;k| z8cYtDM&E}t;u?2V7b{EB2mhe6eu*U-%DE|YVQNk`rK3NHLngBZvs%gQH!_`Eu@<>C zqQ(Llmo8lL^Op?()mkSEXKds=05v7=@lU)EEc=9>yHDnijD?_;Pgf!AH(%p-6VgT) z(ng1hGu~wg;DrP2eE7t~=#DoPW#DAPowK5A*?EOK9C1rl#yu?Mk>-QmtyH#7L$5*j z(ch+CC4YLc!r@?MB~(eARNdv^88=|%VcyS8D3H4%*C2C$x~Q$n{#nFB6=5Zd=XP^8 zz!(9hZ~X`9Pw(CZ@phLKe`}%MV3mzJYh$8CiGSnMAdVU~03X(@#yZ4=ziy{;p`FaK=(#b%{Tq&&M0TT# zg!u8soFcKMc5{i3MYfNuarEYHQsh5gonqNlLb`4g5ZzAU)%n%><{Tayk2^w>qipIo zM8U;N=*4mQT(!nwCOgNw#@hygz{F)!{1|(DH)Vj_$G)uI}DT{xy9ZUW=9q3#Z9sa}nb4)Fedgl;urj)vjGKQUJJanL(il?^GTLa-6`GSJ4D zOwlv^q0b8%28&czPqO;(yh4WXLtB5UGf}uU<5VXabH~4c#h;OdJ@KU^^Z4*N*k{B& zs7NYH{^|gX5dAW;X@xzB1LaQ1<8BAva(;XZVfA^~;)=n88_?hRxpomfHo3k%BSch` z_a0+1d{=pw6m9(}V~-P??fvnyRd+aZ7N3pz7m-ZRa!8}HD97|Q?R^k7{RL;GXyL)- zsc+R%+>1nq{;-Dq-CoKv$+6QW&M`QkmXno^#MI&O@FQQv7>=R(>Tn3^KH^U@NMl0e z)))+bD$N`l;2N`!O@zl#2C+NVpFByu)vY5pOYs}hgxJZ3%l6VrC*m(JGx%;6( zS-0(DamCL)g`Nd2cgySReSgTHP=)d8ghh>n>qV3x_;|VJwHX&Jf}AH z)E_NM1Gq?6CK;xC9+H_=>~|L>RI#H{MH*~gR2?GCKMi^|P2Rl@6Q7pZJj)~?RSgS` zQ<8YfHmx&EZBkvQi*@|TkhcEqZQ#3Np!mZ8wPI}cAa>2WP?Bhtt7al5a*L#;mx zO{j#wFcX1?gubjF8PiQj+vUybUp+7(A~>&P1K^2I!-5$ficB&ND6N5`I;>wKvnyIf z-H`r{vnOtMU6`i(>-C)mzEvoAwBCK6KgO+{qui!^OVW{aRoRFAycrXA@~sip`3(l1 zr|VSG!SRL&|Ff^c-S+L0C)y0I{mXOdH%yFmq}$INJo5kEa^v;+4)N)n3wwV57Acm{ zZE8U(HFwoPGA_N#k<7dnKgnFgmH}Sx91#w`=3OgVGkQ3v=sLxo3k5?BVrs zcTvCvP{e<@X3n=VTA?lF1DB|Ymdr48oIZ3QFuigVpMTM`8GeQ2iIX!}z zJ{1loy^111r5u)IvmxCL&!kp!pVhsG?K$KhSo4#*pUv}@LD4Sn0qS)(Ga@8oEtl8% zPWB5Y&p30GBv=$Kv^~tR+)BoOhDWJmrA$giHW$LED09!Sr4Qg0u8@?Yh@*F>z#-Xu zg+)={K2=Wz`~_b96(i|&kO#@1g_Tt3i*9n%7W2^pg>OYa)?6j?&)CIkF&=$Xw^j<{ zc21=0Zr!h=Li+T6`_!KB7YF^Q;}bHnDbWY#Q@Q=xF)~O^}9I=d{)X4>!nZnf$AfR);{3J^_d9=OeFJVmmJZ%NgTrB0MKE2Hw z(Y#-ubH5&u!v7tlxH@I?D|;l!X)l3FxD?zWSwMt!@&rFKSNA6_%*O*PAe`EDaK%Kc z>eR!&3XSujpLl&?OJpMr`gFW=yS@S$h`!YxCs1xk$2sQmT<(w-wqh`RJ-pdvsF%wa zY9gPh#1aOs?_^7mCYOv3PaY-En>#EsIojYhl!3mD?7s26%@Yv(W-V$*sJE0{SLIDW zwb62f`Bw=?JT23@;D|yzGtqkM2$es@B=;|cT(bi7Euy4V(A*G@k%;}^9sqQff1(}> z+)m#xc0|etRJEQuMpawUZR&f{rT(XEyP>Q zYKpu;g=f{-)HaTSr$iTDC7r;ZO0{Xs^NX0$L2$UjNj`Uawbg%hBNs+xHCtZv=yR-j+m1?3B3+)rp&HJnNRRb?vvV zMA9VKEjHFYw7t0t1aBHaq~Y8PTW=JHe@LOWH%WXeFCP4FwyfN3J6foj0IMx-=6I5ZTh0bWugM7L6itZaap!J#E#2^J_Z&dkIGl4?yzhVDRk7KReZ5WFTc5V=74 z%ZSM)b0k56@k&JN8*E^~GMSt>z|Bxz2R58qUOFxOL?4JkrV`Q>8NL!9$n6G3j{#|` z?Ye12|7eA0J;XCuunL!vSgySV1aNrDGxNbw&d0JsvA6WMFG0qWSS%9gm;E86*fq3( zl$8?(aJT;7>w+Mruz*V|6LqQ5hr6L{gd3y+O5KQyzH>R1xQXcEeL9ry5-CZ*gs*bs zYPJ}v-?)E6yhkVX0{#(D-?+V@>uO*;D%y^5%W+nDA#n{Epg;tfnVNcxnJdBy#)l%} zz4F{waFtjY8j=~p;qJrMLT~^KRW`E9o&#E@^Tm|18JMFdqOLw$6xgFYH2{=hecKSW zx@Ud~3ZdP%aceZ_qmxR%E%@5~&>~H}avA2aM6!RG`bm#on3}M*tpEYnjZpKT_7pPXDr;E?y5fD!?hKdlz=!=puEbe#9nV2W+221Co*p%GkurxC$P6Ry z6H_t++`NHpQOW6qT8Dw$?Ht^bj>j-*dOIo>uobN$G_Aojt%Y*t{V6};e&QtA(BCiy zqQ#y|d1Ji}6kjuH70xP~RktmzJ3qpD3oy6+gENH^l`KubTz`+GZJ0Kmt0^Z0CF3Rt ze!rxYo=~#&v5@U0O*XqiI_7{*=SPb+D*^KOnfF!etrzqUS z>5_z83XN42SH}KmLY>eye1__8sugSq^4n|-DyZDXa8^hR>2y&C<5Ih+LqCUR{|h7~ z^JWuFE}S3wF$7>UHL<{Ni~rX}US;y5iCthx?sNweS}U6qSD`p}eXi7kYOAt(|2{la zpIU}pcL_3xp(F!&4gx2)qJ@c`u`fzF&8Z1LBcE!bFeMKWAOmTHz3=CZSHg|}T8O#D z7i&L#Lk4Xiuqzxx^eKwD^oI+S6xTEaXc|4$d4I@=9Wp=j&X1q)XFk~5^0w16XN*Jr z*0tj~oF@ugRnflKs%XopbC|L8>&d2nB@SkM6^OfbX>n|$Me05#ORelTDxQ?3; zVjyVEZYc3@c2@rTC7{A~wi*GCDgiS>R(=c$V8)lT;ybt1v&Xh8=ToOyzOrja0 zh!5?jJqwgN#?WygDj!1k*P zJn7tegmj(|_}~{3hIyEvSH3J?5kmSnjM$`x{){z7vP}&A*=qs^GR^E%Ug-?3bM%l= z{r3P3KnR6(5)51N3PbbjOhJqtQLX?q$oLIeuAo{AC!vb5p>zqyXsnviTNxwe>(lEV zw}};NA>BM(mAXF+jWT%XPsb9D;Jl`uiMCfEE|K$-XUJHy__!;pLjHBj`MS@>Lz#9@ zSP=Tr+>@WQ>whreS!XlR8|Sx^{b1pA8DG{P=WFjbz5abdKi-%6`K_!g-T29_ljnN~ zxvY*+vfceF-SEix4!GH+^ynA@m=YCl|3U<6M>9O9;St$>5wXdxK(H@+QL!>&ESc{G z5(SbWoCcd!4S(x4&kde(w&-pbTra$kyQaXp;T z4JnqA3*s`A{7ZEt%tJ%f)$fY}230?`6ntXt{KR`%&4pxikVchlR|t&$ znyRk$#V}Ub|5AUqFC_9IwPw|HGojiHL8@ZNCx5+7AYxJtY8zHOERmD&GUPLb$ecPc@H1x{7^cEzxD=CCh z?wu<0vqHQ2fxLm-!A?z5N8(196i95v(GH9C{M8hc11n3*iz)=z_eb2c-`=fNecMOlCDL zc$(o3`XS5{ANqlH|> zJK_yD(1==Vdr3f(;DfMqIkea}2xu5j^&dB3+zWV+uI@Q*3c8ba$d=6W-iQ;W&f)T5 z?#eOlO~6}7ujfQ(4txAQi79rk$Y(LVcxV73?{6eC(n~9iP=CeADR?JK-*5(JT_wR!M z<9jK+g@E}H(61d+7yt17_+Np1Vj?k3_^|uqZF|`9L8IO4n-`>% za=2;jp)oe9wq6Y3ku>av2Z}?AoKxw`6um&%CNm)*(GLj{9(PeNqTa=f#7dyAOa^WC|4%eUh727 zKg}Lwq<_XpB>)gM9Z_O^J@P|)R~E@uWfD-=#|kst&#aW{iG`FVfH9mb<|t1;H>Ra+ zIMr0lcO{IYFWgYuE|zFF>S8#lpakotq2{_x;aGGJjBCkR?=C(Kag6H^KN~8af5pNZh`#j`g9jd@(0o18S-!*31SbNLL6&_mg zV)yIf7mr!Y^12vdy+*0DjrXqX-g!{s=H5s`q+J&NT_V_6>46o+JD34Z(t5R7T>*=q z3txOXHUi_O3q3qr0YhC|BT1@wSwgC%3ok|l{oh&9p?Uow(o8?+G8ae*9Ck`%e9)kY z+44~_I{NC#80H*)ZAun)^IkWm&?QeBB?VFzh_OZFjKyqld%jrx-TrH|f>A;66I1m{ z+@1Qv{i2-fn#+BQN|s&)Zdggj)JDN>hGo*&uue{lNip18#Eg(@kt$3W@grfZS@lcE zC?kK^EFlbC6sK=tc;iF|g~^CN%>FKtYd0WmQyCtV!U^7W(TTP*4rN2eeC!t9#_O$JD)Q;FHR1}7j%7&+WN zdO(<3u1#lq^Ai0AqW=2Wd&7vF+Y^7WNL=6lO2?3hZ2xUp&S+G}t(D*^h033c(Ttcq zdRL2T##@^3UE-w7aX5~K&H8ss@kUv~4;ffz{^{U?J;eMNpHmsmjp@)J5N#E{rD>k@ zK?1!uZhMX~mZyG&rjCVOg=GGRO70JSWELYW4e-Mo#ZtYm>1ge&^}mKrJ%MBjbv*Wc z0VH#;#As6r4#2Sd$2|H#e8ICt0ey;Wpb27L=kHw(eJrz)aa(-wWTK2aj2fB0Gpe6W(GpCr$$BG8`Hteh5^Z$&UFTDzWBG?h0CVwm10q_vDbvx31Ni!BGDp* zF-4@LU}$PpK8kX;A97E6Z>=O*58W4jzOtW)+8jH_T%o~0VXLUrTjXxd&E!sRJdBXN;5*q~giad+CPCn(xRw6`vZe*3h$`FJiWSdecT zw$C=1S=C55+{?4PB1J8y&9b-edNNB-naNRD7^KfT^V~;Ata7U40XLH+{fg>YzdKYb zT7TQax5!4L%*Tm+xJuYH-N;U9*rn;fa!fp)^MkklqtHa7J_8GB(;UB1IEU|gxl_%2 zl_=7E5fS>tv9f=Q-}@U?yqQzEENtY^&=7K`{h*b7gahWKcj==2*_ZE{_j7(^r~rb){#xww<%EW-*CE;lN$Fv7ba24tt27lr{#mp3J-;n;K(q!TRp)MHe5C9-Oic#pmzg~F)_M*sXG(LO%KKd4 z#fOgv4T4fB0xktadZsk*x`K{Ft#wEN%5NTWFlTSa^XaC$@tER!1f|`TS;l9dY$<1_kH{|Ys8&4@mEu-+a&h6{+0ZHs_X?7 zR|E4Ej9)uc^+f?iQ@nEG~+w1AqftcvlHb7*9*nR&(q60 znAGC_MXhFFId=A8RF51#y_N0qoinT)XU2^rEjK*Am~@^Kl3S4R@GAO0~Yvj~h$ zm!ot!aM;gj_Ih@aF;f8!E|xSSx+NA>ylUs{vt*D){zzhX!1@rRTK>vA#+(7@LLSH% zNF1a>K2$E{48`n|SD!xsA@&&&jW@@GO_6HTH8B|s>(vApQP$Z!-C(U{uq`v#i+zJB z6D-g;DK)sn)ERsLI_)30U0^!_+Q%=>YnwafECKUAGe~ zBmfM7Nob-HzRsNk6QD1r+i|>${o|QaO*v^#ahRJjW+y@K`_BF+D?y_ocbCaM+T$M> zV}(T&?e&4bj67fGS8~SJ?~#a~q+HXRSh*Am^X`dUDj*bxn%)TxYfw0m)efJSC&=&{?b8aPJ_%>o4>2qfJkrOOH!I$ z_4?0L@-OUBpkQr&6gbm)cnxxxEM#nC88aROWA7D)@Mb1>Jcyt$T5>$bhW&YUk5OPK zVa#I0a0E)Q`%Fn0ZU`EpQ8D(ki79Pc&+UCwFH!Y5oN9HxAJL{>|Epp&<#r5fY(ecB zLt97eH{}!Y8Y5drel-;ZQ=Gv+tuP2`_{(`(8czi0KX$4q?j%KW||@-;;j665j*!?~R^opnPUGAl>tzJdZni zQSW_eAN6x$@Jf&bE%r%j>7EHC%81WjIcoA@w=H8XTyc++8r_2(&JoW_zQHeKINcaw z6*n%=ky=Fprjr(FNuH}rpT$Kb|45)bt1R4LqCBTrK4V6{Ra-v$mGrVP42WTcLwA5B zJ7VX71W+p;L^ZJ49~Q-Ho@EQk>>#1DvWqPY-Wsh4LoyZyV~t~yo{5eq_Xj*hj8Jki zh%SQz`Xf9k&KRf4$v&2fnzbTxT&`Ur=tmeMlMPZQ) z4Sj7B;ZGacPk(*c(Gm7!6YrSh>IZ! zHR-5C(l0)_Kf3CY_*st3|M;+<*e^Gq73Gt1;fWCCIWJm3@D1Y7F};EC(c^h?m*Mjp z+j47(EweSb0K{KZeSUf!Y84U)s0{Vru?%(~i2sJF+c}t;*)p1%xtKUxIl4MHGnzUY zG5^ihLB7~}ucl1=7W-fP8;VORfJAdwqe&2^MABXgr^JT^OgPO_M^j2wil%$ZU=O=b zRNDQJeGAW4>-NcQM=KM$3wLnlXY}&3@7( z`}4`X8&E3KtrTOD>GHs&Iy}0l1Ux060vd9oV!VMsB&+=vYp@y=Ng@Qag>Mvy$zZ-U zn~e}YKn!$EE-2SlpRsT`Qrc4RC97s4diFZpg{f|}eCaWc+=3J-Ci+yAB$e3+ScVXd zDJYCSjV5@L`C1B>1C-OZq2Cts2x+d0RH~a=cIzZ;1VPLjIpTm)1Dbodk&;$Y5cG24 zna?%lbUeegZ6ORT5)zfAH!aHe{T;O2J}G^}Ssk>5?sH&klG#Q2xwZ7d zlVfg_#~bjcDWFZal<@=vmK3L(DiF`|hSB|*g%jpWIsb7^E_HXtQ3L*)97~n!hF>{A z3gTo{IR~t*QQAM&t^r^xg4QVa9y6($Sro6B%T_BjidFAW%(DqQX3>LULkzfIuNFpW z!LPXLU1Mcy_l(9>n^vtkH~f}*SF$@y%U-oyfUYeRD(azR^3~ziJU>WEdvZHY!aP-m zp6SfH6(^^YY;S7@3L&YG{cbefaW;TME&vM7!eT@exxGYL+Yj;HIF7%UVt30?YjR;$ zwBG#f2BUk90k)ERmIu#@Bs8EIrf+ZwZ6wto6KGp=W2FKx)Y!*%M~sl1Esh)hK*QJq zLssA>J&!F^_lz-4!2hLpD5qbt5W1RSo^id2%vykJ8g>DtvQq&w@KlGvK;y@aEk!xN zx`^-tC;1zyaw^dsegI@3rzu6|4!=>T4k*Am%5h7T^}*j}o-Fo86`m+Ja|Fls#i&e{ zVad`?*a9L7KF9A~tJ;LuIn<^BrHj}w;1GySpvoJ5nfe$vX&gY~X4nZaj5OX*QZg^i zk=IMD3E+-BKFxwul%Q`K?`wxybW;uY+sl*MGgdJ zU|Wqrpc;gQj@)TkuBB*%3Dlh6fO#k z0!y~Xm}U&N-JS{&^EkD+h4Bbj-qmNj&g+aPxubo1Nf)r}6esHEw0|8i>!jp$gu<`Y zv^1a(QC7#Ad*&Os0u7yHkX>g29zTOOJgpG4O(&t?m;2H|Ii~Qz&3YMI@OGR7;X&fM z#2S>?6OKi&58AHYp*Mhlir6*S%MQ6R8oN>BGTG?kF`4PTz0#wlUv}KF-#XbquV1NZ z0KpN&m)7O7Q76Q^?i1**;5BwWfhZ&G^Q+v}i6y%`VlC6)?69W-SwR`JBD4`u#^Dphug`<_9tB2#1ws zqQo~?0hBVumvSt@B+O5szk*5%l!4qiI1msL`o9Y*f4 zioPkKn!sgY&EXY|Emma=Geb*ZyF7)fefyTGS|yu0dFl4}qvubinC#l0AK%|!{q|mW zANwAAe-1#oyQ`_KY|iMUrs`c^;C8K!x4YTxG+60<$xl|r+SF8Gf$Ey6HA8%#^i(&s zW2xTwlVBQaXaUZcteu~;%;H49gzM7cYM>Hs>KjK7rTFn9i_NC`_}F$haVLdBL`jCi ztvG*)b$CHeQ7uptttWSHg8mhLA@={Z# z>9qZh-NM1~uIW`7{dt?&kCL>o-(*tl41>_ESJODn|&Nu=uPO6rE<7HMk5h(zZPotW4!?&T{J&w9ef{qMwu<_qNU% zHUQ?0LQl+J@zghKh|>;+7GclWE3Nl~Pu}*1blC)06L6P<9YmPY7+qvG-y^xcZ=p_Q zoRiy-WaBgCrev)Y0b;v;x|7=S?JF3g4taS!N8i%!-o7)Kr>eT zcT%F6LJS`;+$Bvp!(hr6*Hx^%y7Y`p+u4<#Q|&DF+9bB%SU6U48FN(dRkNyEb5@OJ z{K+hAVeP1R(-|81j++1(by6Mrg16Jddy@GiKKdO+y_v)rel53|n58od`Ec92$zW%= zUIj!3QS~2n+X)<+No?Wt{Awaqr{<=0*x%-AWmdfkWpBdNn~s}cX*F3{dj(c(Hkmb| z)}^zdKfuu^qZ*x#GKsY0Y)!z6xxeGlQL*_5Q}U3~|Jg zB$|$!ThTYAtLmckEw`DElO!gvy@ii5k9#MLx0kHwV=-P?by!evw5<^CDXBh^R#3e4 zWTRFUyHalCnl{&~q~%kYo0KI@G_@ei+!Q?y!_u1NKu;a{t5Ms z7e$)PkM8)YYOBL&tHqVa7Sku8JM7UEAxRTgJ?V7$62*h)sbrXRrZ=;SvHAO*865jD zrB{aJ%MJ}!>X)rgovliE|-AsSo~ z(cRQKONYJY&QPt(TQDJe((07ilMbjayMW~GEgwJ|J3;Evs9L;0tRHNK6YcttW$n!|Pu9uI>8UAivP`!(d+jJ} zDb1`~gMOW<3aJwOYlxVsn&mDAE|9Yn_C|DS?nLLcpqZN)I%qz4Dw`~dDpT}YSd<3o z9Qn??QqOARS2EKoet2yN=$e~=@eh~DK`D^ooqyy4#)PDg--6 z$rjysw14OWNIBAwOz*p!*7qLZlQcSG9+CzNG@a?i0?(K3*1`HvM2Lc!L4=k&v{mBCt{6FFrd~#7qlUS4r=#jBb@cU+(kPMtQyE$}ZeGmX6xlz)8vuquTcg8UNu1IiU z`b#xHrgIr}(*(TFV3dJI1}LVfe|$8JY<0%Ml#1Fr>$$QH60=Zh5YZEb~K!%u98a)MkmMOu3fDhFnU3s&zgR~7yWRW+{CAe+r=3Pt~HX$ zM2M|PkA}@^G_gMRDdwErtXKsV*1GXR7)qqx0?loj#fk~bJV0JT*MLF3jSe%-EKF6) zaM_qo<2y+=)hhg{c0vWb{1A7)K%K}0FRRQC>)3Wj0m(dVbWzSxo;Z1E>MnwftqbG0 zVq5{+_)BL*_nIQy04731YFAI;)ZJX+WWWgrQ-d?Si`ul`k>a{74m9;GI!$VUb0e=#hyOi~w4b|()yiK@y*WP{;lp@k8c-+Bh zwr^ac(Hz|l)|9pS3h&pXuwM3a!p};JL<@2qJtz~r))9*m6Hn2U?Yn095b3-cU6Y2accbdB+@3#BqJ8%J3bPF5?FFW|HZrt_b zx#e!<@&abtc6&Ir0mFiBCg~)%3Qgc%8O3ef5vOaw;WVRx@7s4Hgn~P9i4UeNEv`mL zWt{qz#A>R!HqzIzcVFB!)`nkzTNe$-%U>LA({nGp*^Hd&%DJ3HfQol*-sphaW})TE z?67KxVNURDXG0YP+ZVY#U24;+L7Qj#7_l-94@((?$vK$Oo+2E1x_pmX9TU~mE7-FY zCak%$$eoVjt2uuv+7IwmyT-hXWBXE?swtgzAA={A-z>^_h1NJi`Nb?1QRgwLyS1}A z5*6_efYjnI@zw(tW%8+Tu_>x5Y9#p}M@T)v>f0$_q8Kb{B=NPU5N#Yelochu*g^;# znV*)tas`pBlN`*MhCk(xa;(k7-SXOwQ(bJpNL4^cEf%$0*>gpHEuv*fFIjTW5M`P# z(pi^mu!heTccK+jWJ%lxvZ}zgD&yuVT(ksW3=N94WH6LOpBSG3^@XGe7wFIdz z(sYHWFX3p*oLlnQ705(dlJ3&BM6&;UH){!PTVy{gnm&cul#g+rPhFPdsSK|x;=CY9 z^`;h*T14vhlRoH`*I2Tb9##*7V+QrnT5mR7pa|akFNkqz>Um~a`M0|Q7-APnFgIcooIVi4BD5p)@SReyBT z-S-{t#tr!`;N*u4UJZ6BYupsA0kEbXZNRNHFh7s97wWzS@PqU7u+kJ|0%6E^8@u)9 zEs`hXh4mfeD=*|XNJtg7E53SOFvmAbU|Vc>N2OS2wEZ=pI>1dDPATEdaKhSGFIKEG zBcNX7Pvm|#$(ufdLZ#BN@CO8-e00oja3WB3AAm*o1`?AdZ}a3j8a|5EXK- zTIB<#SpcYmtY{m1Fg+4*gq*L=9x_fncm#bv({qD2AVTs41sT6Wt1?!=4E#NCdTxc*LFgHLg0h3xIL4Xb3X}MgA*dhs z;H#Q5_MmiyYp1Kzoa2W$D@yUVw z){gx16D1ZO_p<&6*QN+)XzOq1nj;Srr*884H>?oMaoCVi)bB$?@C*8Mfv+c7AaX`eSCxn`ECde(1WxkrPIcTsnE%sLor~F*_KmBV{omIVKKui-T1|H3c5}`%94;w+vud@o z3R%x8GV2poY_y?BT;z~J^-T1+cX#ye@H;rACffgZb)(T_w}z7Fgdmq)rl~FxN^iWo z4|p#wVmP$x3iGC3=|XCP+9<(3G$t{VH>R&??g>|qO$lOR&Z^!HKCt`RBRonSI9JTq z`ODJFzpRI(yssbfZ7#UxHIAJ}H)7d~OFDosD^L1XT9j6SpvHvsQYnPcn%KjQ(rK^I zUJ$b>syKeYZwLs<%+v$D@{Ap9c?aliv(Fd^Q@L`Stny?y@w<7C2aXbTw_!gB>mBM( z;)*l!d^?-Ujz_`L8bYcpVAMRhl^ zMB~q%$<*0BJw60gokERL+!H4PJj*_7*IV}*11b3Wv)0&P(3Wzh&_Zp5*8&+kF!V)6 z-SqE3VPZAA_ch$M2BQ#>uC&%5^~N;ng%cY=8yc|LKXvILBebj1YJ5vR)=<|rwF^~d zu%nHBpRa?I`v|%Ayld0QBH3<@d*2m zdVXCNRSO)5w{hPftTRWV2{$mRxDAyg=YnBw-=NW}(SI;18 zHXEP7(B5oPnfUHJ|2)#+rWn5Kl(IynKp|#;x$8ddxmgxDtBzhRyMFm$y>Fxm@Gj@% z*%-MbH9^v|M3Kqt@p894Wzvsl>IG7$!sfc6h-D93(LlBTT|;O8vjyRnxLtlI9UHPp zn^`7`522;n_(lz9a3MJAG)DPh9XASVt})%9I*NR0L2OqZP{_u=Fz5*;d07Ty(+S^Cvm9X~L7ltpw$JKTZElv~iF8@aUk(%)15uW+VnhKEl zU3pVOGct(4TH1V}jXAM#rJyE&g2tY)EUFy)ID+CwXEXMRab#IEFHt+z=h)&Q2OnAqC7VT0=farXDrc+xZj(-Nxsrb%D^U5?hd znLs(z6d!tt4G$E4Vj~3P%ewv2k{j#%+0)Lx;)#n)Lcc2QF#dVE5WBQ&dxs#g$kK3q zw`Wl25guaYIm}9j`gc{hdwDZMrS(5U=W=upQeFI&SHX0)XKeFyta~OyY>zm00?WU- zS3ZUfA04kKOyvF*8+qq>#^?W?OV&?plj9l0|y1Yl`_X|CTN4!o1!56au zfnm?&&78vkmuY~@Qi0QIO)})M5GZn?9NPLs)(PW7zEB8nr@&p(g*-7hM|6>N)Qq)K z6ZA}{h89OvQi@(_>n%cu$>Q88F;9 zwsMV0syPj6-w@to$~FF1wl%@}aT1mst zz>>5@(hx-o00qCBFCHP{eNK1gA=}a?)n+{LeH1ZQ9lSfgS&1X1*+CBYkFj#Twl5*A z(m|#e+a@;ROXpP(ArS--VCx1!1B3QrrouY#fY?-2j#n$|P1nwC_dl82e{wi%s9uV6 zE$A77t&t;*()Hl;tS~J?GxiKc*H-20+4N3`Ex(8@9&TFt8MOAW>+JH$#SU-sjk5~c zrJ9JEbE}Q9T+sB;cn1>2BKUX2MD)KcH~c^DF#my={A0@be_nr>ss-(-vy$r0&N!#j!vig)kB705 zh|U8GMmY@3yGy&w#9YHC-ta5N&Ey#+yCK{=JO?_!obg~X2nQLyVYTWsmFn| z+iYg&;`umGN591p3b|Ul*EUE8OC^||sglC|lyG2Sx5>PibtEf!tXewr$y^mXu^>uQ zQyK=^mWFrwJNbvz)H8py06so`H!G#dY1{7+wv7qS)0kl9EPF~gJ$7+Zq}8H@5CYgi z!^xaFSEidZTDsyJMOl|2E}PsWE|E?k2nGU{8eezqSj*_*tTcJC?nm;RKVY8S0%!is zoELQR*h=)!>v5WqEhd`LcJwveab@9)sODY^Ap>)7Av7s$gbu>;yhhXDD!VN-oed33 zI8tQzGcf1cW6XDhe|CvGr%9KuLoWR3Gu5O)fBuXximg;p1WQ;iNI4KosphFD7@sNqJns&gLrP`6SJx*G1X(lrgV%Cb%X4fD6#mlNmW24kQ0r zkYGEm7E{1a5+9s{Jcx)zl4D2bxbCG7jvJ36* zEEzE#{?rRf9h13qswhwy9|Oxf&`q0Sr8(MYn&ffjR2WJX{jgH6T5?VRb$pe&@CrUx zEk(_r7Hh7^h%B(n#r=XZ9}%M}3+mi2JuMn^0iq!bm)()UY?l-{!XtQySJroEs=%cO zi%L`Xo54o1W|@s!r){PC3b-vj8Wmh751>Vu{7MZZmhJt>t78hJV6qA3t#kF;GKgyj=<6!oL>ivgJutTFKquJ5{4x zb0J#1Np(dlBcqcDr>&<3aL$9@qNYR;c}Xo`0xP?)w0FAiF8&4*dMRkyO7e|lK4z85 zC4&*eI4eY3cRJU<) zNgpF_3iXBH#T&7(F4c>An@S0^^z@k_ z-Rw{3ScW9)r`^r5(Lo*%6RlJy>nEXG47v*p3MrA0vRZHvag*g!ZBjwgYKpRw-GudSn464w14UY<4y&{5wtRcvRZq8UbkP-QUYQG|K=Yit&K?XB9n^ zSLimVTTl2_7hN@zBs03bS_j_S+2Qd~-CjZWLQtDb=f-*)?E&KHu$fC{Q)$KU^Gsm{ z*i9-z-!Gbh+!7v$DP(i53fsJz4XBx)Ez5edxr`{JKLSD!9-EkU7mT1Mg7J|>;w$9G zk#rBkQd~lIpvkVE4y1~4k@6aHBdnQTVCuXS(w+tDdn9MQAif!Z?1o0iRXV1_I?BQ3*Y?hMIKbBz^#&EA+N+8iM@RMn!0 zi5hj@YW|c>RHfW!##>|?_w@|qMqFpdn(x5vPUAK6v(p5%;^-w6gZ!mQXPaL(lhGv1 ztL#<2YJAXOlO)hJrof0PTu2thQ2q3iQ>^o8f@DL-b)3dbz(Sw-(Md17S+{{aQ|iz2 zC}O1wbBVrc#{hQ+d`XmiY^STY2a;rteiNCm%pVC%d(h4?B330{4y>un(x{mk|MYU> z#Aq^>ncn&*nMuHI$u>h3g)My2xvf-bGEUC~m$qxL8MfnlbBFxpN^z<}W?i`R8Z{gC zB0Oy=1!}{FOqm{<>~xl`@-+9m{q|P-4+RziD*QjD%9@G`Et}`PLPdHFsnpi`jhc$) zkMet$exIlg)bE}Kct`G2@^mC?A~lafpKZ%WcCmT=I4?H)_&{aQI^!9yc)F@V({FuD zvtmZyjs4f-3Vww`4Yj3Bk%1!NU9Q%E?;G}jq2h;V3ARyosDRW5iABRAr~Y`Kx>*!H z(ATe-s4~?bb!dPXn?EQW>XjWLyG7lu&BkvJNxv=i2J{t$Yb&}0*tG6;$74}W@@lp` zag7J{_)hYc{C?7;X7E-+NV~ar{7~DtUGYtYxU5rm$LzJ(=Y31Y@ShgA(D*Ii!A(HD zX<^bh*~&~ANdj7NkyR9q^5=xTQ(CjSP!Wnn0L;3M-dq0u{K(`7CLyNaircnHfd@%g zeLRiIzRl#1u3!4!ht>)Oz*MP-FVVYE9ekm80>|8cC`9EWu!WEo7S+#9BSZx43J5(w zd`@Jhudr0!iFyJZIsBM*$W#5GC{0a&9Kzm>fw5OVeuhy__Hm5ftA@paAiG@+0sz_^ zXp^?-GTXbm!KA(UG>~>7W}J|Q!Ym?r-b6I?39@lte9M)gM`*Tx->qBUCO?P8Sa0;x zbo$>=lN~bHhjSh6*^=G(@AkeuA#5d@=o(C8V zc3mTwS1yQ-J$%&NQU4)H{SteG@!V=n3whqlb_hMu)@>bDoN2>Z8nWVK5|V|{N5B-;FZEQGvj^{GU#%C=f;zLx*3`UG8j?h7Wf5yB{7Om|DNrjixsuG51ocJn%}F}u z>>tDy37w_{Ot+D;7zqeX2{}@;Bo@gok^!zs3{4SWQXE&rO&NCy`b{BpNSCDSQPj<2IUa+-Z zctaHGV+2I&Z?6!6bt15Lh48Mg;(-XOOo!fM1l?7?0h(y?dP2x&Xc(0wq|)$6*OGB3 zVgVG+ES;g-()Oh7$vnK_mt^~wdkb1BCvVhRO|&xc4bpW&uzR^e2Eee40il#ga}4D! zQ_Xkd0sZt>pNgY%_fmoWS!#VUAkHFOy-+FdU=u%4MEHpk;u`@vlKNUgkLsqU{lJ|K zI9K@ScYXr`qsHQInJ_PI1D^<<9EbH@PXa)QP;a9_Z|D*E6$So?PhtZaFmFtUm^3G} z0R}_pBryUsSK~;ZZ9yNvS93rD5l`{xuHI&8eQ;0NL2o7?{=y0Vx*2n*AfIT5ovAIb z00ZE+s)A2UkWcQz_tZOrflp{pcli8cE;e3)_ArH8kifmDrwwUul&k81$N`lRN}AJ0 z;7`2t5T;#>Ct3L9H(cMFalsFe;PIsR)hiah!K8RtK)corb>JI=jGscoGwWd(4Nle6 z;u=stEmS|_6(LbyX%qqE8~6UIws@y5EC5vPj#lskW#Wj=%buRGb7=e%?nxtn@#Y$O zzyWC0+DpM0gb)+(*fjP4Jw%?r0rZdfn@waN;uAidK;T~36KJF#)YDo(JM5eM2;YjUo?F@n z=02!f%yqD2U40qY#2%0rhs-Xcq+c4j0+Clm)-DvlA@F-5xk1&>;%8==517fHD|h?} zZ}~E$gN`f1vuhUn^TYk) z$5dD9y$zn=yT||uZz0pgR-v;hI7Ik3la zN&BioBceU%)sx6?Y?O}@YQOMbea1@}rV5ZI%)v}4z`VHx-4HfR zt+L;IBaT>GULeMLqolJ4RA6NudL*)1UeGbT%5QYnym2Cn_W~FMydlW&Q%in;UGH+Q z{V)LTA45RkIQUi>*M|^1T_)6m)UPA)YsKOeZ2*z&TQDK&gag}kR=)7|!v109K>x;w z!WEx-<}w^$5aPv)sC>|HV|*M)0kWqg6T7D+^HOVC%h`=(uX(irpe=O4J@|Xq+%NVI zs%LLzJ&?%0oX{?cr-UN|>6J-@2gf|?K6ZeEd2YWNkpt$CWBI_|D2%@t5uw#__X-Et z2N>-eP12rC0V9YO0w4%uAXD7ijntK5zy_Jns)Pf$4y>m-6nS@t1kYRRCJBSTz(@0D z5raP~y%A7gcp&Xc15Yax)!aU{i7loCfq6ga6jFe??38T(y5Sr**?N zc(A5*lO8m~fi!1Z6iBg-3*6p%GjvhQ>+eVF(5bMm0e$lh(qEQZ-`fh(PjKBufdi=v zx{h(j1tPFHCHqG|@GWltcQVK)W9|2te3U0lq)Ue)3E(x5I%|`d`xfBNqD*^-*+p(5 zj`nL{x`MxpW}0o%^ph;*$WUKlcUA~(V2-0);u z0X-??<#P}ENc_~|c0Wq#Ioh3!?5fq)>G{$1wh8xZ_Iy^{JWQ#Y|>w6|ycO8mIlnz=BVIM}-yS$$1o z`1>Xb`oC_XQWdA8K)xaM%xSQ?D6hkxIT=2}KDb>`ESfUsvSD?HlMOe$-e-%7f{FFG zCRG(FuZ*2z9q5HkE@eksx%bR=LjXA~3lUN&MMPYIWZQ|oPHw_dfnTg&bGXh&Kan^i zUjXU?K!M4Y*JdLo7LQOD4{5^$Sh7m?}bHgu)EkCQXSy* z_YBuFJVUy_a$%reaENI(i#vMYsK6-U+Fbp{9B|d!M8bw|=FzqKRt5t17B)IV{MSrx z%+-^rus1Y%5;IgVu<^NeYv);9K}6q>Fwi@^_AAu>?I%)4&&f-JaYlwdkiTqo&u8mo z;0xZRuSZb^6buar3hFCq`M-Ubfr$PoyryPmj&`Puj;?sP0Ry_u_rgR>2zovq{F z#)|iEQLR;$bJ${n>;3A#;L{2qB|DV|<~GD?P}#hg*HMIul4}<>olFL>?bZ|C8}Tr7 z!GpRtj>bPq@;50DaoqrYtgPQ!6r-u?`KKRmdBfCep zl-JYmsnoh>6##ou7kwN-O0;SPZ3j@;|CBa(xVUfczHl-fYZK zd!?>#FYWA-G<5LEfm6o&`x3c@1%r?HbMpg|yx-mEON>dx*zgkHkx|EypbNYHHBE-k z0YPr*brQo9Ss(=WLD70DK&30Qg5-&uTp|~uiM&S4te^^W^FM^AN z>@Q6Vv*|O(+0i3m6Y;pPGrAs@pAf#MSv`YaRhHzUyEbEFv=C2l!VX)sVD?iv`3=_~ z%*;K-E?5*f73V>u6eB3cC>+J&Jfy#pQitHtN3qIn(cH15g;BM3b<3pH;U*}?lWaMs z-ATeO#S-=9Kj4P!sQBvI705f?O=u_OSU{C;E+jFvck~n{K?~r7|peYFvONt3rlx>L*1|=3v*!CoQXSaG- zc*ypqCPalabX-9ak#Q0Q?him;pZ`dNMd3g~!4nh)Ax6UMxH4aB^q_px^y<0Iy!5%< z?csQMN6VBVLs8h-T&PpCnbWK1*Yhdsa(O>iul2sT##LwvkrtG1Aj=HE3>q(hJQ;Am zJgY0Dp%WYPK0my!aBHY*V9kuCbLZATZr|}P)-9!DL=V{Xf!`_YE{)}Dp%zBK`aU~eCrQ@Hw!j`blMETB z2IJlf=k+-^%XPEg{|67F%f9`LTf;-Flqa{>%`{_XW)ULAI#BjQ#NTk~tq~WuvUCV( z5}&cL!$*Px{XswG;sQdTaG(?EuH1Re9g$C>ekn?*a*0(soPsek&SsCrn-)D>Bj__2 zW?3&oBP&hC{d?QUMpG2({gtPOtbQGr`TKm)l;2iMOxDI>+No1$ZG{ib5f5h|s-8Qs z=b*%N8LsePWM3Ohy(wn@@(M#aF6eM=o)Jp9BM#yL9Kjgr6BvIadPeS0@e?b-@K4TY z$}6(GA&V=m?+6M%v3a9^U3u}wyLbZs`Zg48)By6)GOcPSP`M-T-m2SGjlJN>ScY5m zpsnnKKLU=l7ZpYLFcL5LBlyWo^-U~7hWg?_RYQcxJWRL`tOqIwH|qSkFydB3xF>(n zK3FlJk^E^Usuzii(_Wz%PXcA%|6Zl zA>_Gs$W!MEllp>oUTr^~{M}_xprl1Wh!Mnp=Ow@3ad z;LQti>xQ)fcV$4FE-J@z{hmhdOOEY}%sE!n>`Jsf=}L+z*AlL-E#8*$g}h8zm(2pa}XY&LFx`TM{x#)eG%zsu``<5}5qk?;GlOq`$JPJvKWYizY(T%kg+9K)BvNBr zXWbX6Hbr%a(S*xZATGVUXFMw_uD2E5kB^mPi^bH<<$r@pyTO~ z1v@ngiqVm~{%qa&dqT| z|8DW!&Er8{Xpz+r#f(V9)75N(FkYi`auar6V(xA?kpPR@`D%vhmc|SB-?DKdVW=lm ztY6WWigplNMZ9zv@H(M?bbQI+kWdx1vjLf~Y!&s8#vJ z;St|~Ui2gd80*7C3RU%2`Bf44;qI9-(u zeZju+_55oF$O44_PhtO0259Q|9|Z6}WIg_q2L5fT*#8>bqa*=`Att2JM?aWqRLSM3 ze0j+W2o;%h`wH$%00>4L{zkfRzwcaN3em}!Vx`LWM<2hwXniA9DC0J)-mWG#&b(b{ z;CZTgYqC3FLEgwCi8Z}eHMdog`j!=U`ZD9AoJbP<0_)hoRGK!PBbjy1cC(Mw1b6jJ zGqkbpP2ox#icOSs44q55bVly^Bp;<>n8kb=eNz>k5l~tXFLQ!mv7jXRcn$(s{IH@_ z^@EYvm&5B$2%=xn@BzR37PYlJ&LOopWqQ!Op?j;;-R+Q2yq?6I1legDQBrT6#+3wB z;JFfSRH(IID)b3p5THi#hrtXb|tvb^$TJc}_k6F#gr8qwWG&mV)nV4eQ$ zvsOL>>U_TT8vIvJ+`nFUJ|LX`mxcew{e|%_Ou#Kc$YF^ID{QG{N4B zm6gk}dY`ETo&r@+ujDzXD4`>&GJ8F)xHYHaNa2dmW6w&S$IhzAHwv&m_xk+)_Q7&3 zGt4yWC%cV0*U$Ud_ky_#czea%<9U9&P{qPnT}PsSlV-fCBVi6T8pudRX7@$J95*47 zeJd-Xvf;$bE4`sfKn4h+_6 zj}Z9j@Z^=DG#N^j5bc8>KfF>19b|Bx5nx8CeQ`?HbBVcQm4!{ z?TLg!NfI(wybx1`e8l{YyOYa_oL`P?Bwt50ofA^gG=3~|EgD&l!7Q9Q#nkqxX!e>D z#v(p&nbR~;9%;;~BFNk9rzI+v5EvtjSBLT5X~)6 z68^9N%sjOl_&Q5BlIXMG>{RV2&;sjWJBFB^n{^G*Q#16LE9bdWIgpL>+k>zGWe)Zg z<1b%b2?|co{$)F4p#RQxM1ZjVsqOp^d^OYGP6X|rT2O7`|0n(}y7O5250<)~U<6&| zx|N;-+EO=Qa)4X=F3hZT*{K6XOq%0LQDcgF1`azZ zbz5P5NM_Yq%u!kvph7oKV&at{H8j6_Hf|hfnbtgV7`=2*XbYxkEmvXIH4J;V^Q)5- zN&lIP7(*^SkyS<=RbrC$PUOwPO4?|@tnF3)h`;ruoL#cRlMwNR8HxCk9kTQzPP59e z5RUE4(ATu_Oo8|7E;>>n+Rn^ZZ@apJ$#b&@CrXyjJn*`Q{tM(eeRaV0msjrnZvrCq z{~sX#wr*Qutfsk)%#`^F42alA$?MYp>y|#I#eUp_&DH`3Kg#D5*94Bt4A>Pah zQtxDk2M1y);;;XVfVh7}z!%0FKdEA&XOZD1tT$hkjnAidQ%N;L!@@tpa`&<*T02Ok zxoyqmYayg=xT4Y<*6mif&pD1Y-bUW%x;9zQJO-S7q7~a!wP&_Qy5cq}^tJL7xF+SM zI2LlhQ2M0DE8W3Cf_|N=gaUY!2*JU{p(Ns!JpUYk^1r;1KRoOIV+728E`}NTZzCWb z5XnEq`M+`?{}KHD{{Gu)3H}=xb5nI}wwTby_g<+(@@*$#a-)|ZxYtEq*&#|D%%I5A zRFuSST(ySB`d;I{tLaiA_(IQizRg|c<4y9&w}|h#hrLsTNnKM7T{5@lh-c%5Ffl}u zKw4KUVio(`-84tp+bQLQ#E{4`HA>*Nq$YKflsOr3jq5b|_)yY}*DLo4eMp=hBYhmg z=rNv#iKyd(7v^A8Buur2X5AQrvc#%(YsCD_S-eRYFO94<(&fnS3&y4*a@{VlplMM> zez|G&+E&HR0dn7urLS36L-hY7CWc>Zqbp@*MLc^!JELDW2}7R!nCG=kscxZkjmNXw zmN@(1BBOcB_<%WwWZ0{Oc^$G00}<6Dge2Cgk;l@ z0Qgt$iTxA@pO-EVD1o?3+zAT!4x#rz+$4*IaOeGt-@>$xLEEw(V4Gt76->Z5tKawr#7Difyjg$;tQayU*R{ zo_#jjxxZFx@A@~_ddHY^%-)}_QL;6}1CAmDjUC+)a_eArrH;>UaQ-Nz6%~uT)o%Y3 z!kPZUY_w%uzOPs>{4Vml1Ys1fTK8Pv5tzI+)nPC=Hn)oLLVzGSkve>)WAHS+Ir#p> zAn4*mHo>QjgoveWVDbLoQXPiMvI-fo_Jir7iG#Qg7(3i5Xl-2Zu(zz)Ry-+DRz zZMEU}PhOM%t?vBmgOp?|b6gcve>c#NxGsXN;wOloRF>nI7NFLhKwBxS@Ra6zPZB1(?7jGh6MA^5bhA%G67ni_&fn-+2#D*EEk0Pgnd+tt-7&PyM{M)TIG( z?TIaLTgvV!8lu%JTWwbD8FGU5pcAf{ly!7P7^gv&T6@Ffw7+C%qfWceZXWY%F?zl1&heU5;4OU1Wp&>5D4C}%hKFx>tGap zdBd|lsnX!YyQ?%HFseKm%iHC?vuv)F?`g2{DCQ&1P%o6uWLCAoZV6xqg+D<#JV^>1 zgmJO`?fcAsEz@Du?j;;sB(;k^K$642C1&x*I6a}mAT=sBKAf%k+|H# z%?A+b$HImxLd^V2;$I{UMdQSYL{a0&4au}5ljG3;&?i(%!bloKuobzi*%ar}+{2h; z*oG(+uPr4yt5-BHJRon<%PzXc>kfJBc=^Y{e3IU##5x!~^gI z+DhG*M&=3r4BVA3wP2QV@QNp8{meYUPuiiKQ{p8cC%OT&5yPPXV1SvHgZWfB*d8$T zBH|P4Q7Hijy!p?GcOT&@cTA~L-->AZIeCIjR_N>?8x=I5xGqWHU=T` zj_ou1AoGAP;}}loq8;2hyaW=;L#6PCQ+$Qril*lT90(JBMLf&y;P0f46zN61%Z(xI z!J&Gizlf|{5b?x}9UH&GJ?rie^=rk{8|VtYB*|n7-tQJ6eZfV&-4LO?iyrt1xKj7G{vnhTd56{Jk1nOyBeDO09O@hk z6UGnZBzbNecxA{;jGOrz_{GLI^=cYV0*wc8LuqT}#Oy(OJGL?>$cKJwgeVTqPhXWQ zV5=6GO)2EPfNRAU#QaF(t%dz;{hKFREABmCh=0%s+5mb>iXw_Yo}%?5#`#vpJ6R5Y zCF=CR>Dh_vh2%z!>*a2(&fL}gYKt@b>z~yaSWTz=`g_h)1@j+OV{RbI|D+oKd!lk@ zJ!ca`{>M4mu{VcO_mm9svZpsB^D>3(e_BtNgU&FXqwc|u1WPnWsy+i zkA$5mi6o4R#($mUY%5>wtCWSWig^0obF&Q;mA!*tkaU*1fnQ=LC!XaCw45-;!7fuo zFh2Fytba3>wHo0*3ZWmvTIaA6TN$+q!C6#<@xI8U;C9zq4&qZg zqp}lO;=)pA7qBfljq6MPqC@->ZLZ7{FDEGss<4t)(iAX@pg5N)k0&ZgQ@q)shW6vY z6XRI^`s7 zyL%L?frjD0Z{3KY0<+s^3+~)%Ta(0aK zYByI3w8zZrBO}`wX!vGCt{jwkapC+}Y(?qv42mOa+A=36%u{m9DBHtF?DK(YyULoN zt=A!J>0Q}vhqK`q1;t5{+6zhwGQhhEt^tY_jgkP#H(Jgf(aQfM|BYpkdWLV2+=ZN( z!v|^+y_e9Q64C-v5`)s7o(t~>mD?$+Z&lT21Sj9 zy^t%m>&)8Tb)*HfPS~1yQDts(33S+Xa^jh6!)qYhUfnSKtYNmW?l@O#_fG=%1pz}# zl$ijHV29MkjFKC7OX|s_W|GBxZdk9-x5R;ZwAY}GsLeDQCZ42j%+a8^?5X)^G|rBa z(kwlel#XckP@d$|CSON2!}fI>TfCX>u=eKcIQ)b0YcvsxT&!u7*3}=5O7){W7Av+8 zgyILOJWjT2!sPCMJA#aOzTT}Z%^HYVQLfjMPxWWyQic6v-IPOsprU8OvnWj1t?v!- z${_}WGC;GdH5st;3HF!t2Qw+NOZZ}YPUCos<8>s){iE~c3jb(HZPMP+q3^k|_4S(^ z*PL}V#=B$^)oyyP-#JeKtj{NL`UoIkUsqHLZXUgmcu3WJIrK5afec!H@vivFR_y{? z>S5)X%=>AfE(4fHB<4X2N1gfb?C9uM`zd^Ije_#Q&g}&p2s>bnH<+D*j|B6;ON^$N zbVXeggIN4ZB%2~uV!9`?w6@4_Q;8ODNNQrRJ{6c!Z7S0#)PT(?o@qrvpo>IL8~0vT z;n6N(agsq*Ee~KHkp(~velhdRCQZ^(BDszz^Qca&fozVEwtuxx>OhUPn)sA_q^we70ug606P9$f$_b}n?`GqAaJ>BZaU_v@8m z=#x9acs+)xNtX$8lT{a&a{fVonZiU3mjvix2=kiojVp$O86AXgL5yvAhG$*(@`#mY z{CK5ezcjKBm||`V_U?gKHLFTKHphyfI-W{)vW(Aq0Qq?sjCT-bK03!`N{M(MlQtN> z%<{TO_)z=N2TgN~{ib+%a%?x1&kx)ZVdYgTx*6j5_m zlyz@UF-am)hbXLTf`3=3=hbm+>Z$25SQ>Uap2^6X`{2pb7cGNlp3^loAp7$Z!OR1_ zmsN1Rmu~;>%hwBBwC|qD%Nv(Pko&7qfFB`PCrB`VIPUu|8Xi^3IJ}!} zt?O#Z7->A36RjbP46w9DTBhdfgK2mC8P6*>JJ)C~w>IC@rI|;*^GJRe;gbgdBqTHV zsGJupq%)gXWfj+)xw2iEZIb2^VHZgU9d5WSEpO*`gX+t>NsQjAI~YMbW;93EcBDgcd4Fy4RB6)acqa+b89ovbq7P;AHCLM-MY zHI9RX!-UgvHI=`Xyrq4lD^YB1>GF5^@nzM|O%8YEdesK_nSA4A-MI@9_4?!m&m9CQ zan#Cmok?ES#n65Da(#}RNVxzd4V;lC$mIu7H4YR{V)R+lOOfH7<+C6*H>y%KSX-k` zOZbsukca)B7{rCer5^KzXLX*OR%dwy*>LvM%$oENtAKgKN_8W`(8@pk9m6VXY14fZ zCtK7GQTggbssz!>RdUPYlCtJ4sq+R*xWk%sj6TH#!}*NRD!8aB*~y_utVw6YVq73RPAsJT66Ysqe(<+3R>!U@`N2>NzP3GBhwE=O%F2SF$uJ$~1=8D9sVRb^gk`4pda^ z^{)9{+1}U#^TT2lZ3F4$smKPu9}6nzu;u1Z^fx;+SW3-ZD-Cr=WF)4;xUfHRDaJ!A_6VL)@JXe@|iRf3KM28vvk+&JJ&8bUTHEz(RCmNqrE zSHpLz8uQeBRPL6>jT>#De_-136qDbXcC}d6hKXoItpK4_=2jo!_rt5xCju|e{>s?z z+tRJpRiT8bLc3^>WE=B*Sxy^rJr&{MC;kkCeS>2U)QT}Zi4!a`9gp@n75z_2gpeYd|&qa9$W~5-gbXJx+c93 zzP_@uJ~mV0WHg|+y`u?lws1)YjXrJVcann=qw#OiA|gf`wWma@6`U9(N`BCUO_^Ph zJgm)uWhaCVhx8)t4H8_ChhcZF+&Q9}8J?ll)#=`GvhBHn z;Bkq83nB6lzA8Rybe(KNV1b7oPWU|>^)g?}B?&C}Y-o2hwI(o3EA%lOLx4Ba=*0Oe z6z>HsSkDE1Il(@%Bx1qdkwf5 zD_8X_lvE2-OjeIASg+i?M8=0i4@?)i0=t8v1YB$>%pISofHDV!V! zs{K-Ek93K~t1urOQ>E!qj04sgUb|y1E`z_Y900B#5t%qLTX9#pt6%UK?uDkDwY4gb zkj3c4Osk#??vHS%t9EK`f{)Y$j}fhg+SNBDZEwdR@0uF{P=jLUb8Vu{W;je=azWem zdJZQvi75;Oouw)o=P|bgj+5e~#KFl?)q0ccT8u_OCZNo1{OT3;l4PRMCeR+h;0Y^P zakT(kSl>hwvgVhScW&t*um;ra`{N0>vCl20MW=wm5g&(G)(|ThpLIsi?vUj8AZa7MjF;ZuONmO$+0%2X5ArEosc~L&1KS4Nr*^Sp68# zlDrAlx_>B&@BCbn7+1DuMcO8<)Qbwthmd+I=%KzwMr$}#7L|K5EwE+J>Vq@IBq!kI zWan#eqh)78<7Q-sf7(JxdV9j-ax{OpN9a1ts!_k)$+(HO-`ywJ-MI;}#^O<}3hRo& zskM-$tgkD9FaMM^iCb&M@r;A1{IuV3-Kj$#PGHOymugUI$orxgvlWH*KOVWyz z&3`u{dzsI;1$Ql`yv|KAEG*k`XdzOFFTdT3-b;)NU*MR9j~J`8csuu1-r z!6=0P0NYvp7{VXyC)RqSi(IsWhjlG_6NucJzQdM>unlTWXK;Y`b@ipOYp>Sz7KZ%b zRgx1*#e1xYF<3-@l?KS#6*Kxt!iVrA%i=*2gOhQ-&B{-}hrPThsVII1y-d-~@SN*w z;{lD;5UnkknuJyaKl*1eN)5G2J^|JvPrxh{)?-!~CVN6jur6atEE+PC+ ztvm-rjbFK*0X$;(40B+4KSaoC>T{Hy41;QI$$m+&SP)eg#WW?U6a@uKP-w_5$J30f z7}K?9RFRyIvmIzQ0RN5=AHU}>lV)~c&!th&^gX)oUtJtoA|aP8I2nxA2Sut=}i$XmuQ??6b_v0WSmn(6x}#P zs*y;7UQYx~zr*uW4qgA9Sc8~i?Bjsc!#|6-wEJtu-%U{A3or1MIJ>gc>>NfpY_5AT z-ZO#kY@c+;Pju?$E0ptUU!(79u`k3^sZe5Tdo1CY>{d4C-LtS)0wFPMZ14c-mXg*c zfJbg7(dhu{mh|T}_dBYFY8{_!t*#gcJx%;L$On7vfld=($@7(7T_PYPmb~vA`{Xqg zgbgSxL&Vo5iSutWaxJ^V#24!L2i(1@0z1TkyvJ=C@@hvS$x{{BtvX4z744h{p**o~ zOF|C_C)~5gP%XQV05mJ;v&)d(^9tCz8S+;s>DS-zZObA*7*3dHJJQdn@UKFo0j0d3 zeP1*&e)4<0QO{v9_Za?keZ4~OlVTkOYqq=uWt|DkYB!dUIL09g@?pd7ch6o z`Z6SFyAypu0Z@IOET!+sA~}K^)pLU2XE*?>@K$PPOn_vcAH&U;|1)|> zERZh{fM4oe4T*vJo_0sc-){)#iz)FK^%Xb1G(cICv!PHl`Ql(4pVY5p&SZLrIo^mz zQftUO_KsAzp)fsE;8u|E`e@vXwE2nn%C=-3h#%}(IqLU$DcPss(2J=jK48$lcaE5T zT2JC#k9g+g8L97!>SkNGs(B%O8N>$_@I>|rk@yY~-up*p8~7b_2d=u{Bc_^odx`q& zI`#RS>}496KB0h;AngDJIag%kuaH{{>dF4^>hLhl6Tv<+ zV1BY2`BulMw+4}($maxfPb9$4KD*XPi#wBuFX-9}ZVBJQ5MRgxl(v^9hwmVGQZJ;q z=7D>ddr6rwb|zR9Wd3m)Y49T80}sO7gYi3?f>{jrTdWB0SPH$=F*T1{mc;45h~KX4 zZLoLL9Y0Hyzo&Q+Ul_WpUcfKt@fiNvTQdnXYoZ+3?hQJ`-0lrZ#D4(?DVGJ)%{=W0 z<4;o(Ok)vT!@F9|L%s>Sm2v?inPJpk@BZl{0oHA&6pIprL1zgg=5e zg5pam@tte$y4Ay!5B$0JrQgjN=;xYT65}&CAqEmd)8Z_MFA@L-@BN4-q1G)tV_WFm zG%SoiCetd6vmXKRt0?Fb9U%QYfC!?EQBX}ZWzkyU>ly{1@ zp#{PN_N>wuM&=80V;>$1^a;x@7||Q2c1663QNTp#hcFh^uRl}^{E534f!GZSFuTE| zyYZtR_X|+_lqH=FIk^+?g^zRPDR%~Wc|rUjj_-{lo2M$-=8DkU9*5Ffq_W)VOX8LA zr4iYNxV;mAC;B85!QqCvM*xUW+;qC{Zf5UM`25?TzxI3uj`rwF#0cdJ^ zj>-(2&q)JRT}lML5WzB3OLYUEN*2Li#oeC;l=m{!oF=|6k5YJLbBJcqQ)B}h(sscw zd^uGN?KG9p?W@(G=ePf9+gLK0UQtH?0=lC7kEQ`UKz#q*X@H}Ny{Vm}jftbvKPS~( z{})4mVeD1g1NK{=9-$KoCxTIwbWr@;$zl?n;X5 zyb;;puo|?>!xe@q31fQa>JZ;ejGkG00=zzkCg?7+ur-b^8Ef$gdFj)_&5(~ ztZ#ugpGPNe*j6q)1l(E^>ot3RYr49|g1EadDVQfgcWqPVE7FMJLTDXRZD&U%kn8pn z9CHnYBPSbN`)pKLsW z%V?PFC!R9s?*rqA3~RabpAnRE>G;YnY~eobbi$X zd9Fv(eifmKjwN-KGgx`ybQUR~)8Fz%C!ONLr!y)0>N%N}BlTPyX)Mch$BKe!%H!?r zDji0a0^vZ!mVkDqKpZs3IR6tb}=!jSXS{4*cZ47A)9k zK8)$|B9Rb!1R6XoOzkEw(!mv$3e}2a0)cMN2DRBIHQ#N81@#$AE}^h-;^1GKoK@sf z^F3du+^=Y;yO|tjNiZOtp#F%T7Nz+5T-Yuk zvCDUkJ0JH9(H>Ki@i&y*G1;SJV8236MZ+IK#nU_9Mx6zL6UNMAp)n7?`0;>nXvQ-m z;oHY*fM+W`$p!Ayf0dDuSwHyF&Q2Y+b?Rm86WOBi2DE<#cgpXyzAy?LdLxydtfOBj zFv6T;U|tb&2P?knBHAnY^1{KTc>j5C|MD_$Mi&0)9V`0XvX&(p-yuZ|5h&^y8X^IK zLW}aGpCDZ9Q-MXuyG#;vL0h&cjW)+e=KWW^$REtF3M>t_EsIO$$z|)0-@*dMB7! zdV_5t|E*_1JQ0;!UGIxW#z}Z>Z>$l&?>&2ehD4$yHs*F(Q z14TbLLHc~d>b^N`C1cG@MUDaNdB`;vY`j=n^4oIBLPdl)EV(Bh2jG*Oe`G;9`$YV& ztscQ50tNQ&VifFUak~>=_cf#_I0#|ZuHm^3ZIJ>kVb)Yc59Tv5?oJ-VZq45f6(^Tb zP9J$uERIk!*P~ji2z9sH9N(hQW;qkr=cLiF+ZP*8G`aIv*p*IhDL*Zig(C5XAq%#> z9nFrJ641auw_I(IalGK|H*dv4TNaz(#cA8*iqZvm^(-0)Dp_wTv8y1u6DV+chBm3$ zJ2lq9+Xyg(^vvl~c|)}R9DfAar1tJZRT}ge)khP#e-IMG&_I1pr5!v$G(27++ zOb{lD#?qD_g1+t1N8?h?j_84M$|}(GKlg;~@VZ8BUHG;F#8$V8evt}7_o+3(tgBcU zUb0c1pqVqa^o(5^LhMWR_3ukQC8$&HiNXonmLG*y1KDG^khM@HsYq2@U~A$A<7ZJT z=tg4LCaS?ugNEm4O%%Zx?F;J}VnRkDB=buqu1Ol{c1t9lT8s?-4O>Ot-%ElQB_f2&*fH=EE3pri2TmsLbTO|9J(Gg$jFNk%269?&;glZg%ZkPhZp+S6*I6kf8s3M zpWBJkM4))5I*}N1hWqh#6t`1lZ3mopYgqjW{MPgj6aUbo*mojsF+wl|YN*K@JRfTc znJ?^u$*J@8zOSPff0MtLQIVbX(xu;)NZ+Iz!VhyInSAaszCC&A;0IfMx|NvkOtDYu zpeQTRHgaUT9QQ+Pl`f*>V*@Bj)sLFf{X-en6$F&~A+V^^k)L=Q-IJ&OMK{IN{pnry%2NAoid5|aWGBi5YRXbu zlEwFmG%lsfOO!qoF%8l&O>>l96frFr=E0zCKf`3V;%Zda-4@O!t|)5UmLJ!wvd-CX z@wI;(n}!;J%lw}(}LYzw{iLqvs|Gkb6B?4uL7@xX+Z@$w;i7X>VSvo<&B##Rl6!>bh(mbTim zjaLgk?PFJ@jrhVwyLbSv0TQloNJiFi%s6OyT^Zj;E@#zS1k-~Pm~nG9`(yQ+F5V=_ z+dJedQUXoQ^RnL(*$c37RY#Uf+QNIqWj%OhiLeQ+;aTBiQFpQL;DH^4M|V5Y-qZb=^!_xzlAOlpVxN$qlkzrD}H@?Ji9MqYvYRFR0qICgk_ zhi5VwyX?7o4dZc}QMDJqTj~;A8?T>Jn#<*~)PcL+XaF*>$77?swA`3WhO@6Zwevr( zmN9!bJ$siXcLW}^#gkg7WS!r#mKq#C)?%Sl_7-O9{ANm>k7Qsa$f5pZwPby!fR?Mv zl)>#$PS^g<3WTqJvi3yx%@0Pt1!~k3|Iuz!?7yEE_P^{l|Lx_|Kk~zh)BnlO8?_m| zx$WY1WjFJ1%@QGR=pwcLAq)bHY-!{7Hnz}MEwY|u%x#Jg5RT{yZ9v1wLU6}K zTP!mnX92ayETEMKJfWB;peAJ?Mdg+LLcpuh@gyb3toK3I^7Xpubk)Az9>qY*XnJ&b znB7(rR(s--+tcBe^47GKvt%Xj_*BDbI}N3pGYB-(cpD@2K6lg1gA6A=W$vxr2px@8 zj^J~YcXbD`X=>kx!~z;br<1`pd?NGf)zZ@PjA@@_UO^@fA(IO_gD;n#Rm9Yk@(3a9 zK3@()%0e}No)l9oNtMgP1uos;Nra6qgKdcu7<_$XV4ki%eAKGtMTQ0WzS;I>RO_CbefqRKVz-4wtuQm`=C33}+;B@d$B#bUT}EsCUWiC0tlxPt}0t)j-a(`_L4% zwka;Jg~1tin;u3)p*U{<-+Yta_440PVRG$coXfzdJPPD~1>BXYffMwpu3de^!QB{` zP|`QUn=&Qn4n4px3)E8YInr|GBPjfjAgt03{!~`Md`qKxh43Dpdd!j?ksW1L)L4or zE7h2K)^Di+FPYXdc3Aiz7v=-l^ly}XoX0({vHL=_@qX9gP+lZV8J1WiIGz9?sWIV8 z?1xOF*rwX$SoHO_+Cn`$tHm@+)$Dm$%&Z(;7|{Cn82 zNInGOGuSpc6*CC8`od?OA$aTQ?6b5b$f3j==;Vs2t17X?(zv<}%n?pj>af`G5@$zj zyEn^Av+@Hn*cSEQ;E?!Je>(YeupXDK~*PB8xKKP6kr!$j5=SV95DHTydyYGjWu_a`(T zf2+$MZlUHg4WiO=ZuDq`LW#m%<;c8Mhehei+I4)5dx0aC->g^}`hoo#`m7a&+)Nx7 z`6KQv1(TUE;D$#y(((bn*$gwxGGkZNE!!}#s~7AMe(&$bGo&9QdAZxiR~ZfNCio-B z*ts=ZEfWtGd&b4IQ^H5julM&I+*H&AXDqGzPw9pSC=rRo??GCv%qxkNOHPq7RsQm(Yi$-9GZK$+;6J9lO&dP(%^Vls=2+n?w z@!;34a79~mCeH|sCko@0ldi4>q`F{+;ZbHfb3SC9iZpT+ug1cKF8DAb=kiC225t== zRclB${%|f^EJtuNHT5CgiQ`^KFGWzHzbu7Cy^y01G#NSj;D&sz7ldsW;&kG?=rUR7 zdZ6Phn!E0vM`|3Fv&?WQ%Qt%|tU3L&&g>!B2tP@kTIHxt<=XZK?&ljL>z{HBaZGrJ z6|P}}#$zor#~obZY&!G%gas@+;E55JHEr#jbQxCeF_S zDdISdPR_uh(l+7)=W_TV!!=o)dd}x7zhar#Db*)K_SO-5dW7?rE>=gB_4}H*3cZMl;g;i6bT~M__5cS z@Y(qf3MNx$5)J^{=#_M9u5d%06RL>qRBk5kO4(}}Kd;J%x`ZI3ncl3pe4a~GbL2Lw zXk1>WMx#`UqW>dA?AWTPDF+nOcuyKD%M`a+57a=P1z(e8$x5}h-eG~rRp}?i2lH9n zh&T_hNm%~sX4>Q7RGX{~d(+>lH27yndGHUd&8Cf{N{&S`EfadgSy6yNb&f|vG z>O~@=O|DnRp{&Rd?`&IV8SE&;plq>VOxXS8k112`SV_|+9@yKaEy_*nls}88gASvz z+(8{%vz&EUa!a~PRd&6moO;nmtm~v?k6Fz`ot3H|Z_QGwse@-zSwHOuBB5i0&9&gf zRixc!L_(f^-`_)u8`8gp42RIIn~81iLLx6|B#{p{J@FY(zB)+X;5JD%@Q)ks=*ut* z3y^PRy(!jcmXVAPxG|!i)=LMVVLVhRq-f=Xe z?=~uWe<~sdS6q8mpzi`cM9TpZ>bcvU;Es%;X@TOXY&)vjeWeb*ho0==gXQ71(e>t! zY17YhWRh$+MxwX1w~#^fP1IHi2IqlKx{J*KP!o#^fho@g17l*X^^jMgVZ9nkjBG(&HvsHu*-{ zLVAoVu*f5k!C*s;YrOHKUuP*~IS*S2C6Cp5jbe_kBZEPvF;Wa+w=t%tLoTK=ovQ4~ zwCLGgZ2!9Gsw!;3ig*D%Rm0cH+j&{whW3dWl5v}g9NBcPZH$s8=sy9W3I|_FrQoU^ zzWVug<(Wh z=)M~?f$0WVEQ^ZqXTJ+e;KZ?s)Y}dAYneA(dA-IzoQ*ps4;&vMTm@w|= zt2Mfs&{EXC-O^5~WVndl7Dv-2#Zgz>V0X|LuD=@inNUNNUK;t5Di8Uj`~tCO)qQl> z;~MGU@=>N}y*Eoo{4xExC~t~dld#p6+zXVi1~Em+3(*ISz|^*E)Cse|=C^lPmy!ct zh@b06YWihzqDfUL|88PVYOWV>1DBwiwpCBXD)sbRgK0KC;?5AZR$Vxq5@KK%Xy84z zp4#oV`*{h>w`6{bAodbe6RR4<@k&y7l95p~|GeI<25sp2Ljw+ZjT zE-X04g;2gv$zXm~XZ9mS#edkT42P?NQK#@4sWs3lP1&kaWsO{_a%9a|R%FR)8cVyYtO^rx+fw1&`GAgnc4h^(QU8dQ%AZK|?e>f~BbJ+Zix@kZdy z#Pc>1s-p0QW963Treh688xF0(xnuEWRU1%NA}&ZK;m&kaiJ zbz5?5^h<-V^rKeATz{M!WvcUYC$+C?Ob!0{o%ZrOVi~^P;K(d<6&+n)5MAhnF^Vxt zR3mzL71B~?N^BK#ePU#F^#a$Go;xl(?oqps9&D9;-eY~vqZM%Y^8Wb_)(f%uSC`8h zhMzC@NoX6UJMx~Yr3b+?X5gnqzc1ctfVVIG{Zk&?g0I##f(_ND7%E@I$~84w=$$S2 zr&O??qJGa0pJu$(r+jYK_9J05U(VC)7GEIGVsg%2sQ`i~#$K6Fn8^da_AVL9cNTE2 z32=Asc3wRO!}ZnS3*?p{vXx?Miq7au!?Z@4m6XReonc1n)?1AiZ(rZQ2U7Ri6JwvB zmd{FjUq9I1DJ(HDc3lHS!MlnG`=5Yxi3ji^4~waIK|}n2K5D6SU_J=%@fU_$8pHDZ z@&FKMZ()-13=(Hy`2@e7jf(KVy?@@5{F#jW`ug=uad;c=TLQ2f9$8WY8i9WH@Q(w@ zk$Rm0`Sdv%&ErV&&GWww@x}GTIW4vR0J|53-mY2E(t2# z?Dc9;^-V+b=mm42XseFJ(a8WPfOE@|D}5Aq`N^kYUa;o)13#M;KW#$3Lhs#RUU=vD zOH7o(ee8(*Fkh)oJXDzfO7d%t<@#^IY!(4b7YHzJD+<8;usxwp<)L4a%b`B?nx-la zn6!%lOOd>l$Xxp%fow`IQmCI$F#(WzN;^nPAUy4wp+pOrrMF=B^7+>lsij+flra6WYsw?{L>-9al8PF0#dZpazhy-{F31ov9VZMB_53y_N(@fp~0+7)>ZF6xbOKbwD@Lq2=th4%TKRxJ&dOS9({iSmx&Uz_mhf#in~qL=4f z0FW2mFW~B9h*{lX%5uV^zQ<{UU;@o zA5*`-dwTKDNE3+nMiHnNtRug_dV2V^<@<7i>Q!SD(5A;GVx$0Yb|2WdJmP)3g5RW2 z-d#=#CHXZ5^nm<8JwKd1R@kZEVTXNDPFtm(k9X`AOod)sTRz#~Lig-C2N+Z@++;8U z{bT~bS-{jeghS;PJgMA>8(L4CDbcrz`dE}a$?n%tk6=9*Td2G{Pc9x&wIQD2cXW~X zs@-2BAaj51G!k!=w)vQZZY;5}gbeVqn7G$N<`O=;Q2Di?dr|K!7WL8{s%QrGO5+U3 zcqsT50^HEvo+LRe-zfo4dHkf8bq{ZIqK(FWYqaTU(}|ZfrcnJg3OjO0TLpc*jR#|o ziMq882+wx;Pt%D>yOYp<;`LtwRo3Iyed}zWh|piLs`gUb;2&NY>(B(mAJHQS^I%qIwWxfyg|cR3*26x}V+ICFlXpKsPb z!M#fWvuJM&qQ`nvBYIH)0_bzk8ZN!usr_aps<0~#4Y{vUg0nE8h3An9lixMkn|}S)TrO`GBAzKlO3FfKTMtjAY**uQVrGXude7Vb&jP*)}4_xikRj z1;6rx5S0_54$kS;2eX~8yi;Diqst;cyK}o!PCh;R9;JXziP`c!>HIH@*ZgE%$ZT;A zwvcNw9lLXHfTB)U*BNt`bcV`};0efy3YCq3e85(oebBv>7KYF^J(r(AfR_YA_3r30 z>9cZ2VPa2hB+l6*(7PN^i~6=Fc2d6N?aBu4 zI@cU?i~GA0;1SdCfxLjX^bQ74JQ>73G|;U9phbRYE@W1CfN2M9tx(1lsqOL&ZxQ== zvH4+k!M<*YdDPJ>J<|_=iY@e(9ohM(32)Z|%p@6VchB=_^1jLd2okw!!)}E=QYR!C zCx|M8xe&Dl;|UA$f@;EcmWg-Dy#uq#7qf~JUZ_`OA9Av{_rE}Sr95@%>;h^+ddek# zM}L*{ac3UH>};d?rkQue?Fi6ajYILpm`;GSALW4G{v4BkMQDDnopafFqC>onnD&p8 z-Di*+lt$P0zn4FM?&$!_h4(ROeAZm0ed=E#?rPE4BCkc+eVD%0dH8{}wL;taNshEt z1?F4{NkF`!J-dF6;{9U>hS*!O{4S07JzQUsw>IcdMjf5*TP|d*H(O)+x6XrB#9&VhtUw_wdeTVB2&-Ei%P)%^415dt&8`+y3RukO}c5fLE>hIS-Z*%F) z8*Pq|fq=+a|D*Yu$bZpx^lvC}{~Om3Q?T2U1vn>dZP)?5)mM`E~Nmanv<57 zTo<+#tIMla5&BK6l4x1w>yw5c>57<#5y8#P@lVD^D?e`&2ukBkk9Ea9S)!r8XNHw=S1y?)+sB4NpEo7G!$sN%^y&b z=_4Vn!DR$zb^sdLd7}%WkHKR1tCD1ufGrH^X^vblmQXPc6>p19XDTcwoS3RTajixg zHQFZ)Qwoz6Y{n0p=V{khG5yq3_&gps=xqmI47z>FSJ`B$jelc6Gz4E7V`zON;Gljf zjEc&8|G~%LqP=Jv+q9`~hvKQ2^y`@X*kk}V3e)2|t)o>7J&U(Z;$waBoP3F4 zRiy~R>dZDZu;!x;1}vf4DRpW2{E<(WZ69RhY~+!fSia*vuJrqd4WYkG=MCxGue|r| zlKVgVmF0j~{!7Ax|Lm{&Z%4um|6M7olFax5~~kJJ-yA&UAQVS9Nq&-Jxbj)3#`)CD?a4lIa;ggzg6jX2iO{ zHc&okN9!(oAm~K4nK7s1xP*?ql2>H#-6=Ho(>CoFVsQMLu2*NPy)$Ym-Os<%Raxzbd>#ks6@Psh@eU9e|LpABfj+%LEz7Eeb;F7#3Nq6z=L-apYK` z;zH7R^kGx=n?qttT|_mg175?Zjh%rwRW=^Kxkx;(XVwf!I-uL_lsOi5CjZW^gC32i zo0Pt*nh84&jqHCL|gzmcX+L` z{D#K~VXPx>e#7GoIt*-kYVRV{d*P>em&;gyTRjqwVpV}Gx66LhccyEtid8Zna~del4zCoR3Wo%v8GaK{8x0z z7BZUwbNvU3);B)R#+T2%KWsx@m9#&CUMpCwA!gem$+z0koU=A0RI+LpRc;^O8}?~$ z{~2uAz!mvvSE-EvVMmZBIP3-zf4KApizjsS1}taD>;^SwgvA}5ga_ltGn->m!Iad*V&n2D$QUc?NzYMiSbkAQMzcKCq4G9wCIzUM;?Eq?hgu;BlrEO@(iumc&M#St60uKZ|FZ-IT4wOsPfb;))$jYE3Ho1P$HD%)73;sj(qa2&Z-xAyWlAN!-}0LuCvyD)ouK4cW`WR6+oTu& zsNe}B=>FHy&`?_JQStU^$rj2+enDa|D4ptUc}I32>oi_-`=ja~1UdEYSE1}Cq5&9O z`E!>nqj$HnGZYHGL7+kjU72!&ET@rq`M(UczzoD{Z@n;!$JGnaw^Iu~@&iA(Z3$W_ zNQhnMPXkXte$n+WT}6UTl&lw(udP!pQEEysz2TIo10jnRXjbUMG5*uljK*_IFu=4-HlG7HCo&WodbP@Vb&^h$~ zf2`@hZ35K)b9Mh6!`c7m5$}ZjH$NB=9I$Gxh2wc%tf2Y$TNk7|RWU>8Ym60|X-C(% zuPtMVjBDKUsYP#nDG0FB|JE&Bgb%c1rhT9xxoGerp45OD>svn6^No)~{s%tpO#!Am z`5*W=$1#>kNaJlq*3rseB6;X(MxHW{6FwT57yoSIF4q?8XYk+ zGg!>bOctXpW@ct)S~PE}@Q zR^&OQ_Y+}HCSn$3&m~k4FposgHNc?&C5=czJ)Mo;Hm*Mxx~G#`-CbAYs}WS#t2m*r zp|pSzzVAgU|J5COK|k^l77T;IKYx_3I23--SZ9*0c|Wf(Wa?Uk3?4CqU>#|LmM7W# z>kA9f%*f;!xJXwWWMicu2SPn~0A4|A>Q`}4COgD*ouXI_@??^zG75d?fpD9z^>v(g zYbp`mZL=+<39$A1vLQM*I<;ssGz?9BGdL`t7rQ?3BaiC^A0yjQ(*T{ebVhX}C}9dtl6;8ePVKCCAmsvS>l z1zt^VOsr`=88NL-JCTT%5K)CBys*6NUs*$AS{G?N9*&!IK3-AOD~ z4fZ{+50IO3y)_qmOGA4}7vBo;sPSgXiZP8r7$NDxQ^hP@enn8~4g_t9ZiVk1!}r{8 zAy<*i=)>j9TWVLv5|%U#^QEkJAV~2ocH6W<1@MIZ7)31$hb4}IF-+NOd)NY77?8KK zDR)GM`!&WdDC|G*6)OJ9X${H_t*u9K>ye*O!4-d_aX>8@r*XoSA5)-nWQs&0FD>3#m8p*N?~O=reu)%uBh*EFk) zRQX#!WyyAr#Ih}e44!%F369l(=q+*PN*Y}sS;>GpPMK7?a>(2vP6nRHu_5#p6zO}Z{Fg+P8*%pnuoWG> z0pmgcV~qxN#TMGN2UQKoS&)VS2$7NOkoBx^aSNgr)R=Ozff0r8Q*ctbso*2s>h>wY z>f5Fu0Qzb8A9dS}qCsg{;3CW~x{=z(j=B1NtiIzUsFemuCX9bj*@Luc%Z>9kmq=;N z?`5453g0$|;2{)CB@%aY%oe#Kf|nb>6X{yp1*TSs0Y?ylNbzO`8c53JP4xBPO>e?5 z7Hj+t;UqAqkZ_@}p+{2Esp7)eYLxJ+tFUwsqpp#E7;r4R4B#+(ssbZ11LI8Kgnz)o z4v2q)gyJX143~-Zzllc=YT<`V!^nR`uFl-6D4aRE~t`#|3xawpJiC z4o+gyRl&&gYKk<#LHkvF4dOgNdM=Ty)k9k$lQ|JhoQ!_>ET$F^ui$0 zE76h!64?;aa7a%$U;><^yAMxFunak0``1nl(g~8{O0J(8wT}6EoHH^Mvhk5YIZ~@5 zLAe|iM^-@kG#qNZ(~_R46>Fp^RV`x&OErU2nm;@p>?N_mxr+38dIFFze;$qzs*dax_{jG6vV4`Z{4{wks ze6vu(yG;j}$P-!n8jXIP&1H^djp;cj2=*0$acJ`Py`8BGE{s{xUs|^iyx-_)O$=D0 zHew=z@DCk1(3R&_czxWcBkjf4O}6Oh)BH*=wdbrI7=j09;(qWjnfsvM$l>OCWap=; zRc+w-PV4R6hYU2k2!djcMP!e$29N;;IVa;boF@&F_+dmAe*m_76}j}oPlZlfs8Sha zTm(qC&qlCd2@_}RfC+f&rOT|>lJWI7s{yrU@v14?_wuAZ7~`W@zY~yF_U8-~KoRpH zXhOR-T2*I5*Jwu!t6o1!=rMwk0`1VtZC@-}Ew8ZGSPIz~=ro2z^8Gap5q*&2$KYUE z*)AcHxB6VSFMFEM!c^_Ss81TEU$ z1!`G=_;e4lSc&H?1;5MK3B-jcf-z;f*?&qyEr8Q@?kHH{V*qRwF8!?bg2;rLJ6wxx zF@FtCfa3LA&c+J;w59u`Q=uY0-PRZ4(}R- zFEaQR-vs%Eg3KeGIzIT%-PFkvHMAEm+9d!JWeB(i4al)t4y!36HfD;H7UH2B^1&`f zUx(oyoY`z^60VUR9(CAlt{u`p>)dASAvB|h|z$?HGd?t=i48XGfPg8 zjRmG+Lm5u>=IHW?=6UVV64$JC=8pOGhh#zWUuG)DQ|Bo;+ds?f&qPj8No)M!@r6z- zr>#fwI2~UY8XViZW5w)E{tTA8gc8c2Z-+~$;)PMwC72edpsbTOq#;&xz(O9^u(xS) zEkL_YX^{6b=u%PVsrM!pM)YXcXfaY%{4~U>o<(mrCVl8@$|sn$=^9TU$$i1qE`2)T zOl$LqEM0L*Z__n8Hcc*#NiQGXq-SYyQv48M<2iWhSUMB+Xd#qXKa zrr%ac%HsLXfFTmG0;Y%v{CA^8*2FK=Nw~0IW}vVx`~!k3C zkLhqJbc6OpST}4aAc4)(r)&u4jZT6oy|lMZewT)u)+yAZpcfXdKH=g)?Y-WqG z%GvGj+AOiAJyP!A%{~$kWlA&=Lwyo(k`*hN7&;F%NBl5HXW>Ksr1qUd`_(CuEvm0@ z_-;v)!%8u_zPG&6oBkVK?fG-#QM+6@8$^PB&?&!PeC$lue|g;Ok3IMC*}VFLfTiI7 z3T3c_CH(!%Lc`u>=I35{3Sla;hVV0(GB-sWKW>tIt9n%U=gXSQ!X+#C%hbXWuH+lZ@gt6u`mw7By2c9gzh?4& zKZ6zj6U8L?GAn6qG!zwVDAkXhOVBrF?J9ov$P`Jsb{NA|CSe)&!zo2h>$#QC0*P}_ zZf@J7U8IsrRS3;;XtsMHk;%$ZC&Uol>ZI;j#C7kHEO%gK)>!=juT)8nA6;DbuH3=h z`d$pRO|uxAd|4o~_jbxAb%*TBRhnVc^mTfLVbr`t+ik^E;+iSEwjn$e?>mz!Z`@57 zq?h-3sLuK1+8nO*{$;2GE{1AT!SEikg`5xXfcu%g!SKPP#>M$swzqL0$8upKIXXg9 zFE-xyA1-K(jtlwDzshUbp$uAsMZ@*WEt+Ts$ZS%CLa4`kY{LgRXa;tD>qLji{v0In z*MVWSuf;OElohosZdS}R;Mkhk+-uBVYYbeM3S(P(0Vz-rU5+AhEoWA>;X$J57#5RU z-tQuqb+m?B!z=q^%)_h9$&MPuy^ne3(P_$>mUS*vw7z6T`yQ&7svaq$Ns~FFrRcN; zVOW0Q82f1hE!1wJsY3*M|AVg9o|s-0$#74T{UB0!#TsEARr>(3@5BK6WVyB!CsWp4 zfyE#xeYkmNLxtCTL}S@ew}^FlF3ca-LdEE+fd8c{#*I=*WG)|>V?IEjnq#RDX%Q6d zJ_dF?fYL>YjG?V%K0Se@l&3iRCOihy%J)@kC(JS!7nkmV zZ5NmT|5&XwCiEr_1L!%;=#o**dgNfvL*YwMb+{NL%`KeUIs0@tO?b9B%bhyQx7cHwb0F-!MQnEQP>}V-La=_|MIip*wPJfsg4>{ zxRZ^jg5VVO)`F-~787+Jv;p80vyYJ$h@o8zMORzRp#og#XwDd<4lhaOx*`C2RiQ6v z&v#pFM%59e(wf$rq~cuE@Jur-)(g{)k69Ze8+mn)*#>#2aTJ*m<5HA1ia>kuhsf7_ zV%QgP5CgHqyq>NI`%DactbVv&*G0Cj0sSI9!Z|p@;G^ie(npVKvEX5J!_eB-BHZiyX37dB)p#eHzZ@tc zWAYAG4%v4_+JZNA_ctfV?1~i+8=FoV(L~d$32F9csVs(uYEFkDn&JvAT8W3=TaWwH zpkLOX-c$sY*GtIpI@#M43_*^CNg9ONcw5y;L|bdI%rrC+aaePXxlXPX$R*J49A_Ki z)7a3;$I0Eg_H@rhStDw^BVT|%8>o(vwnvIQZCz%@_CBibxH>$}|E2&5{7*kavMU5V`7I*;#J!#br zJtO=2U=(`U0LqF!OR>vB#Z_vE-;2Tf=--Gq?*ARRs_YlnzT9PL&(mSGKR#REclD87 zjtX&5xq#+Hf1gmxdCaR;`zp49i$G>G@X`emBGkN-Pj*?0V=APx>j<{GeUel)a=u5`cXrn%s z7G6N~1_`nwd-B#x&5J z$!tSsOIm;J6*NvxvNjQVPn+^8H)asg8*UP?cdFAy-IR|jg z{q)2_=jL@`#YVV0HkDJWMw#IQDl%^`qd!KQOk%sIbAmTwzkE~8Rb})C6-U)Bh?GM) zF)D#fX9Deww?TzCizT+QG;&{3cQaG7tAW`=i+Qcfyq0o(wcq6xMHb|QNX-qjwV_d& zOmBz6)8HC5H&>3zjxs)P;*?4lMbBTjMo4Oq-^#m`=35I~nh$z4Z4%_Z4j(clc=4%F z1xMPa)F-RGhZt}eN3qK7xaTDu#`RHATMb4Xzxw0vTDg3t9N`ey^sPdz{tm4{rt?~3 zg`^J{ybKh<>e8C}g0aIn?l6TLt_2V2M4BoELqZ;Z8hxyuNyYB+gAQ*+MtGa-NO26F zgdv6)?Yiw5QI#j1_w8s(U?r+gQ`Y(xotpZjTqy}vlRvq<<0AL_BbLP(qIm7zOhfNO z-d;mNyNj(Yu1-#_Re739c=zBJOg^RwD%K+GT@ zsV+qibHZ_7lzDuTMtkghxEXy%)~2m}08{EXYg12&pF-UOPHp7q95NxYrV7z@xxU31 zV>=Z$Tu3O;VQ|@n@tnMbfDwK?r4CEWpjoruB3ex7j9Z(hrwF>Fv8DC^JLDsj>Lbc?rf|#rxyzKK|4-sZB8NGP|f1Nq@_t z5*R1qo03y_NfGRvw&hc9qukNklp{_%n02xnGz~?-Nt}@Tnw%Ik1Q3!k^K&NbmgcG zCk@uwN8oZvk3>Na!@^uSwp!$#h?;lZHpwZ;WVN7Vcxs2E&+$NXi>J%thYMPB_3aho z8kF|%f^p@#@hZ0S4*__p`JdH$Bl@a~DLmL+h`P&|qOb%Q0>-b|}pD?IZewS`AnKWU1E<{n4AG8KD2eKhs(z_V|VK zH1nZ1!hyC}stryxO3CCRmRs^#(rPGp?ny>k(s|~c-C&WQ`(C-zYqb(hC z1b{U+N3}(q9hw21)$bKveD~~CJ~77u<==JIDl!}zRgn5Gf%~g{_e)^rFSfSYFkBX^ z`B^j#OW-PBR(lQeQ63RfqjSwcT}iAVe&L!;SqFH^=b>pfx>^#I!wNmsez$|UqnPDD zJEwh{cd1{lp!9O)-$#Y**Q~VLqDcWzO_{Dvn25HH>iWeWcbce5Auk|&7Z@whhfiS5pG+0wp#vfocCfvy1AHGbkk6E;Y_=7 zsa7QL=Xj@CzvmvPCxAV@|G}a6TP85bmI@ZCQnj&FEre>NBID6l#8aHiyNOgL%#d%S zku(+(@-nSS={Lwp=-vVX3Hj1WA(SZ0oMzDGG*|wi*4ULhdS|^2-_5(|wBfzj=5&jO zTBQy#C?3b@i;c}C*?is#Tptf5?ZD2w@Z1`g81`tNe0B-8g8Cc?yVg-m@|=FEvD)HV z)_PRBmHGp{pu@3jM6GC@w78rboq_WSPo_ToNaE_S&`(WX{lyu}JF2d?PD_IXOcZFD?ls5%kGj>RxA@MZh>*{kVA zzfX9aaD4YKP zoQn4Qn6j5|_DWUn^+0FhmzLu$1a&;n9=LImX-94^ljZe|5{~pW9(&zg%alJ~G(sb7 zE7T?+=Vi3Tv_|h#qumVZz+VXIO5L4MIsCOPx<{9%eD(m1Eix-h0LoH|N)pQFW|Phy zn+$a*@5YU>Hr>gkjL?(ZrG=JeNbUMu>}nY`UMmvFBE3I>MiDxkVC3zKAfx<75myRl zlEUwoH8HiY^f3YB6g`Hr`p*%?@Q)>wLhQRB$bc*@26ms(xt;{xF5nVpOLJWs8r49C za;I9xRl&O9geP8Ki)K?uXCZ>q#TP|AwW#J+&e}_}sr3SP-Bg&f))12iMurE5`i+Z` zMug7Z?-8S?Sj}}~*@nMH34r==-M1PAGy`6%Wd~~BK5a`6t>3W_jq_xX^~hk1j`qRE zH(lWaP408SwH_piq_D(R-=+o=l3eU=!<}(beuGMHynBz_&9dJ`r^DrZ#KU31P<_KO zW%PQiH>GH@oM{dS_}nvPM_K zm&jMwm>IeI@QLR>{7bc?wsrogBXlNxGyi=+i$enHeTN*l9FnER&tAuK?>HG4FhdD! zNy=bTLYmAuvu1Udv7b==_&-2fZP#>k)cyz$@v%WA;w`MZ;m;mK7U|;!RAQyDsoI_~ zj{j_4$1e@#D4Q}_Zf%&O6AH=SD+%+&s7KC8sbL|1;K(z?+^TmjOS`ipLw90woGvW0 z_R9dK!7A6_CKRgMB6f#~%MyOjb>T-yK0p67>}F{i^i+s8L=>%cM%{fj0(EM62{^HX z`GfMP56#Kfj~@3{qXd7eVCD7;2KdlOm_)2_MQ8-3z4QxJJ=cdNgmJ?b&XCvL zj$ZAXa~?<8!RH;xNS2C}q#0d3b!&KD`&E>u?pqc0Eiq2Mixn{o*TIXs+bA7^ka|9l8kGy|7e$qR? zaxBoEA3gt(TWdk3o!!W60gIs`-jr%pcCj7Lz1A@@?SWB7(+{;X4I(juiC!P&rb6wV z)Lty$mb{#~#W6v8GB=c_qSz0HL?oeI%x1roPNjY5EW&I?Vy`&fAAb`?`C;>ww&bC$ za)$;o1LJY?QhQ3j+N#?|xvRwa!pbDYr`}bWxjc@0$zYih=c>9*mZ%Q5&!eh4=D4z( zqr~4s!G4crmF03Pnc`8}#WT{WHCROx^)}8`gv46&vv< zh4KXXEg#@#6iGHOmi}F>??jhICB+1|cIiU%Y8CI&JX(H$CLWX6Cz4n?iIzpn5Lk%Uwp`Be#sA6 zIaCZzi~)QwnGb{4%u(nC$U&KJ>gzVMEEZXh6c+;u(Wz#@BY4!Om3h=Hf6JPVA)}8O zBxzkAbqG;|!Q!lvz`sMMH+<@?CQ*|>u4O4)fRx|^P1EN_fHKzRkV1Ia&dCU4;8fIX zZh^Ag;;IdDah{q*uR+4?*GFN1G0;N-i&wpof@+Aljgh1aoLgUkHvloCkl;`S5zcK5 z!gs)~lC^ml!gtUDDsLw=lMyy?<$w=nIV^B{lApv+i z7OQ69p;MNEW`+Vl(MEIZGbXZ5+(Mx?~(@~-{2-Y zjptwB^Fo~Yg?sZa^Mev=(eVg(-|h`RPA zdGj=nm5Sy{lK27clK7ZsAeGO`XEr=#`>Mcyz zylZY4$b+|x1=JOux?AMoTX>k`yOx|F8h=&9EOlj02qTX-?G=0CbCr-ZyszU zQZ$5bGso^gDZrh)X0d_`YC_8A^=C%{dYR1U!#|s1kAayBga&RUkiUI-lxL5vp+*ax z>6Iuj|DqU!Mb(bIm$v2n#asiB*$WR}4t75_y$xnPfF_>{&s>!0aoBT4F$O z$T^vIrLJ8gECIhrlz!ulx_KURWn6tBxCnTI$A(&jCfKS4esOS-D)) z0CIxuzs+(Ic;$^mXq-gi;Y_6{B80=%9qLi^fH@rysk`k>ee^UUaj1?|31JQ<6hyf+X^a<B+%*BB*?qK!9tZ}MFR-WZIhWBr2At-%(LA*b~342n^p za8d8t+Z{kZ1Kq@e)K;+dPlM>HChcWif^3SPAPENme}H_DT@;&x`d`7jBL$EF(8LDB zlm?9<`DAsarfg*o2{y0L*Py$!A^C)}<)#P$DUp>G$T(uNwaV5e7+|_$`e@I;hSX{A z*>(Y4hy5zWV6PCWl#qV$@yITjWBv*#T$uh&;sYM=k1-7XO9F^-1l#sGNmLV&Nm;0S zU*oS6#hx?@UP+Z8WowZ&k86QDwGFJ|WO8v*iA62a)L03T`fhE(+R_YPw7NU0h}9Cx zhwO9;lgLZ#IhK$RAzMJxersxN*a3d>7X}^tNnOz4z5IEO>j=?XlD>;ScK&^0A<9{$bj?NEbMGJ3_y$An@ zdu8f87^P@5|B7)pAbvZ~nLK9BJ{g~mduh*R-! z(u|2Xb!#hYaX70z{g&P6Lt&(C-6Sr-OxSE| zI8gY@^`W!>_&}6oV{R#jGonyPSs#ruxlTQ{b0R`kq=afe31Q*&yq8a#`pvUB&%~p9 zTYZ~t;OVZZBiz%MR*d;Hk#}%*vx?de9ObB$VS-(GN37P2RnhYhvj<^uAG{^`^FeR$ z`L&>366g^>^4Peq!$vjP*~Gj6p9DVfcgVZbm*JV+i_15nU7W`^YtN8S$f7?ciydfC z(@u~g-^9Zh&uW15U`I=UEF?vcAngiLNFDaLb?`}uH0F^&R^#~!W#30y?7f%(RqamX z2W-eS{x|^$Wvnei(s~{U@dFm=DdKW`Z)S>19CJDfRd>Twjab+S-DK+O(J{YKU@xSG z)LmiA;i$WK{Ro+~czS_3VUR7pSabKn=;p>$U>>1_hXL{%_OXXnEovVy6>92X>tyfp zf;8PF9C#=%@x+t!4(&2RTZ1?N%(*^difcm!#v0F42IeV@7PP+00=*fmo)UyF_EiKM zTrh-0N6rY!64R)kwN_C^$mKm+XBTDD_Zr!!^7+_6n77>k1Ze?zfHy(~WR?@jqLU-< zXL#pZOYMG^lS9YhwgV*Ul%DzkUS$W71SBQGqd7dDgBNsXq*9v1#qTM2{V}= zccfH>x3@h_Sy2r;bX1Or$u@w~(GaSJ%000M%OY8oWg5|F;;tSHOcP3{wbGzgR2@yq zHiNiKk%P&i9N zvSUo=1KMzIT+;W_haZUTisUR93h0GeA>0rGSj>wG3iXR-^QUlB;nm$QRLSf2q1)Ic zxSQj2p_H4qr+C41ASQF{y^=M|U?Qqgs27+HZ$zF6g!~4Mk%E3R25XI=cOWp@8PC@T zo*x+F@iQvAj%ey8ef^LR4VgT1&-haB3fD$-&qKS#y~u7Ckc8?smbDAPRld3Ygu23O zN6EO8yV&UVTSAwzeY4~a(j#$Ay;FQ0GTblTZK%DbTtrtvfS4#Dc4%-HjVtgKX;sfp zLV!}Qz-`RVU_pDU&88jfp%?bIUp@^F-3Srvq0Q!j+x8#0B>H4S%bG^z7Vk=D$S3lr z5ganaD=VwGja_^)!7jhVj~2+5EN_|LUp5knh?N%Qe1om7bB6r3mS4=S;R>KT6V{yl z!#w%?@kMUF*Qi8n{$`XTN5KOVljs!vuvB_W0?e-T1r zI&AW8BT#?PM^DsXWI3D!Z3EZd)1{MVg3hx7r*kJNI$$}0g4EFz@^(SpTu6IK6Pi$vlk`4r5SS@mVz$NH) zwYhGNao~;d{8L>OoJn`2$$X)J1wG#%U@GhpZWVC+di}5&z#>_qLaEKf8;cE9Stp9sVt6)$SzN{U{AyJyjoHQ+2r>Y?a%VAab zEunvQw{qPdWy?a$NUIPvUX~VF!_Fp(uidH zrrv}`YA5o0Q&Yk$u)UCj%j>G9-oWFVLjeGPtlJU1V{H>NRl-+J&;~SMYKX(B;b#(i zcqG*QFz_3lN^e;6_pp!}=q>5EcotX|(xl8|f_m1h)OS<4>O|+{xhsCERE#lnD&2Ht zYQ^K-P*C{gvt9FWa43Hs<<|4fp5$W8CrTq#UUv~lE^{YPCmOOUcpk3Q40eRJD(Zwm@bBf? z0d9E+gA4+;39Scm#G`+;~j zhdO|cNIP$q!*jl8Y@v%Sk4)h!K1)DwVg8QsJPf=iL+m;DA(+2>k_P24eRg`lI@qBo zpFJ#O`{w{AR%~LGo)h9BQl0#orVN|ZjAL>tR0g_dL^pJmRM3k1yU_9->#rIZU+z_K zCDP9w-pv114UE{QsXLoEIWrpB8Jm2PDF0@G=;<;3*BY3A#b0Cnn-0SL|0=2fFARl$ z{WCIPzzAmRgTd2&)Pi}+MT1ncsyaP@TiDhr}aKegRvqPoK@Z*z> zmSGU833D*i`*+1YU7> zIGiugb%1ocGEVc@tdC)!gg7y#IY{}H^!TK_2EmVcKjQUbT_!(-C#-LigKJaQb(8P5 zTo;KT71yZL2X<^TRee_X9gVPMU>@bogpE(0pLkBIq_7tkn%54kj*3yarBaB)7f#(C z_jOr@Ar$&xeepTOX&!K^)9X0_$(Ss25Stb;=V$qiV=`fW@$$VVz*3O&P!=r0U|7wl zxA*fz=}_x0{bhE85zfI zZx27W>W?w;bj~!baSx?<%xpI1pWzmoVn1AiL=3(cy_V>Nrp|K9KQ~%d+JlM5dyHIf z7L}5*{VW_KE5{xSwnoDW^Z&485A763v6FvCGAg0szd%wT_dqhDMJXBvq4!8W6Dc3; zS7Jjz3S^0*KL~QHDZ1(9!NDJO(6T-Xs0!S934Pj%JU8?kY}7xzT%jZ=+Je=~JXqTd zVB8?0T9dR>ihzV`@)`T7GKMevY2RzZ>ythCB9yOE`t5iQZ)Ml9^!0geI{A8R`hM=DI$0)Fuk9XIAszh? zU17#R9T`63ESG)#+>!Ph4@z!&)Wo{p?+yWF>mb@#1 zBh`mt;2M)Vl1}t+{nBYPr0DK4W|;=Te!gnrl6iBG?z)J}f@x!5x589*RnKOJGh5Mm zop<>-_)P}ng$&bEzQN_pCwnrzwmUjM80l;2NZib&<|lhHSCrW`BRplp-~BCoCtu&} z$oaHoM#%^vm_W&>l^z8Gui$O?7~A{D_&|*I2DO(!IZ&=esvDCa7RNhnoYD?Ep@-dM z6n2`=>=K11Z3sRJ*q?u^L7qdk(MEn;`E$v(I>aY+(QTQ#0k>OcFc3Ft%0&G1l;CE_ zA}^G7(xn-MJ7y-&aMV)n{?QyV2Ph#60hQ=1-XUI6L(|i01Y_#T(<)L3Ng`L1oi|ZF zf9b-Uz}3Vx!nsORjJG#`gze2URW0h2!;UB0WtE-#l!)QK%vqi?XuG%IHdzGk5hd)z za3y9qTshK&JGl$Tm4zI;bw0?WfL2- zfDK~I;H5xY;R;a!=|pC@BNtWF+;|`hATknoIu>%oxX^2i2*$LkwY=)7;(jjXjG{-D91SkZU7KNY|T`mVXv86N2uzZ)Q7 z3l<;pB(xpNc8!6}2=d+>DG26GcxexLIf(m&e#x>qC7VDrN_K~a>PNT*=PxJjr?jPp z;iJ2al>e%#uN(jPHTtzcN)No}Cz9RY=g%S4x`;pRzMEVQ%QHQ+q z0sY-a{6iS)p~n|t9}D#ZZHw=Z9fscq_3J~#J5;z|^tp=vQ+?kI&Nb+1BomSJuQ(W| zR&bRCqcQx+#ND`Tc!9jje9!5pmTdT#J7Wj8+g&4=?e2{fV_MO3kXnWugD7-9EcExk zeUFX_jn@kp01yEEZ!;wT0R4IPUq7M%^nd7k{s+8Cw!bGzDm8BlarnvF+Z`E&fixJT}(|s+>wc0V0gyFJx-3I`}O=pZC}C&xHXv z04)CynST~o{d<7*_XuJ5A41UaO~z`K3HkR$H8S_o=Fc&>kseIx_GXTdgbd~&>u9vv zI%6`A#IfpXLf%{a#{@~ z2!N%8Jb3dt&M_Ec_PvBr$~ux{Zw>36iesA^r5*Jj$8>555k0)2?F`&Q zzuCu!3SFQ*X}a8bNig^y7o!ztiK0#5k>G%w(CV~H{{0bSbbRM1C0y*R_{vQUPD4nx z)&>;}gTLrfK=hNVazGqI7aQp=tk-EmNmI@BlGYVY>O3}**cgZ*r{VFYsm>x5;3M|+ zf=n4ONoEt*^Qdf~=FsN&>xVY>Cc? zDVv9yl60O~$Mqb}gGkqDX5EasQ*W1EV7OEPw?2#W%zK@W ziwhAKk8j(82_hOEvXe-^#YLYi;H&I*auidiZHJx&IDgr??b{bqQL`oosnl2b{WYhb zW`wqbPZ9?96_(F1zACo%51K~U>AT0}%q4HG#f?_ntvnN8oFLeRysF{{7lSUJ+nOwG zj-y2-<31S2Tm8gb*vk_=dC+tgY&=A0WRY?>`<>eExKwzj*KENLQC=z+;ooZiuxAoP z@lm7U>pE5U{I_?#d{3N7|J2cth5A?KA_U<6zciQs>^s>1hlca-o+Vmk%x+Z-y?O0T zvdALAmrPE?A{3hJtM#ukA!6|e#UI4qdJAeQ`Xr|;9Zr|3h6nB!b<1wQ$zwtxiofDW zeo9+{GT=XgAxlaUBTin#iq6SwEUx#t;x*>a#rR%4tY5sXuPj;NW~iX=y~)tGt)KgP z*1icpB)Xn#&zm-Pyq||%z=)d((TbQHZ1vb%ulMpZwirS-5nc;d2aK6-9E_Grnp=~~ zB$!!Pz=RuFvtVf=F@(jb84w)=UX!7gw;*gRo*39_`kc1BGg)rhG-IWC@mcG;ZE6sl zLKGryF6!)USuBi?3EJP4&T_(l_p>#hmQLoC(TU2u1B(cikDUy3H08J)A+<=mfctoM&TWJ!biM96ff!wn1N z1l&mTPk86g{qwVOS!>#3@DMVYM7A^6r#xm!VU-N8938vE-Yf6PMZe|68x@F@6-XQ# z!`ntjp^2*!x0MWR^F5{V{W$VVPNUJ(%*Q{Z?xkWl_QT$4FA&u!vcd@G{nX&B_BVt3!hF>ze|@6#wN((N)Tmm&D#O?lT*m4YXWy*QHEj3dDF_v@=z;XZNc0DXH4uMlb-|f8|tyGFO}^XW%I?q^K(O~{r=yH zyP*=z{@#GU_WL?Wm}d02UQpiC^SI7IeX7}QRfdXUZ-aj%*&Oz)a{n#HFdNxl-nuLK z3l&e{a~g$$cC~H73;2w}jQ^#d5C{BY^@@L=R*midjsEZAY+>!h_#cZFdjGMmVPjxx zW^JO!^}t5D+T!(0nleP?!qUK}pddntIa3ArfDa{8C(UmN8MMQekdQ`@WJQ%pPq+{VhHu z*=QslJZm)-Nw#t#Hg90X`yl9T>T&st^!B~KdC@|j`Sa1M^)kA6EGdV(${oi!*j9b3 zw-QW=X$;RHYAPO9y2!)f@sH4T9L^G22-XOsCG%yK?H$1ltokK}Ipj_4UdsN=mcZf# zD-DG|>`%BO6i!DJplYz^QWemQ43DV$@N_1by7ZvSSiJ4 zntuN;{5l#eahCviH zWJ(7dlWxI9$exxJ*&6c3%sVdAe;mGttQ|C{%&{*glCNxdLyHTSx4I-EZ12$`DCF#C~cPjA885z##uV@^SNs@yJy`6<|bSUY` zc{PfHHy@9yh)i9;>903qm4^d(bMW`}aLrec>wQ}8D_O4gP@=ygA1(z1y27w$JJt?$ z%BKgX3b9bCBLo`C59jgdiOO$pXj=Y`0@!W8Q{;ROnm?bBz&{U~MFC9zaNzt8axBij zZJX|&50-zBO_)q}TlO-+117yXMQ&#aO`Ap`1J2;4YkO@Ap(#_e(6m^ZM2LIGdxTfP3%)*ll4qy zNk^~m&Z#wi!iUKj%l$Y!it1IRNwj2I&9UR_Xso@N528{hHJi{-RW& z>Ou^<+gRCrWUzB_$f$A+oxH>fH@lZ;{&r7fLO%CSw;!`;fY+!SYsE=t_1CF_p|E^C z8c4m54ft?ac)D4mk`P^K(sHZ3f;rcxXF{g}!azxCyt8{b`VGVL65TWb&4o12xr3@5 zyx?h@j5_Isfx-sXy@6x*-<+<(MXvJ=4Erm_DdwqPh~0m;f0MkYE~^K2L2sQBpl5qM zMllNTy86sx&E1_ntrt`N1vZ|hx@q?(*mM6fEffPF{6nz+Sz2ggZU5h-4LSZE;xPXQ z_T{`=30*2+#wf9`Yk=+ohi*L{Hkf&S`Ns)VhEuaqyRR%llzMoHIrFCIG z6ycyx&cq;(dl`sB5mxMEt|r{bQT|Hak&_}cduM4l6M>|<-W4Mh;x$6KQkA5?Lo>uO z`W7cOxFSsLi*k1-z8ETq=Oq5_c2PJbk|k?xqxO#VpaSHu zJ@hUcL6;`=x1b@d{70$M|3lk52H6^{+nQyYt8Cl0ZQHhOtg>y}wq2`i+qQe{eeSt; z_wCdBp6G~+k>h>F8o!jL(cm03c*WOh9uq8iD3M-NcZ2Lx)Jco% zDH-S|)?}+{0TsbP+KcS?x7Rz*bW(l740sPoiCa}`gHiNo5bA;WNz70vH}xmkC{(=% z`2(2OWIG+@ybhU6m8oF{8e+j#p1gW;IiF?f5s6DpqHne)RaZd47`KUs!-6sr=Jfnu z0bTu{0(z>y+p~ zvP;&8|LAReGV>vqN-WD*v*i&gKq$UZC@=iw50(?D{4WK)WIIrPuYn8!$U$g-0I)yb zjb9vMDB5Tk1MU;zH-%1IeBJQ$c5NCvcZ;iG8+tY1zx@7(varT0dGaeEM>Uu5>0fymY1|Fz8&; zte?uD*#HfL$-d+~RM<=ZWvm*+-Vf<7u zA~)MpBY5!TOjxkZs$JL*;-rPihhYTOEp`C>J5Zs^Fxsj+5I)y2nxOKm3A6~Ws1VUL z?03sXu(uGyUqTLjc1#)%T{;z(G;#W*Z6y5~1WtsT*g>H)t6_sq<$haL1nVBLh>|

*j#%2km|{J)M78)g9B$`RSfq2aI#b6-<|`WSfS=k7oMe!xKlcYE9ZxEnh-D4 zz@GLIv}8le--xkQXqM|hHfu6vcg!>--WuJ*xH&QhW@Ts!0R{;*q;iPlQ5AK%EFcR! zbb3|G1jTUMN{{75Y~I=MBqYL@*iu;15#R%EZVVG(O%LWe1kU3T(xObBob^+a1=;sdQ?v1p6wvb}X#v zh?Oj@Qd{#^9-qDXO@Rl}Vt3ZZ>sn&ZwI9%=xZT{A2wo1WTeH1Fg6?b6=g!+~@R)Om zCMQdeWQA(5*9rGKYJbG6H78*!Kz+kY&bpoHS}tZ69Ndxg&z!NVz{b8rkVK%4R^TqC zx2&|Bn+{t>bSxoZ>Mgsukh5%>RRCSo*}E0<8a7Qa?Far+D1VPX#;y`geP%>GkAAO4>LdKz^Y-%Gpf^ZcRe zI+;_29Ifbs4FyZ~XFMsq|0m4>GA_wZ{9%G%jmwZx}eICNc)?9c~kzhCC8AK9JUo<~I8wG?-CLaBjX;8W=kw59wc zsQOP?yaAl?{-Hi;jyHS18PnZvVY>T0Rl2*A1*xyb_JnXkFU@$1&Q@KmVekqx{v-|DPf{Jpj>vYxi?DaI&H^vHc&gHoX6})BP94+uui5>QZ(a z>bodyhx#>ph@Jy@)D%0^W|01<9|{3~7GJXDB*~)YxIWY^^-; zXH;`6Bw?&6rX3AA5~DauYzNIM614BrBzFrSp@Y>%8*1mN5LXpjS6S9rY_Y-WG~PeQ zltE~^NjzR0eVwr_5=b7Mf?Fxt;Q0sZ*~NC9-U~gqsU;6sQuVAGh8<+wr=H+ zJTVk&UUN$2!5@?zYkJO5BvNTFZYl%lXrZjANk(M`Rh3*ZBF&=}bm?(QN|o0-rCDel z9pX~LFA_y(U{9&fi?1~?Tma}M*VXsvxu>>^8yq-*MdP$tCva2TLbF-G zG1tvOwfnl;X|U)^4ycwalb}hiK9d$w%2l+h8ufoMsrPd_E*n1|I6uaT7|onqNUsi} zun3~Q;k*F0s`=`lPL>g1*UjeM&f$H2vQG%fHcU_C`zECGI6LQn_ubLv6zJ&N-X~Yc?* zdb&(jWX;HbI=eSNYgfw z(5$%Jk~n^wZ+8-TCg4QV1s4Khv|{7P{eVCwleuOxR^B$r{v7K*nTp01WiSnUo4IuPiV}d28#tli#Wg_lCVHa*X|eY8M5bC-F(S82ab*Z+;^HJ=6rWDL!UZFoaIru(Ixg-?g2S(!Jnw|8c zzQv{<*P@}5eF74OujoC4uX~5;bB46w#$H`5o)=>-AilnIgM3%!40NM{@Y`e5>m-nmR zYUReRz_c`9sbMf{8(u?SC3()wqiw$+P)@^;UQFvDU!LA4@^?}3q0)313*+yNeTIJB zOEE8pU)M%=y3#<`%MGVCbdRIB0GSoNp|KiiE0G?u-|8iTM53G9_>II-q^#XXP2#bl zu6vE2ugr9~e;$PI9FgA2r_=ZrgRFEj__-VW9twMEz>;ltq*Vx&(Iajqe9ZF^2yf)u zBgq(=(1ef0jl2bMyQq2m=uJB6c}Tvlm<``A%~)E}jzCf#urFRWnP6tD`+ME9ZB6wE z?--W@oGt0ePabL=tfWq^V{5H#$NRyHCl><@j0?Z@ZN0(XG>rJ})12O}R-+@wb6(OG zPoB@Xw^?(X1Byo7_jO+TP+H~UPNDt?0=lt7`iXNYug&k2pgVDCfVKv)1okU>#7vf) z@2#?_&Rc~}+x+M)T3Ov>=z_#eE)gkQkA8IWUqg%O{lOkNq6GX6cg+_>@i4aAt?X+RQ1KzuFqVoy z`Ha1X&9!4nrzMu6K#7geEbem}Q$-T*@{AYbT(1qHG!>L4S1lUVM*QZ4&C||R*M)~Q z8={nyugYO)=F(-B9XO3njmeFyEOcZ-J*E-c<#^+kDKw`LX;`(l3~Th`+rT0@=chG> zm@3LkYHU^L)XP`QN#fgcRLp9Pr--;X@UE_|I+Y_w4x4Bf7RjTBnSG>}%V|m*T}iFL zO;|9dNmVlU+tw`=OE_6ZO9T_<_)Wchi3=6dhO>{;LSR|qx!S{Y8cGWJD%G~NtVxU4 zOH!c6fm=wlAX$V=rV|D|vAsm9^QtOcIZZ-VVnMQpAcmWiha^Iycme^L69)tR)VkGh zOU8?)dM4+bRa;F|V#k>!lu&xP?M2I7tpX8=!gI<)21X+k2rdu3vh0-Ywz#76U?flde>ihD43!*!6O#L}iCov*e8|c^E zyvmkPI)lVamoJ>NhVnT}qb<}(xwPJXY`QXLFIgvaOky&th4XMXpOL4Nb%$z%OJoz+ z0x{ksVv42kU=(oiIA*1`lo~N?=pOpukE$3i*T?8xL-}LJnHe=+*fi^XsXDGM_%7Kc z%?P_SZ@8GmGNlY%X``6kaEQDp&4r7Ylh+~oL_WN!F8Q4D%Fb^Aa|II0ySyz8Ve5y2dL`Y2!O*S*_AQnN~?0`9TM4zn+!OfqD zmWiECU40oQlV@B-%vf45T2GvCM*ba}FG-8;2qnt6KCY=7y^bgfDXv_0lmhG(%IJmL z=guh}mZo}YZ=wg8sUKH4=BO7}QlX*DTR__ATx(GxBfY<5+$c9rF~=|=J|Y!#Mv%1N zfPob+cV53BP_>UWe(kuN2uYJce%P1@n=)NCVGxn{EiV@~yMSqshGUo-O>+h+MZG}VG=yp@Z~KDl3EB@p z4HarYw+tDHaWZbP)*MxaY}6$+4v`S8)+*>Z02Tf_Mdy$-m*jb##U7b@bBRe&8ouUa z^&+m0D4js0WBi*jqS#^|R+>QM(;#c4elXdqyRON5t7)&p-y-PRQ``!}EsYCnjE1)_ zvbL6<9~m&f?(VRjR+?GAZAPQ-Qh$6tve3&3|1G z@PAWWmWoXjjjbKFwj^z-4=Rk)?Sn3e0-(mRY1MBHY^ly{CCWA?6>~hRlUfTIkIK1^ zuBwDREIB5W%<{T)m=tp~1MXdC#68Yol3Gi#kIMh?B(JL*wVjMsF`ZH|T{CCmIX)ob zx+Zy2IcD-X)w*<1(5KvYV!)Wd%mAtw~FLns3j}T-$su+2mEOPoe5#y4WaZoac2n|}LBTX|;wJ(2}&1T(O zOVSvc5D7EN?sLe3RR{oVZTPH;GQ z7##l24XINqQL-lTtApqMYh-0!;S~Fc>?pfyhv&;Gl)E;3jIfZ6aG>0xChTaDtVR;m zr8;|ZS5nX!Uqo(8li6uygO&r$`4Us&a#@81C<6Uu1I3RH(Fi&c)x zXz>x8!xlzlf|fZK*yuK=Pz~o`>^qLflF2vq6Q}4PNB5B+ZZ@Z1R|*X?z2f^l=`JJ% zpeEcSR_;^*B2qHMcYQDdjghd5u5LL42Nj8TQik^XB9iDZ$ZYj5r zX4BQHmlE=DkL(xBxnZXVHAuV^Z2jX!Ug}VZe;C)3(KkBBTcB8$2}&DO2~dL^F=yP4 zi8nGDK%TO8tBgyCjf(calk}8ou6AiKMuVeXT!P9C2*f0h)BH%vaoZ#lv)f=g6G<^f zsc0o-(VgA;yCVkcE?^(zNT4a(j38MgZF9;Q{A4&DG^;Yr*Pj05LKkS4)I{cwNT%FZ zerR1B)*8?5A4#Wkj!;CTP7f8t)Q*uDRT-O5DDJP6e}lBkuJ)Xzy~FiOg_2rNcTqZn zg)#P5n_?e%1xvyF0sRm^IA@_vB9_y+-VOE&V*IXm9tlqnba6+wxdsc~aywhi*B#|F zE(K<)q0KHOArfi%)=Rb8Y|fCam2 zGN>fDK^h`ohx>rtsx`MtxDe4I-kMT2PFvFw`S^}_W^ET}a;tSE`kE40UK5}PFP}h^ zc6Y;E!2LTF9oHKiH`k61)73 zP_H9)#BC{O}c%EhFOSP9mHm%5c6PnK0@yhg35 zg`!)z4F4OUY@X0@Y&L1xkQ&$37TrFBk=DS~ysv{yC8x}zrm$30Z>>DI{1G}${ovVq z3ml#o0g4x^g#7X#+XWi=fJ#$zWlHh$&!Ldd6_|R5oxce=LiL0kM#)oMz^gUSasv*Z5lPy}pt&i>r^6}+4Y+ql9u!vMU$Xb9hj7hP z;wR$3Omcv+;m!)6qTp5=<^wCjsOiitQ9M_b615jFX4;(?bpk{LXo(j{SvG++!-e-+ z+9jW<51~9rT^6932J56xyC}dTP7jZnhnSIv!FM7A-6V*m4X!dZ`E)C}{Th0IM|}j} zct62s`7lx1{M({7pQC04Yr4RM(tfYX4c zDhS_M0HVWy=dllL6(Vz&v<-uG5UmZ=Q{hH+70Ra$cJ8VU*mZz*71nFkknVEO>?0o! zY?l>a5s&(uD#jNW+n<0PqIHmf9cD0m6HtskrI3GDwhel~o-L}6A-vB)-TV!V|$@F`V|}~ocTCFVClNPP-^Ku;zNBX z**05QAWshg4dv>pvOk0QUGu!DPV)?G4hrp+q8PQ~R6j##{@^ZPInJ3sNIM#&x#bEu zB1eAp_TH~b(!NW{y??_sqDCw4ul7H~kM-RQM zlFe>E{O-E5#?iIgEnRgr_6ALIeYg^PRVqI*d({fRIZ5?uRD6T)sgvB8-gMMz1U!1- zZ(i0;(mzi$P2hZMhhHvSv~O?T`d)ncKO@w>vjTFLD0*$is2UaCph^97OQ&jPxZpC* zhjUn0B40l}=X#RIZJs3g_MH@T2M&jyBoU4{lh~FcdA2SGTVF59(!Gmx$Mdyg@135s zZ|z?DZe9c5EV?s`RYW4(5&7tmZziey3_F&hC54Q1 zecZ!{pDm-|cZePS#4_GqiJB-;Eh7dVaJz>Ds9`48&Njr$>~h0#v(A8`v+WJxyT^>P z$i!?$-Ba;?W-A3BD;7vJ==bYS_9Q4wL(x4)R$Z2L2EM)3s3661IW#bI6eY)Tq3n92Jw>*&&5l zgu(Cpb(3g&U`!avWIdo`kT+A%*o^s7H;~p7Zr|_i97Xe4xnA6&WQ8)>(`AKx>|$@i zpX>p;Bix4!znpd5ArxoY%n zXU&LwrT}zL3*A7>!n>dP0KPN8e@XlE?&iz=S_GHW)%Y!65)yRW)DHZDz#V1vL3B&b z9om^Zx(DfwloMFGCuq>T9d>a^a&&EZG-#=9^NXn2!4_--DK#mqGI<5@Qonfo5M1X- z#hT-8kM8w0HSJTk`Q9l>t?}09^`?Eq`ryd>^q(P64~&Lps&D`R5v2d_3`_stGo{$M zINQ58>lvGv{%`)1B`vAg4R)lSms$kx27v1PFq$>6u0H&;wMkhZU|L2`W|Sy@Zj@4Y3#N?>tr+)PA0R))Ph#tUlU18h>$0 z@jCD7T2>b}Da;!k<=ByNC1nY8!Beglnz*iwIv(V%XTGZPio|IQw7L6~k$g8wyUzfd zdBEYh^Q2buzyu4NTYNQihAYX_fVL@5S-x6Y_z5;&S2rsX%_ZK!DOk`V8O=LQjgpy0 z$&n0NY0BpbM;c6lgftJ$Pcv1vEh`vlntM9&rFgx`6iQKTd5T#lBFJoKkE-yT%YHv* zNue6Ov4?3ss#aF3ymjn_1PbymwL(b@f(3iG zap$F%pB)ZZxIu~l9Wds97fizPp3^z})^pybwwo>29^w3m;hBZ7aZti;0O93BAO40r z*~UqPNu!i8PUYb(QORI)=3GmJc?Ni3u5Cnjp%;EtQt&}mQahB=T+zsyWC(ku0KZi@ zG^qR~BC2jVe~ub+s`1+si1fxwn_>ECb}lp40Q^0qBg98>hCIfhjehf5`eFL!u1D5G zZFJ%baeNJNYP##o$i*76U9L=u)s_qIMJiY3BG)(8zHQwEh#ODP_iqngU-^9PXqDte zP6M(iD!xjU@CkG)V_GBowcGMZ(#06$aT2~7&>TO(szU)N66)J`rbPir8_69n$92Cs z8XF=R42gl8gtnlZP~2d3pXT~l%3Y{YKdzyvWF|_PirUl2y>V1KP1`KpIvk|Gv$~+9 zexb?!nDI6O2%*;L0tQ-a+Rnv{9ZEW+Bl~r!WaprVbtAZlT8t{zwbdHIS95%iQb(um z+9NgzIN4YK_^v*N9@gJX2N8-*1LG8)XGqONHKD91PJVXLfr{Y1Wgz~l(LfBA5Rnem z^3w(zP{Me7PmP%gO0Fs1A&uM#htR!c9_ zN&nZ%9y8E*fejV8jU^46liHP(5SPpyHp1#M=(_1AtaLr&*0B$)ctctZ!@$G_LgnpW zl7`|l6W?*k0oHvZJ^ua3VA@&CdA5SX;a@J-*%~K49Gx((< z-9G5elW^KjMW&lGE`F&;WQJ1pHWDq z-#}%)O+Y&;oIJy~ZA@*mXhj&((v^+eqj(_!3Sk{Up;j7qwaJo|K!@A6W6y3Ox=b(o zy%u9)wW&?SwO@^2f#GS72PQrIY3LpX$T#Oj9=H>M-|4mB z2qSo$^N0J4K~CDSTs5&;N6DjoocJOij_ZH3oVhoGT0 zf_khypo#t>Z5^ks**S}A_q>Nl`WsOwq>~E}AIx6PJZG9;*rg+o zW8yI;?My&*!H5%>C9VVE;g)MxGxAjqB>%`Yz441JeEXx67+q?OL4J+-x3Bmj>@jL%zPf+D_;@O_2dPN9D#J3CxOU11^;UD_ zdr1brg09eaPjbk41ldcoClATBpqV5_WR;P%HA5X9u!N zkfb3x6hK08LjcvgG5A{_g5 zm@lW&>ZG~WXyRL8h<%2p<*jg`({0k*Xs^sZs~6Up8Fvl2){B7FkUMx!ot&*Vc=RI? zg5>LhuOk$UbxW6UhgnG>KpNJ!N{T-)&nYOxX?iB!b+Bl>3w#Ekx^7t1Abr!M>gKjH zLvwAO$87R&RKc*HfZbr8mjU3&j$DySyG26H^fAAHY~Cgd$ExJA) z5Z!aPLByXp5fRP+0~5{!(L|kue`*s*62nvoftux?@OQ#h(J6ksudG0v0H^-x9wjle z_okKlyt8FHLTHD@wRZSTV)uU9bsm!COG0Lsyg0owDc#9y+!%U(Ep^?h{Xl|>MMp

WrZ~y>E9W6(iupnQQx5MoPgd6Pef=X$J?wuhJ-eS`?iMoki0ce;>ECQD*Crd3fk_&;3JisjCml|Zc?QD~< z<*I&WE!ZFJwKjdTTPGoKkvtKBIAM5P0I=J@sGN|M=$w!pi!+d!wcC6pkQHrYGSTpk zN1dtJ#$P`m^BDDi*tQ2QB4Bhf<$NnIng;yNY~I#0%(-@F0kWZiod+_uYlaH&Bjur4 zx~1@ECN#W5vVx{79pE%HPa;B-v=eQ=#O#+DO!P*<1xxqeR72}nT;^Hwy|BP$IRtN>e&sFKYr0y}BlQSgQ7ORP*3>Cl0eXHnuH@d zpyh&r?YyMl9C!`uWWvH5YzYgN+WYm~{XX7?-99BySrc?iEH&NMwJsq9k;g7M)zDEC zCt&xwUP)K%&xRU%cy=G$d(kOtgK?MsHE%3PLDr&AEO&#$Ta_lTVx;wl6cOmTX(q9M zcF46VpvTy-GGueC@WCzSr&n4h^JgkR_?x|01_`t<8NNUY&}R4E|6V;x_1Eu#J9QDZ z21U!)*U0R7oMgP;2;DIp1Hx!GjW_m)PEvVW6tVJqX(~x@VA!KtNCeD;W1Ho5eHmQL zfM%fCR&Sr)+cl)40|qVL!!$Fg{$F8j>9WFc{9w&JL(NYdGER};Qy>_tC%U|JTPu7% zw5L{y1&hj;8^#1XsLm)sr%KFBPhy&UmN(t9UX^c5Qi_(qM~_|;2;1pw{a=lZj{brp zfGiPQ_C-*diS=^1zF?uF@81?81`8%jqYcy*$A)+xX39qUeS&kF7rEU8i;k!SB1_%B z9*0cy`D_f3y766Tf|SZ5P4|({BA+TLtT8sf@cwcAqjKdmFb4$ypvnF3N@q?0@PFqB z_)n|w|E+ZX*XtkPe|Q6$H6h(mhFeTYA*C8C^8A5>5PsnY2g^W!7^6}{5rddVJ7AM$ zPsJIVdYU4Ehz#3oqNBWkvJL5I%R3CCqAPcuKXmA{o;cRN^VD8=(cXNOc9trsFeT>Y z(&aLIz4)CkmMfLYW>*{=*KYUz{q~Tme5bJ#kPKak4w9mxMx*Vmp69uIDe^ES>x<(| zwov#oh==EU9_6WrKdWCvNKwdvaBEefc5`I^n8=)QF2&r)zoX{9W?of&OQHmc5b`bn z$1uU>8fqsYMY)}zi6|``SeX&kF6t9vigq?ExDgz0DIrYLU0mHS3;;%Kl~L5T750+n!bf6^R+6#hCr0>roz%cy}=Ds^pWX9hK>&rv?u zfG8sdq-bN*S?LEz`?0Yw%OxMTC5eK#;GwsqX~b$kt;> zAZ7($X#E86Q)JL~gPx`Mq*_{2;zA;&9%CtPT&W^`Yf9~iC;1b*g`g5(k{8XSM&5*Y zmqmqkQ(W>8twcoFHq~DW8e0PI^a4ryoPq|~5TaNxy&DOJ?s2-5;>VZX&c*b{84ze6 zyVh1$;r=wwL8fr=(sjn0t08G0Rq4Fl+)|2=VwQPb^Hce2SAu^(8dnjGG)_jqu$yMn z*d=(BVDdgWvVn7)ddw*kHk{txb$?Ls$O7Yw_Ot{H_~_!pNj`@OatoLZ(rDs?%*E<#Ov$0wTZjkiQ%X2@C!l#OUQT1j0{X>b>42j10X&#GH zNMC*03%kcO)7Vr`XW~cBQsuhE;>WHs)ay;GE7&3Q;qrA9i}S&NAK1`iJO__+SLty$ zH74)zetk$T49?c_?biXm)7Z7KLwf5fnTp?eGwWoM-j%oV(dn@yRRu%7!79j)+bJr4 zSUV@!2GJL%e%o+H#kfe%e~@!hN{kz;Net@#atyZ|bDn1?mKoLk#i+t!++iXfvW;3> zT1X!*jAfd(u=yVKNIH!+sBWhIy9Z>gAbtMDnF0}J65>tKV6cY7yS}Uzu##Mo##~?+ zVJG<=iNki3k_%!D*aJp}#9RZq6EN7)CwVnTe-g$*Ev0p0-ktiIER)5lq$zqyOi@;V zjW0pcVAVC9~1^-Xbng`t7~eeP`(hI4yy-jQhrS$Ab)x?vY5(i zgt_8$Kq`Q0n}`^(x;ynRdNB(Jhdb=^%$A6}8J0eDI8udZ13L%myJExLEhqrjzet>f z;tUMu7Ma|eaDAZBillh{W8I@3oQrRB)3IniGxi?rgSjbA3$HRbERPOW?RNFel^E#r zVpOsB_0Wj;{OcyuzOtY{VBKLvS|&2MBS@}#?ZCfJx3bj!dQr5iXOO)5>eIC|5d>Fz>mh&KlZlYE5ccr6EST3R^WK{)GT*P675x71VqPq<7@iblnbrj{v81v-Fvq79JKzTA z1=t|ZjN#P|I>e|u5;Ztj1+WNhI9iy1oPl@ktcN(IO|p}iN>+$5tr}_B^;x*r2(P?s zvimfu=Z9mx`j;%xr4iJLLfn=?S&H zCW;q+_ee$@HrxE3+*#L5utQBzozCpca(zcGs+4~3$F6eQIs@@Eg<^gXz6hQEZJeJm zKb80hnqDT|Jd{~xZbKHa0V@QxmADmTw-M8%*566cX<%N9nL}u27#%0W9Gb=697-8{ zR_^(mded5h&FpvKy?I?`7AflWAIfoF^c>o))-kv~B=JKnV6!&#<$KJ$0rIQk0PV;= zwh=WZK&8MS8D>+$A7z?cJyli-rkN>D)e{&GXWx__?y#Z-iH*D57#8}#BI)0|qlKXbL*$SptShaj}&dpmf^-u7^Q^dag$jAB+`?W} z?IBd}VZB(wo;Xj)%(s{5jBaMRX&ln%w0!afcOK1XZ4UOxs~KPWt`kZ#wVv~ARKQ%! z3p3dzTWWZ=t98EH*wS!KRhXVVl+Eyq-c>_lJu~#Zl?hU^Jh;B#vMtESFnsm0?{M!B z40GGOCA$K{yvkZ!G#ocIlw?`Hb`~XYa6N3(g!pWicZeZPW}dd3Wi#zqvOMTs0K0ET zU-0_WFn)L)uuBbfcas5$HGecN$6hK`MBl$Z1!Rr4Pxq=CMcH^Q7ssAofIB5OK3nq< zZ<0-hA-){ctIX}wX+P9;7U762zf3ap7*>+VhI3V|GZo5~VVKys`dAOse2Xnz6J3{! zEm_mW?js$^K>TgKN7Y%>T6X+&T0u^6Xj2Fih;3K!FT9~vjOEedEnDzd-|91Z@~9=o zy}9kW!R{yc2=>|!#mIOo;+3$#WLXx^F1gngyL7I7uQ<(w8n~l)kvZPqwRq7jx{8c6 z;ekHXA`780gtEpP-UeAT&PN-k-X%jB0z-CSb94&z~tc=~O1mB+=5hcVP8%)rlVaJ7gT5A@D9B=FTKdjsZT+_QeW3VtK(YVz60M}x10PnyU7 z$qaav-xZ%L9M&*s4dK{GNQ2=KXDYN2(VF<7QNfxSE69z+nwrYwKwZPsJ+@@p?e!Sw zTTvh*R&=M=6|Q#>9PM5Rdchm!c9pgd3;M0FkEE{>!j59!4k2u(5(3@rn2M{^gW|SR z2V{dsM__wzZ-d8z7w)h}2h@={UZ<<8l5KjDKL&BUT^R1(5pT>f?kSkeFNk$gnjc2bdnhfZtA91+KaDQ@(+lti)EIP3Px#*dno&dgl?qD^ zIH%w?vU4(pPU+Qhzz$X2fgXpQzbz97J-H97W0I&{d5=zQeP>OF-nDhfgFr4Y3-Dej z{_#0(JSRAaH3yPgaIf?wPK;4%&kWhr6$B4z3ev9bjyj_e_g-99>6IzzS?viSDti|B zFoBTj(<~@!M+tvWSoEry5xkewpfNl*;-vkRM{93}q-%WS;nqt|Zmq!#_1mNPE6ni3 z$1~t_sKuIh3^16FYX2wlWbl?tWv>)*dsKG}+I+xkJi8R7N5#TVfNl+$?FlKW9gA37 zd{Yq0DL!*`L)QMtnme0A71jKi2l&=~%p zHIiJ0N6q)kzXNvJ>txT!En!tWPu&$!<{T)rjT-ZA~1zr_=g;{Ahv>`kDA*cmegK0W0vpF<$P__E4`$28@0g1G&YwC*+?Y zP`wI!f9JIo>bGb}ug2crrGN{?H*-QY2aELeJZA4%IXAqV_jmRb ztA2x=K`EhD&YwT}7Sb>2m(;GTC7;-?v?ZV1#!JwvA(woge zm8(man65OPf1nbaR!u91pDw{%i8>((_~*5#&$Jlx8^QA*K=Bm&s1(ea2D)-X!+QJj z$15n7<+pJFKaI?`TctAaz`b$zB0KMGbn5#8c5n$`P7>nryK&utKd{EY1J+(XWyWyj z!V=>HFH;#`s>eLOW|j8$P-mTh#;$z>u%(Tg!&ZW{;T?;_X^9xm>X(0hzuF*;EB*c) z({&*W*rZNaL4IqN$$_~Ci|ck}KNg1&9|u&K-SDKhWy=3YX@?nN6GgoHNI`l=bsd3zL0_n=YQ!GvqzcE7{|MmtogOZaf&eW@%?^5hhJpg`q zOc27FbXfwdxm*Op& z879766sIM8??4EDJ|(s%??N#4B7E;d_{Hjhcm+uK248$67R+}&)eE{UUYT@TD)I@= zttXvcrtG+#qL{v|Ny&918`7(_ccb=t)$oo3)vaXloi%Wy___u4Q!&t+e`|;A+da0f zPYXzkP1aW7jy-lyo8pkkFN-!#6yDVi-HT2`SG-Uto^f!9As%k5Xt5^vT2%At?d*gh z0=U;f+Xec0)R%0M5NVn`{+Pw+5e|*=Z6Bgna4TiQ6dUhE;iC*=4577{}~T?mzn z-?jLBhuXK2I?lVJKw6utHVGp4t!;}=uT`!2r0R;*nZ4D2ZR}RxxyECSHIeSM=5y)$ z46t#%SVL9^&NMtKM9$lDsKe8R#dwn}|Fv@zc&;}wsMo-AX{7x&`;Gdv1z z61lm=&mMShAnybl&2MO@yjqRql{v6YQ@=3+4aS4703Wd^gw!lF_ZB>Kt-N|d?3y}o zowCXkjC(>5w*ur#!*Rrw{)so=w9PxA3(jp0WIr|df)B^4gd&FkRwqoL7Gaz)R%vtZ zSurbePe>TTu}PkgiqM{$iWil_=1-jo!96{B0<As5 z$zGAVKwb%Hs2^xHVSX)(&0y^s31w6gOGqx#X}e;dj9Dnb);U^Q2eXLj66qEi_aq+D z>~%ah*(= z*C-%gs#_kEJ|z$nHEBSO>1zwDY+)SflufC2u{}L9?4~=PzFhLp(%{nFL8yqnkAPkR z?*Mdk8FAG>eJ3*!IqP1C=c!* z5q8ZnqdvJgvKEWJ<`A?WW*s!S#1CkSE(V$96-JrooAQO=-HzxnD(E+0{M>vSQDQyS zu`R+ir;hHv=eiZ9$Rf1JLo4lX)za#Hx7o+rm9|e%PdIaCf>bOfozEz#MadRSGHS|9 z2pl1EbQJ-l(O^@GhfL?QAHaXe^2@Pn%;e|*0EWWw|Ao+K)yuCBse-Kto+>NYV zj7{`x{?V}azhhQ7{%La|_&;^;X=>`ocgqgaBW!=5u#qa(kz>V24E2c)51@&Vv9&j; z;%qczx7`pG{kVwl9p5B6(ji&O5>TSSAeW!KKXxHmjcJ;0+UvnaCtPHQ6d<$&T=ASI>x z-WwD7^+t?bWmZ=T2zHSuhbRuuk{Z73MkYLm++L4P8$m&d12Wxd3?Be(mvmePnLE7I zf!YZ?meg^We0zKUQT4D0gZgW{BjRP+m9Wf?vY}!ssdt%69CR4!kb#$xHo9IxlyX}o zeA+XNquhqdyupBrYu^>Nle`1%K`A}QcGyz4-f4WGd5_Rnb9fG|t-7xRKTC}$_oASq z_+#oBh0IR9>Fu9mC@Ld=Nd33j8|mM|G~@vO;^F@-+V{UXhW~osg8W~NrjOEv?E*cD z>;%}Lwb9GRFcq-ksv^Ij_yVg%UbKuEpfaI>MMg!HL%D{N!~Qjx_-*A(pU^FK_)mE0 zE`dGm#e9RoaD&gEx1NV=qZ6qZY4V7Mrw(K5v?DjJW7oH8jdhQw25VdHovp(N(ITxq zN`<39_n4`Pr4p6P^{F}Pu0WadP+j@H$&0jhL(n@bWBCz%Z236|ZR;}yW5Bx_wB<_> zQmx-tq=i@Nt$VMfCQJi*c6+V0Lp%6JSMH-ofLs)IVjw)mv?TU5s`iPGZuDv4h0upf z?RH;5o1$H*)Foqerf*HiU6X?KWE(AXcRt2tICu?Am}l2yJoELK6Ox-R7oV={dQn8{ z73kPn0o4S&amR9^;ulLKL*NG`*;q6V=L@LRU3rQN$QX>K;Zdaw^(9qm6PToOj4*HB z-ytWjumTQ4A!icB+Ysp|hNDpMM1OnVPJ%i2iHf*D33(+$nTYMG`W(yh1j1j{GtJsr z5b*3O$x>ooK|E3zLensf#Gy>$^fd<~Mx(!i@r%NLxFS$_A_?QlHf6_MwfbkW+XkPj$`| zlsPIK>A)^8#G-JmXbJ*pxoPqn&JZi*-@+e^RT5BRyLagytD$Jtlip*xy~l!3lfxr5Kb)Pkk>SX5xf|jltK4Z`C{RBU2T51&( zfIyQ-d(r9cnQ|(c$ABphcQKs>*tEm2W`WrN`)N)?c;yL|`O0K`chZ!jJ(?klOX`c4 zX~lbriCYZ-zziTl4M0Q1UkUh|E=5%J`H_?b3-pWA=S&cGjiH)i=Msz_E|25J|5*2)m2TE$*X=paCU=av6Q zPc)*!L>cnmo@h|8f4|gn{vW34e;}>?6K3^a5kVOK1Ht|OHGjT3wjS;;|4P`chmeRm zUcYK+2r7FOp<#Z}rjXP%xvk>WD?t^%o=L;4CHK0DjiMaWWRG2U)vIYg?U~KiEH=!t z6s6C&0FwL{vOj0#sFO=(`-LyHrH*{EBeBSs)xcviQZ$j64=bjr9ssQXn-_d^{eht& zH1I>(3k&l0gHwQYI}{i{`#KDabyyOK`FHY*+59otSi z9owFq|Cu>+=B$I6cjm+U?WwhD)vDUhb>DkG`>y@FKyd%xMD<^9epvtB=J$2}{QpP( zs5l7uzZ-!61C$aA>pv1W@qh5E|K}{eMiPq>I157f;|E-o8Xti8e6z#s7F zgt;4zV7^{n!Q>uFAo)p6q)sb-|46VvPYk-rFOAXsca8n~Gy_EtuKy`HvPgFkUM-0&75w_3S110r*7y+&47^bkKgvX(%@5)jM!3ayyA|m7n_rf z&zcF0Vc^hHo*vQN;6c+pq(tfLglXG%hFC@0Otg`gLPQ9%QczegoJ$iE6v=Fe$tQ7H zLS4n#M_JUWV@pr=>%R{HzCI{r%Tq1QI-S?yhk~L9%k~eu6yqmgQWb0m* ze0et|nbiC65MjMqWig{f68;z%8N~Qqk)&53m)9hJUITuol}}&xE|c(mtz>AJ{>mHj zfv>3lR|F;a|Ldr;{6pB#|0C8-bN?&^@bjnU`8dFdLd`Cx;fL|bncrsicODK*mZny}U zX*9n1$iB16A7cj8AAc|covuu)Tl^5hhW={#`0~Ac5ZyiKdMsE_qL9_1g*SJK9$IBk zvE}jnl+?bxrVcP{Va{`Gi0GiYs-D*a5uamW<)`2JN0iY;=^~E5qAd8I7LSS`c>hnL z{I9kB?SI7kUxfSLFaAZpS^uJ6c`hAy4ZA+kz(1UDEIW38LY&!)#O^5fw>CSoln#zW zxt(_W4vGI002qG7h%$T^KwNWKT&C*$=3c;};NJkiUTSRRyI%g_jp+Yx0l>asUpma9 z!A=iIlK%n#dmyPiUBB&8Qu-(Pmb@M~^#7Ogl>qto<@YnL&3{e zzirj&9wxqOFMqSn%v}-3$`qDUy*R7kZRJz5OFsRB>)6Jt=|S{4PhWXS+COekyZRAE zr0EFUBX;YKfm*DAl_atm*_D1;z8l#tTy2P1p>p8g?e! zg0&dyP>NMnk1j>E1G`%WB4pDsUdVlS!i@9Iqih=qf(WG8)3n66?0Cdzuc-vR{^3ee z?Y(p|CYqbVMoI}&Ff7yp`#PCc1~wt<5hYT(eQ$_J$YP^jtg(hh4+W;p<>|E*=0JKfCfJ*Yug@B-7;R&>Y=1Ke4U8ep>3sP{ z)phXJT9GT~0k5_TaeF45uB_nE(C~h)O$;~I`ZGFlZC^BUbCbj4ap*7M6Y0t}Cqjo( zyWIQvkC29eIwH4>77{UM{ADHN_UF>QH^rCmVl!sb#}*yz#g1T`Sf7d$rOr zK7=l|@n!i{mk*YwlYhV%5Ylw3h`v@l#ut#`Kjtqg{Rc(E|FJm#^|Jr3>xXc_wBwim z#>jco{I7L(q4mE~x2AjKlkL&x#Tv=f5H#f&V{U8I*zV$(I(V1G1}46qOQtr^aJzr- zBH_Zql3Gc*)kQ_6oxUt0Mc=@?g35h%!XVOiCTmSE6@zN|x1OgCyS|8bc)o^3rVZL> zWAO^WT&9iA%)p8(*?`-V`)}~i_lI>|y{1r*`1{Vh5KIt>Lmo2;`0E^{ic z+OZ-p+D?eBE4X=sYv9MGHS!9H`mPc3UY3xNTq4f1(aAH{mEIPirX2J03YC(_72=$J zza;m&`Xh(1s?owo{N~JyU6>`m&_z3V8cXjPl9Rt*aA2Vpzj)~>9)GYPfnpc*AW>Cx zE5eJT>%!l$<5L~Av_q~V$F5&`OTtSaP=Fk6`a9jc#9Q}pz>PQs2U{do>PSsiP^24E zP5s2YXjc2-m9fc^YtD)$X}uYigIneshNW-Wc(T5jhE)EkBBl$!glh}~-SWz()cmt` zWn`z3F@UkM>`Gvwuzs5vk}Y%6tyw!F2}(P0)>7T}iT7LUO8U{C)9H?5s5Xu)UZTXi z)7}l!CR%i-EGnzNPLTNzO)1Afy`PH=zbM5#IG(bjzb7lZ$N}wn^NPPAX~201AbSfL zAJX_-3wa{J>AO`*sA0F5mEgtKn47JpHBHb0Y%-BP|h&@w>-dqrD zsIT~P0$UD0!~6~b+j$!VBkMJ77Rd+n!y7Sat4$Ka9s-urHg$i#C*OV|yC{2F=T-qW ztJnXyhD_seYa%2(wle@Bml-e8e|GD)jBEDQ#sKxvc@3GH5hyvG5k_?1sy{=Jy)l?% zLQDyoVd}i#d)7(8bt`PS>mVbXt9Kz3_XzzU9`}l#u$PQI@E>>4TZxc&MEJuZuVDnE zm3HU7FcNp2`Gco-Wcb6&t`Q`?IAPNJ4O?18n)m;BbV`(>F#rh@9y2vrfBQP$p!Qk^ zc@A6|-~~WBLG8RVzlrqo#PioQ!GJyG*Jue(hkU5*CJs!2!v-^wlL#h3TV1 zyRiMk=~ZIXNqbL)`wSI#D7$X9m-AM+_uK?HF)+TTil?-jxie~XVGck(kV zZyYYphIL^g0_+j6<4E&X3w-92y<$q8-3#~;XOO-<&_^Vvt&}7HQpGIk)2@8wM)!*R zPZ{+-&Po8|3uxH!-;Fu`{W6>m1pVJFRsQFu_0N4SS0`(Sf8TpK3e&ZpCwkG9RNNb*94I zOa*QHUCtad4+#TW@y)1dHNS;>5Bg6Ysuzj>c5$%<9yuT@f8+<@s#n;=3>J_5N`Os? zk@DT;9>RVldYnNMSX^Z^Rb@^AhjtuB8k#RE>upv0{nq2J;9}Zv_nv4d2G!E@!4R^3 z5q)x+IN6ONce=>?#~U&)Sg?JpUx;N$UaA72vk$t-9ctiun=96);sWvWU+$|)xxda$ zT)Cg1^pp4aiOR=3HU7e|7;2GbtNxk(oazaiJmIGxD9u8r`Hk=EimJPzT-c+!iZZ3;Tix06Oe3xH zu9wnx<7d}a?c}sxFksFL!~LNSfh-g(%(kOj1opZM|4;XFG_9P#=<9K6=RXBha{QNe zWdF;({MTvb|H6MPK5g|Y|KN|D>mMFbDr6SRxU!E9E8rdit*L2&(j{SJwWa7uwoRdh zzjeSidFSsW?~*+Gy({0TxO_jei{ z8#j?uV@t_tUc%%{m8+vE)d$?5_U%YhsYhK=Ij#UR(;BT?wKI8Wu=$VcYV2PND8+TV za5?JC_X-QwoLUmDoIL3nvG0p#=LdG{jo49!2B0@1#MRPo3X3fyn1K8MZQQX;w?)_c)~(XWa&jZyahm zD(b(T2udjV#|42qjey+onk*E7SC7V8=)X(s=5osm)rGY)5grk6e+CnjnQBuIm7rJY zcqW1G7nCl|rFaO=kBxg*a$NyfvQzi~Vki!>GI4u+oYQjE@+rmHl= z^d%sf|6qoV((4l?T1lv`5ZH4h;CaKa=My4UVw6$=6(@gWoxX_$Bd!p;DFuFkP;V9+~UGZ4C>y4Ds_UYIL-1e9< zmq3qGbaA*O_8fg*cd|Tr@co)T)Y#(lEm5iuogvWZaQEC6eIiNee06HC*z9c7u=#TwwsHQ&+KU=6YQKD(Dus84GW7$l= zw5MV=3Xg(NOL37RKrd6ALr0ow8YrH$pRrYnnUlrt z?m1Iv7@pI*Lh`b7Sgpc>f3e_|cn>uXG|}og>XI=J6agxjljY=tikbDoQ=e^#Wc*@= zrRn2Vb-oaMxI(fNWtRM1f9R$a%#jyiYK&TH5Bnx1)3NPHqsMygyYOD%)f+;kqNRw7 zx{e*2FI;*zCseUA`>WPW^A>m|t}<#lk0Pan*N2@*FJZN7I(+#Alz+)j8sZent9I3zoBi~q{e2azhnn+}0g6By$ zCp4Gg;J#4M4g0Cp@G6*IOb>x9A&h!X8LJ^e2hZ4+_a1;b(6`Lx>NoCws{r*=MxGZHNz~ zCveHn8B^Ym$KD>5j~^r8B+#EA{1`WhL`cu~&ST5oe$a11V3t39e$Hx9Z|_|j_6mXK zS>BcLmp%)D_sDybh8+e6f_9@;(wEu~X8-eGyU`qyisb~sx!XeNUp}Ae!rV7g<%`=ZR6HwT-=}kzB)|lG_}0B`mf1nvxBZ?bS*NM!n}iF zh>+JNhy5E$ZyxZI+vQWGgXf;9uhwSFK=8=x9_0YE`|G- zMBquQUz@`eQ(sSFm(>YSS9YyxIgo=RF{EreL`ZQYrfS5Q`Q!5hlDdK13laM?%j0%< z7?lHBUDWS_wqdv61z|7T?35a52E{o^0>t2Q?WVVbyS&N*oopFDHztR)FXQN> z)RG<|Rd(=5A`3f>e=&=z+Z$TTPS&ZunWQtG9ebTfDH^rXF-aX4da}V+uz3$;&0mCP z6iKpGXbt@b#>(JM0P0&+SZwHl@Qq(dZy8kQ!KR+~pMvy0i7|!Pw9rQDN?Y4k&0_kChWa3-}$9tyZfpfA_& zH>FYhc$q&7t3B78?D7S0R*s}N^=FFeX_UcPI&WSjpRr)?{=s;FDKYg)rn2%C{z#zdx746igsLKSWeeV&&6)BTnd<$F$0Hm zF`@OoCNMy@ZXCcx;6NX3Vl%MaLs|7GSIb}0bn)sWJ~iC%=jqvj9djktf98lD6O`?O zfF4LdnHt>@**iFdShHp8EYC7`=>!P<4y%JgODF;tlsm*#e4XNPvx7ecw4UZlb*Omr zsvfli6(^AFd()O-$!j!L(DnP<6=3Hj=ZThGu1 zN*l$EEV3yQ4ht#rf2fbDf~d&uwLNl{`Q<6!${@-<1Gb!SjrvD=+zA}7_b=_?hp4>b zHn0T7_*IU3=PLAq7!gpRKVJj?$f(pQ=#%v%E^|Z%E=HoIEPNLfQAq(L@p&|sP*@j@ zfyi}QnJZgFQ2ZKVguqQw)2=c&9}F@~V|G?fBbuxTOw(6KLa!Ux744Ud8Ex2DljqUs zHMJ1a3)drsboJqcxK=GeLrEIlu~PWGl<>n0gE63KK-9;)Ri|SUWs;SM=#-X?w}82V zkaA{Y$}JTkDL6ax6Odu|E86ScghlszTuKBSRG#=KR6LOt^@tu+NKa=< zid*6PofvT~e!VzfTQg}qTV9ivM@idn-9fsTO>7Ds{Bg@AiS%~`CGupug|S|}hIO4+ zu`@YP(`_;wn|#t=_K1EII}nX!BFy5VYkdGqw2Vj367I?r#J!-ga7vm_4y5S_XxNxL z+uqbzO@~;ttNRm@q^0ikT~i=gLFCGY4ySv&FDDB}$>z)#w;WXTTYP(wW!r0<`A~eE zir=UTlriT-N_GC zyun`}PZY$Yt7HL5ZpkK$@5y|25ka zp!^3s4kG4Pi>tZtna!(QZh!OP_*x>>(T$r5TsQBkk1Ztc00YNh@NIwG2$8GAoIgLv zKx7z2m$_yMhIqY#aibPeblHQh0AmC!GaXOx3wvZfE?Rep(l;%A@^%Hl!9K)8`^lHJ zFrR#WX&^Nt4oHCSA$D-NsP;~U7@K7kicPYm#P{Acx5A(yDyPV)M9r0OU&QU1q>3T~ zTa}&WIH_5R7ji)d=x>Lqy6jh@OxNvX{e+*AH*m^jdjxsxXR`Y^zYDjGH;BmN3s)(4 zQ%{o!fGeb=frqlL9-n!oDEmD9wtuaBHqbIRxK<#-b%QbU@0I|JN)NS%nBb1+AgE3K z+Iw-+c6Gw}4R%GANabSoO3z2(u7N-(5+Ox)eP=+S7OJ{YWoDE1)+#l6+?q0^M90&i zH9C<$ajC)DC>zirHM-iGa+{}~66j><48Ps~T`kL>s^WH}CHbtl&D{9XwEYi(&rQK|58;_mH(s$*{(^3mtE3(TEpD2Zm;jS`)`GdDapx14W

9i z+yE%la(u5i&0<$H!9=`bS($5+3CNv#+WD27UC}GH-2{Pvr_ge%y`RuhY6>pPyiYXquRDr~Cc%6%{*vL<0@v3$l@6l&61T*u6|R(bFlPdN_*T=M2)I9{TV0u*CJVR55QZIals-U!|4? zaKbeOw-DuO7)h7Sk$-EmLbZUnq$C<;__lUIG-A{rrOwYTL16=+g|8_(^O4@aP9%pm z__EL3Rl>U`={h*($LHAMGDm;!1fiFMHqT}VR6TRDt^~=8ciuv0{oN`^NdFO&3zqSn z?;Zz6Z?5~g`B+p{shK2SXe&IZmMT-tvvj^HvlsLt)(K0wi;**0K5mb1$v7pw1IcuL zHL7twxJ9Edb(}(&wR!p#Zk?aHMUA?poU{~Y%960ccF{DZNy=>|_UMgZ4i3)~+eSes zr|0MwE>FBM?p~GjK)#<`n7((nmLt@PVgM~}k>1)krsLj)SEQVJn}FH;P)2ZStRJgZ zjRfnS3ULuHA>HuYEpJz>I~8*j)A6^8+y(9A_>E#CW4Afn3Q4i;%J3R=`lB||SBh`+ z+YMpn;X)l9F7XEXGX__=zVILbAbG6}4nGl_b?xCpv%-FFk4y1Sw4>k4tDEo>3l7XQ z_)#`KRPEh`&am}hRRq7VH3%k{@Puh8+)mzBh@+YKetZ`{QZQyInDVJ2pwYBbdt8(* zDhc3kHMF~I4|%(@l+|j#sNcn!=fk~7Gi2lcjaZFN9I2d^mTcDc)&Mq$9j)A|b7a!) zeT1E&+bYw(x4dPe`F)!~dQFPvi@c*ww?sDY^r21+IH0R(sF_@BEA^*xs&vCai6y2YQ^Ct|!Rl7UBSQ zi|g?$9Teg12{5$x6zccR4(ZC}7kFyxJ-_si6mCm4z2HafAFdsA=8bx@$CNw2!>`#JUPZTmLH51Xi@>#)U45YW<;B;zGMNVVV0C0 zOt`ney?+p=%>?b7!DhJr_0wuW#l)5Wr**KD#4yHlIp3CLvxKs0yNaVi37Su8H+iOX z(T)ijy-rOLn}0KsXszCGnUO$X!?ovKXn5A0{*I-7-RRnttn47--1 zO^bG2B}!G6e7G#z`J7q8#B3yyorPPYJB-oU(ILT-2Ow{9}V@RGW zHG4`-tWec;i`w`R`I%%{NnMfYOfO)ObTqj-ZvuFc4q1GqzUMd#PdbPP_Fy7w`rpCWaA?;Hdg{xL#_YQbD<$MOqNpUf1=McF z+~YVmx6-+v>Z7pNXz`W}PexkJ|Ft778#y{lb%(RtVR@_4XiHW(!aCoXjv0Wn8%zZXOD1*gvWq z@pkVnj3+KVTQ^lELB|R!D@N`MbW%6X9kYHK@ICiT={B(?WO58;z=K)T$*tPX3Cw2j zbWF1Qwy)bP4YF}u6Y@0`D3CV#vNrm%U5ooPTj{JfZq3D`{93H87$%(4p;+JB+%4ZK z=kx8Btifk z?Vagf!^W}8i--^PSjMKfG7`b{NjcNDTJ&)Znnx!YN9G){GwlpS=Qs)YM2Kye@@K?? z_cm~>229h@(jq0)ar6VZ|8Cg6y6S#P?cTrjbwiX~d zG#mep51pyyAW`*&3Xe;gnkf2C=}NgvvCmttF^9jl>%*fF_ z-$8Ig_o%a$Vza>JLzMGN?l^lPN*fAhD@Kce@Q>ND87}Oyo{yF=nI&iJ)KrTs$(&V_ zdEXIBoT&zn3+eZG#G0G_@bKhATbR6NY5Ra>G zJyrto(GQHD7czrvyRE-|@gO?%>!@7~?9o;={M=%#Dc3otjip4gi)q@ z2H&Fn^@GPer}1>qLB)f|Wsy1fassN0XAbC3rRp3GiE_$rleG;w!gneMGD@-h7E zF(L|3)ic$LjJo?Dv(;oFu0_yPQjG}+cAAZKBhlXY9{jM~!^gxglwEBcnbGc&SNJtp z21H9|_}Vp6kV;a8gUm<|G&;LPH^E_iROkrE)u#BM%OX}wvywSNL3dEdfRKY7s-i2d zLl7hSb$L9=bIU%Ba_IB|3l8Wv{V8^%rKeJXt-Kx6kW;Nk2BS$EmWS);^Mr?BE}7pY zp>X^o%XMd_@qNH&+6LQ#SJZDVRL_SFr75vSCfu;#cG#0K?LXl@bjYLnu^V73?>YYZ zbcsRG)LenLLOGtoKVR|NjX#4*&EiIFElZ+zIsM95LCkK`9*e-Jp*nD*Z;3FyfW=cN zQ{eVQvtXsCEQLeHdZUy?A8dJ^UnZ_W;_NwwA{RXL*tpJIycpuG`M@k0<|Vi|fqV|B z@BKOYH`C{iF~@<`C{CGM#ADrNL9y{9l}laYmFqPUW8n_ntZ+3kK$frb5_n`3dx;?pB^tN! zbnsH7L(>A5qDK23VsVe17O<^$SvsL8x^!}2l5X2&#o&Fv{~ z@pm$Q(Wi(FG@h80Y|qc}$K5xRhj#o5$}_wHTB&VauB!OZsBc3<7)W-?D|M5N zavlG#2{cLZ@~ofhI;Y@}aE52}r50jFQ?`BH13%G*StjF8of$)GVanL>;xRw^Y?TLy zEy;84K|Z4ixuOHBLL~WOH&lE(JHB1LO-xFCZCsWSgM;dP91JWRRITr4jt99%2{rZ| z4-`92vTh3|66A5ZesT}w%S5XWL8-)V0j;FDc6H?;oIOsB;3Add3<#4iC38bHPhCvstqHa5QdjbibHV$@So5|}>d+@`jWe`aH`K24>?3D8ipjGzkkK%^Z3(*0& zv~Vbb)61Q5hWd+pm2=he8&)Dd@=r>Qz(QP_o13k$-!;1!p@nlGI4_hm;xSg?;dqPH z^ZF#S&CQ$etJQEn!$qf zb^%F@1ubVo4c;?O;Hs8fe)gR*}}#$`HMgV1Yoc;u6+f_{>c>MuBMs!lQ5{QTAqYd;}R z4n^x4%JQvE?^|@Tw$kN=?cTSJSp@M@m+2~OpwSK*Xg;6B088dOxTty!Ph-)t-^AM5%vaj;9FR<&a%Gvjh1Z|ZYau9^Nm zs{W57UIA3k5#x9Xi7B!boIX|ZX{m-E&=-ns-rj{eS`aBlET$j$a*ZE3D_p=`7|d|SzvB!-3G#p3bZi2HH>&&HuM#sK9=d;rH3CzZ9osPQgIV;dg*X=1kW-HG1=aip;+4AC8f;h z<#3jmMd!x5zvJTQVrnR~jDztuZMuaojUQ4e5w>`iW^-snahM*2Q&^?2Cq(3*@Mr5} zDF=MLeDpo}*C=A%&NCz2P|i=ULY33^fQWq3AJQhhZm0 zQ0-QVdR@BvpNR48++0R5K6_tAOI{8kN_D}v~MFiTB;GVLD|D@VNHX`WCQ z9H}*3q!?+ii8xA8YzlcfjZ<2qVHL+9^A|2`_EL~{R}QAvL99hdn~{)z1`FLyj+>?C zmGk;S{4cJcGA%auFU+-)vT?3m8Q$VzO{fpVH>MH4kb1q@si#rLkgm3@+XVgB8h=;4g2cLLD9%W#!vuh9S(QmT8 z9LR%e7-^T1zX_F6q3(#@l*KHMSkMEe4jR*k`z}~Q4)mm0bsVN!DbZrmD+IODEgCH? zH5EE5xdv(~=c~#L{1|b+(H+`EH1^WCauMPb1F6&-f5@5g#$S6+k)~J+m26={vfC`p z4}*f|Id`;9R9XMt{9AIMzr_~J0+$;UAi_=6k#but^f~MXaT9&G(Gln_zp*Tmt%hFa zPDyCHg*GJDaTp%~>T@jQ!g8jH+l|6q@#g-gza#bwL*LfY&(Jy{~d8zE! zlu+~)N9zx@NLK04@RFxDbVY}B>ALJrrKpADICWQnLu-Bpx2?ZIG>!gY%_vEhW5%?< zWnr|08ooij3hF7h7oVzIUYaFfUH0d1X#~n-4-SkHn2a$0-;M3dy}yvnH+sQUkTC0m zjQaV@Z552ejyUUaqGLaomWR&~3}~Y9vF~~auocS?3Un?ZPErgWPR-!jnlGVFQu^f3 zCVi$6C+nw0>lvY?w_-YpxXV1;>Chq^E!dZ6c(ouag_yA*d=<8_Qarm&2D?s4ScFid zY?dN}gVDW{exChygPZJ=ykN+-&9dX!z@C#r*^9b1vJp+>{RLcd~+l8csyG+s!k{mvKGYuo2v3L^Sm{cn|n;h3Xm!-Ar|{P zwJTg&=Dl|>nQ0SvNStn2SmD-Vflezgdc^|n45}gc$o042RHq`>#)FB7+gr)W7ssIa z<-_;zQ|xQ4#%_cama7{SoFQJd%eY#O`-ye{+ePNR#*W#_k!~<(lW8PT^>lFNy?efK z;P8ED{?IlA<9>QCH>5`;HQhmkGDbDnej=tx#@R-Y?NUpv(B1clEG6DY8f>$GL(Pbn zQSSFCN%c(6c7wh^p?!ntqee^9;+rntMSYqxRbEm%Hyg`Bp6`CKw1Ymgs1s~mO;nnr zK9etfh&Xh+Pd^Ite+acV!O3fdvphM-lYWPUbm_!=qe+T+2#$3gK$4#q{~8A5Tv!qb z3i_FEfeeR;YlbhV+=y73BWwD0Mh*sK-|NJs4mAvH6UQVJ-5u!COL*9bRr4Se9DSbm z;sq;8ucFH5T0Zk5mshTv!>?NEqlm{@Is^Oj#PkvB2K%ZeFe6^XLORw0ucA5i zs(wE`SPoY%A-y?}6~m41&pPrA34B9f!*^v!8jry59DSQ=ph|g?v9D}kL964hl?RPr z`Nq(J!9}|rlJsm6e{+xLJif8{lb^NFqv}1Y2YjuM3Ic3r&0hmU9=}P#I5etfpT$I& zNy3n7l?y8$G(4lXh`j;-v@9nC&lzFM!06bE4}%1!zwqn`VkA;+0^cC<%TP!+?5OZR zj&_|uPutKCy6GUEF?a!PmFp9O9lO~v1x1nmRu4mHUG}Af)4dQ)whvC~^>ZDeRo7+1 z0k+vSpe+u4{=vdR9D$*)&(xo;&U!~%xr<^i zril+qI*LD^wx@AJt1G`&-{{J{Lmk9eYMA?J39gt`>@WOV@;n0Q2CiYweiE5HC zGQ@?+;9Sz0$E6H{Uy3|^f_#Ci{_I*h`>cl+WtL-C0S1Z4TX42#6B?#Cm=o5Tz@WSn z*N4Oe+8O)6fW5-Q;3f`Ui|9Mhnd>`ZTyk8@H3uSkx)ZNWUER>K?zzC*heUT#5NvB} zA&_cc?PZB}i(y^uY8Ao7g!Xc{AIxJ6syX3pZ71TM=$FEW_tli1FKshkQMctBIYv-u z^%XL`en-5_h??dL!Je=jw^VM#9~X_%$NoqQC8^=-6I;6G*Zw50Ci9?2;FSVaeOF1I z0umKbt+Usi5a#w7&dKpkHRCmHnZ;$a64k}cBIYC#nRWRZ`ef}*Ud@Nl6x_nu&r0OS z%H^$bfd_gsf=%RDulE4xsLo}vVfn_Y_mQwqWB~FR?kDWeS8(eZ{aEC`kFEhvUx#*A zAkOKdb{H%M&e454g-A4XU?u)#@>{5p)yyftft`|}ELbO*@8{c~-C9oK2&xC!u~Su; z9$iB9$4r`$IkWk^n#YOIK3azDSkt4hTJQ!&34zd+A?&_PUfB(Y{XNE<)PLRT>2fRL zcRD&@)m&}R>H!2V{N&AHlM}R@QHWz}z5|}u=x+yEV){2c@D2Rw)BgYq4ONGwvBsx0 z^x47SjYTx<+CkurN;b^eA>fS(uQ-f+P; zzCGrGbUO0xY(9}3jt+fc)PmIHyTSI$6zFbAOz>la!d395C zMLWUsA3?{cQQG8*;VoayQoj7d}PpeNo#j>Out6awlr0TudcnC zgy)|M*Q-qObD}*u&=;8eL>Oz$Zm}0`^87?ln=vh!*8}K9B4O0szzsmtoW?c!oHq7tXtN_+&pIFY z+bG%0q}Q9-8%%HZ+VH+g+MGeqi)04frzE%1XGfzqjFg-Zo(OhW&qP}be=W)U3IF{x zTpw(WYY-Xqs0YMdCMx{NC8!(13elW`{7-6{x=(Uph!2J=QAxEHHC}mPJlYAuHM_7O zs9SItJ@gB1{K@CYaV!k72dls-+_9z)85H|l3#79EZG3+S| zAsS6956$Ut11j6ywmi~L z{uA|4SH%f{oD2ZJ?Ip2zRuZ;Wp+==G(JLUQIS_^2%pAFd?hB0pzCj_cAt}S;LsOrU zzzSrH-hjSD<0c!*YnKd}{T`>T>dg3^DXx=B;btJ%gOLvw z^gu_6-Y8_wkT%o!{6-c82Wpc#bNl?}9uxzj%T&OHugM}jqOv4OdkO(-KYln3q7F9G z^lN=tW$7Mmtucs8xJ3@sr;wX_aawll19FF=@jLif2i$=G?E!e@2ie$5V%RX&6%I(+ zu3K6weRfnI2qFy$o)fh>A{_}(5&&_YO-y4C*$GW87KFw~;xScRt@8y`l>6Q?x#Hrq zkzHd%w_5OpCW8C^xci0f3LzLxFoELBe2?R3G}VUR83Dq)^27o)cehNd<9}A?xYsLU zS?b#%iYLG>O{UNf`D6^)rCZ=EjV}S+s}pN1@p_+$MmYNqy*$A9l5++F`T|B3Ez0WE zR;_uK15sK(Ag$i@p8YHZdGWQ|O7wY-^n)Tm*~da%s#(xp`11mPCOXiJm#Hn~qWP=_ zOMs?pt zR=m3b6|h4b^<6ADV>hqIF#P52kUYrqy8#-aYH;4z`2rw5hQ|{>05=>O4-Dr1Qj7jf z1(@MrnFz`TILR8ylG@?s;+mmhOW$r%K{LT-jLk*n6N`a};WunDJdH_tkg9I^>~sLr z@3<^Qk}GWZ%wr(o}hxwXtX$b}<6af>o=(&U8cx4LSQdAXHsLK#3=}4=TIaTyH<&H*4yV1Kf`wm@@ePFw% z!lvy*M@idqfXym8>zMGlMUsMb$_DwWA2bE;G0(x07WLaM9kGZm9cMd2ez`$>6P0u zIzrM%IVJ&|!tSkB5m4rdII`4faOS)v6DricDr2i?nVS8FVCu*-@qngq;03O%=F29a zNkcSN?r98dLW4xVs5%gt;&s=2%tzyRG7E4+sKMop-QfC#9jU~Yq%SY7J{|yA#i&{! zyT?X<;teM}1ZB~`QSeB5CX?%p!oPS>#F(KijB4DjcQ489I;g083GArWk zCJZ=|R8^UN>$mz20ofM2dbvX3VDRYcLGudU5%->~KUHfKXcCzIb2IIX)`O!X;yqmd z>z^Mzz|)oOmHV^Hf5u8VKss2o)a$L4`5~?E&)?P&#Ox6!Ejy-6PIAvKdS%% z`h10QB^%Ys4rPEy3ijF{CCGBvmZ}k5q#4m5!5=d*V82z2)@DpqmjWxQ>oDLwWXDt> zH$dPbbhhaLMCE`yayMB}p$_`-Z^p#@bHPB!x8+{lII8R4`T5ZRR%>!pGifV5&_E1r zXpjX79az{d{azu+rC{y&g6dRqb=}`m!G}CB`GzV>LoqpzGP3$47%`20u*zG?)I@S) zDWU<)%1Fb7F{005vd{9OQr~VggVIeF_-I#b?x;z<;zV(zXb#}a->3_%3c~_)VD7c5 zl|l^DXNcgQ6}01|kPBee@>H=@M4+i_mQ=?mk$Itsp=L^CtLUtHK8lrVivd*_V~gsu z91Gbae;Gm%ApH0Mat)}OF6=RL$F{-~;JKZr}-J&8*;4YR2fpHEb^gN5Qx z#zbI)K7!K1X7y?1h|Z7Vzg??|P(@CEN0uE}#=PQ50;Wq->kS4%8fuxSis_7#yZNJT zgBc=+D;*x~JPk^rPtTSpFDcV(qP+mWC?2D9epAUagC>L#zg;nfwWO_5zzL9Ma@05V z0q)4=_9fN!>iZtBlk|V%L296mAdxenXHZ)?DxQ?6Pua#^vVq`}nX3 ztXo(Rg6Ix=1nU*{h643 z<3n0UYo_)+8u%g4VL(x^jVZ=JP#f%w=slwW@{W*iS%EsOvIM^nMF6;0s=St1yP_EL z(GjJ>x@7~gfwDT$o~t*ZH=e}D%Z}BRUDYc%lcRyA1r$*qSi!z4PjJASU3ttAhQ2RMzb0MKiZA#hnJs?4Su&K-9GvA$lwG2n&q_{lsnf+7^l> zHzagC)#CMz2knZg&(U6fWdtINVGrTnaL?6y;Tt33eantj0kFyx9062Il(Ce@L}8oK zt{j-#G4mJqe;0T`m^VpV>B9;Ur+BjtS_66Mg0=_! z4RKT#lYdHnkboBOO?mA)SH56Fm}*}#+=Z=VqfptQ2(XShhGla>@Jr28=ExZOq}OJI z8yJ^YT5U;l$}{GZYqLR8;3?~6OoM-c-QqPf(i>ySp?5r~xVC$VIhN>l1)T;><9#45 z+bv2{3?L0x{jNB{Ktq)?M}8Ly?rvODkR7**F_tOIP@Z2cTj|^$X5gC7ab!fnUI3Ve zrVbvBhE`yhjCpa1oauCZl}S&1K2o9POiR(E#<6i=I$z+0UF1_n%V#?> z%9GKt{DJQ1hh&~3$B|1_FpC7}_opu?DS1;4@~O`#zxyl0OQ}Q$upEzO@>49}SHe6` zQud`$0x{cdN}jW!D#3_DR+Sw_A7c+kD+P2>xcSfs!4u=vfzwpUh_?K=c?Ink--Uq{ zP#*IH0rWNMt5~YY>K2#{r?Tk(BX9UGWFyK}^~jD*mb@|K-xV&aQ9H(I$%z@##W@xy zHQ?qI2AgM`D1M!Wvj>n1pE8H0%&6cdf-XSMn2hl{g`knC#gG)(a&$xH-+Se-2HOD@ z08Kig>m1$S`G{WViaK2K#Onu=W9{Z(I{huGOJ9qGZ(+fc#`aF!WH2K-OhYgkn7Sl%M@fw(&&T(qiNUZ3m#avz-OXC^chI1;__fnwmvt-UK`?p7bU<8HToxLj+=n+ z8|o7NQf0!jnA~h>S|e8~o|!cBH>99|;EI9b{Craib3L3u80sDfSr;@Na+Jbp%Q*!NCAoh)KKqpBW+@kt_u-0 z(6O&h8Yy2OvM9|;C=70UWD1_V2fPAPtj{Bz@?}6;*RX-5CIq~Bhh?j`*hcK(IdAZ7Ebww5nvs zO@y&mfHDn#{f5E>bKQ@^1b4lM!UTIghq5f%Nk0f3`ILl0gllrU678oQg^YZSN+QTQ zC>8#6gfjn}fkK4mBDZ6wxMV;s*`VB+)Cudu>*(?$NRS?bi8U`_BrD9X`Wfw|R=KmN z1;^2u5;~Hal7=j1P*1|LcSN1~TwS&-?2gnq$CdKJ54peNcjMKEq$nA|s^!2T7vNrx z_LTH%;BtmXo2Rr|j@(TIbsOHL*|mY6v;aQ#i9PN@W*^B7G#$u)M1xO9Z?3YWq^@ZN*I?TZ*fCd$ zE%wtK>=_Y}GGvy3RW40BKGC4ItpH5I9oWgXg=KoCjfftJ^7~Lgg!6|n_S0mDLYeWI z3&@)bh;j(h7HAB0;^fC(W9NxdzUzZJ1+`59k5d7ug#i@|Zske|m3P$@N&i?$d z!$$(pp!oXo&j%pyg!@$bG+f7KeAsXYRH}fuObN>?UBtwxG z73~6b9$IN3SqxcgNELO>-ONzmTiT62!c)PZS}3bLIAG|ZnkHFVld$M?+7-X_=+)Z} z;T>iDM@Uwy=;RnL4UM8ijmnEB96^X0=9snm7$vO0h4AtoP5CdMOx4`98Gxz*^o-&4 ziKO*CW{F>sa(BsXyIO&tQpp>2!MN-*2Z(-TZY!(T#JPqHwL+T6e@p53l>hM90jV9{ZD#e={>qp1sdrYkhr@ z)Cb7oGVsM>erS0;yE%`VwcniiU5gU3sLfTDw0wY+rjH?r-^AiKsMTt>3hj z5%p%$$49u^uru1eAJBBU*haN?(j?uIVTwc5Ll4Ng zxO*5N*?8o7MfjT&5&<&c-y42WKKwm35&>4Ad5F79*zyQf6NvyX@Qj%D=z(O*|Kc#% z@!JcHmKO`IyI2^W{ku2o5dSl29i&`9E_>>9aq%S2EHL&H-e2{`Yb*7M{8$#0!486E2 zEqDv=hAF^>N?(rp^h1hfS*naWHuXq&k!Twx|0~Yuj9|<&wSG&39+aliU8I-ZYF@V} z;Kh^l5~C@-!nyyIsx(%NS~33b?%P^K z6O;J0jeez5Ks~~MamX!p@@;T1;n=@eFlQ#pyQGIG(qlAfZl2thK7ZTc3I3`#Ov{!0 ziIF20p}rzZ39glCADROp@i7${Zn=0P6&cC2i6>9EuEb%%0EKsmyhIGWBc-w0eiLV3 zd95Q=7cAN15bqqA;^NmEjdi}`Z11;Zk(I^Yi++4!(nzYxv9u{Y(lmCm$I>vK$aYGV zNmh5XBISuQ-=-pSkbbw4=Xb$_KA=FnFVS?V)39l=-_*-bUX(Jk3%*J0d1-=#n0Cf<56uUSRxxQ>|Hx%^7w$!S1;T zeOTeFZYG3sSs%Y$Gw8hj2SbywOelYi3Jgrx{l8Nm@`LgI->47&GY0EFug;FYZJo^Q z9BmB#lMM79-XqTcuOh@QZQ0m=9H`x|8n8YIUlz8~P1`&yP2y2YHwb!YFP0x7WV4%r zrlWMDV{%{kO;vFf@8d;VMJn$56_YMdyxK#&f<&ykcJ}o0PsPL()R*dFQwdfTz4h^? zrfD?^c$N9b)fXDYb)WgAnum}Sc{QSqWT;OEI7Wq^p5D!{mX$m{WG30lp4{eV=l#J; zG8XTO_@HE;FC{`q@xJ5e__F!$TqH9L>9y0W(xUM*KAN2BS~a!Kw*+(qm#X2O=mAe? z{yi-w&cPcQ9Zfw~c^aBTR&g~ynTNTWHpb8OCFxcC$`^XfsdIF;QDWbOgCl4JD55Zu zLYJ)vlB9diVEl!|`YP=eEUzEk4?Ul}#}tq(UafFbFJe8M*1Zxm-xvx=1^S7BcJU)) zvheD6GB%he?RmxWBEueL*SQLg82HhQausS0 zm_05d*y&>m zGiR<)m(*+b7d1{=^kcVexfs_6@^o?5x7+2LWLI3YVk_q{3pHU}nNHU;O3#nf9wyMs+D`Q(cIFHE$(|HX zR?hi?yyjQD;V_rnbD5HpIj#EgrXGL?m}sB4(uDyafFc zv%W>UeEtZKb?lbi9s);FA3rXDL6Qk}ZGW68e3~Q~YVA0coL^$@jIqE!M~PQF;5aua zuZmc>^Qc4dRm4LeBam|}RVvye-2pswSglHSP1T8<$jldVm_Jd^OJv`vS~uQtuwqtu zV|rL0!D?%_c0@+6^i_6(_)r+?;k|cQoO^x!ekT^s5a75-Ld~*&&m(kzjvNF15&4v; z^J1=FOV>+#x*VhAsuR|3a?Ro_AQ8=zvW}AYVI{?{em%#Tk+TD!lJ_@=_y9|cq2xOU zKFt~&p#Q1n@Px3Yk=Q>ekt+NCle~|uYb^seKVHc2cW)@Bs7EYX$gcdvU{(U!OC2`4 zwaDLI5b!!2ZmMNmoC@w!`j2avKYd;E2U4r)dHz1Yb?aeK$7_5CiIqGm>J=&# zm_8b)C=$(p6(!d|ZQEqX6Y%79>T`yNPQK=E}l8PTxF~vG25E|0cR1 z3QXXUOimCu-f9rn6;^*#d+?q=e8KsO#MZ_nF7pD>V_kj$B^{F9QD&`LRizjq=Lu7~ z^`SKfSs_BpX~U}Qw|g0^-()+QTZ0Eqco~rMu^C3JOV(Rz6v}yMn(A=mZkt4{QlM^F z^}k)s#_uZlWB}8wDPaN8;XlKsLEJ||GKFQ^w>8FITkFA`C!iUs_6*A?8dShNYzvsd zH@Du2@2RR#9ihe-;&{Zc1(Bz8-z>3Fpvo2#vH{#u+ zR`ZVPxx6fqPPv|^*WMY5s}~q*M&wlh`lTj5h6x9ZG$EFaILiyu2j0j!VQy@Ee&AJLfB#!)PqjIj zcKc9`u_URt<3pg>OEldQo{zG~5IDLCJE_9=%gr2QfY0kmL0Be_b&zlV&mM^(O6K9% z9f8dqjtGm8lDodvs-(UUC%iZkW>~(0Etu00*IP?vec4<<-eE^7>5XfzP3Vzp z?B9DG3W=SccT?YTQ8o5(LwgtmSrD=8URjboaBLFqqn*B&80piuHpWwQ!d^k|mUF_; zM}qeK{Z^60r23&8dUEoUTHg!|-tewhT(4XZYu?DRjsH!$AliDhIbnB+@Tym3+461u z9Fe%`53k9u3_t8Uqdk_-@n!UsWt~09_E6p8dJm4BK4BI$h-3eeVu<4TDR7KaxC+UC zzZ*UC3UNH(ar}-Xay31$J1uPDc67ghcm^X zyjac^4h)Qg=D({ACBZQM|J8>7)vAK>f36af)8V{z2AhV+Mk6lpP!qUbE6yG3l2=HC zz!6Op)8z0`A+VxQBAf~6QYZ|h)iJ?I=rM|f@|99VR^3GM&#fQ=>XYm3if2MZ@|mFK zB#Fk)M0%XA79S?0WCH!dFM7JUS$PY>u) z|LPnvX#!Tx(H2OaSomKc-@V+roPlc1< zcPi;HSW$NyHzj;nd+HVG7KGU*mIuwdoBldnVIL5zygR;2s-)T{EPe3z^*uW@I~iY)yX58UWo z`$dal@tr=L{3rCE%gR{M*)^f}PRH)4#R-{r*wXHBG^*wzvQarN+D(iRSL|==7eW=A zq?L35Me&<)GH%c_sqS-OP%6@sRdeRNYP|zeq{z}k;AcE4>DI)W*kfy8Z5hI6AwPpj zCaaeC>lWqYYfoK7Szk{Y@a6CTIreUAVFkt#Ke4pi3q1PXFm}}#b_eOOY7v>VW}|TQ zn>=s&53Eu+YZoRU?uBDa=T?N!pgL3)=O;cqv+uZHu#V4v029R<&`#MnOz9Dqj4Ys! z3klF(#txJcdEcwEvu!dw@_2F7;k2dX#hcc|VNS_H3S#WY`$$vgCxlguZ(J*zkskzb z4KkT`rVM@Bl>)nYcn5xgu~)IyFV!lOWkosT^i54^Pgc$1VFYcNRnOVT#GWIuJB2lQ ziHTamAugKJIx?a}8oSyEqDqkeHN`K94Q$iRUPNnUt+K9;7ZYI(J!^{CP%C3cnKlWU zx`Z|5A>D+oz{@96;lpPk%Z4{j#`2Bk5%{*p{o6vKowv~fxa;QC5zmpZ>JB6XDbFq3 z^W+A3+54rK#Vn;JQ~%}_-6f}Y1^yYj6yM1cI;-NaB33aoxjZ#aoyg+WVI4kZpr~+y z_CK7!iO}6xL3WmaV`p!KDuF?j_*urLtAqEG!_`=WG$DHND5)%?Tl-AUt3&2ip%Ec6sOjz_?l<>( zflj$_=rYxNY10KRk;5TM^Au&r3PM`dpNTOX(!47-xDxaC;iggNglzH?_s;TSeTJ)D z$P8QXO`bmC+GOcb@lz>VLK)m$Z^Q#(rcib6T8Vgn)v8#zW z(Nq={XAc0=p?bFjk(W0|-tjd}VGnZ@I3E|0Z>QdX6x|=9H$EjxJ-(v5Ue}Ska(}yxMnGNNgP&uP#IC z%kwg;7K}9i>B%Excv#E=4$tdMXmzssEf0Ucu$X3Kp&u)_z?|QP1i)3X7sTg8p@f-3 z+(fCFvnBt9SOu1V-BwvBg5%iy2w%c~&+Brd2wn@abwi##I%T&-Ou3Mzf`?+iv-g2Z z!byTPYbR4SmY85}?=X7EzOnY{*;@#@+q^ViO#@3g>ps!Q0uE?wGgn@tfCWF9`(N?xx@4X%wNuO$Yh)Q=tE=%WJ zHbjRkuES!Tw87+}V6`lOwrw4^JznMf3cCS|+cyucrPokimo{d^i+}N# zlD0Wk7CG^Iq(&aajWyqRrxWi(~x+=Q8V4MczRuvU3Sn@ro{svA76e;zA-zWb8UluqO~fg%2l zi!%v=O(OdPB^jd_b=jjvr_i|2iEiPlSxGo3%gd9~9YT_fRnlTZbn5>2Xa^pxv+9A+ zh*|D=u+JD57*+mzfHU*n4Sx3HaPkj{K@Dh5YgZf;e5*DUz{~?oHIYG*IBsiwfv}#z z!#i#fSJ%~}0<{OSckF9_I7P!cHodV)LbNk(GlHwM)hz#T_69!YvGk>n-}a{14Tt4Y z@tV=n5_g*gCA`6V`sIqFMByrFHgZV^-*qJbf%d9 z;#$GD?JVpA(-W}g+C`_P4QZs1hedMyoGCupt9N=F-+WvE`yOY?nAs|XU! zHYF=us#`C=p6Z&a*VoUown@_{7UtKWW$~MJ?xBB^I<~nJJZxsfPTpMRow;T2Kr?L5 z7gZ*Dn`Wp{k<=)Z$_~qT@oJL@?k&Y9oH)mjh!N4ZR$mU7T{)l*|Kuq=rk`BtZ!TDd zxe?-;_k3aWL2m8I-_R1@_3{h+SQY0S>4JK7a+ar#wtNX#DC3Jj7$jRYj5XUDGYXk4 z606`RJe4+^ARG`n)Y%Rjn@(oD3vog27bjF)6vob*@K8wu(#&;4vpaby|J=#Ke%z1_ z9YTs!c{D>?`XkCZ#dP-@!!L_0hCoRt{zB1^$`N0nkigfqt2 zJu$gd^|l~|d*YHA5feMUX`Ck)K)1nl_yHKpXe&9l<4?* zh)CZ#6^wSHKxW|;=;D^#-FbbKBe%rRDv*O9o}dX@Sy?)nS=-1S3@4Col;ISOsO`F# z`R3J>8P?_#pBqiazlx55ILjHeJ6#mE{D)Vf?Ur4oW`h<90i#u&R%nsCrAiL%B@ldB z+OFO!QfpbZwnq~@-hGbdB-x&8s2)apX zK5>eh-nwQgE2Vgs@C9v-5xk#l|*{X1;^DO{DJ^Tcf~_vCRs!C%kr- zj&AoW_Q-#S-i}J6OYC=uC(wf7FKf8pLr%>WckOuH;{k?<>b=>XuOISB@8NNl@oTi%wT@5&1 zH=Px(yoB9XY7U7lT^`CvwmJ{teUTsd}REo3)SQ zHGq*zt*%@{$$^I~7Z^7t;-!q?;psCUc&aM1?w!AA>Q9z z9|~>FR5@WGH*eA?8mtE2o1vo5za;t(qnSMb-yNdulM~_)kF?ZhO{lwu?y3Yez@HL; z?>X2~E`lLUeSkGG-o&33WR4^RY^inG+>-<<_`d zBY#$%U6Ct|ALt=-`povcAgoLps#IWfip?5Cuiuqiu`>q_>jn0Ach5CE?o53fV5Sb9 z$9?YJj1`B+-)NESW^dmx?6a}nDnk9h-qo%}_NMtZ{!QL3_u?2$j!Om@z!>aa9ADgs zk}KS5CwG0jf!f2+--_tB3lt<*aDN`$Y3a>C>Rdc*=`;4O7l?5$P3-~qh4F@TD2|Q2 zKX~rg`RDxztI$jI{q84gbe zG79^OuA_-iq)q7vOV!Bh(A?kQM1N-}JviCv@$YfDcfF?C+4a>TlJ>TP@dr>|GY(`b=zI8Vd#w@ifpFvIYt{FG@cCJNyp2L+be21taZbl{{or%(tU%{kQadw` zCzZ9aO#8+Sq!PPy85aS_?{rK0;ND( zf@JR}OiuN2C@NF!KF+nqj#;SmCvK=z)AaZ_{SsJPVL?XC|d!F`*RMa%>wjfkM;kA@QrsDWfvrRBN#*torZV{ zm?olop#mx+3BrVy!Mv14d}tJZZ0@c^PxPOoV+v<9rig1r;Z$W;8xzp$OQqIKWY*qvh8jeqSBdx?K47gW z-e0*#=UPTCA~T^5o}u4xD0k|i7*KT&3=n}^?J$MaK3@ACI||xQf&tbcOaSWWrHP<| z(4N=~ME|l#qX?j{>+KTJo(J&Vh7hb9xErto(f9Q>CxK)(gs|P>vvz8yJ$lw}9;j4a z3@tFi-mz(3&(hR(fNO?D;Zb$cv_QHo<-Q2?TiuO$cCkbr} z*3moFXBPkKTp+uDgF@%D%p#x>#Ix#}1{0&GB zITRs3wb(wyPz2SYJ}EE>IW#d>4TY^$frNXuf`UkVaNaOCp}rps?ms8v1`Gm9{kN6x zw-Hm!@A<|3a@|XF1(9;Tf0Q0i^gxey9BvnYhz9Ew5kRA86a58dU`)%I7L{xrV~PpQl3rL6p!1oh zBY87mQKW%Itn4s}grK$|7hrz-Ml7WxcJf1-C-GSQk?kkgnyVO6U8n=Ce@$v59(hM5 zkoJo7fh93fV5L+C02^K1ZHdtIPum1tq4sTR^W0KuAU9mvt3x{B$F%bRUuYF=k^ zF6WBLo0&b}+k1Pi>q@vjMM0K6)cFSCj~4sePr zy^{rxX-nxIA2<}nO3sZ$?suCx zyFG(gcc2VKXO~fW&Icl4t!W;Jw=NoRbs)PVRt)-v1F_~)CrVMozIUl6D z;_8lcpS*)6IqoAUJ-d$n%IuJw8w<@D#2Jl=yDhgzt)mNd^GkW7Kr^Rgp)rwQ;&Y8{@QhH6o=#?1DL1a>kI!r#~^drJbm zHOim)2k(T9_u&Y6B$h&>QD|dhn-r^Z>&90OxE5=irz&&*2R4=qtoHZvm%R6X5@Sne zC(kkQ6IuZ4s|4o?S#rE9dLfCH%3I!vJ|iD7=Bn+LQ5f1Dw${U$x=+^%RptL^=+Fi? zBS7iYA37*>Lji7HAS^=yU3P^fNv^Lw1M>QZQET4Hf zE~J2O){a~yX-*M#EIYqp4W84_yo+HGAlB4U5@A$WN z7jV7n&6}x9?e_!i3rFY`8x-o9mMF2eJtJAd`6f1Fm0Fjn1eCuKwyB<6UTv zUqJucB^VRA#9MLb5?3)QqU!SlTV#Z9{fc`UP+RA`s4|mU0)4zmW*h)_OE18PD_9<; z7```(c471EWIZ}wM{s5KRA*549}rWMDkLMPFN9|^#}V1mp?4Dh5Fr)_?oqZ`b%$zI z)f&qygMYwxFvoNnaI^{^V#7;r*B!zcK7Y65w7rVPyH&7{K8$*dPpUDFW@Em*b0{e* zL+epmv?+e?^azW{n(-DcGQCA{!v5%+<2J94RXj~m$*=P6m)giA-Y_OjU`JxNkFYXF z$uEt<=>ej($L$Q{I@!(ay+`!;&Z08h(5q$dm;ef7k+yEqNs=36tX{~(fPa#D*cY=b zB)35=R;93vq|PpBY)%GgBlQq&i1O;sPNPQKk>B_t`QaMHH1|+@Qi@vF8BasY=u^V) zp^IlxMugJt@$FG{zR_GuL>NVMkL(%*anAaZ`1Xw(K-!hyE6yV9xpjUGI?A2h(D8ai zQ2b=xDkgQP`_SP1^220HVfWxvO&s8Fl!%Z>-Ls*)Md-b=NwTRQJDmh|Ie2 zcyofB$GS>JAa1?HKgn*41wI7a=|bAeMgFm?nmSP5ymRCVYtRF(zkU}K5q7kD%fRiI zDy)At&f4;5-(zzqBaOuY`Y1>_lt?4LBLtRrWqZx%*zGd-h^eSO>28`ZK>zU7`P@5J zKp_$CC^r}!x$<-p^lt!C4k#!9H+7`to`A0aAJxn`2i_r&dg>QYqWCZf!>8FWi+y`4 z^y5Er)t4Ak{9CcXz-Gk$y9o+67|H*m35toSld+?v{TC#R$<)^M3+wrxslEjN=b6cW z4fN_#lefboLGC)zus5Noug|=aIsuecbnTmI!7>@+g@j!6uSxs*z^pLoQmHXb_E8Q@ zWTtb*0)iPVwduIiAZkNOuTT4=;yUY_lVkUBq8b=Fh(YF!jsl!;pG!iv6~bv7d!0%4 zBQ`dRBn#>~yHpTvP1{H6B*`-yxbKfr980IAb|Sn|shX{!V{}$8X{F*?apSf7ZB}em zFV5tj`hrIE260sVZ7nuUN1aJv@(klcsZGgwH+~vT6@6#*N5#NcENsp1u;c!9dGTlx zj%%9df93fB9%R<)Tx8;4XAW%;jM(viI!u8C$+gU=^YlT}SbT8CRak__ZWHY(1VLZUoBqHN9`i!@Rj;R*|N)pdF z?u!;RP5Mw>{l7rd)g<@E5)ts>1(S!u)6{oSzMl)a8t;;mdVYCrQ*;+&B~ZYc-nROB zmwAugv8o3zU&y9)m`R%rUNQ-H9o{#NycMZ>)vGv7Sn{DQtbYf>eesX|y5e!;6l59> zWn4DVG0xD51(8Ngx`m<)8{=V(LBiLO*!WYbIsXW?^GN*Tw`_2|rGO*#VRIen-e#dmU?GySFo2o%-_tmmR4|j2sU~C`MQ8zi56g|MC6Jri-Uf|E3X+dcXISg zjoHnzlc%-HE;gd^_LD*QKmZcL*qrz|(Y6rT_Y38@18=G7rlI&Ja(xsH92gwM|9jUMjFx&&if~t{eV75-B+)LE2<|(&Jp|1%0 zj}RO4X+K>f**eO5EPl<5j=Uz?SJ~81*xM(emCs?=WXj{zxx#+6S|krC5g3dEh=Ni7e_w9@by!LM-!HuXxd^K@ zwCwREahCpQgbqL9tl!M0c%REuCY!9LFzCdX4mW2R7}F(KyA_O_&Dz$w&RkAR0qPH6 zb^?sr`be^FP*7`-!QplhB1rMo_>>^=VeI3n+yiL8qs0UQ)2^Q;&aJ0cXuI+pweB94 zx1ZcTH{adP-XEN8^z=3Xs${plnlIdM0zpE4%oRxiog0&DDB%-SU^6m`UT4al%&Ce>6%D`ZHj ztwL4>ewLp>eYI*dP?4dj#}c(z3gc!tI*aVn5MD0D?O*u{xPt=W0;8H@l3iO23=G5R zLvn*=K-$yxY^pTW#f2u)dF#M(mc>YN^HcFGnvbI*vBkO$adZY*rCvGfMe7M|nV*>R zuspIusDcC#he;tMsdFT?`D95u0*+!#<@8>5{_wPj!(((55<^W{<>=|j_rL^wN`Z%&L3!qfIAj-FGgw+|u*KPZ1k#N!kznTbuu(>MGq(XMw%OtO!t z>dlI>zw>tsuylwtMYX75VV8DA`Q>~^OhjPGQ*U@RD$fp0vxOSW}{TIb;TO`D0PTj+fGrY`QEorix{qdjpWAF+Y7rN zQbRoL9nHp@KLb`BF-4ld@lZLl*|+#-hp>umunb}z8rxiZ$^QwjUG)9wlh&cHCU@9q z(@oH99ZKiRQ8g(^S4!lVDb7)#7r@Jw(Xi<%)wU^+h0LgBx{XKQ@6rl}^;-a0F7Yf530A zZX90T)e_0}Dmc`zy-PMaoh2sJgx57O7lWHS`5PB?;q29P+rI8`*_0SXaL?>|*_oF) zuP!}WH$`t;Y}9P5-NgG#Zn?N>c}}aIH_|f&gU5=uXWmI6*(|zMb@Umkcvo!i`w5w` zT!S;KFb^WCS_2@BvNN*Cb3&!lT;%7P_l2hBv2Q{1rjDIKWpZTk>OJ-{mCyT5)(hfQ z{q4ggK|J9;G)?;`EA>4nDFR|bc?5q`BBKAsJq1wiw#=Y zl>?)nuw8j7HqjH~Q72)YwLT*vZb$ZfKf?f(W31neMy!tA#VwFJW z852Xu62V)V5Q9JmH?Qk51^guZM_B{;Js-H)K@2xhf}32|&bwGs?L^Lm=+~5+jC^FK?G_TKQCVKSt4+C>q*Ovin*+!25!={A9XJ$da-W0 zAxazY$^Bb&)M4bJK$A5|kegC=Cwd?x)i08E)*v?qaw^xaKFdoCP+n}9L|v9`xmo+h z&O2+(7|RNBa{tR@Hug}@Frt(&)fZ9vJebg}vx8WL1>HkWYzx1KJ+dmA2^sO@qjd!ntb#x7Rp`4>!*k}IQ&Z; zqar`?yTogbjXtgG#;^qfh0Typ^?gN>2cr1)ahmOGJ+a$BfVslnIsdRoZ|TAWBU{nK%z`ypi<-GV z9K^cAK~7<{kP8~3=MQPjCr2Xtk%ZkLpt~?XO4ko@xrs9-@ly%n{WH=B2|2mvTJclP ze2W&Y4|6eneS`QOe^4!Eb%^p<%7m1-$tBw1ZujH{=ZlP?0SxJsTFl9mD!i9|NM@i?Dci zCO*Z8Z^xa}I|4a&z0vPkh53!$$UfXwhSidN6jFaUg}p^eZ6TWMGQ_{=NIfCN_~|h9 zgoAK^pNKbC_MP;!Ert)Ts0L{g!12OcGvG9-0$W7>ny7U>OPPYh#{S+BG7_hH>T;d_w0zfCvRR0ZdJn! zggR^BZk`Hm@xpq-TT}n#iXGsE8HmwyhD-Bf7=?XR4#Q&|_BOw@w>9SrOz)_>O*(t=_CFALTG zof2*4Vr%SdX=nS}`adid=>N-lv?)>1YJeEM_v#&eP8GbFjUY*FB!)NoVorx`J-9YI}^rSe0m^6<$tpxfZELomjyFA|6 zCoUVO)LnCn8-*8l;rROM6)gS`$3lSCgTvHluD8Q)jDu}OQZ^Wn=Ia`ss{6-cuD^NzrytaxcgU>&{0Y<$U!@V=lJV@gN zRn08Zhf`P?zq`j*B;SVucH;jdTf9+q5-tO2r}6g`);qz_yukL$TIPp=H@j(f_o8=l z@M#&o7JPRDE#!o|I}D}?=L2MaiU|VpeN9tUuGaC}OD=*^MBNV0iDYD28fM|{!6{`c z{L;9o8T!V?q`gz+-XW9Z0B9eZMPVk=-j_(?DDFvnGN}HpIfql5dpariD3LXh;0-&K zO+Rsd4&I~nnV~%+-vE$^BfRjciGTkxtKtF=S%f`Nus$jMZTom^zh&f!(-VCM9CId^JmGoH@j-bF-*T!FaE zYlRS$Uo}d(aABKwm&ZhRi(Cn5`9U<`*e;wrDLvlOm9S@v(P12;YRWLb1kN!yG)87< zgXT6|RWe&{LN+Z$xdj-nhU`#cVCh$|Px}-#60GOu=Fw z+DZTsY3lEmV4-nl{zrlTD`^}sVJlAiJfWXJct=o< z#~{i9$7*6ErR89U5`ZO%gCkIuKPnqq0X2p zJ5iw*veJ~KO}`{7_s3Jm!4BgNaO|$l*F5#NnC2UWp{xp$MN?SMFY^pb9eDmcIK<8} z)&cW?QZ0dUBpWsZ2Rk(0e-vY;nBN1sY_UT3Qt!0_9gcKzN(}V0J9xHd)xX~zM$Qea zS#K2z{!s{@9I%2;VXWz6;}u7q?lFP1!EB@uy|&O}HOy$4q1X{%1z&pjjk1^Xg(I3x z%D~_ut@kWS?@`W|TAbwx!u1&beZm`z%H%VlbDck)U63|Q5wmbjb%X9*s8u;(XBG2n zqXT%CeuGFC9IsjRm+K%Rh?=ay%1lE<4#WYJnZFX4q*9Z zCFZ_BzQtPS6=I1Ptxf#pw+zTRwv{_2c(bzgDI%Wys!>zt&I%T-OV7%MAsccr zg>8B!f>N(+3;#iR!Nnm^pGd_L0*%q620jpD*7r0*>X^xq6>{{?A=%G1=sm zf+WOh?jsjdKvkypaJkEmO8??WQ%JhTN=x5hELj0^!ZnLP3#MNMrsz+*=%s8v;CEqZ z@4pQHEQ<`N2%60V=TMVbppk{bm$(8OB?s9`)Y`i>SZwrI=}KnHxGII$n}@~=$&yQU zVFqGKnEt@e)dt_vrl?70Ri>MoFA|{s(VgY9bcKHgml#XWV63eAy-u0LU*3nln$W)@ z02y4VwmJbEk%BW=k$eT(myF%-m)w@T98g~x?`bB~;19FyIUkceh(ist(U*HvS&`^a z;?{9JAu-_zBn&`>S3R^+Qze7{eR}`%?i5eKP{~I({e|T*9e$)ngUR^)vZA3F*6+fp zvOL$hxS|Ie{GLB)YXK2|dm?ah5f4PXVan1%c6%fyH`CZCaHnrcQY<#6MMOl^mn}T@ zUvYcSKN+E#Um_bKL{Q&QSs^6vV}4#08?Zl(C}hcg`fP3t>W=2#VKcn&Fd zTQ3*+Wdudma)z%;07gs35Ze|f0-MZ&nGp=ZR1=F+7%3I2e@4JTbAC&?B3RI~E7iOg zXd&q8K{|P){^HvF;+5|;UxFl7$PH3^%KJ0?QN*`^d)X^b1 zxPeBeuM;kEO@F~z;+w5ZC0O2IQ&Xj})m;u?2Jx`V{uh6cjzj6-zg4r6!BYUVOS5$z z?;o+hagaL}h+r;Pj!@#`V&SoqMmmS5W-mgFU6al63y)H=4!J0?W=#=Muv#u-b-VPZ z=on9FTxSq{ns723%O9&(;O?UW)N#Bg51b&bv#LszAOMaov`yLtP))f^h||uZg`vYezRR z59X}Z;SA^&w3a#rq)hf$zVO?+^TZ$#n#Hg?fNgLQPodm^4f+Tyr`li((&?1^XmBJ= z;Mk=cP%?L}=2UQK<4Q%5gZlO?ExDCC`XR)e556+K%fb#vPgU3uYrCa$KTa=P-D>X0 zZ}StIzg8HvyqhF`#wcT%*Ng*}w*J6)E|2kVm zKScgs&g8e8w0yMnzVzr<^SkO2z>WQo*!GV>eyWPq9|vFXEDLLTbmkl^e`elX*JJYw z|NgU(jc=DKjm2mWF$P)OL(~$*7(|ml&1+(szn?gdh(Q?V@5>@hLW~vU(_LC$?ySYx z*o@Y75N3NKWNk}RnmS_7mjX*dsIbQokuh4o_Xc>b!rpVx+qP)?`v_-Eh4I=MC7B33 ziZ}-&7L-+R*mP;v20KH!VaRtFOK->GqG>z!HFRK_UbQuDDj+?M_F)V!HVl-rv$Hm5 z@99rWeuZwqBJHNlNZ0&&JJ!_~2>{akaxj=^>RP==PANsiApcu`uw;y!E(F`6%f{30 z8!ww+OT&1;hv|a<@bXcFv|=#q3Pm946dOfG#6-rg)%AbEPqv_~{mfEq)w(N)e7;0(m>_CEebxuM(m55IA{ic_J-(Kxazjb9^9H zmt}X(a`l?Ua5zbFHry z)Q?UWwH-e2RBC^$z@)`+rS(LPWE*kvQVa!zwb|}?LJ8zOiPRoqzZB3UODr>IvKLXI z1r)dX?#c*5H+cIaRdoo1L_fP%D4~h=bwKLf(RN3iHw15HzVI5LYEXCdcFa|_`{j`gLE4T8hAsQ0)a7Hm%R*cp4n==FL69nU!0w;^DaH( zUDUj6+MKShFD()UzHy8YiS6?e_oN>A_GD+iVvanuloJKLGL9gT-ipQvfstc_dkU`OorRr&6#0De1ebp*B5>A{JwkE$ND+fg^Jps zIC*xgLEo7hb`#E*3Vq_=_-P6np0qKXe7Z^Oh24iSas=I*?D1Yqs%LcEOv2MjtaH}zDoU>2WXmKUFh zOPY(jihO;Q-cgSY0$jtVu9w4o&6RwR?rVj6R);@tQ+`DC3^B`iV>6Gr(~`uYfZ;&J z0SM##AV%uj1g9H9bm=Ikj#%#iS@`KA%=s(NP-9ZX*vziIhqO<$vd1V(cqKG6jybC0re zcWDL3|F_nz1D>k)4<8~UdlfQ6*&|!_s6<(nQT8>m-HS@YNJ=4ul7>QNL&~URMKUrf zN`;U#Y`L=j@6GR*d+t5AfBNu6!$BD?PS) zT38CZ?TTFu`uwwT@mY4;-hdlDMWITnKJTSDa~>rZ-Rk1le}Q{;XW0|ucRd_MB?)VK zqxXIuin4x{&HJ|GMj0b&KF(CccXxaJ0u^osmMSzg_mvKU`gndd1CxZ55och8Z%_3{ zxq=z?kNLIya;IKDW@3p6UE9I@yJq+>t(1FZ%DwTYdVCe)!wouOYkA@-$vp#u4>C3f z2DFtZ7cc-t1K20xpI8lj83ze z%lM}!bzJdCjO)(g^8YONL514J)#Qlf%@|{$@4n6tTFAkTxdso;^B2nH&v!rlqC6pg z+?84-!$Y^|MemUsDb5eYGZ(<4U6cbElO7e37h7l)8bA-_yZaT|4)$JGziojHxF1@_ z66@;eFPQnbW?M?Q8l|25eQaqKr`7w1Vq+oGa}B%rj?>=UDYS#@^d_6|XZq%;uToVv zt0+nonwcGmRo!rAPWk$yz9&teyr`}xBr#<+(v)c$`Ss*Yt=s6vw4VEf!hXF`DTB&W z?(}6G0b{7Q4Vd)TEvIXa#6RXsHArs}mfyeAxk>k^nk$;eRbe1?aJKfq$D=)8ri#$J z+WlUlo6a{KO`V%RJziT<6)vPP=)uKO&wESacGT6NE}VCUZB`0rgW3Bb%3pa$36fi$ zckn0hQpqTogd5;Fm&iDW)8w3CAGz+#+u~W9&&BTX`uVAt-lSGBF9DB`w(dyH!55oqC6mUhGWQ0($uqPm zp&V#5L~&dcst;{)D^X`@%WY+Q#Y@}S**W!cU!v&Or-x@0u31uW*Yk$&Zxp|f(xb97 zcTKU)y`J-v4-5yGDXYc2)}5*n+Rb&tR6)HhKNoXkLVopJ*2LOrp_DzjhC5r?$G+71 z<;!+vohVB|>*Xd#r({QaHtxF@_o8%jS`sCAfdseQL%;UpY>|92zOU(bkO; zgT~*pTlLMD9X=*+aFji-*=C$5C19iDMj83FZC`)(!HR-3UHAD7^;AiwgJuN^yxhg-E2`;qV80E6LPTu2%*-%!0 ze_qOGqvXo`voh z^zQ}FIh8t>xU|$)e{&NSrT*%jl78UP?yVl09?82ByajyT=EUr#V~ko zR`;b+W`&_oneaeDw9dhAN%BpiNjF+oZBb0IFm-)rWgPp#CiK=*%XbB_gCCq#Q#-8V z<084MchNI8HFu;MD4wueO~18+Jn@E)V)`BWaQa-Ug*0LAbO8mm!L_=rPfAUm$SL{s z*I4Cy?usz%yr%R?;xJ8c!O5AinL_8CZ7cIO+t&_jD?@vZUZ^v>B;DZBXC>rTr1!d~ zSf*4}x^L?4mAkdMcSR4M8DCn3y52|HoFbGRdzOK2^Fee(%jtW@++>NFUSUV6vRc$IFVdQtcoTCLps#W35a2FYaBFSQY93-qy}o|HSBkpn;?;5Qmh<(`K5bGLb(^6WsLNJKRihk` zYB>=5v7z5J;8H3}VX}es;C$F9du-1G-jHoYT|iQDyT`6k zx5pNYu{MeE3^zqO1rvw1#bz4&Pz#$>UZ0uf`xYF0o_UM5WEQ)yDQ%;!mz#zvRrZKG zne~Y#{Zrj-j)fGG`t>GZ8J00Oz73LX%I+TCpfSJWLhQViMu^YO5guq^)rdmKdiF!B z%NNKF>AG}@J?nVH_*|VeH^BF$dgf|UL?hSdV zn6qtqf`2|Tj4vW@Au{jYxd0tezUrG|Sats46=RzPr!Cw{qzd=&S$Q)_C=|xo+*6`! z|31M~W)qr##U6Pwed*lz>m>exO@%aG1~$zR9|g}Ea2B$A=~|uDo#x+0Wp?OwZ2N}& z;qR{r@a;HWy4`l`Zp%6&cFO0Cxy5THuY8R*q}AXlx2WifFysv}^mY?v?B-@Ww65Dc zV9jN*cRDyVSJ&y~=2=+xH%R8|_y&2%)bV^|DP*0zy4OwVlWuTxMs)V%42J2R zLvrk8=e(ht-qoWBSH=I6lSsd%9m^gf#4%1P4oeb=L^gdgBL1@TDbinVpQ6Cmd zcSUncsixtZ%WHB3J8O5WP!;tHGE^CMbX#kT#d2yLO)7pQGog;5GJ={D=-wXNj>W2I z4WGDp(C(@RQ#ND9EZ@oJweg==>%N#DA2L`N8yycx5+-{azI_@?sTGmFf5iLb=dDpx zT~dB(@s3Rw>C}%kVX^vJ?ORkH3HY_kO|Yqc?M;TF4c{nbq;?rbMXF2q2_Jv=IM<=g zcIAHcaK2tj$j)Kd*UYU!QZDGilZ$60j)CTFxx3kx%Ned5Ly3E3TrKTf- zE>_Gpb{b1x?tVOW!^mV$uM(ME5(`%#zX2~x-H|$1lXR)rL6ZiyQx&^6junUbkc~;+ zV;59mUUxlxmr-qp&nmN6+o6HQ7k7Rz<=U7?1wk<^zZxsZLUg}1R`B&l`TF}fq0s0b zV@>LxqlADsZSN|6CjBWD%CVCn$c5h3S-S7Hj8^JY z9ramx##iLw#Sh(~z0Uc3)6!WE6L&2>MhvE{v$Jnjy|-=@ogl}f;vqhJmtV>wG=8p@ zYv}zYRaT$9nu;r)(>SSYyMOu>=Y?0<+0$&aJ^Ke8zCBGTHQX9@jovz*m5KgDb!??` zarT<*?b_})=@h$ksFYP*_v&9`ldpVqh~tDGv+*GotnRyZZTYk2w-3c1)Bmcd;A!sH z_a&ZDN-fLQ%{%XA5&P#)9l-zN2+jNlt`?eh|1khgyQW~QI!u0-A*AyofimfmtI zSHaSmAG^Tfd^_DG!5A(c>;xly1x5`LTYkIXkK;sswh+Mu7lL0S66#|+_14yU4c~he zz<2+!=ajZ+8+B-#7fTdHR;P=p_BJbH;}(|C;E*S`UE4)sF5a%~%*df&VM*$SSgifJ zLykc-**PYm=pEDoe%)bWql^=kxeuOlA8~qCdNOzmoA7wu)YMGFY1grM$A&HO6CJPE z=Pd3_Hu#T^zBF%4>|ejVDCIh@${ju7#t_S^1*0;DUOBT{X&0^D*cw%;U7q^#!2262 zMt9zxIA5k8bM@Be@2lKitTL*pc*A63Ws>c$1w)9W}VJfVHd z8!j-qB^fJ`qaALz`Lt-u@QqB?w8sXDk%pV2Sp|DT~d5_P9nmAX(GkN_f#gs~+`DIOWoq_5#!=f&+jY{6)tgFo2>y$5c ze#w{Xi0^;?z`pUW(anA;BkoyBj#7oGmOXZ%ccVGSD>-Vr>nULFxfTjT9ARf}DNmFPJjrit7}&8jL#?&s z(?Iww)=8sN&%JaG?!Euo&+5h3hzd%ZS+n8W2OIWC=S=C4hf-cQ`TS5Th)a&zqO`ub zTH$leC6}v|ecyefU(2|$Xwp{F=14^eq)vM~3GTgWY0v%AH3uV@eD0q59yzPC4d^=F zUR#0DI^SAbC;w)@;tJszf10!^;h9tM24d%*7O_75+LN@RX3rzx`*s>uZLbqOUE4~Y z1-3`dxSXykn$d9_MMsF&-yJl+tWa}h$hlgl-OeT*efnLRL*3I)mxHYGU!e12Ha`#i zaxie!hly^6+=(mS@(em?lWv`DR%?@<*BBUq>MbkQ?Pv3;>hz)hs$XEO=X z-@Uq^Jyq?ja|WU=1VmIC=`L)O(>knRXi0?r41vAH_)0ci`R z-9aG>#DQ*gjw!t~JL(mufb?YH4s(Nek+k8Pz>GPg6L*UjBm2 zJhyzo`-zMwIpwWeIrmg!wI=0vbR|ws+0Qbj-&rxZCvECVyw`{mxQ#70OnQ7L*=455 z)QsDDb{`IjI&rR9J+?98$ir^U`f0-&+UJ^9Pj)NYB~E&juRCKC_K1R)vdrX(ooVVU zU&RK)eG5NAjikz2kq&U7_a^8DaG?f*sKM4#!o;$);nW+Aaqz}~5bxn>p!0kCM?=U( zrEpVIFKL?HfB?efUvX_(3~N%(4j2dNK$JfkEMFMtDF0@(A_!jKS6LZa9KGeq;C`SK z!e!F*43PFBz2gB4%E#XsRimD&y&n!FZx@RftgUay~;f&qjlO>=WA<(nnVlWPb%W$AC=vSA38wr8ANrA7Bt2zP$ z$`b-v`rKxm0UIvE8STMATw+up1R8SqjYD834KBlh^1!J}Kt)2}*y_90$iOr7_&_B> zAnB?$gWWt!jLL+-luHt~_<ZV_1mst^LnBelEvfka#e>za6JgbN=iOJp6B zYLq7gjyx{Ik-pq8B$0PoU0mt301%hq$mX>$B$0U%>|@UU2vQ(W!I36lL3T-|ERlPJ zu+Ni203R;Hk!J*8NFw{7E>_J#V95@b;Ybrf7?Q|8&k{C7A|ua$r{dz(mdHT(K7eyJ zOTdss4k~8|DrEvKg3E9uMiPc3vXCQ-JpaGv7aZ9w1w#^fsQN*vrxrLsxC}?yOXDNu zh&*)VwAyukfWl=sl1T=JB=Qi~;n`ji?t>$VJY??bQ=|p>a2d|oEDPf#^3X4wAO}Md zdFU5TP=X_Z-&(bvLfg_1Llw26M3Yk-8JB*Xa zLs3T3>LKJHnt%-F?1m$WJhaQXj^k%x*ZYX*^#_GU07k%x2yjei_h;$zEi z4nq=oC~!k&7V=rv4M!4rh^AHRN(eJJLLkGBkjYLMCy|HpchYk50~g>j9LeMgLlSw& zWYj*N37~Kpj`W2g<%v9WA(Q_xvTwWLNFoot9qf?20{Czl&UprnUt4)14?Q)aa0>wt zT!tfkF)$>Nhpx1AnIUtUgu;+S9`diG+JKCd34}7l+QASRIRZx#dB{yrN|yvO4#r93p(4tNtJDw}S%VC3m63RSqymwLRA1gP zrO*U(D3HM@4=-6J;309-v&BIe8<^1HXdr0&*@sr|04CQ6++TL-vKoIC6@RFy&Un3>0 zIc+FR;1=lS)PYqu;*mp;n$!QI55Ty4qNP#$oje1aQQ)3(va)FjH;e{a8B`WX}bNI5^&?w+V3<|d)h45_dc&FoS0lM;)G7zK( zlV+Np0&xi%B!j@B3w}z!h|Ku&))mlH0iGzdw3ClF#=#w&WC((~f;&H! zgHqc-R>LnH8ol%f#go<%!dbwv9h^TaSAtt|U_>TYkckLmyEh6Ge$|tR)(TL%GJt%(zXc1jJ9tcXlj?6^5ML<{fPhaNc;O**( zvXe!SR_-^FSqX?41NDJ z9fG#(6_mV1fItY$THsM{TizcLkk<0dc30mUaQGe)wOiy3WU6Jfn+$@s3)9zVs}bAn z%gw(+NLp)5J$-x{INMl2FD(JHjKyta>ZP-7vB`dXMG!@{dvEqIIM4FHm6YF;)axSoxB2Nbvffgn!O?T*(676=lNhadx(IF8Q$#zb0O zMb_RKX5hQ`z#)1t&cLDHdM8OehoMB(S(i(SB2p9Lkngd=E82vkpq}A@(9MjSc z73fzDeS?aLsViq(mG9?t~M-y_RJx2 zaL@YRBmR|xHzG9ZUX)ny4Cwwsi5wg&_IDTw)h=Byt2WZP4b~7KT!xQjR!JjsFT=s| zKgBK)-X49Zz7LS$GJGtyNhZc~yl-%Y2l!LUQYh-%aBEYNPK!$f;amv4BLmZtEgnErkcpqq}&STOGHhph#*$Bv#4ePF%CV`{yU7M)%sd8 zgvWz9#wb|)08?wcw8P-{xG`W=<;O}Ag0gWrN7r2p5EKQL$l$93jz-HbowSObU(uB< zv=C$sCZKQ?A~wI|`S*NWe3z%4Gs*=)#mm@N@*7q|kRupa!Bsf8EWdQpDh!NG`Xqq! zSimU`k1E*1zvS`7NE5Gg5m`?KK^n8^(=1kCJxVa>hewqMNBqX zxL&95|3|(!viY}PKswW8m%4o9UJ$Og}O-33KEa_d0Rk&+K2ocG?mB9eF%uxfTJPJ#tf}ecPE~{YDb+2cm z{ImxYze9|0jb;H1Q-7!K-k$Dg-1BJ(SE=z|I1L}^e#ux~$)szK|KdXU!ri{6|HrWy zH5b1kyvUkR4*nB Date: Tue, 22 Oct 2024 14:04:55 +0000 Subject: [PATCH 064/193] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- source/api_c/include/c_api.h | 130 ++++++----- source/api_c/include/deepmd.hpp | 121 +++++----- source/api_c/src/c_api.cc | 213 +++++++++--------- source/api_cc/include/DeepPot.h | 61 ++--- source/api_cc/include/DeepPotPT.h | 24 +- source/api_cc/src/DeepPot.cc | 22 +- source/api_cc/src/DeepPotTF.cc | 11 +- .../api_cc/tests/test_deeppot_dpa1_pt_spin.cc | 50 ++-- source/lmp/pair_deepmd.cpp | 3 +- 9 files changed, 331 insertions(+), 304 deletions(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index a4bdb6f422..7826e9aa18 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -162,20 +162,22 @@ extern void DP_DeepPotCompute(DP_DeepPot* dp, double* atomic_virial); /** - * @brief Evaluate the energy, force, magnetic force and virial by using a DP with spin input. (double version) + * @brief Evaluate the energy, force, magnetic force and virial by using a DP + *with spin input. (double version) * @attention The number of frames is assumed to be 1. * @param[in] dp The DP to use. * @param[in] natoms The number of atoms. * @param[in] coord The coordinates of atoms. The array should be of size natoms *x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be *of size natoms x 3. * @param[in] atype The atom types. The array should contain natoms ints. * @param[in] box The cell of the region. The array should be of size 9. Pass *NULL if pbc is not used. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag Output magnetic force. The array should be of size natoms x 3. + * @param[out] force_mag Output magnetic force. The array should be of size + *natoms x 3. * @param[out] virial Output virial. The array should be of size 9. * @param[out] atomic_energy Output atomic energy. The array should be of size *natoms. @@ -185,17 +187,17 @@ extern void DP_DeepPotCompute(DP_DeepPot* dp, *Pass NULL if not required. **/ extern void DP_DeepPotComputeSP(DP_DeepPot* dp, - const int natom, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial); + const int natom, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP. (float version) @@ -229,20 +231,22 @@ extern void DP_DeepPotComputef(DP_DeepPot* dp, float* atomic_virial); /** - * @brief Evaluate the energy, force, magnetic force and virial by using a DP with spin input. (float version) + * @brief Evaluate the energy, force, magnetic force and virial by using a DP + *with spin input. (float version) * @attention The number of frames is assumed to be 1. * @param[in] dp The DP to use. * @param[in] natoms The number of atoms. * @param[in] coord The coordinates of atoms. The array should be of size natoms *x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be *of size natoms x 3. * @param[in] atype The atom types. The array should contain natoms ints. * @param[in] box The cell of the region. The array should be of size 9. Pass *NULL if pbc is not used. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag Output magnetic force. The array should be of size natoms x 3. + * @param[out] force_mag Output magnetic force. The array should be of size + *natoms x 3. * @param[out] virial Output virial. The array should be of size 9. * @param[out] atomic_energy Output atomic energy. The array should be of size *natoms. @@ -252,17 +256,17 @@ extern void DP_DeepPotComputef(DP_DeepPot* dp, *Pass NULL if not required. **/ extern void DP_DeepPotComputefSP(DP_DeepPot* dp, - const int natom, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial); + const int natom, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP with the neighbor @@ -412,15 +416,16 @@ extern void DP_DeepPotCompute2(DP_DeepPot* dp, double* atomic_virial); /** - * @brief Evaluate the energy, force, magnetic force and virial by using a DP with spin input. (double version) + * @brief Evaluate the energy, force, magnetic force and virial by using a DP + *with spin input. (double version) * @version 2 * @param[in] dp The DP to use. * @param[in] nframes The number of frames. * @param[in] natoms The number of atoms. * @param[in] coord The coordinates of atoms. The array should be of size natoms *x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + *of size nframes x natoms x 3. * @param[in] atype The atom types. The array should contain natoms ints. * @param[in] box The cell of the region. The array should be of size 9. Pass *NULL if pbc is not used. @@ -440,20 +445,20 @@ extern void DP_DeepPotCompute2(DP_DeepPot* dp, *Pass NULL if not required. **/ extern void DP_DeepPotCompute2SP(DP_DeepPot* dp, - const int nframes, - const int natom, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial); + const int nframes, + const int natom, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP. (float version) @@ -495,15 +500,16 @@ extern void DP_DeepPotComputef2(DP_DeepPot* dp, float* atomic_virial); /** - * @brief Evaluate the energy, force, magnetic force and virial by using a DP with spin input. (float version) + * @brief Evaluate the energy, force, magnetic force and virial by using a DP + *with spin input. (float version) * @version 2 * @param[in] dp The DP to use. * @param[in] nframes The number of frames. * @param[in] natoms The number of atoms. * @param[in] coord The coordinates of atoms. The array should be of size natoms *x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + *of size nframes x natoms x 3. * @param[in] atype The atom types. The array should contain natoms ints. * @param[in] box The cell of the region. The array should be of size 9. Pass *NULL if pbc is not used. @@ -523,20 +529,20 @@ extern void DP_DeepPotComputef2(DP_DeepPot* dp, *Pass NULL if not required. **/ extern void DP_DeepPotComputef2SP(DP_DeepPot* dp, - const int nframes, - const int natom, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const float* fparam, - const float* aparam, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial); + const int nframes, + const int natom, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP with the neighbor diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index a952075789..ede64be1bb 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -98,59 +98,61 @@ inline void _DP_DeepPotCompute(DP_DeepPot *dp, // support spin template inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, - const int nframes, - const int natom, - const FPTYPE *coord, - const FPTYPE *spin, - const int *atype, - const FPTYPE *cell, - const FPTYPE *fparam, - const FPTYPE *aparam, - double *energy, - FPTYPE *force, - FPTYPE *force_mag, - FPTYPE *virial, - FPTYPE *atomic_energy, - FPTYPE *atomic_virial); + const int nframes, + const int natom, + const FPTYPE *coord, + const FPTYPE *spin, + const int *atype, + const FPTYPE *cell, + const FPTYPE *fparam, + const FPTYPE *aparam, + double *energy, + FPTYPE *force, + FPTYPE *force_mag, + FPTYPE *virial, + FPTYPE *atomic_energy, + FPTYPE *atomic_virial); template <> inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, - const int nframes, - const int natom, - const double *coord, - const double *spin, - const int *atype, - const double *cell, - const double *fparam, - const double *aparam, - double *energy, - double *force, - double *force_mag, - double *virial, - double *atomic_energy, - double *atomic_virial) { - DP_DeepPotCompute2SP(dp, nframes, natom, coord, spin, atype, cell, fparam, aparam, - energy, force, force_mag, virial, atomic_energy, atomic_virial); + const int nframes, + const int natom, + const double *coord, + const double *spin, + const int *atype, + const double *cell, + const double *fparam, + const double *aparam, + double *energy, + double *force, + double *force_mag, + double *virial, + double *atomic_energy, + double *atomic_virial) { + DP_DeepPotCompute2SP(dp, nframes, natom, coord, spin, atype, cell, fparam, + aparam, energy, force, force_mag, virial, atomic_energy, + atomic_virial); } template <> inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, - const int nframes, - const int natom, - const float *coord, - const float *spin, - const int *atype, - const float *cell, - const float *fparam, - const float *aparam, - double *energy, - float *force, - float *force_mag, - float *virial, - float *atomic_energy, - float *atomic_virial) { - DP_DeepPotComputef2SP(dp, nframes, natom, coord, spin, atype, cell, fparam, aparam, - energy, force, force_mag, virial, atomic_energy, atomic_virial); + const int nframes, + const int natom, + const float *coord, + const float *spin, + const int *atype, + const float *cell, + const float *fparam, + const float *aparam, + double *energy, + float *force, + float *force_mag, + float *virial, + float *atomic_energy, + float *atomic_virial) { + DP_DeepPotComputef2SP(dp, nframes, natom, coord, spin, atype, cell, fparam, + aparam, energy, force, force_mag, virial, atomic_energy, + atomic_virial); } template @@ -941,15 +943,16 @@ class DeepPot { }; // support spin /** - * @brief Evaluate the energy, force, magnetic force and virial by using this DP with spin input. + * @brief Evaluate the energy, force, magnetic force and virial by using this + *DP with spin input. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] force_mag The magnetic force on each atom. * @param[out] virial The virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9 (PBC) or empty (no PBC). @@ -999,9 +1002,9 @@ class DeepPot { const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeSP(dp, nframes, natoms, coord_, spin_, atype_, box_, - fparam__, aparam__, ener_, force_, force_mag_, virial_, - nullptr, nullptr); + _DP_DeepPotComputeSP(dp, nframes, natoms, coord_, spin_, atype_, + box_, fparam__, aparam__, ener_, force_, + force_mag_, virial_, nullptr, nullptr); DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; /** @@ -1072,8 +1075,8 @@ class DeepPot { }; /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial - *by using this DP with spin input. + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] force_mag The magnetic force on each atom. @@ -1082,8 +1085,8 @@ class DeepPot { * @param[out] atom_virial The atomic virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9 (PBC) or empty (no PBC). @@ -1140,9 +1143,9 @@ class DeepPot { const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotCompute(dp, nframes, natoms, coord_, spin_, atype_, box_, - fparam__, aparam__, ener_, force_, force_mag_, virial_, - atomic_ener_, atomic_virial_); + _DP_DeepPotCompute( + dp, nframes, natoms, coord_, spin_, atype_, box_, fparam__, aparam__, + ener_, force_, force_mag_, virial_, atomic_ener_, atomic_virial_); DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index 85166cb598..f54e89fdc0 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -254,20 +254,20 @@ template void DP_DeepPotCompute_variant(DP_DeepPot* dp, // support spin template inline void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, - const int nframes, - const int natoms, - const VALUETYPE* coord, - const VALUETYPE* spin, - const int* atype, - const VALUETYPE* cell, - const VALUETYPE* fparam, - const VALUETYPE* aparam, - double* energy, - VALUETYPE* force, - VALUETYPE* force_mag, - VALUETYPE* virial, - VALUETYPE* atomic_energy, - VALUETYPE* atomic_virial) { + const int nframes, + const int natoms, + const VALUETYPE* coord, + const VALUETYPE* spin, + const int* atype, + const VALUETYPE* cell, + const VALUETYPE* fparam, + const VALUETYPE* aparam, + double* energy, + VALUETYPE* force, + VALUETYPE* force_mag, + VALUETYPE* virial, + VALUETYPE* atomic_energy, + VALUETYPE* atomic_virial) { // init C++ vectors from C arrays std::vector coord_(coord, coord + nframes * natoms * 3); std::vector spin_(spin, spin + nframes * natoms * 3); @@ -288,8 +288,8 @@ inline void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, std::vector e; std::vector f, fm, v, ae, av; - DP_REQUIRES_OK(dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, - fparam_, aparam_)); + DP_REQUIRES_OK(dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, + cell_, fparam_, aparam_)); // copy from C++ vectors to C arrays, if not NULL pointer if (energy) { std::copy(e.begin(), e.end(), energy); @@ -312,37 +312,36 @@ inline void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, } template void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, - const int nframes, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial); + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); template void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, - const int nframes, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const float* fparam, - const float* aparam, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial); - + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); template inline void DP_DeepPotComputeNList_variant(DP_DeepPot* dp, @@ -1322,20 +1321,20 @@ void DP_DeepPotCompute(DP_DeepPot* dp, atomic_virial); } void DP_DeepPotComputeSP(DP_DeepPot* dp, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial) { - DP_DeepPotCompute_variant_sp(dp, 1, natoms, coord, spin, atype, cell, NULL, - NULL, energy, force, force_mag, virial, atomic_energy, - atomic_virial); + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepPotCompute_variant_sp(dp, 1, natoms, coord, spin, atype, cell, + NULL, NULL, energy, force, force_mag, + virial, atomic_energy, atomic_virial); } void DP_DeepPotComputef(DP_DeepPot* dp, @@ -1354,20 +1353,20 @@ void DP_DeepPotComputef(DP_DeepPot* dp, } void DP_DeepPotComputefSP(DP_DeepPot* dp, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial) { - DP_DeepPotCompute_variant_sp(dp, 1, natoms, coord, spin, atype, cell, NULL, - NULL, energy, force, force_mag, virial, atomic_energy, - atomic_virial); + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepPotCompute_variant_sp(dp, 1, natoms, coord, spin, atype, cell, + NULL, NULL, energy, force, force_mag, + virial, atomic_energy, atomic_virial); } void DP_DeepPotComputeNList(DP_DeepPot* dp, @@ -1465,23 +1464,23 @@ void DP_DeepPotCompute2(DP_DeepPot* dp, atomic_energy, atomic_virial); } void DP_DeepPotCompute2SP(DP_DeepPot* dp, - const int nframes, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial) { - DP_DeepPotCompute_variant_sp(dp, nframes, natoms, coord, spin, atype, cell, - fparam, aparam, energy, force, force_mag, virial, - atomic_energy, atomic_virial); + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepPotCompute_variant_sp( + dp, nframes, natoms, coord, spin, atype, cell, fparam, aparam, energy, + force, force_mag, virial, atomic_energy, atomic_virial); } void DP_DeepPotComputef2(DP_DeepPot* dp, @@ -1503,23 +1502,23 @@ void DP_DeepPotComputef2(DP_DeepPot* dp, } void DP_DeepPotComputef2SP(DP_DeepPot* dp, - const int nframes, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const float* fparam, - const float* aparam, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial) { - DP_DeepPotCompute_variant_sp(dp, nframes, natoms, coord, spin, atype, cell, - fparam, aparam, energy, force, force_mag, virial, - atomic_energy, atomic_virial); + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepPotCompute_variant_sp( + dp, nframes, natoms, coord, spin, atype, cell, fparam, aparam, energy, + force, force_mag, virial, atomic_energy, atomic_virial); } void DP_DeepPotComputeNList2(DP_DeepPot* dp, diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index 9173470dec..eaf9995794 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -89,8 +89,8 @@ class DeepPotBase { /** @} */ /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial - *by using this DP with spin input. + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. * @note The double precision interface is used by i-PI, GROMACS, ABACUS, and *CP2k. * @param[out] ener The system energy. @@ -101,8 +101,8 @@ class DeepPotBase { * @param[out] atom_virial The atomic virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9. @@ -204,8 +204,8 @@ class DeepPotBase { /** @} */ /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial - *by using this DP with spin input. + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. * @note The double precision interface is used by LAMMPS and AMBER. * @param[out] ener The system energy. * @param[out] force The force on each atom. @@ -215,8 +215,8 @@ class DeepPotBase { * @param[out] atom_virial The atomic virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9. @@ -435,15 +435,16 @@ class DeepPot { const std::vector& aparam = std::vector()); /** @} */ /** - * @brief Evaluate the energy, force, magnetic force and virial by using this DP with spin input. + * @brief Evaluate the energy, force, magnetic force and virial by using this + *DP with spin input. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] force_mag The magnetic force on each atom. * @param[out] virial The virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9. @@ -530,15 +531,16 @@ class DeepPot { const std::vector& aparam = std::vector()); /** @} */ /** - * @brief Evaluate the energy, force, magnetic force and virial by using this DP with spin input. + * @brief Evaluate the energy, force, magnetic force and virial by using this + *DP with spin input. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] force_mag The magnetic force on each atom. * @param[out] virial The virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9. @@ -632,8 +634,8 @@ class DeepPot { /** @} */ /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial - *by using this DP with spin input. + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] force_mag The magnetic force on each atom. @@ -642,8 +644,8 @@ class DeepPot { * @param[out] atom_virial The atomic virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9. @@ -742,8 +744,8 @@ class DeepPot { /** @} */ /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial - *by using this DP with spin input. + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] force_mag The magnetic force on each atom. @@ -752,8 +754,8 @@ class DeepPot { * @param[out] atom_virial The atomic virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9. @@ -1076,15 +1078,16 @@ class DeepPotModelDevi { const std::vector& aparam = std::vector()); /** - * @brief Evaluate the energy, force, magnetic force and virial by using these DP models with spin input. + * @brief Evaluate the energy, force, magnetic force and virial by using these + *DP models with spin input. * @param[out] all_ener The system energies of all models. * @param[out] all_force The forces on each atom of all models. * @param[out] all_force_mag The magnetic forces on each atom of all models. * @param[out] all_virial The virials of all models. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9. @@ -1157,8 +1160,8 @@ class DeepPotModelDevi { const std::vector& aparam = std::vector()); /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial - *by using these DP models with spin input. + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using these DP models with spin input. * @param[out] all_ener The system energies of all models. * @param[out] all_force The forces on each atom of all models. * @param[out] all_force_mag The magnetic forces on each atom of all models. @@ -1167,8 +1170,8 @@ class DeepPotModelDevi { * @param[out] all_atom_virial The atomic virials of all models. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9. diff --git a/source/api_cc/include/DeepPotPT.h b/source/api_cc/include/DeepPotPT.h index 39a2e43488..d77b7fa485 100644 --- a/source/api_cc/include/DeepPotPT.h +++ b/source/api_cc/include/DeepPotPT.h @@ -74,10 +74,10 @@ class DeepPotPT : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); - - /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial - *by using this DP with spin input. + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] force_mag The magnetic force on each atom. @@ -86,8 +86,8 @@ class DeepPotPT : public DeepPotBase { * @param[out] atom_virial The atomic virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9. @@ -156,10 +156,10 @@ class DeepPotPT : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); - - /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, and atomic virial - *by using this DP with spin input. + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] force_mag The magnetic force on each atom. @@ -168,8 +168,8 @@ class DeepPotPT : public DeepPotBase { * @param[out] atom_virial The atomic virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be of size - *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9. diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc index 3af999b641..d69e749ac2 100644 --- a/source/api_cc/src/DeepPot.cc +++ b/source/api_cc/src/DeepPot.cc @@ -148,8 +148,9 @@ void DeepPot::compute(ENERGYTYPE& dener, const std::vector& aparam_) { std::vector dener_; std::vector datom_energy_, datom_virial_; - dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, - dspin_, datype_, dbox, fparam_, aparam_, false); + dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, + false); dener = dener_[0]; } @@ -165,8 +166,9 @@ void DeepPot::compute(std::vector& dener, const std::vector& fparam_, const std::vector& aparam_) { std::vector datom_energy_, datom_virial_; - dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, - dspin_, datype_, dbox, fparam_, aparam_, false); + dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, + false); } template void DeepPot::compute(ENERGYTYPE& dener, @@ -213,7 +215,6 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam); - template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -487,8 +488,9 @@ void DeepPot::compute(ENERGYTYPE& dener, const std::vector& fparam_, const std::vector& aparam_) { std::vector dener_; - dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, - dspin_, datype_, dbox, fparam_, aparam_, true); + dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, + true); dener = dener_[0]; } template @@ -504,8 +506,9 @@ void DeepPot::compute(std::vector& dener, const std::vector& dbox, const std::vector& fparam_, const std::vector& aparam_) { - dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, - dspin_, datype_, dbox, fparam_, aparam_, true); + dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, + true); } template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -559,7 +562,6 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam); - template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index 882e1a55f0..9e85a2bdbf 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -695,7 +695,7 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, const std::vector& fparam_, const std::vector& aparam_, const bool atomic) { - std::cout<<"not support"<( @@ -758,7 +758,6 @@ template void DeepPotTF::compute>( const std::vector& aparam, const bool atomic); - template void DeepPotTF::compute(ENERGYVTYPE& dener, std::vector& dforce_, @@ -1254,8 +1253,8 @@ void DeepPotTF::computew(std::vector& ener, const std::vector& fparam, const std::vector& aparam, const bool atomic) { - compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, atype, box, - fparam, aparam, atomic); + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, + atype, box, fparam, aparam, atomic); } void DeepPotTF::computew(std::vector& ener, std::vector& force, @@ -1270,8 +1269,8 @@ void DeepPotTF::computew(std::vector& ener, const std::vector& fparam, const std::vector& aparam, const bool atomic) { - compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, atype, box, - fparam, aparam, atomic); + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, + atype, box, fparam, aparam, atomic); } void DeepPotTF::computew(std::vector& ener, std::vector& force, diff --git a/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc b/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc index 538794517f..df325ab5de 100644 --- a/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc +++ b/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc @@ -23,9 +23,9 @@ class TestInferDeepPotDpaPtSpin : public ::testing::Test { std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; - std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; - + std::vector atype = {0, 1, 1, 0, 1, 1}; std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; // Generated by the following Python code: @@ -37,7 +37,7 @@ class TestInferDeepPotDpaPtSpin : public ::testing::Test { // 3.51, 2.51, 2.60, 4.27, 3.22, 1.56 // ]).reshape(1, -1) // spin = np.array([ - // 0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + // 0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., // 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0. // ]).reshape(1, -1) // atype = np.array([0, 1, 1, 0, 1, 1]) @@ -47,23 +47,36 @@ class TestInferDeepPotDpaPtSpin : public ::testing::Test { // np.set_printoptions(precision=16) // print(f"{e.ravel()=} {f.ravel()=} {fm.ravel()=} {ae.ravel()=}") - std::vector expected_e = { - -5.835211567762678, -5.071189078159807, -5.044361601406714, - -5.582324154346981, -5.059906899269188, -5.074135576182056}; + std::vector expected_e = {-5.835211567762678, -5.071189078159807, + -5.044361601406714, -5.582324154346981, + -5.059906899269188, -5.074135576182056}; std::vector expected_f = { - -0.0619881702551019, 0.0646720543680939, 0.2137632336140025, - 0.037800173877136 , -0.096327623008356 , -0.1531911892384847, - -0.112204927558682 , 0.0299145670766557, -0.0589474826303666, + -0.0619881702551019, 0.0646720543680939, 0.2137632336140025, + 0.037800173877136, -0.096327623008356, -0.1531911892384847, + -0.112204927558682, 0.0299145670766557, -0.0589474826303666, 0.2278904556868233, 0.0382061907026398, 0.0888060647788163, - -0.0078898845686437, 0.0019385598635839, -0.0791616129664364, - -0.083607647181527 , -0.0384037490026167, -0.0112690135575317}; + -0.0078898845686437, 0.0019385598635839, -0.0791616129664364, + -0.083607647181527, -0.0384037490026167, -0.0112690135575317}; std::vector expected_fm = { - -3.0778301386623275, -1.3135930534661662, -0.8332043979367366, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - -0.5452347545527696, -0.2051506559632127, -0.4908015055951312, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0,}; + -3.0778301386623275, + -1.3135930534661662, + -0.8332043979367366, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + -0.5452347545527696, + -0.2051506559632127, + -0.4908015055951312, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + }; int natoms; double expected_tot_e; @@ -144,7 +157,8 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { deepmd::DeepPot& dp = this->dp; double ener; std::vector force, force_mag, virial, atom_ener, atom_vir; - dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, atype, box); + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box); EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 6df66ff8ed..695d0879f6 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -903,7 +903,8 @@ void PairDeepMD::compute(int eflag, int vflag) { try { const vector &dcoord_const = dcoord; const vector &dspin_const = dspin; - deep_pot.compute(dener, dforce, dforce_mag, dvirial, dcoord_const, dspin_const, dtype, dbox); + deep_pot.compute(dener, dforce, dforce_mag, dvirial, dcoord_const, + dspin_const, dtype, dbox); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } From 114898f111f15086fea513edd05bf4c6d342e725 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 22 Oct 2024 22:08:07 +0800 Subject: [PATCH 065/193] bump version --- source/api_c/include/c_api.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index 7826e9aa18..25ba602655 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -10,9 +10,9 @@ extern "C" { /** @file */ /** C API version. Bumped whenever the API is changed. - * @since API version 22 + * @since API version 23 */ -#define DP_C_API_VERSION 22 +#define DP_C_API_VERSION 23 /** * @brief Neighbor list. From c2515eda6255f499f624fe9ea2cadbe35e486c22 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 22 Oct 2024 22:02:43 -0400 Subject: [PATCH 066/193] feat(jax): energy model (no grad support) (#4226) Add JAX energy model without grad support. The grad support needs discussion. Array API is not supported in this PR as it needs more effort. (JAX has more APIs than Array API) This PR also fixes a `skip_tf` bug introduced in #3357. When no `@property` was added, `xx.skip_tf` is always cast to `True`. ## Summary by CodeRabbit ## Release Notes - **New Features** - Enhanced `BaseAtomicModel` and `DPAtomicModel` classes with improved array compatibility and new output definitions. - Introduced new classes and attributes for better model flexibility and customization. - Added `EnergyFittingNet` and `DOSFittingNet` for advanced fitting capabilities. - New functions `get_standard_model` and `get_model` for flexible model creation based on input data. - Added `BaseDescriptor` and `BaseFitting` classes to streamline descriptor and fitting processes. - Introduced `EnergyModel` class for improved atomic model handling. - **Bug Fixes** - Updated serialization logic for consistency across models. - **Tests** - Enhanced testing framework to support JAX operations and added methods for JAX model evaluation. --------- Signed-off-by: Jinzhe Zeng --- .../dpmodel/atomic_model/base_atomic_model.py | 20 +++--- .../dpmodel/atomic_model/dp_atomic_model.py | 10 ++- deepmd/dpmodel/model/make_model.py | 35 +++++------ deepmd/dpmodel/model/transform_output.py | 4 +- deepmd/jax/atomic_model/__init__.py | 1 + deepmd/jax/atomic_model/base_atomic_model.py | 18 ++++++ deepmd/jax/atomic_model/dp_atomic_model.py | 30 +++++++++ deepmd/jax/descriptor/__init__.py | 11 ++++ deepmd/jax/descriptor/base_descriptor.py | 9 +++ deepmd/jax/descriptor/dpa1.py | 5 ++ deepmd/jax/descriptor/se_e2_a.py | 5 ++ deepmd/jax/fitting/__init__.py | 9 +++ deepmd/jax/fitting/base_fitting.py | 9 +++ deepmd/jax/fitting/fitting.py | 5 ++ deepmd/jax/model/__init__.py | 6 ++ deepmd/jax/model/base_model.py | 6 ++ deepmd/jax/model/ener_model.py | 24 +++++++ deepmd/jax/model/model.py | 63 +++++++++++++++++++ source/tests/consistent/model/common.py | 23 +++++++ source/tests/consistent/model/test_ener.py | 26 ++++++++ 20 files changed, 289 insertions(+), 30 deletions(-) create mode 100644 deepmd/jax/atomic_model/__init__.py create mode 100644 deepmd/jax/atomic_model/base_atomic_model.py create mode 100644 deepmd/jax/atomic_model/dp_atomic_model.py create mode 100644 deepmd/jax/descriptor/base_descriptor.py create mode 100644 deepmd/jax/fitting/base_fitting.py create mode 100644 deepmd/jax/model/__init__.py create mode 100644 deepmd/jax/model/base_model.py create mode 100644 deepmd/jax/model/ener_model.py create mode 100644 deepmd/jax/model/model.py diff --git a/deepmd/dpmodel/atomic_model/base_atomic_model.py b/deepmd/dpmodel/atomic_model/base_atomic_model.py index c29a76b3f1..6307b19f41 100644 --- a/deepmd/dpmodel/atomic_model/base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/base_atomic_model.py @@ -1,13 +1,15 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -import copy +import math from typing import ( Optional, ) +import array_api_compat import numpy as np from deepmd.dpmodel.common import ( NativeOP, + to_numpy_array, ) from deepmd.dpmodel.output_def import ( FittingOutputDef, @@ -172,17 +174,18 @@ def forward_common_atomic( ret_dict["mask"][ff,ii] == 0 indicating the ii-th atom of the ff-th frame is virtual. """ + xp = array_api_compat.array_namespace(extended_coord, extended_atype, nlist) _, nloc, _ = nlist.shape atype = extended_atype[:, :nloc] if self.pair_excl is not None: pair_mask = self.pair_excl.build_type_exclude_mask(nlist, extended_atype) # exclude neighbors in the nlist - nlist = np.where(pair_mask == 1, nlist, -1) + nlist = xp.where(pair_mask == 1, nlist, -1) ext_atom_mask = self.make_atom_mask(extended_atype) ret_dict = self.forward_atomic( extended_coord, - np.where(ext_atom_mask, extended_atype, 0), + xp.where(ext_atom_mask, extended_atype, 0), nlist, mapping=mapping, fparam=fparam, @@ -191,13 +194,13 @@ def forward_common_atomic( ret_dict = self.apply_out_stat(ret_dict, atype) # nf x nloc - atom_mask = ext_atom_mask[:, :nloc].astype(np.int32) + atom_mask = ext_atom_mask[:, :nloc].astype(xp.int32) if self.atom_excl is not None: atom_mask *= self.atom_excl.build_type_exclude_mask(atype) for kk in ret_dict.keys(): out_shape = ret_dict[kk].shape - out_shape2 = np.prod(out_shape[2:]) + out_shape2 = math.prod(out_shape[2:]) ret_dict[kk] = ( ret_dict[kk].reshape([out_shape[0], out_shape[1], out_shape2]) * atom_mask[:, :, None] @@ -232,14 +235,15 @@ def serialize(self) -> dict: "rcond": self.rcond, "preset_out_bias": self.preset_out_bias, "@variables": { - "out_bias": self.out_bias, - "out_std": self.out_std, + "out_bias": to_numpy_array(self.out_bias), + "out_std": to_numpy_array(self.out_std), }, } @classmethod def deserialize(cls, data: dict) -> "BaseAtomicModel": - data = copy.deepcopy(data) + # do not deep copy Descriptor and Fitting class + data = data.copy() variables = data.pop("@variables") obj = cls(**data) for kk in variables.keys(): diff --git a/deepmd/dpmodel/atomic_model/dp_atomic_model.py b/deepmd/dpmodel/atomic_model/dp_atomic_model.py index 7e576eb484..fe049021fe 100644 --- a/deepmd/dpmodel/atomic_model/dp_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/dp_atomic_model.py @@ -169,14 +169,20 @@ def serialize(self) -> dict: ) return dd + # for subclass overriden + base_descriptor_cls = BaseDescriptor + """The base descriptor class.""" + base_fitting_cls = BaseFitting + """The base fitting class.""" + @classmethod def deserialize(cls, data) -> "DPAtomicModel": data = copy.deepcopy(data) check_version_compatibility(data.pop("@version", 1), 2, 2) data.pop("@class") data.pop("type") - descriptor_obj = BaseDescriptor.deserialize(data.pop("descriptor")) - fitting_obj = BaseFitting.deserialize(data.pop("fitting")) + descriptor_obj = cls.base_descriptor_cls.deserialize(data.pop("descriptor")) + fitting_obj = cls.base_fitting_cls.deserialize(data.pop("fitting")) data["descriptor"] = descriptor_obj data["fitting"] = fitting_obj obj = super().deserialize(data) diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py index 8cdb7e1f25..dc90f10da7 100644 --- a/deepmd/dpmodel/model/make_model.py +++ b/deepmd/dpmodel/model/make_model.py @@ -3,6 +3,7 @@ Optional, ) +import array_api_compat import numpy as np from deepmd.dpmodel.atomic_model.base_atomic_model import ( @@ -75,7 +76,8 @@ def __init__( else: self.atomic_model: T_AtomicModel = T_AtomicModel(*args, **kwargs) self.precision_dict = PRECISION_DICT - self.reverse_precision_dict = RESERVED_PRECISON_DICT + # not supported by flax + # self.reverse_precision_dict = RESERVED_PRECISON_DICT self.global_np_float_precision = GLOBAL_NP_FLOAT_PRECISION self.global_ener_float_precision = GLOBAL_ENER_FLOAT_PRECISION @@ -253,9 +255,7 @@ def input_type_cast( str, ]: """Cast the input data to global float type.""" - input_prec = self.reverse_precision_dict[ - self.precision_dict[coord.dtype.name] - ] + input_prec = RESERVED_PRECISON_DICT[self.precision_dict[coord.dtype.name]] ### ### type checking would not pass jit, convert to coord prec anyway ### @@ -264,10 +264,7 @@ def input_type_cast( for vv in [box, fparam, aparam] ] box, fparam, aparam = _lst - if ( - input_prec - == self.reverse_precision_dict[self.global_np_float_precision] - ): + if input_prec == RESERVED_PRECISON_DICT[self.global_np_float_precision]: return coord, box, fparam, aparam, input_prec else: pp = self.global_np_float_precision @@ -286,8 +283,7 @@ def output_type_cast( ) -> dict[str, np.ndarray]: """Convert the model output to the input prec.""" do_cast = ( - input_prec - != self.reverse_precision_dict[self.global_np_float_precision] + input_prec != RESERVED_PRECISON_DICT[self.global_np_float_precision] ) pp = self.precision_dict[input_prec] odef = self.model_output_def() @@ -366,6 +362,7 @@ def _format_nlist( nnei: int, extra_nlist_sort: bool = False, ): + xp = array_api_compat.array_namespace(extended_coord, nlist) n_nf, n_nloc, n_nnei = nlist.shape extended_coord = extended_coord.reshape([n_nf, -1, 3]) nall = extended_coord.shape[1] @@ -373,10 +370,10 @@ def _format_nlist( if n_nnei < nnei: # make a copy before revise - ret = np.concatenate( + ret = xp.concat( [ nlist, - -1 * np.ones([n_nf, n_nloc, nnei - n_nnei], dtype=nlist.dtype), + -1 * xp.ones([n_nf, n_nloc, nnei - n_nnei], dtype=nlist.dtype), ], axis=-1, ) @@ -385,16 +382,16 @@ def _format_nlist( n_nf, n_nloc, n_nnei = nlist.shape # make a copy before revise m_real_nei = nlist >= 0 - ret = np.where(m_real_nei, nlist, 0) + ret = xp.where(m_real_nei, nlist, 0) coord0 = extended_coord[:, :n_nloc, :] index = ret.reshape(n_nf, n_nloc * n_nnei, 1).repeat(3, axis=2) - coord1 = np.take_along_axis(extended_coord, index, axis=1) + coord1 = xp.take_along_axis(extended_coord, index, axis=1) coord1 = coord1.reshape(n_nf, n_nloc, n_nnei, 3) - rr = np.linalg.norm(coord0[:, :, None, :] - coord1, axis=-1) - rr = np.where(m_real_nei, rr, float("inf")) - rr, ret_mapping = np.sort(rr, axis=-1), np.argsort(rr, axis=-1) - ret = np.take_along_axis(ret, ret_mapping, axis=2) - ret = np.where(rr > rcut, -1, ret) + rr = xp.linalg.norm(coord0[:, :, None, :] - coord1, axis=-1) + rr = xp.where(m_real_nei, rr, float("inf")) + rr, ret_mapping = xp.sort(rr, axis=-1), xp.argsort(rr, axis=-1) + ret = xp.take_along_axis(ret, ret_mapping, axis=2) + ret = xp.where(rr > rcut, -1, ret) ret = ret[..., :nnei] # not extra_nlist_sort and n_nnei <= nnei: elif n_nnei == nnei: diff --git a/deepmd/dpmodel/model/transform_output.py b/deepmd/dpmodel/model/transform_output.py index 43c275b1be..928c33f3bd 100644 --- a/deepmd/dpmodel/model/transform_output.py +++ b/deepmd/dpmodel/model/transform_output.py @@ -1,5 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import array_api_compat import numpy as np from deepmd.dpmodel.common import ( @@ -23,6 +24,7 @@ def fit_output_to_model_output( the model output. """ + xp = array_api_compat.get_namespace(coord_ext) model_ret = dict(fit_ret.items()) for kk, vv in fit_ret.items(): vdef = fit_output_def[kk] @@ -31,7 +33,7 @@ def fit_output_to_model_output( if vdef.reducible: kk_redu = get_reduce_name(kk) # cast to energy prec brefore reduction - model_ret[kk_redu] = np.sum( + model_ret[kk_redu] = xp.sum( vv.astype(GLOBAL_ENER_FLOAT_PRECISION), axis=atom_axis ) if vdef.r_differentiable: diff --git a/deepmd/jax/atomic_model/__init__.py b/deepmd/jax/atomic_model/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/jax/atomic_model/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/jax/atomic_model/base_atomic_model.py b/deepmd/jax/atomic_model/base_atomic_model.py new file mode 100644 index 0000000000..90920879c2 --- /dev/null +++ b/deepmd/jax/atomic_model/base_atomic_model.py @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.jax.common import ( + to_jax_array, +) +from deepmd.jax.utils.exclude_mask import ( + AtomExcludeMask, + PairExcludeMask, +) + + +def base_atomic_model_set_attr(name, value): + if name in {"out_bias", "out_std"}: + value = to_jax_array(value) + elif name == "pair_excl" and value is not None: + value = PairExcludeMask(value.ntypes, value.exclude_types) + elif name == "atom_excl" and value is not None: + value = AtomExcludeMask(value.ntypes, value.exclude_types) + return value diff --git a/deepmd/jax/atomic_model/dp_atomic_model.py b/deepmd/jax/atomic_model/dp_atomic_model.py new file mode 100644 index 0000000000..077209e29a --- /dev/null +++ b/deepmd/jax/atomic_model/dp_atomic_model.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.atomic_model.dp_atomic_model import DPAtomicModel as DPAtomicModelDP +from deepmd.jax.atomic_model.base_atomic_model import ( + base_atomic_model_set_attr, +) +from deepmd.jax.common import ( + flax_module, +) +from deepmd.jax.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.jax.fitting.base_fitting import ( + BaseFitting, +) + + +@flax_module +class DPAtomicModel(DPAtomicModelDP): + base_descriptor_cls = BaseDescriptor + """The base descriptor class.""" + base_fitting_cls = BaseFitting + """The base fitting class.""" + + def __setattr__(self, name: str, value: Any) -> None: + value = base_atomic_model_set_attr(name, value) + return super().__setattr__(name, value) diff --git a/deepmd/jax/descriptor/__init__.py b/deepmd/jax/descriptor/__init__.py index 6ceb116d85..ed59493268 100644 --- a/deepmd/jax/descriptor/__init__.py +++ b/deepmd/jax/descriptor/__init__.py @@ -1 +1,12 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.jax.descriptor.dpa1 import ( + DescrptDPA1, +) +from deepmd.jax.descriptor.se_e2_a import ( + DescrptSeA, +) + +__all__ = [ + "DescrptSeA", + "DescrptDPA1", +] diff --git a/deepmd/jax/descriptor/base_descriptor.py b/deepmd/jax/descriptor/base_descriptor.py new file mode 100644 index 0000000000..7dec3cd6d4 --- /dev/null +++ b/deepmd/jax/descriptor/base_descriptor.py @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.dpmodel.descriptor.make_base_descriptor import ( + make_base_descriptor, +) +from deepmd.jax.env import ( + jnp, +) + +BaseDescriptor = make_base_descriptor(jnp.ndarray) diff --git a/deepmd/jax/descriptor/dpa1.py b/deepmd/jax/descriptor/dpa1.py index a9b0404970..0528e4bb93 100644 --- a/deepmd/jax/descriptor/dpa1.py +++ b/deepmd/jax/descriptor/dpa1.py @@ -16,6 +16,9 @@ flax_module, to_jax_array, ) +from deepmd.jax.descriptor.base_descriptor import ( + BaseDescriptor, +) from deepmd.jax.utils.exclude_mask import ( PairExcludeMask, ) @@ -76,6 +79,8 @@ def __setattr__(self, name: str, value: Any) -> None: return super().__setattr__(name, value) +@BaseDescriptor.register("dpa1") +@BaseDescriptor.register("se_atten") @flax_module class DescrptDPA1(DescrptDPA1DP): def __setattr__(self, name: str, value: Any) -> None: diff --git a/deepmd/jax/descriptor/se_e2_a.py b/deepmd/jax/descriptor/se_e2_a.py index a60a4e9af1..d1a6e9a8d9 100644 --- a/deepmd/jax/descriptor/se_e2_a.py +++ b/deepmd/jax/descriptor/se_e2_a.py @@ -8,6 +8,9 @@ flax_module, to_jax_array, ) +from deepmd.jax.descriptor.base_descriptor import ( + BaseDescriptor, +) from deepmd.jax.utils.exclude_mask import ( PairExcludeMask, ) @@ -16,6 +19,8 @@ ) +@BaseDescriptor.register("se_e2_a") +@BaseDescriptor.register("se_a") @flax_module class DescrptSeA(DescrptSeADP): def __setattr__(self, name: str, value: Any) -> None: diff --git a/deepmd/jax/fitting/__init__.py b/deepmd/jax/fitting/__init__.py index 6ceb116d85..e72314dcab 100644 --- a/deepmd/jax/fitting/__init__.py +++ b/deepmd/jax/fitting/__init__.py @@ -1 +1,10 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.jax.fitting.fitting import ( + DOSFittingNet, + EnergyFittingNet, +) + +__all__ = [ + "EnergyFittingNet", + "DOSFittingNet", +] diff --git a/deepmd/jax/fitting/base_fitting.py b/deepmd/jax/fitting/base_fitting.py new file mode 100644 index 0000000000..fd9f3a416d --- /dev/null +++ b/deepmd/jax/fitting/base_fitting.py @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.dpmodel.fitting.make_base_fitting import ( + make_base_fitting, +) +from deepmd.jax.env import ( + jnp, +) + +BaseFitting = make_base_fitting(jnp.ndarray) diff --git a/deepmd/jax/fitting/fitting.py b/deepmd/jax/fitting/fitting.py index 284213c70a..f979db4d41 100644 --- a/deepmd/jax/fitting/fitting.py +++ b/deepmd/jax/fitting/fitting.py @@ -9,6 +9,9 @@ flax_module, to_jax_array, ) +from deepmd.jax.fitting.base_fitting import ( + BaseFitting, +) from deepmd.jax.utils.exclude_mask import ( AtomExcludeMask, ) @@ -33,6 +36,7 @@ def setattr_for_general_fitting(name: str, value: Any) -> Any: return value +@BaseFitting.register("ener") @flax_module class EnergyFittingNet(EnergyFittingNetDP): def __setattr__(self, name: str, value: Any) -> None: @@ -40,6 +44,7 @@ def __setattr__(self, name: str, value: Any) -> None: return super().__setattr__(name, value) +@BaseFitting.register("dos") @flax_module class DOSFittingNet(DOSFittingNetDP): def __setattr__(self, name: str, value: Any) -> None: diff --git a/deepmd/jax/model/__init__.py b/deepmd/jax/model/__init__.py new file mode 100644 index 0000000000..05a60c4ffe --- /dev/null +++ b/deepmd/jax/model/__init__.py @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .ener_model import ( + EnergyModel, +) + +__all__ = ["EnergyModel"] diff --git a/deepmd/jax/model/base_model.py b/deepmd/jax/model/base_model.py new file mode 100644 index 0000000000..fee4855da3 --- /dev/null +++ b/deepmd/jax/model/base_model.py @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.dpmodel.model.base_model import ( + make_base_model, +) + +BaseModel = make_base_model() diff --git a/deepmd/jax/model/ener_model.py b/deepmd/jax/model/ener_model.py new file mode 100644 index 0000000000..79c5a29e88 --- /dev/null +++ b/deepmd/jax/model/ener_model.py @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.model import EnergyModel as EnergyModelDP +from deepmd.jax.atomic_model.dp_atomic_model import ( + DPAtomicModel, +) +from deepmd.jax.common import ( + flax_module, +) +from deepmd.jax.model.base_model import ( + BaseModel, +) + + +@BaseModel.register("ener") +@flax_module +class EnergyModel(EnergyModelDP): + def __setattr__(self, name: str, value: Any) -> None: + if name == "atomic_model": + value = DPAtomicModel.deserialize(value.serialize()) + return super().__setattr__(name, value) diff --git a/deepmd/jax/model/model.py b/deepmd/jax/model/model.py new file mode 100644 index 0000000000..7fa3efda6e --- /dev/null +++ b/deepmd/jax/model/model.py @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from copy import ( + deepcopy, +) + +from deepmd.jax.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.jax.fitting.base_fitting import ( + BaseFitting, +) +from deepmd.jax.model.base_model import ( + BaseModel, +) + + +def get_standard_model(data: dict): + """Get a Model from a dictionary. + + Parameters + ---------- + data : dict + The data to construct the model. + """ + data = deepcopy(data) + descriptor_type = data["descriptor"].pop("type") + data["descriptor"]["type_map"] = data["type_map"] + fitting_type = data["fitting_net"].pop("type") + data["fitting_net"]["type_map"] = data["type_map"] + descriptor = BaseDescriptor.get_class_by_type(descriptor_type)( + **data["descriptor"], + ) + fitting = BaseFitting.get_class_by_type(fitting_type)( + ntypes=descriptor.get_ntypes(), + dim_descrpt=descriptor.get_dim_out(), + mixed_types=descriptor.mixed_types(), + **data["fitting_net"], + ) + return BaseModel.get_class_by_type(fitting_type)( + descriptor=descriptor, + fitting=fitting, + type_map=data["type_map"], + atom_exclude_types=data.get("atom_exclude_types", []), + pair_exclude_types=data.get("pair_exclude_types", []), + ) + + +def get_model(data: dict): + """Get a model from a dictionary. + + Parameters + ---------- + data : dict + The data to construct the model. + """ + model_type = data.get("type", "standard") + if model_type == "standard": + if "spin" in data: + raise NotImplementedError("Spin model is not implemented yet.") + else: + return get_standard_model(data) + else: + return BaseModel.get_class_by_type(model_type).get_model(data) diff --git a/source/tests/consistent/model/common.py b/source/tests/consistent/model/common.py index 294edec1d6..4112e09cff 100644 --- a/source/tests/consistent/model/common.py +++ b/source/tests/consistent/model/common.py @@ -6,8 +6,12 @@ from deepmd.common import ( make_default_mesh, ) +from deepmd.dpmodel.common import ( + to_numpy_array, +) from ..common import ( + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, ) @@ -20,6 +24,11 @@ GLOBAL_TF_FLOAT_PRECISION, tf, ) +if INSTALLED_JAX: + from deepmd.jax.common import to_jax_array as numpy_to_jax + from deepmd.jax.env import ( + jnp, + ) class ModelTest: @@ -62,3 +71,17 @@ def eval_pt_model(self, pt_obj: Any, natoms, coords, atype, box) -> Any: box=numpy_to_torch(box), ).items() } + + def eval_jax_model(self, jax_obj: Any, natoms, coords, atype, box) -> Any: + def assert_jax_array(arr): + assert isinstance(arr, jnp.ndarray) or arr is None + return arr + + return { + kk: to_numpy_array(assert_jax_array(vv)) + for kk, vv in jax_obj( + numpy_to_jax(coords), + numpy_to_jax(atype), + box=numpy_to_jax(box), + ).items() + } diff --git a/source/tests/consistent/model/test_ener.py b/source/tests/consistent/model/test_ener.py index 692e1287dc..78a2aac703 100644 --- a/source/tests/consistent/model/test_ener.py +++ b/source/tests/consistent/model/test_ener.py @@ -13,6 +13,7 @@ ) from ..common import ( + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -36,6 +37,12 @@ model_args, ) +if INSTALLED_JAX: + from deepmd.jax.model.ener_model import EnergyModel as EnergyModelJAX + from deepmd.jax.model.model import get_model as get_model_jax +else: + EnergyModelJAX = None + @parameterized( ( @@ -84,14 +91,20 @@ def data(self) -> dict: tf_class = EnergyModelTF dp_class = EnergyModelDP pt_class = EnergyModelPT + jax_class = EnergyModelJAX args = model_args() + @property def skip_tf(self): return ( self.data["pair_exclude_types"] != [] or self.data["atom_exclude_types"] != [] ) + @property + def skip_jax(self): + return not INSTALLED_JAX + def pass_data_to_cls(self, cls, data) -> Any: """Pass data to the class.""" data = data.copy() @@ -99,6 +112,8 @@ def pass_data_to_cls(self, cls, data) -> Any: return get_model_dp(data) elif cls is EnergyModelPT: return get_model_pt(data) + elif cls is EnergyModelJAX: + return get_model_jax(data) return cls(**data, **self.addtional_data) def setUp(self): @@ -168,6 +183,15 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) + def eval_jax(self, jax_obj: Any) -> Any: + return self.eval_jax_model( + jax_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: # shape not matched. ravel... if backend is self.RefBackend.DP: @@ -176,4 +200,6 @@ def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret["energy"].ravel(), ret["atom_energy"].ravel()) elif backend is self.RefBackend.TF: return (ret[0].ravel(), ret[1].ravel()) + elif backend is self.RefBackend.JAX: + return (ret["energy_redu"].ravel(), ret["energy"].ravel()) raise ValueError(f"Unknown backend: {backend}") From dccb0e58cac6079ce226c511aefcc0ee9a02a22c Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 22 Oct 2024 22:38:00 -0400 Subject: [PATCH 067/193] docs: fix docs for CMAKE_ARGS (#4238) There is a typo. ## Summary by CodeRabbit - **Documentation** - Enhanced installation instructions for the DeePMD-kit, including detailed steps for setting environment variables. - Expanded section on installing the Python interface with commands for verifying TensorFlow and PyTorch installations. - Clarified installation instructions for Horovod and mpi4py, emphasizing environment variable usage. - Updated requirements for C++ interface installation, specifying necessary CMake versions for different backends. - Added new environment variables and detailed CMake configuration options for improved installation control. Signed-off-by: Jinzhe Zeng --- doc/install/install-from-source.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/install/install-from-source.md b/doc/install/install-from-source.md index 4079a8d424..3f65375865 100644 --- a/doc/install/install-from-source.md +++ b/doc/install/install-from-source.md @@ -198,7 +198,7 @@ Enable compilation optimization for the native machine's CPU type. Do not enable **Type**: string -Control high (double) or low (float) precision of training. +Additional CMake arguments. ::: :::{envvar} FLAGS From 911f41b4a08fd8787ca7dc469c69b158d5d48968 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 23 Oct 2024 00:28:58 -0400 Subject: [PATCH 068/193] fix(pt): set `weights_only=True` for `torch.load` (#4147) Fix #4143. ## Summary by CodeRabbit - **New Features** - Enhanced model loading efficiency by only loading model weights, which reduces memory usage and improves performance. - **Bug Fixes** - Streamlined the loading process across various components, ensuring that only essential model parameters are loaded, thus optimizing the overall functionality. - **Tests** - Updated tests to reflect the new loading behavior, ensuring that only model weights are loaded in various test scenarios for improved clarity and performance. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/pt/entrypoints/main.py | 8 ++++++-- deepmd/pt/infer/deep_eval.py | 4 +++- deepmd/pt/infer/inference.py | 2 +- deepmd/pt/train/training.py | 4 +++- deepmd/pt/utils/finetune.py | 2 +- deepmd/pt/utils/serialization.py | 2 +- source/tests/pt/model/test_descriptor_dpa1.py | 10 ++++++---- source/tests/pt/model/test_descriptor_dpa2.py | 4 ++-- source/tests/pt/model/test_saveload_dpa1.py | 4 +++- source/tests/pt/model/test_saveload_se_e2_a.py | 4 +++- 10 files changed, 29 insertions(+), 15 deletions(-) diff --git a/deepmd/pt/entrypoints/main.py b/deepmd/pt/entrypoints/main.py index 7c8a95c5e7..71f81b0a12 100644 --- a/deepmd/pt/entrypoints/main.py +++ b/deepmd/pt/entrypoints/main.py @@ -283,7 +283,9 @@ def train( # update init_model or init_frz_model config if necessary if (init_model is not None or init_frz_model is not None) and use_pretrain_script: if init_model is not None: - init_state_dict = torch.load(init_model, map_location=DEVICE) + init_state_dict = torch.load( + init_model, map_location=DEVICE, weights_only=True + ) if "model" in init_state_dict: init_state_dict = init_state_dict["model"] config["model"] = init_state_dict["_extra_state"]["model_params"] @@ -380,7 +382,9 @@ def change_bias( output: Optional[str] = None, ): if input_file.endswith(".pt"): - old_state_dict = torch.load(input_file, map_location=env.DEVICE) + old_state_dict = torch.load( + input_file, map_location=env.DEVICE, weights_only=True + ) model_state_dict = copy.deepcopy(old_state_dict.get("model", old_state_dict)) model_params = model_state_dict["_extra_state"]["model_params"] elif input_file.endswith(".pth"): diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index 0a77a38135..acf985974c 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -103,7 +103,9 @@ def __init__( self.output_def = output_def self.model_path = model_file if str(self.model_path).endswith(".pt"): - state_dict = torch.load(model_file, map_location=env.DEVICE) + state_dict = torch.load( + model_file, map_location=env.DEVICE, weights_only=True + ) if "model" in state_dict: state_dict = state_dict["model"] self.input_param = state_dict["_extra_state"]["model_params"] diff --git a/deepmd/pt/infer/inference.py b/deepmd/pt/infer/inference.py index dfb7abdb21..b3d120cbc4 100644 --- a/deepmd/pt/infer/inference.py +++ b/deepmd/pt/infer/inference.py @@ -34,7 +34,7 @@ def __init__( - config: The Dict-like configuration with training options. """ # Model - state_dict = torch.load(model_ckpt, map_location=DEVICE) + state_dict = torch.load(model_ckpt, map_location=DEVICE, weights_only=True) if "model" in state_dict: state_dict = state_dict["model"] model_params = state_dict["_extra_state"]["model_params"] diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index 10e841682a..0f7c030a84 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -400,7 +400,9 @@ def get_lr(lr_params): optimizer_state_dict = None if resuming: log.info(f"Resuming from {resume_model}.") - state_dict = torch.load(resume_model, map_location=DEVICE) + state_dict = torch.load( + resume_model, map_location=DEVICE, weights_only=True + ) if "model" in state_dict: optimizer_state_dict = ( state_dict["optimizer"] if finetune_model is None else None diff --git a/deepmd/pt/utils/finetune.py b/deepmd/pt/utils/finetune.py index 2dd2230b54..96a420bf6a 100644 --- a/deepmd/pt/utils/finetune.py +++ b/deepmd/pt/utils/finetune.py @@ -136,7 +136,7 @@ def get_finetune_rules( Fine-tuning rules in a dict format, with `model_branch`: FinetuneRuleItem pairs. """ multi_task = "model_dict" in model_config - state_dict = torch.load(finetune_model, map_location=env.DEVICE) + state_dict = torch.load(finetune_model, map_location=env.DEVICE, weights_only=True) if "model" in state_dict: state_dict = state_dict["model"] last_model_params = state_dict["_extra_state"]["model_params"] diff --git a/deepmd/pt/utils/serialization.py b/deepmd/pt/utils/serialization.py index aab6d100a5..1c6ea096aa 100644 --- a/deepmd/pt/utils/serialization.py +++ b/deepmd/pt/utils/serialization.py @@ -33,7 +33,7 @@ def serialize_from_file(model_file: str) -> dict: model = get_model(model_def_script) model.load_state_dict(saved_model.state_dict()) elif model_file.endswith(".pt"): - state_dict = torch.load(model_file, map_location="cpu") + state_dict = torch.load(model_file, map_location="cpu", weights_only=True) if "model" in state_dict: state_dict = state_dict["model"] model_def_script = state_dict["_extra_state"]["model_params"] diff --git a/source/tests/pt/model/test_descriptor_dpa1.py b/source/tests/pt/model/test_descriptor_dpa1.py index 488cc2f7ff..a3d696516a 100644 --- a/source/tests/pt/model/test_descriptor_dpa1.py +++ b/source/tests/pt/model/test_descriptor_dpa1.py @@ -245,13 +245,15 @@ def test_descriptor_block(self): des = DescrptBlockSeAtten( **dparams, ).to(env.DEVICE) - des.load_state_dict(torch.load(self.file_model_param)) + des.load_state_dict(torch.load(self.file_model_param, weights_only=True)) coord = self.coord atype = self.atype box = self.cell # handel type_embedding type_embedding = TypeEmbedNet(ntypes, 8, use_tebd_bias=True).to(env.DEVICE) - type_embedding.load_state_dict(torch.load(self.file_type_embed)) + type_embedding.load_state_dict( + torch.load(self.file_type_embed, weights_only=True) + ) ## to save model parameters # torch.save(des.state_dict(), 'model_weights.pth') @@ -299,8 +301,8 @@ def test_descriptor(self): **dparams, ).to(env.DEVICE) target_dict = des.state_dict() - source_dict = torch.load(self.file_model_param) - type_embd_dict = torch.load(self.file_type_embed) + source_dict = torch.load(self.file_model_param, weights_only=True) + type_embd_dict = torch.load(self.file_type_embed, weights_only=True) target_dict = translate_se_atten_and_type_embd_dicts_to_dpa1( target_dict, source_dict, diff --git a/source/tests/pt/model/test_descriptor_dpa2.py b/source/tests/pt/model/test_descriptor_dpa2.py index ac04bfc417..17d609a2f9 100644 --- a/source/tests/pt/model/test_descriptor_dpa2.py +++ b/source/tests/pt/model/test_descriptor_dpa2.py @@ -123,10 +123,10 @@ def test_descriptor(self): **dparams, ).to(env.DEVICE) target_dict = des.state_dict() - source_dict = torch.load(self.file_model_param) + source_dict = torch.load(self.file_model_param, weights_only=True) # type_embd of repformer is removed source_dict.pop("type_embedding.embedding.embedding_net.layers.0.bias") - type_embd_dict = torch.load(self.file_type_embed) + type_embd_dict = torch.load(self.file_type_embed, weights_only=True) target_dict = translate_type_embd_dicts_to_dpa2( target_dict, source_dict, diff --git a/source/tests/pt/model/test_saveload_dpa1.py b/source/tests/pt/model/test_saveload_dpa1.py index 3da06938b5..5b2b6cd583 100644 --- a/source/tests/pt/model/test_saveload_dpa1.py +++ b/source/tests/pt/model/test_saveload_dpa1.py @@ -85,7 +85,9 @@ def get_model_result(self, read=False, model_file="tmp_model.pt"): optimizer = torch.optim.Adam(wrapper.parameters(), lr=self.start_lr) optimizer.zero_grad() if read: - wrapper.load_state_dict(torch.load(model_file, map_location=env.DEVICE)) + wrapper.load_state_dict( + torch.load(model_file, map_location=env.DEVICE, weights_only=True) + ) os.remove(model_file) else: torch.save(wrapper.state_dict(), model_file) diff --git a/source/tests/pt/model/test_saveload_se_e2_a.py b/source/tests/pt/model/test_saveload_se_e2_a.py index 56ea3283d9..d226f628bc 100644 --- a/source/tests/pt/model/test_saveload_se_e2_a.py +++ b/source/tests/pt/model/test_saveload_se_e2_a.py @@ -85,7 +85,9 @@ def get_model_result(self, read=False, model_file="tmp_model.pt"): optimizer = torch.optim.Adam(wrapper.parameters(), lr=self.start_lr) optimizer.zero_grad() if read: - wrapper.load_state_dict(torch.load(model_file, map_location=env.DEVICE)) + wrapper.load_state_dict( + torch.load(model_file, map_location=env.DEVICE, weights_only=True) + ) os.remove(model_file) else: torch.save(wrapper.state_dict(), model_file) From b8e57f2d87d2cf33f1c036992186a5fcb858c9df Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 23 Oct 2024 05:07:30 -0400 Subject: [PATCH 069/193] docs: fix parameter links (#4239) It was forgotten in https://github.com/deepmodeling/deepmd-kit/pull/2549 ## Summary by CodeRabbit - **New Features** - Introduced the DPA-1 model with an attention mechanism for molecular simulation. - Added the `se_a_mask` descriptor for DP/MM simulations with dynamic atom counts. - Expanded support for multi-task fine-tuning in PyTorch. - **Documentation Enhancements** - Improved clarity and detail in various documents, including model compression, DPLR model training, and fine-tuning processes. - Updated references to follow a standardized format, enhancing navigation and understanding. - **Bug Fixes** - Corrected references and parameters across multiple documents to ensure accuracy in model configurations and training instructions. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/utils/argcheck.py | 41 +++++++++++++++++-------------- doc/freeze/compress.md | 2 +- doc/model/dplr.md | 2 +- doc/model/dprc.md | 6 ++--- doc/model/overall.md | 2 +- doc/model/train-energy-spin.md | 4 +-- doc/model/train-energy.md | 8 +++--- doc/model/train-fitting-dos.md | 4 +-- doc/model/train-fitting-tensor.md | 4 +-- doc/model/train-hybrid.md | 2 +- doc/model/train-se-a-mask.md | 22 ++++++++--------- doc/model/train-se-atten.md | 24 +++++++++--------- doc/model/train-se-e2-a.md | 18 +++++++------- doc/model/train-se-e2-r.md | 4 +-- doc/model/train-se-e3-tebd.md | 4 +-- doc/model/train-se-e3.md | 4 +-- doc/train/finetuning.md | 2 +- doc/train/gpu-limitations.md | 2 +- doc/train/training-advanced.md | 2 +- 19 files changed, 80 insertions(+), 77 deletions(-) diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index 1a5e1cc3b2..b3f3b26fd0 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -1387,24 +1387,27 @@ def descrpt_se_a_mask_args(): def descrpt_variant_type_args(exclude_hybrid: bool = False) -> Variant: - link_lf = make_link("loc_frame", "model/descriptor[loc_frame]") - link_se_e2_a = make_link("se_e2_a", "model/descriptor[se_e2_a]") - link_se_e2_r = make_link("se_e2_r", "model/descriptor[se_e2_r]") - link_se_e3 = make_link("se_e3", "model/descriptor[se_e3]") - link_se_a_tpe = make_link("se_a_tpe", "model/descriptor[se_a_tpe]") - link_hybrid = make_link("hybrid", "model/descriptor[hybrid]") - link_se_atten = make_link("se_atten", "model/descriptor[se_atten]") - link_se_atten_v2 = make_link("se_atten_v2", "model/descriptor[se_atten_v2]") - doc_descrpt_type = "The type of the descritpor. See explanation below. \n\n\ -- `loc_frame`: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.\n\n\ -- `se_e2_a`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.\n\n\ -- `se_e2_r`: Used by the smooth edition of Deep Potential. Only the distance between atoms is used to construct the descriptor.\n\n\ -- `se_e3`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Three-body embedding will be used by this descriptor.\n\n\ -- `se_a_tpe`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Type embedding will be used by this descriptor.\n\n\ -- `se_atten`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism will be used by this descriptor.\n\n\ -- `se_atten_v2`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism with new modifications will be used by this descriptor.\n\n\ -- `se_a_mask`: Used by the smooth edition of Deep Potential. It can accept a variable number of atoms in a frame (Non-PBC system). *aparam* are required as an indicator matrix for the real/virtual sign of input atoms. \n\n\ -- `hybrid`: Concatenate of a list of descriptors as a new descriptor." + link_lf = make_link("loc_frame", "model[standard]/descriptor[loc_frame]") + link_se_e2_a = make_link("se_e2_a", "model[standard]/descriptor[se_e2_a]") + link_se_e2_r = make_link("se_e2_r", "model[standard]/descriptor[se_e2_r]") + link_se_e3 = make_link("se_e3", "model[standard]/descriptor[se_e3]") + link_se_a_tpe = make_link("se_a_tpe", "model[standard]/descriptor[se_a_tpe]") + link_hybrid = make_link("hybrid", "model[standard]/descriptor[hybrid]") + link_se_atten = make_link("se_atten", "model[standard]/descriptor[se_atten]") + link_se_atten_v2 = make_link( + "se_atten_v2", "model[standard]/descriptor[se_atten_v2]" + ) + link_se_a_mask = make_link("se_a_mask", "model[standard]/descriptor[se_a_mask]") + doc_descrpt_type = f"The type of the descritpor. See explanation below. \n\n\ +- {link_lf}: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.\n\n\ +- {link_se_e2_a}: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.\n\n\ +- {link_se_e2_r}: Used by the smooth edition of Deep Potential. Only the distance between atoms is used to construct the descriptor.\n\n\ +- {link_se_e3}: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Three-body embedding will be used by this descriptor.\n\n\ +- {link_se_a_tpe}: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Type embedding will be used by this descriptor.\n\n\ +- {link_se_atten}: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism will be used by this descriptor.\n\n\ +- {link_se_atten_v2}: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism with new modifications will be used by this descriptor.\n\n\ +- {link_se_a_mask}: Used by the smooth edition of Deep Potential. It can accept a variable number of atoms in a frame (Non-PBC system). *aparam* are required as an indicator matrix for the real/virtual sign of input atoms. \n\n\ +- {link_hybrid}: Concatenate of a list of descriptors as a new descriptor." return Variant( "type", @@ -1692,7 +1695,7 @@ def fitting_variant_type_args(): # --- Modifier configurations: --- # def modifier_dipole_charge(): doc_model_name = "The name of the frozen dipole model file." - doc_model_charge_map = f"The charge of the WFCC. The list length should be the same as the {make_link('sel_type', 'model/fitting_net[dipole]/sel_type')}. " + doc_model_charge_map = f"The charge of the WFCC. The list length should be the same as the {make_link('sel_type', 'model[standard]/fitting_net[dipole]/sel_type')}. " doc_sys_charge_map = f"The charge of real atoms. The list length should be the same as the {make_link('type_map', 'model/type_map')}" doc_ewald_h = "The grid spacing of the FFT grid. Unit is A" doc_ewald_beta = f"The splitting parameter of Ewald sum. Unit is A^{-1}" diff --git a/doc/freeze/compress.md b/doc/freeze/compress.md index 3cce96c993..e26c85e45a 100644 --- a/doc/freeze/compress.md +++ b/doc/freeze/compress.md @@ -99,7 +99,7 @@ The model compression interface requires the version of DeePMD-kit used in the o Descriptors with `se_e2_a`, `se_e3`, `se_e2_r` and `se_atten_v2` types are supported by the model compression feature. `Hybrid` mixed with the above descriptors is also supported. -Notice: Model compression for the `se_atten_v2` descriptor is exclusively designed for models with the training parameter {ref}`attn_layer ` set to 0. +Notice: Model compression for the `se_atten_v2` descriptor is exclusively designed for models with the training parameter {ref}`attn_layer ` set to 0. **Available activation functions for descriptor:** diff --git a/doc/model/dplr.md b/doc/model/dplr.md index ec95f9f424..91c2251346 100644 --- a/doc/model/dplr.md +++ b/doc/model/dplr.md @@ -58,7 +58,7 @@ Two settings make the training input script different from an energy training in }, ``` -The type of fitting is set to {ref}`dipole `. The dipole is associated with type 0 atoms (oxygens), by the setting `"dipole_type": [0]`. What we trained is the displacement of the WC from the corresponding oxygen atom. It shares the same training input as the atomic dipole because both are 3-dimensional vectors defined on atoms. +The type of fitting is set to {ref}`dipole `. The dipole is associated with type 0 atoms (oxygens), by the setting `"dipole_type": [0]`. What we trained is the displacement of the WC from the corresponding oxygen atom. It shares the same training input as the atomic dipole because both are 3-dimensional vectors defined on atoms. The loss section is provided as follows ```json diff --git a/doc/model/dprc.md b/doc/model/dprc.md index 33dde237d7..d9ce24b600 100644 --- a/doc/model/dprc.md +++ b/doc/model/dprc.md @@ -140,7 +140,7 @@ As described in the paper, the DPRc model only corrects $E_\text{QM}$ and $E_\te :::: -{ref}`exclude_types ` can be generated by the following Python script: +{ref}`exclude_types ` can be generated by the following Python script: ```py from itertools import combinations_with_replacement, product @@ -163,7 +163,7 @@ print( ) ``` -Also, DPRc assumes MM atom energies ({ref}`atom_ener `) are zero: +Also, DPRc assumes MM atom energies ({ref}`atom_ener `) are zero: ```json "fitting_net": { @@ -173,7 +173,7 @@ Also, DPRc assumes MM atom energies ({ref}`atom_ener ` only works when {ref}`descriptor/set_davg_zero ` of the QM/MM part is `true`. +Note that {ref}`atom_ener ` only works when {ref}`descriptor/set_davg_zero ` of the QM/MM part is `true`. ## Run MD simulations diff --git a/doc/model/overall.md b/doc/model/overall.md index 102a8fc671..7f67c6545d 100644 --- a/doc/model/overall.md +++ b/doc/model/overall.md @@ -42,7 +42,7 @@ A model has two parts, a descriptor that maps atomic configuration to a set of s } ``` -The two subsections, {ref}`descriptor ` and {ref}`fitting_net `, define the descriptor and the fitting net, respectively. +The two subsections, {ref}`descriptor ` and {ref}`fitting_net `, define the descriptor and the fitting net, respectively. The {ref}`type_map ` is optional, which provides the element names (but not necessarily same as the actual name of the element) of the corresponding atom types. A water model, as in this example, has two kinds of atoms. The atom types are internally recorded as integers, e.g., `0` for oxygen and `1` for hydrogen here. A mapping from the atom type to their names is provided by {ref}`type_map `. diff --git a/doc/model/train-energy-spin.md b/doc/model/train-energy-spin.md index 9f4e3cf04b..ec169892f2 100644 --- a/doc/model/train-energy-spin.md +++ b/doc/model/train-energy-spin.md @@ -11,9 +11,9 @@ keeping other sections the same as the normal energy model's input script. Note that when adding spin into the model, there will be some implicit modifications automatically done by the program: - In the TensorFlow backend, the `se_e2_a` descriptor will treat those atom types with spin as new (virtual) types, - and duplicate their corresponding selected numbers of neighbors ({ref}`sel `) from their real atom types. + and duplicate their corresponding selected numbers of neighbors ({ref}`sel `) from their real atom types. - In the PyTorch backend, if spin settings are added, all the types (with or without spin) will have their virtual types. - The `se_e2_a` descriptor will thus double the {ref}`sel ` list, + The `se_e2_a` descriptor will thus double the {ref}`sel ` list, while in other descriptors with mixed types (such as `dpa1` or `dpa2`), the sel number will not be changed for clarity. If you are using descriptors with mixed types, to achieve better performance, you should manually extend your sel number (maybe double) depending on the balance between performance and efficiency. diff --git a/doc/model/train-energy.md b/doc/model/train-energy.md index c1da1f4c1f..75d31d4670 100644 --- a/doc/model/train-energy.md +++ b/doc/model/train-energy.md @@ -79,7 +79,7 @@ Benefiting from the relative force loss, small forces can be fitted more accurat ## The fitting network -The construction of the fitting net is given by section {ref}`fitting_net ` +The construction of the fitting net is given by section {ref}`fitting_net ` ```json "fitting_net" : { @@ -89,9 +89,9 @@ The construction of the fitting net is given by section {ref}`fitting_net ` specifies the size of the fitting net. If two neighboring layers are of the same size, then a [ResNet architecture](https://arxiv.org/abs/1512.03385) is built between them. -- If the option {ref}`resnet_dt ` is set to `true`, then a timestep is used in the ResNet. -- {ref}`seed ` gives the random seed that is used to generate random numbers when initializing the model parameters. +- {ref}`neuron ` specifies the size of the fitting net. If two neighboring layers are of the same size, then a [ResNet architecture](https://arxiv.org/abs/1512.03385) is built between them. +- If the option {ref}`resnet_dt ` is set to `true`, then a timestep is used in the ResNet. +- {ref}`seed ` gives the random seed that is used to generate random numbers when initializing the model parameters. ## Loss diff --git a/doc/model/train-fitting-dos.md b/doc/model/train-fitting-dos.md index 4c4366a1e1..d04dbc669c 100644 --- a/doc/model/train-fitting-dos.md +++ b/doc/model/train-fitting-dos.md @@ -16,11 +16,11 @@ $deepmd_source_dir/examples/dos/input.json The training and validation data are also provided our examples. But note that **the data provided along with the examples are of limited amount, and should not be used to train a production model.** -Similar to the `input.json` used in `ener` mode, training JSON is also divided into {ref}`model `, {ref}`learning_rate `, {ref}`loss ` and {ref}`training `. Most keywords remain the same as `ener` mode, and their meaning can be found [here](train-se-e2-a.md). To fit the `dos`, one needs to modify {ref}`model/fitting_net ` and {ref}`loss `. +Similar to the `input.json` used in `ener` mode, training JSON is also divided into {ref}`model `, {ref}`learning_rate `, {ref}`loss ` and {ref}`training `. Most keywords remain the same as `ener` mode, and their meaning can be found [here](train-se-e2-a.md). To fit the `dos`, one needs to modify {ref}`model[standard]/fitting_net ` and {ref}`loss `. ## The fitting Network -The {ref}`fitting_net ` section tells DP which fitting net to use. +The {ref}`fitting_net ` section tells DP which fitting net to use. The JSON of `dos` type should be provided like diff --git a/doc/model/train-fitting-tensor.md b/doc/model/train-fitting-tensor.md index 4d5cb22707..c6b54c69ef 100644 --- a/doc/model/train-fitting-tensor.md +++ b/doc/model/train-fitting-tensor.md @@ -30,7 +30,7 @@ $deepmd_source_dir/examples/water_tensor/polar/polar_input_torch.json The training and validation data are also provided our examples. But note that **the data provided along with the examples are of limited amount, and should not be used to train a production model.** -Similar to the `input.json` used in `ener` mode, training JSON is also divided into {ref}`model `, {ref}`learning_rate `, {ref}`loss ` and {ref}`training `. Most keywords remain the same as `ener` mode, and their meaning can be found [here](train-se-e2-a.md). To fit a tensor, one needs to modify {ref}`model/fitting_net ` and {ref}`loss `. +Similar to the `input.json` used in `ener` mode, training JSON is also divided into {ref}`model `, {ref}`learning_rate `, {ref}`loss ` and {ref}`training `. Most keywords remain the same as `ener` mode, and their meaning can be found [here](train-se-e2-a.md). To fit a tensor, one needs to modify {ref}`model[standard]/fitting_net ` and {ref}`loss `. ## Theory @@ -72,7 +72,7 @@ The tensorial models can be used to calculate IR spectrum and Raman spectrum.[^1 ## The fitting Network -The {ref}`fitting_net ` section tells DP which fitting net to use. +The {ref}`fitting_net ` section tells DP which fitting net to use. ::::{tab-set} diff --git a/doc/model/train-hybrid.md b/doc/model/train-hybrid.md index c0a55d9eb5..1219d208a7 100644 --- a/doc/model/train-hybrid.md +++ b/doc/model/train-hybrid.md @@ -25,7 +25,7 @@ This way, one can set the different cutoff radii for different descriptors.[^1] ## Instructions -To use the descriptor in DeePMD-kit, one firstly set the {ref}`type ` to {ref}`hybrid `, then provide the definitions of the descriptors by the items in the `list`, +To use the descriptor in DeePMD-kit, one firstly set the {ref}`type ` to {ref}`hybrid `, then provide the definitions of the descriptors by the items in the `list`, ```json "descriptor" :{ diff --git a/doc/model/train-se-a-mask.md b/doc/model/train-se-a-mask.md index 6757fbefbd..69f344b138 100644 --- a/doc/model/train-se-a-mask.md +++ b/doc/model/train-se-a-mask.md @@ -29,7 +29,7 @@ A complete training input script of this example can be found in the directory. $deepmd_source_dir/examples/zinc_protein/zinc_se_a_mask.json ``` -The construction of the descriptor is given by section {ref}`descriptor `. An example of the descriptor is provided as follows +The construction of the descriptor is given by section {ref}`descriptor `. An example of the descriptor is provided as follows ```json "descriptor" :{ @@ -43,13 +43,13 @@ The construction of the descriptor is given by section {ref}`descriptor ` of the descriptor is set to `"se_a_mask"`. -- {ref}`sel ` gives the maximum number of atoms in input coordinates. It is a list, the length of which is the same as the number of atom types in the system, and `sel[i]` denotes the maximum number of atoms with type `i`. -- The {ref}`neuron ` specifies the size of the embedding net. From left to right the members denote the sizes of each hidden layer from the input end to the output end, respectively. If the outer layer is twice the size of the inner layer, then the inner layer is copied and concatenated, then a [ResNet architecture](https://arxiv.org/abs/1512.03385) is built between them. -- The {ref}`axis_neuron ` specifies the size of the submatrix of the embedding matrix, the axis matrix as explained in the [DeepPot-SE paper](https://arxiv.org/abs/1805.09003) -- If the option {ref}`type_one_side ` is set to `true`, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters. -- If the option {ref}`resnet_dt ` is set to `true`, then a timestep is used in the ResNet. -- {ref}`seed ` gives the random seed that is used to generate random numbers when initializing the model parameters. +- The {ref}`type ` of the descriptor is set to `"se_a_mask"`. +- {ref}`sel ` gives the maximum number of atoms in input coordinates. It is a list, the length of which is the same as the number of atom types in the system, and `sel[i]` denotes the maximum number of atoms with type `i`. +- The {ref}`neuron ` specifies the size of the embedding net. From left to right the members denote the sizes of each hidden layer from the input end to the output end, respectively. If the outer layer is twice the size of the inner layer, then the inner layer is copied and concatenated, then a [ResNet architecture](https://arxiv.org/abs/1512.03385) is built between them. +- The {ref}`axis_neuron ` specifies the size of the submatrix of the embedding matrix, the axis matrix as explained in the [DeepPot-SE paper](https://arxiv.org/abs/1805.09003) +- If the option {ref}`type_one_side ` is set to `true`, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters. +- If the option {ref}`resnet_dt ` is set to `true`, then a timestep is used in the ResNet. +- {ref}`seed ` gives the random seed that is used to generate random numbers when initializing the model parameters. To make the `aparam.npy` used for descriptor `se_a_mask`, two variables in `fitting_net` section are needed. @@ -63,9 +63,9 @@ To make the `aparam.npy` used for descriptor `se_a_mask`, two variables in `fitt } ``` -- `neuron`, `resnet_dt` and `seed` are the same as the {ref}`fitting_net ` section for fitting energy. -- {ref}`numb_aparam ` gives the dimesion of the `aparam.npy` file. In this example, it is set to 1 and stores the real/virtual sign of the atoms. For real/virtual atoms, the corresponding sign in `aparam.npy` is set to 1/0. -- {ref}`use_aparam_as_mask ` is set to `true` to use the `aparam.npy` as the mask of the atoms in the descriptor `se_a_mask`. +- `neuron`, `resnet_dt` and `seed` are the same as the {ref}`fitting_net ` section for fitting energy. +- {ref}`numb_aparam ` gives the dimesion of the `aparam.npy` file. In this example, it is set to 1 and stores the real/virtual sign of the atoms. For real/virtual atoms, the corresponding sign in `aparam.npy` is set to 1/0. +- {ref}`use_aparam_as_mask ` is set to `true` to use the `aparam.npy` as the mask of the atoms in the descriptor `se_a_mask`. Finally, to make a reasonable fitting task with `se_a_mask` descriptor for DP/MM simulations, the loss function with `se_a_mask` is designed to include the atomic forces difference in specific atoms of the input particles only. More details about the selection of the specific atoms can be found in paper [DP/MM](left to be filled). diff --git a/doc/model/train-se-atten.md b/doc/model/train-se-atten.md index 24950d9595..bebce78365 100644 --- a/doc/model/train-se-atten.md +++ b/doc/model/train-se-atten.md @@ -104,17 +104,17 @@ An example of the DPA-1 descriptor is provided as follows } ``` -- The {ref}`type ` of the descriptor is set to `"se_atten"`, which will use DPA-1 structures. -- {ref}`rcut ` is the cut-off radius for neighbor searching, and the {ref}`rcut_smth ` gives where the smoothing starts. -- **{ref}`sel `** gives the maximum possible number of neighbors in the cut-off radius. It is an int. Note that this number highly affects the efficiency of training, which we usually use less than 200. (We use 120 for training 56 elements in [OC2M dataset](https://github.com/Open-Catalyst-Project/ocp/blob/main/DATASET.md)) -- The {ref}`neuron ` specifies the size of the embedding net. From left to right the members denote the sizes of each hidden layer from the input end to the output end, respectively. If the outer layer is twice the size of the inner layer, then the inner layer is copied and concatenated, then a [ResNet architecture](https://arxiv.org/abs/1512.03385) is built between them. -- The {ref}`axis_neuron ` specifies the size of the submatrix of the embedding matrix, the axis matrix as explained in the [DeepPot-SE paper](https://arxiv.org/abs/1805.09003) -- If the option {ref}`resnet_dt ` is set to `true`, then a timestep is used in the ResNet. -- {ref}`seed ` gives the random seed that is used to generate random numbers when initializing the model parameters. -- {ref}`attn ` sets the length of a hidden vector during scale-dot attention computation. -- {ref}`attn_layer ` sets the number of layers in attention mechanism. -- {ref}`attn_mask ` determines whether to mask the diagonal in the attention weights and False is recommended. -- {ref}`attn_dotr ` determines whether to dot the relative coordinates on the attention weights as a gated scheme, True is recommended. +- The {ref}`type ` of the descriptor is set to `"se_atten"`, which will use DPA-1 structures. +- {ref}`rcut ` is the cut-off radius for neighbor searching, and the {ref}`rcut_smth ` gives where the smoothing starts. +- **{ref}`sel `** gives the maximum possible number of neighbors in the cut-off radius. It is an int. Note that this number highly affects the efficiency of training, which we usually use less than 200. (We use 120 for training 56 elements in [OC2M dataset](https://github.com/Open-Catalyst-Project/ocp/blob/main/DATASET.md)) +- The {ref}`neuron ` specifies the size of the embedding net. From left to right the members denote the sizes of each hidden layer from the input end to the output end, respectively. If the outer layer is twice the size of the inner layer, then the inner layer is copied and concatenated, then a [ResNet architecture](https://arxiv.org/abs/1512.03385) is built between them. +- The {ref}`axis_neuron ` specifies the size of the submatrix of the embedding matrix, the axis matrix as explained in the [DeepPot-SE paper](https://arxiv.org/abs/1805.09003) +- If the option {ref}`resnet_dt ` is set to `true`, then a timestep is used in the ResNet. +- {ref}`seed ` gives the random seed that is used to generate random numbers when initializing the model parameters. +- {ref}`attn ` sets the length of a hidden vector during scale-dot attention computation. +- {ref}`attn_layer ` sets the number of layers in attention mechanism. +- {ref}`attn_mask ` determines whether to mask the diagonal in the attention weights and False is recommended. +- {ref}`attn_dotr ` determines whether to dot the relative coordinates on the attention weights as a gated scheme, True is recommended. ### Descriptor `"se_atten_v2"` @@ -138,7 +138,7 @@ You can use descriptor `"se_atten_v2"` and do not need to set `tebd_input_mode` Practical evidence demonstrates that `"se_atten_v2"` offers better and more stable performance compared to `"se_atten"`. -Notice: Model compression for the `se_atten_v2` descriptor is exclusively designed for models with the training parameter {ref}`attn_layer ` set to 0. +Notice: Model compression for the `se_atten_v2` descriptor is exclusively designed for models with the training parameter {ref}`attn_layer ` set to 0. ### Fitting `"ener"` diff --git a/doc/model/train-se-e2-a.md b/doc/model/train-se-e2-a.md index 2412bbc64e..81b95399e0 100644 --- a/doc/model/train-se-e2-a.md +++ b/doc/model/train-se-e2-a.md @@ -70,7 +70,7 @@ $deepmd_source_dir/examples/water/se_e2_a/input.json With the training input script, data are also provided in the example directory. One may train the model with the DeePMD-kit from the directory. -The construction of the descriptor is given by section {ref}`descriptor `. An example of the descriptor is provided as follows +The construction of the descriptor is given by section {ref}`descriptor `. An example of the descriptor is provided as follows ```json "descriptor" :{ @@ -86,11 +86,11 @@ The construction of the descriptor is given by section {ref}`descriptor ` of the descriptor is set to `"se_e2_a"`. -- {ref}`rcut ` is the cut-off radius for neighbor searching, and the {ref}`rcut_smth ` gives where the smoothing starts. -- {ref}`sel ` gives the maximum possible number of neighbors in the cut-off radius. It is a list, the length of which is the same as the number of atom types in the system, and `sel[i]` denotes the maximum possible number of neighbors with type `i`. -- The {ref}`neuron ` specifies the size of the embedding net. From left to right the members denote the sizes of each hidden layer from the input end to the output end, respectively. If the outer layer is twice the size of the inner layer, then the inner layer is copied and concatenated, then a [ResNet architecture](https://arxiv.org/abs/1512.03385) is built between them. -- If the option {ref}`type_one_side ` is set to `true`, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters. -- The {ref}`axis_neuron ` specifies the size of the submatrix of the embedding matrix, the axis matrix as explained in the [DeepPot-SE paper](https://arxiv.org/abs/1805.09003) -- If the option {ref}`resnet_dt ` is set to `true`, then a timestep is used in the ResNet. -- {ref}`seed ` gives the random seed that is used to generate random numbers when initializing the model parameters. +- The {ref}`type ` of the descriptor is set to `"se_e2_a"`. +- {ref}`rcut ` is the cut-off radius for neighbor searching, and the {ref}`rcut_smth ` gives where the smoothing starts. +- {ref}`sel ` gives the maximum possible number of neighbors in the cut-off radius. It is a list, the length of which is the same as the number of atom types in the system, and `sel[i]` denotes the maximum possible number of neighbors with type `i`. +- The {ref}`neuron ` specifies the size of the embedding net. From left to right the members denote the sizes of each hidden layer from the input end to the output end, respectively. If the outer layer is twice the size of the inner layer, then the inner layer is copied and concatenated, then a [ResNet architecture](https://arxiv.org/abs/1512.03385) is built between them. +- If the option {ref}`type_one_side ` is set to `true`, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters. +- The {ref}`axis_neuron ` specifies the size of the submatrix of the embedding matrix, the axis matrix as explained in the [DeepPot-SE paper](https://arxiv.org/abs/1805.09003) +- If the option {ref}`resnet_dt ` is set to `true`, then a timestep is used in the ResNet. +- {ref}`seed ` gives the random seed that is used to generate random numbers when initializing the model parameters. diff --git a/doc/model/train-se-e2-r.md b/doc/model/train-se-e2-r.md index f427310196..316bde43b4 100644 --- a/doc/model/train-se-e2-r.md +++ b/doc/model/train-se-e2-r.md @@ -52,7 +52,7 @@ A complete training input script of this example can be found in the directory $deepmd_source_dir/examples/water/se_e2_r/input.json ``` -The training input script is very similar to that of [`se_e2_a`](train-se-e2-a.md). The only difference lies in the {ref}`descriptor ` section +The training input script is very similar to that of [`se_e2_a`](train-se-e2-a.md). The only difference lies in the {ref}`descriptor ` section ```json "descriptor": { @@ -68,4 +68,4 @@ The training input script is very similar to that of [`se_e2_a`](train-se-e2-a.m }, ``` -The type of the descriptor is set by the key {ref}`type `. +The type of the descriptor is set by the key {ref}`type `. diff --git a/doc/model/train-se-e3-tebd.md b/doc/model/train-se-e3-tebd.md index 8b49b0c220..5935a8920a 100644 --- a/doc/model/train-se-e3-tebd.md +++ b/doc/model/train-se-e3-tebd.md @@ -56,7 +56,7 @@ A complete training input script of this example can be found in the directory $deepmd_source_dir/examples/water/se_e3_tebd/input.json ``` -The training input script is very similar to that of [`se_e2_a`](train-se-e2-a.md). The only difference lies in the {ref}`descriptor ` section +The training input script is very similar to that of [`se_e2_a`](train-se-e2-a.md). The only difference lies in the {ref}`descriptor ` section ```json "descriptor": { @@ -75,4 +75,4 @@ The training input script is very similar to that of [`se_e2_a`](train-se-e2-a.m }, ``` -The type of the descriptor is set by the key {ref}`type `. +The type of the descriptor is set by the key {ref}`type `. diff --git a/doc/model/train-se-e3.md b/doc/model/train-se-e3.md index d650d72493..3d82c42c9e 100644 --- a/doc/model/train-se-e3.md +++ b/doc/model/train-se-e3.md @@ -48,7 +48,7 @@ A complete training input script of this example can be found in the directory $deepmd_source_dir/examples/water/se_e3/input.json ``` -The training input script is very similar to that of [`se_e2_a`](train-se-e2-a.md). The only difference lies in the `descriptor ` section +The training input script is very similar to that of [`se_e2_a`](train-se-e2-a.md). The only difference lies in the `descriptor ` section ```json "descriptor": { @@ -63,4 +63,4 @@ The training input script is very similar to that of [`se_e2_a`](train-se-e2-a.m }, ``` -The type of the descriptor is set by the key {ref}`type `. +The type of the descriptor is set by the key {ref}`type `. diff --git a/doc/train/finetuning.md b/doc/train/finetuning.md index 4fbe95b2fd..669d1319bd 100644 --- a/doc/train/finetuning.md +++ b/doc/train/finetuning.md @@ -36,7 +36,7 @@ The elements in the training dataset must be contained in the pre-trained datase The finetune procedure will inherit the model structures in `pretrained.pb`, and thus it will ignore the model parameters in `input.json`, -such as {ref}`descriptor `, {ref}`fitting_net `, +such as {ref}`descriptor `, {ref}`fitting_net `, {ref}`type_embedding ` and {ref}`type_map `. However, you can still set the `trainable` parameters in each part of `input.json` to control the training procedure. diff --git a/doc/train/gpu-limitations.md b/doc/train/gpu-limitations.md index 92577fd65c..44c9697dd4 100644 --- a/doc/train/gpu-limitations.md +++ b/doc/train/gpu-limitations.md @@ -5,5 +5,5 @@ If you use DeePMD-kit in a GPU environment, the acceptable value range of some v 1. The number of atom types of a given system must be less than 128. 2. The maximum distance between an atom and its neighbors must be less than 128. It can be controlled by setting the rcut value of training parameters. 3. Theoretically, the maximum number of atoms that a single GPU can accept is about 10,000,000. However, this value is limited by the GPU memory size currently, usually within 1000,000 atoms even in the model compression mode. -4. The total sel value of training parameters(in `model/descriptor` section) must be less than 4096. +4. The total sel value of training parameters(in `model[standard]/descriptor` section) must be less than 4096. 5. The size of the last layer of the embedding net must be less than 1024 during the model compression process. diff --git a/doc/train/training-advanced.md b/doc/train/training-advanced.md index 9be12e9fb8..d21feb2126 100644 --- a/doc/train/training-advanced.md +++ b/doc/train/training-advanced.md @@ -114,7 +114,7 @@ The section {ref}`mixed_precision ` specifies the mixe - {ref}`output_prec ` precision used in the output tensors, only `float32` is supported currently. - {ref}`compute_prec ` precision used in the computing tensors, only `float16` is supported currently. Note there are several limitations about mixed precision training: -- Only {ref}`se_e2_a ` type descriptor is supported by the mixed precision training workflow. +- Only {ref}`se_e2_a ` type descriptor is supported by the mixed precision training workflow. - The precision of the embedding net and the fitting net are forced to be set to `float32`. Other keys in the {ref}`training ` section are explained below: From 18026eb427a197ce4cc4c0e2b24a382e31b093f7 Mon Sep 17 00:00:00 2001 From: Lysithea <52808607+CaRoLZhangxy@users.noreply.github.com> Date: Wed, 23 Oct 2024 17:23:51 +0800 Subject: [PATCH 070/193] fix(cc): fix message passing when nghost is 0 and send list is empty (#4237) fix errors mentioned in following pr: https://github.com/deepmodeling/deepmd-kit/pull/4220 https://github.com/deepmodeling/deepmd-kit/pull/4209 https://github.com/deepmodeling/deepmd-kit/pull/4144 ## Summary by CodeRabbit - **New Features** - Enhanced message passing logic in the computation process for improved efficiency. - Added new test functions to evaluate DeepMD model performance under various conditions. - **Bug Fixes** - Improved error handling and assertions in test cases to ensure robustness. - **Refactor** - Streamlined tensor operations in the communication process to enhance clarity and reduce unnecessary computations. - Removed outdated test cases related to neighbor list handling in the DeepPot class. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- source/api_cc/src/DeepPotPT.cc | 22 ++++--- source/lmp/tests/test_lammps_dpa_pt_nopbc.py | 8 --- source/op/pt/comm.cc | 63 ++++++++++++-------- 3 files changed, 47 insertions(+), 46 deletions(-) diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index 84629042f4..4c7aac19b8 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -168,7 +168,7 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, nlist_data.copy_from_nlist(lmp_list); nlist_data.shuffle_exclude_empty(fwd_map); nlist_data.padding(); - if (do_message_passing == 1 && nghost > 0) { + if (do_message_passing == 1) { int nswap = lmp_list.nswap; torch::Tensor sendproc_tensor = torch::from_blob(lmp_list.sendproc, {nswap}, int32_option); @@ -180,10 +180,14 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, torch::from_blob(lmp_list.recvnum, {nswap}, int32_option); torch::Tensor sendnum_tensor = torch::from_blob(lmp_list.sendnum, {nswap}, int32_option); - torch::Tensor communicator_tensor = torch::from_blob( - const_cast(lmp_list.world), {1}, torch::kInt64); - // torch::Tensor communicator_tensor = - // torch::tensor(lmp_list.world, int32_option); + torch::Tensor communicator_tensor; + if (lmp_list.world == 0) { + communicator_tensor = torch::empty({1}, torch::kInt64); + } else { + communicator_tensor = torch::from_blob( + const_cast(lmp_list.world), {1}, torch::kInt64); + } + torch::Tensor nswap_tensor = torch::tensor(nswap, int32_option); int total_send = std::accumulate(lmp_list.sendnum, lmp_list.sendnum + nswap, 0); @@ -196,12 +200,6 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, comm_dict.insert("recv_num", recvnum_tensor); comm_dict.insert("communicator", communicator_tensor); } - if (do_message_passing == 1 && nghost == 0) { - // for the situation that no ghost atoms (e.g. serial nopbc) - // set the mapping arange(nloc) is enough - auto option = torch::TensorOptions().device(device).dtype(torch::kInt64); - mapping_tensor = at::arange(nloc_real, option).unsqueeze(0); - } } at::Tensor firstneigh = createNlistTensor(nlist_data.jlist); firstneigh_tensor = firstneigh.to(torch::kInt64).to(device); @@ -224,7 +222,7 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, .to(device); } c10::Dict outputs = - (do_message_passing == 1 && nghost > 0) + (do_message_passing == 1) ? module .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, firstneigh_tensor, mapping_tensor, fparam_tensor, diff --git a/source/lmp/tests/test_lammps_dpa_pt_nopbc.py b/source/lmp/tests/test_lammps_dpa_pt_nopbc.py index 15fe2c0bc2..b0909bfc03 100644 --- a/source/lmp/tests/test_lammps_dpa_pt_nopbc.py +++ b/source/lmp/tests/test_lammps_dpa_pt_nopbc.py @@ -681,14 +681,6 @@ def test_pair_deepmd_si(lammps_si): [(["--balance"],), ([],)], ) def test_pair_deepmd_mpi(balance_args: list): - if balance_args == []: - # python:5331 terminated with signal 11 at PC=7f3e940e3806 SP=7ffd5787edc0. Backtrace: - # /home/runner/work/deepmd-kit/deepmd-kit/dp_test/lib/libdeepmd_op_pt.so(+0x95806)[0x7f3e940e3806] - # /home/runner/work/deepmd-kit/deepmd-kit/dp_test/lib/libdeepmd_op_pt.so(+0x8f76e)[0x7f3e940dd76e] - # /home/runner/work/deepmd-kit/deepmd-kit/dp_test/lib/libdeepmd_op_pt.so(+0x9a38a)[0x7f3e940e838a] - # /home/runner/work/deepmd-kit/deepmd-kit/dp_test/lib/libdeepmd_op_pt.so(_Z9border_opRKN2at6TensorES2_S2_S2_S2_S2_S2_S2_S2_+0x8e)[0x7f3e940dda63] - # /home/runner/work/deepmd-kit/deepmd-kit/dp_test/lib/libdeepmd_op_pt.so(+0xaeac3)[0x7f3e940fcac3] - pytest.skip(reason="Known segfault, see comments for details") with tempfile.NamedTemporaryFile() as f: sp.check_call( [ diff --git a/source/op/pt/comm.cc b/source/op/pt/comm.cc index a25dfbd542..71a2b0e118 100644 --- a/source/op/pt/comm.cc +++ b/source/op/pt/comm.cc @@ -87,16 +87,18 @@ class Border : public torch::autograd::Function { int mpi_init = 0; MPI_Initialized(&mpi_init); int cuda_aware = 1; - int me; + int me = 0; MPI_Comm world; int world_size = 0; - unpack_communicator(communicator_tensor, world); - MPI_Comm_rank(world, &me); - MPI_Comm_size(world, &world_size); + if (mpi_init) { + unpack_communicator(communicator_tensor, world); + MPI_Comm_rank(world, &me); + MPI_Comm_size(world, &world_size); + } MPI_Datatype mpi_type = get_mpi_type(); MPI_Request request; #if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) - if (world_size != 1) { + if (world_size >= 1) { int version, subversion; MPI_Get_version(&version, &subversion); if (version >= 4) { @@ -120,11 +122,15 @@ class Border : public torch::autograd::Function { for (int iswap = 0; iswap < nswap; ++iswap) { int nrecv = recvnum[iswap]; int nsend = sendnum[iswap]; - torch::Tensor isendlist = - torch::from_blob(sendlist[iswap], {nsend}, int32_options) - .to(recv_g1_tensor.device()); - torch::Tensor send_g1_tensor = recv_g1_tensor.index_select(0, isendlist); - FPTYPE* send_g1 = send_g1_tensor.data_ptr(); + torch::Tensor isendlist; + torch::Tensor send_g1_tensor; + FPTYPE* send_g1; + if (nsend != 0) { + isendlist = torch::from_blob(sendlist[iswap], {nsend}, int32_options) + .to(recv_g1_tensor.device()); + send_g1_tensor = recv_g1_tensor.index_select(0, isendlist); + send_g1 = send_g1_tensor.data_ptr(); + } #ifdef USE_MPI if (sendproc[iswap] != me) { if (nrecv) { @@ -207,15 +213,17 @@ class Border : public torch::autograd::Function { MPI_Initialized(&mpi_init); int world_size = 0; int cuda_aware = 1; + int me = 0; MPI_Comm world; - unpack_communicator(communicator_tensor, world); - int me; - MPI_Comm_rank(world, &me); - MPI_Comm_size(world, &world_size); + if (mpi_init) { + unpack_communicator(communicator_tensor, world); + MPI_Comm_rank(world, &me); + MPI_Comm_size(world, &world_size); + } MPI_Datatype mpi_type = get_mpi_type(); MPI_Request request; #if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) - if (world_size != 1) { + if (world_size >= 1) { int version, subversion; MPI_Get_version(&version, &subversion); if (version >= 4) { @@ -248,17 +256,20 @@ class Border : public torch::autograd::Function { int nlocal = nlocal_tensor.item(); int nghost = nghost_tensor.item(); int ntotal = nlocal + nghost; - - torch::Tensor send_g1_tensor = d_local_g1_tensor; - - int max_recvnum = sendnum_tensor.max().item(); - auto options = torch::TensorOptions() - .dtype(d_local_g1_tensor.dtype()) - .device(d_local_g1_tensor.device()); - torch::Tensor recv_g1_tensor = - torch::empty({max_recvnum, tensor_size}, options); - FPTYPE* recv_g1 = recv_g1_tensor.data_ptr(); - FPTYPE* send_g1 = send_g1_tensor.data_ptr() + ntotal * tensor_size; + torch::Tensor send_g1_tensor; + torch::Tensor recv_g1_tensor; + FPTYPE* recv_g1; + FPTYPE* send_g1; + if (nswap != 0) { + send_g1_tensor = d_local_g1_tensor; + int max_recvnum = sendnum_tensor.max().item(); + auto options = torch::TensorOptions() + .dtype(d_local_g1_tensor.dtype()) + .device(d_local_g1_tensor.device()); + recv_g1_tensor = torch::empty({max_recvnum, tensor_size}, options); + recv_g1 = recv_g1_tensor.data_ptr(); + send_g1 = send_g1_tensor.data_ptr() + ntotal * tensor_size; + } int end = ntotal; auto int32_options = torch::TensorOptions().dtype(torch::kInt32); From a74d963d50e74164d18b66f2e0c4dcda49ff7cd0 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Wed, 23 Oct 2024 18:13:11 +0800 Subject: [PATCH 071/193] feat(pt): support CPU parallel training with PT (#4224) Fix #4132. ## Summary by CodeRabbit - **New Features** - Enhanced backend selection for distributed training, allowing for flexible use of NCCL or Gloo based on availability. - **Bug Fixes** - Corrected indentation for improved code clarity. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/pt/entrypoints/main.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/deepmd/pt/entrypoints/main.py b/deepmd/pt/entrypoints/main.py index 71f81b0a12..c56e7f0731 100644 --- a/deepmd/pt/entrypoints/main.py +++ b/deepmd/pt/entrypoints/main.py @@ -105,8 +105,7 @@ def get_trainer( local_rank = os.environ.get("LOCAL_RANK") if local_rank is not None: local_rank = int(local_rank) - assert dist.is_nccl_available() - dist.init_process_group(backend="nccl") + dist.init_process_group(backend="cuda:nccl,cpu:gloo") def prepare_trainer_input_single( model_params_single, data_dict_single, rank=0, seed=None From 8e777859c7f17e8812e4ed25237ca6b4f7cc04a3 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 23 Oct 2024 10:12:56 -0400 Subject: [PATCH 072/193] ci: limit downloaded files in build_docker (#4242) This download-artifact step fails randomly. Limiting the files to download should reduce the possibility of failure. ## Summary by CodeRabbit - **New Features** - Enhanced artifact downloading patterns for improved specificity in the build process. - Expanded artifact access during the PyPI index build. - **Improvements** - Maintained job structure and dependencies for consistent build and deployment processes. - Defined permissions for critical jobs to ensure secure operations. Signed-off-by: Jinzhe Zeng --- .github/workflows/build_wheel.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml index c4d2ed3486..db5745e241 100644 --- a/.github/workflows/build_wheel.yml +++ b/.github/workflows/build_wheel.yml @@ -148,6 +148,7 @@ jobs: - uses: actions/download-artifact@v4 with: path: source/install/docker/dist + pattern: cibw-*-manylinux_x86_64-cu${{ matrix.cuda_version }}* merge-multiple: true - name: Log in to the Container registry uses: docker/login-action@v3 @@ -180,6 +181,7 @@ jobs: - uses: actions/download-artifact@v4 with: path: dist/packages + pattern: cibw-* merge-multiple: true - uses: actions/setup-python@v5 name: Install Python From b78832df216009e3c628cc7083bc56413e6068c0 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Wed, 23 Oct 2024 22:31:16 +0800 Subject: [PATCH 073/193] fix(pt): make int `rcut` safe after jit op (#4222) Fix #3974. ## Summary by CodeRabbit - **New Features** - Improved type consistency for neighbor statistics calculations by ensuring the `rcut` and `rcut_smth` parameters are always floats. - Enhanced handling of input parameters in various descriptor classes for better robustness. - **Bug Fixes** - Enhanced performance and reliability of neighbor statistics operations through constructor updates. - **Documentation** - Updated method signatures for clarity and consistency. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../atomic_model/pairtab_atomic_model.py | 4 ++-- deepmd/pt/model/descriptor/repformer_layer.py | 4 ++-- deepmd/pt/model/descriptor/repformers.py | 4 ++-- deepmd/pt/model/descriptor/se_a.py | 4 ++-- deepmd/pt/model/descriptor/se_atten.py | 4 ++-- deepmd/pt/model/descriptor/se_r.py | 4 ++-- deepmd/pt/model/descriptor/se_t.py | 4 ++-- deepmd/pt/model/descriptor/se_t_tebd.py | 4 ++-- deepmd/pt/utils/neighbor_stat.py | 2 +- source/tests/pt/model/test_jit.py | 22 +++++++++++++++++++ 10 files changed, 39 insertions(+), 17 deletions(-) diff --git a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py index 2918bba947..9a7ea14cfb 100644 --- a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py +++ b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py @@ -74,8 +74,8 @@ def __init__( super().__init__(type_map, **kwargs) super().init_out_stat() self.tab_file = tab_file - self.rcut = rcut - self.tab = self._set_pairtab(tab_file, rcut) + self.rcut = float(rcut) + self.tab = self._set_pairtab(tab_file, self.rcut) self.type_map = type_map self.ntypes = len(type_map) diff --git a/deepmd/pt/model/descriptor/repformer_layer.py b/deepmd/pt/model/descriptor/repformer_layer.py index 92e2404469..5270c94112 100644 --- a/deepmd/pt/model/descriptor/repformer_layer.py +++ b/deepmd/pt/model/descriptor/repformer_layer.py @@ -605,8 +605,8 @@ def __init__( ): super().__init__() self.epsilon = 1e-4 # protection of 1./nnei - self.rcut = rcut - self.rcut_smth = rcut_smth + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) self.ntypes = ntypes sel = [sel] if isinstance(sel, int) else sel self.nnei = sum(sel) diff --git a/deepmd/pt/model/descriptor/repformers.py b/deepmd/pt/model/descriptor/repformers.py index ad4ead4d74..f237088a16 100644 --- a/deepmd/pt/model/descriptor/repformers.py +++ b/deepmd/pt/model/descriptor/repformers.py @@ -193,8 +193,8 @@ def __init__( Random seed for parameter initialization. """ super().__init__() - self.rcut = rcut - self.rcut_smth = rcut_smth + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) self.ntypes = ntypes self.nlayers = nlayers sel = [sel] if isinstance(sel, int) else sel diff --git a/deepmd/pt/model/descriptor/se_a.py b/deepmd/pt/model/descriptor/se_a.py index e939a2541b..ffd645f2b9 100644 --- a/deepmd/pt/model/descriptor/se_a.py +++ b/deepmd/pt/model/descriptor/se_a.py @@ -395,8 +395,8 @@ def __init__( - axis_neuron: Number of columns of the sub-matrix of the embedding matrix. """ super().__init__() - self.rcut = rcut - self.rcut_smth = rcut_smth + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) self.neuron = neuron self.filter_neuron = self.neuron self.axis_neuron = axis_neuron diff --git a/deepmd/pt/model/descriptor/se_atten.py b/deepmd/pt/model/descriptor/se_atten.py index c028230e9b..c3174a2011 100644 --- a/deepmd/pt/model/descriptor/se_atten.py +++ b/deepmd/pt/model/descriptor/se_atten.py @@ -149,8 +149,8 @@ def __init__( """ super().__init__() del type - self.rcut = rcut - self.rcut_smth = rcut_smth + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) self.neuron = neuron self.filter_neuron = self.neuron self.axis_neuron = axis_neuron diff --git a/deepmd/pt/model/descriptor/se_r.py b/deepmd/pt/model/descriptor/se_r.py index e82bb23dac..4492a6c6b5 100644 --- a/deepmd/pt/model/descriptor/se_r.py +++ b/deepmd/pt/model/descriptor/se_r.py @@ -74,8 +74,8 @@ def __init__( **kwargs, ): super().__init__() - self.rcut = rcut - self.rcut_smth = rcut_smth + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) self.neuron = neuron self.filter_neuron = self.neuron self.set_davg_zero = set_davg_zero diff --git a/deepmd/pt/model/descriptor/se_t.py b/deepmd/pt/model/descriptor/se_t.py index 072457b48f..49dbdaf027 100644 --- a/deepmd/pt/model/descriptor/se_t.py +++ b/deepmd/pt/model/descriptor/se_t.py @@ -446,8 +446,8 @@ def __init__( Random seed for initializing the network parameters. """ super().__init__() - self.rcut = rcut - self.rcut_smth = rcut_smth + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) self.neuron = neuron self.filter_neuron = self.neuron self.set_davg_zero = set_davg_zero diff --git a/deepmd/pt/model/descriptor/se_t_tebd.py b/deepmd/pt/model/descriptor/se_t_tebd.py index 437a464709..c140527f31 100644 --- a/deepmd/pt/model/descriptor/se_t_tebd.py +++ b/deepmd/pt/model/descriptor/se_t_tebd.py @@ -512,8 +512,8 @@ def __init__( seed: Optional[Union[int, list[int]]] = None, ): super().__init__() - self.rcut = rcut - self.rcut_smth = rcut_smth + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) self.neuron = neuron self.filter_neuron = self.neuron self.tebd_dim = tebd_dim diff --git a/deepmd/pt/utils/neighbor_stat.py b/deepmd/pt/utils/neighbor_stat.py index d427dc758a..7d52bfaae1 100644 --- a/deepmd/pt/utils/neighbor_stat.py +++ b/deepmd/pt/utils/neighbor_stat.py @@ -44,7 +44,7 @@ def __init__( mixed_types: bool, ) -> None: super().__init__() - self.rcut = rcut + self.rcut = float(rcut) self.ntypes = ntypes self.mixed_types = mixed_types diff --git a/source/tests/pt/model/test_jit.py b/source/tests/pt/model/test_jit.py index 248ccf9173..1f1034c330 100644 --- a/source/tests/pt/model/test_jit.py +++ b/source/tests/pt/model/test_jit.py @@ -144,5 +144,27 @@ def tearDown(self): JITTest.tearDown(self) +class TestEnergyModelDPA2IntRcut(unittest.TestCase, JITTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_dpa2) + self.config["model"]["descriptor"]["repinit"]["rcut"] = int( + self.config["model"]["descriptor"]["repinit"]["rcut"] + ) + self.config["model"]["descriptor"]["repinit"]["rcut_smth"] = int( + self.config["model"]["descriptor"]["repinit"]["rcut_smth"] + ) + self.config["training"]["numb_steps"] = 10 + self.config["training"]["save_freq"] = 10 + + def tearDown(self): + JITTest.tearDown(self) + + if __name__ == "__main__": unittest.main() From 0fa1b435355cbcfe10af8e766a38fe34c502738f Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 23 Oct 2024 10:58:02 -0400 Subject: [PATCH 074/193] style: enable TorchFix in pre-commit (#4230) Enable [TorchFix](https://github.com/pytorch-labs/torchfix). Need to resolve the following issues before merging: - #4229 - #4143 ## Summary by CodeRabbit - **New Features** - Integrated `flake8` for enhanced code quality checks. - Added `ruff-format` hook for improved linting. - Implemented consistent license header insertion for various file types. - **Bug Fixes** - Updated `check-added-large-files` hook to enforce a maximum file size of 1024 KB. - **Documentation** - Updated linting configuration with new rules under `[tool.flake8]`. --------- Signed-off-by: Jinzhe Zeng --- .pre-commit-config.yaml | 8 ++++++++ pyproject.toml | 7 +++++++ 2 files changed, 15 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 30efa6b062..53fdd9b71c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -38,6 +38,14 @@ repos: - id: ruff-format exclude: ^source/3rdparty types_or: [python, pyi, jupyter] + - repo: https://github.com/pycqa/flake8 + # flake8 cannot autofix + rev: "7.1.1" + hooks: + - id: flake8 + additional_dependencies: + - torchfix==0.6.0 + - flake8-pyproject==1.2.3 # numpydoc - repo: https://github.com/Carreau/velin rev: 0.0.12 diff --git a/pyproject.toml b/pyproject.toml index b13dceeb07..4dbff24f13 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -419,3 +419,10 @@ plugins = ["source.3rdparty.coverage_plugins.jit_plugin"] load-plugins = "deepmd_checker" disable = "all" enable = "E8001,E8002" + +[tool.flake8] +select = [ + "TOR0", + "TOR1", + "TOR2", +] From c2d05602de39f07ae799e7b58be7db93a6098584 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 24 Oct 2024 02:57:26 +0800 Subject: [PATCH 075/193] Chore(pt): slim uts for dpa1 (#4244) ## Summary by CodeRabbit - **Tests** - Simplified test parameters and logic for the `TestDPA1` class, reducing complexity in test cases. - Streamlined conditions for skipping tests based on specific parameters, enhancing test efficiency. --------- Signed-off-by: Duo <50307526+iProzd@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinzhe Zeng --- .../tests/consistent/descriptor/test_dpa1.py | 24 +++++-------------- 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/source/tests/consistent/descriptor/test_dpa1.py b/source/tests/consistent/descriptor/test_dpa1.py index ed7884adb9..3d80e310d0 100644 --- a/source/tests/consistent/descriptor/test_dpa1.py +++ b/source/tests/consistent/descriptor/test_dpa1.py @@ -52,22 +52,22 @@ (4,), # tebd_dim ("concat", "strip"), # tebd_input_mode (True,), # resnet_dt - (True, False), # type_one_side + (True,), # type_one_side (20,), # attn (0, 2), # attn_layer - (True, False), # attn_dotr + (True,), # attn_dotr ([], [[0, 1]]), # excluded_types (0.0,), # env_protection (True, False), # set_davg_zero (1.0,), # scaling_factor - (True, False), # normalize + (True,), # normalize (None, 1.0), # temperature (1e-5,), # ln_eps - (True, False), # smooth_type_embedding + (True,), # smooth_type_embedding (True,), # concat_output_tebd ("float64",), # precision (True, False), # use_econf_tebd - (False, True), # use_tebd_bias + (False,), # use_tebd_bias ) class TestDPA1(CommonTest, DescriptorTest, unittest.TestCase): @property @@ -127,11 +127,9 @@ def data(self) -> dict: def is_meaningless_zero_attention_layer_tests( self, attn_layer: int, - attn_dotr: bool, - normalize: bool, temperature: Optional[float], ) -> bool: - return attn_layer == 0 and (attn_dotr or normalize or temperature is not None) + return attn_layer == 0 and (temperature is not None) @property def skip_pt(self) -> bool: @@ -158,8 +156,6 @@ def skip_pt(self) -> bool: ) = self.param return CommonTest.skip_pt or self.is_meaningless_zero_attention_layer_tests( attn_layer, - attn_dotr, - normalize, temperature, ) @@ -188,8 +184,6 @@ def skip_dp(self) -> bool: ) = self.param return CommonTest.skip_dp or self.is_meaningless_zero_attention_layer_tests( attn_layer, - attn_dotr, - normalize, temperature, ) @@ -218,8 +212,6 @@ def skip_jax(self) -> bool: ) = self.param return not INSTALLED_JAX or self.is_meaningless_zero_attention_layer_tests( attn_layer, - attn_dotr, - normalize, temperature, ) @@ -250,8 +242,6 @@ def skip_array_api_strict(self) -> bool: not INSTALLED_ARRAY_API_STRICT or self.is_meaningless_zero_attention_layer_tests( attn_layer, - attn_dotr, - normalize, temperature, ) ) @@ -290,8 +280,6 @@ def skip_tf(self) -> bool: ) or self.is_meaningless_zero_attention_layer_tests( attn_layer, - attn_dotr, - normalize, temperature, ) ) From 0f817e16bd8eedc5ddc79d707799269be9273bde Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 23 Oct 2024 19:45:48 -0400 Subject: [PATCH 076/193] style: extend no-explicit-dtype check to xp and jnp (#4247) ## Summary by CodeRabbit - **New Features** - Expanded the `DPChecker` to recognize additional libraries ("xp" and "jnp") for enhanced validation of function calls. - **Bug Fixes** - Improved compatibility of the `offset` calculation in the `xp_take_along_axis` function to ensure it matches the data type of the `indices` array. --------- Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/array_api.py | 2 +- source/checker/deepmd_checker.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deepmd/dpmodel/array_api.py b/deepmd/dpmodel/array_api.py index 360df78a7b..322bf0e151 100644 --- a/deepmd/dpmodel/array_api.py +++ b/deepmd/dpmodel/array_api.py @@ -61,7 +61,7 @@ def xp_take_along_axis(arr, indices, axis): else: indices = xp.reshape(indices, (0, 0)) - offset = (xp.arange(indices.shape[0]) * m)[:, xp.newaxis] + offset = (xp.arange(indices.shape[0], dtype=indices.dtype) * m)[:, xp.newaxis] indices = xp.reshape(offset + indices, (-1,)) out = xp.take(arr, indices) diff --git a/source/checker/deepmd_checker.py b/source/checker/deepmd_checker.py index d763835fdc..0e11ed71c7 100644 --- a/source/checker/deepmd_checker.py +++ b/source/checker/deepmd_checker.py @@ -37,7 +37,7 @@ def visit_call(self, node): if ( isinstance(node.func, Attribute) and isinstance(node.func.expr, Name) - and node.func.expr.name in {"np", "tf", "torch"} + and node.func.expr.name in {"np", "tf", "torch", "xp", "jnp"} and node.func.attrname in { # https://pytorch.org/docs/stable/torch.html#creation-ops From c870ccf2489ef609f1738642b6a7e662b627a3b2 Mon Sep 17 00:00:00 2001 From: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> Date: Thu, 24 Oct 2024 16:40:00 +0800 Subject: [PATCH 077/193] fix(np/pt): explicit dtype and device. (#4241) fix: https://github.com/deepmodeling/deepmd-kit/issues/4016 ## Summary by CodeRabbit ## Release Notes - **New Features** - Enhanced precision handling for various models and computations, ensuring consistent data types across multiple components. - Introduced new parameters for model fitting processes to improve flexibility and control over data handling. - **Bug Fixes** - Improved error handling in several methods to prevent runtime issues related to data type mismatches and invalid configurations. - **Documentation** - Updated comments and documentation strings for clarity on parameter usage and expected behaviors. - **Refactor** - Streamlined code by enhancing type safety and consistency in tensor operations, particularly in statistical computations and model fitting processes. - **Tests** - Improved test structure for better maintainability without altering existing functionality. --------- Signed-off-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> Co-authored-by: Han Wang Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../dpmodel/atomic_model/base_atomic_model.py | 11 +++++-- .../atomic_model/linear_atomic_model.py | 9 +++++- .../atomic_model/pairtab_atomic_model.py | 8 +++-- deepmd/dpmodel/common.py | 27 ++++++++++++++++ deepmd/dpmodel/fitting/general_fitting.py | 30 +++++++++++++----- .../dpmodel/fitting/polarizability_fitting.py | 2 +- deepmd/dpmodel/infer/deep_eval.py | 7 +++-- deepmd/dpmodel/utils/nlist.py | 11 ++++--- deepmd/infer/model_devi.py | 4 +-- deepmd/pt/infer/deep_eval.py | 19 +++++++----- .../model/atomic_model/linear_atomic_model.py | 11 +++++-- .../atomic_model/pairtab_atomic_model.py | 8 +++-- deepmd/pt/model/descriptor/descriptor.py | 12 +++++-- deepmd/pt/model/descriptor/repformers.py | 20 +++++++++--- deepmd/pt/model/descriptor/se_a.py | 8 +++-- deepmd/pt/model/descriptor/se_atten.py | 8 +++-- deepmd/pt/model/descriptor/se_r.py | 20 +++++++++--- deepmd/pt/model/descriptor/se_t.py | 8 +++-- deepmd/pt/model/descriptor/se_t_tebd.py | 8 +++-- deepmd/pt/model/model/__init__.py | 2 +- deepmd/pt/model/task/ener.py | 7 +++-- deepmd/pt/model/task/fitting.py | 4 ++- deepmd/pt/train/training.py | 4 ++- deepmd/pt/utils/nlist.py | 27 ++++++++++++---- deepmd/pt/utils/stat.py | 8 ++++- deepmd/utils/data.py | 12 +++---- deepmd/utils/data_system.py | 10 ++++-- deepmd/utils/out_stat.py | 8 +++-- deepmd/utils/pair_tab.py | 23 +++++++++----- deepmd/utils/spin.py | 22 +++++++++---- source/tests/common/test_auto_batch_size.py | 5 ++- source/tests/common/test_common.py | 31 +++++++++++++++++++ 32 files changed, 301 insertions(+), 93 deletions(-) create mode 100644 source/tests/common/test_common.py diff --git a/deepmd/dpmodel/atomic_model/base_atomic_model.py b/deepmd/dpmodel/atomic_model/base_atomic_model.py index 6307b19f41..b615c81d1f 100644 --- a/deepmd/dpmodel/atomic_model/base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/base_atomic_model.py @@ -19,6 +19,9 @@ AtomExcludeMask, PairExcludeMask, ) +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) from deepmd.utils.finetune import ( get_index_between_two_maps, map_atom_exclude_types, @@ -56,8 +59,12 @@ def init_out_stat(self): [self.atomic_output_def()[kk].size for kk in self.bias_keys] ) self.n_out = len(self.bias_keys) - out_bias_data = np.zeros([self.n_out, ntypes, self.max_out_size]) # pylint: disable=no-explicit-dtype - out_std_data = np.ones([self.n_out, ntypes, self.max_out_size]) # pylint: disable=no-explicit-dtype + out_bias_data = np.zeros( + [self.n_out, ntypes, self.max_out_size], dtype=GLOBAL_NP_FLOAT_PRECISION + ) + out_std_data = np.ones( + [self.n_out, ntypes, self.max_out_size], dtype=GLOBAL_NP_FLOAT_PRECISION + ) self.out_bias = out_bias_data self.out_std = out_std_data diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py index 79a51635d2..880c92f504 100644 --- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py @@ -12,6 +12,9 @@ get_multiple_nlist_key, nlist_distinguish_types, ) +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) from deepmd.utils.version import ( check_version_compatibility, ) @@ -286,7 +289,11 @@ def _compute_weight( """This should be a list of user defined weights that matches the number of models to be combined.""" nmodels = len(self.models) nframes, nloc, _ = nlists_[0].shape - return [np.ones((nframes, nloc, 1)) / nmodels for _ in range(nmodels)] # pylint: disable=no-explicit-dtype + # the dtype of weights is the interface data type. + return [ + np.ones((nframes, nloc, 1), dtype=GLOBAL_NP_FLOAT_PRECISION) / nmodels + for _ in range(nmodels) + ] def get_dim_fparam(self) -> int: """Get the number (dimension) of frame parameters of this atomic model.""" diff --git a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py index 22471d3f32..2899f106bc 100644 --- a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py @@ -204,9 +204,10 @@ def forward_atomic( self.tab.ntypes, self.tab.ntypes, self.tab.nspline, 4 ) - # (nframes, nloc, nnei) + # (nframes, nloc, nnei), index type is int64. j_type = extended_atype[ - np.arange(extended_atype.shape[0])[:, None, None], masked_nlist # pylint: disable=no-explicit-dtype + np.arange(extended_atype.shape[0], dtype=np.int64)[:, None, None], + masked_nlist, ] raw_atomic_energy = self._pair_tabulated_inter( @@ -303,7 +304,8 @@ def _get_pairwise_dist(coords: np.ndarray, nlist: np.ndarray) -> np.ndarray: np.ndarray The pairwise distance between the atoms (nframes, nloc, nnei). """ - batch_indices = np.arange(nlist.shape[0])[:, None, None] # pylint: disable=no-explicit-dtype + # index type is int64 + batch_indices = np.arange(nlist.shape[0], dtype=np.int64)[:, None, None] neighbor_atoms = coords[batch_indices, nlist] loc_atoms = coords[:, : nlist.shape[1], :] pairwise_dr = loc_atoms[:, :, None, :] - neighbor_atoms diff --git a/deepmd/dpmodel/common.py b/deepmd/dpmodel/common.py index d9d57d2d6c..5c75229e49 100644 --- a/deepmd/dpmodel/common.py +++ b/deepmd/dpmodel/common.py @@ -50,6 +50,33 @@ DEFAULT_PRECISION = "float64" +def get_xp_precision( + xp: Any, + precision: str, +): + """Get the precision from the API compatible namespace.""" + if precision == "float16" or precision == "half": + return xp.float16 + elif precision == "float32" or precision == "single": + return xp.float32 + elif precision == "float64" or precision == "double": + return xp.float64 + elif precision == "int32": + return xp.int32 + elif precision == "int64": + return xp.int64 + elif precision == "bool": + return bool + elif precision == "default": + return get_xp_precision(xp, RESERVED_PRECISON_DICT[PRECISION_DICT[precision]]) + elif precision == "global": + return get_xp_precision(xp, RESERVED_PRECISON_DICT[GLOBAL_NP_FLOAT_PRECISION]) + elif precision == "bfloat16": + return ml_dtypes.bfloat16 + else: + raise ValueError(f"unsupported precision {precision} for {xp}") + + class NativeOP(ABC): """The unit operation of a native model.""" diff --git a/deepmd/dpmodel/fitting/general_fitting.py b/deepmd/dpmodel/fitting/general_fitting.py index fd80ccb4aa..62aafc6207 100644 --- a/deepmd/dpmodel/fitting/general_fitting.py +++ b/deepmd/dpmodel/fitting/general_fitting.py @@ -14,9 +14,11 @@ from deepmd.dpmodel import ( DEFAULT_PRECISION, + PRECISION_DICT, NativeOP, ) from deepmd.dpmodel.common import ( + get_xp_precision, to_numpy_array, ) from deepmd.dpmodel.utils import ( @@ -27,6 +29,9 @@ from deepmd.dpmodel.utils.seed import ( child_seed, ) +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) from deepmd.utils.finetune import ( get_index_between_two_maps, map_atom_exclude_types, @@ -133,6 +138,11 @@ def __init__( self.trainable = [self.trainable] * (len(self.neuron) + 1) self.activation_function = activation_function self.precision = precision + if self.precision.lower() not in PRECISION_DICT: + raise ValueError( + f"Unsupported precision '{self.precision}'. Supported options are: {list(PRECISION_DICT.keys())}" + ) + self.prec = PRECISION_DICT[self.precision.lower()] self.layer_name = layer_name self.use_aparam_as_mask = use_aparam_as_mask self.spin = spin @@ -146,18 +156,20 @@ def __init__( net_dim_out = self._net_out_dim() # init constants if bias_atom_e is None: - self.bias_atom_e = np.zeros([self.ntypes, net_dim_out]) # pylint: disable=no-explicit-dtype + self.bias_atom_e = np.zeros( + [self.ntypes, net_dim_out], dtype=GLOBAL_NP_FLOAT_PRECISION + ) else: assert bias_atom_e.shape == (self.ntypes, net_dim_out) - self.bias_atom_e = bias_atom_e + self.bias_atom_e = bias_atom_e.astype(GLOBAL_NP_FLOAT_PRECISION) if self.numb_fparam > 0: - self.fparam_avg = np.zeros(self.numb_fparam) # pylint: disable=no-explicit-dtype - self.fparam_inv_std = np.ones(self.numb_fparam) # pylint: disable=no-explicit-dtype + self.fparam_avg = np.zeros(self.numb_fparam, dtype=self.prec) + self.fparam_inv_std = np.ones(self.numb_fparam, dtype=self.prec) else: self.fparam_avg, self.fparam_inv_std = None, None if self.numb_aparam > 0: - self.aparam_avg = np.zeros(self.numb_aparam) # pylint: disable=no-explicit-dtype - self.aparam_inv_std = np.ones(self.numb_aparam) # pylint: disable=no-explicit-dtype + self.aparam_avg = np.zeros(self.numb_aparam, dtype=self.prec) + self.aparam_inv_std = np.ones(self.numb_aparam, dtype=self.prec) else: self.aparam_avg, self.aparam_inv_std = None, None # init networks @@ -410,7 +422,9 @@ def _call_common( # calcualte the prediction if not self.mixed_types: - outs = xp.zeros([nf, nloc, net_dim_out]) # pylint: disable=no-explicit-dtype + outs = xp.zeros( + [nf, nloc, net_dim_out], dtype=get_xp_precision(xp, self.precision) + ) for type_i in range(self.ntypes): mask = xp.tile( xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, net_dim_out) @@ -436,4 +450,4 @@ def _call_common( exclude_mask = self.emask.build_type_exclude_mask(atype) # nf x nloc x nod outs = outs * xp.astype(exclude_mask[:, :, None], outs.dtype) - return {self.var_name: outs} + return {self.var_name: xp.astype(outs, get_xp_precision(xp, "global"))} diff --git a/deepmd/dpmodel/fitting/polarizability_fitting.py b/deepmd/dpmodel/fitting/polarizability_fitting.py index 73a691f482..2d96eec580 100644 --- a/deepmd/dpmodel/fitting/polarizability_fitting.py +++ b/deepmd/dpmodel/fitting/polarizability_fitting.py @@ -303,7 +303,7 @@ def call( bias = self.constant_matrix[atype] # (nframes, nloc, 1) bias = np.expand_dims(bias, axis=-1) * self.scale[atype] - eye = np.eye(3) # pylint: disable=no-explicit-dtype + eye = np.eye(3, dtype=descriptor.dtype) eye = np.tile(eye, (nframes, nloc, 1, 1)) # (nframes, nloc, 3, 3) bias = np.expand_dims(bias, axis=-1) * eye diff --git a/deepmd/dpmodel/infer/deep_eval.py b/deepmd/dpmodel/infer/deep_eval.py index 695edb29d2..2b1e74c8de 100644 --- a/deepmd/dpmodel/infer/deep_eval.py +++ b/deepmd/dpmodel/infer/deep_eval.py @@ -24,6 +24,9 @@ from deepmd.dpmodel.utils.serialization import ( load_dp_model, ) +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) from deepmd.infer.deep_dipole import ( DeepDipole, ) @@ -340,12 +343,12 @@ def _eval_model( if batch_output[dp_name] is not None: out = batch_output[dp_name].reshape(shape) else: - out = np.full(shape, np.nan) # pylint: disable=no-explicit-dtype + out = np.full(shape, np.nan, dtype=GLOBAL_NP_FLOAT_PRECISION) results.append(out) else: shape = self._get_output_shape(odef, nframes, natoms) results.append( - np.full(np.abs(shape), np.nan) # pylint: disable=no-explicit-dtype + np.full(np.abs(shape), np.nan, dtype=GLOBAL_NP_FLOAT_PRECISION) ) # this is kinda hacky return tuple(results) diff --git a/deepmd/dpmodel/utils/nlist.py b/deepmd/dpmodel/utils/nlist.py index c56f1bc061..3ef17fc6b9 100644 --- a/deepmd/dpmodel/utils/nlist.py +++ b/deepmd/dpmodel/utils/nlist.py @@ -131,7 +131,7 @@ def build_neighbor_list( nlist = nlist[:, :, :nsel] else: rr = xp.concatenate( - [rr, xp.ones([batch_size, nloc, nsel - nnei]) + rcut], # pylint: disable=no-explicit-dtype + [rr, xp.ones([batch_size, nloc, nsel - nnei], dtype=rr.dtype) + rcut], axis=-1, ) nlist = xp.concatenate( @@ -277,7 +277,8 @@ def extend_coord_with_ghosts( """ xp = array_api_compat.array_namespace(coord, atype) nf, nloc = atype.shape - aidx = xp.tile(xp.arange(nloc)[xp.newaxis, :], (nf, 1)) # pylint: disable=no-explicit-dtype + # int64 for index + aidx = xp.tile(xp.arange(nloc, dtype=xp.int64)[xp.newaxis, :], (nf, 1)) if cell is None: nall = nloc extend_coord = coord @@ -289,9 +290,9 @@ def extend_coord_with_ghosts( to_face = to_face_distance(cell) nbuff = xp.astype(xp.ceil(rcut / to_face), xp.int64) nbuff = xp.max(nbuff, axis=0) - xi = xp.arange(-int(nbuff[0]), int(nbuff[0]) + 1, 1) # pylint: disable=no-explicit-dtype - yi = xp.arange(-int(nbuff[1]), int(nbuff[1]) + 1, 1) # pylint: disable=no-explicit-dtype - zi = xp.arange(-int(nbuff[2]), int(nbuff[2]) + 1, 1) # pylint: disable=no-explicit-dtype + xi = xp.arange(-int(nbuff[0]), int(nbuff[0]) + 1, 1, dtype=xp.int64) + yi = xp.arange(-int(nbuff[1]), int(nbuff[1]) + 1, 1, dtype=xp.int64) + zi = xp.arange(-int(nbuff[2]), int(nbuff[2]) + 1, 1, dtype=xp.int64) xyz = xp.linalg.outer(xi, xp.asarray([1, 0, 0]))[:, xp.newaxis, xp.newaxis, :] xyz = ( xyz diff --git a/deepmd/infer/model_devi.py b/deepmd/infer/model_devi.py index 29e1eec741..68100ba739 100644 --- a/deepmd/infer/model_devi.py +++ b/deepmd/infer/model_devi.py @@ -328,7 +328,7 @@ def calc_model_devi( forces = np.array(forces) virials = np.array(virials) - devi = [np.arange(coord.shape[0]) * frequency] # pylint: disable=no-explicit-dtype + devi = [np.arange(coord.shape[0], dtype=np.int64) * frequency] if real_data is None: devi += list(calc_model_devi_v(virials, relative=relative_v)) devi_f = list(calc_model_devi_f(forces, relative=relative, atomic=atomic)) @@ -502,7 +502,7 @@ def make_model_devi( nframes_tot += coord.shape[0] devis.append(devi) devis = np.vstack(devis) - devis[:, 0] = np.arange(nframes_tot) * frequency # pylint: disable=no-explicit-dtype + devis[:, 0] = np.arange(nframes_tot, dtype=np.int64) * frequency write_model_devi_out(devis, output, header=system, atomic=atomic) devis_coll.append(devis) return devis_coll diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index acf985974c..8f0b686e7b 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -396,6 +396,7 @@ def _eval_model( request_defs: list[OutputVariableDef], ): model = self.dp.to(DEVICE) + prec = NP_PRECISION_DICT[RESERVED_PRECISON_DICT[GLOBAL_PT_FLOAT_PRECISION]] nframes = coords.shape[0] if len(atom_types.shape) == 1: @@ -405,9 +406,7 @@ def _eval_model( natoms = len(atom_types[0]) coord_input = torch.tensor( - coords.reshape([nframes, natoms, 3]).astype( - NP_PRECISION_DICT[RESERVED_PRECISON_DICT[GLOBAL_PT_FLOAT_PRECISION]] - ), + coords.reshape([nframes, natoms, 3]).astype(prec), dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE, ) @@ -418,9 +417,7 @@ def _eval_model( ) if cells is not None: box_input = torch.tensor( - cells.reshape([nframes, 3, 3]).astype( - NP_PRECISION_DICT[RESERVED_PRECISON_DICT[GLOBAL_PT_FLOAT_PRECISION]] - ), + cells.reshape([nframes, 3, 3]).astype(prec), dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE, ) @@ -462,7 +459,7 @@ def _eval_model( else: shape = self._get_output_shape(odef, nframes, natoms) results.append( - np.full(np.abs(shape), np.nan) # pylint: disable=no-explicit-dtype + np.full(np.abs(shape), np.nan, dtype=prec) ) # this is kinda hacky return tuple(results) @@ -542,7 +539,13 @@ def _eval_model_spin( else: shape = self._get_output_shape(odef, nframes, natoms) results.append( - np.full(np.abs(shape), np.nan) # pylint: disable=no-explicit-dtype + np.full( + np.abs(shape), + np.nan, + dtype=NP_PRECISION_DICT[ + RESERVED_PRECISON_DICT[GLOBAL_PT_FLOAT_PRECISION] + ], + ) ) # this is kinda hacky return tuple(results) diff --git a/deepmd/pt/model/atomic_model/linear_atomic_model.py b/deepmd/pt/model/atomic_model/linear_atomic_model.py index 8d27fbcac4..0aa5afc67f 100644 --- a/deepmd/pt/model/atomic_model/linear_atomic_model.py +++ b/deepmd/pt/model/atomic_model/linear_atomic_model.py @@ -90,7 +90,9 @@ def __init__( self.rcuts = torch.tensor( self.get_model_rcuts(), dtype=torch.float64, device=env.DEVICE ) - self.nsels = torch.tensor(self.get_model_nsels(), device=env.DEVICE) # pylint: disable=no-explicit-dtype + self.nsels = torch.tensor( + self.get_model_nsels(), device=env.DEVICE, dtype=torch.int32 + ) if isinstance(weights, str): assert weights in ["sum", "mean"] @@ -299,8 +301,11 @@ def remap_atype(ori_map: list[str], new_map: list[str]) -> torch.Tensor: """ type_2_idx = {atp: idx for idx, atp in enumerate(ori_map)} # this maps the atype in the new map to the original map - mapping = torch.tensor( # pylint: disable=no-explicit-dtype - [type_2_idx[new_map[idx]] for idx in range(len(new_map))], device=env.DEVICE + # int32 should be enough for number of atom types. + mapping = torch.tensor( + [type_2_idx[new_map[idx]] for idx in range(len(new_map))], + device=env.DEVICE, + dtype=torch.int32, ) return mapping diff --git a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py index 9a7ea14cfb..28a165d501 100644 --- a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py +++ b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py @@ -269,9 +269,11 @@ def forward_atomic( # i_type : (nframes, nloc), this is atype. # j_type : (nframes, nloc, nnei) j_type = extended_atype[ - torch.arange(extended_atype.size(0), device=extended_coord.device)[ # pylint: disable=no-explicit-dtype - :, None, None - ], + torch.arange( + extended_atype.size(0), + device=extended_coord.device, + dtype=torch.int64, + )[:, None, None], masked_nlist, ] diff --git a/deepmd/pt/model/descriptor/descriptor.py b/deepmd/pt/model/descriptor/descriptor.py index 78a4608108..03173a7693 100644 --- a/deepmd/pt/model/descriptor/descriptor.py +++ b/deepmd/pt/model/descriptor/descriptor.py @@ -145,8 +145,16 @@ def share_params(self, base_class, shared_level, resume=False): base_env.stats[kk] += self.get_stats()[kk] mean, stddev = base_env() if not base_class.set_davg_zero: - base_class.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - base_class.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype + base_class.mean.copy_( + torch.tensor( + mean, device=env.DEVICE, dtype=base_class.mean.dtype + ) + ) + base_class.stddev.copy_( + torch.tensor( + stddev, device=env.DEVICE, dtype=base_class.stddev.dtype + ) + ) # must share, even if not do stat self.mean = base_class.mean self.stddev = base_class.stddev diff --git a/deepmd/pt/model/descriptor/repformers.py b/deepmd/pt/model/descriptor/repformers.py index f237088a16..023a84b3ee 100644 --- a/deepmd/pt/model/descriptor/repformers.py +++ b/deepmd/pt/model/descriptor/repformers.py @@ -466,8 +466,16 @@ def forward( comm_dict["recv_num"], g1, comm_dict["communicator"], - torch.tensor(nloc), # pylint: disable=no-explicit-dtype,no-explicit-device - torch.tensor(nall - nloc), # pylint: disable=no-explicit-dtype,no-explicit-device + torch.tensor( + nloc, + dtype=torch.int32, + device=env.DEVICE, + ), # should be int of c++ + torch.tensor( + nall - nloc, + dtype=torch.int32, + device=env.DEVICE, + ), # should be int of c++ ) g1_ext = ret[0].unsqueeze(0) g1, g2, h2 = ll.forward( @@ -530,8 +538,12 @@ def compute_input_stats( self.stats = env_mat_stat.stats mean, stddev = env_mat_stat() if not self.set_davg_zero: - self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype + self.mean.copy_( + torch.tensor(mean, device=env.DEVICE, dtype=self.mean.dtype) + ) + self.stddev.copy_( + torch.tensor(stddev, device=env.DEVICE, dtype=self.stddev.dtype) + ) def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" diff --git a/deepmd/pt/model/descriptor/se_a.py b/deepmd/pt/model/descriptor/se_a.py index ffd645f2b9..8f3c7605d5 100644 --- a/deepmd/pt/model/descriptor/se_a.py +++ b/deepmd/pt/model/descriptor/se_a.py @@ -556,8 +556,12 @@ def compute_input_stats( self.stats = env_mat_stat.stats mean, stddev = env_mat_stat() if not self.set_davg_zero: - self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype + self.mean.copy_( + torch.tensor(mean, device=env.DEVICE, dtype=self.mean.dtype) + ) + self.stddev.copy_( + torch.tensor(stddev, device=env.DEVICE, dtype=self.stddev.dtype) + ) def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" diff --git a/deepmd/pt/model/descriptor/se_atten.py b/deepmd/pt/model/descriptor/se_atten.py index c3174a2011..8f418c28f9 100644 --- a/deepmd/pt/model/descriptor/se_atten.py +++ b/deepmd/pt/model/descriptor/se_atten.py @@ -364,8 +364,12 @@ def compute_input_stats( self.stats = env_mat_stat.stats mean, stddev = env_mat_stat() if not self.set_davg_zero: - self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype + self.mean.copy_( + torch.tensor(mean, device=env.DEVICE, dtype=self.mean.dtype) + ) + self.stddev.copy_( + torch.tensor(stddev, device=env.DEVICE, dtype=self.stddev.dtype) + ) def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" diff --git a/deepmd/pt/model/descriptor/se_r.py b/deepmd/pt/model/descriptor/se_r.py index 4492a6c6b5..12677a3daf 100644 --- a/deepmd/pt/model/descriptor/se_r.py +++ b/deepmd/pt/model/descriptor/se_r.py @@ -207,8 +207,16 @@ def share_params(self, base_class, shared_level, resume=False): base_env.stats[kk] += self.get_stats()[kk] mean, stddev = base_env() if not base_class.set_davg_zero: - base_class.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - base_class.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype + base_class.mean.copy_( + torch.tensor( + mean, device=env.DEVICE, dtype=base_class.mean.dtype + ) + ) + base_class.stddev.copy_( + torch.tensor( + stddev, device=env.DEVICE, dtype=base_class.stddev.dtype + ) + ) self.mean = base_class.mean self.stddev = base_class.stddev # self.load_state_dict(base_class.state_dict()) # this does not work, because it only inits the model @@ -267,8 +275,12 @@ def compute_input_stats( self.stats = env_mat_stat.stats mean, stddev = env_mat_stat() if not self.set_davg_zero: - self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype + self.mean.copy_( + torch.tensor(mean, device=env.DEVICE, dtype=self.mean.dtype) + ) + self.stddev.copy_( + torch.tensor(stddev, device=env.DEVICE, dtype=self.stddev.dtype) + ) def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" diff --git a/deepmd/pt/model/descriptor/se_t.py b/deepmd/pt/model/descriptor/se_t.py index 49dbdaf027..666eba6baf 100644 --- a/deepmd/pt/model/descriptor/se_t.py +++ b/deepmd/pt/model/descriptor/se_t.py @@ -606,8 +606,12 @@ def compute_input_stats( self.stats = env_mat_stat.stats mean, stddev = env_mat_stat() if not self.set_davg_zero: - self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype + self.mean.copy_( + torch.tensor(mean, device=env.DEVICE, dtype=self.mean.dtype) + ) + self.stddev.copy_( + torch.tensor(stddev, device=env.DEVICE, dtype=self.stddev.dtype) + ) def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" diff --git a/deepmd/pt/model/descriptor/se_t_tebd.py b/deepmd/pt/model/descriptor/se_t_tebd.py index c140527f31..9ee9b4dc0b 100644 --- a/deepmd/pt/model/descriptor/se_t_tebd.py +++ b/deepmd/pt/model/descriptor/se_t_tebd.py @@ -698,8 +698,12 @@ def compute_input_stats( self.stats = env_mat_stat.stats mean, stddev = env_mat_stat() if not self.set_davg_zero: - self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype - self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype + self.mean.copy_( + torch.tensor(mean, device=env.DEVICE, dtype=self.mean.dtype) + ) + self.stddev.copy_( + torch.tensor(stddev, device=env.DEVICE, dtype=self.stddev.dtype) + ) def get_stats(self) -> dict[str, StatItem]: """Get the statistics of the descriptor.""" diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index 613baf440e..f3896ec853 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -100,7 +100,7 @@ def get_spin_model(model_params): if not model_params["spin"]["use_spin"] or isinstance( model_params["spin"]["use_spin"][0], int ): - use_spin = np.full(len(model_params["type_map"]), False) # pylint: disable=no-explicit-dtype + use_spin = np.full(len(model_params["type_map"]), False, dtype=bool) use_spin[model_params["spin"]["use_spin"]] = True model_params["spin"]["use_spin"] = use_spin.tolist() # include virtual spin and placeholder types diff --git a/deepmd/pt/model/task/ener.py b/deepmd/pt/model/task/ener.py index 2048c05ba9..e0c5b0951e 100644 --- a/deepmd/pt/model/task/ener.py +++ b/deepmd/pt/model/task/ener.py @@ -126,10 +126,13 @@ def __init__( self.use_tebd = use_tebd self.out_dim = out_dim if bias_atom_e is None: - bias_atom_e = np.zeros([self.ntypes]) # pylint: disable=no-explicit-dtype + # place holder, dtype does not matter + bias_atom_e = np.zeros([self.ntypes], dtype=np.float64) if not use_tebd: assert self.ntypes == len(bias_atom_e), "Element count mismatches!" - bias_atom_e = torch.tensor(bias_atom_e, device=env.DEVICE) # pylint: disable=no-explicit-dtype + bias_atom_e = torch.tensor( + bias_atom_e, device=env.DEVICE, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) self.register_buffer("bias_atom_e", bias_atom_e) filter_layers_dipole = [] diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py index 10f88519e1..6e9829e4b6 100644 --- a/deepmd/pt/model/task/fitting.py +++ b/deepmd/pt/model/task/fitting.py @@ -177,7 +177,9 @@ def __init__( # init constants if bias_atom_e is None: bias_atom_e = np.zeros([self.ntypes, net_dim_out], dtype=np.float64) - bias_atom_e = torch.tensor(bias_atom_e, dtype=self.prec, device=device) + bias_atom_e = torch.tensor( + bias_atom_e, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=device + ) bias_atom_e = bias_atom_e.view([self.ntypes, net_dim_out]) if not self.mixed_types: assert self.ntypes == bias_atom_e.shape[0], "Element count mismatches!" diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index 0f7c030a84..211e1ba564 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -943,7 +943,9 @@ def log_loss_valid(_task_key="Default"): continue if self.multi_task: chosen_index_list = dp_random.choice( - np.arange(self.num_model), # pylint: disable=no-explicit-dtype + np.arange( + self.num_model, dtype=np.int32 + ), # int32 should be enough for # models... p=np.array(self.model_prob), size=self.world_size, replace=True, diff --git a/deepmd/pt/utils/nlist.py b/deepmd/pt/utils/nlist.py index 1060b40ce1..c30ec6dd02 100644 --- a/deepmd/pt/utils/nlist.py +++ b/deepmd/pt/utils/nlist.py @@ -148,7 +148,13 @@ def _trim_mask_distinguish_nlist( nlist = nlist[:, :, :nsel] else: rr = torch.cat( - [rr, torch.ones([batch_size, nloc, nsel - nnei], device=rr.device) + rcut], # pylint: disable=no-explicit-dtype + [ + rr, + torch.ones( + [batch_size, nloc, nsel - nnei], device=rr.device, dtype=rr.dtype + ) + + rcut, + ], dim=-1, ) nlist = torch.cat( @@ -428,7 +434,10 @@ def extend_coord_with_ghosts( """ device = coord.device nf, nloc = atype.shape - aidx = torch.tile(torch.arange(nloc, device=device).unsqueeze(0), [nf, 1]) # pylint: disable=no-explicit-dtype + # int64 for index + aidx = torch.tile( + torch.arange(nloc, device=device, dtype=torch.int64).unsqueeze(0), [nf, 1] + ) if cell is None: nall = nloc extend_coord = coord.clone() @@ -443,13 +452,19 @@ def extend_coord_with_ghosts( # nf x 3 # *2: ghost copies on + and - directions # +1: central cell - nbuff = torch.ceil(rcut / to_face).to(torch.long) + nbuff = torch.ceil(rcut / to_face).to(torch.int64) # 3 nbuff = torch.amax(nbuff, dim=0) # faster than torch.max nbuff_cpu = nbuff.cpu() - xi = torch.arange(-nbuff_cpu[0], nbuff_cpu[0] + 1, 1, device="cpu") # pylint: disable=no-explicit-dtype - yi = torch.arange(-nbuff_cpu[1], nbuff_cpu[1] + 1, 1, device="cpu") # pylint: disable=no-explicit-dtype - zi = torch.arange(-nbuff_cpu[2], nbuff_cpu[2] + 1, 1, device="cpu") # pylint: disable=no-explicit-dtype + xi = torch.arange( + -nbuff_cpu[0], nbuff_cpu[0] + 1, 1, device="cpu", dtype=torch.int64 + ) + yi = torch.arange( + -nbuff_cpu[1], nbuff_cpu[1] + 1, 1, device="cpu", dtype=torch.int64 + ) + zi = torch.arange( + -nbuff_cpu[2], nbuff_cpu[2] + 1, 1, device="cpu", dtype=torch.int64 + ) eye_3 = torch.eye(3, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device="cpu") xyz = xi.view(-1, 1, 1, 1) * eye_3[0] xyz = xyz + yi.view(1, -1, 1, 1) * eye_3[1] diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py index 23fb12f2a4..831d2bef76 100644 --- a/deepmd/pt/utils/stat.py +++ b/deepmd/pt/utils/stat.py @@ -583,7 +583,13 @@ def compute_output_stats_atomic( # correction for missing types missing_types = ntypes - merged_natoms[kk].max() - 1 if missing_types > 0: - nan_padding = np.empty((missing_types, bias_atom_e[kk].shape[1])) # pylint: disable=no-explicit-dtype + assert ( + bias_atom_e[kk].dtype is std_atom_e[kk].dtype + ), "bias and std should be of the same dtypes" + nan_padding = np.empty( + (missing_types, bias_atom_e[kk].shape[1]), + dtype=bias_atom_e[kk].dtype, + ) nan_padding.fill(np.nan) bias_atom_e[kk] = np.concatenate([bias_atom_e[kk], nan_padding], axis=0) std_atom_e[kk] = np.concatenate([std_atom_e[kk], nan_padding], axis=0) diff --git a/deepmd/utils/data.py b/deepmd/utils/data.py index 4c77bcf59a..72e3d58660 100644 --- a/deepmd/utils/data.py +++ b/deepmd/utils/data.py @@ -266,7 +266,7 @@ def get_batch(self, batch_size: int) -> dict: iterator_1 = self.iterator + batch_size if iterator_1 >= set_size: iterator_1 = set_size - idx = np.arange(self.iterator, iterator_1) # pylint: disable=no-explicit-dtype + idx = np.arange(self.iterator, iterator_1, dtype=np.int64) self.iterator += batch_size ret = self._get_subdata(self.batch_set, idx) return ret @@ -290,7 +290,7 @@ def get_test(self, ntests: int = -1) -> dict: else self.test_set["type"].shape[0] ) # print('ntest', self.test_set['type'].shape[0], ntests, ntests_) - idx = np.arange(ntests_) # pylint: disable=no-explicit-dtype + idx = np.arange(ntests_, dtype=np.int64) ret = self._get_subdata(self.test_set, idx=idx) if self.modifier is not None: self.modifier.modify_data(ret, self) @@ -378,14 +378,14 @@ def _idx_map_sel(self, atom_type, type_sel): new_types.append(ii) new_types = np.array(new_types, dtype=int) natoms = new_types.shape[0] - idx = np.arange(natoms) # pylint: disable=no-explicit-dtype + idx = np.arange(natoms, dtype=np.int64) idx_map = np.lexsort((idx, new_types)) return idx_map def _get_natoms_2(self, ntypes): sample_type = self.atom_type natoms = len(sample_type) - natoms_vec = np.zeros(ntypes).astype(int) # pylint: disable=no-explicit-dtype + natoms_vec = np.zeros(ntypes, dtype=np.int64) for ii in range(ntypes): natoms_vec[ii] = np.count_nonzero(sample_type == ii) return natoms, natoms_vec @@ -435,7 +435,7 @@ def _load_test_set(self, shuffle_test: bool): def _shuffle_data(self, data): ret = {} nframes = data["coord"].shape[0] - idx = np.arange(nframes) # pylint: disable=no-explicit-dtype + idx = np.arange(nframes, dtype=np.int64) # the training times of each frame idx = np.repeat(idx, np.reshape(data["numb_copy"], (nframes,))) dp_random.shuffle(idx) @@ -676,7 +676,7 @@ def _load_type_mix(self, set_name: DPPath): def _make_idx_map(self, atom_type): natoms = atom_type.shape[0] - idx = np.arange(natoms) # pylint: disable=no-explicit-dtype + idx = np.arange(natoms, dtype=np.int64) if self.sort_atoms: idx_map = np.lexsort((idx, atom_type)) else: diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py index 7bec0b16f4..2b5fb6e6db 100644 --- a/deepmd/utils/data_system.py +++ b/deepmd/utils/data_system.py @@ -437,7 +437,9 @@ def get_batch_standard(self, sys_idx: Optional[int] = None) -> dict: self.pick_idx = sys_idx else: # prob = self._get_sys_probs(sys_probs, auto_prob_style) - self.pick_idx = dp_random.choice(np.arange(self.nsystems), p=self.sys_probs) # pylint: disable=no-explicit-dtype + self.pick_idx = dp_random.choice( + np.arange(self.nsystems, dtype=np.int32), p=self.sys_probs + ) b_data = self.data_systems[self.pick_idx].get_batch( self.batch_size[self.pick_idx] ) @@ -457,7 +459,9 @@ def get_batch_mixed(self) -> dict: batch_size = self.batch_size[0] batch_data = [] for _ in range(batch_size): - self.pick_idx = dp_random.choice(np.arange(self.nsystems), p=self.sys_probs) # pylint: disable=no-explicit-dtype + self.pick_idx = dp_random.choice( + np.arange(self.nsystems, dtype=np.int32), p=self.sys_probs + ) bb_data = self.data_systems[self.pick_idx].get_batch(1) bb_data["natoms_vec"] = self.natoms_vec[self.pick_idx] bb_data["default_mesh"] = self.default_mesh[self.pick_idx] @@ -721,7 +725,7 @@ def prob_sys_size_ext(keywords, nsystems, nbatch): block_weights.append(weight) nblocks = len(block_str) block_probs = np.array(block_weights) / np.sum(block_weights) - sys_probs = np.zeros([nsystems]) # pylint: disable=no-explicit-dtype + sys_probs = np.zeros([nsystems], dtype=np.float64) for ii in range(nblocks): nbatch_block = nbatch[block_stt[ii] : block_end[ii]] tmp_prob = [float(i) for i in nbatch_block] / np.sum(nbatch_block) diff --git a/deepmd/utils/out_stat.py b/deepmd/utils/out_stat.py index 43af191e62..bc765645dc 100644 --- a/deepmd/utils/out_stat.py +++ b/deepmd/utils/out_stat.py @@ -7,6 +7,10 @@ import numpy as np +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) + def compute_stats_from_redu( output_redu: np.ndarray, @@ -115,8 +119,8 @@ def compute_stats_from_atomic( # compute output bias nframes, nloc, ndim = output.shape ntypes = atype.max() + 1 - output_bias = np.zeros((ntypes, ndim)) # pylint: disable=no-explicit-dtype - output_std = np.zeros((ntypes, ndim)) # pylint: disable=no-explicit-dtype + output_bias = np.zeros((ntypes, ndim), dtype=GLOBAL_NP_FLOAT_PRECISION) + output_std = np.zeros((ntypes, ndim), dtype=GLOBAL_NP_FLOAT_PRECISION) for type_i in range(ntypes): mask = atype == type_i output_bias[type_i] = ( diff --git a/deepmd/utils/pair_tab.py b/deepmd/utils/pair_tab.py index cddc358f27..89f66cc994 100644 --- a/deepmd/utils/pair_tab.py +++ b/deepmd/utils/pair_tab.py @@ -36,6 +36,7 @@ class PairTab: def __init__(self, filename: str, rcut: Optional[float] = None) -> None: """Constructor.""" + self.data_type = np.float64 self.reinit(filename, rcut) def reinit(self, filename: str, rcut: Optional[float] = None) -> None: @@ -56,7 +57,7 @@ def reinit(self, filename: str, rcut: Optional[float] = None) -> None: if filename is None: self.tab_info, self.tab_data = None, None return - self.vdata = np.loadtxt(filename) + self.vdata = np.loadtxt(filename, dtype=self.data_type) self.rmin = self.vdata[0][0] self.rmax = self.vdata[-1][0] self.hh = self.vdata[1][0] - self.vdata[0][0] @@ -168,11 +169,14 @@ def _check_table_upper_boundary(self) -> None: # if table values decay to `0` before rcut, pad table with `0`s. elif self.rcut > self.rmax: - pad_zero = np.zeros((rcut_idx - upper_idx, self.ncol)) # pylint: disable=no-explicit-dtype - pad_zero[:, 0] = np.linspace( # pylint: disable=no-explicit-dtype + pad_zero = np.zeros( + (rcut_idx - upper_idx, self.ncol), dtype=self.vdata.dtype + ) + pad_zero[:, 0] = np.linspace( self.rmax + self.hh, self.rmax + self.hh * (rcut_idx - upper_idx), rcut_idx - upper_idx, + dtype=self.vdata.dtype, ) self.vdata = np.concatenate((self.vdata, pad_zero), axis=0) else: @@ -186,12 +190,15 @@ def _check_table_upper_boundary(self) -> None: log.warning( "The rcut goes beyond table upper boundary, performing extrapolation." ) - pad_extrapolation = np.zeros((rcut_idx - upper_idx, self.ncol)) # pylint: disable=no-explicit-dtype + pad_extrapolation = np.zeros( + (rcut_idx - upper_idx, self.ncol), dtype=self.vdata.dtype + ) - pad_extrapolation[:, 0] = np.linspace( # pylint: disable=no-explicit-dtype + pad_extrapolation[:, 0] = np.linspace( self.rmax + self.hh, self.rmax + self.hh * (rcut_idx - upper_idx), rcut_idx - upper_idx, + dtype=self.vdata.dtype, ) # need to calculate table values to fill in with cubic spline pad_extrapolation = self._extrapolate_table(pad_extrapolation) @@ -252,7 +259,9 @@ def _extrapolate_table(self, pad_extrapolation: np.array) -> np.array: return pad_extrapolation def _make_data(self): - data = np.zeros([self.ntypes * self.ntypes * 4 * self.nspline]) # pylint: disable=no-explicit-dtype + data = np.zeros( + [self.ntypes * self.ntypes * 4 * self.nspline], dtype=self.data_type + ) stride = 4 * self.nspline idx_iter = 0 xx = self.vdata[:, 0] @@ -262,7 +271,7 @@ def _make_data(self): cs = CubicSpline(xx, vv, bc_type="clamped") dd = cs(xx, 1) dd *= self.hh - dtmp = np.zeros(stride) # pylint: disable=no-explicit-dtype + dtmp = np.zeros(stride, dtype=self.data_type) for ii in range(self.nspline): dtmp[ii * 4 + 0] = 2 * vv[ii] - 2 * vv[ii + 1] + dd[ii] + dd[ii + 1] dtmp[ii * 4 + 1] = ( diff --git a/deepmd/utils/spin.py b/deepmd/utils/spin.py index 41ea52df88..9ea5fc0713 100644 --- a/deepmd/utils/spin.py +++ b/deepmd/utils/spin.py @@ -6,6 +6,10 @@ import numpy as np +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) + class Spin: """Class for spin, mainly processes the spin type-related information. @@ -36,6 +40,7 @@ def __init__( use_spin: list[bool], virtual_scale: Union[list[float], float], ) -> None: + type_dtype = np.int32 self.ntypes_real = len(use_spin) self.ntypes_spin = use_spin.count(True) self.use_spin = np.array(use_spin) @@ -43,19 +48,24 @@ def __init__( self.ntypes_real_and_spin = self.ntypes_real + self.ntypes_spin self.ntypes_placeholder = self.ntypes_real - self.ntypes_spin self.ntypes_input = 2 * self.ntypes_real # with placeholder for input types - self.real_type = np.arange(self.ntypes_real) # pylint: disable=no-explicit-dtype - self.spin_type = np.arange(self.ntypes_real)[self.use_spin] + self.ntypes_real # pylint: disable=no-explicit-dtype + self.real_type = np.arange(self.ntypes_real, dtype=type_dtype) + self.spin_type = self.real_type[self.use_spin] + self.ntypes_real self.real_and_spin_type = np.concatenate([self.real_type, self.spin_type]) self.placeholder_type = ( - np.arange(self.ntypes_real)[~self.use_spin] + self.ntypes_real # pylint: disable=no-explicit-dtype + np.arange(self.ntypes_real, dtype=type_dtype)[~self.use_spin] + + self.ntypes_real ) - self.spin_placeholder_type = np.arange(self.ntypes_real) + self.ntypes_real # pylint: disable=no-explicit-dtype - self.input_type = np.arange(self.ntypes_real * 2) # pylint: disable=no-explicit-dtype + self.spin_placeholder_type = ( + np.arange(self.ntypes_real, dtype=type_dtype) + self.ntypes_real + ) + self.input_type = np.arange(self.ntypes_real * 2, dtype=type_dtype) if isinstance(virtual_scale, list): if len(virtual_scale) == self.ntypes_real: self.virtual_scale = virtual_scale elif len(virtual_scale) == self.ntypes_spin: - self.virtual_scale = np.zeros(self.ntypes_real) # pylint: disable=no-explicit-dtype + self.virtual_scale = np.zeros( + self.ntypes_real, dtype=GLOBAL_NP_FLOAT_PRECISION + ) self.virtual_scale[self.use_spin] = virtual_scale else: raise ValueError( diff --git a/source/tests/common/test_auto_batch_size.py b/source/tests/common/test_auto_batch_size.py index cc1e6bf25a..9cb86d4923 100644 --- a/source/tests/common/test_auto_batch_size.py +++ b/source/tests/common/test_auto_batch_size.py @@ -1,6 +1,9 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import os import unittest +from unittest.mock import ( + patch, +) import array_api_strict as xp @@ -77,7 +80,7 @@ def test_execute_oom_cpu(self): self.assertEqual(nb, 128) self.assertEqual(result.shape, (128, 2)) - @unittest.mock.patch.dict(os.environ, {"DP_INFER_BATCH_SIZE": "256"}, clear=True) + @patch.dict(os.environ, {"DP_INFER_BATCH_SIZE": "256"}, clear=True) def test_execute_oom_environment_variables(self): # DP_INFER_BATCH_SIZE = 256 = 128 * 2, nb is always 128 auto_batch_size = CustomizedAutoBatchSizeGPU(999, 2.0) diff --git a/source/tests/common/test_common.py b/source/tests/common/test_common.py new file mode 100644 index 0000000000..fe9054d6ad --- /dev/null +++ b/source/tests/common/test_common.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import array_api_compat +import ml_dtypes +import numpy as np + +from deepmd.dpmodel.common import ( + get_xp_precision, +) +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) + + +class TestGetXPPrecision(unittest.TestCase): + def test(self): + aa = np.zeros(3) + xp = array_api_compat.array_namespace(aa) + self.assertEqual(get_xp_precision(xp, "float16"), xp.float16) + self.assertEqual(get_xp_precision(xp, "float32"), xp.float32) + self.assertEqual(get_xp_precision(xp, "float64"), xp.float64) + self.assertEqual(get_xp_precision(xp, "single"), xp.float32) + self.assertEqual(get_xp_precision(xp, "double"), xp.float64) + self.assertEqual(get_xp_precision(xp, "global"), GLOBAL_NP_FLOAT_PRECISION) + self.assertEqual(get_xp_precision(xp, "default"), GLOBAL_NP_FLOAT_PRECISION) + self.assertEqual(get_xp_precision(xp, "bfloat16"), ml_dtypes.bfloat16) + + # Test invalid input + with self.assertRaises(ValueError): + get_xp_precision(xp, "invalid_precision") From 4d5004830384577922664ca70431aef1c818e799 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 24 Oct 2024 04:43:14 -0400 Subject: [PATCH 078/193] feat(jax): checkpoint I/O (#4236) Implement a JAX checkpoint format. I name it `*.jax` as I don't find existing conventions. ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced serialization and deserialization functionalities for JAX backend models. - Added support for the `.jax` file suffix in the backend configuration. - Enhanced attribute handling logic across various classes to ensure proper processing of non-null values. - **Bug Fixes** - Enhanced cleanup processes in the test suite to improve reliability. - **Chores** - Updated dependencies in the project configuration for better JAX compatibility. - Adjusted linting rules to accommodate JAX-related code. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/backend/jax.py | 17 +++- deepmd/jax/atomic_model/base_atomic_model.py | 3 + deepmd/jax/common.py | 14 +++ deepmd/jax/descriptor/dpa1.py | 3 + deepmd/jax/descriptor/se_e2_a.py | 3 + deepmd/jax/fitting/fitting.py | 3 + deepmd/jax/utils/exclude_mask.py | 5 + deepmd/jax/utils/serialization.py | 97 ++++++++++++++++++++ deepmd/jax/utils/type_embed.py | 3 + pyproject.toml | 6 +- source/tests/consistent/io/test_io.py | 12 ++- 11 files changed, 156 insertions(+), 10 deletions(-) create mode 100644 deepmd/jax/utils/serialization.py diff --git a/deepmd/backend/jax.py b/deepmd/backend/jax.py index db92d6bed1..bb2fba5a7c 100644 --- a/deepmd/backend/jax.py +++ b/deepmd/backend/jax.py @@ -32,14 +32,13 @@ class JAXBackend(Backend): name = "JAX" """The formal name of the backend.""" features: ClassVar[Backend.Feature] = ( - Backend.Feature(0) + Backend.Feature.IO # Backend.Feature.ENTRY_POINT # | Backend.Feature.DEEP_EVAL # | Backend.Feature.NEIGHBOR_STAT - # | Backend.Feature.IO ) """The features of the backend.""" - suffixes: ClassVar[list[str]] = [] + suffixes: ClassVar[list[str]] = [".jax"] """The suffixes of the backend.""" def is_available(self) -> bool: @@ -94,7 +93,11 @@ def serialize_hook(self) -> Callable[[str], dict]: Callable[[str], dict] The serialize hook of the backend. """ - raise NotImplementedError + from deepmd.jax.utils.serialization import ( + serialize_from_file, + ) + + return serialize_from_file @property def deserialize_hook(self) -> Callable[[str, dict], None]: @@ -105,4 +108,8 @@ def deserialize_hook(self) -> Callable[[str, dict], None]: Callable[[str, dict], None] The deserialize hook of the backend. """ - raise NotImplementedError + from deepmd.jax.utils.serialization import ( + deserialize_to_file, + ) + + return deserialize_to_file diff --git a/deepmd/jax/atomic_model/base_atomic_model.py b/deepmd/jax/atomic_model/base_atomic_model.py index 90920879c2..ffd58daf5e 100644 --- a/deepmd/jax/atomic_model/base_atomic_model.py +++ b/deepmd/jax/atomic_model/base_atomic_model.py @@ -1,5 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from deepmd.jax.common import ( + ArrayAPIVariable, to_jax_array, ) from deepmd.jax.utils.exclude_mask import ( @@ -11,6 +12,8 @@ def base_atomic_model_set_attr(name, value): if name in {"out_bias", "out_std"}: value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) elif name == "pair_excl" and value is not None: value = PairExcludeMask(value.ntypes, value.exclude_types) elif name == "atom_excl" and value is not None: diff --git a/deepmd/jax/common.py b/deepmd/jax/common.py index 9c144a41d1..f372e97eb5 100644 --- a/deepmd/jax/common.py +++ b/deepmd/jax/common.py @@ -81,3 +81,17 @@ def __setattr__(self, name: str, value: Any) -> None: return super().__setattr__(name, value) return FlaxModule + + +class ArrayAPIVariable(nnx.Variable): + def __array__(self, *args, **kwargs): + return self.value.__array__(*args, **kwargs) + + def __array_namespace__(self, *args, **kwargs): + return self.value.__array_namespace__(*args, **kwargs) + + def __dlpack__(self, *args, **kwargs): + return self.value.__dlpack__(*args, **kwargs) + + def __dlpack_device__(self, *args, **kwargs): + return self.value.__dlpack_device__(*args, **kwargs) diff --git a/deepmd/jax/descriptor/dpa1.py b/deepmd/jax/descriptor/dpa1.py index 0528e4bb93..fef9bd5448 100644 --- a/deepmd/jax/descriptor/dpa1.py +++ b/deepmd/jax/descriptor/dpa1.py @@ -13,6 +13,7 @@ NeighborGatedAttentionLayer as NeighborGatedAttentionLayerDP, ) from deepmd.jax.common import ( + ArrayAPIVariable, flax_module, to_jax_array, ) @@ -65,6 +66,8 @@ class DescrptBlockSeAtten(DescrptBlockSeAttenDP): def __setattr__(self, name: str, value: Any) -> None: if name in {"mean", "stddev"}: value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) elif name in {"embeddings", "embeddings_strip"}: if value is not None: value = NetworkCollection.deserialize(value.serialize()) diff --git a/deepmd/jax/descriptor/se_e2_a.py b/deepmd/jax/descriptor/se_e2_a.py index d1a6e9a8d9..31c147ad9d 100644 --- a/deepmd/jax/descriptor/se_e2_a.py +++ b/deepmd/jax/descriptor/se_e2_a.py @@ -5,6 +5,7 @@ from deepmd.dpmodel.descriptor.se_e2_a import DescrptSeAArrayAPI as DescrptSeADP from deepmd.jax.common import ( + ArrayAPIVariable, flax_module, to_jax_array, ) @@ -26,6 +27,8 @@ class DescrptSeA(DescrptSeADP): def __setattr__(self, name: str, value: Any) -> None: if name in {"dstd", "davg"}: value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) elif name in {"embeddings"}: if value is not None: value = NetworkCollection.deserialize(value.serialize()) diff --git a/deepmd/jax/fitting/fitting.py b/deepmd/jax/fitting/fitting.py index f979db4d41..cef1f667b3 100644 --- a/deepmd/jax/fitting/fitting.py +++ b/deepmd/jax/fitting/fitting.py @@ -6,6 +6,7 @@ from deepmd.dpmodel.fitting.dos_fitting import DOSFittingNet as DOSFittingNetDP from deepmd.dpmodel.fitting.ener_fitting import EnergyFittingNet as EnergyFittingNetDP from deepmd.jax.common import ( + ArrayAPIVariable, flax_module, to_jax_array, ) @@ -29,6 +30,8 @@ def setattr_for_general_fitting(name: str, value: Any) -> Any: "aparam_inv_std", }: value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) elif name == "emask": value = AtomExcludeMask(value.ntypes, value.exclude_types) elif name == "nets": diff --git a/deepmd/jax/utils/exclude_mask.py b/deepmd/jax/utils/exclude_mask.py index a6cf210f94..18d13d9400 100644 --- a/deepmd/jax/utils/exclude_mask.py +++ b/deepmd/jax/utils/exclude_mask.py @@ -6,6 +6,7 @@ from deepmd.dpmodel.utils.exclude_mask import AtomExcludeMask as AtomExcludeMaskDP from deepmd.dpmodel.utils.exclude_mask import PairExcludeMask as PairExcludeMaskDP from deepmd.jax.common import ( + ArrayAPIVariable, flax_module, to_jax_array, ) @@ -16,6 +17,8 @@ class AtomExcludeMask(AtomExcludeMaskDP): def __setattr__(self, name: str, value: Any) -> None: if name in {"type_mask"}: value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) return super().__setattr__(name, value) @@ -24,4 +27,6 @@ class PairExcludeMask(PairExcludeMaskDP): def __setattr__(self, name: str, value: Any) -> None: if name in {"type_mask"}: value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) return super().__setattr__(name, value) diff --git a/deepmd/jax/utils/serialization.py b/deepmd/jax/utils/serialization.py new file mode 100644 index 0000000000..43070f8a07 --- /dev/null +++ b/deepmd/jax/utils/serialization.py @@ -0,0 +1,97 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from pathlib import ( + Path, +) + +import orbax.checkpoint as ocp + +from deepmd.jax.env import ( + jax, + nnx, +) +from deepmd.jax.model.model import ( + BaseModel, + get_model, +) + + +def deserialize_to_file(model_file: str, data: dict) -> None: + """Deserialize the dictionary to a model file. + + Parameters + ---------- + model_file : str + The model file to be saved. + data : dict + The dictionary to be deserialized. + """ + if model_file.endswith(".jax"): + model = BaseModel.deserialize(data["model"]) + model_def_script = data["model_def_script"] + _, state = nnx.split(model) + with ocp.Checkpointer( + ocp.CompositeCheckpointHandler("state", "model_def_script") + ) as checkpointer: + checkpointer.save( + Path(model_file).absolute(), + ocp.args.Composite( + state=ocp.args.StandardSave(state.to_pure_dict()), + model_def_script=ocp.args.JsonSave(model_def_script), + ), + ) + else: + raise ValueError("JAX backend only supports converting .jax directory") + + +def serialize_from_file(model_file: str) -> dict: + """Serialize the model file to a dictionary. + + Parameters + ---------- + model_file : str + The model file to be serialized. + + Returns + ------- + dict + The serialized model data. + """ + if model_file.endswith(".jax"): + with ocp.Checkpointer( + ocp.CompositeCheckpointHandler("state", "model_def_script") + ) as checkpointer: + data = checkpointer.restore( + Path(model_file).absolute(), + ocp.args.Composite( + state=ocp.args.StandardRestore(), + model_def_script=ocp.args.JsonRestore(), + ), + ) + state = data.state + + # convert str "1" to int 1 key + def convert_str_to_int_key(item: dict): + for key, value in item.copy().items(): + if isinstance(value, dict): + convert_str_to_int_key(value) + if key.isdigit(): + item[int(key)] = item.pop(key) + + convert_str_to_int_key(state) + + model_def_script = data.model_def_script + abstract_model = get_model(model_def_script) + graphdef, abstract_state = nnx.split(abstract_model) + abstract_state.replace_by_pure_dict(state) + model = nnx.merge(graphdef, abstract_state) + model_dict = model.serialize() + data = { + "backend": "JAX", + "jax_version": jax.__version__, + "model": model_dict, + "model_def_script": model_def_script, + "@variables": {}, + } + return data + else: + raise ValueError("JAX backend only supports converting .jax directory") diff --git a/deepmd/jax/utils/type_embed.py b/deepmd/jax/utils/type_embed.py index 3143460244..30cd9f45a9 100644 --- a/deepmd/jax/utils/type_embed.py +++ b/deepmd/jax/utils/type_embed.py @@ -5,6 +5,7 @@ from deepmd.dpmodel.utils.type_embed import TypeEmbedNet as TypeEmbedNetDP from deepmd.jax.common import ( + ArrayAPIVariable, flax_module, to_jax_array, ) @@ -18,6 +19,8 @@ class TypeEmbedNet(TypeEmbedNetDP): def __setattr__(self, name: str, value: Any) -> None: if name in {"econf_tebd"}: value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) if name in {"embedding_net"}: value = EmbeddingNet.deserialize(value.serialize()) return super().__setattr__(name, value) diff --git a/pyproject.toml b/pyproject.toml index 4dbff24f13..3bd18d42a6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -137,7 +137,10 @@ cu12 = [ ] jax = [ 'jax>=0.4.33;python_version>="3.10"', - 'flax>=0.8.0;python_version>="3.10"', + 'flax>=0.10.0;python_version>="3.10"', + 'orbax-checkpoint;python_version>="3.10"', + # The pinning of ml_dtypes may conflict with TF + # 'jax-ai-stack;python_version>="3.10"', ] [tool.deepmd_build_backend.scripts] @@ -402,6 +405,7 @@ banned-module-level-imports = [ # Also ignore `E402` in all `__init__.py` files. "deepmd/tf/**" = ["TID253"] "deepmd/pt/**" = ["TID253"] +"deepmd/jax/**" = ["TID253"] "source/tests/tf/**" = ["TID253"] "source/tests/pt/**" = ["TID253"] "source/tests/universal/pt/**" = ["TID253"] diff --git a/source/tests/consistent/io/test_io.py b/source/tests/consistent/io/test_io.py index 71e4002128..feafde234d 100644 --- a/source/tests/consistent/io/test_io.py +++ b/source/tests/consistent/io/test_io.py @@ -1,5 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import copy +import shutil import unittest from pathlib import ( Path, @@ -60,15 +61,17 @@ def save_data_to_model(self, model_file: str, data: dict) -> None: def tearDown(self): prefix = "test_consistent_io_" + self.__class__.__name__.lower() for ii in Path(".").glob(prefix + ".*"): - if Path(ii).exists(): + if Path(ii).is_file(): Path(ii).unlink() + elif Path(ii).is_dir(): + shutil.rmtree(ii) def test_data_equal(self): prefix = "test_consistent_io_" + self.__class__.__name__.lower() - for backend_name in ("tensorflow", "pytorch", "dpmodel"): + for backend_name in ("tensorflow", "pytorch", "dpmodel", "jax"): with self.subTest(backend_name=backend_name): backend = Backend.get_backend(backend_name)() - if not backend.is_available: + if not backend.is_available(): continue reference_data = copy.deepcopy(self.data) self.save_data_to_model(prefix + backend.suffixes[0], reference_data) @@ -80,6 +83,7 @@ def test_data_equal(self): "backend", "tf_version", "pt_version", + "jax_version", "@variables", # dpmodel only "software", @@ -123,7 +127,7 @@ def test_deep_eval(self): rets = [] for backend_name in ("tensorflow", "pytorch", "dpmodel"): backend = Backend.get_backend(backend_name)() - if not backend.is_available: + if not backend.is_available(): continue reference_data = copy.deepcopy(self.data) self.save_data_to_model(prefix + backend.suffixes[0], reference_data) From 02580c2fd9e1f35b7fb9b3e6320a67deb553f163 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 24 Oct 2024 05:32:34 -0400 Subject: [PATCH 079/193] ci: bump TF to 2.18, PT to 2.5 (#4228) This is prepared for the upcoming TF 2.18, which needs CUDNN 9. In the future, I may move all pinnings into pyproject.toml... ## Summary by CodeRabbit - **New Features** - Enhanced dependency management for CUDA and Python workflows. - Introduced new jobs for better organization of test duration handling. - **Bug Fixes** - Updated TensorFlow and Torch versions for improved compatibility and performance. - Refined version requirements for TensorFlow based on detected CUDA versions. - **Documentation** - Adjusted testing commands and linting configurations for clarity and compliance. - **Chores** - Streamlined caching mechanisms to optimize test duration tracking. --------- Signed-off-by: Jinzhe Zeng --- .github/workflows/test_cuda.yml | 4 ++-- .github/workflows/test_python.yml | 2 +- backend/find_pytorch.py | 18 ++++++++++++++++++ backend/find_tensorflow.py | 6 +++--- pyproject.toml | 4 +--- 5 files changed, 25 insertions(+), 9 deletions(-) diff --git a/.github/workflows/test_cuda.yml b/.github/workflows/test_cuda.yml index d60a9c909a..6bf4c8552f 100644 --- a/.github/workflows/test_cuda.yml +++ b/.github/workflows/test_cuda.yml @@ -47,7 +47,7 @@ jobs: && sudo apt-get -y install cuda-12-3 libcudnn8=8.9.5.*-1+cuda12.3 if: false # skip as we use nvidia image - run: python -m pip install -U uv - - run: source/install/uv_with_retry.sh pip install --system "tensorflow>=2.15.0rc0" "torch==2.3.1.*" + - run: source/install/uv_with_retry.sh pip install --system "tensorflow~=2.18.0rc2" "torch~=2.5.0" - run: | export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') export TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') @@ -63,7 +63,7 @@ jobs: CUDA_VISIBLE_DEVICES: 0 - name: Download libtorch run: | - wget https://download.pytorch.org/libtorch/cu121/libtorch-cxx11-abi-shared-with-deps-2.2.1%2Bcu121.zip -O libtorch.zip + wget https://download.pytorch.org/libtorch/cu124/libtorch-cxx11-abi-shared-with-deps-2.5.0%2Bcu124.zip -O libtorch.zip unzip libtorch.zip - run: | export CMAKE_PREFIX_PATH=$GITHUB_WORKSPACE/libtorch diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index 87d7266e03..e46bddd98a 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -26,7 +26,7 @@ jobs: - run: python -m pip install -U uv - run: | source/install/uv_with_retry.sh pip install --system mpich - source/install/uv_with_retry.sh pip install --system "torch==2.3.0+cpu.cxx11.abi" -i https://download.pytorch.org/whl/ + source/install/uv_with_retry.sh pip install --system torch -i https://download.pytorch.org/whl/cpu export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') source/install/uv_with_retry.sh pip install --system --only-binary=horovod -e .[cpu,test,jax] horovod[tensorflow-cpu] mpi4py env: diff --git a/backend/find_pytorch.py b/backend/find_pytorch.py index e01f4e84fe..6ca4ddb0ab 100644 --- a/backend/find_pytorch.py +++ b/backend/find_pytorch.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import importlib import os +import platform import site from functools import ( lru_cache, @@ -22,6 +23,9 @@ Union, ) +from packaging.specifiers import ( + SpecifierSet, +) from packaging.version import ( Version, ) @@ -104,6 +108,20 @@ def get_pt_requirement(pt_version: str = "") -> dict: """ if pt_version is None: return {"torch": []} + if ( + os.environ.get("CIBUILDWHEEL", "0") == "1" + and platform.system() == "Linux" + and platform.machine() == "x86_64" + ): + cuda_version = os.environ.get("CUDA_VERSION", "12.2") + if cuda_version == "" or cuda_version in SpecifierSet(">=12,<13"): + # CUDA 12.2, cudnn 9 + pt_version = "2.5.0" + elif cuda_version in SpecifierSet(">=11,<12"): + # CUDA 11.8, cudnn 8 + pt_version = "2.3.1" + else: + raise RuntimeError("Unsupported CUDA version") from None if pt_version == "": pt_version = os.environ.get("PYTORCH_VERSION", "") diff --git a/backend/find_tensorflow.py b/backend/find_tensorflow.py index 5b0de0b2dd..1fc3a8a6d9 100644 --- a/backend/find_tensorflow.py +++ b/backend/find_tensorflow.py @@ -85,14 +85,14 @@ def find_tensorflow() -> tuple[Optional[str], list[str]]: if os.environ.get("CIBUILDWHEEL", "0") == "1": cuda_version = os.environ.get("CUDA_VERSION", "12.2") if cuda_version == "" or cuda_version in SpecifierSet(">=12,<13"): - # CUDA 12.2 + # CUDA 12.2, cudnn 9 requires.extend( [ - "tensorflow-cpu>=2.15.0rc0; platform_machine=='x86_64' and platform_system == 'Linux'", + "tensorflow-cpu>=2.18.0rc0; platform_machine=='x86_64' and platform_system == 'Linux'", ] ) elif cuda_version in SpecifierSet(">=11,<12"): - # CUDA 11.8 + # CUDA 11.8, cudnn 8 requires.extend( [ "tensorflow-cpu>=2.5.0rc0,<2.15; platform_machine=='x86_64' and platform_system == 'Linux'", diff --git a/pyproject.toml b/pyproject.toml index 3bd18d42a6..6f0404174d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -132,7 +132,7 @@ cu12 = [ "nvidia-curand-cu12", "nvidia-cusolver-cu12", "nvidia-cusparse-cu12", - "nvidia-cudnn-cu12<9", + "nvidia-cudnn-cu12", "nvidia-cuda-nvcc-cu12", ] jax = [ @@ -279,8 +279,6 @@ PATH = "/usr/lib64/mpich/bin:$PATH" UV_EXTRA_INDEX_URL = "https://download.pytorch.org/whl/cpu" # trick to find the correction version of mpich CMAKE_PREFIX_PATH="/opt/python/cp311-cp311/" -# PT 2.4.0 requires cudnn 9, incompatible with TF with cudnn 8 -PYTORCH_VERSION = "2.3.1" [tool.cibuildwheel.windows] test-extras = ["cpu", "torch"] From a66afd34ec81187dc196b0557bd672bf39da8fc7 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 25 Oct 2024 04:18:38 -0400 Subject: [PATCH 080/193] ci: bump tensorflow/build image to 2.18 (#4252) Fix the CI error. ## Summary by CodeRabbit - **Chores** - Updated TensorFlow build version from 2.15 to 2.18 in the C library build workflow. This change ensures compatibility with the latest TensorFlow features and improvements during the library's build process. Signed-off-by: Jinzhe Zeng --- .github/workflows/package_c.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/package_c.yml b/.github/workflows/package_c.yml index c5a3a3a7b0..f5e4a97d56 100644 --- a/.github/workflows/package_c.yml +++ b/.github/workflows/package_c.yml @@ -18,7 +18,7 @@ jobs: strategy: matrix: include: - - tensorflow_build_version: "2.15" + - tensorflow_build_version: "2.18" tensorflow_version: "" filename: libdeepmd_c.tar.gz - tensorflow_build_version: "2.14" From 659f90d0507c66cd13c8c3de408de61710f413d3 Mon Sep 17 00:00:00 2001 From: Jia-Xin Zhu <53895049+ChiahsinChu@users.noreply.github.com> Date: Sat, 26 Oct 2024 11:20:09 +0800 Subject: [PATCH 081/193] feat(pt): support loss plugin for external package (#4248) support loss plugin for external package in pt backend ## Summary by CodeRabbit - **New Features** - Introduced a new loss class, `TaskLoss`, enhancing loss functionality in training. - Updated loss selection process to dynamically accommodate new loss types. - **Bug Fixes** - Improved error handling for unsupported loss types, allowing for more flexible integration. - Integrated plugin registry functionality into the `TaskLoss` class for better registration within the system. --- deepmd/pt/loss/loss.py | 25 ++++++++++++++++++++++++- deepmd/pt/train/training.py | 4 +++- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/deepmd/pt/loss/loss.py b/deepmd/pt/loss/loss.py index 1a091e074e..5447c8735b 100644 --- a/deepmd/pt/loss/loss.py +++ b/deepmd/pt/loss/loss.py @@ -9,9 +9,12 @@ from deepmd.utils.data import ( DataRequirementItem, ) +from deepmd.utils.plugin import ( + make_plugin_registry, +) -class TaskLoss(torch.nn.Module, ABC): +class TaskLoss(torch.nn.Module, ABC, make_plugin_registry("loss")): def __init__(self, **kwargs): """Construct loss.""" super().__init__() @@ -38,3 +41,23 @@ def display_if_exist(loss: torch.Tensor, find_property: float) -> torch.Tensor: whether the property is found """ return loss if bool(find_property) else torch.nan + + @classmethod + def get_loss(cls, loss_params: dict) -> "TaskLoss": + """Get the loss module by the parameters. + + By default, all the parameters are directly passed to the constructor. + If not, override this method. + + Parameters + ---------- + loss_params : dict + The loss parameters + + Returns + ------- + TaskLoss + The loss module + """ + loss = cls(**loss_params) + return loss diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index 211e1ba564..466080d34c 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -28,6 +28,7 @@ EnergySpinLoss, EnergyStdLoss, PropertyLoss, + TaskLoss, TensorLoss, ) from deepmd.pt.model.model import ( @@ -1260,7 +1261,8 @@ def get_loss(loss_params, start_lr, _ntypes, _model): loss_params["task_dim"] = task_dim return PropertyLoss(**loss_params) else: - raise NotImplementedError + loss_params["starter_learning_rate"] = start_lr + return TaskLoss.get_class_by_type(loss_type).get_loss(loss_params) def get_single_model( From fa61d697f2227ee658b8ccf82e8700aa9b44a9a8 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 26 Oct 2024 01:17:57 -0400 Subject: [PATCH 082/193] feat(jax/array-api): se_e2_r (#4257) ## Summary by CodeRabbit - **New Features** - Introduced a new descriptor class, `DescrptSeR`, enhancing compatibility with JAX and Array API. - Added custom logic for attribute handling in the new descriptor class. - **Bug Fixes** - Improved error handling and type conversion for tensor operations. - **Tests** - Enhanced testing framework for the `DescrptSeR` descriptor, including support for JAX and Array API Strict backends. - Updated test class to better reflect the focus on the `DescrptSeR` descriptor. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- deepmd/dpmodel/descriptor/se_r.py | 38 ++++++++----- deepmd/jax/descriptor/__init__.py | 4 ++ deepmd/jax/descriptor/se_e2_r.py | 41 ++++++++++++++ .../array_api_strict/descriptor/se_e2_r.py | 32 +++++++++++ .../tests/consistent/descriptor/test_se_r.py | 55 ++++++++++++++++++- 5 files changed, 155 insertions(+), 15 deletions(-) create mode 100644 deepmd/jax/descriptor/se_e2_r.py create mode 100644 source/tests/array_api_strict/descriptor/se_e2_r.py diff --git a/deepmd/dpmodel/descriptor/se_r.py b/deepmd/dpmodel/descriptor/se_r.py index c9d27175d6..6d0ddc5621 100644 --- a/deepmd/dpmodel/descriptor/se_r.py +++ b/deepmd/dpmodel/descriptor/se_r.py @@ -6,6 +6,7 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel import ( @@ -13,6 +14,10 @@ PRECISION_DICT, NativeOP, ) +from deepmd.dpmodel.common import ( + get_xp_precision, + to_numpy_array, +) from deepmd.dpmodel.utils import ( EmbeddingNet, EnvMat, @@ -25,9 +30,6 @@ from deepmd.dpmodel.utils.update_sel import ( UpdateSel, ) -from deepmd.env import ( - GLOBAL_NP_FLOAT_PRECISION, -) from deepmd.utils.data_system import ( DeepmdDataSystem, ) @@ -144,7 +146,7 @@ def __init__( self.env_protection = env_protection in_dim = 1 # not considiering type embedding - self.embeddings = NetworkCollection( + embeddings = NetworkCollection( ntypes=self.ntypes, ndim=(1 if self.type_one_side else 2), network_type="embedding_network", @@ -152,7 +154,7 @@ def __init__( if not self.type_one_side: raise NotImplementedError("type_one_side == False not implemented") for ii in range(self.ntypes): - self.embeddings[(ii,)] = EmbeddingNet( + embeddings[(ii,)] = EmbeddingNet( in_dim, self.neuron, self.activation_function, @@ -160,8 +162,9 @@ def __init__( self.precision, seed=child_seed(seed, ii), ) + self.embeddings = embeddings self.env_mat = EnvMat(self.rcut, self.rcut_smth, protection=self.env_protection) - self.nnei = np.sum(self.sel) + self.nnei = np.sum(self.sel).item() self.davg = np.zeros( [self.ntypes, self.nnei, 1], dtype=PRECISION_DICT[self.precision] ) @@ -169,6 +172,7 @@ def __init__( [self.ntypes, self.nnei, 1], dtype=PRECISION_DICT[self.precision] ) self.orig_sel = self.sel + self.sel_cumsum = [0, *np.cumsum(self.sel).tolist()] def __setitem__(self, key, value): if key in ("avg", "data_avg", "davg"): @@ -279,8 +283,9 @@ def cal_g( ss, ll, ): + xp = array_api_compat.array_namespace(ss) nf, nloc, nnei = ss.shape[0:3] - ss = ss.reshape(nf, nloc, nnei, 1) + ss = xp.reshape(ss, (nf, nloc, nnei, 1)) # nf x nloc x nnei x ng gg = self.embeddings[(ll,)].call(ss) return gg @@ -321,29 +326,34 @@ def call( sw The smooth switch function. """ + xp = array_api_compat.array_namespace(coord_ext) del mapping # nf x nloc x nnei x 1 rr, diff, ww = self.env_mat.call( coord_ext, atype_ext, nlist, self.davg, self.dstd, True ) nf, nloc, nnei, _ = rr.shape - sec = np.append([0], np.cumsum(self.sel)) + sec = self.sel_cumsum ng = self.neuron[-1] - xyz_scatter = np.zeros([nf, nloc, ng], dtype=PRECISION_DICT[self.precision]) + xyz_scatter = xp.zeros( + [nf, nloc, ng], dtype=get_xp_precision(xp, self.precision) + ) exclude_mask = self.emask.build_type_exclude_mask(nlist, atype_ext) + rr = xp.astype(rr, xyz_scatter.dtype) for tt in range(self.ntypes): mm = exclude_mask[:, :, sec[tt] : sec[tt + 1]] tr = rr[:, :, sec[tt] : sec[tt + 1], :] - tr = tr * mm[:, :, :, None] + tr = tr * xp.astype(mm[:, :, :, None], tr.dtype) gg = self.cal_g(tr, tt) - gg = np.mean(gg, axis=2) + gg = xp.mean(gg, axis=2) # nf x nloc x ng x 1 xyz_scatter += gg * (self.sel[tt] / self.nnei) res_rescale = 1.0 / 5.0 res = xyz_scatter * res_rescale - res = res.reshape(nf, nloc, ng).astype(GLOBAL_NP_FLOAT_PRECISION) + res = xp.reshape(res, (nf, nloc, ng)) + res = xp.astype(res, get_xp_precision(xp, "global")) return res, None, None, None, ww def serialize(self) -> dict: @@ -369,8 +379,8 @@ def serialize(self) -> dict: "env_mat": self.env_mat.serialize(), "embeddings": self.embeddings.serialize(), "@variables": { - "davg": self.davg, - "dstd": self.dstd, + "davg": to_numpy_array(self.davg), + "dstd": to_numpy_array(self.dstd), }, "type_map": self.type_map, } diff --git a/deepmd/jax/descriptor/__init__.py b/deepmd/jax/descriptor/__init__.py index ed59493268..3ed096f9c1 100644 --- a/deepmd/jax/descriptor/__init__.py +++ b/deepmd/jax/descriptor/__init__.py @@ -5,8 +5,12 @@ from deepmd.jax.descriptor.se_e2_a import ( DescrptSeA, ) +from deepmd.jax.descriptor.se_e2_r import ( + DescrptSeR, +) __all__ = [ "DescrptSeA", + "DescrptSeR", "DescrptDPA1", ] diff --git a/deepmd/jax/descriptor/se_e2_r.py b/deepmd/jax/descriptor/se_e2_r.py new file mode 100644 index 0000000000..96ff4103dd --- /dev/null +++ b/deepmd/jax/descriptor/se_e2_r.py @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.se_r import DescrptSeR as DescrptSeRDP +from deepmd.jax.common import ( + ArrayAPIVariable, + flax_module, + to_jax_array, +) +from deepmd.jax.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.jax.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.jax.utils.network import ( + NetworkCollection, +) + + +@BaseDescriptor.register("se_e2_r") +@BaseDescriptor.register("se_r") +@flax_module +class DescrptSeR(DescrptSeRDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"dstd", "davg"}: + value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) + elif name in {"embeddings"}: + if value is not None: + value = NetworkCollection.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) diff --git a/source/tests/array_api_strict/descriptor/se_e2_r.py b/source/tests/array_api_strict/descriptor/se_e2_r.py new file mode 100644 index 0000000000..839e536cea --- /dev/null +++ b/source/tests/array_api_strict/descriptor/se_e2_r.py @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.se_r import DescrptSeR as DescrptSeRDP + +from ..common import ( + to_array_api_strict_array, +) +from ..utils.exclude_mask import ( + PairExcludeMask, +) +from ..utils.network import ( + NetworkCollection, +) + + +class DescrptSeR(DescrptSeRDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"dstd", "davg"}: + value = to_array_api_strict_array(value) + elif name in {"embeddings"}: + if value is not None: + value = NetworkCollection.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) diff --git a/source/tests/consistent/descriptor/test_se_r.py b/source/tests/consistent/descriptor/test_se_r.py index 7103f60aa7..e851106c44 100644 --- a/source/tests/consistent/descriptor/test_se_r.py +++ b/source/tests/consistent/descriptor/test_se_r.py @@ -12,6 +12,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -33,6 +35,17 @@ descrpt_se_r_args, ) +if INSTALLED_JAX: + from deepmd.jax.descriptor.se_e2_r import DescrptSeR as DescrptSeRJAX +else: + DescrptSeRJAX = None +if INSTALLED_ARRAY_API_STRICT: + from ...array_api_strict.descriptor.se_e2_r import ( + DescrptSeR as DescrptSeRArrayAPIStrict, + ) +else: + DescrptSeRArrayAPIStrict = None + @parameterized( (True, False), # resnet_dt @@ -40,7 +53,7 @@ ([], [[0, 1]]), # excluded_types ("float32", "float64"), # precision ) -class TestSeA(CommonTest, DescriptorTest, unittest.TestCase): +class TestSeR(CommonTest, DescriptorTest, unittest.TestCase): @property def data(self) -> dict: ( @@ -81,9 +94,31 @@ def skip_dp(self) -> bool: ) = self.param return not type_one_side or CommonTest.skip_dp + @property + def skip_jax(self) -> bool: + ( + resnet_dt, + type_one_side, + excluded_types, + precision, + ) = self.param + return not type_one_side or not INSTALLED_JAX + + @property + def skip_array_api_strict(self) -> bool: + ( + resnet_dt, + type_one_side, + excluded_types, + precision, + ) = self.param + return not type_one_side or not INSTALLED_ARRAY_API_STRICT + tf_class = DescrptSeRTF dp_class = DescrptSeRDP pt_class = DescrptSeRPT + jax_class = DescrptSeRJAX + array_api_strict_class = DescrptSeRArrayAPIStrict args = descrpt_se_r_args() def setUp(self): @@ -148,6 +183,24 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) + def eval_jax(self, jax_obj: Any) -> Any: + return self.eval_jax_descriptor( + jax_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + return self.eval_array_api_strict_descriptor( + array_api_strict_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) From 5394854d80d1d0baae8fb6ed65b70ff055888a11 Mon Sep 17 00:00:00 2001 From: Jia-Xin Zhu <53895049+ChiahsinChu@users.noreply.github.com> Date: Sun, 27 Oct 2024 01:08:06 +0800 Subject: [PATCH 083/193] feat(pt): support `use_aparam_as_mask` for pt backend (#4246) support `use_aparam_as_mask` for pt backend ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced `use_aparam_as_mask` parameter in `GeneralFitting`, `InvarFitting`, and `EnerFitting` classes, allowing users to conditionally exclude atomic parameters from fitting processes. - Added `seed` parameter to `InvarFitting` for enhanced control over randomness. - New test method `test_use_aparam_as_mask` in `TestInvarFitting` to validate behavior based on the new parameter. - **Bug Fixes** - Improved error handling for `use_aparam_as_mask` in various classes. - **Tests** - Enhanced parameterization in multiple test classes to accommodate new features related to atomic parameters. - Updated test methods in `TestInvarFitting` to include `use_aparam_as_mask` for comprehensive testing. --- deepmd/dpmodel/fitting/general_fitting.py | 8 +++- deepmd/dpmodel/fitting/invar_fitting.py | 4 -- deepmd/pt/model/task/fitting.py | 15 +++++-- deepmd/pt/model/task/invar_fitting.py | 5 ++- deepmd/tf/fit/ener.py | 32 +++++++++----- source/tests/consistent/fitting/common.py | 8 +++- source/tests/consistent/fitting/test_dos.py | 22 ++++++++++ source/tests/consistent/fitting/test_ener.py | 34 ++++++++++++-- .../tests/consistent/fitting/test_property.py | 18 ++++++++ source/tests/pt/model/test_ener_fitting.py | 44 ++++++++++++++++++- 10 files changed, 162 insertions(+), 28 deletions(-) diff --git a/deepmd/dpmodel/fitting/general_fitting.py b/deepmd/dpmodel/fitting/general_fitting.py index 62aafc6207..25d15b2e75 100644 --- a/deepmd/dpmodel/fitting/general_fitting.py +++ b/deepmd/dpmodel/fitting/general_fitting.py @@ -173,7 +173,11 @@ def __init__( else: self.aparam_avg, self.aparam_inv_std = None, None # init networks - in_dim = self.dim_descrpt + self.numb_fparam + self.numb_aparam + in_dim = ( + self.dim_descrpt + + self.numb_fparam + + (0 if self.use_aparam_as_mask else self.numb_aparam) + ) self.nets = NetworkCollection( 1 if not self.mixed_types else 0, self.ntypes, @@ -401,7 +405,7 @@ def _call_common( axis=-1, ) # check aparam dim, concate to input descriptor - if self.numb_aparam > 0: + if self.numb_aparam > 0 and not self.use_aparam_as_mask: assert aparam is not None, "aparam should not be None" if aparam.shape[-1] != self.numb_aparam: raise ValueError( diff --git a/deepmd/dpmodel/fitting/invar_fitting.py b/deepmd/dpmodel/fitting/invar_fitting.py index 893853bb38..2a251834fe 100644 --- a/deepmd/dpmodel/fitting/invar_fitting.py +++ b/deepmd/dpmodel/fitting/invar_fitting.py @@ -139,10 +139,6 @@ def __init__( raise NotImplementedError("tot_ener_zero is not implemented") if spin is not None: raise NotImplementedError("spin is not implemented") - if use_aparam_as_mask: - raise NotImplementedError("use_aparam_as_mask is not implemented") - if use_aparam_as_mask: - raise NotImplementedError("use_aparam_as_mask is not implemented") if layer_name is not None: raise NotImplementedError("layer_name is not implemented") diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py index 6e9829e4b6..15837aca98 100644 --- a/deepmd/pt/model/task/fitting.py +++ b/deepmd/pt/model/task/fitting.py @@ -126,6 +126,8 @@ class GeneralFitting(Fitting): length as `ntypes` signaling if or not removing the vaccum contribution for the atom types in the list. type_map: list[str], Optional A list of strings. Give the name to each type of atoms. + use_aparam_as_mask: bool + If True, the aparam will not be used in fitting net for embedding. """ def __init__( @@ -147,6 +149,7 @@ def __init__( trainable: Union[bool, list[bool]] = True, remove_vaccum_contribution: Optional[list[bool]] = None, type_map: Optional[list[str]] = None, + use_aparam_as_mask: bool = False, **kwargs, ): super().__init__() @@ -164,6 +167,7 @@ def __init__( self.rcond = rcond self.seed = seed self.type_map = type_map + self.use_aparam_as_mask = use_aparam_as_mask # order matters, should be place after the assignment of ntypes self.reinit_exclude(exclude_types) self.trainable = trainable @@ -208,7 +212,11 @@ def __init__( else: self.aparam_avg, self.aparam_inv_std = None, None - in_dim = self.dim_descrpt + self.numb_fparam + self.numb_aparam + in_dim = ( + self.dim_descrpt + + self.numb_fparam + + (0 if self.use_aparam_as_mask else self.numb_aparam) + ) self.filter_layers = NetworkCollection( 1 if not self.mixed_types else 0, @@ -293,13 +301,12 @@ def serialize(self) -> dict: # "trainable": self.trainable , # "atom_ener": self.atom_ener , # "layer_name": self.layer_name , - # "use_aparam_as_mask": self.use_aparam_as_mask , # "spin": self.spin , ## NOTICE: not supported by far "tot_ener_zero": False, "trainable": [self.trainable] * (len(self.neuron) + 1), "layer_name": None, - "use_aparam_as_mask": False, + "use_aparam_as_mask": self.use_aparam_as_mask, "spin": None, } @@ -441,7 +448,7 @@ def _forward_common( dim=-1, ) # check aparam dim, concate to input descriptor - if self.numb_aparam > 0: + if self.numb_aparam > 0 and not self.use_aparam_as_mask: assert aparam is not None, "aparam should not be None" assert self.aparam_avg is not None assert self.aparam_inv_std is not None diff --git a/deepmd/pt/model/task/invar_fitting.py b/deepmd/pt/model/task/invar_fitting.py index 230046b74b..e76e1d2063 100644 --- a/deepmd/pt/model/task/invar_fitting.py +++ b/deepmd/pt/model/task/invar_fitting.py @@ -77,7 +77,8 @@ class InvarFitting(GeneralFitting): The `set_davg_zero` key in the descrptor should be set. type_map: list[str], Optional A list of strings. Give the name to each type of atoms. - + use_aparam_as_mask: bool + If True, the aparam will not be used in fitting net for embedding. """ def __init__( @@ -99,6 +100,7 @@ def __init__( exclude_types: list[int] = [], atom_ener: Optional[list[Optional[torch.Tensor]]] = None, type_map: Optional[list[str]] = None, + use_aparam_as_mask: bool = False, **kwargs, ): self.dim_out = dim_out @@ -122,6 +124,7 @@ def __init__( if atom_ener is None or len([x for x in atom_ener if x is not None]) == 0 else [x is not None for x in atom_ener], type_map=type_map, + use_aparam_as_mask=use_aparam_as_mask, **kwargs, ) diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py index b01574cf87..330ea57179 100644 --- a/deepmd/tf/fit/ener.py +++ b/deepmd/tf/fit/ener.py @@ -384,7 +384,7 @@ def _build_lower( ext_fparam = tf.reshape(ext_fparam, [-1, self.numb_fparam]) ext_fparam = tf.cast(ext_fparam, self.fitting_precision) layer = tf.concat([layer, ext_fparam], axis=1) - if aparam is not None: + if aparam is not None and not self.use_aparam_as_mask: ext_aparam = tf.slice( aparam, [0, start_index * self.numb_aparam], @@ -561,7 +561,7 @@ def build( trainable=False, initializer=tf.constant_initializer(self.fparam_inv_std), ) - if self.numb_aparam > 0: + if self.numb_aparam > 0 and not self.use_aparam_as_mask: t_aparam_avg = tf.get_variable( "t_aparam_avg", self.numb_aparam, @@ -576,6 +576,13 @@ def build( trainable=False, initializer=tf.constant_initializer(self.aparam_inv_std), ) + else: + t_aparam_avg = tf.zeros( + self.numb_aparam, dtype=GLOBAL_TF_FLOAT_PRECISION + ) + t_aparam_istd = tf.ones( + self.numb_aparam, dtype=GLOBAL_TF_FLOAT_PRECISION + ) inputs = tf.reshape(inputs, [-1, natoms[0], self.dim_descrpt]) if len(self.atom_ener): @@ -602,12 +609,11 @@ def build( fparam = (fparam - t_fparam_avg) * t_fparam_istd aparam = None - if not self.use_aparam_as_mask: - if self.numb_aparam > 0: - aparam = input_dict["aparam"] - aparam = tf.reshape(aparam, [-1, self.numb_aparam]) - aparam = (aparam - t_aparam_avg) * t_aparam_istd - aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]]) + if self.numb_aparam > 0 and not self.use_aparam_as_mask: + aparam = input_dict["aparam"] + aparam = tf.reshape(aparam, [-1, self.numb_aparam]) + aparam = (aparam - t_aparam_avg) * t_aparam_istd + aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]]) atype_nall = tf.reshape(atype, [-1, natoms[1]]) self.atype_nloc = tf.slice( @@ -783,7 +789,7 @@ def init_variables( self.fparam_inv_std = get_tensor_by_name_from_graph( graph, f"fitting_attr{suffix}/t_fparam_istd" ) - if self.numb_aparam > 0: + if self.numb_aparam > 0 and not self.use_aparam_as_mask: self.aparam_avg = get_tensor_by_name_from_graph( graph, f"fitting_attr{suffix}/t_aparam_avg" ) @@ -883,7 +889,7 @@ def deserialize(cls, data: dict, suffix: str = ""): if fitting.numb_fparam > 0: fitting.fparam_avg = data["@variables"]["fparam_avg"] fitting.fparam_inv_std = data["@variables"]["fparam_inv_std"] - if fitting.numb_aparam > 0: + if fitting.numb_aparam > 0 and not fitting.use_aparam_as_mask: fitting.aparam_avg = data["@variables"]["aparam_avg"] fitting.aparam_inv_std = data["@variables"]["aparam_inv_std"] return fitting @@ -922,7 +928,11 @@ def serialize(self, suffix: str = "") -> dict: "nets": self.serialize_network( ntypes=self.ntypes, ndim=0 if self.mixed_types else 1, - in_dim=self.dim_descrpt + self.numb_fparam + self.numb_aparam, + in_dim=( + self.dim_descrpt + + self.numb_fparam + + (0 if self.use_aparam_as_mask else self.numb_aparam) + ), neuron=self.n_neuron, activation_function=self.activation_function_name, resnet_dt=self.resnet_dt, diff --git a/source/tests/consistent/fitting/common.py b/source/tests/consistent/fitting/common.py index bdd4b7cf81..95557d9ab8 100644 --- a/source/tests/consistent/fitting/common.py +++ b/source/tests/consistent/fitting/common.py @@ -18,7 +18,7 @@ class FittingTest: """Useful utilities for descriptor tests.""" - def build_tf_fitting(self, obj, inputs, natoms, atype, fparam, suffix): + def build_tf_fitting(self, obj, inputs, natoms, atype, fparam, aparam, suffix): t_inputs = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="i_inputs") t_natoms = tf.placeholder(tf.int32, natoms.shape, name="i_natoms") t_atype = tf.placeholder(tf.int32, [None], name="i_atype") @@ -30,6 +30,12 @@ def build_tf_fitting(self, obj, inputs, natoms, atype, fparam, suffix): ) extras["fparam"] = t_fparam feed_dict[t_fparam] = fparam + if aparam is not None: + t_aparam = tf.placeholder( + GLOBAL_TF_FLOAT_PRECISION, [None, None], name="i_aparam" + ) + extras["aparam"] = t_aparam + feed_dict[t_aparam] = aparam t_out = obj.build( t_inputs, t_natoms, diff --git a/source/tests/consistent/fitting/test_dos.py b/source/tests/consistent/fitting/test_dos.py index 4a78b69341..774e3f655e 100644 --- a/source/tests/consistent/fitting/test_dos.py +++ b/source/tests/consistent/fitting/test_dos.py @@ -58,6 +58,7 @@ ("float64", "float32"), # precision (True, False), # mixed_types (0, 1), # numb_fparam + (0, 1), # numb_aparam (10, 20), # numb_dos ) class TestDOS(CommonTest, FittingTest, unittest.TestCase): @@ -68,6 +69,7 @@ def data(self) -> dict: precision, mixed_types, numb_fparam, + numb_aparam, numb_dos, ) = self.param return { @@ -75,6 +77,7 @@ def data(self) -> dict: "resnet_dt": resnet_dt, "precision": precision, "numb_fparam": numb_fparam, + "numb_aparam": numb_aparam, "seed": 20240217, "numb_dos": numb_dos, } @@ -86,6 +89,7 @@ def skip_pt(self) -> bool: precision, mixed_types, numb_fparam, + numb_aparam, numb_dos, ) = self.param return CommonTest.skip_pt @@ -115,6 +119,9 @@ def setUp(self): # inconsistent if not sorted self.atype.sort() self.fparam = -np.ones((1,), dtype=GLOBAL_NP_FLOAT_PRECISION) + self.aparam = np.zeros_like( + self.atype, dtype=GLOBAL_NP_FLOAT_PRECISION + ).reshape(-1, 1) @property def addtional_data(self) -> dict: @@ -123,6 +130,7 @@ def addtional_data(self) -> dict: precision, mixed_types, numb_fparam, + numb_aparam, numb_dos, ) = self.param return { @@ -137,6 +145,7 @@ def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: precision, mixed_types, numb_fparam, + numb_aparam, numb_dos, ) = self.param return self.build_tf_fitting( @@ -145,6 +154,7 @@ def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: self.natoms, self.atype, self.fparam if numb_fparam else None, + self.aparam if numb_aparam else None, suffix, ) @@ -154,6 +164,7 @@ def eval_pt(self, pt_obj: Any) -> Any: precision, mixed_types, numb_fparam, + numb_aparam, numb_dos, ) = self.param return ( @@ -163,6 +174,9 @@ def eval_pt(self, pt_obj: Any) -> Any: fparam=torch.from_numpy(self.fparam).to(device=PT_DEVICE) if numb_fparam else None, + aparam=torch.from_numpy(self.aparam).to(device=PT_DEVICE) + if numb_aparam + else None, )["dos"] .detach() .cpu() @@ -175,12 +189,14 @@ def eval_dp(self, dp_obj: Any) -> Any: precision, mixed_types, numb_fparam, + numb_aparam, numb_dos, ) = self.param return dp_obj( self.inputs, self.atype.reshape(1, -1), fparam=self.fparam if numb_fparam else None, + aparam=self.aparam if numb_aparam else None, )["dos"] def eval_jax(self, jax_obj: Any) -> Any: @@ -189,6 +205,7 @@ def eval_jax(self, jax_obj: Any) -> Any: precision, mixed_types, numb_fparam, + numb_aparam, numb_dos, ) = self.param return np.asarray( @@ -196,6 +213,7 @@ def eval_jax(self, jax_obj: Any) -> Any: jnp.asarray(self.inputs), jnp.asarray(self.atype.reshape(1, -1)), fparam=jnp.asarray(self.fparam) if numb_fparam else None, + aparam=jnp.asarray(self.aparam) if numb_aparam else None, )["dos"] ) @@ -206,6 +224,7 @@ def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: precision, mixed_types, numb_fparam, + numb_aparam, numb_dos, ) = self.param return np.asarray( @@ -213,6 +232,7 @@ def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: array_api_strict.asarray(self.inputs), array_api_strict.asarray(self.atype.reshape(1, -1)), fparam=array_api_strict.asarray(self.fparam) if numb_fparam else None, + aparam=array_api_strict.asarray(self.aparam) if numb_aparam else None, )["dos"] ) @@ -230,6 +250,7 @@ def rtol(self) -> float: precision, mixed_types, numb_fparam, + numb_aparam, numb_dos, ) = self.param if precision == "float64": @@ -247,6 +268,7 @@ def atol(self) -> float: precision, mixed_types, numb_fparam, + numb_aparam, numb_dos, ) = self.param if precision == "float64": diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index ba2be1d86b..e32410a0ec 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -60,6 +60,7 @@ ("float64", "float32", "bfloat16"), # precision (True, False), # mixed_types (0, 1), # numb_fparam + ((0, False), (1, False), (1, True)), # (numb_aparam, use_aparam_as_mask) ([], [-12345.6, None]), # atom_ener ) class TestEner(CommonTest, FittingTest, unittest.TestCase): @@ -70,6 +71,7 @@ def data(self) -> dict: precision, mixed_types, numb_fparam, + (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param return { @@ -77,8 +79,10 @@ def data(self) -> dict: "resnet_dt": resnet_dt, "precision": precision, "numb_fparam": numb_fparam, + "numb_aparam": numb_aparam, "seed": 20240217, "atom_ener": atom_ener, + "use_aparam_as_mask": use_aparam_as_mask, } @property @@ -88,6 +92,7 @@ def skip_pt(self) -> bool: precision, mixed_types, numb_fparam, + (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param return CommonTest.skip_pt @@ -101,6 +106,7 @@ def skip_array_api_strict(self) -> bool: precision, mixed_types, numb_fparam, + (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param # TypeError: The array_api_strict namespace does not support the dtype 'bfloat16' @@ -123,6 +129,9 @@ def setUp(self): # inconsistent if not sorted self.atype.sort() self.fparam = -np.ones((1,), dtype=GLOBAL_NP_FLOAT_PRECISION) + self.aparam = np.zeros_like( + self.atype, dtype=GLOBAL_NP_FLOAT_PRECISION + ).reshape(-1, 1) @property def addtional_data(self) -> dict: @@ -131,6 +140,7 @@ def addtional_data(self) -> dict: precision, mixed_types, numb_fparam, + (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param return { @@ -145,6 +155,7 @@ def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: precision, mixed_types, numb_fparam, + (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param return self.build_tf_fitting( @@ -153,6 +164,7 @@ def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: self.natoms, self.atype, self.fparam if numb_fparam else None, + self.aparam if numb_aparam else None, suffix, ) @@ -162,15 +174,23 @@ def eval_pt(self, pt_obj: Any) -> Any: precision, mixed_types, numb_fparam, + (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param return ( pt_obj( torch.from_numpy(self.inputs).to(device=PT_DEVICE), torch.from_numpy(self.atype.reshape(1, -1)).to(device=PT_DEVICE), - fparam=torch.from_numpy(self.fparam).to(device=PT_DEVICE) - if numb_fparam - else None, + fparam=( + torch.from_numpy(self.fparam).to(device=PT_DEVICE) + if numb_fparam + else None + ), + aparam=( + torch.from_numpy(self.aparam).to(device=PT_DEVICE) + if numb_aparam + else None + ), )["energy"] .detach() .cpu() @@ -183,12 +203,14 @@ def eval_dp(self, dp_obj: Any) -> Any: precision, mixed_types, numb_fparam, + (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param return dp_obj( self.inputs, self.atype.reshape(1, -1), fparam=self.fparam if numb_fparam else None, + aparam=self.aparam if numb_aparam else None, )["energy"] def eval_jax(self, jax_obj: Any) -> Any: @@ -197,6 +219,7 @@ def eval_jax(self, jax_obj: Any) -> Any: precision, mixed_types, numb_fparam, + (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param return np.asarray( @@ -204,6 +227,7 @@ def eval_jax(self, jax_obj: Any) -> Any: jnp.asarray(self.inputs), jnp.asarray(self.atype.reshape(1, -1)), fparam=jnp.asarray(self.fparam) if numb_fparam else None, + aparam=jnp.asarray(self.aparam) if numb_aparam else None, )["energy"] ) @@ -214,6 +238,7 @@ def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: precision, mixed_types, numb_fparam, + (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param return np.asarray( @@ -221,6 +246,7 @@ def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: array_api_strict.asarray(self.inputs), array_api_strict.asarray(self.atype.reshape(1, -1)), fparam=array_api_strict.asarray(self.fparam) if numb_fparam else None, + aparam=array_api_strict.asarray(self.aparam) if numb_aparam else None, )["energy"] ) @@ -238,6 +264,7 @@ def rtol(self) -> float: precision, mixed_types, numb_fparam, + (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param if precision == "float64": @@ -257,6 +284,7 @@ def atol(self) -> float: precision, mixed_types, numb_fparam, + (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param if precision == "float64": diff --git a/source/tests/consistent/fitting/test_property.py b/source/tests/consistent/fitting/test_property.py index a9fb6b694a..beb21d9c04 100644 --- a/source/tests/consistent/fitting/test_property.py +++ b/source/tests/consistent/fitting/test_property.py @@ -40,6 +40,7 @@ ("float64", "float32"), # precision (True, False), # mixed_types (0, 1), # numb_fparam + (0, 1), # numb_aparam (1, 3), # task_dim (True, False), # intensive ) @@ -51,6 +52,7 @@ def data(self) -> dict: precision, mixed_types, numb_fparam, + numb_aparam, task_dim, intensive, ) = self.param @@ -59,6 +61,7 @@ def data(self) -> dict: "resnet_dt": resnet_dt, "precision": precision, "numb_fparam": numb_fparam, + "numb_aparam": numb_aparam, "seed": 20240217, "task_dim": task_dim, "intensive": intensive, @@ -71,6 +74,7 @@ def skip_pt(self) -> bool: precision, mixed_types, numb_fparam, + numb_aparam, task_dim, intensive, ) = self.param @@ -95,6 +99,9 @@ def setUp(self): # inconsistent if not sorted self.atype.sort() self.fparam = -np.ones((1,), dtype=GLOBAL_NP_FLOAT_PRECISION) + self.aparam = np.zeros_like( + self.atype, dtype=GLOBAL_NP_FLOAT_PRECISION + ).reshape(-1, 1) @property def addtional_data(self) -> dict: @@ -103,6 +110,7 @@ def addtional_data(self) -> dict: precision, mixed_types, numb_fparam, + numb_aparam, task_dim, intensive, ) = self.param @@ -118,6 +126,7 @@ def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: precision, mixed_types, numb_fparam, + numb_aparam, task_dim, intensive, ) = self.param @@ -127,6 +136,7 @@ def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: self.natoms, self.atype, self.fparam if numb_fparam else None, + self.aparam if numb_aparam else None, suffix, ) @@ -136,6 +146,7 @@ def eval_pt(self, pt_obj: Any) -> Any: precision, mixed_types, numb_fparam, + numb_aparam, task_dim, intensive, ) = self.param @@ -146,6 +157,9 @@ def eval_pt(self, pt_obj: Any) -> Any: fparam=torch.from_numpy(self.fparam).to(device=PT_DEVICE) if numb_fparam else None, + aparam=torch.from_numpy(self.aparam).to(device=PT_DEVICE) + if numb_aparam + else None, )["property"] .detach() .cpu() @@ -158,6 +172,7 @@ def eval_dp(self, dp_obj: Any) -> Any: precision, mixed_types, numb_fparam, + numb_aparam, task_dim, intensive, ) = self.param @@ -165,6 +180,7 @@ def eval_dp(self, dp_obj: Any) -> Any: self.inputs, self.atype.reshape(1, -1), fparam=self.fparam if numb_fparam else None, + aparam=self.aparam if numb_aparam else None, )["property"] def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: @@ -181,6 +197,7 @@ def rtol(self) -> float: precision, mixed_types, numb_fparam, + numb_aparam, task_dim, intensive, ) = self.param @@ -199,6 +216,7 @@ def atol(self) -> float: precision, mixed_types, numb_fparam, + numb_aparam, task_dim, intensive, ) = self.param diff --git a/source/tests/pt/model/test_ener_fitting.py b/source/tests/pt/model/test_ener_fitting.py index 5c55766455..acf0a47769 100644 --- a/source/tests/pt/model/test_ener_fitting.py +++ b/source/tests/pt/model/test_ener_fitting.py @@ -36,6 +36,7 @@ def setUp(self): def test_consistency( self, ): + # ValueError: matmul: Input operand 1 has a mismatch in its core dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?) (size 1600 is different from 1604) rng = np.random.default_rng(GLOBAL_SEED) nf, nloc, nnei = self.nlist.shape dd0 = DescrptSeA(self.rcut, self.rcut_smth, self.sel).to(env.DEVICE) @@ -46,13 +47,14 @@ def test_consistency( ) atype = torch.tensor(self.atype_ext[:, :nloc], dtype=int, device=env.DEVICE) - for od, mixed_types, nfp, nap, et, nn in itertools.product( + for od, mixed_types, nfp, nap, et, nn, use_aparam_as_mask in itertools.product( [1, 3], [True, False], [0, 3], [0, 4], [[], [0], [1]], [[4, 4, 4], []], + [True, False], ): ft0 = InvarFitting( "foo", @@ -65,6 +67,7 @@ def test_consistency( exclude_types=et, neuron=nn, seed=GLOBAL_SEED, + use_aparam_as_mask=use_aparam_as_mask, ).to(env.DEVICE) ft1 = DPInvarFitting.deserialize(ft0.serialize()) ft2 = InvarFitting.deserialize(ft0.serialize()) @@ -105,12 +108,13 @@ def test_consistency( def test_jit( self, ): - for od, mixed_types, nfp, nap, et in itertools.product( + for od, mixed_types, nfp, nap, et, use_aparam_as_mask in itertools.product( [1, 3], [True, False], [0, 3], [0, 4], [[], [0]], + [True, False], ): ft0 = InvarFitting( "foo", @@ -122,6 +126,7 @@ def test_jit( mixed_types=mixed_types, exclude_types=et, seed=GLOBAL_SEED, + use_aparam_as_mask=use_aparam_as_mask, ).to(env.DEVICE) torch.jit.script(ft0) @@ -146,3 +151,38 @@ def test_get_set(self): np.testing.assert_allclose( foo, np.reshape(ifn0[ii].detach().cpu().numpy(), foo.shape) ) + + def test_use_aparam_as_mask(self): + nap = 4 + dd0 = DescrptSeA(self.rcut, self.rcut_smth, self.sel).to(env.DEVICE) + + for od, mixed_types, nfp, et, nn in itertools.product( + [1, 3], + [True, False], + [0, 3], + [[], [0], [1]], + [[4, 4, 4], []], + ): + ft0 = InvarFitting( + "foo", + self.nt, + dd0.dim_out, + od, + numb_fparam=nfp, + numb_aparam=nap, + mixed_types=mixed_types, + exclude_types=et, + neuron=nn, + seed=GLOBAL_SEED, + use_aparam_as_mask=True, + ).to(env.DEVICE) + in_dim = ft0.dim_descrpt + ft0.numb_fparam + assert ft0.filter_layers[0].in_dim == in_dim + + ft1 = DPInvarFitting.deserialize(ft0.serialize()) + in_dim = ft1.dim_descrpt + ft1.numb_fparam + assert ft1.nets[0].in_dim == in_dim + + ft2 = InvarFitting.deserialize(ft0.serialize()) + in_dim = ft2.dim_descrpt + ft2.numb_fparam + assert ft2.filter_layers[0].in_dim == in_dim From 13e247ecb528d78ae8443c2a98bca37fa8459940 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sun, 27 Oct 2024 02:25:18 +0800 Subject: [PATCH 084/193] fix(tf): fix compress suffix in DescrptDPA1Compat (#4243) Fix #4114 . ## Summary by CodeRabbit - **New Features** - Enhanced compression capabilities in descriptor models with new optional parameters for improved flexibility. - Improved serialization processes for attention layers, allowing for better handling of scaling factors and normalization. - Dynamic tensor name construction in utility functions to accommodate varying suffixes. - **Bug Fixes** - Adjusted method parameters to ensure compatibility and functionality with new suffix options. - **Tests** - Introduced a new test suite to validate the functionality of the TensorFlow-based descriptor model, ensuring consistent output with the updated features. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/tf/descriptor/se_atten.py | 57 ++++++- deepmd/tf/utils/compress.py | 4 +- deepmd/tf/utils/tabulate.py | 2 +- ...del_compression_dpa1_compat_suffix_only.py | 153 ++++++++++++++++++ 4 files changed, 212 insertions(+), 4 deletions(-) create mode 100644 source/tests/tf/test_model_compression_dpa1_compat_suffix_only.py diff --git a/deepmd/tf/descriptor/se_atten.py b/deepmd/tf/descriptor/se_atten.py index 963e81ecf0..8d101f151c 100644 --- a/deepmd/tf/descriptor/se_atten.py +++ b/deepmd/tf/descriptor/se_atten.py @@ -423,6 +423,7 @@ def enable_compression( table_stride_2: float = 0.1, check_frequency: int = -1, suffix: str = "", + tebd_suffix: str = "", ) -> None: """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. @@ -444,6 +445,8 @@ def enable_compression( The overflow check frequency suffix : str, optional The suffix of the scope + tebd_suffix : str, optional + The suffix of the type embedding scope, only for DescrptDPA1Compat """ # do some checks before the mocel compression process assert ( @@ -496,7 +499,9 @@ def enable_compression( min_nbor_dist, table_extrapolate, table_stride_1, table_stride_2 ) - self.final_type_embedding = get_two_side_type_embedding(self, graph) + self.final_type_embedding = get_two_side_type_embedding( + self, graph, suffix=tebd_suffix + ) type_side_suffix = get_extra_embedding_net_suffix(type_one_side=False) self.matrix = get_extra_side_embedding_net_variable( self, graph_def, type_side_suffix, "matrix", suffix @@ -2248,6 +2253,56 @@ def build( self.dout = tf.concat([self.dout, atom_embed], axis=-1) return self.dout + def enable_compression( + self, + min_nbor_dist: float, + graph: tf.Graph, + graph_def: tf.GraphDef, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + suffix: str = "", + tebd_suffix: str = "", + ) -> None: + """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + graph : tf.Graph + The graph of the model + graph_def : tf.GraphDef + The graph_def of the model + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + suffix : str, optional + The suffix of the scope + tebd_suffix : str, optional + Same as suffix. + """ + assert ( + tebd_suffix == "" + ), "DescrptDPA1Compat must use the same tebd_suffix as suffix!" + super().enable_compression( + min_nbor_dist, + graph, + graph_def, + table_extrapolate=table_extrapolate, + table_stride_1=table_stride_1, + table_stride_2=table_stride_2, + check_frequency=check_frequency, + suffix=suffix, + tebd_suffix=suffix, + ) + def init_variables( self, graph: tf.Graph, diff --git a/deepmd/tf/utils/compress.py b/deepmd/tf/utils/compress.py index 0bce633573..f96b59920f 100644 --- a/deepmd/tf/utils/compress.py +++ b/deepmd/tf/utils/compress.py @@ -20,8 +20,8 @@ def get_type_embedding(self, graph): return type_embedding -def get_two_side_type_embedding(self, graph): - type_embedding = get_tensor_by_name_from_graph(graph, "t_typeebd") +def get_two_side_type_embedding(self, graph, suffix=""): + type_embedding = get_tensor_by_name_from_graph(graph, f"t_typeebd{suffix}") type_embedding = type_embedding.astype(self.filter_np_precision) type_embedding_shape = type_embedding.shape diff --git a/deepmd/tf/utils/tabulate.py b/deepmd/tf/utils/tabulate.py index 1dc6128f62..d68f5cadf7 100644 --- a/deepmd/tf/utils/tabulate.py +++ b/deepmd/tf/utils/tabulate.py @@ -126,7 +126,7 @@ def __init__( self.dstd = get_tensor_by_name_from_graph( self.graph, f"descrpt_attr{self.suffix}/t_std" ) - self.ntypes = get_tensor_by_name_from_graph(self.graph, "descrpt_attr/ntypes") + self.ntypes = self.descrpt.get_ntypes() self.embedding_net_nodes = get_embedding_net_nodes_from_graph_def( self.graph_def, suffix=self.suffix diff --git a/source/tests/tf/test_model_compression_dpa1_compat_suffix_only.py b/source/tests/tf/test_model_compression_dpa1_compat_suffix_only.py new file mode 100644 index 0000000000..5557305a7a --- /dev/null +++ b/source/tests/tf/test_model_compression_dpa1_compat_suffix_only.py @@ -0,0 +1,153 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np + +from deepmd.common import ( + make_default_mesh, +) +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.tf.descriptor.se_atten import DescrptDPA1Compat as tf_SeAtten +from deepmd.tf.env import ( + GLOBAL_TF_FLOAT_PRECISION, + default_tf_session_config, + tf, +) +from deepmd.tf.utils.sess import ( + run_sess, +) + + +def build_tf_descriptor(obj, natoms, coords, atype, box, suffix): + t_coord = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="i_coord") + t_type = tf.placeholder(tf.int32, [None], name="i_type") + t_natoms = tf.placeholder(tf.int32, natoms.shape, name="i_natoms") + t_box = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [9], name="i_box") + t_mesh = tf.placeholder(tf.int32, [None], name="i_mesh") + t_des = obj.build( + t_coord, + t_type, + t_natoms, + t_box, + t_mesh, + {}, + suffix=suffix, + ) + return [t_des], { + t_coord: coords, + t_type: atype, + t_natoms: natoms, + t_box: box, + t_mesh: make_default_mesh(True, False), + } + + +def build_eval_tf(sess, obj, natoms, coords, atype, box, suffix): + t_out, feed_dict = build_tf_descriptor(obj, natoms, coords, atype, box, suffix) + + t_out_indentity = [ + tf.identity(tt, name=f"o_{ii}_{suffix}") for ii, tt in enumerate(t_out) + ] + run_sess(sess, tf.global_variables_initializer()) + return run_sess( + sess, + t_out_indentity, + feed_dict=feed_dict, + ) + + +class TestDescriptorSeA(unittest.TestCase): + def setUp(self): + self.device = "cpu" + self.seed = 21 + self.sel = [9, 10] + self.rcut_smth = 5.80 + self.rcut = 6.00 + self.neuron = [6, 12, 24] + self.axis_neuron = 3 + self.ntypes = 2 + self.coords = np.array( + [ + 12.83, + 2.56, + 2.18, + 12.09, + 2.87, + 2.74, + 00.25, + 3.32, + 1.68, + 3.36, + 3.00, + 1.81, + 3.51, + 2.51, + 2.60, + 4.27, + 3.22, + 1.56, + ], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32) + # self.atype = np.array([0, 0, 1, 1, 1, 1], dtype=np.int32) + self.box = np.array( + [13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.natoms = np.array([6, 6, 2, 4], dtype=np.int32) + self.suffix = "test" + self.type_one_side = False + self.se_a_tf = tf_SeAtten( + self.rcut, + self.rcut_smth, + self.sel, + self.ntypes, + self.neuron, + self.axis_neuron, + type_one_side=self.type_one_side, + seed=21, + precision="float32", + tebd_input_mode="strip", + temperature=1.0, + attn_layer=0, + ) + + def test_tf_pt_consistent( + self, + ): + with tf.Session(config=default_tf_session_config) as sess: + graph = tf.get_default_graph() + ret = build_eval_tf( + sess, + self.se_a_tf, + self.natoms, + self.coords, + self.atype, + self.box, + self.suffix, + ) + output_graph_def = tf.graph_util.convert_variables_to_constants( + sess, + graph.as_graph_def(), + [f"o_{ii}_{self.suffix}" for ii, _ in enumerate(ret)], + ) + with tf.Graph().as_default() as new_graph: + tf.import_graph_def(output_graph_def, name="") + self.se_a_tf.init_variables( + new_graph, + output_graph_def, + suffix=self.suffix, + ) + self.se_a_tf.enable_compression( + 1.0, + new_graph, + output_graph_def, + suffix=self.suffix, + ) + + +if __name__ == "__main__": + unittest.main() From aba932c78169804360c9bc4a4d1f39e3c81149eb Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 28 Oct 2024 00:56:57 -0400 Subject: [PATCH 085/193] ci: skip `test_data_equal` on the GPU machine (#4260) This test crashes on the machine iZ0xih0eykcp6eddga4w5iZ with exit code 1: https://github.com/deepmodeling/deepmd-kit/actions/runs/11533273426/job/32106001782 ## Summary by CodeRabbit - **Bug Fixes** - Enhanced test execution control to ensure compatibility with CPU environments during continuous integration. - **Tests** - Updated the `test_data_equal` method to conditionally skip tests based on the testing device and CI status. - Retained cleanup procedures in the `tearDown` method to ensure proper test environment management. Signed-off-by: Jinzhe Zeng --- source/tests/consistent/io/test_io.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/source/tests/consistent/io/test_io.py b/source/tests/consistent/io/test_io.py index feafde234d..df81c24ff5 100644 --- a/source/tests/consistent/io/test_io.py +++ b/source/tests/consistent/io/test_io.py @@ -21,6 +21,11 @@ DeepEval, ) +from ...utils import ( + CI, + TEST_DEVICE, +) + infer_path = Path(__file__).parent.parent.parent / "infer" @@ -66,6 +71,7 @@ def tearDown(self): elif Path(ii).is_dir(): shutil.rmtree(ii) + @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_data_equal(self): prefix = "test_consistent_io_" + self.__class__.__name__.lower() for backend_name in ("tensorflow", "pytorch", "dpmodel", "jax"): From 39cddd4832873ca89c3b22e01e6edc1101f1de54 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 28 Oct 2024 01:02:10 -0400 Subject: [PATCH 086/193] feat(dev): setup devcontainer for developers (#4263) See `.devcontainer/READMD.md` for details. ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced a development container setup with a new Dockerfile. - Added scripts for building C++ and Python components, downloading the LibTorch library, and setting up the environment for LAMMPS simulations. - New README documentation for the development environment setup. - New configuration file for the development container to streamline setup processes. - **Bug Fixes** - Expanded `.gitignore` to prevent unnecessary files from being tracked. - **Chores** - Enhanced dependency management in `pyproject.toml` for improved organization and clarity. --------- Signed-off-by: GitHub Signed-off-by: Jinzhe Zeng --- .devcontainer/Dockerfile | 3 +++ .devcontainer/READMD.md | 35 ++++++++++++++++++++++++++++++ .devcontainer/build_cxx.sh | 21 ++++++++++++++++++ .devcontainer/build_py.sh | 8 +++++++ .devcontainer/devcontainer.json | 17 +++++++++++++++ .devcontainer/download_libtorch.sh | 8 +++++++ .devcontainer/gdb_lmp | 9 ++++++++ .devcontainer/lmp | 9 ++++++++ .gitignore | 5 +++++ pyproject.toml | 20 +++++++++++++++++ 10 files changed, 135 insertions(+) create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/READMD.md create mode 100755 .devcontainer/build_cxx.sh create mode 100755 .devcontainer/build_py.sh create mode 100644 .devcontainer/devcontainer.json create mode 100755 .devcontainer/download_libtorch.sh create mode 100755 .devcontainer/gdb_lmp create mode 100755 .devcontainer/lmp diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000000..18a2acda7f --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,3 @@ +FROM mcr.microsoft.com/devcontainers/cpp:1-ubuntu-24.04 + +COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/ diff --git a/.devcontainer/READMD.md b/.devcontainer/READMD.md new file mode 100644 index 0000000000..8e600a143f --- /dev/null +++ b/.devcontainer/READMD.md @@ -0,0 +1,35 @@ +# DeePMD-kit devcontainer environment + +This [devcontainer](https://vscode.js.cn/docs/devcontainers/devcontainer-cli) environment setups Python and C++ environment to develop DeePMD-kit. +One can setup locally or use [GitHub Codespaces](https://docs.github.com/en/codespaces) by clicking the Code button on the DeePMD-kit repository page. +The whole setup process requires about 10 minutes, so one needs to be patient. + +## Python environment + +The following packages are installed into the Python environment `.venv`: + +- DeePMD-kit (in edit mode) +- Backends including TensorFlow, PyTorch, JAX +- LAMMPS +- MPICH +- CMake +- pre-commit (including hooks) +- Test packages including pytest +- Doc packages including sphinx + +## C++ interface + +The C++ interface with TensorFlow and PyTorch support is installed into `dp` directory. + +When calling and debuging LAMMPS with DeePMD-kit, use the following scripts instead of the regular `lmp`: + +- `.devcontainer/lmp` +- `.devcontainer/gdb_lmp` + +## Rebuild + +Usually the Python package does not need to reinstall. +But when one wants to recompile the C++ code, the following scripts can be executed. + +- `.devcontainer/build_cxx.sh` +- `.devcontainer/build_py.sh` diff --git a/.devcontainer/build_cxx.sh b/.devcontainer/build_cxx.sh new file mode 100755 index 0000000000..442539301e --- /dev/null +++ b/.devcontainer/build_cxx.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -ev + +NPROC=$(nproc --all) +SCRIPT_PATH=$(dirname $(realpath -s $0)) + +export CMAKE_PREFIX_PATH=${SCRIPT_PATH}/../libtorch +TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') + +mkdir -p ${SCRIPT_PATH}/../buildcxx/ +cd ${SCRIPT_PATH}/../buildcxx/ +cmake -D ENABLE_TENSORFLOW=ON \ + -D ENABLE_PYTORCH=ON \ + -D CMAKE_INSTALL_PREFIX=${SCRIPT_PATH}/../dp/ \ + -D LAMMPS_VERSION=stable_29Aug2024_update1 \ + -D CMAKE_BUILD_TYPE=Debug \ + -D BUILD_TESTING:BOOL=TRUE \ + -D TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \ + ${SCRIPT_PATH}/../source +cmake --build . -j${NPROC} +cmake --install . diff --git a/.devcontainer/build_py.sh b/.devcontainer/build_py.sh new file mode 100755 index 0000000000..8e9a006a4f --- /dev/null +++ b/.devcontainer/build_py.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -ev + +SCRIPT_PATH=$(dirname $(realpath -s $0)) +cd ${SCRIPT_PATH}/.. + +uv sync --dev --python 3.12 --extra cpu --extra torch --extra jax --extra lmp --extra test --extra docs +pre-commit install diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000000..27c40bbe6a --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,17 @@ +{ + "name": "DeePMD-kit", + "build": { + "dockerfile": "Dockerfile" + }, + "features": { + "ghcr.io/devcontainers/features/github-cli:1": {} + }, + "postCreateCommand": ".devcontainer/build_py.sh && .devcontainer/download_libtorch.sh && .devcontainer/build_cxx.sh && pre-commit install-hooks", + "remoteEnv": { + "PATH": "${containerEnv:PATH}:${containerWorkspaceFolder}/.venv/bin", + "DP_ENABLE_PYTORCH": "1", + "DP_VARIANT": "cpu", + "LMP_CXX11_ABI_0": "1", + "UV_EXTRA_INDEX_URL": "https://download.pytorch.org/whl/cpu" + } +} diff --git a/.devcontainer/download_libtorch.sh b/.devcontainer/download_libtorch.sh new file mode 100755 index 0000000000..d78b559997 --- /dev/null +++ b/.devcontainer/download_libtorch.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -ev + +SCRIPT_PATH=$(dirname $(realpath -s $0)) +cd ${SCRIPT_PATH}/.. + +wget https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-2.5.0%2Bcpu.zip -O ~/libtorch.zip +unzip ~/libtorch.zip diff --git a/.devcontainer/gdb_lmp b/.devcontainer/gdb_lmp new file mode 100755 index 0000000000..33e883780b --- /dev/null +++ b/.devcontainer/gdb_lmp @@ -0,0 +1,9 @@ +#!/bin/bash +SCRIPT_PATH=$(dirname $(realpath -s $0)) + +export CMAKE_PREFIX_PATH=${SCRIPT_PATH}/../libtorch +TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') + +env LAMMPS_PLUGIN_PATH=${SCRIPT_PATH}/../dp/lib/deepmd_lmp \ + LD_LIBRARY_PATH=${SCRIPT_PATH}/../dp/lib:${CMAKE_PREFIX_PATH}/lib:${TENSORFLOW_ROOT} \ + gdb ${SCRIPT_PATH}/../.venv/lib/python3.12/site-packages/lammps/lmp "$@" diff --git a/.devcontainer/lmp b/.devcontainer/lmp new file mode 100755 index 0000000000..c8e781aa57 --- /dev/null +++ b/.devcontainer/lmp @@ -0,0 +1,9 @@ +#!/bin/bash +SCRIPT_PATH=$(dirname $(realpath -s $0)) + +export CMAKE_PREFIX_PATH=${SCRIPT_PATH}/../libtorch +TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') + +env LAMMPS_PLUGIN_PATH=${SCRIPT_PATH}/../dp/lib/deepmd_lmp \ + LD_LIBRARY_PATH=${SCRIPT_PATH}/../dp/lib:${CMAKE_PREFIX_PATH}/lib:${TENSORFLOW_ROOT} \ + ${SCRIPT_PATH}/../.venv/bin/lmp "$@" diff --git a/.gitignore b/.gitignore index c531a76177..c574da757a 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,8 @@ build_c_tests build_c/ libdeepmd_c/ .uv/ +libtorch/ +uv.lock +buildcxx/ +node_modules/ +*.bib.original diff --git a/pyproject.toml b/pyproject.toml index 6f0404174d..0a1b2e6731 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -136,9 +136,14 @@ cu12 = [ "nvidia-cuda-nvcc-cu12", ] jax = [ + # below is a funny workaround for + # https://github.com/astral-sh/uv/issues/8601 'jax>=0.4.33;python_version>="3.10"', + 'jax>=0.4.33;python_version>="3.10"', + 'flax>=0.10.0;python_version>="3.10"', 'flax>=0.10.0;python_version>="3.10"', 'orbax-checkpoint;python_version>="3.10"', + 'orbax-checkpoint;python_version>="3.10"', # The pinning of ml_dtypes may conflict with TF # 'jax-ai-stack;python_version>="3.10"', ] @@ -146,6 +151,13 @@ jax = [ [tool.deepmd_build_backend.scripts] dp = "deepmd.main:main" +[dependency-groups] +dev = [ + "pre-commit", + "cmake", + "mpich", +] + [tool.setuptools_scm] [tool.scikit-build] @@ -428,3 +440,11 @@ select = [ "TOR1", "TOR2", ] + +[tool.uv.sources] +mpich = { index = "mpi4py" } + +[[tool.uv.index]] +name = "mpi4py" +url = "https://pypi.anaconda.org/mpi4py/simple" +explicit = true From 04e1159b3f9b3bccd82ab91f0204f65c86cda914 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 28 Oct 2024 15:39:25 -0400 Subject: [PATCH 087/193] fix(pt): set device for PT C++ (#4261) Fix #4171. ## Summary by CodeRabbit - **New Features** - Improved GPU initialization to ensure the correct device is utilized. - Enhanced error handling for clearer context on exceptions. - **Bug Fixes** - Updated error handling in multiple methods to catch and rethrow specific exceptions. - Added logic to handle communication-related tensors during computation. --------- Signed-off-by: Jinzhe Zeng --- source/api_cc/src/DeepPotPT.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index 4c7aac19b8..780a8007f3 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -80,6 +80,9 @@ void DeepPotPT::init(const std::string& model, device = torch::Device(torch::kCPU); std::cout << "load model from: " << model << " to cpu " << std::endl; } else { +#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM + DPErrcheck(DPSetDevice(gpu_id)); +#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM std::cout << "load model from: " << model << " to gpu " << gpu_id << std::endl; } From 8f546cf262a15aa33032941162f8c6561423b33d Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 28 Oct 2024 19:54:48 -0400 Subject: [PATCH 088/193] docs: fix word spellings (#4264) Signed-off-by: Jinzhe Zeng Co-authored-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> --- backend/read_env.py | 2 +- deepmd/__init__.py | 2 +- deepmd/backend/suffix.py | 2 +- deepmd/calculator.py | 2 +- deepmd/common.py | 6 +-- .../dpmodel/atomic_model/base_atomic_model.py | 2 +- .../dpmodel/atomic_model/dp_atomic_model.py | 4 +- .../atomic_model/linear_atomic_model.py | 4 +- .../atomic_model/make_base_atomic_model.py | 4 +- deepmd/dpmodel/common.py | 2 +- deepmd/dpmodel/descriptor/descriptor.py | 2 +- deepmd/dpmodel/descriptor/dpa1.py | 14 +++--- deepmd/dpmodel/descriptor/dpa2.py | 8 ++-- deepmd/dpmodel/descriptor/hybrid.py | 6 +-- .../descriptor/make_base_descriptor.py | 4 +- deepmd/dpmodel/descriptor/repformers.py | 8 ++-- deepmd/dpmodel/descriptor/se_e2_a.py | 8 ++-- deepmd/dpmodel/descriptor/se_r.py | 8 ++-- deepmd/dpmodel/descriptor/se_t.py | 6 +-- deepmd/dpmodel/descriptor/se_t_tebd.py | 14 +++--- deepmd/dpmodel/fitting/general_fitting.py | 14 +++--- deepmd/dpmodel/fitting/invar_fitting.py | 4 +- deepmd/dpmodel/fitting/property_fitting.py | 2 +- deepmd/dpmodel/infer/deep_eval.py | 4 +- deepmd/dpmodel/model/base_model.py | 2 +- deepmd/dpmodel/model/dp_model.py | 2 +- deepmd/dpmodel/model/make_model.py | 8 ++-- deepmd/dpmodel/model/transform_output.py | 2 +- deepmd/dpmodel/output_def.py | 4 +- deepmd/dpmodel/utils/neighbor_stat.py | 2 +- deepmd/dpmodel/utils/network.py | 6 +-- deepmd/dpmodel/utils/nlist.py | 10 ++-- deepmd/dpmodel/utils/region.py | 2 +- deepmd/driver.py | 2 +- deepmd/entrypoints/test.py | 4 +- deepmd/env.py | 2 +- deepmd/infer/__init__.py | 2 +- deepmd/infer/deep_eval.py | 4 +- deepmd/infer/model_devi.py | 2 +- deepmd/loggers/loggers.py | 8 ++-- deepmd/pt/cxx_op.py | 2 +- deepmd/pt/infer/deep_eval.py | 4 +- .../model/atomic_model/base_atomic_model.py | 8 ++-- .../pt/model/atomic_model/dp_atomic_model.py | 2 +- .../model/atomic_model/linear_atomic_model.py | 4 +- .../atomic_model/pairtab_atomic_model.py | 4 +- deepmd/pt/model/descriptor/descriptor.py | 2 +- deepmd/pt/model/descriptor/dpa1.py | 8 ++-- deepmd/pt/model/descriptor/dpa2.py | 8 ++-- deepmd/pt/model/descriptor/hybrid.py | 6 +-- deepmd/pt/model/descriptor/repformer_layer.py | 4 +- deepmd/pt/model/descriptor/repformers.py | 6 +-- deepmd/pt/model/descriptor/se_a.py | 8 ++-- deepmd/pt/model/descriptor/se_atten.py | 4 +- deepmd/pt/model/descriptor/se_r.py | 8 ++-- deepmd/pt/model/descriptor/se_t.py | 8 ++-- deepmd/pt/model/descriptor/se_t_tebd.py | 12 ++--- deepmd/pt/model/model/dp_linear_model.py | 2 +- deepmd/pt/model/model/dp_model.py | 2 +- deepmd/pt/model/model/dp_zbl_model.py | 2 +- deepmd/pt/model/model/frozen.py | 2 +- deepmd/pt/model/model/make_model.py | 8 ++-- deepmd/pt/model/model/spin_model.py | 8 ++-- deepmd/pt/model/network/init.py | 2 +- deepmd/pt/model/network/network.py | 2 +- deepmd/pt/model/task/denoise.py | 2 +- deepmd/pt/model/task/ener.py | 2 +- deepmd/pt/model/task/fitting.py | 12 ++--- deepmd/pt/model/task/invar_fitting.py | 4 +- deepmd/pt/model/task/property.py | 2 +- deepmd/pt/model/task/type_predict.py | 2 +- deepmd/pt/train/wrapper.py | 2 +- deepmd/pt/utils/dataloader.py | 2 +- deepmd/pt/utils/env_mat_stat.py | 2 +- deepmd/pt/utils/neighbor_stat.py | 2 +- deepmd/pt/utils/nlist.py | 6 +-- deepmd/pt/utils/region.py | 2 +- deepmd/pt/utils/stat.py | 2 +- deepmd/tf/cluster/local.py | 2 +- deepmd/tf/descriptor/descriptor.py | 6 +-- deepmd/tf/descriptor/hybrid.py | 8 ++-- deepmd/tf/descriptor/loc_frame.py | 4 +- deepmd/tf/descriptor/se.py | 4 +- deepmd/tf/descriptor/se_a.py | 8 ++-- deepmd/tf/descriptor/se_a_ebd_v2.py | 2 +- deepmd/tf/descriptor/se_a_ef.py | 2 +- deepmd/tf/descriptor/se_a_mask.py | 4 +- deepmd/tf/descriptor/se_atten.py | 10 ++-- deepmd/tf/descriptor/se_r.py | 4 +- deepmd/tf/descriptor/se_t.py | 4 +- deepmd/tf/entrypoints/freeze.py | 6 +-- deepmd/tf/entrypoints/ipi.py | 2 +- deepmd/tf/entrypoints/main.py | 2 +- deepmd/tf/entrypoints/train.py | 2 +- deepmd/tf/entrypoints/transfer.py | 12 ++--- deepmd/tf/env.py | 8 ++-- deepmd/tf/fit/dipole.py | 8 ++-- deepmd/tf/fit/dos.py | 8 ++-- deepmd/tf/fit/ener.py | 12 ++--- deepmd/tf/fit/polar.py | 14 +++--- deepmd/tf/infer/deep_dipole.py | 2 +- deepmd/tf/infer/deep_eval.py | 18 ++++---- deepmd/tf/infer/deep_tensor.py | 4 +- deepmd/tf/loss/ener.py | 2 +- deepmd/tf/model/ener.py | 4 +- deepmd/tf/model/frozen.py | 2 +- deepmd/tf/model/linear.py | 2 +- deepmd/tf/model/model.py | 8 ++-- deepmd/tf/model/pairtab.py | 4 +- deepmd/tf/model/pairwise_dprc.py | 2 +- deepmd/tf/nvnmd/data/data.py | 4 +- deepmd/tf/nvnmd/entrypoints/mapt.py | 6 +-- deepmd/tf/nvnmd/utils/encode.py | 2 +- deepmd/tf/nvnmd/utils/network.py | 2 +- deepmd/tf/op/__init__.py | 2 +- deepmd/tf/op/_dotmul_flt_nvnmd_grad.py | 2 +- deepmd/tf/op/_matmul_flt2fix_nvnmd.py | 2 +- deepmd/tf/op/_matmul_flt_nvnmd_grad.py | 2 +- deepmd/tf/train/run_options.py | 8 ++-- deepmd/tf/train/trainer.py | 6 +-- deepmd/tf/utils/learning_rate.py | 2 +- deepmd/tf/utils/neighbor_stat.py | 2 +- deepmd/tf/utils/network.py | 2 +- deepmd/tf/utils/nlist.py | 2 +- deepmd/tf/utils/sess.py | 2 +- deepmd/tf/utils/tabulate.py | 2 +- deepmd/utils/argcheck.py | 46 +++++++++---------- deepmd/utils/batch_size.py | 2 +- deepmd/utils/data.py | 6 +-- deepmd/utils/data_system.py | 10 ++-- deepmd/utils/econf_embd.py | 2 +- deepmd/utils/out_stat.py | 4 +- deepmd/utils/summary.py | 2 +- deepmd/utils/weight_avg.py | 2 +- doc/README | 2 +- doc/development/coding-conventions.rst | 2 +- doc/development/create-a-model-pt.md | 2 +- doc/development/create-a-model-tf.md | 2 +- doc/getting-started/quick_start.ipynb | 8 ++-- doc/install/install-from-source.md | 4 +- doc/install/install-tf.2.12.md | 2 +- doc/install/install-tf.2.8.md | 2 +- doc/model/dprc.md | 2 +- doc/model/train-energy-spin.md | 2 +- doc/model/train-se-a-mask.md | 2 +- doc/nvnmd/nvnmd.md | 6 +-- doc/third-party/lammps-command.md | 4 +- doc/train/finetuning.md | 2 +- doc/troubleshooting/precision.md | 4 +- pyproject.toml | 6 +-- source/CMakeLists.txt | 4 +- source/api_c/include/deepmd.hpp | 8 ++-- source/api_c/src/c_api.cc | 2 +- source/api_cc/include/DeepTensor.h | 18 ++++---- source/api_cc/include/DeepTensorTF.h | 14 +++--- source/api_cc/include/commonTF.h | 6 +-- source/api_cc/src/DataModifierTF.cc | 2 +- source/api_cc/src/DeepPotTF.cc | 4 +- source/api_cc/src/DeepTensorTF.cc | 4 +- source/api_cc/src/common.cc | 2 +- source/cmake/Findtensorflow.cmake | 2 +- source/cmake/tf_version.cpp | 2 +- source/gmx/dp_gmx_patch | 2 +- source/gmx/src/gmx_plugin.cpp | 2 +- source/install/build_tf.py | 6 +-- source/lib/include/ComputeDescriptor.h | 2 +- source/lib/include/coord.h | 4 +- source/lib/include/neighbor_list.h | 4 +- source/lib/include/prod_force.h | 2 +- source/lib/src/gpu/tabulate.cu | 6 +-- source/lib/tests/test_fmt_nlist.cc | 8 ++-- source/lmp/pppm_dplr.cpp | 4 +- source/op/tf/descrpt.cc | 2 +- source/op/tf/descrpt_se_a_ef.cc | 2 +- source/op/tf/descrpt_se_a_ef_para.cc | 2 +- source/op/tf/descrpt_se_a_ef_vert.cc | 2 +- source/op/tf/descrpt_se_a_mask.cc | 4 +- source/op/tf/neighbor_stat.cc | 2 +- source/op/tf/pairwise.cc | 2 +- source/op/tf/prod_env_mat_multi_device.cc | 6 +-- .../op/tf/prod_env_mat_multi_device_nvnmd.cc | 8 ++-- .../common/dpmodel/test_pairtab_preprocess.py | 2 +- source/tests/common/test_argument_parser.py | 4 +- source/tests/consistent/common.py | 2 +- .../tests/consistent/test_type_embedding.py | 2 +- source/tests/pt/model/test_descriptor_dpa1.py | 2 +- source/tests/pt/model/test_embedding_net.py | 2 +- source/tests/pt/model/test_fitting_net.py | 2 +- .../tests/pt/model/test_make_hessian_model.py | 2 +- source/tests/pt/model/test_model.py | 4 +- source/tests/pt/model/test_nlist.py | 2 +- source/tests/pt/model/test_unused_params.py | 4 +- source/tests/pt/test_training.py | 2 +- source/tests/tf/common.py | 2 +- source/tests/tf/test_model_pairtab.py | 2 +- .../common/cases/atomic_model/utils.py | 2 +- 196 files changed, 448 insertions(+), 442 deletions(-) diff --git a/backend/read_env.py b/backend/read_env.py index edc3600115..3b217926d6 100644 --- a/backend/read_env.py +++ b/backend/read_env.py @@ -43,7 +43,7 @@ def get_argument_from_env() -> tuple[str, list, list, dict, str, str]: """ cmake_args = [] extra_scripts = {} - # get variant option from the environment varibles, available: cpu, cuda, rocm + # get variant option from the environment variables, available: cpu, cuda, rocm dp_variant = os.environ.get("DP_VARIANT", "cpu").lower() if dp_variant == "cpu" or dp_variant == "": cmake_minimum_required_version = "3.16" diff --git a/deepmd/__init__.py b/deepmd/__init__.py index 1ce4beb723..6f2b65ba63 100644 --- a/deepmd/__init__.py +++ b/deepmd/__init__.py @@ -17,7 +17,7 @@ def DeepPotential(*args, **kwargs): - """Factory function that forwards to DeepEval (for compatbility + """Factory function that forwards to DeepEval (for compatibility and performance). Parameters diff --git a/deepmd/backend/suffix.py b/deepmd/backend/suffix.py index d694b43488..e77aecb5d9 100644 --- a/deepmd/backend/suffix.py +++ b/deepmd/backend/suffix.py @@ -23,7 +23,7 @@ def format_model_suffix( """Check and format the suffixes of a filename. When preferred_backend is not given, this method checks the suffix of the filename - is within the suffixes of the any backends (with the given feature) and doesn't do formating. + is within the suffixes of the any backends (with the given feature) and doesn't do formatting. When preferred_backend is given, strict_prefer must be given. If strict_prefer is True and the suffix is not within the suffixes of the preferred backend, or strict_prefer is False and the suffix is not within the suffixes of the any backend with the given feature, diff --git a/deepmd/calculator.py b/deepmd/calculator.py index 032fa2bcfa..6f863ab09b 100644 --- a/deepmd/calculator.py +++ b/deepmd/calculator.py @@ -32,7 +32,7 @@ class DP(Calculator): """Implementation of ASE deepmd calculator. - Implemented propertie are `energy`, `forces` and `stress` + Implemented properties are `energy`, `forces` and `stress` Parameters ---------- diff --git a/deepmd/common.py b/deepmd/common.py index fdfeef0e6d..185722f4a8 100644 --- a/deepmd/common.py +++ b/deepmd/common.py @@ -77,7 +77,7 @@ def select_idx_map(atom_types: np.ndarray, select_types: np.ndarray) -> np.ndarr Parameters ---------- atom_types : np.ndarray - array specifing type for each atoms as integer + array specifying type for each atoms as integer select_types : np.ndarray types of atoms you want to find indices for @@ -126,7 +126,7 @@ def make_default_mesh(pbc: bool, mixed_type: bool) -> np.ndarray: def j_deprecated( jdata: dict[str, "_DICT_VAL"], key: str, deprecated_key: list[str] = [] ) -> "_DICT_VAL": - """Assert that supplied dictionary conaines specified key. + """Assert that supplied dictionary contains specified key. Parameters ---------- @@ -218,7 +218,7 @@ def get_np_precision(precision: "_PRECISION") -> np.dtype: Returns ------- np.dtype - numpy presicion constant + numpy precision constant Raises ------ diff --git a/deepmd/dpmodel/atomic_model/base_atomic_model.py b/deepmd/dpmodel/atomic_model/base_atomic_model.py index b615c81d1f..4e7620bdda 100644 --- a/deepmd/dpmodel/atomic_model/base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/base_atomic_model.py @@ -158,7 +158,7 @@ def forward_common_atomic( Parameters ---------- extended_coord - extended coodinates, shape: nf x (nall x 3) + extended coordinates, shape: nf x (nall x 3) extended_atype extended atom typs, shape: nf x nall for a type < 0 indicating the atomic is virtual. diff --git a/deepmd/dpmodel/atomic_model/dp_atomic_model.py b/deepmd/dpmodel/atomic_model/dp_atomic_model.py index fe049021fe..a621ece27e 100644 --- a/deepmd/dpmodel/atomic_model/dp_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/dp_atomic_model.py @@ -100,7 +100,7 @@ def forward_atomic( Parameters ---------- extended_coord - coodinates in extended region + coordinates in extended region extended_atype atomic type in extended region nlist @@ -169,7 +169,7 @@ def serialize(self) -> dict: ) return dd - # for subclass overriden + # for subclass overridden base_descriptor_cls = BaseDescriptor """The base descriptor class.""" base_fitting_cls = BaseFitting diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py index 880c92f504..5d86472674 100644 --- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py @@ -162,7 +162,7 @@ def forward_atomic( Parameters ---------- extended_coord - coodinates in extended region, (nframes, nall * 3) + coordinates in extended region, (nframes, nall * 3) extended_atype atomic type in extended region, (nframes, nall) nlist @@ -341,7 +341,7 @@ class DPZBLLinearEnergyAtomicModel(LinearEnergyAtomicModel): Mapping atom type to the name (str) of the type. For example `type_map[1]` gives the name of the type 1. smin_alpha - The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. + The short-range tabulated interaction will be switched according to the distance of the nearest neighbor. This distance is calculated by softmin. """ diff --git a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py index 6c0fc88e2c..99a92c23a4 100644 --- a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py @@ -152,7 +152,7 @@ def make_atom_mask( self, atype: t_tensor, ) -> t_tensor: - """The atoms with type < 0 are treated as virutal atoms, + """The atoms with type < 0 are treated as virtual atoms, which serves as place-holders for multi-frame calculations with different number of atoms in different frames. @@ -164,7 +164,7 @@ def make_atom_mask( Returns ------- mask - True for real atoms and False for virutal atoms. + True for real atoms and False for virtual atoms. """ # supposed to be supported by all backends diff --git a/deepmd/dpmodel/common.py b/deepmd/dpmodel/common.py index 5c75229e49..f834754195 100644 --- a/deepmd/dpmodel/common.py +++ b/deepmd/dpmodel/common.py @@ -30,7 +30,7 @@ "int64": np.int64, "bool": bool, "default": GLOBAL_NP_FLOAT_PRECISION, - # NumPy doesn't have bfloat16 (and does't plan to add) + # NumPy doesn't have bfloat16 (and doesn't plan to add) # ml_dtypes is a solution, but it seems not supporting np.save/np.load # hdf5 hasn't supported bfloat16 as well (see https://forum.hdfgroup.org/t/11975) "bfloat16": ml_dtypes.bfloat16, diff --git a/deepmd/dpmodel/descriptor/descriptor.py b/deepmd/dpmodel/descriptor/descriptor.py index 6d0644f856..746c02eb68 100644 --- a/deepmd/dpmodel/descriptor/descriptor.py +++ b/deepmd/dpmodel/descriptor/descriptor.py @@ -110,7 +110,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError diff --git a/deepmd/dpmodel/descriptor/dpa1.py b/deepmd/dpmodel/descriptor/dpa1.py index add9cb9f71..2f2b12e03c 100644 --- a/deepmd/dpmodel/descriptor/dpa1.py +++ b/deepmd/dpmodel/descriptor/dpa1.py @@ -358,11 +358,11 @@ def get_dim_emb(self) -> int: return self.se_atten.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -385,7 +385,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -459,7 +459,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. not used by this descriptor. Returns ------- @@ -602,7 +602,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -793,11 +793,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/dpmodel/descriptor/dpa2.py b/deepmd/dpmodel/descriptor/dpa2.py index 285dc724a7..1dbb14961e 100644 --- a/deepmd/dpmodel/descriptor/dpa2.py +++ b/deepmd/dpmodel/descriptor/dpa2.py @@ -624,11 +624,11 @@ def get_dim_emb(self) -> int: return self.repformers.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -653,7 +653,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -1021,7 +1021,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/descriptor/hybrid.py b/deepmd/dpmodel/descriptor/hybrid.py index 3aa8882db1..4eb14f29cf 100644 --- a/deepmd/dpmodel/descriptor/hybrid.py +++ b/deepmd/dpmodel/descriptor/hybrid.py @@ -63,7 +63,7 @@ def __init__( for ii in range(1, self.numb_descrpt): assert ( self.descrpt_list[ii].get_ntypes() == self.descrpt_list[0].get_ntypes() - ), f"number of atom types in {ii}th descrptor {self.descrpt_list[0].__class__.__name__} does not match others" + ), f"number of atom types in {ii}th descriptor {self.descrpt_list[0].__class__.__name__} does not match others" # if hybrid sel is larger than sub sel, the nlist needs to be cut for each type hybrid_sel = self.get_sel() self.nlist_cut_idx: list[np.ndarray] = [] @@ -161,7 +161,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -284,7 +284,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/descriptor/make_base_descriptor.py b/deepmd/dpmodel/descriptor/make_base_descriptor.py index a9b434d5f5..b9c1e93387 100644 --- a/deepmd/dpmodel/descriptor/make_base_descriptor.py +++ b/deepmd/dpmodel/descriptor/make_base_descriptor.py @@ -116,7 +116,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ pass @@ -194,7 +194,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/descriptor/repformers.py b/deepmd/dpmodel/descriptor/repformers.py index ec8be21a53..ef79ecdd28 100644 --- a/deepmd/dpmodel/descriptor/repformers.py +++ b/deepmd/dpmodel/descriptor/repformers.py @@ -307,11 +307,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -1480,7 +1480,7 @@ def call( """ Parameters ---------- - g1_ext : nf x nall x ng1 extended single-atom chanel + g1_ext : nf x nall x ng1 extended single-atom channel g2 : nf x nloc x nnei x ng2 pair-atom channel, invariant h2 : nf x nloc x nnei x 3 pair-atom channel, equivariant nlist : nf x nloc x nnei neighbor list (padded neis are set to 0) @@ -1489,7 +1489,7 @@ def call( Returns ------- - g1: nf x nloc x ng1 updated single-atom chanel + g1: nf x nloc x ng1 updated single-atom channel g2: nf x nloc x nnei x ng2 updated pair-atom channel, invariant h2: nf x nloc x nnei x 3 updated pair-atom channel, equivariant """ diff --git a/deepmd/dpmodel/descriptor/se_e2_a.py b/deepmd/dpmodel/descriptor/se_e2_a.py index d29ce8862e..feebe57af7 100644 --- a/deepmd/dpmodel/descriptor/se_e2_a.py +++ b/deepmd/dpmodel/descriptor/se_e2_a.py @@ -281,7 +281,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -359,7 +359,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. not used by this descriptor. Returns ------- @@ -486,7 +486,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -525,7 +525,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. not used by this descriptor. Returns ------- diff --git a/deepmd/dpmodel/descriptor/se_r.py b/deepmd/dpmodel/descriptor/se_r.py index 6d0ddc5621..0f646e143c 100644 --- a/deepmd/dpmodel/descriptor/se_r.py +++ b/deepmd/dpmodel/descriptor/se_r.py @@ -48,7 +48,7 @@ @BaseDescriptor.register("se_e2_r") @BaseDescriptor.register("se_r") class DescrptSeR(NativeOP, BaseDescriptor): - r"""DeepPot-SE_R constructed from only the radial imformation of atomic configurations. + r"""DeepPot-SE_R constructed from only the radial information of atomic configurations. Parameters @@ -237,7 +237,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -308,7 +308,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. not used by this descriptor. Returns ------- @@ -414,7 +414,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/descriptor/se_t.py b/deepmd/dpmodel/descriptor/se_t.py index f2ea751c50..4dc4c965fb 100644 --- a/deepmd/dpmodel/descriptor/se_t.py +++ b/deepmd/dpmodel/descriptor/se_t.py @@ -225,7 +225,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -279,7 +279,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. not used by this descriptor. Returns ------- @@ -405,7 +405,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/descriptor/se_t_tebd.py b/deepmd/dpmodel/descriptor/se_t_tebd.py index 147a335926..ca89c23968 100644 --- a/deepmd/dpmodel/descriptor/se_t_tebd.py +++ b/deepmd/dpmodel/descriptor/se_t_tebd.py @@ -199,11 +199,11 @@ def get_dim_emb(self) -> int: return self.se_ttebd.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -226,7 +226,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -300,7 +300,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. not used by this descriptor. Returns ------- @@ -418,7 +418,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -576,11 +576,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/dpmodel/fitting/general_fitting.py b/deepmd/dpmodel/fitting/general_fitting.py index 25d15b2e75..e55f57c774 100644 --- a/deepmd/dpmodel/fitting/general_fitting.py +++ b/deepmd/dpmodel/fitting/general_fitting.py @@ -56,7 +56,7 @@ class GeneralFitting(NativeOP, BaseFitting): neuron Number of neurons :math:`N` in each hidden layer of the fitting net bias_atom_e - Average enery per atom for each element. + Average energy per atom for each element. resnet_dt Time-step `dt` in the resnet construction: :math:`y = x + dt * \phi (Wx + b)` @@ -88,9 +88,9 @@ class GeneralFitting(NativeOP, BaseFitting): exclude_types: list[int] Atomic contributions of the excluded atom types are set zero. remove_vaccum_contribution: list[bool], optional - Remove vaccum contribution before the bias is added. The list assigned each + Remove vacuum contribution before the bias is added. The list assigned each type. For `mixed_types` provide `[True]`, otherwise it should be a list of the same - length as `ntypes` signaling if or not removing the vaccum contribution for the atom types in the list. + length as `ntypes` signaling if or not removing the vacuum contribution for the atom types in the list. type_map: list[str], Optional A list of strings. Give the name to each type of atoms. seed: Optional[Union[int, list[int]]] @@ -375,10 +375,10 @@ def _call_common( ) xx = descriptor if self.remove_vaccum_contribution is not None: - # TODO: comput the input for vaccum when setting remove_vaccum_contribution - # Idealy, the input for vaccum should be computed; + # TODO: comput the input for vacuum when setting remove_vaccum_contribution + # Ideally, the input for vacuum should be computed; # we consider it as always zero for convenience. - # Needs a compute_input_stats for vaccum passed from the + # Needs a compute_input_stats for vacuum passed from the # descriptor. xx_zeros = xp.zeros_like(xx) else: @@ -424,7 +424,7 @@ def _call_common( axis=-1, ) - # calcualte the prediction + # calculate the prediction if not self.mixed_types: outs = xp.zeros( [nf, nloc, net_dim_out], dtype=get_xp_precision(xp, self.precision) diff --git a/deepmd/dpmodel/fitting/invar_fitting.py b/deepmd/dpmodel/fitting/invar_fitting.py index 2a251834fe..3f8607109b 100644 --- a/deepmd/dpmodel/fitting/invar_fitting.py +++ b/deepmd/dpmodel/fitting/invar_fitting.py @@ -28,7 +28,7 @@ @GeneralFitting.register("invar") @fitting_check_output class InvarFitting(GeneralFitting): - r"""Fitting the energy (or a rotationally invariant porperty of `dim_out`) of the system. The force and the virial can also be trained. + r"""Fitting the energy (or a rotationally invariant property of `dim_out`) of the system. The force and the virial can also be trained. Lets take the energy fitting task as an example. The potential energy :math:`E` is a fitting network function of the descriptor :math:`\mathcal{D}`: @@ -90,7 +90,7 @@ class InvarFitting(GeneralFitting): Suppose that we have :math:`N_l` hidden layers in the fitting net, this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable. atom_ener - Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set. + Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descriptor should be set. activation_function The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN| precision diff --git a/deepmd/dpmodel/fitting/property_fitting.py b/deepmd/dpmodel/fitting/property_fitting.py index 1a8fe44aae..a1b6fe7638 100644 --- a/deepmd/dpmodel/fitting/property_fitting.py +++ b/deepmd/dpmodel/fitting/property_fitting.py @@ -20,7 +20,7 @@ @InvarFitting.register("property") class PropertyFittingNet(InvarFitting): - r"""Fitting the rotationally invariant porperties of `task_dim` of the system. + r"""Fitting the rotationally invariant properties of `task_dim` of the system. Parameters ---------- diff --git a/deepmd/dpmodel/infer/deep_eval.py b/deepmd/dpmodel/infer/deep_eval.py index 2b1e74c8de..c1f3e4630b 100644 --- a/deepmd/dpmodel/infer/deep_eval.py +++ b/deepmd/dpmodel/infer/deep_eval.py @@ -52,7 +52,7 @@ class DeepEval(DeepEvalBackend): - """NumPy backend implementaion of DeepEval. + """NumPy backend implementation of DeepEval. Parameters ---------- @@ -374,5 +374,5 @@ def _get_output_shape(self, odef, nframes, natoms): raise RuntimeError("unknown category") def get_model_def_script(self) -> dict: - """Get model defination script.""" + """Get model definition script.""" return json.loads(self.model.get_model_def_script()) diff --git a/deepmd/dpmodel/model/base_model.py b/deepmd/dpmodel/model/base_model.py index 3f71003bad..777697b4b7 100644 --- a/deepmd/dpmodel/model/base_model.py +++ b/deepmd/dpmodel/model/base_model.py @@ -171,7 +171,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/model/dp_model.py b/deepmd/dpmodel/model/dp_model.py index eda0414398..769bba0b20 100644 --- a/deepmd/dpmodel/model/dp_model.py +++ b/deepmd/dpmodel/model/dp_model.py @@ -27,7 +27,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py index dc90f10da7..afe2eaffb6 100644 --- a/deepmd/dpmodel/model/make_model.py +++ b/deepmd/dpmodel/model/make_model.py @@ -190,7 +190,7 @@ def call_lower( Parameters ---------- extended_coord - coodinates in extended region. nf x (nall x 3). + coordinates in extended region. nf x (nall x 3). extended_atype atomic type in extended region. nf x nall. nlist @@ -319,7 +319,7 @@ def format_nlist( the `nlist` is pad with -1. 3. If the number of neighbors in the `nlist` is larger than sum(self.sel), - the nearest sum(sel) neighbors will be preseved. + the nearest sum(sel) neighbors will be preserved. Known limitations: @@ -329,7 +329,7 @@ def format_nlist( Parameters ---------- extended_coord - coodinates in extended region. nf x nall x 3 + coordinates in extended region. nf x nall x 3 extended_atype atomic type in extended region. nf x nall nlist @@ -340,7 +340,7 @@ def format_nlist( Returns ------- formated_nlist - the formated nlist. + the formatted nlist. """ n_nf, n_nloc, n_nnei = nlist.shape diff --git a/deepmd/dpmodel/model/transform_output.py b/deepmd/dpmodel/model/transform_output.py index 928c33f3bd..107455a6d5 100644 --- a/deepmd/dpmodel/model/transform_output.py +++ b/deepmd/dpmodel/model/transform_output.py @@ -32,7 +32,7 @@ def fit_output_to_model_output( atom_axis = -(len(shap) + 1) if vdef.reducible: kk_redu = get_reduce_name(kk) - # cast to energy prec brefore reduction + # cast to energy prec before reduction model_ret[kk_redu] = xp.sum( vv.astype(GLOBAL_ENER_FLOAT_PRECISION), axis=atom_axis ) diff --git a/deepmd/dpmodel/output_def.py b/deepmd/dpmodel/output_def.py index 2ceb4f412a..bfee338d64 100644 --- a/deepmd/dpmodel/output_def.py +++ b/deepmd/dpmodel/output_def.py @@ -166,7 +166,7 @@ class OutputVariableDef: r_differentiable If the variable is differentiated with respect to coordinates of atoms. Only reducible variable are differentiable. - Negative derivative w.r.t. coordinates will be calcualted. (e.g. force) + Negative derivative w.r.t. coordinates will be calculated. (e.g. force) c_differentiable If the variable is differentiated with respect to the cell tensor (pbc case). Only reducible variable @@ -178,7 +178,7 @@ class OutputVariableDef: category : int The category of the output variable. r_hessian : bool - If hessian is requred + If hessian is required magnetic : bool If the derivatives of variable have magnetic parts. intensive : bool diff --git a/deepmd/dpmodel/utils/neighbor_stat.py b/deepmd/dpmodel/utils/neighbor_stat.py index 744a4476cd..43ca2cadd1 100644 --- a/deepmd/dpmodel/utils/neighbor_stat.py +++ b/deepmd/dpmodel/utils/neighbor_stat.py @@ -21,7 +21,7 @@ class NeighborStatOP(NativeOP): - """Class for getting neighbor statics data information. + """Class for getting neighbor statistics data information. Parameters ---------- diff --git a/deepmd/dpmodel/utils/network.py b/deepmd/dpmodel/utils/network.py index 339035ff4e..5140a88c97 100644 --- a/deepmd/dpmodel/utils/network.py +++ b/deepmd/dpmodel/utils/network.py @@ -600,7 +600,7 @@ class EN(T_Network): resnet_dt Use time step at the resnet architecture. precision - Floating point precision for the model paramters. + Floating point precision for the model parameters. seed : int, optional Random seed. bias : bool, Optional @@ -704,7 +704,7 @@ class FN(T_EmbeddingNet): resnet_dt Use time step at the resnet architecture. precision - Floating point precision for the model paramters. + Floating point precision for the model parameters. bias_out The last linear layer has bias. seed : int, optional @@ -794,7 +794,7 @@ def deserialize(cls, data: dict) -> "FittingNet": class NetworkCollection: """A collection of networks for multiple elements. - The number of dimesions for types might be 0, 1, or 2. + The number of dimensions for types might be 0, 1, or 2. - 0: embedding or fitting with type embedding, in () - 1: embedding with type_one_side, or fitting, in (type_i) - 2: embedding without type_one_side, in (type_i, type_j) diff --git a/deepmd/dpmodel/utils/nlist.py b/deepmd/dpmodel/utils/nlist.py index 3ef17fc6b9..b827032588 100644 --- a/deepmd/dpmodel/utils/nlist.py +++ b/deepmd/dpmodel/utils/nlist.py @@ -48,7 +48,7 @@ def extend_input_and_build_neighbor_list( return extended_coord, extended_atype, mapping, nlist -## translated from torch implemantation by chatgpt +## translated from torch implementation by chatgpt def build_neighbor_list( coord: np.ndarray, atype: np.ndarray, @@ -57,7 +57,7 @@ def build_neighbor_list( sel: Union[int, list[int]], distinguish_types: bool = True, ) -> np.ndarray: - """Build neightbor list for a single frame. keeps nsel neighbors. + """Build neighbor list for a single frame. keeps nsel neighbors. Parameters ---------- @@ -185,7 +185,7 @@ def get_multiple_nlist_key(rcut: float, nsel: int) -> str: return str(rcut) + "_" + str(nsel) -## translated from torch implemantation by chatgpt +## translated from torch implementation by chatgpt def build_multiple_neighbor_list( coord: np.ndarray, nlist: np.ndarray, @@ -243,7 +243,7 @@ def build_multiple_neighbor_list( return ret -## translated from torch implemantation by chatgpt +## translated from torch implementation by chatgpt def extend_coord_with_ghosts( coord: np.ndarray, atype: np.ndarray, @@ -272,7 +272,7 @@ def extend_coord_with_ghosts( extended_atype: np.ndarray extended atom type of shape [-1, nall]. index_mapping: np.ndarray - maping extended index to the local index + mapping extended index to the local index """ xp = array_api_compat.array_namespace(coord, atype) diff --git a/deepmd/dpmodel/utils/region.py b/deepmd/dpmodel/utils/region.py index 8102020827..8b24cbf948 100644 --- a/deepmd/dpmodel/utils/region.py +++ b/deepmd/dpmodel/utils/region.py @@ -59,7 +59,7 @@ def normalize_coord( Parameters ---------- coord : np.ndarray - orignal coordinates of shape [*, na, 3]. + original coordinates of shape [*, na, 3]. cell : np.ndarray simulation cell shape [*, 3, 3]. diff --git a/deepmd/driver.py b/deepmd/driver.py index 998edcbc18..30916259aa 100644 --- a/deepmd/driver.py +++ b/deepmd/driver.py @@ -3,7 +3,7 @@ # Derived from https://github.com/deepmodeling/dpdata/blob/18a0ed5ebced8b1f6887038883d46f31ae9990a4/dpdata/plugins/deepmd.py#L361-L443 # under LGPL-3.0-or-later license. -# The original deepmd driver maintained in the dpdata package will be overriden. +# The original deepmd driver maintained in the dpdata package will be overridden. # The class in the dpdata package needs to handle different situations for v1 and v2 interface, # which is too complex with the development of deepmd-kit. # So, it will be a good idea to ship it with DeePMD-kit itself. diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index ad445fdea1..d9ccf392f5 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -248,7 +248,7 @@ def save_txt_file( header : str, optional header string to use in file, by default "" append : bool, optional - if true file will be appended insted of overwriting, by default False + if true file will be appended instead of overwriting, by default False """ flags = "ab" if append else "w" with fname.open(flags) as fp: @@ -1015,7 +1015,7 @@ def test_polar( detail_file : Optional[str] file where test details will be output atomic : bool - wheter to use glovbal version of polar potential + whether to use glovbal version of polar potential Returns ------- diff --git a/deepmd/env.py b/deepmd/env.py index 605dfeed99..50e52fd719 100644 --- a/deepmd/env.py +++ b/deepmd/env.py @@ -102,7 +102,7 @@ def set_default_nthreads(): def get_default_nthreads() -> tuple[int, int]: - """Get paralellism settings. + """Get parallelism settings. The method will first read the environment variables with the prefix `DP_`. If not found, it will read the environment variables with the prefix `TF_` diff --git a/deepmd/infer/__init__.py b/deepmd/infer/__init__.py index 5678494023..8a8afb165a 100644 --- a/deepmd/infer/__init__.py +++ b/deepmd/infer/__init__.py @@ -18,7 +18,7 @@ def DeepPotential(*args, **kwargs) -> "DeepEval": - """Factory function that forwards to DeepEval (for compatbility). + """Factory function that forwards to DeepEval (for compatibility). Parameters ---------- diff --git a/deepmd/infer/deep_eval.py b/deepmd/infer/deep_eval.py index 4d0134c37c..e08dc88674 100644 --- a/deepmd/infer/deep_eval.py +++ b/deepmd/infer/deep_eval.py @@ -281,7 +281,7 @@ def get_ntypes_spin(self) -> int: """Get the number of spin atom types of this model. Only used in old implement.""" def get_model_def_script(self) -> dict: - """Get model defination script.""" + """Get model definition script.""" raise NotImplementedError("Not implemented in this backend.") @@ -548,5 +548,5 @@ def get_ntypes_spin(self) -> int: return self.deep_eval.get_ntypes_spin() def get_model_def_script(self) -> dict: - """Get model defination script.""" + """Get model definition script.""" return self.deep_eval.get_model_def_script() diff --git a/deepmd/infer/model_devi.py b/deepmd/infer/model_devi.py index 68100ba739..304aabdadc 100644 --- a/deepmd/infer/model_devi.py +++ b/deepmd/infer/model_devi.py @@ -378,7 +378,7 @@ def make_model_devi( frequency : int The number of steps that elapse between writing coordinates in a trajectory by a MD engine (such as Gromacs / LAMMPS). - This paramter is used to determine the index in the output file. + This parameter is used to determine the index in the output file. real_error : bool, default: False If True, calculate the RMS real error instead of model deviation. atomic : bool, default: False diff --git a/deepmd/loggers/loggers.py b/deepmd/loggers/loggers.py index 33b9497507..f42c032cfa 100644 --- a/deepmd/loggers/loggers.py +++ b/deepmd/loggers/loggers.py @@ -23,7 +23,7 @@ __all__ = ["set_log_handles"] -# logger formater +# logger formatter FFORMATTER = logging.Formatter( "[%(asctime)s] %(app_name)s %(levelname)-7s %(name)-45s %(message)s" ) @@ -61,7 +61,7 @@ def filter(self, record): class _MPIMasterFilter(logging.Filter): - """Filter that lets through only messages emited from rank==0.""" + """Filter that lets through only messages emitted from rank==0.""" def __init__(self, rank: int) -> None: super().__init__(name="MPI_master_log") @@ -138,7 +138,7 @@ def _open(self): return _MPIFileStream(self.baseFilename, self.MPI, self.mode) def setStream(self, stream): - """Stream canot be reasigned in MPI mode.""" + """Stream cannot be reasigned in MPI mode.""" raise NotImplementedError("Unable to do for MPI file handler!") @@ -254,7 +254,7 @@ def set_log_handles( fh.setFormatter(FFORMATTER_MPI) elif mpi_log == "workers": rank = MPI.COMM_WORLD.Get_rank() - # if file has suffix than inser rank number before suffix + # if file has suffix than insert rank number before suffix # e.g deepmd.log -> deepmd_.log # if no suffix is present, insert rank as suffix # e.g. deepmdlog -> deepmdlog. diff --git a/deepmd/pt/cxx_op.py b/deepmd/pt/cxx_op.py index d46f20a0bc..b0653522b2 100644 --- a/deepmd/pt/cxx_op.py +++ b/deepmd/pt/cxx_op.py @@ -76,7 +76,7 @@ def load_library(module_name: str) -> bool: "instead." ) from e error_message = ( - "This deepmd-kit package is inconsitent with PyTorch " + "This deepmd-kit package is inconsistent with PyTorch " f"Runtime, thus an error is raised when loading {module_name}. " "You need to rebuild deepmd-kit against this PyTorch " "runtime." diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index 8f0b686e7b..934cafdb47 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -70,7 +70,7 @@ class DeepEval(DeepEvalBackend): - """PyTorch backend implementaion of DeepEval. + """PyTorch backend implementation of DeepEval. Parameters ---------- @@ -601,7 +601,7 @@ def eval_typeebd(self) -> np.ndarray: return to_numpy_array(typeebd) def get_model_def_script(self) -> str: - """Get model defination script.""" + """Get model definition script.""" return self.model_def_script def eval_descriptor( diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index bd3c2b49ab..e26549581e 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -68,7 +68,7 @@ class BaseAtomicModel(torch.nn.Module, BaseAtomicModel_): Specifying atomic energy contribution in vacuum. Given by key:value pairs. The value is a list specifying the bias. the elements can be None or np.ndarray of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] - The `set_davg_zero` key in the descrptor should be set. + The `set_davg_zero` key in the descriptor should be set. """ @@ -150,7 +150,7 @@ def make_atom_mask( self, atype: torch.Tensor, ) -> torch.Tensor: - """The atoms with type < 0 are treated as virutal atoms, + """The atoms with type < 0 are treated as virtual atoms, which serves as place-holders for multi-frame calculations with different number of atoms in different frames. @@ -162,7 +162,7 @@ def make_atom_mask( Returns ------- mask - True for real atoms and False for virutal atoms. + True for real atoms and False for virtual atoms. """ # supposed to be supported by all backends @@ -202,7 +202,7 @@ def forward_common_atomic( Parameters ---------- extended_coord - extended coodinates, shape: nf x (nall x 3) + extended coordinates, shape: nf x (nall x 3) extended_atype extended atom typs, shape: nf x nall for a type < 0 indicating the atomic is virtual. diff --git a/deepmd/pt/model/atomic_model/dp_atomic_model.py b/deepmd/pt/model/atomic_model/dp_atomic_model.py index edb1253234..48c8d0d859 100644 --- a/deepmd/pt/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pt/model/atomic_model/dp_atomic_model.py @@ -175,7 +175,7 @@ def forward_atomic( Parameters ---------- extended_coord - coodinates in extended region + coordinates in extended region extended_atype atomic type in extended region nlist diff --git a/deepmd/pt/model/atomic_model/linear_atomic_model.py b/deepmd/pt/model/atomic_model/linear_atomic_model.py index 0aa5afc67f..570fcdcc43 100644 --- a/deepmd/pt/model/atomic_model/linear_atomic_model.py +++ b/deepmd/pt/model/atomic_model/linear_atomic_model.py @@ -199,7 +199,7 @@ def forward_atomic( Parameters ---------- extended_coord - coodinates in extended region, (nframes, nall * 3) + coordinates in extended region, (nframes, nall * 3) extended_atype atomic type in extended region, (nframes, nall) nlist @@ -489,7 +489,7 @@ class DPZBLLinearEnergyAtomicModel(LinearEnergyAtomicModel): Mapping atom type to the name (str) of the type. For example `type_map[1]` gives the name of the type 1. smin_alpha - The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. + The short-range tabulated interaction will be switched according to the distance of the nearest neighbor. This distance is calculated by softmin. """ diff --git a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py index 28a165d501..87e3027bc8 100644 --- a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py +++ b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py @@ -59,7 +59,7 @@ class PairTabAtomicModel(BaseAtomicModel): rcond : float, optional The condition number for the regression of atomic energy. atom_ener - Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set. + Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descriptor should be set. """ @@ -104,7 +104,7 @@ def __init__( ) # self.model_type = "ener" - # self.model_version = MODEL_VERSION ## this shoud be in the parent class + # self.model_version = MODEL_VERSION ## this should be in the parent class if isinstance(sel, int): self.sel = sel diff --git a/deepmd/pt/model/descriptor/descriptor.py b/deepmd/pt/model/descriptor/descriptor.py index 03173a7693..5d36606760 100644 --- a/deepmd/pt/model/descriptor/descriptor.py +++ b/deepmd/pt/model/descriptor/descriptor.py @@ -129,7 +129,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ diff --git a/deepmd/pt/model/descriptor/dpa1.py b/deepmd/pt/model/descriptor/dpa1.py index 322fa3a12d..d3156f7c84 100644 --- a/deepmd/pt/model/descriptor/dpa1.py +++ b/deepmd/pt/model/descriptor/dpa1.py @@ -344,11 +344,11 @@ def get_dim_emb(self) -> int: return self.se_atten.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -371,7 +371,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -620,7 +620,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/descriptor/dpa2.py b/deepmd/pt/model/descriptor/dpa2.py index 632efe5dbf..277aa4917f 100644 --- a/deepmd/pt/model/descriptor/dpa2.py +++ b/deepmd/pt/model/descriptor/dpa2.py @@ -343,11 +343,11 @@ def get_dim_emb(self) -> int: return self.repformers.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -373,7 +373,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -819,7 +819,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/descriptor/hybrid.py b/deepmd/pt/model/descriptor/hybrid.py index c8730e3465..ba64f53ef7 100644 --- a/deepmd/pt/model/descriptor/hybrid.py +++ b/deepmd/pt/model/descriptor/hybrid.py @@ -70,7 +70,7 @@ def __init__( for ii in range(1, self.numb_descrpt): assert ( self.descrpt_list[ii].get_ntypes() == self.descrpt_list[0].get_ntypes() - ), f"number of atom types in {ii}th descrptor does not match others" + ), f"number of atom types in {ii}th descriptor does not match others" # if hybrid sel is larger than sub sel, the nlist needs to be cut for each type self.nlist_cut_idx: list[torch.Tensor] = [] if self.mixed_types() and not all( @@ -168,7 +168,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -308,7 +308,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/descriptor/repformer_layer.py b/deepmd/pt/model/descriptor/repformer_layer.py index 5270c94112..31132f365e 100644 --- a/deepmd/pt/model/descriptor/repformer_layer.py +++ b/deepmd/pt/model/descriptor/repformer_layer.py @@ -1105,7 +1105,7 @@ def forward( """ Parameters ---------- - g1_ext : nf x nall x ng1 extended single-atom chanel + g1_ext : nf x nall x ng1 extended single-atom channel g2 : nf x nloc x nnei x ng2 pair-atom channel, invariant h2 : nf x nloc x nnei x 3 pair-atom channel, equivariant nlist : nf x nloc x nnei neighbor list (padded neis are set to 0) @@ -1114,7 +1114,7 @@ def forward( Returns ------- - g1: nf x nloc x ng1 updated single-atom chanel + g1: nf x nloc x ng1 updated single-atom channel g2: nf x nloc x nnei x ng2 updated pair-atom channel, invariant h2: nf x nloc x nnei x 3 updated pair-atom channel, equivariant """ diff --git a/deepmd/pt/model/descriptor/repformers.py b/deepmd/pt/model/descriptor/repformers.py index 023a84b3ee..81d96d4372 100644 --- a/deepmd/pt/model/descriptor/repformers.py +++ b/deepmd/pt/model/descriptor/repformers.py @@ -60,7 +60,7 @@ def border_op( "See documentation for DPA-2 for details." ) - # Note: this hack cannot actually save a model that can be runned using LAMMPS. + # Note: this hack cannot actually save a model that can be run using LAMMPS. torch.ops.deepmd.border_op = border_op @@ -342,11 +342,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/pt/model/descriptor/se_a.py b/deepmd/pt/model/descriptor/se_a.py index 8f3c7605d5..56cb1f5bc6 100644 --- a/deepmd/pt/model/descriptor/se_a.py +++ b/deepmd/pt/model/descriptor/se_a.py @@ -164,7 +164,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -342,7 +342,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -479,11 +479,11 @@ def get_dim_in(self) -> int: return self.dim_in def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/pt/model/descriptor/se_atten.py b/deepmd/pt/model/descriptor/se_atten.py index 8f418c28f9..aab72f7e98 100644 --- a/deepmd/pt/model/descriptor/se_atten.py +++ b/deepmd/pt/model/descriptor/se_atten.py @@ -298,11 +298,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/pt/model/descriptor/se_r.py b/deepmd/pt/model/descriptor/se_r.py index 12677a3daf..0aa50c613f 100644 --- a/deepmd/pt/model/descriptor/se_r.py +++ b/deepmd/pt/model/descriptor/se_r.py @@ -163,11 +163,11 @@ def get_dim_in(self) -> int: return 0 def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -190,7 +190,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -473,7 +473,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/descriptor/se_t.py b/deepmd/pt/model/descriptor/se_t.py index 666eba6baf..7b83bcbd69 100644 --- a/deepmd/pt/model/descriptor/se_t.py +++ b/deepmd/pt/model/descriptor/se_t.py @@ -198,7 +198,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -372,7 +372,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -529,11 +529,11 @@ def get_dim_in(self) -> int: return self.dim_in def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/pt/model/descriptor/se_t_tebd.py b/deepmd/pt/model/descriptor/se_t_tebd.py index 9ee9b4dc0b..82ccb06f32 100644 --- a/deepmd/pt/model/descriptor/se_t_tebd.py +++ b/deepmd/pt/model/descriptor/se_t_tebd.py @@ -215,11 +215,11 @@ def get_dim_emb(self) -> int: return self.se_ttebd.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -242,7 +242,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -470,7 +470,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -632,11 +632,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/pt/model/model/dp_linear_model.py b/deepmd/pt/model/model/dp_linear_model.py index ef2e84bd19..d19070fc5b 100644 --- a/deepmd/pt/model/model/dp_linear_model.py +++ b/deepmd/pt/model/model/dp_linear_model.py @@ -140,7 +140,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/model/dp_model.py b/deepmd/pt/model/model/dp_model.py index bd278ed787..e71c5e08de 100644 --- a/deepmd/pt/model/model/dp_model.py +++ b/deepmd/pt/model/model/dp_model.py @@ -28,7 +28,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/model/dp_zbl_model.py b/deepmd/pt/model/model/dp_zbl_model.py index 59147e1d4c..e1ef00f5fe 100644 --- a/deepmd/pt/model/model/dp_zbl_model.py +++ b/deepmd/pt/model/model/dp_zbl_model.py @@ -140,7 +140,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/model/frozen.py b/deepmd/pt/model/model/frozen.py index 431c035339..37149303d4 100644 --- a/deepmd/pt/model/model/frozen.py +++ b/deepmd/pt/model/model/frozen.py @@ -182,7 +182,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/model/make_model.py b/deepmd/pt/model/model/make_model.py index 46b7e51109..a9d5e26060 100644 --- a/deepmd/pt/model/model/make_model.py +++ b/deepmd/pt/model/model/make_model.py @@ -221,7 +221,7 @@ def forward_common_lower( Parameters ---------- extended_coord - coodinates in extended region. nf x (nall x 3) + coordinates in extended region. nf x (nall x 3) extended_atype atomic type in extended region. nf x nall nlist @@ -362,7 +362,7 @@ def format_nlist( the `nlist` is pad with -1. 3. If the number of neighbors in the `nlist` is larger than sum(self.sel), - the nearest sum(sel) neighbors will be preseved. + the nearest sum(sel) neighbors will be preserved. Known limitations: @@ -372,7 +372,7 @@ def format_nlist( Parameters ---------- extended_coord - coodinates in extended region. nf x nall x 3 + coordinates in extended region. nf x nall x 3 extended_atype atomic type in extended region. nf x nall nlist @@ -383,7 +383,7 @@ def format_nlist( Returns ------- formated_nlist - the formated nlist. + the formatted nlist. """ mixed_types = self.mixed_types() diff --git a/deepmd/pt/model/model/spin_model.py b/deepmd/pt/model/model/spin_model.py index a9f6e4d75a..bc1bc81a74 100644 --- a/deepmd/pt/model/model/spin_model.py +++ b/deepmd/pt/model/model/spin_model.py @@ -105,9 +105,9 @@ def process_spin_output( """ Split the output both real and virtual atoms, and scale the latter. add_mag: whether to add magnetic tensor onto the real tensor. - Default: True. e.g. Ture for forces and False for atomic virials on real atoms. + Default: True. e.g. True for forces and False for atomic virials on real atoms. virtual_scale: whether to scale the magnetic tensor with virtual scale factor. - Default: True. e.g. Ture for forces and False for atomic virials on virtual atoms. + Default: True. e.g. True for forces and False for atomic virials on virtual atoms. """ nframes, nloc_double = out_tensor.shape[:2] nloc = nloc_double // 2 @@ -138,9 +138,9 @@ def process_spin_output_lower( """ Split the extended output of both real and virtual atoms with switch, and scale the latter. add_mag: whether to add magnetic tensor onto the real tensor. - Default: True. e.g. Ture for forces and False for atomic virials on real atoms. + Default: True. e.g. True for forces and False for atomic virials on real atoms. virtual_scale: whether to scale the magnetic tensor with virtual scale factor. - Default: True. e.g. Ture for forces and False for atomic virials on virtual atoms. + Default: True. e.g. True for forces and False for atomic virials on virtual atoms. """ nframes, nall_double = extended_out_tensor.shape[:2] nall = nall_double // 2 diff --git a/deepmd/pt/model/network/init.py b/deepmd/pt/model/network/init.py index 0bab6b66bd..fe3c034637 100644 --- a/deepmd/pt/model/network/init.py +++ b/deepmd/pt/model/network/init.py @@ -17,7 +17,7 @@ # These no_grad_* functions are necessary as wrappers around the parts of these # functions that use `with torch.no_grad()`. The JIT doesn't support context # managers, so these need to be implemented as builtins. Using these wrappers -# lets us keep those builtins small and re-usable. +# lets us keep those builtins small and reusable. def _no_grad_uniform_(tensor, a, b, generator=None): with torch.no_grad(): return tensor.uniform_(a, b, generator=generator) diff --git a/deepmd/pt/model/network/network.py b/deepmd/pt/model/network/network.py index 12e1eabf22..88ea108ce7 100644 --- a/deepmd/pt/model/network/network.py +++ b/deepmd/pt/model/network/network.py @@ -300,7 +300,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index dd32042564..df65f1cd18 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -39,7 +39,7 @@ def __init__( - ntypes: Element count. - embedding_width: Embedding width per atom. - neuron: Number of neurons in each hidden layers of the fitting net. - - bias_atom_e: Average enery per atom for each element. + - bias_atom_e: Average energy per atom for each element. - resnet_dt: Using time-step in the ResNet construction. """ super().__init__() diff --git a/deepmd/pt/model/task/ener.py b/deepmd/pt/model/task/ener.py index e0c5b0951e..ee8372c3ac 100644 --- a/deepmd/pt/model/task/ener.py +++ b/deepmd/pt/model/task/ener.py @@ -117,7 +117,7 @@ def __init__( - ntypes: Element count. - embedding_width: Embedding width per atom. - neuron: Number of neurons in each hidden layers of the fitting net. - - bias_atom_e: Average enery per atom for each element. + - bias_atom_e: Average energy per atom for each element. - resnet_dt: Using time-step in the ResNet construction. """ super().__init__() diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py index 15837aca98..bae46c2adb 100644 --- a/deepmd/pt/model/task/fitting.py +++ b/deepmd/pt/model/task/fitting.py @@ -59,7 +59,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -96,7 +96,7 @@ class GeneralFitting(Fitting): neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_e : torch.Tensor, optional - Average enery per atom for each element. + Average energy per atom for each element. resnet_dt : bool Using time-step in the ResNet construction. numb_fparam : int @@ -121,9 +121,9 @@ class GeneralFitting(Fitting): Now this only supports setting all the parameters in the fitting net at one state. When in list[bool], the trainable will be True only if all the boolean parameters are True. remove_vaccum_contribution: list[bool], optional - Remove vaccum contribution before the bias is added. The list assigned each + Remove vacuum contribution before the bias is added. The list assigned each type. For `mixed_types` provide `[True]`, otherwise it should be a list of the same - length as `ntypes` signaling if or not removing the vaccum contribution for the atom types in the list. + length as `ntypes` signaling if or not removing the vacuum contribution for the atom types in the list. type_map: list[str], Optional A list of strings. Give the name to each type of atoms. use_aparam_as_mask: bool @@ -407,9 +407,9 @@ def _forward_common( xx = descriptor if self.remove_vaccum_contribution is not None: # TODO: compute the input for vaccm when remove_vaccum_contribution is set - # Idealy, the input for vaccum should be computed; + # Ideally, the input for vacuum should be computed; # we consider it as always zero for convenience. - # Needs a compute_input_stats for vaccum passed from the + # Needs a compute_input_stats for vacuum passed from the # descriptor. xx_zeros = torch.zeros_like(xx) else: diff --git a/deepmd/pt/model/task/invar_fitting.py b/deepmd/pt/model/task/invar_fitting.py index e76e1d2063..b9be26cfdc 100644 --- a/deepmd/pt/model/task/invar_fitting.py +++ b/deepmd/pt/model/task/invar_fitting.py @@ -50,7 +50,7 @@ class InvarFitting(GeneralFitting): neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_e : torch.Tensor, optional - Average enery per atom for each element. + Average energy per atom for each element. resnet_dt : bool Using time-step in the ResNet construction. numb_fparam : int @@ -74,7 +74,7 @@ class InvarFitting(GeneralFitting): Specifying atomic energy contribution in vacuum. The value is a list specifying the bias. the elements can be None or np.array of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] - The `set_davg_zero` key in the descrptor should be set. + The `set_davg_zero` key in the descriptor should be set. type_map: list[str], Optional A list of strings. Give the name to each type of atoms. use_aparam_as_mask: bool diff --git a/deepmd/pt/model/task/property.py b/deepmd/pt/model/task/property.py index cc6a4e8745..4017f51468 100644 --- a/deepmd/pt/model/task/property.py +++ b/deepmd/pt/model/task/property.py @@ -35,7 +35,7 @@ @Fitting.register("property") class PropertyFittingNet(InvarFitting): - """Fitting the rotationally invariant porperties of `task_dim` of the system. + """Fitting the rotationally invariant properties of `task_dim` of the system. Parameters ---------- diff --git a/deepmd/pt/model/task/type_predict.py b/deepmd/pt/model/task/type_predict.py index c696590043..e8a5db62b5 100644 --- a/deepmd/pt/model/task/type_predict.py +++ b/deepmd/pt/model/task/type_predict.py @@ -19,7 +19,7 @@ def __init__(self, feature_dim, ntypes, activation_function="gelu", **kwargs): Args: - feature_dim: Input dm. - - ntypes: Numer of types to predict. + - ntypes: Number of types to predict. - activation_function: Activate function. """ super().__init__() diff --git a/deepmd/pt/train/wrapper.py b/deepmd/pt/train/wrapper.py index 922ac296ea..17fb8477a5 100644 --- a/deepmd/pt/train/wrapper.py +++ b/deepmd/pt/train/wrapper.py @@ -63,7 +63,7 @@ def share_params(self, shared_links, resume=False): """ Share the parameters of classes following rules defined in shared_links during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ supported_types = ["descriptor", "fitting_net"] for shared_item in shared_links: diff --git a/deepmd/pt/utils/dataloader.py b/deepmd/pt/utils/dataloader.py index c7f44cfb70..581f67196c 100644 --- a/deepmd/pt/utils/dataloader.py +++ b/deepmd/pt/utils/dataloader.py @@ -301,7 +301,7 @@ def get_weighted_sampler(training_data, prob_style, sys_prob=False): else: probs = process_sys_probs(prob_style, training_data.index) log.debug("Generated weighted sampler with prob array: " + str(probs)) - # training_data.total_batch is the size of one epoch, you can increase it to avoid too many rebuilding of iteraters + # training_data.total_batch is the size of one epoch, you can increase it to avoid too many rebuilding of iterators len_sampler = training_data.total_batch * max(env.NUM_WORKERS, 1) with torch.device("cpu"): sampler = WeightedRandomSampler(probs, len_sampler, replacement=True) diff --git a/deepmd/pt/utils/env_mat_stat.py b/deepmd/pt/utils/env_mat_stat.py index cc30bd5155..b253a1b55e 100644 --- a/deepmd/pt/utils/env_mat_stat.py +++ b/deepmd/pt/utils/env_mat_stat.py @@ -61,7 +61,7 @@ def compute_stat(self, env_mat: dict[str, torch.Tensor]) -> dict[str, StatItem]: class EnvMatStatSe(EnvMatStat): - """Environmental matrix statistics for the se_a/se_r environemntal matrix. + """Environmental matrix statistics for the se_a/se_r environmental matrix. Parameters ---------- diff --git a/deepmd/pt/utils/neighbor_stat.py b/deepmd/pt/utils/neighbor_stat.py index 7d52bfaae1..64ad695827 100644 --- a/deepmd/pt/utils/neighbor_stat.py +++ b/deepmd/pt/utils/neighbor_stat.py @@ -25,7 +25,7 @@ class NeighborStatOP(torch.nn.Module): - """Class for getting neighbor statics data information. + """Class for getting neighbor statistics data information. Parameters ---------- diff --git a/deepmd/pt/utils/nlist.py b/deepmd/pt/utils/nlist.py index c30ec6dd02..db1e87785b 100644 --- a/deepmd/pt/utils/nlist.py +++ b/deepmd/pt/utils/nlist.py @@ -56,7 +56,7 @@ def build_neighbor_list( sel: Union[int, list[int]], distinguish_types: bool = True, ) -> torch.Tensor: - """Build neightbor list for a single frame. keeps nsel neighbors. + """Build neighbor list for a single frame. keeps nsel neighbors. Parameters ---------- @@ -264,7 +264,7 @@ def build_directional_neighbor_list( rr = torch.linalg.norm(diff, dim=-1) rr, nlist = torch.sort(rr, dim=-1) - # We assume that the central and neighbor atoms are diffferent, + # We assume that the central and neighbor atoms are different, # thus we do not need to exclude self-neighbors. # # if central atom has two zero distances, sorting sometimes can not exclude itself # rr -= torch.eye(nloc_cntl, nall_neig, dtype=rr.dtype, device=rr.device).unsqueeze(0) @@ -429,7 +429,7 @@ def extend_coord_with_ghosts( extended_atype: torch.Tensor extended atom type of shape [-1, nall]. index_mapping: torch.Tensor - maping extended index to the local index + mapping extended index to the local index """ device = coord.device diff --git a/deepmd/pt/utils/region.py b/deepmd/pt/utils/region.py index 6fa77125aa..3272434995 100644 --- a/deepmd/pt/utils/region.py +++ b/deepmd/pt/utils/region.py @@ -92,7 +92,7 @@ def normalize_coord( Parameters ---------- coord : torch.Tensor - orignal coordinates of shape [*, na, 3]. + original coordinates of shape [*, na, 3]. Returns ------- diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py index 831d2bef76..4028d89fc9 100644 --- a/deepmd/pt/utils/stat.py +++ b/deepmd/pt/utils/stat.py @@ -266,7 +266,7 @@ def compute_output_stats( Specifying atomic energy contribution in vacuum. Given by key:value pairs. The value is a list specifying the bias. the elements can be None or np.ndarray of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] - The `set_davg_zero` key in the descrptor should be set. + The `set_davg_zero` key in the descriptor should be set. model_forward : Callable[..., torch.Tensor], optional The wrapped forward function of atomic model. If not None, the model will be utilized to generate the original energy prediction, diff --git a/deepmd/tf/cluster/local.py b/deepmd/tf/cluster/local.py index a9392bd326..25fb1cc645 100644 --- a/deepmd/tf/cluster/local.py +++ b/deepmd/tf/cluster/local.py @@ -43,7 +43,7 @@ def get_gpus(): stdout, stderr = p.communicate() if p.returncode != 0: decoded = stderr.decode("UTF-8") - raise RuntimeError(f"Failed to detect availbe GPUs due to:\n{decoded}") + raise RuntimeError(f"Failed to detect available GPUs due to:\n{decoded}") decoded = stdout.decode("UTF-8").strip() num_gpus = int(decoded) return list(range(num_gpus)) if num_gpus > 0 else None diff --git a/deepmd/tf/descriptor/descriptor.py b/deepmd/tf/descriptor/descriptor.py index ba54ca1309..dd86beb21e 100644 --- a/deepmd/tf/descriptor/descriptor.py +++ b/deepmd/tf/descriptor/descriptor.py @@ -222,7 +222,7 @@ def enable_compression( check_frequency: int = -1, suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. Parameters @@ -253,7 +253,7 @@ def enable_compression( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- @@ -473,7 +473,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/hybrid.py b/deepmd/tf/descriptor/hybrid.py index e4458476c8..3f20e7d856 100644 --- a/deepmd/tf/descriptor/hybrid.py +++ b/deepmd/tf/descriptor/hybrid.py @@ -72,7 +72,7 @@ def __init__( for ii in range(1, self.numb_descrpt): assert ( self.descrpt_list[ii].get_ntypes() == self.descrpt_list[0].get_ntypes() - ), f"number of atom types in {ii}th descrptor does not match others" + ), f"number of atom types in {ii}th descriptor does not match others" def get_rcut(self) -> float: """Returns the cut-off radius.""" @@ -317,7 +317,7 @@ def enable_compression( check_frequency: int = -1, suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. Parameters @@ -352,7 +352,7 @@ def enable_compression( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- @@ -434,7 +434,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/loc_frame.py b/deepmd/tf/descriptor/loc_frame.py index 74ba755b4c..9b338a5d25 100644 --- a/deepmd/tf/descriptor/loc_frame.py +++ b/deepmd/tf/descriptor/loc_frame.py @@ -72,7 +72,7 @@ def __init__( self.ntypes = len(self.sel_a) assert self.ntypes == len(self.sel_r) self.rcut_a = -1 - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei_r = np.cumsum(self.sel_r)[-1] self.nnei = self.nnei_a + self.nnei_r @@ -443,7 +443,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/se.py b/deepmd/tf/descriptor/se.py index 319a65f6da..746ea8c628 100644 --- a/deepmd/tf/descriptor/se.py +++ b/deepmd/tf/descriptor/se.py @@ -35,7 +35,7 @@ class DescrptSe(Descriptor): ----- All of these descriptors have an environmental matrix and an embedding network (:meth:`deepmd.tf.utils.network.embedding_net`), so - they can share some similiar methods without defining them twice. + they can share some similar methods without defining them twice. Attributes ---------- @@ -162,7 +162,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/se_a.py b/deepmd/tf/descriptor/se_a.py index d5a8ed6815..a0b6b810e4 100644 --- a/deepmd/tf/descriptor/se_a.py +++ b/deepmd/tf/descriptor/se_a.py @@ -237,7 +237,7 @@ def __init__( self.ntypes = len(self.sel_a) assert self.ntypes == len(self.sel_r) self.rcut_a = -1 - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei_r = np.cumsum(self.sel_r)[-1] self.nnei = self.nnei_a + self.nnei_r @@ -448,7 +448,7 @@ def enable_compression( check_frequency: int = -1, suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. Parameters ---------- @@ -502,7 +502,7 @@ def enable_compression( ) elif len(ret_one_side) != 0 and len(ret_two_side) != 0: raise RuntimeError( - "both one side and two side embedding net varaibles are detected, it is a wrong model." + "both one side and two side embedding net variables are detected, it is a wrong model." ) elif len(ret_two_side) != 0: self.final_type_embedding = get_two_side_type_embedding(self, graph) @@ -548,7 +548,7 @@ def enable_compression( self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std") def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- diff --git a/deepmd/tf/descriptor/se_a_ebd_v2.py b/deepmd/tf/descriptor/se_a_ebd_v2.py index af43eedbbc..035fc6509c 100644 --- a/deepmd/tf/descriptor/se_a_ebd_v2.py +++ b/deepmd/tf/descriptor/se_a_ebd_v2.py @@ -23,7 +23,7 @@ class DescrptSeAEbdV2(DescrptSeA): r"""A compressible se_a_ebd model. - This model is a warpper for DescriptorSeA, which set tebd_input_mode='strip'. + This model is a wrapper for DescriptorSeA, which set tebd_input_mode='strip'. """ def __init__( diff --git a/deepmd/tf/descriptor/se_a_ef.py b/deepmd/tf/descriptor/se_a_ef.py index 9f70464c56..bf891e6032 100644 --- a/deepmd/tf/descriptor/se_a_ef.py +++ b/deepmd/tf/descriptor/se_a_ef.py @@ -348,7 +348,7 @@ def __init__( self.ntypes = len(self.sel_a) assert self.ntypes == len(self.sel_r) self.rcut_a = -1 - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei_r = np.cumsum(self.sel_r)[-1] self.nnei = self.nnei_a + self.nnei_r diff --git a/deepmd/tf/descriptor/se_a_mask.py b/deepmd/tf/descriptor/se_a_mask.py index e12f6a0fff..5667122809 100644 --- a/deepmd/tf/descriptor/se_a_mask.py +++ b/deepmd/tf/descriptor/se_a_mask.py @@ -157,7 +157,7 @@ def __init__( self.ntypes = len(self.sel_a) assert self.ntypes == len(self.sel_r) self.rcut_a = -1 - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei = self.nnei_a # to be compat with old option of `stripped_type_embedding` @@ -435,7 +435,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/se_atten.py b/deepmd/tf/descriptor/se_atten.py index 8d101f151c..7bfb784419 100644 --- a/deepmd/tf/descriptor/se_atten.py +++ b/deepmd/tf/descriptor/se_atten.py @@ -425,7 +425,7 @@ def enable_compression( suffix: str = "", tebd_suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. Parameters ---------- @@ -707,7 +707,7 @@ def _pass_filter( assert ( input_dict is not None and input_dict.get("type_embedding", None) is not None - ), "se_atten desctiptor must use type_embedding" + ), "se_atten descriptor must use type_embedding" type_embedding = input_dict.get("type_embedding", None) inputs = tf.reshape(inputs, [-1, natoms[0], self.ndescrpt]) output = [] @@ -1434,9 +1434,9 @@ def build_type_exclude_mask_mixed( Notes ----- - This method has the similiar way to build the type exclude mask as + This method has the similar way to build the type exclude mask as :meth:`deepmd.tf.descriptor.descriptor.Descriptor.build_type_exclude_mask`. - The mathmatical expression has been explained in that method. + The mathematical expression has been explained in that method. The difference is that the attention descriptor has provided the type of the neighbors (idx_j) that is not in order, so we use it from an extra input. @@ -1521,7 +1521,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/se_r.py b/deepmd/tf/descriptor/se_r.py index 8096ef7c96..752642c1d5 100644 --- a/deepmd/tf/descriptor/se_r.py +++ b/deepmd/tf/descriptor/se_r.py @@ -149,7 +149,7 @@ def __init__( # descrpt config self.sel_a = [0 for ii in range(len(self.sel_r))] self.ntypes = len(self.sel_r) - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei_r = np.cumsum(self.sel_r)[-1] self.nnei = self.nnei_a + self.nnei_r @@ -325,7 +325,7 @@ def enable_compression( check_frequency: int = -1, suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. Parameters ---------- diff --git a/deepmd/tf/descriptor/se_t.py b/deepmd/tf/descriptor/se_t.py index f96b1ba778..464839aeac 100644 --- a/deepmd/tf/descriptor/se_t.py +++ b/deepmd/tf/descriptor/se_t.py @@ -145,7 +145,7 @@ def __init__( self.ntypes = len(self.sel_a) assert self.ntypes == len(self.sel_r) self.rcut_a = -1 - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei_r = np.cumsum(self.sel_r)[-1] self.nnei = self.nnei_a + self.nnei_r @@ -332,7 +332,7 @@ def enable_compression( check_frequency: int = -1, suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. Parameters ---------- diff --git a/deepmd/tf/entrypoints/freeze.py b/deepmd/tf/entrypoints/freeze.py index cee6615abc..2658f565a6 100755 --- a/deepmd/tf/entrypoints/freeze.py +++ b/deepmd/tf/entrypoints/freeze.py @@ -59,7 +59,7 @@ def _transfer_fitting_net_trainable_variables(sess, old_graph_def, raw_graph_def raw_graph_def, # The graph_def is used to retrieve the nodes [ n + "_1" for n in old_graph_nodes - ], # The output node names are used to select the usefull nodes + ], # The output node names are used to select the useful nodes ) except AssertionError: # if there's no additional nodes @@ -275,7 +275,7 @@ def freeze_graph( output_graph_def = tf.graph_util.convert_variables_to_constants( sess, # The session is used to retrieve the weights input_graph, # The graph_def is used to retrieve the nodes - output_node, # The output node names are used to select the usefull nodes + output_node, # The output node names are used to select the useful nodes ) # If we need to transfer the fitting net variables @@ -334,7 +334,7 @@ def freeze( # We import the meta graph and retrieve a Saver try: - # In case paralle training + # In case parallel training import horovod.tensorflow as HVD except ImportError: pass diff --git a/deepmd/tf/entrypoints/ipi.py b/deepmd/tf/entrypoints/ipi.py index 1183375119..a08a2293a9 100644 --- a/deepmd/tf/entrypoints/ipi.py +++ b/deepmd/tf/entrypoints/ipi.py @@ -13,7 +13,7 @@ def _program(name: str, args: list[str]): - """Execuate a program. + """Execute a program. Parameters ---------- diff --git a/deepmd/tf/entrypoints/main.py b/deepmd/tf/entrypoints/main.py index d9dff4eb4a..b8bfdef6d8 100644 --- a/deepmd/tf/entrypoints/main.py +++ b/deepmd/tf/entrypoints/main.py @@ -60,7 +60,7 @@ def main(args: Optional[Union[list[str], argparse.Namespace]] = None): args = parse_args(args=args) # do not set log handles for None, it is useless - # log handles for train will be set separatelly + # log handles for train will be set separately # when the use of MPI will be determined in `RunOptions` if args.command not in (None, "train"): set_log_handles(args.log_level, Path(args.log_path) if args.log_path else None) diff --git a/deepmd/tf/entrypoints/train.py b/deepmd/tf/entrypoints/train.py index 66622b3182..3d965ea71c 100755 --- a/deepmd/tf/entrypoints/train.py +++ b/deepmd/tf/entrypoints/train.py @@ -114,7 +114,7 @@ def train( mpi_log=mpi_log, ) if run_opt.is_distrib and len(run_opt.gpus or []) > 1: - # avoid conflict of visible gpus among multipe tf sessions in one process + # avoid conflict of visible gpus among multiple tf sessions in one process reset_default_tf_session_config(cpu_only=True) # load json database diff --git a/deepmd/tf/entrypoints/transfer.py b/deepmd/tf/entrypoints/transfer.py index b93caf3cac..52bf56c4fd 100644 --- a/deepmd/tf/entrypoints/transfer.py +++ b/deepmd/tf/entrypoints/transfer.py @@ -1,5 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -"""Module used for transfering parameters between models.""" +"""Module used for transferring parameters between models.""" import logging import re @@ -43,7 +43,7 @@ def convert_matrix( shape : Sequence[int] shape to cast resulting array to dtype : Optional[type] - type that finall array will be cast to, If None no casting will take place + type that final array will be cast to, If None no casting will take place Returns ------- @@ -58,7 +58,7 @@ def convert_matrix( def transfer(*, old_model: str, raw_model: str, output: str, **kwargs): - """Transfer operation from old fron graph to new prepared raw graph. + """Transfer operation from old from graph to new prepared raw graph. Parameters ---------- @@ -67,7 +67,7 @@ def transfer(*, old_model: str, raw_model: str, output: str, **kwargs): raw_model : str new model that will accept ops from old model output : str - new model with transfered parameters will be saved to this location + new model with transferred parameters will be saved to this location **kwargs additional arguments """ @@ -104,7 +104,7 @@ def load_graph(graph_name: str) -> tf.Graph: def transform_graph(raw_graph: tf.Graph, old_graph: tf.Graph) -> tf.Graph: - """Trasform old graph into new. + """Transform old graph into new. Parameters ---------- @@ -116,7 +116,7 @@ def transform_graph(raw_graph: tf.Graph, old_graph: tf.Graph) -> tf.Graph: Returns ------- tf.Graph - new graph with parameters transfered form the old one + new graph with parameters transferred form the old one """ old_graph_def = old_graph.as_graph_def() raw_graph_def = raw_graph.as_graph_def() diff --git a/deepmd/tf/env.py b/deepmd/tf/env.py index 5a66498dba..16ad4735fd 100644 --- a/deepmd/tf/env.py +++ b/deepmd/tf/env.py @@ -1,5 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -"""Module that sets tensorflow working environment and exports inportant constants.""" +"""Module that sets tensorflow working environment and exports important constants.""" import ctypes import logging @@ -92,7 +92,7 @@ def filter(self, record): # https://keras.io/getting_started/#tensorflow--keras-2-backwards-compatibility # 2024/04/24: deepmd.tf doesn't import tf.keras any more -# import tensorflow v1 compatability +# import tensorflow v1 compatibility import tensorflow.compat.v1 as tf tf.get_logger().addFilter(TFWarningFilter()) @@ -339,7 +339,7 @@ def get_module(module_name: str) -> "ModuleType": try: module = tf.load_op_library(str(module_file)) except tf.errors.NotFoundError as e: - # check CXX11_ABI_FLAG is compatiblity + # check CXX11_ABI_FLAG is compatibility # see https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html # ABI should be the same if "CXX11_ABI_FLAG" in tf.__dict__: @@ -377,7 +377,7 @@ def get_module(module_name: str) -> "ModuleType": "instead." ) from e error_message = ( - "This deepmd-kit package is inconsitent with TensorFlow " + "This deepmd-kit package is inconsistent with TensorFlow " f"Runtime, thus an error is raised when loading {module_name}. " "You need to rebuild deepmd-kit against this TensorFlow " "runtime." diff --git a/deepmd/tf/fit/dipole.py b/deepmd/tf/fit/dipole.py index 0e5b860fa2..fa8a5b680c 100644 --- a/deepmd/tf/fit/dipole.py +++ b/deepmd/tf/fit/dipole.py @@ -41,11 +41,11 @@ class DipoleFittingSeA(Fitting): Parameters ---------- ntypes - The ntypes of the descrptor :math:`\mathcal{D}` + The ntypes of the descriptor :math:`\mathcal{D}` dim_descrpt - The dimension of the descrptor :math:`\mathcal{D}` + The dimension of the descriptor :math:`\mathcal{D}` embedding_width - The rotation matrix dimension of the descrptor :math:`\mathcal{D}` + The rotation matrix dimension of the descriptor :math:`\mathcal{D}` neuron : list[int] Number of neurons in each hidden layer of the fitting net resnet_dt : bool @@ -320,7 +320,7 @@ def init_variables( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- diff --git a/deepmd/tf/fit/dos.py b/deepmd/tf/fit/dos.py index ebc347c2fd..099cba0d12 100644 --- a/deepmd/tf/fit/dos.py +++ b/deepmd/tf/fit/dos.py @@ -62,9 +62,9 @@ class DOSFitting(Fitting): Parameters ---------- ntypes - The ntypes of the descrptor :math:`\mathcal{D}` + The ntypes of the descriptor :math:`\mathcal{D}` dim_descrpt - The dimension of the descrptor :math:`\mathcal{D}` + The dimension of the descriptor :math:`\mathcal{D}` neuron Number of neurons :math:`N` in each hidden layer of the fitting net resnet_dt @@ -187,7 +187,7 @@ def get_numb_dos(self) -> int: # not used def compute_output_stats(self, all_stat: dict, mixed_type: bool = False) -> None: - """Compute the ouput statistics. + """Compute the output statistics. Parameters ---------- @@ -628,7 +628,7 @@ def init_variables( pass def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py index 330ea57179..1ba0fe3dfb 100644 --- a/deepmd/tf/fit/ener.py +++ b/deepmd/tf/fit/ener.py @@ -109,9 +109,9 @@ class EnerFitting(Fitting): Parameters ---------- ntypes - The ntypes of the descrptor :math:`\mathcal{D}` + The ntypes of the descriptor :math:`\mathcal{D}` dim_descrpt - The dimension of the descrptor :math:`\mathcal{D}` + The dimension of the descriptor :math:`\mathcal{D}` neuron Number of neurons :math:`N` in each hidden layer of the fitting net resnet_dt @@ -132,7 +132,7 @@ class EnerFitting(Fitting): seed Random seed for initializing the network parameters. atom_ener - Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set. + Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descriptor should be set. activation_function The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN| precision @@ -252,7 +252,7 @@ def get_numb_aparam(self) -> int: return self.numb_aparam def compute_output_stats(self, all_stat: dict, mixed_type: bool = False) -> None: - """Compute the ouput statistics. + """Compute the output statistics. Parameters ---------- @@ -828,7 +828,7 @@ def change_energy_bias( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- @@ -995,7 +995,7 @@ def change_energy_bias_lower( bias_adjust_mode : str The mode for changing energy bias : ['change-by-statistic', 'set-by-statistic'] 'change-by-statistic' : perform predictions on energies of target dataset, - and do least sqaure on the errors to obtain the target shift as bias. + and do least square on the errors to obtain the target shift as bias. 'set-by-statistic' : directly use the statistic energy bias in the target dataset. ntest : int The number of test samples in a system to change the energy bias. diff --git a/deepmd/tf/fit/polar.py b/deepmd/tf/fit/polar.py index cc79e3402a..b5a21012bd 100644 --- a/deepmd/tf/fit/polar.py +++ b/deepmd/tf/fit/polar.py @@ -46,11 +46,11 @@ class PolarFittingSeA(Fitting): Parameters ---------- ntypes - The ntypes of the descrptor :math:`\mathcal{D}` + The ntypes of the descriptor :math:`\mathcal{D}` dim_descrpt - The dimension of the descrptor :math:`\mathcal{D}` + The dimension of the descriptor :math:`\mathcal{D}` embedding_width - The rotation matrix dimension of the descrptor :math:`\mathcal{D}` + The rotation matrix dimension of the descriptor :math:`\mathcal{D}` neuron : list[int] Number of neurons in each hidden layer of the fitting net resnet_dt : bool @@ -221,7 +221,7 @@ def compute_output_stats(self, all_stat): else: # No atomic polar in this system, so it should have global polar if ( not all_stat["find_polarizability"][ss] > 0.0 - ): # This system is jsut a joke? + ): # This system is just a joke? continue # Till here, we have global polar sys_matrix.append( @@ -526,7 +526,7 @@ def init_variables( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- @@ -618,7 +618,7 @@ class GlobalPolarFittingSeA: Parameters ---------- descrpt : tf.Tensor - The descrptor + The descriptor neuron : list[int] Number of neurons in each hidden layer of the fitting net resnet_dt : bool @@ -745,7 +745,7 @@ def init_variables( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- diff --git a/deepmd/tf/infer/deep_dipole.py b/deepmd/tf/infer/deep_dipole.py index e10d09564d..b493af8552 100644 --- a/deepmd/tf/infer/deep_dipole.py +++ b/deepmd/tf/infer/deep_dipole.py @@ -39,7 +39,7 @@ class DeepDipoleOld(DeepTensor): -------- For developers: `DeepTensor` initializer must be called at the end after `self.tensors` are modified because it uses the data in `self.tensors` dict. - Do not chanage the order! + Do not change the order! """ def __init__( diff --git a/deepmd/tf/infer/deep_eval.py b/deepmd/tf/infer/deep_eval.py index 56df7f782f..9527cb2ae8 100644 --- a/deepmd/tf/infer/deep_eval.py +++ b/deepmd/tf/infer/deep_eval.py @@ -111,7 +111,7 @@ def __init__( raise RuntimeError( f"model in graph (version {self.model_version}) is incompatible" f"with the model (version {MODEL_VERSION}) supported by the current code." - "See https://deepmd.rtfd.io/compatability/ for details." + "See https://deepmd.rtfd.io/compatibility/ for details." ) # set default to False, as subclasses may not support @@ -190,7 +190,7 @@ def _init_tensors(self): "numb_dos": "fitting_attr/numb_dos:0", # model attrs "sel_type": "model_attr/sel_type:0", - # additonal inputs + # additional inputs "efield": "t_efield:0", "fparam": "t_fparam:0", "aparam": "t_aparam:0", @@ -312,12 +312,12 @@ def sess(self) -> tf.Session: return tf.Session(graph=self.graph, config=default_tf_session_config) def _graph_compatable(self) -> bool: - """Check the model compatability. + """Check the model compatibility. Returns ------- bool - If the model stored in the graph file is compatable with the current code + If the model stored in the graph file is compatible with the current code """ model_version_major = int(self.model_version.split(".")[0]) model_version_minor = int(self.model_version.split(".")[1]) @@ -781,7 +781,7 @@ def _prepare_feed_dict( aparam=None, efield=None, ): - # standarize the shape of inputs + # standardize the shape of inputs natoms, nframes = self._get_natoms_and_nframes( coords, atom_types, @@ -1118,7 +1118,7 @@ def get_has_efield(self) -> bool: return self.has_efield def get_model_def_script(self) -> dict: - """Get model defination script.""" + """Get model definition script.""" t_script = self._get_tensor("train_attr/training_script:0") [script] = run_sess(self.sess, [t_script], feed_dict={}) model_def_script = script.decode("utf-8") @@ -1171,7 +1171,7 @@ def __init__( raise RuntimeError( f"model in graph (version {self.model_version}) is incompatible" f"with the model (version {MODEL_VERSION}) supported by the current code." - "See https://deepmd.rtfd.io/compatability/ for details." + "See https://deepmd.rtfd.io/compatibility/ for details." ) # set default to False, as subclasses may not support @@ -1224,12 +1224,12 @@ def sess(self) -> tf.Session: return tf.Session(graph=self.graph, config=default_tf_session_config) def _graph_compatable(self) -> bool: - """Check the model compatability. + """Check the model compatibility. Returns ------- bool - If the model stored in the graph file is compatable with the current code + If the model stored in the graph file is compatible with the current code """ model_version_major = int(self.model_version.split(".")[0]) model_version_minor = int(self.model_version.split(".")[1]) diff --git a/deepmd/tf/infer/deep_tensor.py b/deepmd/tf/infer/deep_tensor.py index a20bbfe513..a1edaaa409 100644 --- a/deepmd/tf/infer/deep_tensor.py +++ b/deepmd/tf/infer/deep_tensor.py @@ -186,7 +186,7 @@ def eval( If atomic == False then of size nframes x output_dim else of size nframes x natoms x output_dim """ - # standarize the shape of inputs + # standardize the shape of inputs if mixed_type: natoms = atom_types[0].size atom_types = np.array(atom_types, dtype=int).reshape([-1, natoms]) @@ -330,7 +330,7 @@ def eval_full( """ assert self._support_gfv, "do not support eval_full with old tensor model" - # standarize the shape of inputs + # standardize the shape of inputs if mixed_type: natoms = atom_types[0].size atom_types = np.array(atom_types, dtype=int).reshape([-1, natoms]) diff --git a/deepmd/tf/loss/ener.py b/deepmd/tf/loss/ener.py index 337046836b..95cc8adafb 100644 --- a/deepmd/tf/loss/ener.py +++ b/deepmd/tf/loss/ener.py @@ -673,7 +673,7 @@ def print_on_training( error_ae_train, ) = train_out - # than test data, if tensorboard log writter is present, commpute summary + # than test data, if tensorboard log writer is present, compute summary # and write tensorboard logs if tb_writer: summary_merged_op = tf.summary.merge( diff --git a/deepmd/tf/model/ener.py b/deepmd/tf/model/ener.py index b21c920d9c..57aaa2acf4 100644 --- a/deepmd/tf/model/ener.py +++ b/deepmd/tf/model/ener.py @@ -56,7 +56,7 @@ class EnerModel(StandardModel): use_srtab The table for the short-range pairwise interaction added on top of DP. The table is a text data file with (N_t + 1) * N_t / 2 + 1 columes. The first colume is the distance between atoms. The second to the last columes are energies for pairs of certain types. For example we have two atom types, 0 and 1. The columes from 2nd to 4th are for 0-0, 0-1 and 1-1 correspondingly. smin_alpha - The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided. + The short-range tabulated interaction will be switched according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided. sw_rmin The lower boundary of the interpolation between short-range tabulated interaction and DP. It is only required when `use_srtab` is provided. sw_rmin @@ -516,7 +516,7 @@ def change_energy_bias( bias_adjust_mode : str The mode for changing energy bias : ['change-by-statistic', 'set-by-statistic'] 'change-by-statistic' : perform predictions on energies of target dataset, - and do least sqaure on the errors to obtain the target shift as bias. + and do least square on the errors to obtain the target shift as bias. 'set-by-statistic' : directly use the statistic energy bias in the target dataset. """ self.fitting.change_energy_bias( diff --git a/deepmd/tf/model/frozen.py b/deepmd/tf/model/frozen.py index 05700dc64e..7501a5cbd1 100644 --- a/deepmd/tf/model/frozen.py +++ b/deepmd/tf/model/frozen.py @@ -250,7 +250,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/model/linear.py b/deepmd/tf/model/linear.py index 4c75c2a1d5..7cf3c5194d 100644 --- a/deepmd/tf/model/linear.py +++ b/deepmd/tf/model/linear.py @@ -146,7 +146,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/model/model.py b/deepmd/tf/model/model.py index 833f8364ae..03211d49d5 100644 --- a/deepmd/tf/model/model.py +++ b/deepmd/tf/model/model.py @@ -87,7 +87,7 @@ class Model(ABC, make_plugin_registry("model")): use_srtab The table for the short-range pairwise interaction added on top of DP. The table is a text data file with (N_t + 1) * N_t / 2 + 1 columes. The first colume is the distance between atoms. The second to the last columes are energies for pairs of certain types. For example we have two atom types, 0 and 1. The columes from 2nd to 4th are for 0-0, 0-1 and 1-1 correspondingly. smin_alpha - The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided. + The short-range tabulated interaction will be switched according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided. sw_rmin The lower boundary of the interpolation between short-range tabulated interaction and DP. It is only required when `use_srtab` is provided. sw_rmin @@ -411,7 +411,7 @@ def change_energy_bias( bias_adjust_mode : str The mode for changing energy bias : ['change-by-statistic', 'set-by-statistic'] 'change-by-statistic' : perform predictions on energies of target dataset, - and do least sqaure on the errors to obtain the target shift as bias. + and do least square on the errors to obtain the target shift as bias. 'set-by-statistic' : directly use the statistic energy bias in the target dataset. """ raise RuntimeError("Not supported") @@ -524,7 +524,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -766,7 +766,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/model/pairtab.py b/deepmd/tf/model/pairtab.py index d54940fec6..80e68d7825 100644 --- a/deepmd/tf/model/pairtab.py +++ b/deepmd/tf/model/pairtab.py @@ -244,7 +244,7 @@ def get_fitting(self) -> Union[Fitting, dict]: def get_loss(self, loss: dict, lr) -> Optional[Union[Loss, dict]]: """Get the loss function(s).""" - # nothing nees to do + # nothing needs to do return def get_rcut(self) -> float: @@ -285,7 +285,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/model/pairwise_dprc.py b/deepmd/tf/model/pairwise_dprc.py index c8a57d90b3..a0eaa1385f 100644 --- a/deepmd/tf/model/pairwise_dprc.py +++ b/deepmd/tf/model/pairwise_dprc.py @@ -421,7 +421,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/nvnmd/data/data.py b/deepmd/tf/nvnmd/data/data.py index 55e7c51bc7..e1fcaac9f2 100644 --- a/deepmd/tf/nvnmd/data/data.py +++ b/deepmd/tf/nvnmd/data/data.py @@ -118,7 +118,7 @@ "end": "", } -# change the configuration accordng to the max_nnei +# change the configuration according to the max_nnei jdata_config_v0_ni128 = jdata_config_v0.copy() jdata_config_v0_ni256 = jdata_config_v0.copy() jdata_config_v0_ni256["ctrl"] = { @@ -250,7 +250,7 @@ "end": "", } -# change the configuration accordng to the max_nnei +# change the configuration according to the max_nnei jdata_config_v1_ni128 = jdata_config_v1.copy() jdata_config_v1_ni256 = jdata_config_v1.copy() jdata_config_v1_ni256["ctrl"] = { diff --git a/deepmd/tf/nvnmd/entrypoints/mapt.py b/deepmd/tf/nvnmd/entrypoints/mapt.py index 8ee1967854..7a50ceae30 100644 --- a/deepmd/tf/nvnmd/entrypoints/mapt.py +++ b/deepmd/tf/nvnmd/entrypoints/mapt.py @@ -50,7 +50,7 @@ class MapTable: :math:`h_{ji} = \frac{s(r_{ji})}{r_{ji}}`, and :math:`\mathcal{G}_{ji}` is embedding matrix. - The mapping funciton can be define as: + The mapping function can be define as: | :math:`y = f(x) = y_{k} + (x - x_{k}) * dy_{k}` | :math:`y_{k} = f(x_{k})` @@ -436,7 +436,7 @@ def run_u2s(self): # N = NUM_MAPT N = 512 N2 = int(rc_max**2) - # N+1 ranther than N for calculating defference + # N+1 ranther than N for calculating difference keys = list(dic_ph.keys()) vals = list(dic_ph.values()) @@ -446,7 +446,7 @@ def run_u2s(self): u2 = N2 * np.reshape(np.arange(0, N * 16 + 1) / (N * 16), [-1, 1]) # pylint: disable=no-explicit-dtype res_lst2 = run_sess(sess, vals, feed_dict={dic_ph["u"]: u2}) - res_dic2 = dict(zip(keys, res_lst2)) # reference for commpare + res_dic2 = dict(zip(keys, res_lst2)) # reference for compare # change value for tt in range(ndim): diff --git a/deepmd/tf/nvnmd/utils/encode.py b/deepmd/tf/nvnmd/utils/encode.py index 21398fbf23..46209e5230 100644 --- a/deepmd/tf/nvnmd/utils/encode.py +++ b/deepmd/tf/nvnmd/utils/encode.py @@ -122,7 +122,7 @@ def check_dec(self, idec, nbit, signed=False, name=""): def extend_list(self, slbin, nfull): r"""Extend the list (slbin) to the length (nfull) - the attched element of list is 0. + the attached element of list is 0. such as, when diff --git a/deepmd/tf/nvnmd/utils/network.py b/deepmd/tf/nvnmd/utils/network.py index 76c80ed4e7..c0572a7fa7 100644 --- a/deepmd/tf/nvnmd/utils/network.py +++ b/deepmd/tf/nvnmd/utils/network.py @@ -240,7 +240,7 @@ def one_layer( x = op_module.quantize_nvnmd(inputs, 1, NBIT_DATA_FL, NBIT_DATA_FL, -1) inputs = tf.ensure_shape(x, [None, shape[1]]) # wx - # normlize weight mode: 0 all | 1 column + # normalize weight mode: 0 all | 1 column norm_mode = 0 if final_layer else 1 wx = op_module.matmul_fitnet_nvnmd( inputs, w, NBIT_DATA_FL, NBIT_SHORT_FL, norm_mode diff --git a/deepmd/tf/op/__init__.py b/deepmd/tf/op/__init__.py index 421ef0b123..805dc148a7 100644 --- a/deepmd/tf/op/__init__.py +++ b/deepmd/tf/op/__init__.py @@ -18,7 +18,7 @@ def import_ops(): Notes ----- - Initialy this subdir is unpopulated. CMake will install all the op module python + Initially this subdir is unpopulated. CMake will install all the op module python files and shared libs. """ for module_file in Path(__file__).parent.glob("*.py"): diff --git a/deepmd/tf/op/_dotmul_flt_nvnmd_grad.py b/deepmd/tf/op/_dotmul_flt_nvnmd_grad.py index 8a4ffb2d0c..b6aae52519 100644 --- a/deepmd/tf/op/_dotmul_flt_nvnmd_grad.py +++ b/deepmd/tf/op/_dotmul_flt_nvnmd_grad.py @@ -15,7 +15,7 @@ def _DotmulFltNvnmdGrad(op, grad): x = op.inputs[0] w = op.inputs[1] - # calcualte + # calculate dx = op_module.mul_flt_nvnmd(grad, w) dw = op_module.mul_flt_nvnmd(grad, x) # add shape for output of matmul_nvnmd diff --git a/deepmd/tf/op/_matmul_flt2fix_nvnmd.py b/deepmd/tf/op/_matmul_flt2fix_nvnmd.py index 319fb90ec8..3b802ec56a 100644 --- a/deepmd/tf/op/_matmul_flt2fix_nvnmd.py +++ b/deepmd/tf/op/_matmul_flt2fix_nvnmd.py @@ -22,7 +22,7 @@ def _MatmulFlt2fixNvnmdGrad(op, grad): else: x_T = tf.transpose(x) w_T = tf.transpose(w) - # calcualte + # calculate # dx = tf.matmul(grad, w_T) # dw = tf.matmul(x_T, grad) dx = op_module.matmul_flt_nvnmd(grad, w_T, 1, 1) diff --git a/deepmd/tf/op/_matmul_flt_nvnmd_grad.py b/deepmd/tf/op/_matmul_flt_nvnmd_grad.py index 6493794b00..94e0dc2d67 100644 --- a/deepmd/tf/op/_matmul_flt_nvnmd_grad.py +++ b/deepmd/tf/op/_matmul_flt_nvnmd_grad.py @@ -24,7 +24,7 @@ def _MatmulFltNvnmdGrad(op, grad): else: x_T = tf.transpose(x) w_T = tf.transpose(w) - # calcualte + # calculate modex = (normx >> 4) & 15 modew = (normw >> 4) & 15 if modex: diff --git a/deepmd/tf/train/run_options.py b/deepmd/tf/train/run_options.py index c36b42e194..c7f7b92674 100644 --- a/deepmd/tf/train/run_options.py +++ b/deepmd/tf/train/run_options.py @@ -82,7 +82,7 @@ class RunOptions: gpus: Optional[list[int]] list of GPUs if any are present else None is_chief: bool - in distribured training it is true for tha main MPI process in serail it is + in distribured training it is true for the main MPI process in serail it is always true world_size: int total worker count @@ -93,7 +93,7 @@ class RunOptions: node_list_ : list[str] the list of nodes of the current mpirun my_device: str - deviice type - gpu or cpu + device type - gpu or cpu """ gpus: Optional[list[int]] @@ -180,7 +180,7 @@ def _setup_logger( else: log.warning( f"Log handles have already been set. It is not advisable to " - f"reset them{', especially when runnig with MPI!' if self._HVD else ''}" + f"reset them{', especially when running with MPI!' if self._HVD else ''}" ) def _try_init_distrib(self): @@ -193,7 +193,7 @@ def _try_init_distrib(self): log.warning("Switch to serial execution due to lack of horovod module.") self.is_distrib = False - # Do real intialization + # Do real initialization if self.is_distrib: self._init_distributed(HVD) self._HVD = HVD diff --git a/deepmd/tf/train/trainer.py b/deepmd/tf/train/trainer.py index 9f353f2e32..58be9e8176 100644 --- a/deepmd/tf/train/trainer.py +++ b/deepmd/tf/train/trainer.py @@ -409,7 +409,7 @@ def train(self, train_data=None, valid_data=None): stop_batch = self.stop_batch self._init_session() - # Before data shard is enabled, only cheif do evaluation and record it + # Before data shard is enabled, only chief do evaluation and record it # self.print_head() fp = None if self.run_opt.is_chief: @@ -846,7 +846,7 @@ def _init_from_pretrained_model( bias_adjust_mode : str The mode for changing energy bias : ['change-by-statistic', 'set-by-statistic'] 'change-by-statistic' : perform predictions on energies of target dataset, - and do least sqaure on the errors to obtain the target shift as bias. + and do least square on the errors to obtain the target shift as bias. 'set-by-statistic' : directly use the statistic energy bias in the target dataset. """ try: @@ -940,7 +940,7 @@ def build(self) -> list[tf.Tensor]: def get_train_batch() -> list[np.ndarray]: batch_data = train_data.get_batch() - # convert dict to list of arryas + # convert dict to list of arrays batch_data = tuple([batch_data[kk] for kk in self.data_keys]) return batch_data diff --git a/deepmd/tf/utils/learning_rate.py b/deepmd/tf/utils/learning_rate.py index 519bf20bd0..fee73ca9a3 100644 --- a/deepmd/tf/utils/learning_rate.py +++ b/deepmd/tf/utils/learning_rate.py @@ -58,7 +58,7 @@ def build( Parameters ---------- global_step - The tf Tensor prividing the global training step + The tf Tensor providing the global training step stop_step The stop step. If provided, the decay_rate will be determined automatically and overwritten. diff --git a/deepmd/tf/utils/neighbor_stat.py b/deepmd/tf/utils/neighbor_stat.py index 4052c89821..37028b23bc 100644 --- a/deepmd/tf/utils/neighbor_stat.py +++ b/deepmd/tf/utils/neighbor_stat.py @@ -33,7 +33,7 @@ class NeighborStatOP: - """Class for getting neighbor statics data information. + """Class for getting neighbor statistics data information. Parameters ---------- diff --git a/deepmd/tf/utils/network.py b/deepmd/tf/utils/network.py index 7941b451af..c4a0646705 100644 --- a/deepmd/tf/utils/network.py +++ b/deepmd/tf/utils/network.py @@ -264,7 +264,7 @@ def embedding_net( stddev : float Standard deviation of initializing network parameters bavg : float - Mean of network intial bias + Mean of network initial bias seed : int Random seed for initializing network parameters trainable : boolean diff --git a/deepmd/tf/utils/nlist.py b/deepmd/tf/utils/nlist.py index 0f33ec883b..6e405e9adb 100644 --- a/deepmd/tf/utils/nlist.py +++ b/deepmd/tf/utils/nlist.py @@ -39,7 +39,7 @@ def extend_coord_with_ghosts( extended_atype: tf.Tensor extended atom type of shape [-1, nall]. index_mapping: tf.Tensor - maping extended index to the local index + mapping extended index to the local index """ # generated by GitHub Copilot, converted from PT codes diff --git a/deepmd/tf/utils/sess.py b/deepmd/tf/utils/sess.py index ca98980f89..3c179d6b96 100644 --- a/deepmd/tf/utils/sess.py +++ b/deepmd/tf/utils/sess.py @@ -10,7 +10,7 @@ def run_sess(sess: tf.Session, *args, **kwargs): - """Run session with erorrs caught. + """Run session with errors caught. Parameters ---------- diff --git a/deepmd/tf/utils/tabulate.py b/deepmd/tf/utils/tabulate.py index d68f5cadf7..588ebdd55e 100644 --- a/deepmd/tf/utils/tabulate.py +++ b/deepmd/tf/utils/tabulate.py @@ -97,7 +97,7 @@ def __init__( elif activation_fn == ACTIVATION_FN_DICT["sigmoid"]: self.functype = 6 else: - raise RuntimeError("Unknown actication function type!") + raise RuntimeError("Unknown activation function type!") self.activation_fn = activation_fn # self.sess = tf.Session(graph = self.graph) diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index b3f3b26fd0..916e4de1b0 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -254,7 +254,7 @@ def descrpt_local_frame_args(): def descrpt_se_a_args(): doc_sel = 'This parameter set the number of selected neighbors for each type of atom. It can be:\n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." @@ -322,7 +322,7 @@ def descrpt_se_a_args(): def descrpt_se_t_args(): doc_sel = 'This parameter set the number of selected neighbors for each type of atom. It can be:\n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." @@ -391,7 +391,7 @@ def descrpt_se_a_tpe_args(): def descrpt_se_r_args(): doc_sel = 'This parameter set the number of selected neighbors for each type of atom. It can be:\n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." @@ -468,7 +468,7 @@ def descrpt_se_atten_common_args(): doc_sel = 'This parameter set the number of selected neighbors. Note that this parameter is a little different from that in other descriptors. Instead of separating each type of atoms, only the summation matters. And this number is highly related with the efficiency, thus one should not make it too large. Usually 200 or less is enough, far away from the GPU limitation 4096. It can be:\n\n\ - `int`. The maximum number of neighbor atoms to be considered. We recommend it to be less than 200. \n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." @@ -565,7 +565,7 @@ def descrpt_se_atten_args(): "The input mode of the type embedding. Supported modes are ['concat', 'strip']." "- 'concat': Concatenate the type embedding with the smoothed radial information as the union input for the embedding network. " "When `type_one_side` is False, the input is `input_ij = concat([r_ij, tebd_j, tebd_i])`. When `type_one_side` is True, the input is `input_ij = concat([r_ij, tebd_j])`. " - "The output is `out_ij = embeding(input_ij)` for the pair-wise representation of atom i with neighbor j." + "The output is `out_ij = embedding(input_ij)` for the pair-wise representation of atom i with neighbor j." "- 'strip': Use a separated embedding network for the type embedding and combine the output with the radial embedding network output. " f"When `type_one_side` is False, the input is `input_t = concat([tebd_j, tebd_i])`. {doc_only_pt_supported} When `type_one_side` is True, the input is `input_t = tebd_j`. " "The output is `out_ij = embeding_t(input_t) * embeding_s(r_ij) + embeding_s(r_ij)` for the pair-wise representation of atom i with neighbor j." @@ -665,7 +665,7 @@ def descrpt_se_e3_tebd_args(): doc_sel = 'This parameter set the number of selected neighbors. Note that this parameter is a little different from that in other descriptors. Instead of separating each type of atoms, only the summation matters. And this number is highly related with the efficiency, thus one should not make it too large. Usually 200 or less is enough, far away from the GPU limitation 4096. It can be:\n\n\ - `int`. The maximum number of neighbor atoms to be considered. We recommend it to be less than 200. \n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." @@ -687,7 +687,7 @@ def descrpt_se_e3_tebd_args(): "The input mode of the type embedding. Supported modes are ['concat', 'strip']." "- 'concat': Concatenate the type embedding with the smoothed angular information as the union input for the embedding network. " "The input is `input_jk = concat([angle_jk, tebd_j, tebd_k])`. " - "The output is `out_jk = embeding(input_jk)` for the three-body representation of atom i with neighbors j and k." + "The output is `out_jk = embedding(input_jk)` for the three-body representation of atom i with neighbors j and k." "- 'strip': Use a separated embedding network for the type embedding and combine the output with the angular embedding network output. " "The input is `input_t = concat([tebd_j, tebd_k])`." "The output is `out_jk = embeding_t(input_t) * embeding_s(angle_jk) + embeding_s(angle_jk)` for the three-body representation of atom i with neighbors j and k." @@ -952,7 +952,7 @@ def dpa2_repinit_args(): "The input mode of the type embedding. Supported modes are ['concat', 'strip']." "- 'concat': Concatenate the type embedding with the smoothed radial information as the union input for the embedding network. " "When `type_one_side` is False, the input is `input_ij = concat([r_ij, tebd_j, tebd_i])`. When `type_one_side` is True, the input is `input_ij = concat([r_ij, tebd_j])`. " - "The output is `out_ij = embeding(input_ij)` for the pair-wise representation of atom i with neighbor j." + "The output is `out_ij = embedding(input_ij)` for the pair-wise representation of atom i with neighbor j." "- 'strip': Use a separated embedding network for the type embedding and combine the output with the radial embedding network output. " f"When `type_one_side` is False, the input is `input_t = concat([tebd_j, tebd_i])`. {doc_only_pt_supported} When `type_one_side` is True, the input is `input_t = tebd_j`. " "The output is `out_ij = embeding_t(input_t) * embeding_s(r_ij) + embeding_s(r_ij)` for the pair-wise representation of atom i with neighbor j." @@ -1337,7 +1337,7 @@ def descrpt_se_a_ebd_v2_args(): def descrpt_se_a_mask_args(): doc_sel = 'This parameter sets the number of selected neighbors for each type of atom. It can be:\n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." doc_axis_neuron = "Size of the submatrix of G (embedding matrix)." @@ -1398,7 +1398,7 @@ def descrpt_variant_type_args(exclude_hybrid: bool = False) -> Variant: "se_atten_v2", "model[standard]/descriptor[se_atten_v2]" ) link_se_a_mask = make_link("se_a_mask", "model[standard]/descriptor[se_a_mask]") - doc_descrpt_type = f"The type of the descritpor. See explanation below. \n\n\ + doc_descrpt_type = f"The type of the descriptor. See explanation below. \n\n\ - {link_lf}: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.\n\n\ - {link_se_e2_a}: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.\n\n\ - {link_se_e2_r}: Used by the smooth edition of Deep Potential. Only the distance between atoms is used to construct the descriptor.\n\n\ @@ -1431,7 +1431,7 @@ def fitting_ener(): doc_trainable = f"Whether the parameters in the fitting net are trainable. This option can be\n\n\ - bool: True if all parameters of the fitting net are trainable, False otherwise.\n\n\ - list of bool{doc_only_tf_supported}: Specifies if each layer is trainable. Since the fitting net is composed by hidden layers followed by a output layer, the length of this list should be equal to len(`neuron`)+1." - doc_rcond = "The condition number used to determine the inital energy shift for each type of atoms. See `rcond` in :py:meth:`numpy.linalg.lstsq` for more details." + doc_rcond = "The condition number used to determine the initial energy shift for each type of atoms. See `rcond` in :py:meth:`numpy.linalg.lstsq` for more details." doc_seed = "Random seed for parameter initialization of the fitting net" doc_atom_ener = "Specify the atomic energy in vacuum for each type" doc_layer_name = ( @@ -1506,8 +1506,8 @@ def fitting_dos(): doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection' doc_trainable = "Whether the parameters in the fitting net are trainable. This option can be\n\n\ - bool: True if all parameters of the fitting net are trainable, False otherwise.\n\n\ -- list of bool: Specifies if each layer is trainable. Since the fitting net is composed by hidden layers followed by a output layer, the length of tihs list should be equal to len(`neuron`)+1." - doc_rcond = "The condition number used to determine the inital energy shift for each type of atoms. See `rcond` in :py:meth:`numpy.linalg.lstsq` for more details." +- list of bool: Specifies if each layer is trainable. Since the fitting net is composed by hidden layers followed by a output layer, the length of this list should be equal to len(`neuron`)+1." + doc_rcond = "The condition number used to determine the initial energy shift for each type of atoms. See `rcond` in :py:meth:`numpy.linalg.lstsq` for more details." doc_seed = "Random seed for parameter initialization of the fitting net" doc_numb_dos = ( "The number of gridpoints on which the DOS is evaluated (NEDOS in VASP)" @@ -1681,7 +1681,7 @@ def fitting_variant_type_args(): - `ener`: Fit an energy model (potential energy surface).\n\n\ - `dos` : Fit a density of states model. The total density of states / site-projected density of states labels should be provided by `dos.npy` or `atom_dos.npy` in each data system. The file has number of frames lines and number of energy grid columns (times number of atoms in `atom_dos.npy`). See `loss` parameter. \n\n\ - `dipole`: Fit an atomic dipole model. Global dipole labels or atomic dipole labels for all the selected atoms (see `sel_type`) should be provided by `dipole.npy` in each data system. The file either has number of frames lines and 3 times of number of selected atoms columns, or has number of frames lines and 3 columns. See `loss` parameter.\n\n\ -- `polar`: Fit an atomic polarizability model. Global polarizazbility labels or atomic polarizability labels for all the selected atoms (see `sel_type`) should be provided by `polarizability.npy` in each data system. The file eith has number of frames lines and 9 times of number of selected atoms columns, or has number of frames lines and 9 columns. See `loss` parameter.\n\n" +- `polar`: Fit an atomic polarizability model. Global polarizazbility labels or atomic polarizability labels for all the selected atoms (see `sel_type`) should be provided by `polarizability.npy` in each data system. The file with has number of frames lines and 9 times of number of selected atoms columns, or has number of frames lines and 9 columns. See `loss` parameter.\n\n" return Variant( "type", @@ -1765,7 +1765,7 @@ def model_args(exclude_hybrid=False): doc_type_embedding = "The type embedding." doc_modifier = "The modifier of model output." doc_use_srtab = "The table for the short-range pairwise interaction added on top of DP. The table is a text data file with (N_t + 1) * N_t / 2 + 1 columes. The first colume is the distance between atoms. The second to the last columes are energies for pairs of certain types. For example we have two atom types, 0 and 1. The columes from 2nd to 4th are for 0-0, 0-1 and 1-1 correspondingly." - doc_smin_alpha = "The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided." + doc_smin_alpha = "The short-range tabulated interaction will be switched according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided." doc_sw_rmin = "The lower boundary of the interpolation between short-range tabulated interaction and DP. It is only required when `use_srtab` is provided." doc_sw_rmax = "The upper boundary of the interpolation between short-range tabulated interaction and DP. It is only required when `use_srtab` is provided." doc_srtab_add_bias = "Whether add energy bias from the statistics of the data to short-range tabulated atomic energy. It only takes effect when `use_srtab` is provided." @@ -1917,7 +1917,7 @@ def standard_model_args() -> Argument: doc=doc_fitting, ), ], - doc="Stardard model, which contains a descriptor and a fitting.", + doc="Standard model, which contains a descriptor and a fitting.", ) return ca @@ -1962,7 +1962,7 @@ def pairtab_model_args() -> Argument: doc_sel = 'This parameter set the number of selected neighbors. Note that this parameter is a little different from that in other descriptors. Instead of separating each type of atoms, only the summation matters. And this number is highly related with the efficiency, thus one should not make it too large. Usually 200 or less is enough, far away from the GPU limitation 4096. It can be:\n\n\ - `int`. The maximum number of neighbor atoms to be considered. We recommend it to be less than 200. \n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' ca = Argument( "pairtab", dict, @@ -2053,7 +2053,7 @@ def learning_rate_variant_type_args(): def learning_rate_args(fold_subdoc: bool = False) -> Argument: doc_scale_by_worker = "When parallel training or batch size scaled, how to alter learning rate. Valid values are `linear`(default), `sqrt` or `none`." - doc_lr = "The definitio of learning rate" + doc_lr = "The definition of learning rate" return Argument( "learning_rate", dict, @@ -2328,10 +2328,10 @@ def loss_dos(): doc_start_pref_dos = start_pref("Density of State (DOS)") doc_limit_pref_dos = limit_pref("Density of State (DOS)") doc_start_pref_cdf = start_pref( - "Cumulative Distribution Function (cumulative intergral of DOS)" + "Cumulative Distribution Function (cumulative integral of DOS)" ) doc_limit_pref_cdf = limit_pref( - "Cumulative Distribution Function (cumulative intergral of DOS)" + "Cumulative Distribution Function (cumulative integral of DOS)" ) doc_start_pref_ados = start_pref("atomic DOS (site-projected DOS)") doc_limit_pref_ados = limit_pref("atomic DOS (site-projected DOS)") @@ -2486,7 +2486,7 @@ def training_data_args(): # ! added by Ziyao: new specification style for data doc_auto_prob_style = 'Determine the probability of systems automatically. The method is assigned by this key and can be\n\n\ - "prob_uniform" : the probability all the systems are equal, namely 1.0/self.get_nsystems()\n\n\ - "prob_sys_size" : the probability of a system is proportional to the number of batches in the system\n\n\ -- "prob_sys_size;stt_idx:end_idx:weight;stt_idx:end_idx:weight;..." : the list of systems is devided into blocks. A block is specified by `stt_idx:end_idx:weight`, where `stt_idx` is the starting index of the system, `end_idx` is then ending (not including) index of the system, the probabilities of the systems in this block sums up to `weight`, and the relatively probabilities within this block is proportional to the number of batches in the system.' +- "prob_sys_size;stt_idx:end_idx:weight;stt_idx:end_idx:weight;..." : the list of systems is divided into blocks. A block is specified by `stt_idx:end_idx:weight`, where `stt_idx` is the starting index of the system, `end_idx` is then ending (not including) index of the system, the probabilities of the systems in this block sums up to `weight`, and the relatively probabilities within this block is proportional to the number of batches in the system.' doc_sys_probs = ( "A list of float if specified. " "Should be of the same length as `systems`, " @@ -2551,7 +2551,7 @@ def validation_data_args(): # ! added by Ziyao: new specification style for dat doc_auto_prob_style = 'Determine the probability of systems automatically. The method is assigned by this key and can be\n\n\ - "prob_uniform" : the probability all the systems are equal, namely 1.0/self.get_nsystems()\n\n\ - "prob_sys_size" : the probability of a system is proportional to the number of batches in the system\n\n\ -- "prob_sys_size;stt_idx:end_idx:weight;stt_idx:end_idx:weight;..." : the list of systems is devided into blocks. A block is specified by `stt_idx:end_idx:weight`, where `stt_idx` is the starting index of the system, `end_idx` is then ending (not including) index of the system, the probabilities of the systems in this block sums up to `weight`, and the relatively probabilities within this block is proportional to the number of batches in the system.' +- "prob_sys_size;stt_idx:end_idx:weight;stt_idx:end_idx:weight;..." : the list of systems is divided into blocks. A block is specified by `stt_idx:end_idx:weight`, where `stt_idx` is the starting index of the system, `end_idx` is then ending (not including) index of the system, the probabilities of the systems in this block sums up to `weight`, and the relatively probabilities within this block is proportional to the number of batches in the system.' doc_sys_probs = ( "A list of float if specified. " "Should be of the same length as `systems`, " @@ -2664,7 +2664,7 @@ def training_args( "doing least square on the errors to add the target shift on the bias." ) doc_disp_training = "Displaying verbose information during training." - doc_time_training = "Timing durining training." + doc_time_training = "Timing during training." doc_profiling = "Export the profiling results to the Chrome JSON file for performance analysis, driven by the legacy TensorFlow profiling API or PyTorch Profiler. The output file will be saved to `profiling_file`." doc_profiling_file = "Output file for profiling." doc_enable_profiler = "Export the profiling results to the TensorBoard log for performance analysis, driven by TensorFlow Profiler (available in TensorFlow 2.3) or PyTorch Profiler. The log will be saved to `tensorboard_log_dir`." diff --git a/deepmd/utils/batch_size.py b/deepmd/utils/batch_size.py index 0394993854..259fe93bdb 100644 --- a/deepmd/utils/batch_size.py +++ b/deepmd/utils/batch_size.py @@ -160,7 +160,7 @@ def execute_all( Parameters ---------- callable : Callable - The method should accept *args and **kwargs as input and return the similiar array. + The method should accept *args and **kwargs as input and return the similar array. total_size : int Total size natoms : int diff --git a/deepmd/utils/data.py b/deepmd/utils/data.py index 72e3d58660..7d58d65578 100644 --- a/deepmd/utils/data.py +++ b/deepmd/utils/data.py @@ -24,7 +24,7 @@ class DeepmdData: """Class for a data system. - It loads data from hard disk, and mantains the data as a `data_dict` + It loads data from hard disk, and maintains the data as a `data_dict` Parameters ---------- @@ -43,7 +43,7 @@ class DeepmdData: trn_all_set [DEPRECATED] Deprecated. Now all sets are trained and tested. sort_atoms : bool - Sort atoms by atom types. Required to enable when the data is directly feeded to + Sort atoms by atom types. Required to enable when the data is directly fed to descriptors except mixed types. """ @@ -196,7 +196,7 @@ def reduce(self, key_out: str, key_in: str): assert key_out not in self.data_dict, "output key should not have been added" assert ( self.data_dict[key_in]["repeat"] == 1 - ), "reduced proerties should not have been repeated" + ), "reduced properties should not have been repeated" self.data_dict[key_out] = { "ndof": self.data_dict[key_in]["ndof"], diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py index 2b5fb6e6db..03a399106f 100644 --- a/deepmd/utils/data_system.py +++ b/deepmd/utils/data_system.py @@ -91,12 +91,12 @@ def __init__( - "prob_uniform" : the probability all the systems are equal, namely 1.0/self.get_nsystems() - "prob_sys_size" : the probability of a system is proportional to the number of batches in the system - "prob_sys_size;stt_idx:end_idx:weight;stt_idx:end_idx:weight;..." : - the list of systems is devided into blocks. A block is specified by `stt_idx:end_idx:weight`, + the list of systems is divided into blocks. A block is specified by `stt_idx:end_idx:weight`, where `stt_idx` is the starting index of the system, `end_idx` is then ending (not including) index of the system, the probabilities of the systems in this block sums up to `weight`, and the relatively probabilities within this block is proportional to the number of batches in the system. sort_atoms : bool - Sort atoms by atom types. Required to enable when the data is directly feeded to + Sort atoms by atom types. Required to enable when the data is directly fed to descriptors except mixed types. """ # init data @@ -184,7 +184,7 @@ def __init__( # ! altered by Marián Rynik # test size # now test size can be set as a percentage of systems data or test size - # can be set for each system individualy in the same manner as batch + # can be set for each system individually in the same manner as batch # size. This enables one to use systems with diverse number of # structures and different number of atoms. self.test_size = test_size @@ -277,7 +277,7 @@ def add_dict(self, adict: dict[str, dict[str, Any]]) -> None: "repeat": repeat, } - For the explaination of the keys see `add` + For the explanation of the keys see `add` """ for kk in adict: self.add( @@ -759,7 +759,7 @@ def process_systems(systems: Union[str, list[str]]) -> list[str]: msg = "cannot find valid a data system" log.fatal(msg) raise OSError(msg, help_msg) - # rougly check all items in systems are valid + # roughly check all items in systems are valid for ii in systems: ii = DPPath(ii) if not ii.is_dir(): diff --git a/deepmd/utils/econf_embd.py b/deepmd/utils/econf_embd.py index 99c7edf284..e33e07cee7 100644 --- a/deepmd/utils/econf_embd.py +++ b/deepmd/utils/econf_embd.py @@ -237,7 +237,7 @@ def make_econf_embedding( def transform_to_spin_rep(res: dict[str, np.ndarray]) -> dict[str, np.ndarray]: - """Tranform electron occupation of 0/1/2 to -1,-1/-1,1/1,1.""" + """Transform electron occupation of 0/1/2 to -1,-1/-1,1/1,1.""" ret = {} def transform(ii): diff --git a/deepmd/utils/out_stat.py b/deepmd/utils/out_stat.py index bc765645dc..4d0d788f8b 100644 --- a/deepmd/utils/out_stat.py +++ b/deepmd/utils/out_stat.py @@ -21,7 +21,7 @@ def compute_stats_from_redu( """Compute the output statistics. Given the reduced output value and the number of atoms for each atom, - compute the least-squares solution as the atomic output bais and std. + compute the least-squares solution as the atomic output bias and std. Parameters ---------- @@ -93,7 +93,7 @@ def compute_stats_from_atomic( """Compute the output statistics. Given the output value and the type of atoms, - compute the atomic output bais and std. + compute the atomic output bias and std. Parameters ---------- diff --git a/deepmd/utils/summary.py b/deepmd/utils/summary.py index e2118bf7e0..a35dd4db93 100644 --- a/deepmd/utils/summary.py +++ b/deepmd/utils/summary.py @@ -48,7 +48,7 @@ class SummaryPrinter(ABC): BUILD: ClassVar = { "installed to": "\n".join(deepmd.__path__), "source": GLOBAL_CONFIG["git_summ"], - "source brach": GLOBAL_CONFIG["git_branch"], + "source branch": GLOBAL_CONFIG["git_branch"], "source commit": GLOBAL_CONFIG["git_hash"], "source commit at": GLOBAL_CONFIG["git_date"], "use float prec": global_float_prec, diff --git a/deepmd/utils/weight_avg.py b/deepmd/utils/weight_avg.py index 7c75d18e68..8328be5fcf 100644 --- a/deepmd/utils/weight_avg.py +++ b/deepmd/utils/weight_avg.py @@ -7,7 +7,7 @@ def weighted_average(errors: list[dict[str, tuple[float, float]]]) -> dict: - """Compute wighted average of prediction errors (MAE or RMSE) for model. + """Compute weighted average of prediction errors (MAE or RMSE) for model. Parameters ---------- diff --git a/doc/README b/doc/README index 2f4ce66792..728481df15 100644 --- a/doc/README +++ b/doc/README @@ -1 +1 @@ -To run the HTML documention build, doxygen have to be installed. +To run the HTML documentation build, doxygen have to be installed. diff --git a/doc/development/coding-conventions.rst b/doc/development/coding-conventions.rst index bf186d1231..4f82b34a60 100644 --- a/doc/development/coding-conventions.rst +++ b/doc/development/coding-conventions.rst @@ -72,7 +72,7 @@ Conventions`_ and `Typing Conventions`_ PEPs, clarified and extended as follows: f"something {'this' if x else 'that'}" -* Use f-strings ``s = f"{x:.2f}"`` instead of old style formating with ``"%f" % x``. +* Use f-strings ``s = f"{x:.2f}"`` instead of old style formatting with ``"%f" % x``. string format method ``"{x:.2f}".format()`` may be used sparsely where it is more convenient than f-strings. diff --git a/doc/development/create-a-model-pt.md b/doc/development/create-a-model-pt.md index 257dd8a25d..875067e2b8 100644 --- a/doc/development/create-a-model-pt.md +++ b/doc/development/create-a-model-pt.md @@ -6,7 +6,7 @@ In the following context, we use the PyTorch backend as the example, while it also applies to other backends listed above. ::: -If you'd like to create a new model that isn't covered by the existing DeePMD-kit library, but reuse DeePMD-kit's other efficient modules such as data processing, trainner, etc, you may want to read this section. +If you'd like to create a new model that isn't covered by the existing DeePMD-kit library, but reuse DeePMD-kit's other efficient modules such as data processing, trainer, etc, you may want to read this section. To incorporate your custom model you'll need to: diff --git a/doc/development/create-a-model-tf.md b/doc/development/create-a-model-tf.md index 95a2f66f23..cc7ad1999d 100644 --- a/doc/development/create-a-model-tf.md +++ b/doc/development/create-a-model-tf.md @@ -1,6 +1,6 @@ # Create a model in TensorFlow {{ tensorflow_icon }} -If you'd like to create a new model that isn't covered by the existing DeePMD-kit library, but reuse DeePMD-kit's other efficient modules such as data processing, trainner, etc, you may want to read this section. +If you'd like to create a new model that isn't covered by the existing DeePMD-kit library, but reuse DeePMD-kit's other efficient modules such as data processing, trainer, etc, you may want to read this section. To incorporate your custom model you'll need to: diff --git a/doc/getting-started/quick_start.ipynb b/doc/getting-started/quick_start.ipynb index 0c9563b9e9..1ddb6f5fce 100644 --- a/doc/getting-started/quick_start.ipynb +++ b/doc/getting-started/quick_start.ipynb @@ -454,7 +454,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Checke dargs version and Install\n", + "# Check dargs version and Install\n", "!pip show dargs || pip install --upgrade dargs" ] }, @@ -523,7 +523,7 @@ " color: #bbbbff;\n", "}\n", "\n", - "

{
  \"_comment\": \"that's all\",
  \"model\"model:
type: dict
: {
    \"type_map\"type_map:
type: typing.list[str], optional
A list of strings. Give the name to each type of atoms. It is noted that the number of atom type of training system must be less than 128 in a GPU environment. If not given, type.raw in each system should use the same type indexes, and type_map.raw will take no effect.
: [
     \"H\",
     \"C\"
    ],

    \"descriptor\"descriptor:
type: dict
The descriptor of atomic environment.
: {
      \"type\"type:
type: str
The type of the descritpor. See explanation below.
- loc_frame: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.
- se_e2_a: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.
- se_e2_r: Used by the smooth edition of Deep Potential. Only the distance between atoms is used to construct the descriptor.
- se_e3: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Three-body embedding will be used by this descriptor.
- se_a_tpe: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Type embedding will be used by this descriptor.
- se_atten: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism will be used by this descriptor.
- se_atten_v2: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism with new modifications will be used by this descriptor.
- se_a_mask: Used by the smooth edition of Deep Potential. It can accept a variable number of atoms in a frame (Non-PBC system). aparam are required as an indicator matrix for the real/virtual sign of input atoms.
- hybrid: Concatenate of a list of descriptors as a new descriptor.
: \"se_e2_a\",
      \"sel\"sel:
type: str | typing.list[int], optional, default: auto
This parameter set the number of selected neighbors for each type of atom. It can be:
- list[int]. The length of the list should be the same as the number of atom types in the system. sel[i] gives the selected number of type-i neighbors. sel[i] is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.
- str. Can be \"auto:factor\" or \"auto\". \"factor\" is a float number larger than 1. This option will automatically determine the sel. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the \"factor\". Finally the number is wraped up to 4 divisible. The option \"auto\" is equivalent to \"auto:1.1\".
: \"auto\",
      \"rcut_smth\"rcut_smth:
type: float, optional, default: 0.5
Where to start smoothing. For example the 1/r term is smoothed from rcut to rcut_smth
: 0.5,
      \"rcut\"rcut:
type: float, optional, default: 6.0
The cut-off radius.
: 6.0,
      \"neuron\"neuron:
type: typing.list[int], optional, default: [10, 20, 40]
Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built.
: [
       25,
       50,
       100
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: False
Whether to use a \"Timestep\" in the skip connection
: false,
      \"axis_neuron\"axis_neuron:
type: int, optional, default: 4, alias: n_axis_neuron
Size of the submatrix of G (embedding matrix).
: 16,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization
: 1,
      \"_comment\": \" that's all\"
    },
    \"fitting_net\"fitting_net:
type: dict
The fitting of physical properties.
: {
      \"neuron\"neuron:
type: typing.list[int], optional, default: [120, 120, 120], alias: n_neuron
The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built.
: [
       240,
       240,
       240
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: True
Whether to use a \"Timestep\" in the skip connection
: true,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization of the fitting net
: 1,
      \"_comment\": \" that's all\"
    },
    \"_comment\": \" that's all\"
  },
  \"learning_rate\"learning_rate:
type: dict, optional
The definitio of learning rate
: {
    \"type\"type:
type: str, default: exp
The type of the learning rate.
: \"exp\",
    \"decay_steps\"decay_steps:
type: int, optional, default: 5000
The learning rate is decaying every this number of training steps.
: 50,
    \"start_lr\"start_lr:
type: float, optional, default: 0.001
The learning rate at the start of the training.
: 0.001,
    \"stop_lr\"stop_lr:
type: float, optional, default: 1e-08
The desired learning rate at the end of the training.
: 3.51e-08,
    \"_comment\": \"that's all\"
  },
  \"loss\"loss:
type: dict, optional
The definition of loss function. The loss type should be set to tensor, ener or left unset.
: {
    \"type\"type:
type: str, default: ener
The type of the loss. When the fitting type is ener, the loss type should be set to ener or left unset. When the fitting type is dipole or polar, the loss type should be set to tensor.
: \"ener\",
    \"start_pref_e\"start_pref_e:
type: float | int, optional, default: 0.02
The prefactor of energy loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the energy label should be provided by file energy.npy in each data system. If both start_pref_e and limit_pref_e are set to 0, then the energy will be ignored.
: 0.02,
    \"limit_pref_e\"limit_pref_e:
type: float | int, optional, default: 1.0
The prefactor of energy loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_f\"start_pref_f:
type: float | int, optional, default: 1000
The prefactor of force loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the force label should be provided by file force.npy in each data system. If both start_pref_f and limit_pref_f are set to 0, then the force will be ignored.
: 1000,
    \"limit_pref_f\"limit_pref_f:
type: float | int, optional, default: 1.0
The prefactor of force loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_v\"start_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the virial label should be provided by file virial.npy in each data system. If both start_pref_v and limit_pref_v are set to 0, then the virial will be ignored.
: 0,
    \"limit_pref_v\"limit_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 0,
    \"_comment\": \" that's all\"
  },
  \"training\"training:
type: dict
The training options.
: {
    \"training_data\"training_data:
type: dict, optional
Configurations of training data.
: {
      \"systems\"systems:
type: str | typing.list[str]
The data systems for training. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/training_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.list[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
- string \"mixed:N\": the batch data will be sampled from all systems and merged into a mixed system with the batch size N. Only support the se_atten descriptor.
If MPI is used, the value should be considered as the batch size per task.
: \"auto\",
      \"_comment\": \"that's all\"
    },
    \"validation_data\"validation_data:
type: NoneType | dict, optional, default: None
Configurations of validation data. Similar to that of training data, except that a numb_btch argument may be configured
: {
      \"systems\"systems:
type: str | typing.list[str]
The data systems for validation. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/validation_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.list[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
: \"auto\",
      \"numb_btch\"numb_btch:
type: int, optional, default: 1, alias: numb_batch
An integer that specifies the number of batches to be sampled for each validation period.
: 1,
      \"_comment\": \"that's all\"
    },
    \"numb_steps\"numb_steps:
type: int, alias: stop_batch
Number of training batch. Each training uses one batch of data.
: 10000,
    \"seed\"seed:
type: NoneType | int, optional
The random seed for getting frames from the training data set.
: 10,
    \"disp_file\"disp_file:
type: str, optional, default: lcurve.out
The file for printing learning curve.
: \"lcurve.out\",
    \"disp_freq\"disp_freq:
type: int, optional, default: 1000
The frequency of printing learning curve.
: 200,
    \"save_freq\"save_freq:
type: int, optional, default: 1000
The frequency of saving check point.
: 1000,
    \"_comment\": \"that's all\"
  }
}
" + "
{
  \"_comment\": \"that's all\",
  \"model\"model:
type: dict
: {
    \"type_map\"type_map:
type: typing.list[str], optional
A list of strings. Give the name to each type of atoms. It is noted that the number of atom type of training system must be less than 128 in a GPU environment. If not given, type.raw in each system should use the same type indexes, and type_map.raw will take no effect.
: [
     \"H\",
     \"C\"
    ],

    \"descriptor\"descriptor:
type: dict
The descriptor of atomic environment.
: {
      \"type\"type:
type: str
The type of the descriptor. See explanation below.
- loc_frame: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.
- se_e2_a: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.
- se_e2_r: Used by the smooth edition of Deep Potential. Only the distance between atoms is used to construct the descriptor.
- se_e3: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Three-body embedding will be used by this descriptor.
- se_a_tpe: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Type embedding will be used by this descriptor.
- se_atten: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism will be used by this descriptor.
- se_atten_v2: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism with new modifications will be used by this descriptor.
- se_a_mask: Used by the smooth edition of Deep Potential. It can accept a variable number of atoms in a frame (Non-PBC system). aparam are required as an indicator matrix for the real/virtual sign of input atoms.
- hybrid: Concatenate of a list of descriptors as a new descriptor.
: \"se_e2_a\",
      \"sel\"sel:
type: str | typing.list[int], optional, default: auto
This parameter set the number of selected neighbors for each type of atom. It can be:
- list[int]. The length of the list should be the same as the number of atom types in the system. sel[i] gives the selected number of type-i neighbors. sel[i] is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.
- str. Can be \"auto:factor\" or \"auto\". \"factor\" is a float number larger than 1. This option will automatically determine the sel. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the \"factor\". Finally the number is wraped up to 4 divisible. The option \"auto\" is equivalent to \"auto:1.1\".
: \"auto\",
      \"rcut_smth\"rcut_smth:
type: float, optional, default: 0.5
Where to start smoothing. For example the 1/r term is smoothed from rcut to rcut_smth
: 0.5,
      \"rcut\"rcut:
type: float, optional, default: 6.0
The cut-off radius.
: 6.0,
      \"neuron\"neuron:
type: typing.list[int], optional, default: [10, 20, 40]
Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built.
: [
       25,
       50,
       100
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: False
Whether to use a \"Timestep\" in the skip connection
: false,
      \"axis_neuron\"axis_neuron:
type: int, optional, default: 4, alias: n_axis_neuron
Size of the submatrix of G (embedding matrix).
: 16,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization
: 1,
      \"_comment\": \" that's all\"
    },
    \"fitting_net\"fitting_net:
type: dict
The fitting of physical properties.
: {
      \"neuron\"neuron:
type: typing.list[int], optional, default: [120, 120, 120], alias: n_neuron
The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built.
: [
       240,
       240,
       240
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: True
Whether to use a \"Timestep\" in the skip connection
: true,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization of the fitting net
: 1,
      \"_comment\": \" that's all\"
    },
    \"_comment\": \" that's all\"
  },
  \"learning_rate\"learning_rate:
type: dict, optional
The definition of learning rate
: {
    \"type\"type:
type: str, default: exp
The type of the learning rate.
: \"exp\",
    \"decay_steps\"decay_steps:
type: int, optional, default: 5000
The learning rate is decaying every this number of training steps.
: 50,
    \"start_lr\"start_lr:
type: float, optional, default: 0.001
The learning rate at the start of the training.
: 0.001,
    \"stop_lr\"stop_lr:
type: float, optional, default: 1e-08
The desired learning rate at the end of the training.
: 3.51e-08,
    \"_comment\": \"that's all\"
  },
  \"loss\"loss:
type: dict, optional
The definition of loss function. The loss type should be set to tensor, ener or left unset.
: {
    \"type\"type:
type: str, default: ener
The type of the loss. When the fitting type is ener, the loss type should be set to ener or left unset. When the fitting type is dipole or polar, the loss type should be set to tensor.
: \"ener\",
    \"start_pref_e\"start_pref_e:
type: float | int, optional, default: 0.02
The prefactor of energy loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the energy label should be provided by file energy.npy in each data system. If both start_pref_e and limit_pref_e are set to 0, then the energy will be ignored.
: 0.02,
    \"limit_pref_e\"limit_pref_e:
type: float | int, optional, default: 1.0
The prefactor of energy loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_f\"start_pref_f:
type: float | int, optional, default: 1000
The prefactor of force loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the force label should be provided by file force.npy in each data system. If both start_pref_f and limit_pref_f are set to 0, then the force will be ignored.
: 1000,
    \"limit_pref_f\"limit_pref_f:
type: float | int, optional, default: 1.0
The prefactor of force loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_v\"start_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the virial label should be provided by file virial.npy in each data system. If both start_pref_v and limit_pref_v are set to 0, then the virial will be ignored.
: 0,
    \"limit_pref_v\"limit_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 0,
    \"_comment\": \" that's all\"
  },
  \"training\"training:
type: dict
The training options.
: {
    \"training_data\"training_data:
type: dict, optional
Configurations of training data.
: {
      \"systems\"systems:
type: str | typing.list[str]
The data systems for training. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/training_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.list[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
- string \"mixed:N\": the batch data will be sampled from all systems and merged into a mixed system with the batch size N. Only support the se_atten descriptor.
If MPI is used, the value should be considered as the batch size per task.
: \"auto\",
      \"_comment\": \"that's all\"
    },
    \"validation_data\"validation_data:
type: NoneType | dict, optional, default: None
Configurations of validation data. Similar to that of training data, except that a numb_btch argument may be configured
: {
      \"systems\"systems:
type: str | typing.list[str]
The data systems for validation. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/validation_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.list[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
: \"auto\",
      \"numb_btch\"numb_btch:
type: int, optional, default: 1, alias: numb_batch
An integer that specifies the number of batches to be sampled for each validation period.
: 1,
      \"_comment\": \"that's all\"
    },
    \"numb_steps\"numb_steps:
type: int, alias: stop_batch
Number of training batch. Each training uses one batch of data.
: 10000,
    \"seed\"seed:
type: NoneType | int, optional
The random seed for getting frames from the training data set.
: 10,
    \"disp_file\"disp_file:
type: str, optional, default: lcurve.out
The file for printing learning curve.
: \"lcurve.out\",
    \"disp_freq\"disp_freq:
type: int, optional, default: 1000
The frequency of printing learning curve.
: 200,
    \"save_freq\"save_freq:
type: int, optional, default: 1000
The frequency of saving check point.
: 1000,
    \"_comment\": \"that's all\"
  }
}
" ], "text/plain": [ "" @@ -682,7 +682,7 @@ "DEEPMD INFO See https://deepmd.rtfd.io/credits/ for details.\n", "DEEPMD INFO installed to: /root/miniconda3/envs/deepmd\n", "DEEPMD INFO source : v2.2.7\n", - "DEEPMD INFO source brach: HEAD\n", + "DEEPMD INFO source branch: HEAD\n", "DEEPMD INFO source commit: 839f4fe7\n", "DEEPMD INFO source commit at: 2023-10-27 21:10:24 +0800\n", "DEEPMD INFO build float prec: double\n", @@ -1050,7 +1050,7 @@ "DEEPMD INFO See https://deepmd.rtfd.io/credits/ for details.\n", "DEEPMD INFO installed to: /root/miniconda3/envs/deepmd\n", "DEEPMD INFO source : v2.2.7\n", - "DEEPMD INFO source brach: HEAD\n", + "DEEPMD INFO source branch: HEAD\n", "DEEPMD INFO source commit: 839f4fe7\n", "DEEPMD INFO source commit at: 2023-10-27 21:10:24 +0800\n", "DEEPMD INFO build float prec: double\n", diff --git a/doc/install/install-from-source.md b/doc/install/install-from-source.md index 3f65375865..07239cd3b7 100644 --- a/doc/install/install-from-source.md +++ b/doc/install/install-from-source.md @@ -319,7 +319,7 @@ pip install -U cmake You must enable at least one backend. If you enable two or more backends, these backend libraries must be built in a compatible way, e.g. using the same `_GLIBCXX_USE_CXX11_ABI` flag. -We recommend using [conda pacakges](https://docs.deepmodeling.org/faq/conda.html) from [conda-forge](https://conda-forge.org), which are usually compatible to each other. +We recommend using [conda packages](https://docs.deepmodeling.org/faq/conda.html) from [conda-forge](https://conda-forge.org), which are usually compatible to each other. ::::{tab-set} @@ -427,7 +427,7 @@ See also [ROCm documentation](https://rocm.docs.amd.com/en/latest/conceptual/cma **Type**: `PATH` -Only neccessary for using [LAMMPS plugin mode](./install-lammps.md#install-lammps-plugin-mode). +Only necessary for using [LAMMPS plugin mode](./install-lammps.md#install-lammps-plugin-mode). The path to the [LAMMPS source code](install-lammps.md). LAMMPS 8Apr2021 or later is supported. If not assigned, the plugin mode will not be enabled. diff --git a/doc/install/install-tf.2.12.md b/doc/install/install-tf.2.12.md index 8523345d3d..ab6a9ed00a 100644 --- a/doc/install/install-tf.2.12.md +++ b/doc/install/install-tf.2.12.md @@ -2,7 +2,7 @@ TensorFlow's C++ interface will be compiled from the source code. In this manual, we install TensorFlow 2.12.0. It is noted that the source code of TensorFlow 2.12.0 uses C++ 17, so one needs a C++ compiler that supports C++ 17. -Firstly one installs Bazel. [bazelisk](https://github.com/bazelbuild/bazelisk) can be lanuched to use [bazel](https://github.com/bazelbuild/bazel). +Firstly one installs Bazel. [bazelisk](https://github.com/bazelbuild/bazelisk) can be launched to use [bazel](https://github.com/bazelbuild/bazel). ```bash wget https://github.com/bazelbuild/bazelisk/releases/download/v1.11.0/bazelisk-linux-amd64 -O /some/workspace/bazel/bin/bazel diff --git a/doc/install/install-tf.2.8.md b/doc/install/install-tf.2.8.md index 4145ba01d1..5e9057492b 100644 --- a/doc/install/install-tf.2.8.md +++ b/doc/install/install-tf.2.8.md @@ -1,6 +1,6 @@ # Install TensorFlow's C++ interface -TensorFlow's C++ interface will be compiled from the source code. Firstly one installs Bazel. [bazelisk](https://github.com/bazelbuild/bazelisk) can be lanuched to use [bazel](https://github.com/bazelbuild/bazel). +TensorFlow's C++ interface will be compiled from the source code. Firstly one installs Bazel. [bazelisk](https://github.com/bazelbuild/bazelisk) can be launched to use [bazel](https://github.com/bazelbuild/bazel). ```bash wget https://github.com/bazelbuild/bazelisk/releases/download/v1.11.0/bazelisk-linux-amd64 -O /some/workspace/bazel/bin/bazel diff --git a/doc/model/dprc.md b/doc/model/dprc.md index d9ce24b600..9f3eee244d 100644 --- a/doc/model/dprc.md +++ b/doc/model/dprc.md @@ -66,7 +66,7 @@ In a DPRc model, QM atoms and MM atoms have different atom types. Assuming we ha "type_map": ["C", "H", "HW", "O", "OW", "P"] ``` -As described in the paper, the DPRc model only corrects $E_\text{QM}$ and $E_\text{QM/MM}$ within the cutoff, so we use a hybrid descriptor to describe them separatedly: +As described in the paper, the DPRc model only corrects $E_\text{QM}$ and $E_\text{QM/MM}$ within the cutoff, so we use a hybrid descriptor to describe them separately: ::::{tab-set} diff --git a/doc/model/train-energy-spin.md b/doc/model/train-energy-spin.md index ec169892f2..eda4ffa835 100644 --- a/doc/model/train-energy-spin.md +++ b/doc/model/train-energy-spin.md @@ -145,7 +145,7 @@ We list the details about spin system data format in TensorFlow backend: ### Spin data format in PyTorch/DP -In the PyTorch backend, spin and magnetic forces are listed in seperate files, and the data format may contain the following files: +In the PyTorch backend, spin and magnetic forces are listed in separate files, and the data format may contain the following files: ``` type.raw diff --git a/doc/model/train-se-a-mask.md b/doc/model/train-se-a-mask.md index 69f344b138..93edfc999e 100644 --- a/doc/model/train-se-a-mask.md +++ b/doc/model/train-se-a-mask.md @@ -64,7 +64,7 @@ To make the `aparam.npy` used for descriptor `se_a_mask`, two variables in `fitt ``` - `neuron`, `resnet_dt` and `seed` are the same as the {ref}`fitting_net ` section for fitting energy. -- {ref}`numb_aparam ` gives the dimesion of the `aparam.npy` file. In this example, it is set to 1 and stores the real/virtual sign of the atoms. For real/virtual atoms, the corresponding sign in `aparam.npy` is set to 1/0. +- {ref}`numb_aparam ` gives the dimension of the `aparam.npy` file. In this example, it is set to 1 and stores the real/virtual sign of the atoms. For real/virtual atoms, the corresponding sign in `aparam.npy` is set to 1/0. - {ref}`use_aparam_as_mask ` is set to `true` to use the `aparam.npy` as the mask of the atoms in the descriptor `se_a_mask`. Finally, to make a reasonable fitting task with `se_a_mask` descriptor for DP/MM simulations, the loss function with `se_a_mask` is designed to include the atomic forces difference in specific atoms of the input particles only. diff --git a/doc/nvnmd/nvnmd.md b/doc/nvnmd/nvnmd.md index c415b275ec..279236ec96 100644 --- a/doc/nvnmd/nvnmd.md +++ b/doc/nvnmd/nvnmd.md @@ -78,7 +78,7 @@ where items are defined as: | --------- | --------------------------------------------------------------------- | ---------------------------------------------------------------------------- | | version | the version of network structure | 0 or 1 | | max_nnei | the maximum number of neighbors that do not distinguish element types | 128 or 256 | -| net_size | the size of nueral network | 128 | +| net_size | the size of neural network | 128 | | sel | the number of neighbors | version 0: integer list of lengths 1 to 4 are acceptable; version 1: integer | | rcut | the cutoff radial | (0, 8.0] | | rcut_smth | the smooth cutoff parameter | (0, 8.0] | @@ -162,7 +162,7 @@ where items are defined as: | Item | Mean | Optional Value | | ---------- | --------------------------------------------------- | ------------------ | -| seed | the randome seed | a integer | +| seed | the random seed | a integer | | stop_batch | the total training steps | a positive integer | | numb_test | the accuracy is test by using {numb_test} sample | a positive integer | | disp_file | the log file where the training message display | a string | @@ -213,7 +213,7 @@ where the frozen model file to import is given via the `-m` command line flag, t # Running MD in Bohrium -After CNN and QNN training, you can upload the ML model to our online NVNMD system and run MD there through Bohrium (https://bohrium.dp.tech). Bohrium is a research platfrom designed for AI for Science Era. For more information, please refer to [Bohrium Introduction](https://bohrium-doc.dp.tech/en/docs/WhatIsBohrium/). +After CNN and QNN training, you can upload the ML model to our online NVNMD system and run MD there through Bohrium (https://bohrium.dp.tech). Bohrium is a research platform designed for AI for Science Era. For more information, please refer to [Bohrium Introduction](https://bohrium-doc.dp.tech/en/docs/WhatIsBohrium/). ## Registration diff --git a/doc/third-party/lammps-command.md b/doc/third-party/lammps-command.md index 6a16605bfc..4af3fe5096 100644 --- a/doc/third-party/lammps-command.md +++ b/doc/third-party/lammps-command.md @@ -15,7 +15,7 @@ All units in LAMMPS except `lj` are supported. `lj` is not supported. The most commonly used units are `metal`, since the internal units of distance, energy, force, and charge in DeePMD-kit are `\AA`, `eV`, `eV / \AA`, and `proton charge`, respectively. These units are consistent with the `metal` units in LAMMPS. -If one wants to use other units like `real` or `si`, it is welcome to do so. There is no need to do the unit conversion mannualy. The unit conversion is done automatically by LAMMPS. +If one wants to use other units like `real` or `si`, it is welcome to do so. There is no need to do the unit conversion manually. The unit conversion is done automatically by LAMMPS. The only thing that one needs to take care is the unit of the output of `compute deeptensor/atom`. Working with `metal` units for `compute deeptensor/atom` is totally fine, since there is no unit conversion. For other unit styles, we currently assume that the output of the `compute deeptensor/atom` command has the unit of distance and have applied the unit conversion factor of distance. If a user wants to infer quantities with units other than distance, the user is encouraged to open a GitHub feature request, so that the unit conversion factor can be added. @@ -95,7 +95,7 @@ Evaluate the interaction of the system by using [Deep Potential][DP] or [Deep Po This pair style takes the deep potential defined in a model file that usually has the .pb extension. The model can be trained and frozen by package [DeePMD-kit](https://github.com/deepmodeling/deepmd-kit), which can have either double or single float precision interface. -The model deviation evalulates the consistency of the force predictions from multiple models. By default, only the maximal, minimal and average model deviations are output. If the key `atomic` is set, then the model deviation of force prediction of each atom will be output. +The model deviation evaluates the consistency of the force predictions from multiple models. By default, only the maximal, minimal and average model deviations are output. If the key `atomic` is set, then the model deviation of force prediction of each atom will be output. The unit follows [LAMMPS units](#units) and the [scale factor](https://docs.lammps.org/pair_hybrid.html) is not applied. By default, the model deviation is output in absolute value. If the keyword `relative` is set, then the relative model deviation of the force will be output, including values output by the keyword `atomic`. The relative model deviation of the force on atom $i$ is defined by diff --git a/doc/train/finetuning.md b/doc/train/finetuning.md index 669d1319bd..e50109318d 100644 --- a/doc/train/finetuning.md +++ b/doc/train/finetuning.md @@ -106,7 +106,7 @@ $ dp --pt train input.json --finetune multitask_pretrained.pt --model-branch CHO ``` :::{note} -One can check the available model branches in multi-task pre-trained model by refering to the documentation of the pre-trained model or by using the following command: +One can check the available model branches in multi-task pre-trained model by referring to the documentation of the pre-trained model or by using the following command: ```bash $ dp --pt show multitask_pretrained.pt model-branch diff --git a/doc/troubleshooting/precision.md b/doc/troubleshooting/precision.md index 56dbd51958..5ebef97122 100644 --- a/doc/troubleshooting/precision.md +++ b/doc/troubleshooting/precision.md @@ -14,7 +14,7 @@ Some common reasons are listed below. The unit of training data should follow what is listed in [data section](../data/system.md). Usually, the package to calculate the training data has different units from those of the DeePMD-kit. It is noted that some software label the energy gradient as forces, instead of the negative energy gradient. -It is neccessary to check them carefully to avoid inconsistent data. +It is necessary to check them carefully to avoid inconsistent data. ### SCF coverage and data accuracy @@ -29,7 +29,7 @@ Here is a checklist for the accuracy of data: ### Enough data If the model performs good on the training data, but has bad accuracy on another data, this means some data space is not covered by the training data. -It can be validated by evaluting the [model deviation](../test/model-deviation.md) with multiple models. +It can be validated by evaluating the [model deviation](../test/model-deviation.md) with multiple models. If the model deviation of these data is high for some data, try to collect more data using [DP-GEN](../third-party/out-of-deepmd-kit.md#dp-gen). ### Values of data diff --git a/pyproject.toml b/pyproject.toml index 0a1b2e6731..f4f399156a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -331,14 +331,14 @@ legacy_tox_ini = """ # be silenced # W504 - line break after binary operator - there is conflict between W503 and W504 in -# some lintners. One recomends line bread after and one before binary operator so we -# swith W504 off and recomend this coding style: +# some lintners. One recommends line bread after and one before binary operator so we +# switch W504 off and recommend this coding style: # a = (b + -> instead of -> a = (b # c) + c) [tool.autopep8] ignore = "W504" -# D413 - Missing blank line after last section - makes no sense only adds empy lines in +# D413 - Missing blank line after last section - makes no sense only adds empty lines in # docstrings # D416 - Section name should end with a colon - only applicable to RST type docstrings, # we are using numpy style diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt index 71b3dca1ea..805c6514e0 100644 --- a/source/CMakeLists.txt +++ b/source/CMakeLists.txt @@ -4,7 +4,7 @@ project(DeePMD) option(ENABLE_TENSORFLOW "Enable TensorFlow interface" OFF) option(ENABLE_PYTORCH "Enable PyTorch interface" OFF) -option(BUILD_TESTING "Build test and enable converage" OFF) +option(BUILD_TESTING "Build test and enable coverage" OFF) set(DEEPMD_C_ROOT "" CACHE PATH "Path to imported DeePMD-kit C library") @@ -272,7 +272,7 @@ endif() # set op prec set(HIGH_PREC_DEF "HIGH_PREC") -# this defination doesn't work, but leaving it empty will cause error +# this definition doesn't work, but leaving it empty will cause error set(LOW_PREC_DEF "LOW_PREC") set(HIGH_PREC_VARIANT "") set(LOW_PREC_VARIANT "_low") diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 9d0310d99a..270bc94cc5 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -1286,7 +1286,8 @@ class DeepPotModelDevi { const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; const int *atype_ = &atype[0]; - // memory will be continous for std::vector but not std::vector + // memory will be continuous for std::vector but not + // std::vector std::vector energy_flat(numb_models); std::vector force_flat(static_cast(numb_models) * natoms * 3); @@ -1464,7 +1465,8 @@ class DeepPotModelDevi { const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; const int *atype_ = &atype[0]; - // memory will be continous for std::vector but not std::vector + // memory will be continuous for std::vector but not + // std::vector std::vector energy_flat(numb_models); std::vector force_flat(static_cast(numb_models) * natoms * 3); @@ -2326,7 +2328,7 @@ void inline read_file_to_string(std::string model, std::string &file_content) { int size; const char *c_file_content = DP_ReadFileToChar2(model.c_str(), &size); if (size < 0) { - // negtive size indicates error + // negative size indicates error std::string error_message = std::string(c_file_content, -size); DP_DeleteChar(c_file_content); throw deepmd::hpp::deepmd_exception(error_message); diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index 9ed37d04aa..56c5f9720f 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -1586,7 +1586,7 @@ const char* DP_ReadFileToChar2(const char* c_model, int* size) { try { deepmd::read_file_to_string(model, file_content); } catch (deepmd::deepmd_exception& ex) { - // use negtive size to indicate error + // use negative size to indicate error std::string error_message = std::string(ex.what()); *size = -error_message.size(); return string_to_char(error_message); diff --git a/source/api_cc/include/DeepTensor.h b/source/api_cc/include/DeepTensor.h index f355413d80..1ec14e3e7f 100644 --- a/source/api_cc/include/DeepTensor.h +++ b/source/api_cc/include/DeepTensor.h @@ -37,7 +37,7 @@ class DeepTensorBase { const std::string& name_scope = "") = 0; /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -75,7 +75,7 @@ class DeepTensorBase { /** @} */ /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -180,7 +180,8 @@ class DeepTensor { /** * @brief Evaluate the value by using this model. - * @param[out] value The value to evalute, usually would be the atomic tensor. + * @param[out] value The value to evaluate, usually would be the atomic + *tensor. * @param[in] coord The coordinates of atoms. The array should be of size *natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. @@ -193,7 +194,8 @@ class DeepTensor { const std::vector& box); /** * @brief Evaluate the value by using this model. - * @param[out] value The value to evalute, usually would be the atomic tensor. + * @param[out] value The value to evaluate, usually would be the atomic + *tensor. * @param[in] coord The coordinates of atoms. The array should be of size *natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. @@ -210,7 +212,7 @@ class DeepTensor { const InputNlist& inlist); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -229,7 +231,7 @@ class DeepTensor { const std::vector& box); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -252,7 +254,7 @@ class DeepTensor { const InputNlist& inlist); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -277,7 +279,7 @@ class DeepTensor { const std::vector& box); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size diff --git a/source/api_cc/include/DeepTensorTF.h b/source/api_cc/include/DeepTensorTF.h index 3ca316a29f..3fd8338b1f 100644 --- a/source/api_cc/include/DeepTensorTF.h +++ b/source/api_cc/include/DeepTensorTF.h @@ -39,7 +39,8 @@ class DeepTensorTF : public DeepTensorBase { private: /** * @brief Evaluate the value by using this model. - * @param[out] value The value to evalute, usually would be the atomic tensor. + * @param[out] value The value to evaluate, usually would be the atomic + *tensor. * @param[in] coord The coordinates of atoms. The array should be of size *natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. @@ -52,7 +53,8 @@ class DeepTensorTF : public DeepTensorBase { const std::vector& box); /** * @brief Evaluate the value by using this model. - * @param[out] value The value to evalute, usually would be the atomic tensor. + * @param[out] value The value to evaluate, usually would be the atomic + *tensor. * @param[in] coord The coordinates of atoms. The array should be of size *natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. @@ -69,7 +71,7 @@ class DeepTensorTF : public DeepTensorBase { const InputNlist& inlist); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -94,7 +96,7 @@ class DeepTensorTF : public DeepTensorBase { const std::vector& box); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -163,7 +165,7 @@ class DeepTensorTF : public DeepTensorBase { /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -201,7 +203,7 @@ class DeepTensorTF : public DeepTensorBase { /** @} */ /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size diff --git a/source/api_cc/include/commonTF.h b/source/api_cc/include/commonTF.h index 0c14597e30..003b330308 100644 --- a/source/api_cc/include/commonTF.h +++ b/source/api_cc/include/commonTF.h @@ -63,7 +63,7 @@ int session_get_dtype(tensorflow::Session* session, * @param[in] aparam_ Atom parameters. * @param[in] atommap Atom map. * @param[in] scope The scope of the tensors. - * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is + * @param[in] aparam_nall Whether the atomic dimension of atomic parameters is * nall. */ template @@ -93,7 +93,7 @@ int session_input_tensors( * @param[in] nghost Number of ghost atoms. * @param[in] ago Update the internal neighbour list if ago is 0. * @param[in] scope The scope of the tensors. - * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is + * @param[in] aparam_nall Whether the atomic dimension of atomic parameters is * nall. */ template @@ -126,7 +126,7 @@ int session_input_tensors( * @param[in] nghost Number of ghost atoms. * @param[in] ago Update the internal neighbour list if ago is 0. * @param[in] scope The scope of the tensors. - * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is + * @param[in] aparam_nall Whether the atomic dimension of atomic parameters is * nall. */ template diff --git a/source/api_cc/src/DataModifierTF.cc b/source/api_cc/src/DataModifierTF.cc index aaa2252955..80cf6120a3 100644 --- a/source/api_cc/src/DataModifierTF.cc +++ b/source/api_cc/src/DataModifierTF.cc @@ -306,7 +306,7 @@ void DipoleChargeModifierTF::compute( dfcorr_2[pairs[ii].first * 3 + dd] += delef_[pairs[ii].second * 3 + dd]; } } - // add ele contrinution + // add ele contribution dfcorr_ = dfcorr_2; for (int ii = 0; ii < nloc_real; ++ii) { int oii = real_bkw_map[ii]; diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index d7a7edfb60..a990cecf8d 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -465,10 +465,10 @@ void DeepPotTF::init(const std::string& model, } if (!model_compatable(model_version)) { throw deepmd::deepmd_exception( - "incompatable model: version " + model_version + + "incompatible model: version " + model_version + " in graph, but version " + global_model_version + " supported " - "See https://deepmd.rtfd.io/compatability/ for details."); + "See https://deepmd.rtfd.io/compatibility/ for details."); } dtype = session_get_dtype(session, "descrpt_attr/rcut"); if (dtype == tensorflow::DT_DOUBLE) { diff --git a/source/api_cc/src/DeepTensorTF.cc b/source/api_cc/src/DeepTensorTF.cc index c69b7c018e..1081473f25 100644 --- a/source/api_cc/src/DeepTensorTF.cc +++ b/source/api_cc/src/DeepTensorTF.cc @@ -65,10 +65,10 @@ void DeepTensorTF::init(const std::string &model, } if (!model_compatable(model_version)) { throw deepmd::deepmd_exception( - "incompatable model: version " + model_version + + "incompatible model: version " + model_version + " in graph, but version " + global_model_version + " supported " - "See https://deepmd.rtfd.io/compatability/ for details."); + "See https://deepmd.rtfd.io/compatibility/ for details."); } dtype = session_get_dtype(session, "descrpt_attr/rcut"); if (dtype == tensorflow::DT_DOUBLE) { diff --git a/source/api_cc/src/common.cc b/source/api_cc/src/common.cc index baa257d60e..e84517ea7a 100644 --- a/source/api_cc/src/common.cc +++ b/source/api_cc/src/common.cc @@ -934,7 +934,7 @@ void deepmd::select_map(std::vector& out, for (int ii = 0; ii < in.size() / stride / nframes; ++ii) { #ifdef DEBUG assert(ii < idx_map.size() && "idx goes over the idx map size"); - assert(idx_map[ii] < out.size() && "mappped idx goes over the out size"); + assert(idx_map[ii] < out.size() && "mapped idx goes over the out size"); #endif if (idx_map[ii] >= 0) { int to_ii = idx_map[ii]; diff --git a/source/cmake/Findtensorflow.cmake b/source/cmake/Findtensorflow.cmake index 6321d4872b..d579af7679 100644 --- a/source/cmake/Findtensorflow.cmake +++ b/source/cmake/Findtensorflow.cmake @@ -366,7 +366,7 @@ elseif(NOT DEFINED OP_CXX_ABI) AND ${CPP_CXX_ABI_COMPILE_RESULT_VAR1}) message( WARNING - "Both _GLIBCXX_USE_CXX11_ABI=0 and 1 work. The reason may be that your C++ compiler (e.g. Red Hat Developer Toolset) does not support the custom cxx11 abi flag. For convience, we set _GLIBCXX_USE_CXX11_ABI=1." + "Both _GLIBCXX_USE_CXX11_ABI=0 and 1 work. The reason may be that your C++ compiler (e.g. Red Hat Developer Toolset) does not support the custom cxx11 abi flag. For convenience, we set _GLIBCXX_USE_CXX11_ABI=1." ) set(OP_CXX_ABI 1) else() diff --git a/source/cmake/tf_version.cpp b/source/cmake/tf_version.cpp index 390bd4c375..6d09e33493 100644 --- a/source/cmake/tf_version.cpp +++ b/source/cmake/tf_version.cpp @@ -6,7 +6,7 @@ int main(int argc, char* argv[]) { // See // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/public/version.h - // TF_VERSION_STRING has been avaiable since TensorFlow v0.6 + // TF_VERSION_STRING has been available since TensorFlow v0.6 std::cout << TF_VERSION_STRING; return 0; } diff --git a/source/gmx/dp_gmx_patch b/source/gmx/dp_gmx_patch index 4dacaea835..8df3f12cc6 100644 --- a/source/gmx/dp_gmx_patch +++ b/source/gmx/dp_gmx_patch @@ -128,6 +128,6 @@ do v) VERSION=${OPTARG} && DEEPMD_PATCH_ROOT=${DEEPMD_PATCH_ROOT}/${VERSION} ;; p) check_version ${VERSION} && dp_gmx_patch ${GMX_ROOT} ;; r) check_version ${VERSION} && dp_gmx_revert ${GMX_ROOT} ;; - *) echo "- ERROR: Invaild option ${opt}" && exit 1 ;; + *) echo "- ERROR: Invalid option ${opt}" && exit 1 ;; esac done diff --git a/source/gmx/src/gmx_plugin.cpp b/source/gmx/src/gmx_plugin.cpp index 15c4fa84ae..53f02f1fbe 100644 --- a/source/gmx/src/gmx_plugin.cpp +++ b/source/gmx/src/gmx_plugin.cpp @@ -103,7 +103,7 @@ void DeepmdPlugin::init_from_json(char* json_file) { std::cout << "Successfully init plugin!" << std::endl; } else { - std::cerr << "Invaild json file: " << json_file << std::endl; + std::cerr << "Invalid json file: " << json_file << std::endl; exit(1); } } diff --git a/source/install/build_tf.py b/source/install/build_tf.py index a9e1e247cd..0239ebfa46 100755 --- a/source/install/build_tf.py +++ b/source/install/build_tf.py @@ -19,7 +19,7 @@ if sys.version_info[0] < 3: # noqa: UP036 raise Exception("Python 3 or a more recent version is required.") -# The script should only rely on the stardard Python libraries. +# The script should only rely on the standard Python libraries. import argparse import hashlib @@ -333,7 +333,7 @@ def copytree2(src: Path, dst: Path, *args, **kwargs): call( [ "/bin/cp", - # archieve, recursive, force, do not create one inside + # achieve, recursive, force, do not create one inside # https://stackoverflow.com/a/24486142/9567349 "-arfT", str(tmpdst), @@ -386,7 +386,7 @@ def call(commands: list[str], env={}, **kwargs): # online resources to download RESOURCES = { - # bazelisk is used to warpper bazel + # bazelisk is used to wrapper bazel "bazelisk-1.11.0": OnlineResource( "bazel-linux-amd64-1.11.0", "https://github.com/bazelbuild/bazelisk/releases/download/v1.11.0/bazelisk-linux-amd64", diff --git a/source/lib/include/ComputeDescriptor.h b/source/lib/include/ComputeDescriptor.h index 7c3eaf4cd2..733cb1ee0c 100644 --- a/source/lib/include/ComputeDescriptor.h +++ b/source/lib/include/ComputeDescriptor.h @@ -501,7 +501,7 @@ void compute_descriptor(std::vector &descrpt_a, if (fmt_nlist_a[nei_iter] < 0) { break; } - // drdS, stored in tranposed form + // drdS, stored in transposed form double dtrdST[4][3]; double *rr = &sel_a_diff[nei_iter][0]; double tr[3]; diff --git a/source/lib/include/coord.h b/source/lib/include/coord.h index 699a90898c..6621d714a5 100644 --- a/source/lib/include/coord.h +++ b/source/lib/include/coord.h @@ -18,7 +18,7 @@ void normalize_coord_cpu(FPTYPE* coord, // in_c, in_t, nloc, mem_nall, rc, region // mem_nall is the size of allocated memory for out_c, out_t, mapping // returns -// 0: succssful +// 0: successful // 1: the memory is not large enough to hold all copied coords and types. // i.e. nall > mem_nall template @@ -66,7 +66,7 @@ void normalize_coord_gpu(FPTYPE* coord, // box_info mem_nall is the size of allocated memory for out_c, out_t, // mapping // returns -// 0: succssful +// 0: successful // 1: the memory is not large enough to hold all copied coords and types. // i.e. nall > mem_nall template diff --git a/source/lib/include/neighbor_list.h b/source/lib/include/neighbor_list.h index b99827b552..95f5cb6174 100644 --- a/source/lib/include/neighbor_list.h +++ b/source/lib/include/neighbor_list.h @@ -126,7 +126,7 @@ int max_numneigh(const InputNlist& to_nlist); // c_cpy, nloc, nall, mem_size, rcut, region // mem_size is the size of allocated memory for jlist. // returns -// 0: succssful +// 0: successful // 1: the memory is not large enough to hold all neighbors. // i.e. max_list_size > mem_nall template @@ -190,7 +190,7 @@ void use_nlist_map(int* nlist, // c_cpy, nloc, nall, mem_size, rcut, region // mem_size is the size of allocated memory for jlist. // returns -// 0: succssful +// 0: successful // 1: the memory is not large enough to hold all neighbors. // i.e. max_list_size > mem_nall template diff --git a/source/lib/include/prod_force.h b/source/lib/include/prod_force.h index b5ae68bdce..2d88607131 100644 --- a/source/lib/include/prod_force.h +++ b/source/lib/include/prod_force.h @@ -29,7 +29,7 @@ void prod_force_a_cpu(FPTYPE* force, /** * @brief Produce force from net_deriv and in_deriv. * @details This function is used for multi-threading. Only part of atoms - * are computed in this thread. They will be comptued in parallel. + * are computed in this thread. They will be computed in parallel. * * @tparam FPTYPE float or double * @param[out] force Atomic forces. diff --git a/source/lib/src/gpu/tabulate.cu b/source/lib/src/gpu/tabulate.cu index 71ea17ced5..e0723b81af 100644 --- a/source/lib/src/gpu/tabulate.cu +++ b/source/lib/src/gpu/tabulate.cu @@ -272,7 +272,7 @@ __global__ void tabulate_fusion_se_a_grad_fifth_order_polynomial( bool enable_se_atten = two_embed != nullptr; GPU_DYNAMIC_SHARED_MEM_DECL(int, _data); const int_64 block_idx = blockIdx.x; // nloc - const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usally 128 here~ + const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usually 128 here~ int warp_idx = GpuShuffleSync(0xffffffff, threadIdx.x / WARP_SIZE, 0); int lane_idx = threadIdx.x % WARP_SIZE; int breakpoint = nnei - 1; @@ -531,7 +531,7 @@ __global__ void tabulate_fusion_se_t_grad_fifth_order_polynomial( const int last_layer_size) { GPU_DYNAMIC_SHARED_MEM_DECL(int, _data); const int_64 block_idx = blockIdx.x; // nloc - const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usally 128 here~ + const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usually 128 here~ int warp_idx = GpuShuffleSync(0xffffffff, threadIdx.x / WARP_SIZE, 0); int lane_idx = threadIdx.x % WARP_SIZE; FPTYPE* iteratorA = (FPTYPE*)&_data[0]; // dy @@ -678,7 +678,7 @@ __global__ void tabulate_fusion_se_r_grad_fifth_order_polynomial( const int nnei, const int last_layer_size) { const int_64 block_idx = blockIdx.x; // nloc - const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usally 128 here~ + const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usually 128 here~ int warp_idx = GpuShuffleSync(0xffffffff, thread_idx / WARP_SIZE, 0); int lane_idx = thread_idx % WARP_SIZE; __syncthreads(); diff --git a/source/lib/tests/test_fmt_nlist.cc b/source/lib/tests/test_fmt_nlist.cc index bc79c92ea6..6cd24b556a 100644 --- a/source/lib/tests/test_fmt_nlist.cc +++ b/source/lib/tests/test_fmt_nlist.cc @@ -134,7 +134,7 @@ class TestEncodingDecodingNborInfo : public ::testing::Test { void TearDown() override {} }; -// orginal implementation. copy ghost +// original implementation. copy ghost TEST_F(TestFormatNlist, orig_cpy) { std::vector> nlist_a, nlist_r; std::vector fmt_nlist_a, fmt_nlist_r; @@ -155,7 +155,7 @@ TEST_F(TestFormatNlist, orig_cpy) { } } -// orginal implementation. copy ghost should be equal to pbc +// original implementation. copy ghost should be equal to pbc TEST_F(TestFormatNlist, orig_pbc) { std::vector> nlist_a_1, nlist_r_1; build_nlist(nlist_a_1, nlist_r_1, posi, rc, rc, ncell, region); @@ -174,7 +174,7 @@ TEST_F(TestFormatNlist, orig_pbc) { } } -// orginal implementation. copy ghost should be equal to pbc +// original implementation. copy ghost should be equal to pbc TEST_F(TestFormatNlist, orig_cpy_equal_pbc) { std::vector> nlist_a_0, nlist_r_0; build_nlist(nlist_a_0, nlist_r_0, posi_cpy, nloc, rc, rc, nat_stt, ncell, @@ -251,7 +251,7 @@ TEST_F(TestFormatNlist, cpu) { } } -// orginal implementation. copy ghost +// original implementation. copy ghost TEST_F(TestFormatNlistShortSel, orig_cpy) { std::vector> nlist_a, nlist_r; std::vector fmt_nlist_a, fmt_nlist_r; diff --git a/source/lmp/pppm_dplr.cpp b/source/lmp/pppm_dplr.cpp index 613a9f1c93..e1bdb828af 100644 --- a/source/lmp/pppm_dplr.cpp +++ b/source/lmp/pppm_dplr.cpp @@ -92,7 +92,7 @@ void PPPMDPLR::compute(int eflag, int vflag) { return; } - // convert atoms from box to lamda coords + // convert atoms from box to lambda coords if (triclinic == 0) { boxlo = domain->boxlo; @@ -266,7 +266,7 @@ void PPPMDPLR::compute(int eflag, int vflag) { slabcorr(); } - // convert atoms back from lamda to box coords + // convert atoms back from lambda to box coords if (triclinic) { domain->lamda2x(atom->nlocal); diff --git a/source/op/tf/descrpt.cc b/source/op/tf/descrpt.cc index 6362b8d37a..db3b0ca8e5 100644 --- a/source/op/tf/descrpt.cc +++ b/source/op/tf/descrpt.cc @@ -293,7 +293,7 @@ class DescrptOp : public OpKernel { } else if (nei_mode == -1) { ::build_nlist(d_nlist_a, d_nlist_r, d_coord3, rcut_a, rcut_r, NULL); } else { - throw deepmd::deepmd_exception("unknow neighbor mode"); + throw deepmd::deepmd_exception("unknown neighbor mode"); } // loop over atoms, compute descriptors for each atom diff --git a/source/op/tf/descrpt_se_a_ef.cc b/source/op/tf/descrpt_se_a_ef.cc index 96c953f167..18dda3d8b0 100644 --- a/source/op/tf/descrpt_se_a_ef.cc +++ b/source/op/tf/descrpt_se_a_ef.cc @@ -310,7 +310,7 @@ class DescrptSeAEfOp : public OpKernel { } else if (nei_mode == -1) { ::build_nlist(d_nlist_a, d_nlist_r, d_coord3, rcut_a, rcut_r, NULL); } else { - throw deepmd::deepmd_exception("unknow neighbor mode"); + throw deepmd::deepmd_exception("unknown neighbor mode"); } // loop over atoms, compute descriptors for each atom diff --git a/source/op/tf/descrpt_se_a_ef_para.cc b/source/op/tf/descrpt_se_a_ef_para.cc index 6dc4442ee6..0f34de3f4f 100644 --- a/source/op/tf/descrpt_se_a_ef_para.cc +++ b/source/op/tf/descrpt_se_a_ef_para.cc @@ -310,7 +310,7 @@ class DescrptSeAEfParaOp : public OpKernel { } else if (nei_mode == -1) { ::build_nlist(d_nlist_a, d_nlist_r, d_coord3, rcut_a, rcut_r, NULL); } else { - throw deepmd::deepmd_exception("unknow neighbor mode"); + throw deepmd::deepmd_exception("unknown neighbor mode"); } // loop over atoms, compute descriptors for each atom diff --git a/source/op/tf/descrpt_se_a_ef_vert.cc b/source/op/tf/descrpt_se_a_ef_vert.cc index 9899e29f06..b4eb30d9ee 100644 --- a/source/op/tf/descrpt_se_a_ef_vert.cc +++ b/source/op/tf/descrpt_se_a_ef_vert.cc @@ -310,7 +310,7 @@ class DescrptSeAEfVertOp : public OpKernel { } else if (nei_mode == -1) { ::build_nlist(d_nlist_a, d_nlist_r, d_coord3, rcut_a, rcut_r, NULL); } else { - throw deepmd::deepmd_exception("unknow neighbor mode"); + throw deepmd::deepmd_exception("unknown neighbor mode"); } // loop over atoms, compute descriptors for each atom diff --git a/source/op/tf/descrpt_se_a_mask.cc b/source/op/tf/descrpt_se_a_mask.cc index e27ea099ab..28e4a575db 100644 --- a/source/op/tf/descrpt_se_a_mask.cc +++ b/source/op/tf/descrpt_se_a_mask.cc @@ -181,7 +181,7 @@ class DescrptSeAMaskOp : public OpKernel { for (int jj = 0; jj < natoms * 3; ++jj) { rij(kk, ii * natoms * 3 + jj) = 0.; } - // Save the neighbor atoms indicies. + // Save the neighbor atoms indices. for (int jj = 0; jj < natoms; jj++) { nlist(kk, ii * natoms + jj) = -1; } @@ -304,7 +304,7 @@ class DescrptSeAMaskOp : public OpKernel { for (int jj = 0; jj < natoms * 3; ++jj) { rij(kk, ii * natoms * 3 + jj) = rij_atom[jj]; } - // Save the neighbor atoms indicies. + // Save the neighbor atoms indices. for (int jj = 0; jj < natoms; ++jj) { nlist(kk, ii * natoms + jj) = sorted_nlist[jj]; } diff --git a/source/op/tf/neighbor_stat.cc b/source/op/tf/neighbor_stat.cc index d2a6b3ab31..26f13b0c84 100644 --- a/source/op/tf/neighbor_stat.cc +++ b/source/op/tf/neighbor_stat.cc @@ -243,7 +243,7 @@ class NeighborStatOp : public OpKernel { } else if (nei_mode == -1) { ::build_nlist(d_nlist_a, d_nlist_r, d_coord3, -1, rcut, NULL); } else { - throw deepmd::deepmd_exception("unknow neighbor mode"); + throw deepmd::deepmd_exception("unknown neighbor mode"); } int MAX_NNEI = 0; diff --git a/source/op/tf/pairwise.cc b/source/op/tf/pairwise.cc index 8ed140a14a..ba1e5e6475 100644 --- a/source/op/tf/pairwise.cc +++ b/source/op/tf/pairwise.cc @@ -78,7 +78,7 @@ class PairwiseIdxOp : public OpKernel { backward_qm_maps.push_back(backward_qm_map); forward_qmmm_maps.push_back(forward_qmmm_map); backward_qmmm_maps.push_back(backward_qmmm_map); - // get the maximun + // get the maximum int nghost_qm_ii = nall_qm_ii - nloc_qm_ii, nghost_qmmm_ii = nall_qmmm_ii - nloc_qmmm_ii; nloc_qm.push_back(nloc_qm_ii); diff --git a/source/op/tf/prod_env_mat_multi_device.cc b/source/op/tf/prod_env_mat_multi_device.cc index 7037a00a6c..e374102224 100644 --- a/source/op/tf/prod_env_mat_multi_device.cc +++ b/source/op/tf/prod_env_mat_multi_device.cc @@ -485,7 +485,7 @@ class ProdEnvMatAOp : public OpKernel { const FPTYPE* std = std_tensor.flat().data(); const int* p_type = type_tensor.flat().data(); - // must declar out of if, otherwise the memory will be destroyed! + // must declare out of if, otherwise the memory will be destroyed! Tensor int_temp; Tensor uint64_temp; std::vector tensor_list(7); @@ -791,7 +791,7 @@ class ProdEnvMatROp : public OpKernel { const FPTYPE* std = std_tensor.flat().data(); const int* p_type = type_tensor.flat().data(); - // must declar out of if, otherwise the memory will be destroyed! + // must declare out of if, otherwise the memory will be destroyed! Tensor int_temp; Tensor uint64_temp; std::vector tensor_list(7); @@ -1144,7 +1144,7 @@ class ProdEnvMatAMixOp : public OpKernel { } } - // must declar out of if, otherwise the memory will be destroyed! + // must declare out of if, otherwise the memory will be destroyed! Tensor int_temp; Tensor uint64_temp; std::vector tensor_list(7); diff --git a/source/op/tf/prod_env_mat_multi_device_nvnmd.cc b/source/op/tf/prod_env_mat_multi_device_nvnmd.cc index d9f9275b86..57390077ef 100644 --- a/source/op/tf/prod_env_mat_multi_device_nvnmd.cc +++ b/source/op/tf/prod_env_mat_multi_device_nvnmd.cc @@ -45,7 +45,7 @@ REGISTER_OP("ProdEnvMatANvnmdQuantize") .Output("descrpt_deriv: T") .Output("rij: T") .Output("nlist: int32"); -// only sel_a and rcut_r uesd. +// only sel_a and rcut_r used. // ProdEnvMatAMixNvnmd REGISTER_OP("ProdEnvMatAMixNvnmdQuantize") @@ -68,7 +68,7 @@ REGISTER_OP("ProdEnvMatAMixNvnmdQuantize") .Output("nlist: int32") .Output("ntype: int32") .Output("nmask: bool"); -// only sel_a and rcut_r uesd. +// only sel_a and rcut_r used. template static int _norm_copy_coord_cpu(std::vector& coord_cpy, @@ -463,7 +463,7 @@ class ProdEnvMatANvnmdQuantizeOp : public OpKernel { const FPTYPE* std = std_tensor.flat().data(); const int* p_type = type_tensor.flat().data(); - // must declar out of if, otherwise the memory will be destroyed! + // must declare out of if, otherwise the memory will be destroyed! Tensor int_temp; Tensor uint64_temp; std::vector tensor_list(7); @@ -734,7 +734,7 @@ class ProdEnvMatAMixNvnmdQuantizeOp : public OpKernel { } } - // must declar out of if, otherwise the memory will be destroyed! + // must declare out of if, otherwise the memory will be destroyed! Tensor int_temp; Tensor uint64_temp; std::vector tensor_list(7); diff --git a/source/tests/common/dpmodel/test_pairtab_preprocess.py b/source/tests/common/dpmodel/test_pairtab_preprocess.py index da3b9251f7..7f4058dedd 100644 --- a/source/tests/common/dpmodel/test_pairtab_preprocess.py +++ b/source/tests/common/dpmodel/test_pairtab_preprocess.py @@ -72,7 +72,7 @@ def test_preprocess(self): ) # for this test case, the table does not decay to zero at rcut = 0.22, - # in the cubic spline code, we use a fixed size grid, if will be a problem if we introduce variable gird size. + # in the cubic spline code, we use a fixed size grid, if will be a problem if we introduce variable grid size. # we will do post process to overwrite spline coefficient `a3`,`a2`,`a1`,`a0`, to ensure energy decays to `0`. np.testing.assert_allclose( self.tab3.vdata, diff --git a/source/tests/common/test_argument_parser.py b/source/tests/common/test_argument_parser.py index 1404185607..2c67c1f6cb 100644 --- a/source/tests/common/test_argument_parser.py +++ b/source/tests/common/test_argument_parser.py @@ -156,7 +156,7 @@ def run_test(self, *, command: str, mapping: "TEST_DICT"): namespace = parse_args(cmd_args) except SystemExit as e: raise SystemExit( - f"Encountered expection when parsing arguments ->\n\n" + f"Encountered exception when parsing arguments ->\n\n" f"{buffer.getvalue()}\n" f"passed in arguments were: {cmd_args}\n" f"built from dict {mapping}" @@ -188,7 +188,7 @@ def run_test(self, *, command: str, mapping: "TEST_DICT"): namespace = parse_args(cmd_args) except SystemExit as e: raise SystemExit( - f"Encountered expection when parsing DEFAULT arguments ->\n\n" + f"Encountered exception when parsing DEFAULT arguments ->\n\n" f"{buffer.getvalue()}\n" f"passed in arguments were: {cmd_args}\n" f"built from dict {mapping}" diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index e3bf808978..885662c766 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -500,7 +500,7 @@ def tearDown(self) -> None: def parameterized(*attrs: tuple, **subblock_attrs: tuple) -> Callable: """Parameterized test. - Orginal class will not be actually generated. Avoid inherbiting from it. + Original class will not be actually generated. Avoid inherbiting from it. New classes are generated with the name of the original class and the parameters. diff --git a/source/tests/consistent/test_type_embedding.py b/source/tests/consistent/test_type_embedding.py index e2836c7a6c..a4b516ef16 100644 --- a/source/tests/consistent/test_type_embedding.py +++ b/source/tests/consistent/test_type_embedding.py @@ -90,7 +90,7 @@ def addtional_data(self) -> dict: use_econf_tebd, use_tebd_bias, ) = self.param - # implict argument not input by users + # implicit argument not input by users return { "ntypes": self.ntypes, "padding": padding, diff --git a/source/tests/pt/model/test_descriptor_dpa1.py b/source/tests/pt/model/test_descriptor_dpa1.py index a3d696516a..ddd5dc6c3c 100644 --- a/source/tests/pt/model/test_descriptor_dpa1.py +++ b/source/tests/pt/model/test_descriptor_dpa1.py @@ -249,7 +249,7 @@ def test_descriptor_block(self): coord = self.coord atype = self.atype box = self.cell - # handel type_embedding + # handle type_embedding type_embedding = TypeEmbedNet(ntypes, 8, use_tebd_bias=True).to(env.DEVICE) type_embedding.load_state_dict( torch.load(self.file_type_embed, weights_only=True) diff --git a/source/tests/pt/model/test_embedding_net.py b/source/tests/pt/model/test_embedding_net.py index 1566eb2416..2cfcaa820e 100644 --- a/source/tests/pt/model/test_embedding_net.py +++ b/source/tests/pt/model/test_embedding_net.py @@ -181,7 +181,7 @@ def test_consistency(self): key = gen_key(worb=m[2], depth=int(m[1]) + 1, elemid=int(m[0])) var = dp_vars[key] with torch.no_grad(): - # Keep parameter value consistency between 2 implentations + # Keep parameter value consistency between 2 implementations param.data.copy_(torch.from_numpy(var)) pt_coord = self.torch_batch["coord"].to(env.DEVICE) diff --git a/source/tests/pt/model/test_fitting_net.py b/source/tests/pt/model/test_fitting_net.py index ecff0d47e6..e08eed4f8b 100644 --- a/source/tests/pt/model/test_fitting_net.py +++ b/source/tests/pt/model/test_fitting_net.py @@ -133,7 +133,7 @@ def test_consistency(self): assert key is not None var = values[key] with torch.no_grad(): - # Keep parameter value consistency between 2 implentations + # Keep parameter value consistency between 2 implementations param.data.copy_(torch.from_numpy(var)) embedding = torch.from_numpy(self.embedding) embedding = embedding.view(4, -1, self.embedding_width) diff --git a/source/tests/pt/model/test_make_hessian_model.py b/source/tests/pt/model/test_make_hessian_model.py index ef615554ef..df58d115a2 100644 --- a/source/tests/pt/model/test_make_hessian_model.py +++ b/source/tests/pt/model/test_make_hessian_model.py @@ -99,7 +99,7 @@ def test( aparam = torch.rand( [nf, natoms * nap], dtype=dtype, device=env.DEVICE, generator=generator ) - # forward hess and valu models + # forward hess and value models ret_dict0 = self.model_hess.forward_common( coord, atype, box=cell, fparam=fparam, aparam=aparam ) diff --git a/source/tests/pt/model/test_model.py b/source/tests/pt/model/test_model.py index 8fdbdaf413..84f5a113a3 100644 --- a/source/tests/pt/model/test_model.py +++ b/source/tests/pt/model/test_model.py @@ -300,7 +300,7 @@ def test_consistency(self): limit_pref_f=self.limit_pref_f, ) - # Keep statistics consistency between 2 implentations + # Keep statistics consistency between 2 implementations my_em = my_model.get_descriptor() mean = stat_dict["descriptor.mean"].reshape([self.ntypes, my_em.get_nsel(), 4]) stddev = stat_dict["descriptor.stddev"].reshape( @@ -314,7 +314,7 @@ def test_consistency(self): stat_dict["fitting_net.bias_atom_e"], device=DEVICE ) - # Keep parameter value consistency between 2 implentations + # Keep parameter value consistency between 2 implementations for name, param in my_model.named_parameters(): name = name.replace("sea.", "") var_name = torch2tf(name, last_layer_id=len(self.n_neuron)) diff --git a/source/tests/pt/model/test_nlist.py b/source/tests/pt/model/test_nlist.py index c4401b2cdd..7558a2a7d9 100644 --- a/source/tests/pt/model/test_nlist.py +++ b/source/tests/pt/model/test_nlist.py @@ -44,7 +44,7 @@ def setUp(self): self.rcut = 1.01 self.prec = 1e-10 self.nsel = [10, 10] - # genrated by preprocess.build_neighbor_list + # generated by preprocess.build_neighbor_list # ref_nlist, _, _ = legacy_build_neighbor_list( # 2, ecoord[0], eatype[0], # self.rcut, diff --git a/source/tests/pt/model/test_unused_params.py b/source/tests/pt/model/test_unused_params.py index 3f068d5e5b..98bbe7040e 100644 --- a/source/tests/pt/model/test_unused_params.py +++ b/source/tests/pt/model/test_unused_params.py @@ -38,10 +38,10 @@ def test_unused(self): [True], ): if (not drrd) and (not grrg) and h2: - # skip the case h2 is not envolved + # skip the case h2 is not involved continue if (not grrg) and (not conv): - # skip the case g2 is not envolved + # skip the case g2 is not involved continue model = copy.deepcopy(model_dpa2) model["descriptor"]["repformer"]["nlayers"] = 2 diff --git a/source/tests/pt/test_training.py b/source/tests/pt/test_training.py index fa9e5c138a..a7fcedcede 100644 --- a/source/tests/pt/test_training.py +++ b/source/tests/pt/test_training.py @@ -477,7 +477,7 @@ def test_dp_train(self): trainer.run() state_dict_trained = trainer.wrapper.model.state_dict() - # test fine-tuning using diffferent fitting_net, here using property fitting + # test fine-tuning using different fitting_net, here using property fitting finetune_model = self.config["training"].get("save_ckpt", "model.ckpt") + ".pt" self.config_property["model"], finetune_links = get_finetune_rules( finetune_model, diff --git a/source/tests/tf/common.py b/source/tests/tf/common.py index 705e9f7faa..2b912c7a10 100644 --- a/source/tests/tf/common.py +++ b/source/tests/tf/common.py @@ -969,7 +969,7 @@ def __init__(self, systems, set_prefix, batch_size, test_size, rcut, run_opt=Non chk_ret = self.data_systems[ii].check_test_size(test_size) if chk_ret is not None: warnings.warn( - "WARNNING: system %s required test size %d is larger than the size %d of the dataset %s" + "WARNING: system %s required test size %d is larger than the size %d of the dataset %s" % (self.system_dirs[ii], test_size, chk_ret[1], chk_ret[0]) ) diff --git a/source/tests/tf/test_model_pairtab.py b/source/tests/tf/test_model_pairtab.py index 5caeb0a053..0a09e70430 100644 --- a/source/tests/tf/test_model_pairtab.py +++ b/source/tests/tf/test_model_pairtab.py @@ -42,7 +42,7 @@ def test_model(self): rcut = jdata["model"]["rcut"] def pair_pot(r: float): - # LJ, as exmaple + # LJ, as example return 4 * (1 / r**12 - 1 / r**6) dx = 1e-4 diff --git a/source/tests/universal/common/cases/atomic_model/utils.py b/source/tests/universal/common/cases/atomic_model/utils.py index bfd2e2cd5f..97a6cf707b 100644 --- a/source/tests/universal/common/cases/atomic_model/utils.py +++ b/source/tests/universal/common/cases/atomic_model/utils.py @@ -40,7 +40,7 @@ class AtomicModelTestCase: expected_has_message_passing: bool """Expected whether having message passing.""" forward_wrapper: Callable[[Any], Any] - """Calss wrapper for forward method.""" + """Class wrapper for forward method.""" aprec_dict: dict[str, Optional[float]] """Dictionary of absolute precision in each test.""" rprec_dict: dict[str, Optional[float]] From 40b3ea1854438d19037864ea0770dc2241b293b6 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 28 Oct 2024 19:56:39 -0400 Subject: [PATCH 089/193] docs: document the floating-point precision of the model (#4240) ## Summary by CodeRabbit - **New Features** - Added a new section on `precision` in the documentation, enhancing navigation. - Introduced detailed guidelines on floating-point precision settings for the model. - Included structured instructions for creating models with the PyTorch backend. - **Documentation** - Expanded troubleshooting documentation related to model precision issues, including data accuracy and training recommendations. - Enhanced guidelines for integrating new components into user configurations and ensuring model integrity across different backends. --------- Signed-off-by: Jinzhe Zeng --- doc/development/create-a-model-pt.md | 9 +++++++++ doc/model/index.rst | 1 + doc/model/precision.md | 15 +++++++++++++++ doc/troubleshooting/precision.md | 1 + 4 files changed, 26 insertions(+) create mode 100644 doc/model/precision.md diff --git a/doc/development/create-a-model-pt.md b/doc/development/create-a-model-pt.md index 875067e2b8..8d2b1494b5 100644 --- a/doc/development/create-a-model-pt.md +++ b/doc/development/create-a-model-pt.md @@ -137,6 +137,15 @@ class SomeAtomicModel(BaseAtomicModel, torch.nn.Module): pass ``` +### Floating-point precision + +When creating a new component, the floating-point precision should obey the [Floating-point precision of the model](../model/precision.md) section. +In implementation, the component should + +- store parameters in the component precision, except those for output normalization; +- store output normalization parameters in {py:data}`deepmd.pt.utils.env.GLOBAL_PT_FLOAT_PRECISION`; +- before input normalization, cast the input tensor to the component precision; before output normalization, cast the output tensor to the {py:data}`deepmd.pt.utils.env.GLOBAL_PT_FLOAT_PRECISION`. + ## Register new arguments To let someone uses your new component in their input file, you need to create a new method that returns some `Argument` of your new component, and then register new arguments. For example, the code below diff --git a/doc/model/index.rst b/doc/model/index.rst index 8409d4ce97..c067ea4207 100644 --- a/doc/model/index.rst +++ b/doc/model/index.rst @@ -24,3 +24,4 @@ Model linear pairtab change-bias + precision diff --git a/doc/model/precision.md b/doc/model/precision.md new file mode 100644 index 0000000000..d8643c9c61 --- /dev/null +++ b/doc/model/precision.md @@ -0,0 +1,15 @@ +# Floating-point precision of the model + +The following options control the precision of the model: + +- The environment variable {envvar}`DP_INTERFACE_PREC` controls the interface precision of the model, the descriptor, and the fitting, the precision of the environmental matrix, and the precision of the normalized parameters for the environmental matrix and the fitting output. +- The training parameters {ref}`precision ` in the descriptor, the fitting, and the type embedding control the precision of neural networks in those components, and the subsequent operations after the output of neural networks. +- The reduced output (e.g. total energy) is always `float64`. + +Usually, the following two combinations of options are recommended: + +- Setting {envvar}`DP_INTERFACE_PREC` to `high` (default) and all {ref}`precision ` options to `float64` (default). +- Setting {envvar}`DP_INTERFACE_PREC` to `high` (default) and all {ref}`precision ` options to `float32`. + +The Python and C++ inference interfaces accept both `float64` and `float32` as the input and output arguments, whatever the floating-point precision of the model interface is. +Usually, the MD programs (such as LAMMPS) only use `float64` in their interfaces. diff --git a/doc/troubleshooting/precision.md b/doc/troubleshooting/precision.md index 5ebef97122..a754dbeb53 100644 --- a/doc/troubleshooting/precision.md +++ b/doc/troubleshooting/precision.md @@ -60,6 +60,7 @@ See [FAQ: How to tune Fitting/embedding-net size](./howtoset_netsize.md) for det In some cases, one may want to use the FP32 precision to make the model faster. For some applications, FP32 is enough and thus is recommended, but one should still be aware that the precision of FP32 is not as high as that of FP64. +See [Floating-point precision of the model](../model/precision.md) section for how to set the precision. ## Training From 95f0ed59aba6fedea1b2ca28c1a17b837f7236ff Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 28 Oct 2024 23:37:54 -0400 Subject: [PATCH 090/193] fix(lmp): apply NEIGHMASK to neighbor list (#4269) Fix #4250. See https://github.com/lammps/lammps/pull/581#issuecomment-316351879 for an explanation of `NEIGHMASK`. ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced a new function to set a mask for neighbor lists, enhancing configurability. - Added a method to the `InputNlist` structure for setting the mask. - Enhanced `ComputeDeeptensorAtom` and `FixDPLR` classes to utilize neighbor list masks in computations. - **Bug Fixes** - Improved validation of bonded pairs in the `FixDPLR` class with enhanced error handling. - **Documentation** - Updated documentation for new methods and functionalities related to neighbor list management. Signed-off-by: Jinzhe Zeng --- source/api_c/include/c_api.h | 12 +++++++++++- source/api_c/include/deepmd.hpp | 4 ++++ source/api_c/src/c_api.cc | 1 + source/api_cc/src/common.cc | 3 +++ source/lib/include/neighbor_list.h | 6 ++++++ source/lmp/compute_deeptensor_atom.cpp | 1 + source/lmp/fix_dplr.cpp | 1 + source/lmp/pair_deepmd.cpp | 2 ++ 8 files changed, 29 insertions(+), 1 deletion(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index 2f88f25e43..cd940edc0d 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -12,7 +12,7 @@ extern "C" { /** C API version. Bumped whenever the API is changed. * @since API version 22 */ -#define DP_C_API_VERSION 22 +#define DP_C_API_VERSION 23 /** * @brief Neighbor list. @@ -68,6 +68,16 @@ extern DP_Nlist* DP_NewNlist_comm(int inum_, int* recvproc, void* world); +/* + * @brief Set mask for a neighbor list. + * + * @param nl Neighbor list. + * @param mask mask. + * @since API version 23 + * + **/ +extern void DP_NlistSetMask(DP_Nlist* nl, int mask); + /** * @brief Delete a neighbor list. * diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 270bc94cc5..f1e04ef3bc 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -611,6 +611,10 @@ struct InputNlist { int *numneigh; /// @brief Array stores the core region atom's neighbor index int **firstneigh; + /** + * @brief Set mask for this neighbor list. + */ + void set_mask(int mask) { DP_NlistSetMask(nl, mask); }; }; /** diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index 56c5f9720f..9bb9e8a775 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -42,6 +42,7 @@ DP_Nlist* DP_NewNlist_comm(int inum_, DP_Nlist* new_nl = new DP_Nlist(nl); return new_nl; } +void DP_NlistSetMask(DP_Nlist* nl, int mask) { nl->nl.set_mask(mask); } void DP_DeleteNlist(DP_Nlist* nl) { delete nl; } DP_DeepPot::DP_DeepPot() {} diff --git a/source/api_cc/src/common.cc b/source/api_cc/src/common.cc index e84517ea7a..bd3f18c579 100644 --- a/source/api_cc/src/common.cc +++ b/source/api_cc/src/common.cc @@ -241,6 +241,9 @@ void deepmd::NeighborListData::copy_from_nlist(const InputNlist& inlist) { int jnum = inlist.numneigh[ii]; jlist[ii].resize(jnum); memcpy(&jlist[ii][0], inlist.firstneigh[ii], jnum * sizeof(int)); + for (int jj = 0; jj < jnum; ++jj) { + jlist[ii][jj] &= inlist.mask; + } } } diff --git a/source/lib/include/neighbor_list.h b/source/lib/include/neighbor_list.h index 95f5cb6174..bb4b8cf13c 100644 --- a/source/lib/include/neighbor_list.h +++ b/source/lib/include/neighbor_list.h @@ -42,6 +42,8 @@ struct InputNlist { int* recvproc; /// MPI_comm data in lmp void* world; + /// mask to the neighbor index + int mask = 0xFFFFFFFF; InputNlist() : inum(0), ilist(NULL), @@ -93,6 +95,10 @@ struct InputNlist { recvproc(recvproc), world(world) {}; ~InputNlist() {}; + /** + * @brief Set mask for this neighbor list. + */ + void set_mask(int mask_) { mask = mask_; }; }; /** diff --git a/source/lmp/compute_deeptensor_atom.cpp b/source/lmp/compute_deeptensor_atom.cpp index 6e6e9508b7..68c97a629e 100644 --- a/source/lmp/compute_deeptensor_atom.cpp +++ b/source/lmp/compute_deeptensor_atom.cpp @@ -136,6 +136,7 @@ void ComputeDeeptensorAtom::compute_peratom() { neighbor->build_one(list); deepmd_compat::InputNlist lmp_list(list->inum, list->ilist, list->numneigh, list->firstneigh); + lmp_list.set_mask(NEIGHMASK); // declare outputs std::vector gtensor, force, virial, atensor, avirial; diff --git a/source/lmp/fix_dplr.cpp b/source/lmp/fix_dplr.cpp index 9f2b0eadb1..8a6be7d840 100644 --- a/source/lmp/fix_dplr.cpp +++ b/source/lmp/fix_dplr.cpp @@ -463,6 +463,7 @@ void FixDPLR::pre_force(int vflag) { NeighList *list = pair_deepmd->list; deepmd_compat::InputNlist lmp_list(list->inum, list->ilist, list->numneigh, list->firstneigh); + lmp_list.set_mask(NEIGHMASK); // declear output vector tensor; // compute diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 2cb6cfacd4..09d97fe460 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -565,6 +565,7 @@ void PairDeepMD::compute(int eflag, int vflag) { commdata_->nswap, commdata_->sendnum, commdata_->recvnum, commdata_->firstrecv, commdata_->sendlist, commdata_->sendproc, commdata_->recvproc, &world); + lmp_list.set_mask(NEIGHMASK); deepmd_compat::InputNlist extend_lmp_list; if (atom->sp_flag) { extend(extend_inum, extend_ilist, extend_numneigh, extend_neigh, @@ -574,6 +575,7 @@ void PairDeepMD::compute(int eflag, int vflag) { extend_lmp_list = deepmd_compat::InputNlist(extend_inum, &extend_ilist[0], &extend_numneigh[0], &extend_firstneigh[0]); + extend_lmp_list.set_mask(NEIGHMASK); } if (single_model || multi_models_no_mod_devi) { // cvflag_atom is the right flag for the cvatom matrix From abd1c9cd9575942e1030dac8e4ac91166a93af8f Mon Sep 17 00:00:00 2001 From: "A bot of @njzjz" <48687836+njzjz-bot@users.noreply.github.com> Date: Mon, 28 Oct 2024 23:40:59 -0400 Subject: [PATCH 091/193] docs: replace sphinx-rtd-theme with sphinx-book-theme (#4266) Compared to `sphinx-rtd-theme`, `sphinx-book-theme` is a clean and modern Sphinx theme. Generated by the task: https://github.com/njzjz-bot/njzjz-bot/issues/11. ## Summary by CodeRabbit - **New Features** - Updated documentation theme to enhance visual presentation with the new "sphinx_book_theme." - Introduced a new extension to manage table of contents visibility. - Enhanced dark mode styles for improved user experience. - **Bug Fixes** - Improved compatibility with documentation tools by updating dependencies in the project configuration. - Updated version constraints for documentation-related dependencies. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- doc/_static/css/custom.css | 2 +- doc/conf.py | 15 ++++++++++++--- pyproject.toml | 5 +++-- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/doc/_static/css/custom.css b/doc/_static/css/custom.css index d0b761e71d..8bcfdd3c7e 100644 --- a/doc/_static/css/custom.css +++ b/doc/_static/css/custom.css @@ -11,7 +11,7 @@ pre { img.platform-icon { height: 2ex; } -@media (prefers-color-scheme: dark) { +html[data-theme="dark"] { .wy-side-nav-search .wy-dropdown > a img.logo, .wy-side-nav-search > a img.logo { content: url("../logo-dark.svg"); diff --git a/doc/conf.py b/doc/conf.py index 51d463fd1f..c72e05bf8a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -44,7 +44,7 @@ # ones. # extensions = [ # 'recommonmark', -# "sphinx_rtd_theme", +# "sphinx_book_theme", # 'myst_parser', # 'sphinx_markdown_tables', # 'sphinx.ext.autosummary' @@ -53,7 +53,7 @@ extensions = [ "deepmodeling_sphinx", "dargs.sphinx", - "sphinx_rtd_theme", + "sphinx_book_theme", "myst_nb", "sphinx.ext.autosummary", "sphinx.ext.mathjax", @@ -70,6 +70,7 @@ "autoapi.extension", "sphinxcontrib.programoutput", "sphinxcontrib.moderncmakedomain", + "sphinx_remove_toctrees", ] # breathe_domain_by_extension = { @@ -175,9 +176,15 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = "sphinx_rtd_theme" +html_theme = "sphinx_book_theme" html_logo = "_static/logo.svg" +html_theme_options = { + "logo": { + "image_light": "_static/logo.svg", + "image_dark": "_static/logo-dark.svg", + } +} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". @@ -215,3 +222,5 @@ napoleon_numpy_docstring = False bibtex_bibfiles = ["../CITATIONS.bib"] + +remove_from_toctrees = ["autoapi/**/*", "API_CC/*", "api_c/*", "api_core/*"] diff --git a/pyproject.toml b/pyproject.toml index f4f399156a..1faacb973c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ test = [ ] docs = [ "sphinx>=3.1.1", - "sphinx_rtd_theme>=1.0.0rc1", + "sphinx-book-theme", "myst-nb>=1.0.0rc0", "myst-parser>=0.19.2", "sphinx-design", @@ -97,7 +97,7 @@ docs = [ "exhale>=0.3.7", "numpydoc", "ase", - "deepmodeling-sphinx>=0.1.0", + "deepmodeling-sphinx>=0.3.0", "dargs>=0.3.4", "sphinx-argparse<0.5.0", "pygments-lammps", @@ -105,6 +105,7 @@ docs = [ "sphinx-autoapi>=3.0.0", "sphinxcontrib-programoutput", "sphinxcontrib-moderncmakedomain", + "sphinx-remove-toctrees", ] lmp = [ "lammps~=2024.8.29.1.0", From b647547b212b48b84134c5d5afe45eb2d093fac8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Oct 2024 03:55:05 +0000 Subject: [PATCH 092/193] [pre-commit.ci] pre-commit autoupdate (#4268) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.0 → v0.7.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.0...v0.7.1) - [github.com/asottile/blacken-docs: 1.19.0 → 1.19.1](https://github.com/asottile/blacken-docs/compare/1.19.0...1.19.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 53fdd9b71c..6cb534fd22 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.7.0 + rev: v0.7.1 hooks: - id: ruff args: ["--fix"] @@ -55,7 +55,7 @@ repos: exclude: ^source/3rdparty # Python inside docs - repo: https://github.com/asottile/blacken-docs - rev: 1.19.0 + rev: 1.19.1 hooks: - id: blacken-docs # C++ From 82aaa0db8b2e484d2179112b509bb8bcadc6ab1f Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 29 Oct 2024 14:51:23 -0400 Subject: [PATCH 093/193] feat(jax): neighbor stat (#4258) ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced `NeighborStat` and `NeighborStatOP` classes for enhanced neighbor statistics computation. - Added `AutoBatchSize` class to manage automatic batch sizing in deep learning applications. - **Improvements** - Enhanced `JAXBackend` functionality with implemented properties for neighbor statistics and serialization. - Refactored neighbor counting logic for better clarity and modularity. - **Tests** - Updated unit tests for `neighbor_stat` to support multiple backends (TensorFlow, PyTorch, NumPy, JAX). - Removed outdated test files to streamline testing processes. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- deepmd/backend/jax.py | 10 +- deepmd/dpmodel/utils/neighbor_stat.py | 35 +++--- deepmd/jax/utils/auto_batch_size.py | 59 ++++++++++ deepmd/jax/utils/neighbor_stat.py | 104 ++++++++++++++++++ .../common/dpmodel/test_neighbor_stat.py | 69 ------------ .../{pt => consistent}/test_neighbor_stat.py | 24 +++- source/tests/tf/test_neighbor_stat.py | 68 ------------ 7 files changed, 210 insertions(+), 159 deletions(-) create mode 100644 deepmd/jax/utils/auto_batch_size.py create mode 100644 deepmd/jax/utils/neighbor_stat.py delete mode 100644 source/tests/common/dpmodel/test_neighbor_stat.py rename source/tests/{pt => consistent}/test_neighbor_stat.py (77%) delete mode 100644 source/tests/tf/test_neighbor_stat.py diff --git a/deepmd/backend/jax.py b/deepmd/backend/jax.py index bb2fba5a7c..7131f4d534 100644 --- a/deepmd/backend/jax.py +++ b/deepmd/backend/jax.py @@ -33,9 +33,9 @@ class JAXBackend(Backend): """The formal name of the backend.""" features: ClassVar[Backend.Feature] = ( Backend.Feature.IO - # Backend.Feature.ENTRY_POINT + | Backend.Feature.ENTRY_POINT # | Backend.Feature.DEEP_EVAL - # | Backend.Feature.NEIGHBOR_STAT + | Backend.Feature.NEIGHBOR_STAT ) """The features of the backend.""" suffixes: ClassVar[list[str]] = [".jax"] @@ -82,7 +82,11 @@ def neighbor_stat(self) -> type["NeighborStat"]: type[NeighborStat] The neighbor statistics of the backend. """ - raise NotImplementedError + from deepmd.jax.utils.neighbor_stat import ( + NeighborStat, + ) + + return NeighborStat @property def serialize_hook(self) -> Callable[[str], dict]: diff --git a/deepmd/dpmodel/utils/neighbor_stat.py b/deepmd/dpmodel/utils/neighbor_stat.py index 43ca2cadd1..3aea8ceeb9 100644 --- a/deepmd/dpmodel/utils/neighbor_stat.py +++ b/deepmd/dpmodel/utils/neighbor_stat.py @@ -6,6 +6,7 @@ Optional, ) +import array_api_compat import numpy as np from deepmd.dpmodel.common import ( @@ -68,42 +69,42 @@ def call( np.ndarray The maximal number of neighbors """ + xp = array_api_compat.array_namespace(coord, atype) nframes = coord.shape[0] - coord = coord.reshape(nframes, -1, 3) + coord = xp.reshape(coord, (nframes, -1, 3)) nloc = coord.shape[1] - coord = coord.reshape(nframes, nloc * 3) + coord = xp.reshape(coord, (nframes, nloc * 3)) extend_coord, extend_atype, _ = extend_coord_with_ghosts( coord, atype, cell, self.rcut ) - coord1 = extend_coord.reshape(nframes, -1) + coord1 = xp.reshape(extend_coord, (nframes, -1)) nall = coord1.shape[1] // 3 coord0 = coord1[:, : nloc * 3] diff = ( - coord1.reshape([nframes, -1, 3])[:, None, :, :] - - coord0.reshape([nframes, -1, 3])[:, :, None, :] + xp.reshape(coord1, [nframes, -1, 3])[:, None, :, :] + - xp.reshape(coord0, [nframes, -1, 3])[:, :, None, :] ) assert list(diff.shape) == [nframes, nloc, nall, 3] # remove the diagonal elements - mask = np.eye(nloc, nall, dtype=bool) - diff[:, mask] = np.inf - rr2 = np.sum(np.square(diff), axis=-1) - min_rr2 = np.min(rr2, axis=-1) + mask = xp.eye(nloc, nall, dtype=xp.bool) + mask = xp.tile(mask[None, :, :, None], (nframes, 1, 1, 3)) + diff = xp.where(mask, xp.full_like(diff, xp.inf), diff) + rr2 = xp.sum(xp.square(diff), axis=-1) + min_rr2 = xp.min(rr2, axis=-1) # count the number of neighbors if not self.mixed_types: mask = rr2 < self.rcut**2 - nnei = np.zeros((nframes, nloc, self.ntypes), dtype=int) + nneis = [] for ii in range(self.ntypes): - nnei[:, :, ii] = np.sum( - mask & (extend_atype == ii)[:, None, :], axis=-1 - ) + nneis.append(xp.sum(mask & (extend_atype == ii)[:, None, :], axis=-1)) + nnei = xp.stack(nneis, axis=-1) else: mask = rr2 < self.rcut**2 # virtual type (<0) are not counted - nnei = np.sum(mask & (extend_atype >= 0)[:, None, :], axis=-1).reshape( - nframes, nloc, 1 - ) - max_nnei = np.max(nnei, axis=1) + nnei = xp.sum(mask & (extend_atype >= 0)[:, None, :], axis=-1) + nnei = xp.reshape(nnei, (nframes, nloc, 1)) + max_nnei = xp.max(nnei, axis=1) return min_rr2, max_nnei diff --git a/deepmd/jax/utils/auto_batch_size.py b/deepmd/jax/utils/auto_batch_size.py new file mode 100644 index 0000000000..eec6766ae2 --- /dev/null +++ b/deepmd/jax/utils/auto_batch_size.py @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +import jaxlib + +from deepmd.jax.env import ( + jax, +) +from deepmd.utils.batch_size import AutoBatchSize as AutoBatchSizeBase + + +class AutoBatchSize(AutoBatchSizeBase): + """Auto batch size. + + Parameters + ---------- + initial_batch_size : int, default: 1024 + initial batch size (number of total atoms) when DP_INFER_BATCH_SIZE + is not set + factor : float, default: 2. + increased factor + + """ + + def __init__( + self, + initial_batch_size: int = 1024, + factor: float = 2.0, + ): + super().__init__( + initial_batch_size=initial_batch_size, + factor=factor, + ) + + def is_gpu_available(self) -> bool: + """Check if GPU is available. + + Returns + ------- + bool + True if GPU is available + """ + return jax.devices()[0].platform == "gpu" + + def is_oom_error(self, e: Exception) -> bool: + """Check if the exception is an OOM error. + + Parameters + ---------- + e : Exception + Exception + """ + # several sources think CUSOLVER_STATUS_INTERNAL_ERROR is another out-of-memory error, + # such as https://github.com/JuliaGPU/CUDA.jl/issues/1924 + # (the meaningless error message should be considered as a bug in cusolver) + if isinstance(e, (jaxlib.xla_extension.XlaRuntimeError, ValueError)) and ( + "RESOURCE_EXHAUSTED:" in e.args[0] + ): + return True + return False diff --git a/deepmd/jax/utils/neighbor_stat.py b/deepmd/jax/utils/neighbor_stat.py new file mode 100644 index 0000000000..6d9bc872e8 --- /dev/null +++ b/deepmd/jax/utils/neighbor_stat.py @@ -0,0 +1,104 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from collections.abc import ( + Iterator, +) +from typing import ( + Optional, +) + +import numpy as np + +from deepmd.dpmodel.common import ( + to_numpy_array, +) +from deepmd.dpmodel.utils.neighbor_stat import ( + NeighborStatOP, +) +from deepmd.jax.common import ( + to_jax_array, +) +from deepmd.jax.utils.auto_batch_size import ( + AutoBatchSize, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.utils.neighbor_stat import NeighborStat as BaseNeighborStat + + +class NeighborStat(BaseNeighborStat): + """Neighbor statistics using JAX. + + Parameters + ---------- + ntypes : int + The num of atom types + rcut : float + The cut-off radius + mixed_type : bool, optional, default=False + Treat all types as a single type. + """ + + def __init__( + self, + ntypes: int, + rcut: float, + mixed_type: bool = False, + ) -> None: + super().__init__(ntypes, rcut, mixed_type) + self.op = NeighborStatOP(ntypes, rcut, mixed_type) + self.auto_batch_size = AutoBatchSize() + + def iterator( + self, data: DeepmdDataSystem + ) -> Iterator[tuple[np.ndarray, float, str]]: + """Iterator method for producing neighbor statistics data. + + Yields + ------ + np.ndarray + The maximal number of neighbors + float + The squared minimal distance between two atoms + str + The directory of the data system + """ + for ii in range(len(data.system_dirs)): + for jj in data.data_systems[ii].dirs: + data_set = data.data_systems[ii] + data_set_data = data_set._load_set(jj) + minrr2, max_nnei = self.auto_batch_size.execute_all( + self._execute, + data_set_data["coord"].shape[0], + data_set.get_natoms(), + data_set_data["coord"], + data_set_data["type"], + data_set_data["box"] if data_set.pbc else None, + ) + yield np.max(max_nnei, axis=0), np.min(minrr2), jj + + def _execute( + self, + coord: np.ndarray, + atype: np.ndarray, + cell: Optional[np.ndarray], + ): + """Execute the operation. + + Parameters + ---------- + coord + The coordinates of atoms. + atype + The atom types. + cell + The cell. + """ + minrr2, max_nnei = self.op( + to_jax_array(coord), + to_jax_array(atype), + to_jax_array(cell), + ) + minrr2 = to_numpy_array(minrr2) + max_nnei = to_numpy_array(max_nnei) + return minrr2, max_nnei diff --git a/source/tests/common/dpmodel/test_neighbor_stat.py b/source/tests/common/dpmodel/test_neighbor_stat.py deleted file mode 100644 index 8dd700f608..0000000000 --- a/source/tests/common/dpmodel/test_neighbor_stat.py +++ /dev/null @@ -1,69 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -import shutil -import unittest - -import dpdata -import numpy as np - -from deepmd.entrypoints.neighbor_stat import ( - neighbor_stat, -) - -from ...seed import ( - GLOBAL_SEED, -) - - -def gen_sys(nframes): - rng = np.random.default_rng(GLOBAL_SEED) - natoms = 1000 - data = {} - X, Y, Z = np.mgrid[0:2:3j, 0:2:3j, 0:2:3j] - positions = np.vstack([X.ravel(), Y.ravel(), Z.ravel()]).T # + 0.1 - data["coords"] = np.repeat(positions[np.newaxis, :, :], nframes, axis=0) - data["forces"] = rng.random([nframes, natoms, 3]) - data["cells"] = np.array([3.0, 0.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 3.0]).reshape( - 1, 3, 3 - ) - data["energies"] = rng.random([nframes, 1]) - data["atom_names"] = ["TYPE"] - data["atom_numbs"] = [27] - data["atom_types"] = np.repeat(0, 27) - return data - - -class TestNeighborStat(unittest.TestCase): - def setUp(self): - data0 = gen_sys(1) - sys0 = dpdata.LabeledSystem() - sys0.data = data0 - sys0.to_deepmd_npy("system_0", set_size=1) - - def tearDown(self): - shutil.rmtree("system_0") - - def test_neighbor_stat(self): - for rcut in (0.0, 1.0, 2.0, 4.0): - for mixed_type in (True, False): - with self.subTest(rcut=rcut, mixed_type=mixed_type): - rcut += 1e-3 # prevent numerical errors - min_nbor_dist, max_nbor_size = neighbor_stat( - system="system_0", - rcut=rcut, - type_map=["TYPE", "NO_THIS_TYPE"], - mixed_type=mixed_type, - backend="numpy", - ) - upper = np.ceil(rcut) + 1 - X, Y, Z = np.mgrid[-upper:upper, -upper:upper, -upper:upper] - positions = np.vstack([X.ravel(), Y.ravel(), Z.ravel()]).T - # distance to (0,0,0) - distance = np.linalg.norm(positions, axis=1) - expected_neighbors = np.count_nonzero( - np.logical_and(distance > 0, distance <= rcut) - ) - self.assertAlmostEqual(min_nbor_dist, 1.0, 6) - ret = [expected_neighbors] - if not mixed_type: - ret.append(0) - np.testing.assert_array_equal(max_nbor_size, ret) diff --git a/source/tests/pt/test_neighbor_stat.py b/source/tests/consistent/test_neighbor_stat.py similarity index 77% rename from source/tests/pt/test_neighbor_stat.py rename to source/tests/consistent/test_neighbor_stat.py index 08ba453d74..55181a6903 100644 --- a/source/tests/pt/test_neighbor_stat.py +++ b/source/tests/consistent/test_neighbor_stat.py @@ -12,6 +12,11 @@ from ..seed import ( GLOBAL_SEED, ) +from .common import ( + INSTALLED_JAX, + INSTALLED_PT, + INSTALLED_TF, +) def gen_sys(nframes): @@ -42,7 +47,7 @@ def setUp(self): def tearDown(self): shutil.rmtree("system_0") - def test_neighbor_stat(self): + def run_neighbor_stat(self, backend): for rcut in (0.0, 1.0, 2.0, 4.0): for mixed_type in (True, False): with self.subTest(rcut=rcut, mixed_type=mixed_type): @@ -52,7 +57,7 @@ def test_neighbor_stat(self): rcut=rcut, type_map=["TYPE", "NO_THIS_TYPE"], mixed_type=mixed_type, - backend="pytorch", + backend=backend, ) upper = np.ceil(rcut) + 1 X, Y, Z = np.mgrid[-upper:upper, -upper:upper, -upper:upper] @@ -67,3 +72,18 @@ def test_neighbor_stat(self): if not mixed_type: ret.append(0) np.testing.assert_array_equal(max_nbor_size, ret) + + @unittest.skipUnless(INSTALLED_TF, "tensorflow is not installed") + def test_neighbor_stat_tf(self): + self.run_neighbor_stat("tensorflow") + + @unittest.skipUnless(INSTALLED_PT, "pytorch is not installed") + def test_neighbor_stat_pt(self): + self.run_neighbor_stat("pytorch") + + def test_neighbor_stat_dp(self): + self.run_neighbor_stat("numpy") + + @unittest.skipUnless(INSTALLED_JAX, "jax is not installed") + def test_neighbor_stat_jax(self): + self.run_neighbor_stat("jax") diff --git a/source/tests/tf/test_neighbor_stat.py b/source/tests/tf/test_neighbor_stat.py deleted file mode 100644 index 22b7790958..0000000000 --- a/source/tests/tf/test_neighbor_stat.py +++ /dev/null @@ -1,68 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -import shutil -import unittest - -import dpdata -import numpy as np - -from deepmd.tf.entrypoints.neighbor_stat import ( - neighbor_stat, -) - -from ..seed import ( - GLOBAL_SEED, -) - - -def gen_sys(nframes): - rng = np.random.default_rng(GLOBAL_SEED) - natoms = 1000 - data = {} - X, Y, Z = np.mgrid[0:2:3j, 0:2:3j, 0:2:3j] - positions = np.vstack([X.ravel(), Y.ravel(), Z.ravel()]).T # + 0.1 - data["coords"] = np.repeat(positions[np.newaxis, :, :], nframes, axis=0) - data["forces"] = rng.random([nframes, natoms, 3]) - data["cells"] = np.array([3.0, 0.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 3.0]).reshape( - 1, 3, 3 - ) - data["energies"] = rng.random([nframes, 1]) - data["atom_names"] = ["TYPE"] - data["atom_numbs"] = [27] - data["atom_types"] = np.repeat(0, 27) - return data - - -class TestNeighborStat(unittest.TestCase): - def setUp(self): - data0 = gen_sys(1) - sys0 = dpdata.LabeledSystem() - sys0.data = data0 - sys0.to_deepmd_npy("system_0", set_size=1) - - def tearDown(self): - shutil.rmtree("system_0") - - def test_neighbor_stat(self): - for rcut in (0.0, 1.0, 2.0, 4.0): - for mixed_type in (True, False): - with self.subTest(rcut=rcut, mixed_type=mixed_type): - rcut += 1e-3 # prevent numerical errors - min_nbor_dist, max_nbor_size = neighbor_stat( - system="system_0", - rcut=rcut, - type_map=["TYPE", "NO_THIS_TYPE"], - mixed_type=mixed_type, - ) - upper = np.ceil(rcut) + 1 - X, Y, Z = np.mgrid[-upper:upper, -upper:upper, -upper:upper] - positions = np.vstack([X.ravel(), Y.ravel(), Z.ravel()]).T - # distance to (0,0,0) - distance = np.linalg.norm(positions, axis=1) - expected_neighbors = np.count_nonzero( - np.logical_and(distance > 0, distance <= rcut) - ) - self.assertAlmostEqual(min_nbor_dist, 1.0, 6) - ret = [expected_neighbors] - if not mixed_type: - ret.append(0) - np.testing.assert_array_equal(max_nbor_size, ret) From dd36e6c59d0983176013191a444ae0b4491f8b10 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 29 Oct 2024 14:51:35 -0400 Subject: [PATCH 094/193] docs: document JAX backend (#4259) ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced support for the JAX backend, expanding user options for model training and execution. - Added installation instructions for JAX within the source installation documentation. - Included new environment variables related to JAX to enhance configuration options. - **Documentation Updates** - Updated various documentation files to reflect the addition of JAX, including sections on model commands, supported backends, and environment variables. - Enhanced documentation with a visual representation for JAX through an icon. - Improved clarity and organization of installation instructions for DeePMD-kit. - Updated the README to highlight JAX as a supported backend and reflect changes in version history. --------- Signed-off-by: Jinzhe Zeng --- README.md | 4 +- doc/_static/jax.svg | 1 + doc/backend.md | 9 +++ doc/conf.py | 1 + doc/env.md | 1 + doc/install/easy-install-dev.md | 4 +- doc/install/easy-install.md | 96 ++++++++++++++++++++++++++---- doc/install/install-from-source.md | 15 +++++ doc/model/sel.md | 8 +++ doc/model/train-energy.md | 4 +- doc/model/train-fitting-dos.md | 4 +- doc/model/train-se-atten.md | 4 +- doc/model/train-se-e2-a.md | 4 +- doc/model/train-se-e2-r.md | 4 +- 14 files changed, 131 insertions(+), 28 deletions(-) create mode 100644 doc/_static/jax.svg diff --git a/README.md b/README.md index e821a29768..55f927d62b 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ For more information, check the [documentation](https://deepmd.readthedocs.io/). ### Highlighted features -- **interfaced with multiple backends**, including TensorFlow and PyTorch, the most popular deep learning frameworks, making the training process highly automatic and efficient. +- **interfaced with multiple backends**, including TensorFlow, PyTorch, and JAX, the most popular deep learning frameworks, making the training process highly automatic and efficient. - **interfaced with high-performance classical MD and quantum (path-integral) MD packages**, including LAMMPS, i-PI, AMBER, CP2K, GROMACS, OpenMM, and ABUCUS. - **implements the Deep Potential series models**, which have been successfully applied to finite and extended systems, including organic molecules, metals, semiconductors, insulators, etc. - **implements MPI and GPU supports**, making it highly efficient for high-performance parallel and distributed computing. @@ -72,7 +72,7 @@ See [our latest paper](https://doi.org/10.1063/5.0155600) for details of all fea #### v3 -- Multiple backends supported. Add a PyTorch backend. +- Multiple backends supported. Add PyTorch and JAX backends. - The DPA-2 model. ## Install and use DeePMD-kit diff --git a/doc/_static/jax.svg b/doc/_static/jax.svg new file mode 100644 index 0000000000..360a6624d4 --- /dev/null +++ b/doc/_static/jax.svg @@ -0,0 +1 @@ + diff --git a/doc/backend.md b/doc/backend.md index f6eaf0e45b..cf99eea9cb 100644 --- a/doc/backend.md +++ b/doc/backend.md @@ -23,6 +23,15 @@ DeePMD-kit does not use the TensorFlow v2 API but uses the TensorFlow v1 API (`t [PyTorch](https://pytorch.org/) 2.0 or above is required. While `.pth` and `.pt` are the same in the PyTorch package, they have different meanings in the DeePMD-kit to distinguish the model and the checkpoint. +### JAX {{ jax_icon }} + +- Model filename extension: `.xlo` +- Checkpoint filename extension: `.jax` + +[JAX](https://jax.readthedocs.io/) 0.4.33 (which requires Python 3.10 or above) or above is required. +Both `.xlo` and `.jax` are customized format extensions defined in DeePMD-kit, since JAX has no convention for file extensions. +Currently, this backend is developed actively, and has no support for training and the C++ interface. + ### DP {{ dpmodel_icon }} :::{note} diff --git a/doc/conf.py b/doc/conf.py index c72e05bf8a..eca7665712 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -168,6 +168,7 @@ myst_substitutions = { "tensorflow_icon": """![TensorFlow](/_static/tensorflow.svg){class=platform-icon}""", "pytorch_icon": """![PyTorch](/_static/pytorch.svg){class=platform-icon}""", + "jax_icon": """![JAX](/_static/jax.svg){class=platform-icon}""", "dpmodel_icon": """![DP](/_static/logo_icon.svg){class=platform-icon}""", } diff --git a/doc/env.md b/doc/env.md index 65a50ff163..3cf42b724a 100644 --- a/doc/env.md +++ b/doc/env.md @@ -31,6 +31,7 @@ See [How to control the parallelism of a job](./troubleshooting/howtoset_num_nod - If ROCm is used, [ROCm environment variables](https://rocm.docs.amd.com/en/latest/conceptual/gpu-isolation.html#environment-variables) can be used to control ROCm devices. - {{ tensorflow_icon }} If TensorFlow is used, TensorFlow environment variables can be used. - {{ pytorch_icon }} If PyTorch is used, [PyTorch environment variables](https://pytorch.org/docs/stable/torch_environment_variables.html) can be used. +- {{ jax_icon }} [`JAX_PLATFORMS`](https://jax.readthedocs.io/en/latest/faq.html#controlling-data-and-computation-placement-on-devices) and [`XLA_FLAGS`](https://jax.readthedocs.io/en/latest/gpu_performance_tips.html#xla-performance-flags) are commonly used. ## Python interface only diff --git a/doc/install/easy-install-dev.md b/doc/install/easy-install-dev.md index bb68272ace..54309a8582 100644 --- a/doc/install/easy-install-dev.md +++ b/doc/install/easy-install-dev.md @@ -16,14 +16,12 @@ For CUDA 11.8 support, use the `devel_cu11` tag. ## Install with pip -Below is an one-line shell command to download the [artifact](https://nightly.link/deepmodeling/deepmd-kit/workflows/build_wheel/devel/artifact.zip) containing wheels and install it with `pip`: +Follow [the documentation for the stable version](easy-install.md#install-python-interface-with-pip), but add `--pre` and `--extra-index-url` options like below: ```sh pip install -U --pre deepmd-kit[gpu,cu12,lmp,torch] --extra-index-url https://deepmodeling.github.io/deepmd-kit/simple ``` -`cu12` and `lmp` are optional, which is the same as the stable version. - ## Download pre-compiled C Library {{ tensorflow_icon }} :::{note} diff --git a/doc/install/easy-install.md b/doc/install/easy-install.md index 99962d08b8..c2260b58b6 100644 --- a/doc/install/easy-install.md +++ b/doc/install/easy-install.md @@ -104,44 +104,114 @@ docker pull ghcr.io/deepmodeling/deepmd-kit:2.2.8_cuda12.0_gpu ## Install Python interface with pip -If you have no existing TensorFlow installed, you can use `pip` to install the pre-built package of the Python interface with CUDA 12 supported: +[Create a new environment](https://docs.deepmodeling.com/faq/conda.html#how-to-create-a-new-conda-pip-environment), and then execute the following command: + +:::::::{tab-set} + +::::::{tab-item} TensorFlow {{ tensorflow_icon }} + +:::::{tab-set} + +::::{tab-item} CUDA 12 ```bash -pip install deepmd-kit[gpu,cu12,torch] +pip install deepmd-kit[gpu,cu12] ``` `cu12` is required only when CUDA Toolkit and cuDNN were not installed. -To install the package built against CUDA 11.8, use +:::: + +::::{tab-item} CUDA 11 ```bash -pip install torch --index-url https://download.pytorch.org/whl/cu118 pip install deepmd-kit-cu11[gpu,cu11] ``` -Or install the CPU version without CUDA supported: +:::: + +::::{tab-item} CPU ```bash -pip install torch --index-url https://download.pytorch.org/whl/cpu pip install deepmd-kit[cpu] ``` +:::: + +::::: + [The LAMMPS module](../third-party/lammps-command.md) and [the i-PI driver](../third-party/ipi.md) are only provided on Linux and macOS for the TensorFlow backend. To install LAMMPS and/or i-PI, add `lmp` and/or `ipi` to extras: ```bash -pip install deepmd-kit[gpu,cu12,torch,lmp,ipi] +pip install deepmd-kit[gpu,cu12,lmp,ipi] ``` MPICH is required for parallel running. -:::{Warning} -When installing from pip, only the TensorFlow {{ tensorflow_icon }} backend is supported with LAMMPS and i-PI. -::: +:::::: + +::::::{tab-item} PyTorch {{ pytorch_icon }} + +:::::{tab-set} + +::::{tab-item} CUDA 12 + +```bash +pip install deepmd-kit[torch] +``` + +:::: + +::::{tab-item} CUDA 11.8 + +```bash +pip install torch --index-url https://download.pytorch.org/whl/cu118 +pip install deepmd-kit-cu11 +``` + +:::: + +::::{tab-item} CPU + +```bash +pip install torch --index-url https://download.pytorch.org/whl/cpu +pip install deepmd-kit +``` + +:::: + +::::: + +:::::: + +::::::{tab-item} JAX {{ jax_icon }} + +:::::{tab-set} + +::::{tab-item} CUDA 12 + +```bash +pip install deepmd-kit[jax] jax[cuda12] +``` + +:::: + +::::{tab-item} CPU + +```bash +pip install deepmd-kit[jax] +``` + +:::: + +::::: + +:::::: + +::::::: -It is suggested to install the package into an isolated environment. The supported platform includes Linux x86-64 and aarch64 with GNU C Library 2.28 or above, macOS x86-64 and arm64, and Windows x86-64. -A specific version of TensorFlow and PyTorch which is compatible with DeePMD-kit will be also installed. :::{Warning} -If your platform is not supported, or you want to build against the installed TensorFlow, or you want to enable ROCM support, please [build from source](install-from-source.md). +If your platform is not supported, or you want to build against the installed backends, or you want to enable ROCM support, please [build from source](install-from-source.md). ::: diff --git a/doc/install/install-from-source.md b/doc/install/install-from-source.md index 07239cd3b7..4a0a104b7e 100644 --- a/doc/install/install-from-source.md +++ b/doc/install/install-from-source.md @@ -78,6 +78,21 @@ One can also [use conda](https://docs.deepmodeling.org/faq/conda.html) to instal ::: +:::{tab-item} JAX {{ jax_icon }} + +To install [JAX AI Stack](https://github.com/jax-ml/jax-ai-stack), run + +```sh +pip install jax-ai-stack +``` + +One can also install packages in JAX AI Stack manually. +Follow [JAX documentation](https://jax.readthedocs.io/en/latest/installation.html) to install JAX built against different CUDA versions or without CUDA. + +One can also [use conda](https://docs.deepmodeling.org/faq/conda.html) to install JAX from [conda-forge](https://conda-forge.org). + +::: + :::: It is important that every time a new shell is started and one wants to use `DeePMD-kit`, the virtual environment should be activated by diff --git a/doc/model/sel.md b/doc/model/sel.md index 4908954618..babea1d463 100644 --- a/doc/model/sel.md +++ b/doc/model/sel.md @@ -24,6 +24,14 @@ dp --pt neighbor-stat -s data -r 6.0 -t O H ::: +:::{tab-item} JAX {{ jax_icon }} + +```sh +dp --jax neighbor-stat -s data -r 6.0 -t O H +``` + +::: + :::: where `data` is the directory of data, `6.0` is the cutoff radius, and `O` and `H` is the type map. The program will give the `max_nbor_size`. For example, `max_nbor_size` of the water example is `[38, 72]`, meaning an atom may have 38 O neighbors and 72 H neighbors in the training data. diff --git a/doc/model/train-energy.md b/doc/model/train-energy.md index 75d31d4670..484564b14f 100644 --- a/doc/model/train-energy.md +++ b/doc/model/train-energy.md @@ -1,7 +1,7 @@ -# Fit energy {{ tensorflow_icon }} {{ pytorch_icon }} {{ dpmodel_icon }} +# Fit energy {{ tensorflow_icon }} {{ pytorch_icon }} {{ jax_icon }} {{ dpmodel_icon }} :::{note} -**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, DP {{ dpmodel_icon }} +**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: In this section, we will take `$deepmd_source_dir/examples/water/se_e2_a/input.json` as an example of the input file. diff --git a/doc/model/train-fitting-dos.md b/doc/model/train-fitting-dos.md index d04dbc669c..fb4a3677e5 100644 --- a/doc/model/train-fitting-dos.md +++ b/doc/model/train-fitting-dos.md @@ -1,7 +1,7 @@ -# Fit electronic density of states (DOS) {{ tensorflow_icon }} {{ pytorch_icon }} {{ dpmodel_icon }} +# Fit electronic density of states (DOS) {{ tensorflow_icon }} {{ pytorch_icon }} {{ jax_icon }} {{ dpmodel_icon }} :::{note} -**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, DP {{ dpmodel_icon }} +**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: Here we present an API to DeepDOS model, which can be used to fit electronic density of state (DOS) (which is a vector). diff --git a/doc/model/train-se-atten.md b/doc/model/train-se-atten.md index bebce78365..3e88a4e950 100644 --- a/doc/model/train-se-atten.md +++ b/doc/model/train-se-atten.md @@ -1,7 +1,7 @@ -# Descriptor `"se_atten"` {{ tensorflow_icon }} {{ pytorch_icon }} {{ dpmodel_icon }} +# Descriptor `"se_atten"` {{ tensorflow_icon }} {{ pytorch_icon }} {{ jax_icon }} {{ dpmodel_icon }} :::{note} -**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, DP {{ dpmodel_icon }} +**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: ## DPA-1: Pretraining of Attention-based Deep Potential Model for Molecular Simulation diff --git a/doc/model/train-se-e2-a.md b/doc/model/train-se-e2-a.md index 81b95399e0..d4a4510a31 100644 --- a/doc/model/train-se-e2-a.md +++ b/doc/model/train-se-e2-a.md @@ -1,7 +1,7 @@ -# Descriptor `"se_e2_a"` {{ tensorflow_icon }} {{ pytorch_icon }} {{ dpmodel_icon }} +# Descriptor `"se_e2_a"` {{ tensorflow_icon }} {{ pytorch_icon }} {{ jax_icon }} {{ dpmodel_icon }} :::{note} -**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, DP {{ dpmodel_icon }} +**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: The notation of `se_e2_a` is short for the Deep Potential Smooth Edition (DeepPot-SE) constructed from all information (both angular and radial) of atomic configurations. The `e2` stands for the embedding with two-atoms information. This descriptor was described in detail in [the DeepPot-SE paper](https://arxiv.org/abs/1805.09003). diff --git a/doc/model/train-se-e2-r.md b/doc/model/train-se-e2-r.md index 316bde43b4..baff6d6331 100644 --- a/doc/model/train-se-e2-r.md +++ b/doc/model/train-se-e2-r.md @@ -1,7 +1,7 @@ -# Descriptor `"se_e2_r"` {{ tensorflow_icon }} {{ pytorch_icon }} {{ dpmodel_icon }} +# Descriptor `"se_e2_r"` {{ tensorflow_icon }} {{ pytorch_icon }} {{ jax_icon }} {{ dpmodel_icon }} :::{note} -**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, DP {{ dpmodel_icon }} +**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: The notation of `se_e2_r` is short for the Deep Potential Smooth Edition (DeepPot-SE) constructed from the radial information of atomic configurations. The `e2` stands for the embedding with two-atom information. From 159361dd7b1335315d280786326ff02e9ed58b08 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 29 Oct 2024 14:52:08 -0400 Subject: [PATCH 095/193] feat(jax): force & virial (#4251) ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced new methods `forward_common_atomic` in multiple classes to enhance atomic model predictions and derivative calculations. - Added a new function `get_leading_dims` for better handling of output dimensions. - Added a new function `scatter_sum` for performing reduction operations on tensors. - Updated test methods to include flexible handling of results with the new `SKIP_FLAG` variable. - **Bug Fixes** - Improved numerical stability in calculations by ensuring small values are handled appropriately. - **Tests** - Expanded test outputs to include additional data like forces and virials for more comprehensive testing. - Enhanced backend handling in tests to accommodate new return values based on backend availability. --------- Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/model/make_model.py | 30 +++++- deepmd/dpmodel/model/transform_output.py | 84 +++++++++++++++-- deepmd/dpmodel/utils/env_mat.py | 4 +- deepmd/jax/common.py | 10 ++ deepmd/jax/env.py | 1 + deepmd/jax/model/base_model.py | 101 +++++++++++++++++++++ deepmd/jax/model/ener_model.py | 26 ++++++ source/tests/consistent/common.py | 4 + source/tests/consistent/model/common.py | 2 +- source/tests/consistent/model/test_ener.py | 39 +++++++- 10 files changed, 284 insertions(+), 17 deletions(-) diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py index afe2eaffb6..e36182e712 100644 --- a/deepmd/dpmodel/model/make_model.py +++ b/deepmd/dpmodel/model/make_model.py @@ -222,22 +222,42 @@ def call_lower( extended_coord, fparam=fparam, aparam=aparam ) del extended_coord, fparam, aparam - atomic_ret = self.atomic_model.forward_common_atomic( + model_predict = self.forward_common_atomic( cc_ext, extended_atype, nlist, mapping=mapping, fparam=fp, aparam=ap, + do_atomic_virial=do_atomic_virial, + ) + model_predict = self.output_type_cast(model_predict, input_prec) + return model_predict + + def forward_common_atomic( + self, + extended_coord: np.ndarray, + extended_atype: np.ndarray, + nlist: np.ndarray, + mapping: Optional[np.ndarray] = None, + fparam: Optional[np.ndarray] = None, + aparam: Optional[np.ndarray] = None, + do_atomic_virial: bool = False, + ): + atomic_ret = self.atomic_model.forward_common_atomic( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + fparam=fparam, + aparam=aparam, ) - model_predict = fit_output_to_model_output( + return fit_output_to_model_output( atomic_ret, self.atomic_output_def(), - cc_ext, + extended_coord, do_atomic_virial=do_atomic_virial, ) - model_predict = self.output_type_cast(model_predict, input_prec) - return model_predict forward_lower = call_lower diff --git a/deepmd/dpmodel/model/transform_output.py b/deepmd/dpmodel/model/transform_output.py index 107455a6d5..af1429ce25 100644 --- a/deepmd/dpmodel/model/transform_output.py +++ b/deepmd/dpmodel/model/transform_output.py @@ -9,6 +9,7 @@ from deepmd.dpmodel.output_def import ( FittingOutputDef, ModelOutputDef, + OutputVariableDef, get_deriv_name, get_reduce_name, ) @@ -47,6 +48,28 @@ def fit_output_to_model_output( return model_ret +def get_leading_dims( + vv: np.ndarray, + vdef: OutputVariableDef, +): + """Get the dimensions of nf x nloc. + + Parameters + ---------- + vv : np.ndarray + The input array from which to compute the leading dimensions. + vdef : OutputVariableDef + The output variable definition containing the shape to exclude from `vv`. + + Returns + ------- + list + A list of leading dimensions of `vv`, excluding the last `len(vdef.shape)` dimensions. + """ + vshape = vv.shape + return list(vshape[: (len(vshape) - len(vdef.shape))]) + + def communicate_extended_output( model_ret: dict[str, np.ndarray], model_output_def: ModelOutputDef, @@ -57,6 +80,7 @@ def communicate_extended_output( local and ghost (extended) atoms to local atoms. """ + xp = array_api_compat.get_namespace(mapping) new_ret = {} for kk in model_output_def.keys_outp(): vv = model_ret[kk] @@ -65,15 +89,63 @@ def communicate_extended_output( if vdef.reducible: kk_redu = get_reduce_name(kk) new_ret[kk_redu] = model_ret[kk_redu] + kk_derv_r, kk_derv_c = get_deriv_name(kk) + mldims = list(mapping.shape) + vldims = get_leading_dims(vv, vdef) if vdef.r_differentiable: - kk_derv_r, kk_derv_c = get_deriv_name(kk) - # name holders - new_ret[kk_derv_r] = None + if model_ret[kk_derv_r] is not None: + derv_r_ext_dims = list(vdef.shape) + [3] # noqa:RUF005 + mapping = xp.reshape(mapping, (mldims + [1] * len(derv_r_ext_dims))) + mapping = xp.tile(mapping, [1] * len(mldims) + derv_r_ext_dims) + force = xp.zeros(vldims + derv_r_ext_dims, dtype=vv.dtype) + # jax only + if array_api_compat.is_jax_array(force): + from deepmd.jax.common import ( + scatter_sum, + ) + + force = scatter_sum( + force, + 1, + mapping, + model_ret[kk_derv_r], + ) + else: + raise NotImplementedError("Only JAX arrays are supported.") + new_ret[kk_derv_r] = force + else: + # name holders + new_ret[kk_derv_r] = None if vdef.c_differentiable: assert vdef.r_differentiable - kk_derv_r, kk_derv_c = get_deriv_name(kk) - new_ret[kk_derv_c] = None - new_ret[kk_derv_c + "_redu"] = None + if model_ret[kk_derv_c] is not None: + derv_c_ext_dims = list(vdef.shape) + [9] # noqa:RUF005 + mapping = xp.tile( + mapping, [1] * (len(mldims) + len(vdef.shape)) + [3] + ) + virial = xp.zeros( + vldims + derv_c_ext_dims, + dtype=vv.dtype, + ) + # jax only + if array_api_compat.is_jax_array(virial): + from deepmd.jax.common import ( + scatter_sum, + ) + + virial = scatter_sum( + virial, + 1, + mapping, + model_ret[kk_derv_c], + ) + else: + raise NotImplementedError("Only JAX arrays are supported.") + new_ret[kk_derv_c] = virial + new_ret[kk_derv_c + "_redu"] = xp.sum(new_ret[kk_derv_c], axis=1) + else: + new_ret[kk_derv_c] = None + new_ret[kk_derv_c + "_redu"] = None if not do_atomic_virial: # pop atomic virial, because it is not correctly calculated. new_ret.pop(kk_derv_c) diff --git a/deepmd/dpmodel/utils/env_mat.py b/deepmd/dpmodel/utils/env_mat.py index f4bc333a03..aa8520202e 100644 --- a/deepmd/dpmodel/utils/env_mat.py +++ b/deepmd/dpmodel/utils/env_mat.py @@ -61,7 +61,9 @@ def _make_env_mat( # nf x nloc x nnei x 3 diff = coord_r - coord_l # nf x nloc x nnei - length = xp.linalg.vector_norm(diff, axis=-1, keepdims=True) + # the grad of JAX vector_norm is NaN at x=0 + diff_ = xp.where(xp.abs(diff) < 1e-30, xp.full_like(diff, 1e-30), diff) + length = xp.linalg.vector_norm(diff_, axis=-1, keepdims=True) # for index 0 nloc atom length = length + xp.astype(~xp.expand_dims(mask, axis=-1), length.dtype) t0 = 1 / (length + protection) diff --git a/deepmd/jax/common.py b/deepmd/jax/common.py index f372e97eb5..59f36d11ad 100644 --- a/deepmd/jax/common.py +++ b/deepmd/jax/common.py @@ -95,3 +95,13 @@ def __dlpack__(self, *args, **kwargs): def __dlpack_device__(self, *args, **kwargs): return self.value.__dlpack_device__(*args, **kwargs) + + +def scatter_sum(input, dim, index: jnp.ndarray, src: jnp.ndarray) -> jnp.ndarray: + """Reduces all values from the src tensor to the indices specified in the index tensor.""" + idx = jnp.arange(input.size, dtype=jnp.int64).reshape(input.shape) + new_idx = jnp.take_along_axis(idx, index, axis=dim).ravel() + shape = input.shape + input = input.ravel() + input = input.at[new_idx].add(src.ravel()) + return input.reshape(shape) diff --git a/deepmd/jax/env.py b/deepmd/jax/env.py index 5a5a7f6bf0..ee11e17125 100644 --- a/deepmd/jax/env.py +++ b/deepmd/jax/env.py @@ -10,6 +10,7 @@ ) jax.config.update("jax_enable_x64", True) +# jax.config.update("jax_debug_nans", True) __all__ = [ "jax", diff --git a/deepmd/jax/model/base_model.py b/deepmd/jax/model/base_model.py index fee4855da3..8631c85d16 100644 --- a/deepmd/jax/model/base_model.py +++ b/deepmd/jax/model/base_model.py @@ -1,6 +1,107 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + from deepmd.dpmodel.model.base_model import ( make_base_model, ) +from deepmd.dpmodel.output_def import ( + get_deriv_name, + get_reduce_name, +) +from deepmd.jax.env import ( + jax, + jnp, +) BaseModel = make_base_model() + + +def forward_common_atomic( + self, + extended_coord: jnp.ndarray, + extended_atype: jnp.ndarray, + nlist: jnp.ndarray, + mapping: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + do_atomic_virial: bool = False, +): + atomic_ret = self.atomic_model.forward_common_atomic( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + fparam=fparam, + aparam=aparam, + ) + atomic_output_def = self.atomic_output_def() + model_predict = {} + for kk, vv in atomic_ret.items(): + model_predict[kk] = vv + vdef = atomic_output_def[kk] + shap = vdef.shape + atom_axis = -(len(shap) + 1) + if vdef.reducible: + kk_redu = get_reduce_name(kk) + model_predict[kk_redu] = jnp.sum(vv, axis=atom_axis) + kk_derv_r, kk_derv_c = get_deriv_name(kk) + if vdef.c_differentiable: + + def eval_output( + cc_ext, + extended_atype, + nlist, + mapping, + fparam, + aparam, + *, + _kk=kk, + _atom_axis=atom_axis, + ): + atomic_ret = self.atomic_model.forward_common_atomic( + cc_ext[None, ...], + extended_atype[None, ...], + nlist[None, ...], + mapping=mapping[None, ...] if mapping is not None else None, + fparam=fparam[None, ...] if fparam is not None else None, + aparam=aparam[None, ...] if aparam is not None else None, + ) + return jnp.sum(atomic_ret[_kk][0], axis=_atom_axis) + + # extended_coord: [nf, nall, 3] + # ff: [nf, *def, nall, 3] + ff = -jax.vmap(jax.jacrev(eval_output, argnums=0))( + extended_coord, + extended_atype, + nlist, + mapping, + fparam, + aparam, + ) + # extended_force: [nf, nall, *def, 3] + def_ndim = len(vdef.shape) + extended_force = jnp.transpose( + ff, [0, def_ndim + 1, *range(1, def_ndim + 1), def_ndim + 2] + ) + + model_predict[kk_derv_r] = extended_force + if vdef.c_differentiable: + assert vdef.r_differentiable + # avr: [nf, *def, nall, 3, 3] + avr = jnp.einsum("f...ai,faj->f...aij", ff, extended_coord) + # avr: [nf, *def, nall, 9] + avr = jnp.reshape(avr, [*ff.shape[:-1], 9]) + # extended_virial: [nf, nall, *def, 9] + extended_virial = jnp.transpose( + avr, [0, def_ndim + 1, *range(1, def_ndim + 1), def_ndim + 2] + ) + + # the correction sums to zero, which does not contribute to global virial + # cannot jit + # if do_atomic_virial: + # raise NotImplementedError("Atomic virial is not implemented yet.") + # to [...,3,3] -> [...,9] + model_predict[kk_derv_c] = extended_virial + return model_predict diff --git a/deepmd/jax/model/ener_model.py b/deepmd/jax/model/ener_model.py index 79c5a29e88..b1bf568544 100644 --- a/deepmd/jax/model/ener_model.py +++ b/deepmd/jax/model/ener_model.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Any, + Optional, ) from deepmd.dpmodel.model import EnergyModel as EnergyModelDP @@ -10,8 +11,12 @@ from deepmd.jax.common import ( flax_module, ) +from deepmd.jax.env import ( + jnp, +) from deepmd.jax.model.base_model import ( BaseModel, + forward_common_atomic, ) @@ -22,3 +27,24 @@ def __setattr__(self, name: str, value: Any) -> None: if name == "atomic_model": value = DPAtomicModel.deserialize(value.serialize()) return super().__setattr__(name, value) + + def forward_common_atomic( + self, + extended_coord: jnp.ndarray, + extended_atype: jnp.ndarray, + nlist: jnp.ndarray, + mapping: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + do_atomic_virial: bool = False, + ): + return forward_common_atomic( + self, + extended_coord, + extended_atype, + nlist, + mapping=mapping, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index 885662c766..bcad7c4502 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -69,6 +69,8 @@ "INSTALLED_ARRAY_API_STRICT", ] +SKIP_FLAG = object() + class CommonTest(ABC): data: ClassVar[dict] @@ -362,6 +364,8 @@ def test_dp_consistent_with_ref(self): data2 = dp_obj.serialize() np.testing.assert_equal(data1, data2) for rr1, rr2 in zip(ret1, ret2): + if rr1 is SKIP_FLAG or rr2 is SKIP_FLAG: + continue np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" diff --git a/source/tests/consistent/model/common.py b/source/tests/consistent/model/common.py index 4112e09cff..11940d9bdf 100644 --- a/source/tests/consistent/model/common.py +++ b/source/tests/consistent/model/common.py @@ -51,7 +51,7 @@ def build_tf_model(self, obj, natoms, coords, atype, box, suffix): {}, suffix=suffix, ) - return [ret["energy"], ret["atom_ener"]], { + return [ret["energy"], ret["atom_ener"], ret["force"], ret["virial"]], { t_coord: coords, t_type: atype, t_natoms: natoms, diff --git a/source/tests/consistent/model/test_ener.py b/source/tests/consistent/model/test_ener.py index 78a2aac703..2a358ba7e0 100644 --- a/source/tests/consistent/model/test_ener.py +++ b/source/tests/consistent/model/test_ener.py @@ -16,6 +16,7 @@ INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, + SKIP_FLAG, CommonTest, parameterized, ) @@ -94,6 +95,21 @@ def data(self) -> dict: jax_class = EnergyModelJAX args = model_args() + def get_reference_backend(self): + """Get the reference backend. + + We need a reference backend that can reproduce forces. + """ + if not self.skip_pt: + return self.RefBackend.PT + if not self.skip_tf: + return self.RefBackend.TF + if not self.skip_jax: + return self.RefBackend.JAX + if not self.skip_dp: + return self.RefBackend.DP + raise ValueError("No available reference") + @property def skip_tf(self): return ( @@ -195,11 +211,26 @@ def eval_jax(self, jax_obj: Any) -> Any: def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: # shape not matched. ravel... if backend is self.RefBackend.DP: - return (ret["energy_redu"].ravel(), ret["energy"].ravel()) + return ( + ret["energy_redu"].ravel(), + ret["energy"].ravel(), + SKIP_FLAG, + SKIP_FLAG, + ) elif backend is self.RefBackend.PT: - return (ret["energy"].ravel(), ret["atom_energy"].ravel()) + return ( + ret["energy"].ravel(), + ret["atom_energy"].ravel(), + ret["force"].ravel(), + ret["virial"].ravel(), + ) elif backend is self.RefBackend.TF: - return (ret[0].ravel(), ret[1].ravel()) + return (ret[0].ravel(), ret[1].ravel(), ret[2].ravel(), ret[3].ravel()) elif backend is self.RefBackend.JAX: - return (ret["energy_redu"].ravel(), ret["energy"].ravel()) + return ( + ret["energy_redu"].ravel(), + ret["energy"].ravel(), + ret["energy_derv_r"].ravel(), + ret["energy_derv_c_redu"].ravel(), + ) raise ValueError(f"Unknown backend: {backend}") From d165fee9d93626b314f734097c01f2ba5ee4a099 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 29 Oct 2024 20:50:28 -0400 Subject: [PATCH 096/193] feat(jax): freeze to StableXLO & DeepEval (#4256) ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced support for `.hlo` file extensions in model loading and saving functionalities. - Added a `DeepEval` class for enhanced deep learning model evaluation in molecular simulations. - Implemented a new `HLO` class for managing model predictions within a deep learning framework. - **Bug Fixes** - Improved handling of suffixes and backend names in test cases for better consistency. - **Documentation** - Added SPDX license identifier to relevant files. - **Chores** - Refactored internal methods to streamline model prediction processes. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- deepmd/backend/jax.py | 10 +- deepmd/dpmodel/descriptor/se_e2_a.py | 2 +- deepmd/dpmodel/model/make_model.py | 130 +++++++-- deepmd/dpmodel/utils/serialization.py | 4 +- deepmd/jax/env.py | 2 + deepmd/jax/infer/__init__.py | 1 + deepmd/jax/infer/deep_eval.py | 391 ++++++++++++++++++++++++++ deepmd/jax/model/hlo.py | 311 ++++++++++++++++++++ deepmd/jax/utils/serialization.py | 50 ++++ source/tests/consistent/io/test_io.py | 15 +- 10 files changed, 875 insertions(+), 41 deletions(-) create mode 100644 deepmd/jax/infer/__init__.py create mode 100644 deepmd/jax/infer/deep_eval.py create mode 100644 deepmd/jax/model/hlo.py diff --git a/deepmd/backend/jax.py b/deepmd/backend/jax.py index 7131f4d534..cfb0936bda 100644 --- a/deepmd/backend/jax.py +++ b/deepmd/backend/jax.py @@ -34,11 +34,11 @@ class JAXBackend(Backend): features: ClassVar[Backend.Feature] = ( Backend.Feature.IO | Backend.Feature.ENTRY_POINT - # | Backend.Feature.DEEP_EVAL + | Backend.Feature.DEEP_EVAL | Backend.Feature.NEIGHBOR_STAT ) """The features of the backend.""" - suffixes: ClassVar[list[str]] = [".jax"] + suffixes: ClassVar[list[str]] = [".hlo", ".jax"] """The suffixes of the backend.""" def is_available(self) -> bool: @@ -71,7 +71,11 @@ def deep_eval(self) -> type["DeepEvalBackend"]: type[DeepEvalBackend] The Deep Eval backend of the backend. """ - raise NotImplementedError + from deepmd.jax.infer.deep_eval import ( + DeepEval, + ) + + return DeepEval @property def neighbor_stat(self) -> type["NeighborStat"]: diff --git a/deepmd/dpmodel/descriptor/se_e2_a.py b/deepmd/dpmodel/descriptor/se_e2_a.py index feebe57af7..6c0efb94d4 100644 --- a/deepmd/dpmodel/descriptor/se_e2_a.py +++ b/deepmd/dpmodel/descriptor/se_e2_a.py @@ -555,7 +555,7 @@ def call( coord_ext, atype_ext, nlist, self.davg, self.dstd ) nf, nloc, nnei, _ = rr.shape - sec = xp.asarray(self.sel_cumsum) + sec = self.sel_cumsum ng = self.neuron[-1] gr = xp.zeros([nf * nloc, ng, 4], dtype=self.dstd.dtype) diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py index e36182e712..b6379573e1 100644 --- a/deepmd/dpmodel/model/make_model.py +++ b/deepmd/dpmodel/model/make_model.py @@ -1,5 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( + Callable, Optional, ) @@ -39,6 +40,95 @@ ) +def model_call_from_call_lower( + *, # enforce keyword-only arguments + call_lower: Callable[ + [ + np.ndarray, + np.ndarray, + np.ndarray, + Optional[np.ndarray], + Optional[np.ndarray], + bool, + ], + dict[str, np.ndarray], + ], + rcut: float, + sel: list[int], + mixed_types: bool, + model_output_def: ModelOutputDef, + coord: np.ndarray, + atype: np.ndarray, + box: Optional[np.ndarray] = None, + fparam: Optional[np.ndarray] = None, + aparam: Optional[np.ndarray] = None, + do_atomic_virial: bool = False, +): + """Return model prediction from lower interface. + + Parameters + ---------- + coord + The coordinates of the atoms. + shape: nf x (nloc x 3) + atype + The type of atoms. shape: nf x nloc + box + The simulation box. shape: nf x 9 + fparam + frame parameter. nf x ndf + aparam + atomic parameter. nf x nloc x nda + do_atomic_virial + If calculate the atomic virial. + + Returns + ------- + ret_dict + The result dict of type dict[str,np.ndarray]. + The keys are defined by the `ModelOutputDef`. + + """ + nframes, nloc = atype.shape[:2] + cc, bb, fp, ap = coord, box, fparam, aparam + del coord, box, fparam, aparam + if bb is not None: + coord_normalized = normalize_coord( + cc.reshape(nframes, nloc, 3), + bb.reshape(nframes, 3, 3), + ) + else: + coord_normalized = cc.copy() + extended_coord, extended_atype, mapping = extend_coord_with_ghosts( + coord_normalized, atype, bb, rcut + ) + nlist = build_neighbor_list( + extended_coord, + extended_atype, + nloc, + rcut, + sel, + distinguish_types=not mixed_types, + ) + extended_coord = extended_coord.reshape(nframes, -1, 3) + model_predict_lower = call_lower( + extended_coord, + extended_atype, + nlist, + mapping, + fparam=fp, + aparam=ap, + do_atomic_virial=do_atomic_virial, + ) + model_predict = communicate_extended_output( + model_predict_lower, + model_output_def, + mapping, + do_atomic_virial=do_atomic_virial, + ) + return model_predict + + def make_model(T_AtomicModel: type[BaseAtomicModel]): """Make a model as a derived class of an atomic model. @@ -130,45 +220,23 @@ def call( The keys are defined by the `ModelOutputDef`. """ - nframes, nloc = atype.shape[:2] cc, bb, fp, ap, input_prec = self.input_type_cast( coord, box=box, fparam=fparam, aparam=aparam ) del coord, box, fparam, aparam - if bb is not None: - coord_normalized = normalize_coord( - cc.reshape(nframes, nloc, 3), - bb.reshape(nframes, 3, 3), - ) - else: - coord_normalized = cc.copy() - extended_coord, extended_atype, mapping = extend_coord_with_ghosts( - coord_normalized, atype, bb, self.get_rcut() - ) - nlist = build_neighbor_list( - extended_coord, - extended_atype, - nloc, - self.get_rcut(), - self.get_sel(), - distinguish_types=not self.mixed_types(), - ) - extended_coord = extended_coord.reshape(nframes, -1, 3) - model_predict_lower = self.call_lower( - extended_coord, - extended_atype, - nlist, - mapping, + model_predict = model_call_from_call_lower( + call_lower=self.call_lower, + rcut=self.get_rcut(), + sel=self.get_sel(), + mixed_types=self.mixed_types(), + model_output_def=self.model_output_def(), + coord=cc, + atype=atype, + box=bb, fparam=fp, aparam=ap, do_atomic_virial=do_atomic_virial, ) - model_predict = communicate_extended_output( - model_predict_lower, - self.model_output_def(), - mapping, - do_atomic_virial=do_atomic_virial, - ) model_predict = self.output_type_cast(model_predict, input_prec) return model_predict diff --git a/deepmd/dpmodel/utils/serialization.py b/deepmd/dpmodel/utils/serialization.py index 5e70ec6769..37702cc9f0 100644 --- a/deepmd/dpmodel/utils/serialization.py +++ b/deepmd/dpmodel/utils/serialization.py @@ -90,7 +90,7 @@ def save_dp_model(filename: str, model_dict: dict) -> None: # use UTC+0 time "time": str(datetime.datetime.now(tz=datetime.timezone.utc)), } - if filename_extension == ".dp": + if filename_extension in (".dp", ".hlo"): variable_counter = Counter() with h5py.File(filename, "w") as f: model_dict = traverse_model_dict( @@ -141,7 +141,7 @@ def load_dp_model(filename: str) -> dict: The loaded model dict, including meta information. """ filename_extension = Path(filename).suffix - if filename_extension == ".dp": + if filename_extension in {".dp", ".hlo"}: with h5py.File(filename, "r") as f: model_dict = json.loads(f.attrs["json"]) model_dict = traverse_model_dict(model_dict, lambda x: f[x][()].copy()) diff --git a/deepmd/jax/env.py b/deepmd/jax/env.py index ee11e17125..1b90433b00 100644 --- a/deepmd/jax/env.py +++ b/deepmd/jax/env.py @@ -8,6 +8,7 @@ from flax import ( nnx, ) +from jax import export as jax_export jax.config.update("jax_enable_x64", True) # jax.config.update("jax_debug_nans", True) @@ -16,4 +17,5 @@ "jax", "jnp", "nnx", + "jax_export", ] diff --git a/deepmd/jax/infer/__init__.py b/deepmd/jax/infer/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/jax/infer/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/jax/infer/deep_eval.py b/deepmd/jax/infer/deep_eval.py new file mode 100644 index 0000000000..76f044a327 --- /dev/null +++ b/deepmd/jax/infer/deep_eval.py @@ -0,0 +1,391 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Union, +) + +import numpy as np + +from deepmd.dpmodel.common import ( + to_numpy_array, +) +from deepmd.dpmodel.output_def import ( + ModelOutputDef, + OutputVariableCategory, + OutputVariableDef, +) +from deepmd.dpmodel.utils.serialization import ( + load_dp_model, +) +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.infer.deep_dipole import ( + DeepDipole, +) +from deepmd.infer.deep_dos import ( + DeepDOS, +) +from deepmd.infer.deep_eval import DeepEval as DeepEvalWrapper +from deepmd.infer.deep_eval import ( + DeepEvalBackend, +) +from deepmd.infer.deep_polar import ( + DeepPolar, +) +from deepmd.infer.deep_pot import ( + DeepPot, +) +from deepmd.infer.deep_wfc import ( + DeepWFC, +) +from deepmd.jax.common import ( + to_jax_array, +) +from deepmd.jax.model.hlo import ( + HLO, +) +from deepmd.jax.utils.auto_batch_size import ( + AutoBatchSize, +) + +if TYPE_CHECKING: + import ase.neighborlist + + +class DeepEval(DeepEvalBackend): + """NumPy backend implementation of DeepEval. + + Parameters + ---------- + model_file : str + The name of the frozen model file. + output_def : ModelOutputDef + The output definition of the model. + *args : list + Positional arguments. + auto_batch_size : bool or int or AutoBatchSize, default: True + If True, automatic batch size will be used. If int, it will be used + as the initial batch size. + neighbor_list : ase.neighborlist.NewPrimitiveNeighborList, optional + The ASE neighbor list class to produce the neighbor list. If None, the + neighbor list will be built natively in the model. + **kwargs : dict + Keyword arguments. + """ + + def __init__( + self, + model_file: str, + output_def: ModelOutputDef, + *args: Any, + auto_batch_size: Union[bool, int, AutoBatchSize] = True, + neighbor_list: Optional["ase.neighborlist.NewPrimitiveNeighborList"] = None, + **kwargs: Any, + ): + self.output_def = output_def + self.model_path = model_file + + model_data = load_dp_model(model_file) + self.dp = HLO( + stablehlo=model_data["@variables"]["stablehlo"].tobytes(), + model_def_script=model_data["model_def_script"], + **model_data["constants"], + ) + self.rcut = self.dp.get_rcut() + self.type_map = self.dp.get_type_map() + if isinstance(auto_batch_size, bool): + if auto_batch_size: + self.auto_batch_size = AutoBatchSize() + else: + self.auto_batch_size = None + elif isinstance(auto_batch_size, int): + self.auto_batch_size = AutoBatchSize(auto_batch_size) + elif isinstance(auto_batch_size, AutoBatchSize): + self.auto_batch_size = auto_batch_size + else: + raise TypeError("auto_batch_size should be bool, int, or AutoBatchSize") + + def get_rcut(self) -> float: + """Get the cutoff radius of this model.""" + return self.rcut + + def get_ntypes(self) -> int: + """Get the number of atom types of this model.""" + return len(self.type_map) + + def get_type_map(self) -> list[str]: + """Get the type map (element name of the atom types) of this model.""" + return self.type_map + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this DP.""" + return self.dp.get_dim_fparam() + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this DP.""" + return self.dp.get_dim_aparam() + + @property + def model_type(self) -> type["DeepEvalWrapper"]: + """The evaluator of the model type.""" + model_output_type = self.dp.model_output_type() + if "energy" in model_output_type: + return DeepPot + elif "dos" in model_output_type: + return DeepDOS + elif "dipole" in model_output_type: + return DeepDipole + elif "polar" in model_output_type: + return DeepPolar + elif "wfc" in model_output_type: + return DeepWFC + else: + raise RuntimeError("Unknown model type") + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return self.dp.get_sel_type() + + def get_numb_dos(self) -> int: + """Get the number of DOS.""" + return 0 + + def get_has_efield(self): + """Check if the model has efield.""" + return False + + def get_ntypes_spin(self): + """Get the number of spin atom types of this model.""" + return 0 + + def eval( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + atomic: bool = False, + fparam: Optional[np.ndarray] = None, + aparam: Optional[np.ndarray] = None, + **kwargs: Any, + ) -> dict[str, np.ndarray]: + """Evaluate the energy, force and virial by using this DP. + + Parameters + ---------- + coords + The coordinates of atoms. + The array should be of size nframes x natoms x 3 + cells + The cell of the region. + If None then non-PBC is assumed, otherwise using PBC. + The array should be of size nframes x 3 x 3 + atom_types + The atom types + The list should contain natoms ints + atomic + Calculate the atomic energy and virial + fparam + The frame parameter. + The array can be of size : + - nframes x dim_fparam. + - dim_fparam. Then all frames are assumed to be provided with the same fparam. + aparam + The atomic parameter + The array can be of size : + - nframes x natoms x dim_aparam. + - natoms x dim_aparam. Then all frames are assumed to be provided with the same aparam. + - dim_aparam. Then all frames and atoms are provided with the same aparam. + **kwargs + Other parameters + + Returns + ------- + output_dict : dict + The output of the evaluation. The keys are the names of the output + variables, and the values are the corresponding output arrays. + """ + if fparam is not None or aparam is not None: + raise NotImplementedError + # convert all of the input to numpy array + atom_types = np.array(atom_types, dtype=np.int32) + coords = np.array(coords) + if cells is not None: + cells = np.array(cells) + natoms, numb_test = self._get_natoms_and_nframes( + coords, atom_types, len(atom_types.shape) > 1 + ) + request_defs = self._get_request_defs(atomic) + out = self._eval_func(self._eval_model, numb_test, natoms)( + coords, cells, atom_types, request_defs + ) + return dict( + zip( + [x.name for x in request_defs], + out, + ) + ) + + def _get_request_defs(self, atomic: bool) -> list[OutputVariableDef]: + """Get the requested output definitions. + + When atomic is True, all output_def are requested. + When atomic is False, only energy (tensor), force, and virial + are requested. + + Parameters + ---------- + atomic : bool + Whether to request the atomic output. + + Returns + ------- + list[OutputVariableDef] + The requested output definitions. + """ + if atomic: + return list(self.output_def.var_defs.values()) + else: + return [ + x + for x in self.output_def.var_defs.values() + if x.category + in ( + OutputVariableCategory.REDU, + OutputVariableCategory.DERV_R, + OutputVariableCategory.DERV_C_REDU, + ) + ] + + def _eval_func(self, inner_func: Callable, numb_test: int, natoms: int) -> Callable: + """Wrapper method with auto batch size. + + Parameters + ---------- + inner_func : Callable + the method to be wrapped + numb_test : int + number of tests + natoms : int + number of atoms + + Returns + ------- + Callable + the wrapper + """ + if self.auto_batch_size is not None: + + def eval_func(*args, **kwargs): + return self.auto_batch_size.execute_all( + inner_func, numb_test, natoms, *args, **kwargs + ) + + else: + eval_func = inner_func + return eval_func + + def _get_natoms_and_nframes( + self, + coords: np.ndarray, + atom_types: np.ndarray, + mixed_type: bool = False, + ) -> tuple[int, int]: + if mixed_type: + natoms = len(atom_types[0]) + else: + natoms = len(atom_types) + if natoms == 0: + assert coords.size == 0 + else: + coords = np.reshape(np.array(coords), [-1, natoms * 3]) + nframes = coords.shape[0] + return natoms, nframes + + def _eval_model( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + request_defs: list[OutputVariableDef], + ): + model = self.dp + + nframes = coords.shape[0] + if len(atom_types.shape) == 1: + natoms = len(atom_types) + atom_types = np.tile(atom_types, nframes).reshape(nframes, -1) + else: + natoms = len(atom_types[0]) + + coord_input = coords.reshape([-1, natoms, 3]) + type_input = atom_types + if cells is not None: + box_input = cells.reshape([-1, 3, 3]) + else: + box_input = None + + do_atomic_virial = any( + x.category == OutputVariableCategory.DERV_C_REDU for x in request_defs + ) + batch_output = model( + to_jax_array(coord_input), + to_jax_array(type_input), + box=to_jax_array(box_input), + do_atomic_virial=do_atomic_virial, + ) + if isinstance(batch_output, tuple): + batch_output = batch_output[0] + for kk, vv in batch_output.items(): + batch_output[kk] = to_numpy_array(vv) + + results = [] + for odef in request_defs: + # it seems not doing conversion + # dp_name = self._OUTDEF_DP2BACKEND[odef.name] + dp_name = odef.name + if dp_name in batch_output: + shape = self._get_output_shape(odef, nframes, natoms) + if batch_output[dp_name] is not None: + out = batch_output[dp_name].reshape(shape) + else: + out = np.full(shape, np.nan, dtype=GLOBAL_NP_FLOAT_PRECISION) + results.append(out) + else: + shape = self._get_output_shape(odef, nframes, natoms) + results.append( + np.full(np.abs(shape), np.nan, dtype=GLOBAL_NP_FLOAT_PRECISION) + ) # this is kinda hacky + return tuple(results) + + def _get_output_shape(self, odef, nframes, natoms): + if odef.category == OutputVariableCategory.DERV_C_REDU: + # virial + return [nframes, *odef.shape[:-1], 9] + elif odef.category == OutputVariableCategory.REDU: + # energy + return [nframes, *odef.shape, 1] + elif odef.category == OutputVariableCategory.DERV_C: + # atom_virial + return [nframes, *odef.shape[:-1], natoms, 9] + elif odef.category == OutputVariableCategory.DERV_R: + # force + return [nframes, *odef.shape[:-1], natoms, 3] + elif odef.category == OutputVariableCategory.OUT: + # atom_energy, atom_tensor + return [nframes, natoms, *odef.shape, 1] + else: + raise RuntimeError("unknown category") + + def get_model_def_script(self) -> dict: + """Get model definition script.""" + return json.loads(self.dp.get_model_def_script()) diff --git a/deepmd/jax/model/hlo.py b/deepmd/jax/model/hlo.py new file mode 100644 index 0000000000..010e3d7a5e --- /dev/null +++ b/deepmd/jax/model/hlo.py @@ -0,0 +1,311 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, + Optional, +) + +from deepmd.dpmodel.model.make_model import ( + model_call_from_call_lower, +) +from deepmd.dpmodel.output_def import ( + FittingOutputDef, + ModelOutputDef, + OutputVariableDef, +) +from deepmd.jax.env import ( + jax_export, + jnp, +) +from deepmd.jax.model.base_model import ( + BaseModel, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) + +OUTPUT_DEFS = { + "energy": OutputVariableDef( + "energy", + shape=[1], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + "mask": OutputVariableDef( + "mask", + shape=[1], + reducible=False, + r_differentiable=False, + c_differentiable=False, + ), +} + + +class HLO(BaseModel): + def __init__( + self, + stablehlo, + model_def_script, + type_map, + rcut, + dim_fparam, + dim_aparam, + sel_type, + is_aparam_nall, + model_output_type, + mixed_types, + min_nbor_dist, + sel, + ) -> None: + self._call_lower = jax_export.deserialize(stablehlo).call + self.stablehlo = stablehlo + self.type_map = type_map + self.rcut = rcut + self.dim_fparam = dim_fparam + self.dim_aparam = dim_aparam + self.sel_type = sel_type + self._is_aparam_nall = is_aparam_nall + self._model_output_type = model_output_type + self._mixed_types = mixed_types + self.min_nbor_dist = min_nbor_dist + self.sel = sel + self.model_def_script = model_def_script + + def __call__( + self, + coord: jnp.ndarray, + atype: jnp.ndarray, + box: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + do_atomic_virial: bool = False, + ) -> Any: + """Return model prediction. + + Parameters + ---------- + coord + The coordinates of the atoms. + shape: nf x (nloc x 3) + atype + The type of atoms. shape: nf x nloc + box + The simulation box. shape: nf x 9 + fparam + frame parameter. nf x ndf + aparam + atomic parameter. nf x nloc x nda + do_atomic_virial + If calculate the atomic virial. + + Returns + ------- + ret_dict + The result dict of type dict[str,np.ndarray]. + The keys are defined by the `ModelOutputDef`. + + """ + return self.call(coord, atype, box, fparam, aparam, do_atomic_virial) + + def call( + self, + coord: jnp.ndarray, + atype: jnp.ndarray, + box: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + do_atomic_virial: bool = False, + ): + """Return model prediction. + + Parameters + ---------- + coord + The coordinates of the atoms. + shape: nf x (nloc x 3) + atype + The type of atoms. shape: nf x nloc + box + The simulation box. shape: nf x 9 + fparam + frame parameter. nf x ndf + aparam + atomic parameter. nf x nloc x nda + do_atomic_virial + If calculate the atomic virial. + + Returns + ------- + ret_dict + The result dict of type dict[str,np.ndarray]. + The keys are defined by the `ModelOutputDef`. + + """ + return model_call_from_call_lower( + call_lower=self.call_lower, + rcut=self.get_rcut(), + sel=self.get_sel(), + mixed_types=self.mixed_types(), + model_output_def=self.model_output_def(), + coord=coord, + atype=atype, + box=box, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) + + def model_output_def(self): + return ModelOutputDef( + FittingOutputDef([OUTPUT_DEFS[tt] for tt in self.model_output_type()]) + ) + + def call_lower( + self, + extended_coord: jnp.ndarray, + extended_atype: jnp.ndarray, + nlist: jnp.ndarray, + mapping: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + do_atomic_virial: bool = False, + ): + return self._call_lower( + extended_coord, + extended_atype, + nlist, + mapping, + fparam, + aparam, + do_atomic_virial, + ) + + def get_type_map(self) -> list[str]: + """Get the type map.""" + return self.type_map + + def get_rcut(self): + """Get the cut-off radius.""" + return self.rcut + + def get_dim_fparam(self): + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.dim_fparam + + def get_dim_aparam(self): + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.dim_aparam + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return self.sel_type + + def is_aparam_nall(self) -> bool: + """Check whether the shape of atomic parameters is (nframes, nall, ndim). + + If False, the shape is (nframes, nloc, ndim). + """ + return self._is_aparam_nall + + def model_output_type(self) -> list[str]: + """Get the output type for the model.""" + return self._model_output_type + + def serialize(self) -> dict: + """Serialize the model. + + Returns + ------- + dict + The serialized data + """ + raise NotImplementedError("Not implemented") + + @classmethod + def deserialize(cls, data: dict) -> "BaseModel": + """Deserialize the model. + + Parameters + ---------- + data : dict + The serialized data + + Returns + ------- + BaseModel + The deserialized model + """ + raise NotImplementedError("Not implemented") + + def get_model_def_script(self) -> str: + """Get the model definition script.""" + return self.model_def_script + + def get_min_nbor_dist(self) -> Optional[float]: + """Get the minimum distance between two atoms.""" + return self.min_nbor_dist + + def get_nnei(self) -> int: + """Returns the total number of selected neighboring atoms in the cut-off radius.""" + return self.nsel + + def get_sel(self) -> list[int]: + return self.sel + + def get_nsel(self) -> int: + """Returns the total number of selected neighboring atoms in the cut-off radius.""" + return sum(self.sel) + + def mixed_types(self) -> bool: + return self._mixed_types + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. + + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statictics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + raise NotImplementedError("Not implemented") + + @classmethod + def get_model(cls, model_params: dict) -> "BaseModel": + """Get the model by the parameters. + + By default, all the parameters are directly passed to the constructor. + If not, override this method. + + Parameters + ---------- + model_params : dict + The model parameters + + Returns + ------- + BaseBaseModel + The model + """ + raise NotImplementedError("Not implemented") diff --git a/deepmd/jax/utils/serialization.py b/deepmd/jax/utils/serialization.py index 43070f8a07..fcfcc8a610 100644 --- a/deepmd/jax/utils/serialization.py +++ b/deepmd/jax/utils/serialization.py @@ -3,10 +3,17 @@ Path, ) +import numpy as np import orbax.checkpoint as ocp +from deepmd.dpmodel.utils.serialization import ( + load_dp_model, + save_dp_model, +) from deepmd.jax.env import ( jax, + jax_export, + jnp, nnx, ) from deepmd.jax.model.model import ( @@ -39,6 +46,44 @@ def deserialize_to_file(model_file: str, data: dict) -> None: model_def_script=ocp.args.JsonSave(model_def_script), ), ) + elif model_file.endswith(".hlo"): + model = BaseModel.deserialize(data["model"]) + model_def_script = data["model_def_script"] + call_lower = model.call_lower + + nf, nloc, nghost, nfp, nap = jax_export.symbolic_shape( + "nf, nloc, nghost, nfp, nap" + ) + exported = jax_export.export(jax.jit(call_lower))( + jax.ShapeDtypeStruct((nf, nloc + nghost, 3), jnp.float64), # extended_coord + jax.ShapeDtypeStruct((nf, nloc + nghost), jnp.int32), # extended_atype + jax.ShapeDtypeStruct((nf, nloc, model.get_nnei()), jnp.int64), # nlist + jax.ShapeDtypeStruct((nf, nloc + nghost), jnp.int64), # mapping + jax.ShapeDtypeStruct((nf, nfp), jnp.float64) + if model.get_dim_fparam() + else None, # fparam + jax.ShapeDtypeStruct((nf, nap), jnp.float64) + if model.get_dim_aparam() + else None, # aparam + False, # do_atomic_virial + ) + serialized: bytearray = exported.serialize() + data = data.copy() + data.setdefault("@variables", {}) + data["@variables"]["stablehlo"] = np.void(serialized) + data["constants"] = { + "type_map": model.get_type_map(), + "rcut": model.get_rcut(), + "dim_fparam": model.get_dim_fparam(), + "dim_aparam": model.get_dim_aparam(), + "sel_type": model.get_sel_type(), + "is_aparam_nall": model.is_aparam_nall(), + "model_output_type": model.model_output_type(), + "mixed_types": model.mixed_types(), + "min_nbor_dist": model.get_min_nbor_dist(), + "sel": model.get_sel(), + } + save_dp_model(filename=model_file, model_dict=data) else: raise ValueError("JAX backend only supports converting .jax directory") @@ -93,5 +138,10 @@ def convert_str_to_int_key(item: dict): "@variables": {}, } return data + elif model_file.endswith(".hlo"): + data = load_dp_model(model_file) + data.pop("constants") + data["@variables"].pop("stablehlo") + return data else: raise ValueError("JAX backend only supports converting .jax directory") diff --git a/source/tests/consistent/io/test_io.py b/source/tests/consistent/io/test_io.py index df81c24ff5..dc0f280d56 100644 --- a/source/tests/consistent/io/test_io.py +++ b/source/tests/consistent/io/test_io.py @@ -74,14 +74,21 @@ def tearDown(self): @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_data_equal(self): prefix = "test_consistent_io_" + self.__class__.__name__.lower() - for backend_name in ("tensorflow", "pytorch", "dpmodel", "jax"): + for backend_name, suffix_idx in ( + ("tensorflow", 0), + ("pytorch", 0), + ("dpmodel", 0), + ("jax", 0), + ): with self.subTest(backend_name=backend_name): backend = Backend.get_backend(backend_name)() if not backend.is_available(): continue reference_data = copy.deepcopy(self.data) - self.save_data_to_model(prefix + backend.suffixes[0], reference_data) - data = self.get_data_from_model(prefix + backend.suffixes[0]) + self.save_data_to_model( + prefix + backend.suffixes[suffix_idx], reference_data + ) + data = self.get_data_from_model(prefix + backend.suffixes[suffix_idx]) data = copy.deepcopy(data) reference_data = copy.deepcopy(self.data) # some keys are not expected to be not the same @@ -131,7 +138,7 @@ def test_deep_eval(self): ).reshape(1, 9) prefix = "test_consistent_io_" + self.__class__.__name__.lower() rets = [] - for backend_name in ("tensorflow", "pytorch", "dpmodel"): + for backend_name in ("tensorflow", "pytorch", "dpmodel", "jax"): backend = Backend.get_backend(backend_name)() if not backend.is_available(): continue From d4c7d1a67efff9bc09eef620cfbd84b5633fce75 Mon Sep 17 00:00:00 2001 From: hztttt <49030097+hztttt@users.noreply.github.com> Date: Thu, 31 Oct 2024 20:18:46 +0800 Subject: [PATCH 097/193] Spin lmp nlist (#35) * add spin nlist for tf * add UT nlist for spin * fix UT nlist for spin * add lammps spin UT * add model for deviation * fix lammps spin UT * support lammps spin MPI UT * add lammps UT for PT --- source/api_cc/include/DeepPotTF.h | 12 +- source/api_cc/src/DeepPotTF.cc | 105 +- source/api_cc/tests/test_deeppot_tf_spin.cc | 115 + source/lmp/tests/run_mpi_pair_deepmd_spin.py | 65 + source/lmp/tests/test_lammps_spin.py | 253 + source/lmp/tests/test_lammps_spin_pt.py | 249 + source/lmp/tests/write_lmp_data.py | 22 + source/tests/infer/deepspin_nlist-2.pbtxt | 22628 +++++++++++++++++ source/tests/infer/deepspin_nlist.pbtxt | 22628 +++++++++++++++++ 9 files changed, 46074 insertions(+), 3 deletions(-) create mode 100644 source/api_cc/tests/test_deeppot_tf_spin.cc create mode 100644 source/lmp/tests/run_mpi_pair_deepmd_spin.py create mode 100644 source/lmp/tests/test_lammps_spin.py create mode 100644 source/lmp/tests/test_lammps_spin_pt.py create mode 100644 source/tests/infer/deepspin_nlist-2.pbtxt create mode 100644 source/tests/infer/deepspin_nlist.pbtxt diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index 5f4cefe05a..4fbbe2f5c3 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -396,6 +396,14 @@ class DeepPotTF : public DeepPotBase { const int numb_types_spin, const std::vector& virtual_len, const std::vector& spin_norm); + + template + void extend_nlist(std::vector& extend_dcoord, + std::vector& extend_atype, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_); + void cum_sum(std::map&, std::map&); private: @@ -415,8 +423,8 @@ class DeepPotTF : public DeepPotBase { std::string model_version; int ntypes; int ntypes_spin; - // std::vector virtual_len; - // std::vector spin_norm; + std::vector virtual_len; + std::vector spin_norm; int extend_inum; std::vector extend_ilist; std::vector extend_numneigh; diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index 9e85a2bdbf..456d28f5d2 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -695,7 +695,60 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, const std::vector& fparam_, const std::vector& aparam_, const bool atomic) { - std::cout << "not support" << std::endl; + // if datype.size is 0, not clear nframes; but 1 is just ok + int nframes = datype_.size() > 0 ? (dcoord_.size() / 3 / datype_.size()) : 1; + int nloc = datype_.size(); + std::vector fparam; + std::vector aparam; + validate_fparam_aparam(nframes, nloc, fparam_, aparam_); + tile_fparam_aparam(fparam, nframes, dfparam, fparam_); + tile_fparam_aparam(aparam, nframes, nloc * daparam, aparam_); + + std::vector extend_dcoord; + std::vector extend_atype; + extend_nlist(extend_dcoord, extend_atype, dcoord_, dspin_, datype_); + + atommap = deepmd::AtomMap(extend_atype.begin(), extend_atype.end()); + + std::vector> input_tensors; + std::vector dforce_tmp; + + if (dtype == tensorflow::DT_DOUBLE) { + int ret = session_input_tensors(input_tensors, extend_dcoord, ntypes, + extend_atype, dbox, cell_size, fparam, + aparam, atommap, "", aparam_nall); + if (atomic) { + run_model(dener, dforce_tmp, dvirial, datom_energy_, datom_virial_, + session, input_tensors, atommap, nframes); + } else { + run_model(dener, dforce_tmp, dvirial, session, input_tensors, + atommap, nframes); + } + } else { + int ret = session_input_tensors(input_tensors, extend_dcoord, ntypes, + extend_atype, dbox, cell_size, fparam, + aparam, atommap, "", aparam_nall); + if (atomic) { + run_model(dener, dforce_tmp, dvirial, datom_energy_, datom_virial_, + session, input_tensors, atommap, nframes); + } else { + run_model(dener, dforce_tmp, dvirial, session, input_tensors, atommap, + nframes); + } + } + // backward force and mag. + dforce_.resize(static_cast(nframes) * nloc * 3); + dforce_mag_.resize(static_cast(nframes) * nloc * 3); + for (int ii = 0; ii < nloc; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + dforce_[3 * ii + dd] = dforce_tmp[3 * ii + dd]; + if (datype_[ii] < ntypes_spin) { + dforce_mag_[3 * ii + dd] = dforce_tmp[3 * (ii + nloc) + dd]; + } else { + dforce_mag_[3 * ii + dd] = 0.0; + } + } + } } template void DeepPotTF::compute( @@ -1594,4 +1647,54 @@ template void DeepPotTF::extend( const int numb_types_spin, const std::vector& virtual_len, const std::vector& spin_norm); + +template +void DeepPotTF::extend_nlist(std::vector& extend_dcoord, + std::vector& extend_atype, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_) { + if (dtype == tensorflow::DT_DOUBLE) { + get_vector(virtual_len, "spin_attr/virtual_len"); + get_vector(spin_norm, "spin_attr/spin_norm"); + } else { + std::vector virtual_len; + std::vector spin_norm; + get_vector(virtual_len, "spin_attr/virtual_len"); + get_vector(spin_norm, "spin_attr/spin_norm"); + } + // extend coord and atype + int nloc = datype_.size(); + int nloc_spin = 0; + for (int ii = 0; ii < nloc; ii++) { + if (datype_[ii] < ntypes_spin) nloc_spin += 1; + } + int extend_nall = nloc + nloc_spin; + extend_dcoord.resize(static_cast(extend_nall) * 3); + extend_atype.resize(extend_nall); + for (int ii = 0; ii < nloc; ii++) { + extend_atype[ii] = datype_[ii]; + if (datype_[ii] < ntypes_spin) + extend_atype[ii + nloc] = datype_[ii] + ntypes - ntypes_spin; + for (int jj = 0; jj < 3; jj++) { + extend_dcoord[ii * 3 + jj] = dcoord_[ii * 3 + jj]; + if (datype_[ii] < ntypes_spin) + extend_dcoord[(ii + nloc) * 3 + jj] = dcoord_[ii * 3 + jj] + dspin_[ii * 3 + jj] / spin_norm[datype_[ii]] * virtual_len[datype_[ii]]; + } + } +} + +template void DeepPotTF::extend_nlist( + std::vector& extend_dcoord, + std::vector& extend_atype, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_); + +template void DeepPotTF::extend_nlist( + std::vector& extend_dcoord, + std::vector& extend_atype, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_); #endif diff --git a/source/api_cc/tests/test_deeppot_tf_spin.cc b/source/api_cc/tests/test_deeppot_tf_spin.cc new file mode 100644 index 0000000000..246fa0c51a --- /dev/null +++ b/source/api_cc/tests/test_deeppot_tf_spin.cc @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "DeepPot.h" +#include "neighbor_list.h" +#include "test_utils.h" + +template +class TestInferDeepPotSpin : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, + 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 0, 1, 1}; + std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; + std::vector expected_e = { + -7.314365618560289 , -7.313531316181837 , + -2.8980532245013997, -2.897373810282277}; + std::vector expected_f = { + 0.0275132293555514, -0.0112057401883111, -0.0212278132621243, + -0.0229926640905535, 0.0114378553363334, 0.019670014885563 , + 0.0086502856137601, 0.0088926283192558, -0.0127014507822769, + -0.013170850878758 , -0.009124743467278 , 0.0142592491588383}; + std::vector expected_fm = { + 0.0066245455049449, -0.0023055088004378, 0.0294608578045521, + -0.0041979452385972, 0.0025775020220167, 0.0316295420619988, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + int natoms; + double expected_tot_e; + + deepmd::DeepPot dp; + + void SetUp() override { + std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; + deepmd::convert_pbtxt_to_pb("../../tests/infer/deepspin_nlist.pbtxt", + "deepspin_nlist.pb"); + + dp.init("deepspin_nlist.pb"); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + expected_tot_e = 0.; + for (int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + }; + + void TearDown() override { remove("deepspin_nlist.pb"); }; +}; + +TYPED_TEST_SUITE(TestInferDeepPotSpin, ValueTypes); + +TYPED_TEST(TestInferDeepPotSpin, cpu_build_nlist) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + deepmd::DeepPot& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } +} + +TYPED_TEST(TestInferDeepPotSpin, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + deepmd::DeepPot& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box); + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(atom_ener.size(), natoms); + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } +} diff --git a/source/lmp/tests/run_mpi_pair_deepmd_spin.py b/source/lmp/tests/run_mpi_pair_deepmd_spin.py new file mode 100644 index 0000000000..47e807e088 --- /dev/null +++ b/source/lmp/tests/run_mpi_pair_deepmd_spin.py @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +"""Use mpi4py to run a LAMMPS pair_deepmd + model deviation (atomic, relative) task.""" + +import argparse + +import numpy as np +from lammps import ( + PyLammps, +) +from mpi4py import ( + MPI, +) + +comm = MPI.COMM_WORLD +rank = comm.Get_rank() + +parser = argparse.ArgumentParser() +parser.add_argument("DATAFILE", type=str) +parser.add_argument("PBFILE", type=str) +parser.add_argument("PBFILE2", type=str) +parser.add_argument("MD_FILE", type=str) +parser.add_argument("OUTPUT", type=str) +parser.add_argument("--balance", action="store_true") +parser.add_argument("--nopbc", action="store_true") + +args = parser.parse_args() +data_file = args.DATAFILE +pb_file = args.PBFILE +pb_file2 = args.PBFILE2 +md_file = args.MD_FILE +output = args.OUTPUT +balance = args.balance + +lammps = PyLammps() +if balance: + # 4 and 2 atoms + lammps.processors("2 1 1") +else: + # 6 and 0 atoms + lammps.processors("1 2 1") +lammps.units("metal") +if args.nopbc: + lammps.boundary("f f f") +else: + lammps.boundary("p p p") +lammps.atom_style("spin") +lammps.neighbor("2.0 bin") +lammps.neigh_modify("every 10 delay 0 check no") +lammps.read_data(data_file) +lammps.mass("1 58") +lammps.mass("2 16") +lammps.timestep(0.0005) +lammps.fix("1 all nve") + +relative = 1.0 +lammps.pair_style( + f"deepmd {pb_file} {pb_file2} out_file {md_file} out_freq 1 atomic relative {relative}" +) +lammps.pair_coeff("* *") +lammps.run(0) +if rank == 0: + pe = lammps.eval("pe") + arr = [pe] + np.savetxt(output, np.array(arr)) +MPI.Finalize() diff --git a/source/lmp/tests/test_lammps_spin.py b/source/lmp/tests/test_lammps_spin.py new file mode 100644 index 0000000000..11bf2bc93b --- /dev/null +++ b/source/lmp/tests/test_lammps_spin.py @@ -0,0 +1,253 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import importlib +import os +import shutil +import subprocess as sp +import sys +import tempfile +from pathlib import ( + Path, +) + +import constants +import numpy as np +import pytest +from lammps import ( + PyLammps, +) +from write_lmp_data import ( + write_lmp_data_spin, +) + +pbtxt_file = Path(__file__).parent.parent.parent / "tests" / "infer" / "deepspin_nlist.pbtxt" +pbtxt_file2 = ( + Path(__file__).parent.parent.parent / "tests" / "infer" / "deepspin_nlist-2.pbtxt" +) +pb_file = Path(__file__).parent / "graph.pb" +pb_file2 = Path(__file__).parent / "graph2.pb" +system_file = Path(__file__).parent.parent.parent / "tests" +data_file = Path(__file__).parent / "data.lmp" +data_file_si = Path(__file__).parent / "data.si" +data_type_map_file = Path(__file__).parent / "data_type_map.lmp" +md_file = Path(__file__).parent / "md.out" + +expected_ae = np.array( + [ + -7.314365618560289 , + -7.313531316181837 , + -2.8980532245013997, + -2.897373810282277 + ] +) +expected_e = np.sum(expected_ae) +expected_f = np.array( + [ + [0.0275132293555514, -0.0112057401883111, -0.0212278132621243], + [-0.0229926640905535, 0.0114378553363334, 0.019670014885563], + [0.0086502856137601, 0.0088926283192558, -0.0127014507822769], + [-0.013170850878758 , -0.009124743467278 , 0.0142592491588383] + ] +) +expected_fm = np.array( + [ + [0.0066245455049449, -0.0023055088004378, 0.0294608578045521], + [-0.0041979452385972, 0.0025775020220167, 0.0316295420619988], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000] + ] +) + +expected_f2 = np.array( + [ + [-0.0009939342103254, 0.0009450997605637, -0.0002710189976979], + [0.0040364645780618, -0.0008326705633617, -0.000208982833015], + [0.0007716358981262, 0.0018705501216939, -0.002687696295354], + [-0.0038141662658625, -0.0019829793188958, 0.0031676981260669] + ] +) + +expected_fm2 = np.array( + [ + [0.0021649674715341, -0.0008507073771461, 0.0270620372234819], + [-0.0026523551738949, 0.0013308033074224, 0.0294569107929189], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000] + ] +) + +box = np.array([0, 13, 0, 13, 0, 13, 0, 0, 0]) +coord = np.array( + [ + [12.83, 2.56, 2.18], + [12.09, 2.87, 2.74], + [3.51, 2.51, 2.60], + [4.27, 3.22, 1.56], + ] +) +spin = np.array( + [ + [0, 0, 1.2737], + [0, 0, 1.2737], + [0, 0, 0], + [0, 0, 0], + ] +) +type_NiO = np.array([1, 1, 2, 2]) + + +sp.check_output( + f"{sys.executable} -m deepmd convert-from pbtxt -i {pbtxt_file.resolve()} -o {pb_file.resolve()}".split() +) +sp.check_output( + f"{sys.executable} -m deepmd convert-from pbtxt -i {pbtxt_file2.resolve()} -o {pb_file2.resolve()}".split() +) + + +def setup_module(): + write_lmp_data_spin(box, coord, spin, type_NiO, data_file) + + +def teardown_module(): + os.remove(data_file) + + +def _lammps(data_file, units="metal") -> PyLammps: + lammps = PyLammps() + lammps.units(units) + lammps.boundary("p p p") + lammps.atom_style("spin") + if units == "metal": + lammps.neighbor("2.0 bin") + else: + raise ValueError("units for spin should be metal") + lammps.neigh_modify("every 10 delay 0 check no") + lammps.read_data(data_file.resolve()) + if units == "metal": + lammps.mass("1 58") + lammps.mass("2 16") + else: + raise ValueError("units for spin should be metal") + if units == "metal": + lammps.timestep(0.0005) + else: + raise ValueError("units for spin should be metal") + lammps.fix("1 all nve") + return lammps + + +@pytest.fixture +def lammps(): + lmp = _lammps(data_file=data_file) + yield lmp + lmp.close() + + +def test_pair_deepmd(lammps): + lammps.pair_style(f"deepmd {pb_file.resolve()}") + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + lammps.run(1) + + +def test_pair_deepmd_model_devi(lammps): + lammps.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1" + ) + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) + + +def test_pair_deepmd_model_devi_atomic_relative(lammps): + relative = 1.0 + lammps.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative {relative}" + ) + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + norm_spin = np.linalg.norm(np.mean([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_f /= norm + relative + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_fm /= norm_spin + relative + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) + + +@pytest.mark.skipif( + shutil.which("mpirun") is None, reason="MPI is not installed on this system" +) +@pytest.mark.skipif( + importlib.util.find_spec("mpi4py") is None, reason="mpi4py is not installed" +) +@pytest.mark.parametrize( + ("balance_args",), + [(["--balance"],), ([],)], +) +def test_pair_deepmd_mpi(balance_args: list): + with tempfile.NamedTemporaryFile() as f: + sp.check_call( + [ + "mpirun", + "-n", + "2", + sys.executable, + Path(__file__).parent / "run_mpi_pair_deepmd_spin.py", + data_file, + pb_file, + pb_file2, + md_file, + f.name, + *balance_args, + ] + ) + arr = np.loadtxt(f.name, ndmin=1) + pe = arr[0] + + relative = 1.0 + assert pe == pytest.approx(expected_e) + # load model devi + md = np.loadtxt(md_file.resolve()) + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + norm_spin = np.linalg.norm(np.mean([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_f /= norm + relative + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_fm /= norm_spin + relative + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) \ No newline at end of file diff --git a/source/lmp/tests/test_lammps_spin_pt.py b/source/lmp/tests/test_lammps_spin_pt.py new file mode 100644 index 0000000000..e0a596d2ae --- /dev/null +++ b/source/lmp/tests/test_lammps_spin_pt.py @@ -0,0 +1,249 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import importlib +import os +import shutil +import subprocess as sp +import sys +import tempfile +from pathlib import ( + Path, +) + +import constants +import numpy as np +import pytest +from lammps import ( + PyLammps, +) +from write_lmp_data import ( + write_lmp_data_spin, +) + +pbtxt_file2 = ( + Path(__file__).parent.parent.parent / "tests" / "infer" / "deepspin_nlist-2.pbtxt" +) +pb_file = Path(__file__).parent.parent.parent / "tests" / "infer" / "deeppot_dpa_spin.pth" +pb_file2 = Path(__file__).parent / "graph2.pb" +system_file = Path(__file__).parent.parent.parent / "tests" +data_file = Path(__file__).parent / "data.lmp" +data_file_si = Path(__file__).parent / "data.si" +data_type_map_file = Path(__file__).parent / "data_type_map.lmp" +md_file = Path(__file__).parent / "md.out" + +expected_ae = np.array( + [ + -5.449480235829702, + -5.477427268428831, + -5.123857693399778, + -5.177090216511519 + ] +) +expected_e = np.sum(expected_ae) +expected_f = np.array( + [ + [0.0009801138704236, -0.0463347604851765, -0.0971306357815108], + [-0.1470821855808306, 0.0437825717490265, 0.1068452488480858], + [0.0227539242796509, -0.0733473535079378, 0.1021096625763913], + [0.123348147430756 , 0.0758995422440877, -0.1118242756429664] + ] +) +expected_fm = np.array( + [ + [0.0072488655758703, -0.0111496506342658, 0.018024837587741], + [-0.0469100751121456, 0.0170834549641258, 0.0338904617477562], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000] + ] +) + +expected_f2 = np.array( + [ + [-0.0009939342103254, 0.0009450997605637, -0.0002710189976979], + [0.0040364645780618, -0.0008326705633617, -0.000208982833015], + [0.0007716358981262, 0.0018705501216939, -0.002687696295354], + [-0.0038141662658625, -0.0019829793188958, 0.0031676981260669] + ] +) + +expected_fm2 = np.array( + [ + [0.0021649674715341, -0.0008507073771461, 0.0270620372234819], + [-0.0026523551738949, 0.0013308033074224, 0.0294569107929189], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000] + ] +) + +box = np.array([0, 13, 0, 13, 0, 13, 0, 0, 0]) +coord = np.array( + [ + [12.83, 2.56, 2.18], + [12.09, 2.87, 2.74], + [3.51, 2.51, 2.60], + [4.27, 3.22, 1.56], + ] +) +spin = np.array( + [ + [0, 0, 1.2737], + [0, 0, 1.2737], + [0, 0, 0], + [0, 0, 0], + ] +) +type_NiO = np.array([1, 1, 2, 2]) + + +sp.check_output( + f"{sys.executable} -m deepmd convert-from pbtxt -i {pbtxt_file2.resolve()} -o {pb_file2.resolve()}".split() +) + + +def setup_module(): + write_lmp_data_spin(box, coord, spin, type_NiO, data_file) + + +def teardown_module(): + os.remove(data_file) + + +def _lammps(data_file, units="metal") -> PyLammps: + lammps = PyLammps() + lammps.units(units) + lammps.boundary("p p p") + lammps.atom_style("spin") + if units == "metal": + lammps.neighbor("2.0 bin") + else: + raise ValueError("units for spin should be metal") + lammps.neigh_modify("every 10 delay 0 check no") + lammps.read_data(data_file.resolve()) + if units == "metal": + lammps.mass("1 58") + lammps.mass("2 16") + else: + raise ValueError("units for spin should be metal") + if units == "metal": + lammps.timestep(0.0005) + else: + raise ValueError("units for spin should be metal") + lammps.fix("1 all nve") + return lammps + + +@pytest.fixture +def lammps(): + lmp = _lammps(data_file=data_file) + yield lmp + lmp.close() + + +def test_pair_deepmd(lammps): + lammps.pair_style(f"deepmd {pb_file.resolve()}") + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + lammps.run(1) + + +def test_pair_deepmd_model_devi(lammps): + lammps.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1" + ) + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) + + +def test_pair_deepmd_model_devi_atomic_relative(lammps): + relative = 1.0 + lammps.pair_style( + f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative {relative}" + ) + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + norm_spin = np.linalg.norm(np.mean([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_f /= norm + relative + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_fm /= norm_spin + relative + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) + + +@pytest.mark.skipif( + shutil.which("mpirun") is None, reason="MPI is not installed on this system" +) +@pytest.mark.skipif( + importlib.util.find_spec("mpi4py") is None, reason="mpi4py is not installed" +) +@pytest.mark.parametrize( + ("balance_args",), + [(["--balance"],), ([],)], +) +def test_pair_deepmd_mpi(balance_args: list): + with tempfile.NamedTemporaryFile() as f: + sp.check_call( + [ + "mpirun", + "-n", + "2", + sys.executable, + Path(__file__).parent / "run_mpi_pair_deepmd_spin.py", + data_file, + pb_file, + pb_file2, + md_file, + f.name, + *balance_args, + ] + ) + arr = np.loadtxt(f.name, ndmin=1) + pe = arr[0] + + relative = 1.0 + assert pe == pytest.approx(expected_e) + # load model devi + md = np.loadtxt(md_file.resolve()) + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + norm_spin = np.linalg.norm(np.mean([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_f /= norm + relative + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_fm /= norm_spin + relative + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) \ No newline at end of file diff --git a/source/lmp/tests/write_lmp_data.py b/source/lmp/tests/write_lmp_data.py index 12e91764f1..10c73c4076 100644 --- a/source/lmp/tests/write_lmp_data.py +++ b/source/lmp/tests/write_lmp_data.py @@ -69,3 +69,25 @@ def write_lmp_data_full( % (bond_count, i + 1, bond_list[i][j][0], bond_list[i][j][1]) ) f.write("\n") + + +def write_lmp_data_spin(box, coord, spin, type_list, file_name): + natom = coord.shape[0] + ntype = np.unique(type_list).shape[0] + sp_norm = np.linalg.norm(spin, axis=1, keepdims=True) + sp_norm = np.where(sp_norm == 0, 1, sp_norm) + sp_unit = spin/sp_norm + with open(file_name, "w") as f: + f.write(comment_lmp_data + "\n") + f.write("%d atoms\n" % (natom)) + f.write("%d atom types\n" % (ntype)) + f.write(f"{box[0]:.10e} {box[1]:.10e} xlo xhi\n") + f.write(f"{box[2]:.10e} {box[3]:.10e} ylo yhi\n") + f.write(f"{box[4]:.10e} {box[5]:.10e} zlo zhi\n") + f.write(f"{box[6]:.10e} {box[7]:.10e} {box[8]:.10e} xy xz yz\n\nAtoms\n\n") + for i in range(natom): + f.write( + "%d %d %.10e %.10e %.10e %.10e %.10e %.10e %.10e\n" + % (i + 1, type_list[i], coord[i][0], coord[i][1], coord[i][2], sp_unit[i][0], sp_unit[i][1], sp_unit[i][2], sp_norm[i][0]) + ) + f.write("\n") \ No newline at end of file diff --git a/source/tests/infer/deepspin_nlist-2.pbtxt b/source/tests/infer/deepspin_nlist-2.pbtxt new file mode 100644 index 0000000000..6c086f1991 --- /dev/null +++ b/source/tests/infer/deepspin_nlist-2.pbtxt @@ -0,0 +1,22628 @@ +node { + name: "train_attr/min_nbor_dist" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 0.3999999935274064 + } + } + } +} +node { + name: "train_attr/training_script" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "{\"model\":{\"type_map\":[\"Ni\",\"O\"],\"descriptor\":{\"type\":\"se_e2_a\",\"sel\":[60,60],\"rcut_smth\":5.4,\"rcut\":5.6,\"neuron\":[20],\"resnet_dt\":false,\"axis_neuron\":16,\"type_one_side\":true,\"precision\":\"float64\",\"seed\":222,\"activation_function\":\"tanh\",\"trainable\":true,\"exclude_types\":[],\"env_protection\":0.0,\"set_davg_zero\":false},\"fitting_net\":{\"neuron\":[20],\"resnet_dt\":true,\"precision\":\"float64\",\"seed\":222,\"type\":\"ener\",\"numb_fparam\":0,\"numb_aparam\":0,\"activation_function\":\"tanh\",\"trainable\":true,\"rcond\":null,\"atom_ener\":[],\"use_aparam_as_mask\":false},\"spin\":{\"use_spin\":[true,false],\"virtual_len\":[0.4],\"spin_norm\":[1.2737]},\"data_stat_nbatch\":10,\"data_stat_protect\":0.01,\"data_bias_nsample\":10,\"pair_exclude_types\":[],\"atom_exclude_types\":[],\"preset_out_bias\":null,\"srtab_add_bias\":true,\"type\":\"standard\"},\"learning_rate\":{\"type\":\"exp\",\"decay_steps\":10000,\"start_lr\":0.001,\"stop_lr\":5.92e-06,\"scale_by_worker\":\"linear\",\"decay_rate\":null},\"loss\":{\"type\":\"ener_spin\",\"start_pref_e\":0.02,\"limit_pref_e\":1,\"start_pref_fr\":1000,\"limit_pref_fr\":1.0,\"start_pref_fm\":10000,\"limit_pref_fm\":10.0,\"start_pref_v\":0,\"limit_pref_v\":0,\"start_pref_ae\":0.0,\"limit_pref_ae\":0.0,\"start_pref_pf\":0.0,\"limit_pref_pf\":0.0,\"enable_atom_ener_coeff\":false},\"training\":{\"training_data\":{\"systems\":[\"../../data/data_0/\"],\"batch_size\":1,\"auto_prob\":\"prob_sys_size\",\"sys_probs\":null},\"validation_data\":{\"systems\":[\"../../data/data_1/\"],\"batch_size\":1,\"numb_btch\":10,\"auto_prob\":\"prob_sys_size\",\"sys_probs\":null},\"numb_steps\":10,\"seed\":222,\"disp_file\":\"lcurve.out\",\"disp_freq\":5000,\"save_freq\":10000,\"save_ckpt\":\"model.ckpt\",\"max_ckpt_keep\":5,\"change_bias_after_training\":false,\"disp_training\":true,\"time_training\":true,\"profiling\":false,\"profiling_file\":\"timeline.json\",\"enable_profiler\":false,\"tensorboard\":false,\"tensorboard_log_dir\":\"log\",\"tensorboard_freq\":1,\"opt_type\":\"Adam\"}}" + } + } + } +} +node { + name: "model_type" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "original_model" + } + } + } +} +node { + name: "t_box" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "t_coord" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "t_type" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "t_natoms" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "shape" + value { + shape { + dim { + size: 5 + } + } + } + } +} +node { + name: "t_mesh" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "model_attr/tmap" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "Ni O" + } + } + } +} +node { + name: "model_attr/model_type" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "ener" + } + } + } +} +node { + name: "model_attr/model_version" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "1.1" + } + } + } +} +node { + name: "strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice/stack" + input: "strided_slice/stack_1" + input: "strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul" + op: "Mul" + input: "strided_slice" + input: "mul/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape/shape" + op: "Pack" + input: "Reshape/shape/0" + input: "mul" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape" + op: "Reshape" + input: "t_coord" + input: "Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_1" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_1/stack" + input: "strided_slice_1/stack_1" + input: "strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_1/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_1/shape" + op: "Pack" + input: "Reshape_1/shape/0" + input: "strided_slice_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_1" + op: "Reshape" + input: "t_type" + input: "Reshape_1/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "spin_attr/ntypes_spin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "spin_attr/virtual_len" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + } + double_val: 0.4 + } + } + } +} +node { + name: "spin_attr/spin_norm" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + } + double_val: 1.2737 + } + } + } +} +node { + name: "descrpt_attr/rcut" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 5.6 + } + } + } +} +node { + name: "descrpt_attr/ntypes" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "descrpt_attr/sel" + op: "Const" + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "<\000\000\000<\000\000\000<\000\000\000" + } + } + } +} +node { + name: "descrpt_attr/original_sel" + op: "Const" + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "<\000\000\000<\000\000\000<\000\000\000" + } + } + } +} +node { + name: "descrpt_attr/t_avg" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 3 + } + dim { + size: 720 + } + } + tensor_content: "\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\020\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "descrpt_attr/t_avg/read" + op: "Identity" + input: "descrpt_attr/t_avg" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@descrpt_attr/t_avg" + } + } + } +} +node { + name: "descrpt_attr/t_std" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 3 + } + dim { + size: 720 + } + } + tensor_content: "\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?|\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?" + } + } + } +} +node { + name: "descrpt_attr/t_std/read" + op: "Identity" + input: "descrpt_attr/t_std" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@descrpt_attr/t_std" + } + } + } +} +node { + name: "strided_slice_3/stack" + op: "Const" + input: "^descrpt_attr/original_sel" + input: "^descrpt_attr/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_3/stack_1" + op: "Const" + input: "^descrpt_attr/original_sel" + input: "^descrpt_attr/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_3/stack_2" + op: "Const" + input: "^descrpt_attr/original_sel" + input: "^descrpt_attr/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_3" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_3/stack" + input: "strided_slice_3/stack_1" + input: "strided_slice_3/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_1/y" + op: "Const" + input: "^descrpt_attr/original_sel" + input: "^descrpt_attr/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul_1" + op: "Mul" + input: "strided_slice_3" + input: "mul_1/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_2/shape/0" + op: "Const" + input: "^descrpt_attr/original_sel" + input: "^descrpt_attr/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_2/shape" + op: "Pack" + input: "Reshape_2/shape/0" + input: "mul_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_2" + op: "Reshape" + input: "Reshape" + input: "Reshape_2/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_3/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\t\000\000\000" + } + } + } +} +node { + name: "Reshape_3" + op: "Reshape" + input: "t_box" + input: "Reshape_3/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_4/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_4/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_4/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_4" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_4/stack" + input: "strided_slice_4/stack_1" + input: "strided_slice_4/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_4/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_4/shape" + op: "Pack" + input: "Reshape_4/shape/0" + input: "strided_slice_4" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_4" + op: "Reshape" + input: "Reshape_1" + input: "Reshape_4/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "ProdEnvMatA" + op: "ProdEnvMatA" + input: "Reshape_2" + input: "Reshape_4" + input: "t_natoms" + input: "Reshape_3" + input: "t_mesh" + input: "descrpt_attr/t_avg/read" + input: "descrpt_attr/t_std/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "rcut_a" + value { + f: -1.0 + } + } + attr { + key: "rcut_r" + value { + f: 5.599999904632568 + } + } + attr { + key: "rcut_r_smth" + value { + f: 5.400000095367432 + } + } + attr { + key: "sel_a" + value { + list { + i: 60 + i: 60 + i: 60 + } + } + } + attr { + key: "sel_r" + value { + list { + i: 0 + i: 0 + i: 0 + } + } + } +} +node { + name: "Reshape_7/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\320\002\000\000" + } + } + } +} +node { + name: "Reshape_7" + op: "Reshape" + input: "ProdEnvMatA" + input: "Reshape_7/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "o_rmat" + op: "Identity" + input: "Reshape_7" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_rmat_deriv" + op: "Identity" + input: "ProdEnvMatA:1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_rij" + op: "Identity" + input: "ProdEnvMatA:2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_nlist" + op: "Identity" + input: "ProdEnvMatA:3" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_5/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_5/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_5/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_5" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_5/stack" + input: "strided_slice_5/stack_1" + input: "strided_slice_5/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_8/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_8/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 720 + } + } + } +} +node { + name: "Reshape_8/shape" + op: "Pack" + input: "Reshape_8/shape/0" + input: "strided_slice_5" + input: "Reshape_8/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_8" + op: "Reshape" + input: "o_rmat" + input: "Reshape_8/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_9/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\320\002\000\000" + } + } + } +} +node { + name: "Reshape_9" + op: "Reshape" + input: "Reshape_8" + input: "Reshape_9/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Slice/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice" + op: "Slice" + input: "Reshape_9" + input: "filter_type_all/Slice/begin" + input: "filter_type_all/Slice/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Shape" + op: "Shape" + input: "filter_type_all/Slice" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "filter_type_all/strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "filter_type_all/strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice" + op: "StridedSlice" + input: "filter_type_all/Shape" + input: "filter_type_all/strided_slice/stack" + input: "filter_type_all/strided_slice/stack_1" + input: "filter_type_all/strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "filter_type_all/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape" + op: "Reshape" + input: "filter_type_all/Slice" + input: "filter_type_all/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Slice_1/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_1/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_1" + op: "Slice" + input: "filter_type_all/Reshape" + input: "filter_type_all/Slice_1/begin" + input: "filter_type_all/Slice_1/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_1/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_1" + op: "Reshape" + input: "filter_type_all/Slice_1" + input: "filter_type_all/Reshape_1/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/matrix_1_0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + dim { + size: 20 + } + } + tensor_content: "3\302\227\242\212\205\253?\336\212\322\207\306\005\260?\031\'\327\324d2\304?\271\\D\003@;\274\277\363\0227\364\2076\320\277\231\013-\303\253\234\240\277H\314\026q\376r\305?\nEq\257\232?\321\277S\251y^+\244\266\277S\247\200\270\256\341\305\277\320\226\311\016\334\215\317?za#;\273F\327?W\37442K\215\301\277EqI\323\240\374\261\277ry\005\274Sn\241?\273\222\276S\336\234\256\277\003\313\225\221\263\327\305\277\347\221q\034h\013\313?\202\005\245\"\357\003\335\277\375\327\351\312\340&\322?" + } + } + } +} +node { + name: "filter_type_all/matrix_1_0/read" + op: "Identity" + input: "filter_type_all/matrix_1_0" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/matrix_1_0" + } + } + } +} +node { + name: "filter_type_all/bias_1_0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 20 + } + } + tensor_content: "\215S\242,\236M\313\277\"z?3\255\354\332\277\032>\3527\016\206\332?\374\\du\245\221\270?U\240k\261\233l\346?\201\276\204\r\374y\352?\327\260\013{\211\215\000\300~\347B\200^1\361\277x!J\317\325\033\356\277\312\243\\u\270\202\322?\263\002\262p\337>\335?t\245`R\3144\372\277\247\341\022\005(\030\312\277XN\331 \271f\340?4K4\003u\275\273\277\325*\355\\\202\313\366?4]\200^\301Z\373?\374\005W\230\353\n\317?&\355G\254\277~\376\2775\240\373\027g\034\370\277" + } + } + } +} +node { + name: "filter_type_all/bias_1_0/read" + op: "Identity" + input: "filter_type_all/bias_1_0" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/bias_1_0" + } + } + } +} +node { + name: "filter_type_all/MatMul" + op: "MatMul" + input: "filter_type_all/Reshape_1" + input: "filter_type_all/matrix_1_0/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: false + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all/BiasAdd" + op: "BiasAdd" + input: "filter_type_all/MatMul" + input: "filter_type_all/bias_1_0/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all/Tanh" + op: "Tanh" + input: "filter_type_all/BiasAdd" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_2/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_2" + op: "Reshape" + input: "filter_type_all/Tanh" + input: "filter_type_all/Reshape_2/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_3/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377<\000\000\000\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_3" + op: "Reshape" + input: "filter_type_all/Reshape_2" + input: "filter_type_all/Reshape_3/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_4/shape/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 60 + } + } + } +} +node { + name: "filter_type_all/Reshape_4/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 4 + } + } + } +} +node { + name: "filter_type_all/Reshape_4/shape" + op: "Pack" + input: "filter_type_all/strided_slice" + input: "filter_type_all/Reshape_4/shape/1" + input: "filter_type_all/Reshape_4/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "filter_type_all/Reshape_4" + op: "Reshape" + input: "filter_type_all/Slice" + input: "filter_type_all/Reshape_4/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/MatMul_1" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_4" + input: "filter_type_all/Reshape_3" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "filter_type_all/Slice_2/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\360\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_2/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_2" + op: "Slice" + input: "Reshape_9" + input: "filter_type_all/Slice_2/begin" + input: "filter_type_all/Slice_2/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Shape_1" + op: "Shape" + input: "filter_type_all/Slice_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "filter_type_all/strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "filter_type_all/strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice_1" + op: "StridedSlice" + input: "filter_type_all/Shape_1" + input: "filter_type_all/strided_slice_1/stack" + input: "filter_type_all/strided_slice_1/stack_1" + input: "filter_type_all/strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "filter_type_all/Reshape_5/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_5" + op: "Reshape" + input: "filter_type_all/Slice_2" + input: "filter_type_all/Reshape_5/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Slice_3/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_3/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_3" + op: "Slice" + input: "filter_type_all/Reshape_5" + input: "filter_type_all/Slice_3/begin" + input: "filter_type_all/Slice_3/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_6/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_6" + op: "Reshape" + input: "filter_type_all/Slice_3" + input: "filter_type_all/Reshape_6/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/matrix_1_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + dim { + size: 20 + } + } + tensor_content: "\034\327\377\272\276\021\330?\210\207\006\236\303\250\227?\325\357\214\225\020\212\321\277\227\314\'y\236 \306\277YOo\355](\244?\035I\204\351m\330\304?j\025?\332\311LQ?\367:e\263\336X\240?\244\237\216^\367\034\314\277`\2560\353\177\304\306\277.q\3648\337\323\315?T{\311;\022\031\312\277\243\225\363\377\355\363\327\277ECU\017\215h\240\277ts\357\370\353m\250\277`\0214\224\177>\301?\n\315\t\263^\312v?R\336Y\025\r\367\246?\260,\224\223\371\354\313\277<\300\366\256h\206\247\277" + } + } + } +} +node { + name: "filter_type_all/matrix_1_1/read" + op: "Identity" + input: "filter_type_all/matrix_1_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/matrix_1_1" + } + } + } +} +node { + name: "filter_type_all/bias_1_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 20 + } + } + tensor_content: "\360\332\331J\303\025\260\277\237\345\242+\270C\367\277P\351\226\2079\344\334?O\177\002\353\t\016q?\t\313.d_}\370\277\375\360\370v\275(\354\277\211\3158\326b\336\316\277V6\345\345\306h\352\2776R\3059\311!\360\277\373\217\032\224\353;\373?0\243\337lB)\332?\377\t\314\260\033\352\332\277\2753\353\007\363\010\356?\372\365\220]\342\256\310\277\3308-Q\013\350\315?\222\374swZ-\336?\010\362\363\034C\363\374?\332\350W\376\235\273\266\277\237\243W\210N\245\340?\003\037(\362w\332\326\277" + } + } + } +} +node { + name: "filter_type_all/bias_1_1/read" + op: "Identity" + input: "filter_type_all/bias_1_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/bias_1_1" + } + } + } +} +node { + name: "filter_type_all/MatMul_2" + op: "MatMul" + input: "filter_type_all/Reshape_6" + input: "filter_type_all/matrix_1_1/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: false + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all/BiasAdd_1" + op: "BiasAdd" + input: "filter_type_all/MatMul_2" + input: "filter_type_all/bias_1_1/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all/Tanh_1" + op: "Tanh" + input: "filter_type_all/BiasAdd_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_7/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_7" + op: "Reshape" + input: "filter_type_all/Tanh_1" + input: "filter_type_all/Reshape_7/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_8/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377<\000\000\000\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_8" + op: "Reshape" + input: "filter_type_all/Reshape_7" + input: "filter_type_all/Reshape_8/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_9/shape/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 60 + } + } + } +} +node { + name: "filter_type_all/Reshape_9/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 4 + } + } + } +} +node { + name: "filter_type_all/Reshape_9/shape" + op: "Pack" + input: "filter_type_all/strided_slice_1" + input: "filter_type_all/Reshape_9/shape/1" + input: "filter_type_all/Reshape_9/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "filter_type_all/Reshape_9" + op: "Reshape" + input: "filter_type_all/Slice_2" + input: "filter_type_all/Reshape_9/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/MatMul_3" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_9" + input: "filter_type_all/Reshape_8" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "filter_type_all/Slice_4/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\340\001\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_4/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_4" + op: "Slice" + input: "Reshape_9" + input: "filter_type_all/Slice_4/begin" + input: "filter_type_all/Slice_4/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Shape_2" + op: "Shape" + input: "filter_type_all/Slice_4" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "filter_type_all/strided_slice_2/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "filter_type_all/strided_slice_2/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice_2/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice_2" + op: "StridedSlice" + input: "filter_type_all/Shape_2" + input: "filter_type_all/strided_slice_2/stack" + input: "filter_type_all/strided_slice_2/stack_1" + input: "filter_type_all/strided_slice_2/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "filter_type_all/Reshape_10/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_10" + op: "Reshape" + input: "filter_type_all/Slice_4" + input: "filter_type_all/Reshape_10/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Slice_5/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_5/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_5" + op: "Slice" + input: "filter_type_all/Reshape_10" + input: "filter_type_all/Slice_5/begin" + input: "filter_type_all/Slice_5/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_11/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_11" + op: "Reshape" + input: "filter_type_all/Slice_5" + input: "filter_type_all/Reshape_11/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/matrix_1_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + dim { + size: 20 + } + } + tensor_content: "\303\243\030\021x\337\263\277\340\233\023\\ES\300??\237\235\375:\245\322?t\035K\264\244\243\265?I\220\211\347\373\376\311\277V\036\375\t\233\375\300?\314\003\331\243n\331\266?TE\205Y\225\205\257?\323g\275\226\223p\323\277\227\346\347;\005\322\274\277\005?\217<%L\264\277\000\250\345\334\347\275\327\277\234\200\307\204\360\311\301?\0244\250kJb\333\277H\303x\352\r\002\323\277\276{\361\232\232i\265?d\230\300Q\251\306\234?\276nn%^\201\311?\207\030\201yI#\232?M\020\273\316J\312\270\277" + } + } + } +} +node { + name: "filter_type_all/matrix_1_2/read" + op: "Identity" + input: "filter_type_all/matrix_1_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/matrix_1_2" + } + } + } +} +node { + name: "filter_type_all/bias_1_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 20 + } + } + tensor_content: "\335\376\213_\036A\360?\313\016@g\001\271\001@d\205\316V[\205\256?\035V\217\266\257\336\363\277\261U\343\351\235;\362?\276\351.y2J\360?\243\337\004\316\322p\343\277\234\016{\350\270\303\360?J\321\343\336\261\254\251?\217\376\270t\263\225\265\277mY$\267\366\265\277?S`\t\265\372\r\005@\020~\0209i\225\304\277\214\206\024\261+\320\302\277\303\203\016c\272\311\312?\004\252US\004\t\366?#\272\371>\303v\010\300\025K\nz\373\236\214\277 \236H\034aP\307\277\026\223,\347_T\360\277" + } + } + } +} +node { + name: "filter_type_all/bias_1_2/read" + op: "Identity" + input: "filter_type_all/bias_1_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/bias_1_2" + } + } + } +} +node { + name: "filter_type_all/MatMul_4" + op: "MatMul" + input: "filter_type_all/Reshape_11" + input: "filter_type_all/matrix_1_2/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: false + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all/BiasAdd_2" + op: "BiasAdd" + input: "filter_type_all/MatMul_4" + input: "filter_type_all/bias_1_2/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all/Tanh_2" + op: "Tanh" + input: "filter_type_all/BiasAdd_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_12/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_12" + op: "Reshape" + input: "filter_type_all/Tanh_2" + input: "filter_type_all/Reshape_12/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_13/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377<\000\000\000\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_13" + op: "Reshape" + input: "filter_type_all/Reshape_12" + input: "filter_type_all/Reshape_13/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_14/shape/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 60 + } + } + } +} +node { + name: "filter_type_all/Reshape_14/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 4 + } + } + } +} +node { + name: "filter_type_all/Reshape_14/shape" + op: "Pack" + input: "filter_type_all/strided_slice_2" + input: "filter_type_all/Reshape_14/shape/1" + input: "filter_type_all/Reshape_14/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "filter_type_all/Reshape_14" + op: "Reshape" + input: "filter_type_all/Slice_4" + input: "filter_type_all/Reshape_14/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/MatMul_5" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_14" + input: "filter_type_all/Reshape_13" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "filter_type_all/AddN" + op: "AddN" + input: "filter_type_all/MatMul_1" + input: "filter_type_all/MatMul_3" + input: "filter_type_all/MatMul_5" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/truediv/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 180.0 + } + } + } +} +node { + name: "filter_type_all/truediv" + op: "RealDiv" + input: "filter_type_all/AddN" + input: "filter_type_all/truediv/y" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Slice_6/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_6/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377\377\377\377\377\020\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_6" + op: "Slice" + input: "filter_type_all/truediv" + input: "filter_type_all/Slice_6/begin" + input: "filter_type_all/Slice_6/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/MatMul_6" + op: "BatchMatMulV2" + input: "filter_type_all/truediv" + input: "filter_type_all/Slice_6" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "filter_type_all/Reshape_15/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377@\001\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_15" + op: "Reshape" + input: "filter_type_all/MatMul_6" + input: "filter_type_all/Reshape_15/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape_2" + op: "Shape" + input: "Reshape_8" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_9/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_9/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_9/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_9" + op: "StridedSlice" + input: "Shape_2" + input: "strided_slice_9/stack" + input: "strided_slice_9/stack_1" + input: "strided_slice_9/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_10/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_10/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_10/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_10" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_10/stack" + input: "strided_slice_10/stack_1" + input: "strided_slice_10/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_12/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 320 + } + } + } +} +node { + name: "Reshape_12/shape" + op: "Pack" + input: "strided_slice_9" + input: "strided_slice_10" + input: "Reshape_12/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_12" + op: "Reshape" + input: "filter_type_all/Reshape_15" + input: "Reshape_12/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "concat_1/concat" + op: "Identity" + input: "Reshape_12" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_descriptor" + op: "Identity" + input: "concat_1/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "fitting_attr/dfparam" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "fitting_attr/daparam" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "fitting_attr/t_bias_atom_e" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "d[\236\207\317\263\033\300d[\236\207\317\263\013\300" + } + } + } +} +node { + name: "fitting_attr/t_bias_atom_e/read" + op: "Identity" + input: "fitting_attr/t_bias_atom_e" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@fitting_attr/t_bias_atom_e" + } + } + } +} +node { + name: "strided_slice_13/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_13/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_13/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_13" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_13/stack" + input: "strided_slice_13/stack_1" + input: "strided_slice_13/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_14/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_14/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 320 + } + } + } +} +node { + name: "Reshape_14/shape" + op: "Pack" + input: "Reshape_14/shape/0" + input: "strided_slice_13" + input: "Reshape_14/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_14" + op: "Reshape" + input: "o_descriptor" + input: "Reshape_14/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_14/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_14/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_14/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_14" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_14/stack" + input: "strided_slice_14/stack_1" + input: "strided_slice_14/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_15/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_15/shape" + op: "Pack" + input: "Reshape_15/shape/0" + input: "strided_slice_14" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_15" + op: "Reshape" + input: "t_type" + input: "Reshape_15/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_16/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_16/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "strided_slice_16/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_16" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_16/stack" + input: "strided_slice_16/stack_1" + input: "strided_slice_16/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "Const" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "Sum" + op: "Sum" + input: "strided_slice_16" + input: "Const" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "Slice_2/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_2/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_2/size" + op: "Pack" + input: "Slice_2/size/0" + input: "Sum" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_2" + op: "Slice" + input: "Reshape_15" + input: "Slice_2/begin" + input: "Slice_2/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GreaterEqual_1/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "GreaterEqual_1" + op: "GreaterEqual" + input: "Slice_2" + input: "GreaterEqual_1/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_1" + op: "Cast" + input: "GreaterEqual_1" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_BOOL + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Reshape_17/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_17" + op: "Reshape" + input: "Slice_2" + input: "Reshape_17/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_17/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_17/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "strided_slice_17/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_17" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_17/stack" + input: "strided_slice_17/stack_1" + input: "strided_slice_17/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Slice_3/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_3/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_3/size/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_3/size" + op: "Pack" + input: "Slice_3/size/0" + input: "strided_slice_17" + input: "Slice_3/size/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_3" + op: "Slice" + input: "Reshape_14" + input: "Slice_3/begin" + input: "Slice_3/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Reshape_18/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377@\001\000\000" + } + } + } +} +node { + name: "Reshape_18" + op: "Reshape" + input: "Slice_3" + input: "Reshape_18/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_0_type_0/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 320 + } + dim { + size: 20 + } + } + tensor_content: "\352\3313xul\255?\234;gSao\271\277\361\014\232\200\200a\301\277\351\342\214\250\376\016\252\277\273\221\205\373(\321\251\277|&t\t\363\257\241\277\201\353@\215\230\021z\277\377;0Dj2y\277@.\321\320*&\232\277c\377P\003\261\345\241\277\0308uF\255\353\222\277\370\325\355\235\3542\227?\000ea\265\241~\255?/\347\340\314s\324\245\277\353p\022vQ\340\262?\265\253\330us%\262?\317\264\003\310+\237\262?i\343v+f\245\212\277\334\351\260Q\376\275\251\277\340\030\247\033\272\212\210?O\215l\224\365\003U\277;\005\202\2001\305s\277\306\031\361\221z\326\231\2775\242c\250\266\3247\277\204IS%\231=\246?\242@\2720\266\303\244?MW\273\346\302\351\235?\306\302\376-76\240\277CLX\247m\020\220?\247\237\363\235\"\216\273?\230;\033\333\235\324\264?U\035\261\261\315\033h\277\'\335o\377\266\255}?ui\037\013\215W\270\277\377\371\365\350\023\336\215?\312`\201B\334\205\262\277Pck\225:\264\261\277RL\365\355\3300\230?=\216\\pY\004\234\277\224I8\234R>\234\277\365\3056\342\025\r\252?\301\230\334\213\251\335s?Py\266\254\273\202\241\277dq\337\375o\006\233\277\035e;\323\262:\252?\307\317\2134R\373\257\277\235\245_\033G\325\264?C\010}a\362\253\253?\337d\003\313\316N\264?\023\220\244G\022\230\243\277\004[\255\177\260<\240?0\334\314\352\027\256\227?\260\006\364\303\344\241\266?\234\007)\272\3764d\277M\000\017\256$\264\301\277G\031w\320\246\334\207?\352\207z\"\000\\?n\370LYy\230\227\277\216\330\266n{\244\257\277R\376\204:r\027\217\277\332\243M\354\272U\236?\031 \254\"\357\371\252?o(\354\305\311\267\253?\305\234\302Z>J\275?-fW|\361\265\251\277\333\231J+?\252\240?\241\371:\255F\024\232?\221\244#\303\223As\277{Z\033\267kT\252?\305\374\007K\302P4\277\231\2177\231\211\014\273\277\212k\347\245\2522\257\277#\360\272l\024\n\226?\261\267g3\212U\260\277\311+v\227\202\245\241?\315\022\"\326\251$\267\277E\266\347T@]m\277vO+\211\251\356\231?\221\365\224\214)\200\236?t\005\324\262\306c\243\277\030\261\213\346<\317\225\277\231\rV\357a\365\234\277\332\265\355\027`\273\274\277\230\242\372\027\222\210\252\277%&\t\265=n\267?\221\302\3705\206\340\245\277}\273y\2208\\\223\277\261G\243\244*\256{\277\277\251\201\177L\035\241\277\342\314\347\2337\014\225\277\212\347\332\237[|\263\277\324{\034[5\342\240\277\031xx\253\235\213\260?\347\334\222/m\203\255\277\366~\321\3723\316\225\277 \006\327@T3\220?\333\222lj\354\235\245\277\372\227\312\342I\010\247?\360J\267\240\267\245\232?\214\255A\347\003u\242?\354\004\363\357\335\300\245?\323\214\370\206^E\241?%\345cq\203\223\245\277\221\303^\021\017h\234?\240Y\274wV\227\263\277\005\356\013\2707\375\261\277\251\301-\200-\232\240?+\317\370\332\177\030\244?\016\252\203~\033n\241\277\356\240\377\302F\364\274\2772\244\312g.(\232\277,\\\345\241\024\210\253\277+\320<\215\275\220\203?\305\254\360S\345\310\266\277\307\301B?\304\225\241?\324\256\330\211\241H\177\277\257N\314\237\256\306\240\277\247\022\t&3f\241\277\340aP0UP\237?\377\212\321\321B\360\227\277\r68h\317@\261\277U\316\203t\345\340\246\277\350\332\351\337\324=\247?W\221\273\341\220\034\262?\214\340\000\323\207^\272\277B\014/m\300\273\251\277\354\375D\014J+\250\277\310\376\030\237\007\372\271?\351\307\226\245\335\367\256?\261\237f\007\030pu\277!\327`t\376\224\265?c\357\246x\226;\266\277\010\371mc\341w\243?7(G\014#\376\255?O\342\030~)=\266?\312`Y\257\361x\201?\376\326\275\347\201\345\270\277\227\252\361Ol\r\251?T\351\327\330{R\253\277\376\275\335\037`\243\243\277\376\017\006<-}\206?H}aR\017R\264\277\303\205\374h\3346\225?\377\306x\354\274_R?\004\236S\032\322d\227?\242\263\271\304\337\'\220\277\313\202\364\347\243\372\252\277\323\037)(y?\246\277\241\245$Yi\210\200?\334\363\251\274q\322\235?{Ys\343<0\254\277\311\351\367\031L\003\226\277G\033\220z\010\250\252\277q\352\350\037\314\372\272?\377\231E\tm\216\227?\035<\372\226\210\244\225?\263b\036\272\266\320\256?C\257l\231#\034\246?\333\357s7\251\312\260\2773\230\217\215\354\257\201?\260Z\366\325\373T5?\351{\321\357P\253\244\277\300/\304\036\252\177\272\277\2318P\355\t\356f?\352\267vq\240s\266?#\345\275\021\230i\252?C\202\216U\030\260\261?/L\000\300\341\340r?\214\257x\217\224i\251\277\031n\324\316\243t\260?\313\335\035\tf\014u\277\3535\303,\030\231\254\277\205W\024\263\201\327\242\277\225iU\316\254\002\205\277(\313\252\211m\230\240?\316\341\255\2138f\234?\335\256\236Q\006\363T?\276\374*I%=\302\277\276\017\313\325F\346\241\277\235\304J\240P\302\244?\336\231T\373\024\t\204?\277\377\223\272\350\245\234\277\023i?\340\321\024\246?\025\257\305\266\256g\250\277\017\023Vo\333<\207?\007\214\214D\205`\232?\323\263\034N\014\235\216\277\017s \306\220\247\272\277\204\240B\333e\261\233?\347\326sf\025P\243\277n\022\370)\323\'\224?\306o/\356\217\325\214\277r\365\376\231\220b\301\277\002\270w=.\262\227\277W\344\200\203\341/\266?:\035\n\226\307Dl?\333\277\327\205\0333\244?\177\265K\364\312\315\244\277n-9.\2130\252?\205\302\\i\217#\205\277\247O07\362,\203\277o\335)\263K\312\302?\316\333\013\261\231\322\232\277(\235M2\327\350\250\277\270\004e\243\365\264\225\2773\203)\371\010\311\241?\246\274A*r$\274\2779i\254*{A\265\277\312\256:gV\213\255?\254\010@\310T\351\235\277\036\017^kYP\273?\366\336\036U\237\317v?U\323\327@\223\355\247\277\031\355N\237\274\206\023?\333\016\241\241\276\236\247\277\031\302\n\337&|\260\277\263\262Y\200A\202\250?\017\340\265\016\365\016\275\2778\336\0279\377\246\261\277=om\177\005\224\231\277\270#uz\356X\207\277\375OG\354\034\225\261\277\\}m\177~_\265?yK\230d\216\301\243\277\234S\234F\367\336\267\277\351\346)\367\245\364\272\277\\/\306g\n\031\207\277\220\362d\333:T\206?\254^\253\367\n\264\243?8\225#\324\010\302\257\277`\336\311@\351\265\257?\033.\373\030\205\002\220?A\276\207\333\224\177b?\253*j\326\235\330\254\277\331w\345\332Wz\273?\331[\224\352Q\251\215?\322\301-\360\302c\201?U\030,\371C5\251\277\271x$\214\031\274\252?\247\021t\365\234\217\256\277\266/\177\003\337\365\237?\0249(\243\234\260\267?)\"\372\302\246\202\263?F\256\265Pt\211w\277.\215\315\247\354/\273?X\363\270\311\210\002y\277\253\032\264\356\234\366\221\277\322\303 \367\201\020\264\277`\275\307\350\304\031\271?\305\244\364\374\217\302\245?\240^\324\022v-\265\277\013\030$sA\241\233? 8\n\366\030\376\272\2779\272\222\202\327\005\253?\361\364\376i=\342\205\277\243]cQt\236\253\277K\344\332l\353\207\214\277\237\255\305\'\004\336\035?Jl\235\350|\036\215?\2348 \375\372\247\237\277\014\344\360\365x\316\253\277\313`\n\373m`\204\277\312\260]\310\260?iJ,\325\225\352\245?\363\016 \367\276z\250?\n\373E\266\342\227\250\277\317\240\313\343\312\221\242?$T:\240dj\254\277\004\236\215C\270\312\261?\021\n\341\n:\325\255\277\217\356\370\215\373@\275?\255\257\346{\351\010\260?\t\035o\001C\026\257\277<\371T>8\346\272\277\242\362\237\177K\253\211?\004\027\353\222\036^k\277\024/n\352K!\232\277\210$\262\225-\216\261\277m\023_2\220\361\241\277 {\343\\\235\021\241\277\'\304\372\007\275s\224\277\334\364\244\317\305V\243?n\265\376#\352\010\243\277\201\227\251\032\024\350\242\277,\321\232\223\2504\240?C\310\343\205\215:\200?\350G`\350\r\226\237\277\021\304\257\253C\313\230?|\014\226P\254[\270\277\221\353\315\214\306>\252\277\035\246>a\264\274j?\261\210\3629\025\372\233\277yJH@\212\361\006\014\210\2776\030\244\227Eut\277\327\341\010\304\344~\260?\"\331\311Eq\207\243?\224\275\035\375\021E\253\277\273\323.\241X\231\213\277\276\314\0324\226e\250\277\275\346>(\363\305\240?2L\310o\204K\254?\002\001\'\247\002\025\264\277\266\241\256\376r\263\241?TOL\226g \216\277\252r\n\333\353\227\206\277\021\233\374\210\271\203\177\277F\237Zq,\013\240\277\301\211\350\210\260\342\236?\021s\0015\311\277\261?\227\013\"\315\257\023\244\277\030o\344\332\334B\227?0\206\022D\2426\240?E_v?\004\201R\026\301\244\237?o\333pt\355+\261\2778\307XT[\030\236?e\023\212V\326na\277\3343\203(\010\031\257?7\r\226\233\325\201\243?\202!\2761\327\365\266\277\013\371y\315\'ev?W\rM\255\270\365\255?\033*\037\227\2677\233?\337\302\207\301Lh\210\277\007m\253\346\362\264\211?PG\271>=\036\230?3\245\376\232\352\005\251?\245\347f#xW\240?)\300\376\323\035n\213?\330k#!/^g\277Q\376W~\366\023\240\277J9\316`\264\276\227\277\275d\224\207\373\252\237\277\371\366]\340\342!s\277\372\231\320\240\223\262\244?\'\262\257\263\321\340\273?;\350\305\221\336\344\256\277\025\351B\203\341y\270\277 *\017+\303F\232?\327\262:\336i\227\272?UF\245\364V\347\220?^\224\302\300#\221\271\277\202\330Fy\007C\223\277\341\t\355\316r\222\217\277\214\3557\304Z\013\260\277\306FG\022\303{\250\277\314~\373OgL\233?1\213\310\316\306t\225?\314\036n\331o\270\203\277;\3554h\'k\235\277\217\231\036g\214\251\274\277\377\246\313\255q%\257\277~T}\014C\304\250?\0218\316\330\323\220\240?\256\273B\353\0059\226\277\351ZJ\220\305\354\243\277\246\273\233\241,\302\233\277\346\374V\265\206\024\210\277&\227\315\327\217e\245\277\345\303\211\260\261\004\275?\245\345\034^yZ\255\277\336\243\346lBV|?\326\344>\342\325\343\202?F\\\216\333At}\277\"\342U\345\357\323\223\277\362\r\302\315\226\267\250?Y9._i>\232\277\216\344\203\312\317\266\255?\255\207\237~\235O\230\277+O\0148h\330~\277\006\253\227\253\247\021\264\277\333o\266\013\210K\246\277\177\004\211\n\261\230\243?\030\014\024\262\254\366\252?\025\r\327\014\317p\210\277\376\345h+c6\236\277\364\354\364b\334\210v?K\207;\3332\337\261?\337\331y\367@\000\262\277}\031xJ\371\353\217\2777\333\022\247\241\014\241\277\034\224\300\177\233\031\202?\353;\024\221Q\215\301\277Y\315\225\n\345\231\263?\364\033\303\253kA\300?\265F\365\340F\311\261?|f{g,\262\240?O\211|\035pxt\277\260\276\344\322\225=\226?\007\256M\354c;\215\277\215\0176\252\257\334\263?^\261k\345\353\244\200?e\247}J\343\312\266?\336\227t\374\336\350\247?1\325\237\200F{\244\277\215j \226\216\307\235\277\302\371\021N\177%\242\277\t g\205n7\233\277F\210\021,\264:\260\277\000\253\3254\342\357\233\277\034\373\225\352\307\252L\277\001l\317TyK\204?\261\254\321\215\010\350\221\277A-\311_\365\220\255?.\267\247\3038\225\255?\356\241f\332O5\243?\345\007\217\"\007\271s?u\177\244ji&\241\277\204\361\016(](\221\277\033\204\316\0239\211\300?\367\233\230\351=+\262\277\020T\356 &\252\272?\221\357\265\321K^\251\277E\000Y\351\323y\270\277`\225\264\025\025\037w?(vmk\370\242\247? \313\n60\n\251\277\363\247\210:N\026\227?(\272\004\217I\016\230?\033+\200P\371\357\263\277\375z\267\240\220\373\246\277\332\366\371jmB\261?\316\324\304\363\325\337\227?\355\203\177=\376\331\203?\247+\262\315\376\217n?Z\002\304\215\214\274V?\t\330U8\226\345\275?\262\252;\314;\230\243\277\377:d\376\350G\236\277\361t8u\343P\262\277xA%\032(\354\240\277\330\345V \255\221\257\277\260v14\035\335\246?\351\320b\337\022\264\277V\326%\212Iv\263\2778\212:\373\376X\246\277\303\376R\010\342\302v?\227\n\003\362\254p\263?i)dr\202\201\224\277\214\301\007\333;3k\277\032\n.\320@\023\263?}\271\235\013>\307\266?gd/$\256\004c?\363\254\256\026\262D\255\277[\345\367s\342P:\277\227\300?@%\251\201\277\206\255>\304\274;\253\277L\213\241`\374\261\262?\004\244I\250lQ\252?io\330v\002E\262?\377!\330+&-\240?\004B\314JB\002\234\277\014\357\026\324\303\254\231\277\322rn+\030\032}\277lU\330\267,[\241?e\2425\331\313\211\270\277\265z$\006\357-\267\277\222\326\223\300\323U\260?Z\207\013\211.\376\206?G\221*w\346+\262\277\300\335\275@\274~\266?\231`\362\240p\213\257\277YM-\230\371\260\202\277\245\2320\227\033\276\304\277m2\373d\034Hs?H\206\306\377\224\271\242\277\013C\367x$\336w?%\016\333%\273s\250\277d8\212\324\273$\246?:C+\205\301\354\215?\035\233`\321\020w\240\277\310\360O\030\343\251\257\277\004\205\313r\265\223\261?\315\316z\201G[\206\277\3629\233\014`\364\250\277\231 |\223\365\007\246?\236\321\345\022+\021\236\277z\207u\212E\222\271\2776\360~\215\355R\261?@3\343\356\307\253\257?\360\203Lo\251\315\234\277\016b5j\252\341\266\277V\357\235\246\321\233\240?\322\307\003=\244\216p\277-z\235\352\355\225p?\201\221\352B\337Nx?J\202\271\275\010;\260?.\373~Q\256\026\253\277\005\n5\316\346\214\207?\231\330\325\017\350\205\340\276N\375\200dly\230?\245^\210\333\2342\246?r\263_{\214\030\247\277@\376.d\334\031\226\277\353y\322\272\323\244\265\277`U\332\002\376v\247?\222\304\037\255\205+\253\277\201,\0167S\316\243\277\024I\271>\003\016\274?GA\220ie]\253?\271#\264\341v\306\272\277[\335\247\357\303\361\201\277t[\253\306\206\025\261\277\332\232\006\230\035\243\216?\216;E\033\325n\256\277\311Y\207\245\331\372\264?;\236\224\323f\304\246?\255tsA6\037\257\277\263\243\255\263=\277\252\277\314\3208w$\371\240?Y\356\304\217\206\031\242?\3667\245\027r&\204\277@d\272\'\205N\243?\210395\rw\263\277\256F\203?\206\024\221?\201\327\211\005\034\350\237?EJ\341\264\357K\262?\213\321[9\013\332\265\277\304\021\3761I6w?\337q\337G\225P\262\277\363\316\307j\270tc?\320\224t\324\302\023\245?\252b3\270\tS\271?5\205&\316\335\264\253?$\3001P\240\\\243?u\035\016\324\225\274\237\277&11r3\322\221\277oX\220Af\270\251\277\223\307\262\337|\250\201\277\223fZz{\350N?\200\370\267\361TN\244\277Zy~\244%\340W?v\003:\273E>\265\277\036\351\321\357\213\366\253\277\241W%\252\214\274\266?\254Y\311\202\024\031\200\277\025\261k@M\330\241\277d)\227\321\345\272\254?\220\242\355\367\370g\211\277\210H=\306\006\274\261?\\\223\202\023\205C\246\277|v\231b\321D\232\277\221\351\220)\246\036\265\277\241\340n\221j\247\247?B\214\026;\212^\251\277\004\007\2075J\303\255\277#C\024U\324_\222\277:\353Q\232Q\336\220?`\224\271\301I\306\261\277<%/.\251\350\246?\310\317\004\313D\204\254\277y\007Q}4\354\237?cmc:\030\322\271\277\344O\262\356\345\222\256\277:f\034\231:\212\260?\255{9L\303#\275?\3143\025\273\267\344\225?\262B\327?\367\370\260?W\t\n\201\300\223\236\277~\361\325\027\206\210\262?k%\201\027 !\203?\303\013\255]\024\r\265\277\312\257\370\264\252\202\227\277\027\204\362.\377\223\251\277l\317\005 \024\233\205?\357K(\256\232\364\236\277\350\263J\357\253\001\243\277\240\322gm\023\024\271?\014\014VZ\371\376\203?f\202s\366\362\252\250?\357\230R/\222\230\220\277q\364\021j\346@\240?\370TP\302\276\201q?\023-\227~\275:\247?\265B\013\370\0272s?\330\324\217\217\337\226\240\277oy\033[\260\260\267?YuK\272\252a\243?\2640H\316Ai\215?\367\311\216C\210^\231?\321\014J\247\272h\265?\007\021\345?\r\001\233\277p!\305P:\262\256?2\261\211)\324\204\224\277\014\341\tp\307\224\241?\016#hngO\274?\242!o\373 \255\262\277!\242a\344\357S\206?Q\".\324\352\222\246\277\206ni\003\361\'\257\277^P\212$ye\254\277\033\tZ\3068/\220?\016KH^\244\247p\277\325;&\231?N\226?\354\375\301\367b\022\226?,\031m\247\241\244F\277\357\311\263h\362\024\265\277ef9\342`\321\243?\014l\373\350<\251t?\352\276\257>\255\260\261?\253c\204w\n\034\231?=+2hx?\211\2771k4<\'r\244\277\265\204\r\314\006\205\241\277\207\372&;\252\306\247\277T@@DR\327\231?\237\230\312\357\362\006\251?\277N\026\365\254\316\254\277\026\022\276\351\001\246\245\277\237\303\363\351D(\223\277\271\037,=\034h\251\277\025\342\216\302_\037\231?\013\320ty\037\320\245\2778\247J\251\300\375\205?6\211\331\227{\301\227\277\306On\201\347\275\206?d\204 \260\310\277\247\277a\333\203\324\313\276\236\277\222\356z\214\201\247\260\277C\376g\375C\311\236?\3023\332\252T\370\251\277[\350\351%i\016\246\277\274\317\260X\234m\252\277\270j\241\363qV\252\277\211\275\326?\334\024\263\277\243\010\352\365\355w\247\277\335_0\242\"\274t?\230*a\377\177\200\222?\202Xz\322\354\222\235?\256\264;iK\177\241?7\212\327\370JV\255?-\345Q7\350\255\263?H\354\324M\364d\271?\"u\255\241H\360\227?N\033\327\004\005\357C?o\362j\010\262\177\264\277\346\341\202\345\022\340\223?\226\273:\207s\t\231\277\221\313\247y\257\273\203?S\337\004\001\006\260\263\277\326A!+~\311\240?\353\223\275F\244/\230\277h?.\376\220\306\266\277Jlw\0143\210\271?\226}sA\366_\231?\0322_&\363\246\233\277RaZP/m\246\277~\214\247A\306\t\241\277\342\335\204C\366*\264\277\025\227\212\232\2733\241\277F\033\324J:\030\210?U\345BT\r\330w\277\367\033\325r\217\"\267?\340p\032]\2344\277\277B\311\031\357ft\236?0\276BC@\230\225\277\226\010E/\003\273\232\277\350\276/i\221G\266?\036\273\004^\034\224\263\277\306\370\365H)\000\271?\227\356\363\256\251*\263?D\241\363M\271\003\205?=kG~q\255[?\021\203\275\260b\242\177\277\277\252(\3146\313y?%\253y1\302\215\256\277,\026\000#\204\316d\277\253\352\272\307\201k\262?h42p\341c\205\277\256\310\2644e\257x?\213\037k\020\272\343\244\277\3374VF\260j\247\277(\264\224\225\031$`\277^\307r(=\327\210?\026\205\205\301R\'\267\2775,\243\024\237\216\220\277{\031\201Q\340\027\266\277\360\373+%|g\222?K\211\'\221\004\377\241\277k\004s\263\016\376\253\2773]x\003\233}o?\010`\216\021\346\n\256\277/\337\213o\337\362\214?\"`\301\253\277c\276\277\325a6od\306\210?\311\252\247\375&\203\222?\363R/\033~\237\253\277\027K\333\374\326\027\261\277qH\250\036:%\264\277;\201\2519\330(\206?\365\233\337\265\260\273b?\223\355\322\032\r\021\244\277v\376\246\251\356j\237\277\370 \332\362\316e\257\277\022\244\034\271\375\177\241?\036\276\311\271\242\254\263?\306\316Y\262\003\023\265\277L\324\315m\340M\224\277\212\273L\027\334\216\234?\272\342\033\2009\305\222?\273\023Zi\225\267\234?\033De\3449\251|\277\326\326bM\327\023\251\2770\345\252\0008\\\301?\326\206ZQ\340u\273\2770\370\256]\032\220\221\277\305\276r\241\214\266\242\277|\270h\306Ab\236\2773\247&\276\342\337\251\277g\364\365\000J\304\252\277\376\366W\010\320\343\241?o\247 ,R\013\235\277C\267\374:\311\'\243?\301\310\310\270\004\200\242\277\215p\257\215\363\037v\277\354\344\330R\201\265\257\277\307\266q\023Mk\245\277\004\305$\306\302\235\244\2773v\224j\351\345\251?M\357\023!\266l\301\277\252C\356\\\254 \210\277\317M\256\'\250\022\264?\342\245\247\322\366\306\232\277\340$=\251uX\273?X\360\353]N\234\265\277\0168\207\026hl\234\277\204tB\034*\007\276\277\010\325C\003p\331\177?\254\203!\025g?\204\277\033\307\366\320\\\375\242?\006:\0026\351\033\257\277R\037K\026\320\305\270\277r\265X\332\303\243\244?\355\250\245!\206R\256\277b\310%\370\272a\261?Q\007\375\250\315\241\205\277C\177\021s\t\034\301\277\362\300\244\265c\002\272\277K\340\366\000\262L\226?]\250^\243A\017\267\277~\031\014U\270\324\277\277t}I\221\346\233\252?\227\003\304K\312\374\262\277\355\000\353\341\327\201\254\277\270\200O\316\366z\270?\367\251\240\200\t7\263?\360-\364\001\314\226\253?\371\326&\252b^\246?)\341\302\033\253\303\256?S\226`\330bJ\237\277X_\245J\322\010\271\277N\362\235\312I\310\300?\3203\257\265u\345|\277!\252\231\\\356`\271\277\036\207\350\301\220N\254?\300\220\214\217b\241\251\277\206.\265\032&\352u\277\237\274H\322\033\275\260?`\230-\022v\304\224\277#\224\243\253\241@\263\277Z\367\205\037\330\267\233\277M\273\377\004\265\253\234\277\261\210\341-\356\365\000?\235l@\220\033\343\232\277\265<\002?/\371\242?&\221)$\016\320\260?m%j\032O\211\201?\236\371G\213\347\346\244\277Ah\\\246\315\252\225\277\025\225}\013\377\345\251\277\231\037-\t\326\351\202\277Jw\326\273\224>\215?\236\234\311=\212\200\203?f\271\212\206\251z\272?R\022\350\340%i\261?5{O\210\304\277\232\277\n:\035r\243\"\262?\320|)\224\221G\223?k*\357\370\314\275\255\277\257\t\336\233d\230\261?ftf\231\255\226\226? \023\370$\205e\251?nP\035\315\367v\241\277t\003\3076T\354\251\277U1:\301\363\306\233\277\202\001\357\237\203H\212\277\200\'\245\302\2616\243?h\351S\205ai\203\277\014\023\342\352>\r\242?Zs\2677\\\260\274\277:\322\311nu\024\241\277H\376\223\023\270\355\254\277\305\306O\325\372\342\203?=IPJ\221\\\251?\236M\231\002\034\264\247?\331? \263\212k\216?r\332F\327n\306\240\277\3344\3142\027\330\202\277\273w\257r\301\275\235\277\361\306i\311\014\354\241\277)]u \272:\216\277\274\300\343g\212r\210\277\232\317\014F\036\022\261\277\'\362\325%H\'\244\277\3006\226\024|\033e\277\202\261\356q\330\246\270?R\277\375O\235\376\222?\250?D]~R\261\277I\030\3251W#\226?P\314\010\301\240\310\250\277\267\366\2241\334m^?\206\250\256\364\035\243\267\277\000e\265\373\027y\250?\210\330<\226e\003\274?\272\313\010\277MQ\265?\345+ge\207\301\224?\217\035\240z\227\251j\277\274\243\320\267\375>\244?\177\035\026\021\364\237\252?$(hc\271\200\244\277\'\223\2110M\345\200\2770X\003/\254\316z?\220]y\356\224\314\240\277\207\277^p\366\204\227?\304Y&\310U\320\264\277\302\316\312\367\327l\236\277,\211o\037\020\345\203\277\320\366\347\321\2711\242?\"^\351\207\225B\234?\211\360\014\353O\253\221\277i\002\316\333\024\246\213?\267\032O(\362\306\275?\333v\n6\361\330\253\277\220n\335\327\016\303\260\277\025\231f\n\301\026\241?\303m<\354\340\005\246?=\363\005\365]|\232?\276U\321Z\246 \251\277\343d\371GK\"\222?$\'\2754\203\023\203?\001\245\214}\311W\311?E[|[\010\026\220\277CS\026\324?\361\243\277\247:\232\342\321G\221?\003\317\246\336\265G\205\277\337\210\027\006\324n\225\277\027$\3778G\351\241?\325i\235\334h\274\304\277\\A\316\267EL\271\277\242W\260?G\373\237?<\374-\363\266V\177\277\323\357g)\353\226\250\277\221\263\004R\203[\221?\226\216\177!|\353\240\277\224\036.tE.\264\277~\307\252*\344\353\235?\266\262\242\316\377\262\263\277\316\034\311\004\267\372\262?|\214\302\234Vm\203?\035\006\032\230L]\242?\236\325\362\203\265\223\217?\3102\271f\000\331\240\277\316\223\311\211\230\330\266\277\350c\212`\023\211\250?\231PD\316\307y\264\277\3763\335\'\014\014\265\277Rg\366\220DGo\277\207}B\2741M\254\277\n9\370\024+\240\222\277 u!\335\343T\262\277\326\260\007\352\241\374\256\277\262\352wLA\017\246?\002\246\t\364$\207\246\2779A\345\004`\005\263?\030vc\223\370\347\243\277\227\014\360\343^I\205\277\204\320\357!\246\240k\277\035\352;\225v\203\205\277]\361\030\346-\357\241?\376?\261d`\326f?\253+J\221\300\270\235\277\200\005b^\023\353\260?\236\314\030>@\277t?\271K\311\r\001\313\234?*\033p\217\324~\260\2777\365{\337\203D\257?\317h\014\354q\002r\277\313\247\340\025)@z\277\211:\301U}\201\260\277&%\016\276`I\220?\360\256?\\\006\357z?|\233D\356\232N\267\277\257\243\357\201\310\r\263?\006\273N\364\'\221\230\277e\211Uk\177\005\247\277z\366\003w\271b\226\277m\303\'\025S\024\243?DrT\306]m\247?\257\371\252A]+\212\277\240I3\320\265<\260\277ti\245c\237\374\211?\022#\342\027\271u\261?!u\344\335\260\311w?\357^\306\201\016\367\260?,[\244\003DR\222\277+9\207kC\312~\277\257\032\010.\333\263\235?\3121\001\272`\247\234?\371\303XQ\215k\262?\304\254\377\237\304\006\261\277\275\021\2062\016<\276?\365\210*\tx\211\244\277\206\376m\300v\322\260\277s}&\232\220\241\243\277\255q6\272\300\206\207?\343\332\367\017d\r\271\277\355X\321q\230X\242?\224\345:\'\342N\210\277J\270Y/\303\357\240?\326\013\224\214\253A\245?\233\010\243\361\321\334\207\277T\n\347\212DE\211?\202\020{\266\343_w\277\264\224\204u\306\'\215\277\034O\004\324BS\230\277\017Hyp\340\215\257?\210\303 \376\270\275\260?K\000/\264\354\335\226?\345\032i\273\177\026\250?\212J\333\377\251s\277?v<4\020\277\316\254?\021L\177\322\257h\217\277\232\324\357F9\262\261\277!_\342\257&1\231?\301\301m\220\354$\267\277\016\344\377\002\253\034\252\277\025\266\310\004i\201g\277\216\307\007\323\222#\252\277\246Kkt;\321\263\277y\276\332$\315\273\252?\3404\027\370X_\260\277r\211\225\224lwR?\277\365\260\237\000\253\246\277\010\313\000g\316\n\247\277\370\314\3740=\243\244\277M@<\343\025\204\214\277\341\2648\233\205p\276\277\227\nf\250U\217\223?\nChjFO\276?P\367f\025]}\263\277zz\360\275\273 \247?\035\307\224L\372\212\205\277\307ix\266\0253o?\3478\371.\250\230\204?\207\306#\377\"\242L\277\367\3159\353\211\267\247?v\200\322\214)S\177?\212\325\226c\006\307\242\277\036,Q\201\237\352\216\277\364\203}\005\035,\262?\233\254LdJZ\302?\3207\242\313\304\325\261?\252\225\267\n\353\n\263\277l\320\274k5m\210\277\031\356\377\266$0\241\277\311\302\351\363\"\276\264\277a\221\036&\207\223\236\277\276\276#\003\033P\274?\263\373*d\317\311\244\277~\224\352\262\214\353\222\277\252\270\031\3333Rx\277s\353\316\304\215\233\256\277\372\002\034q\327)\234\277\243\235n\266\265\003\261?\214\305\347\3413\021\300?x\222\030\311la\232\277qc1\343P\207\275?\377\277\315\017\375S\244?(\311\270\364\240\352\230?\374\274\341o3Q\270\277c\303\343\310{\376\254\277-\244\370V#+\246\277\216\336\363\262\034;\255\277w\256\322\023\243t\257\277\350\204?\230\204p~\277;\377/\215\354\326\253?dW!UX\313\225\277\324\313FO\006\344\247\277\273\325\255)D\215\254\277t\212\220\211\212 \243?S\206\257\014\367;T\277\013\245\355\0078%\236?\031)\364+\360;\250\277\376\371F\312o*\240?.0\224\3767(\265\277\216\257\014h\355A\224?\372\337\232\027\303\225\272?\247\320\274\366\321\206\250\277\205P\265\367\300\325\263?\3604\033\246\2347c\277\302&$n\204\254\246\277\\\342\310\222\314\213\230?\364l\241\360\215\003\262\277\233\022\027\025\245\rY?\222n\200\360\231\273\246?\326=\272\266\375\273\231\277\355\005-\'\230\205\252\277T\357\016.q2~?\357\026Q\264\240\375\235\277\376\330J\332\322\2369?\302\201\000\032\341\210w?Po\221\224\265\273\266\277\'#\035\241\302\353\232?\244\272\252\264m\325\274\277U\244%+\224\221\270?\341S\313V\310ut?\315\020\231\033\323C\232\277\314\275\2374\036\312\224?\253\341\317kuR\216\277\026\033\004`*\306\231\277\376\276ni\203\333\261\277\226\177t\353\255\245\205\277\327\277\025f\362\r\262?\336\321R\225\341\373\255?{\025\217\2210l\231\277\030D\323\213\253\233\300?y\221\355#\t\276\220?!(9\33276\206?j\2272\337\006\026\300\277\r\033\211q\033I\250\277\242}\350\333\370\272\266\277\"\355z\225\241s\222?\361\332\254\235\265{\251\277\010\276M\353z\273u?]\267\274\324\374\261\241?\016x!\004\320\235\266?\326G\366\217\235 \224\277s\373|\330z\343\265\277VW\321:q\030\243\277\003A;\360*N\260\277prO\014\177\314\264?&m\363\377\245\322\267\277-\247\000\374lG\241?\367\325\2351\021\010\216?=8\213\027^\027\265\277I1\271c!\335\201?\265&\001jy\264\230?B\032\374\376%\021\255?dj,\323\251\247f\277\241\206\323XJ~\251\277\005\250\220r\343v\235\277\232\323\311\002\274 \256\277\t\023f\270\253\347\203?P~\016M\343M\221\277\177\013\202\217@\313\237?I0K\340-/\240?\361\312\377s\246\277\236\2775\016-\336$\261\200\277\265\\\254\214\314\211|\277mq\200\236FD|\277\364\235\274\\&g\242?\255\242\342WF\016.?\261\314a\211/f\242\2776\036\262\273\261\366\247\2775%:\352\237L\256?\376d\243\241Y\234\235?T\020\024u\021\300i?\310 q\034\206\347a?j\016!\006\215\232\253?\330\t\255\001\307\344\252\277Nc\262\312[\321\212?\326\235]\362]\327\243?\201\32082\313>\237\277\360pXZn\271\252?\207b\304Ut\264\276\277\352\270\326\270:I\237\277\030\373\3728Z\214m?\222\377H\344$\314\261\277\233\3223\244\231>\210\277\250YW7W\222s?r\002\002\211&\312\261\277\256:\214\305W`\245?\314\262\016\366\327+\222?\033kC\367\233b\256\277\361)\377\333\327\374\247?->\3454\327=\220\277V\030\005C\227\035\261\277\233\262`:9\007\205\277\357eS@\365\021\263\277\013s\360\244\035H\237\277\331\n\226@Kt\257\277Uz\0019\334\374\245\277,\323\313\370\034\023\257?\215:@|\363r\275?\312\021*K|\254\252?J\216\016\204\215q\230\277\255\213\346\016\2149\242?b:-\377*&\202\277\033\236}\344@\324\232?L\373\337\255\275\362\225\277L6\247\351`\220\241?E\356\017\274\013\270\220?`P\231F\336\337\222?xtQ\332\373%\232\277\340\363\007J\250\251\306\277\376y\017\365l?\253?.\361\212\377\220^\230\277\206!%\346\232r\273\277n\271\30267\250\236\277\274\005]M\370A\206?\253z\257D3\031\206\277\374\013\2401x$\217?:N\" \350\323\212?\020\321\003\260\222\037\225?\312\320\252N\365\256\222\277\250rg\372\247Z\214\277\220$\326\250J1\265\277^N\016B7w\270?G\360\231\351\355\321\264\277\036\224\303\315\334\016\227\277/Y\207\356%;\233?\214\215\307\323\266]\224\277\222b\270\320L!\205\277O\262\357M\271$\235?+&\331\375\023\206\222?\033\256_S\234\276\240?\246\017\310\352=\277\236\277i\347\304\364\247\302\177?\321\007\217T\024H\231?\r\004\201\335V\373\177?\344\327\242\377vh\204?\253\202\230p\r\342\203?\242\236Fr)w\252\277r\273\357?\243\332\242\277\330W\233J\222\367\217\277\274\3224\275E\202\245?_\320{+\031Q\260\277\361.3\212\367>\264?\370Npp>#V\2771\"\320Kp\001\232\277D\300\364\200\314\365{?4\013\347 \316|\266?\306\334\300i\246,\244?nR\252\324\241\355\247?C\215\244\350\214\344\263?O=x)p\256\236\277$.~\361c\214d?\273\242\276\360O\264H\277(4fZ\004\207\245\277bQ\276\025\321\271\262?\370d$K\224D\247\277\205\351yNp\357\237\277u\342\371\275\025\210\266?CSoEu\r\240?\331c\354\222\021N\204\277I\244\317\370\005{\253\2778\311\243\266\255\373\225?\262\360\014w\347*\240\277=\200\020\267\377\020\236?\241=\336\021E\203\253?\331\010r%\253\362N?[\246\2478&e\240?\ro<\375<\363\263?\2229\260\255_f\214?lSPo\200y\240\277v\177A;G\202\220\277\2448j\2466gF?\314\202\300\365g\327\247?b)\371^\253\332\227?\331%CC\372^\247\277\351\227\023\355\221\334\231?\036\303\341X\335D\237?\230g\307[\332B\265\277\226q\323\263\326\272\247?\014\263&\354yy\262\277\336\341f\005UF\263\277\336\260=<\301G\264?\024\002\025l\210\217\232\277N|%\032\227s\243\277\t!>x\006|\207\277\320\221\277\030J\022\202?D\221\303w\200\325u?\217\213\327\315C\310\256\277\235\253k\266\270@\242\2772\320\000\031|M\255?\000\3075\037d\313\225?c\206*\235\215\"\262\2771\036\300d4\\\225?\355o\234\370su\244?_y\206/\316\005\231\277\377\267\237\310\322\022\251?R\246\244\032\361P\232\277\014\213z\375q\342\264?\221|\022\242\014\221\251\277|\261\263\375\215IV?t\213\2766\305E\267\277\353y\255\233\022M\254\277\333\377n\362\321`\211\277x\337X\362\305\315\244\277B\322\021t\r\264Z\277\35309\036\251L\204?\027\236b\013\226Q\300?\350\t\260\027\003\336\233\277\307.\313|<+\270?\311\363\201\246\233\247\237?+\221\261\025d\326\227\277\2056G\342\006\313\222?\023\373\212y\226\016\241\277o\355+\200\373F\260\277%\364\244\352\300\314\217\277\266\224\320\332\342p\242\277\222\rB\350\205\021\262\277V\216\334\3450f\250\277\240\232y\247\327\271\261?8\214\353V\344\255\264\277\260\300R\'\023\247\265\277\003LG8\357\023\246\277b\347\273d\260\333\245\277\333\205\254\2628:\262?\202\021-\tfr\266?fG\376\253p_\264?CxB\242{\231\271\277e8[v&\001\252\277\270\274\302t\354\037\245?lS,\257\311\334\256\277?\275\200\220E[\225\277c\\.g\275#\246?\202\316\224 o|\262?(\245vl/\001\245?\253`\313\272\347+\244\277\251\007\235\223/\362\233?\214=&].Z\262\277\002<\233\361\302\367\242?\004\367\377\205\013\277\245?\261\256L)\262\270\255\277\276\226\244\2761\254a\277\276{\205D*\214b\277\277\376\236\214^\331\276\277\302j\002$\025\373\216\277V\233\366\310\370:\264\277L\247al\304\304\243\277\360\324\3671\314M\225?/\250\261\221\222\276\227?\017\222\221\363S\"\302?\374\371\333\211\220_\252\277\010\272\037Eai\213\277\245%\177`\237\375\244\277SP\025\010:/\261?\242\303\0144\307G\221?@\030-\036 L\220?I\312N\344\323+m\2771\205\201\005\352\221{?\354\226$$\356o\265\277\3524D\300wf\234?\245\245\367p\177\213\234?\321H\226\036\333\272\256?\213)\031\313\326n\265\277\272\344`\3724\325\211\277\206\306Y\362*ty\277P\237\265\302\316\304\215?z\323\304\222\315\311\257?\023\340\370t\024\265y?\r\222\323\367\025\367\232\277-,\221\035C\000\263?8\215\212\023\024\r\245\277\211\345-e\333\031\216?\200\\\347\355w\234j?>\036\323\033\275\215\267?\254L\375TL\241\221?\022\344A\370\r\350\260\277\216\324E\373\240\246\252\277\341Y\371\311h\020\262\277K\335\372,\237|\225?2\006\r-\024[\261\277\225:\263IR\263\264?\330\261\022\276\'\206\271\2775\')f\340Y\222?\217a9A\354\035\260\277C\333t\342\r\277\254\277\365o)\201\336_\242\277\313\\^?K|\237\277\022\3572}\276\201\273\277T\363\3444\377I\240\277\363\275\355kp\363\206?\243\276\207\217\265\020\243\277\266\257\216\204\r\027\266\277o\363\\\354\216k\252?aT\256\272\306\336\270?j\377\240\000#o\240?\246#\260\333\255\267u\277k\241n\322\244l\247\277Nu\355\204\005\352\231\277\356\027\014\\\030\230\245\277f\215O\333\307\027\247?\236\025\217\360=`\245\277\275\316+p\214\250\206?\333_\254W4=x?\301^a@\211>\300?dz\264\320\336\351\243\277\260r^6gP\217?fH\363J\201\302\230?\317]JQ\004ek\277\313U+\"\313\254\226\277\200+\347\366\262\262\221?9\314\006\230\352\342\254\277(\314n|\240\251\241?\027\241\273\247\213#\231\277K.?\2703\270\260\277\222r0\243\351\247\275\277\341\334\357\250\037_\217?m\263s\024\355q\265?a\300\237\023\323\270\246?\237_\313\250\343\205\224\277M\025\374\027\2136\267\277\266\237\223\203\206}\216\277\240\255\301\242\321\310\274\277\373\224\031\233u\236\204?\3777\344\350\236\201\226?\300\366ms;=\240\277f\232k\005\343\377\216?]\241\025\020\274\240\225\277P\243\276\203L}z?s;\244\246\0203e?I!\343\275\347\357Q?!\206\371mq\037\274\277\353\250\306\204\035\370\245?\330_`_%\005\267\277\255Z\262\014L{\221\277\331\324\206K\354J\234?\236N2)\261`\241\277\342o\356\302Wv\264?\206\360j\267\t\n\231\277\004\001\033\303\327V\243?0\371*\277\177\330w?\375n\367D\254\306\245?\\\226\036\353\234l\220\277\010_C\005!\343\232\277\313\362\223q^\017\260\277}\016\261\206\300\371\253?\376\237E\317\"a\276?\246\254>\343\251I\254\277O2 \177\235\204\245\277\236!!\301L\314\245?#\272+-\247j\263\277\366m\327F\364\226\272?_\347H\231\2558\246?\367\262 \362W\250\214?\230\214#\370\021\017s\277f\207\245-\230\344\262\277\265/\322N=(n\246?\221\035\\wS\221e\277<\230\327\210\025\330\234?\261N}8\"T\244\277\"P1\315\022\217\224?\253\350Z\313y\237\230\277\340\336]\244\223\230\227?\024\322\343l\371\274F?\016jla)\206\204\277\365\302H\240J\376\273?\225\302\3773\264\326\263?]\302\341\313x\332\215\277\351b\0377Y\323\261?\334q}<\306\016\177\277\300G\273\006\213\250}?\345\375F\265\036\007\242?\377\3335\375\334\254\237\277\217\225:0|K\261\277\232\031w\003\341>\252\277\254\022a\020\207U\225?KM\353kUj\261\277\341%\365~\272\361\227?\257\335\345%\006h\263?}\374\307U+\340\243\277\0338\261*h\260\243?6\r\271\233:\221\251\277\377\337\r\200\223\266\206\277Jf\217,\334\304o?aZ?\210\373\204\251?_\031em\212\360\244?{]\030o\300\245\230?\005u\373\301\312k\257?\004\245\252j1\273\272?\345n\033X\247h\276?C\031\210\316,P\232?(\357\374\377\235\203\261?OF\342\3753\207y?\350\336\261a\0171\236\277\211+\216kE\372\227\277j(\306\226?8\215?k\353YyvY\276?\016\362\244\332M\245\260\277|H\354\340.W\256?\305$-z\237X\300\277\331\206\033\346\313%\216?G\020\304\350\212\034\261?7\362;\300\346\267\217\277\251A\273wZ\021\264?\362\034\227\027\342z\261\2776\026A\314\220x\256\277\200p\253\363\256\026\225\277mnQ\0003\370\255?s=\305\177\341\307\220?\322\352\347\341P\320\235\277T\212.\215\213\262\301?\344_\322\251\214\334\271\2771,\035\216\250y\204?\312U\331\"NW\246?Z%\315\'b\362\234\277\260\354\261~\372\332\260?\022-\351\004\010\243\202\277\'\372\350\025\255QF\27760-jLm}\277\027\021\347W\0370\270\277dD\312\253$h\250?\360a\360\275xO\216?Z\372\217O\316R\266?\222Qg\300\246D\254\277\323W[\024\204s\210\277\3230P{2\025\231\277\305L\002\373\332\007\224\277RZ[yE\340\234?J\255\213\317A5\223?\0003t\252@\377}?\005=2\320v\n\223\277\345\370\324\317\346\331\215\277:\302\025\244\251x\235?K@\362\320\222\027\240?\230\201? \020\334\246\277\325\207}i)\300\251?-Iq\347d\027\266?\016}\267@\233\312\254?\003\207G\030\354@\235\277\365!\240h\201e\265?\331\005\347\321X2\225\277@rv?\234\007r\277,MrSt\375\300\277\270l\177\244Y_\230?\352Y9yR\342\263\277\326\021\216\361DT\261\277/\024O_\263\232\257?=\370\014\020\341r\244?\336w\273sOb\265\277\\\222\256\373W\324\234\277*\245t\322\036\230\247\277\013:\2648\023\335\222?\323\006\370\007\315\330\265\277\322r\331\035I\225l?Y\214\377/8\224\300\277w\271/\001!\236\177?6\322\355\350\377\263\242\277\207w\266\366\n\212\262?f\351\033\226y\314\261\277\300\361\004&\275I\240?|\375]\276\342\331\251\277\021@\364\202\207bx\277:\276MZ\253V\247?\302\250\221]R\221\267\277\2212P\363S\253T\277\245.\326e|\350l?\302e\0340\346\222u\277a\356\371\2570\234\300\277\311\324\344\231\336\366\270\277\217-H\246;\216`?\346\025\207\355 c\253?5-\266r\234\013\251?\377\344\002\250\227I\262?\305\2762\341\213\026\253?\371!\027\031\314\277\230\277\206Gk\263\321R\267?\002\022l\367\017\224a\277\340\347S\267\346M\242\277k\005d\264\003\035\223\277X\314\231\352n\377\273?\376\3115\300]E\250\277\275k\027\327S\303R?\302\366cl\314\323\271\277_+\303\034\366\224\201?\305g\212\334qd\210\277\2244\000-\032\\\267\277J\351\006V2#\306\277\234\301\236\320k\232\274\277\302\310\002\243\007Aa\277\313y\300[\276V\244\277\325\200\025\214,\225\262\277]\230\030gaZ\267?\324\247\002\324:\024\235\277\n\373n\225\374\260\264\277N\3655\2466\211\227?\240\326hX\177\201\224?Bi\365\224\224\323\231\277\034\235\027z}\271Y?\025\306\222\304\037Y\216?\017\247o\\\0019\207?v\026\221:\024[\247?\337\210\034\256,}\201?\201\371?>H\241\246?wy\022\2056\206\273\277GU\311\364\216\243_\277\374\367\021\007\021\200\236?\n\271CO\302\362\225\277\300\307.\207.\026~\277^\033aq\360\217~\277\220\266\265T42\252\277\243\331\240S\335\262\221?x\323P\204\177\013`?\354\366\230\360\031\354\232\277\367\315\377\225\237\003\266\2777\366\215\225\316Wv?\201\262\336M\332\244\260?\220\\\365\036\350En\277\356\3426\323\213\202\251\277\246\302\373\316\006/\246?\316.\227~\237\030\245?:\304J\375m\303\247\277\214\213\030\022M\352\207\277\222\236\314\304W5\260\277\342\217\t\363D\343\246\277\201I?Vq\330\261?:\276\206\275\244\206\213?7\335-\330\321H\241?w\273\247\021\321\224\263?\300_\3354\3614\220?\"+\223\244\262\274\274?\027\022I\312M\230\226\277\234\204\342a$\337r\277\013\322\340\255\363_\224?%D\222?\'\252\247?.I\001+\007\246\271?Z8/\353s\311\247?\325\255\332H/vc?\2738\252=n\207\244\2779\343wT\"*\266?\334H\374I\252\\\241\277\006J\n-uJ_?\006#\312\304\254\013\243?#j\261\r\304/\210\277\227\330fbF%\271?\211\237P\3775\234\260\277>}\237\207\'\020\233?=X\2426\006\366\263\277\277\024\0338\311\037\300?\346\343{\230M\362\252?F\332\241\372\336\216\207?\234\367\027\014\222\252\246\277~*\326G\020\365\252\277R\350\\U\013\234\262?\204]\366i\035\322\230\277\333\2053\223\035^\200\277}\264^_5*\242?\346\217\226\214+\035\262\277q\271\261\200\306\350d\277K0\276\227n8\241?\370\376\024\263@\346\246?#\310\270[{\021\252\277\256\205\366Jvy\253\277y\205\364\317\026\274\254?+\205\022-(\265\207\277T\374\243\353\337\247\300?\306\266\264\t\353\t\306?\016\364\310\3112F\235\277\241C3\034\235\225\221?.\010\240q\257L\216\277\324\271\0200^\235\240?\013\250xw\241K\254?\262^\267\271L9\222?\240\003\322b\304H\236\277]\317\005\301\315\352\265?\373i\213\316p]\213\277\311\307&\2638\341\232\277\377\314z\005\304)\203\277u\366_n\252@\256?\"@\323>\0242$?<\325\363\216\270\206\220\277\375\235\212\341\014\017\275?[\t\356\366\343\340\247?\335\014\263}d\216\245?\243\236\226\036\215u\235?\341N\033\372=\376\222\277N\375\301\316\006\254\244\277\232\311\365\326%9\234\277/\277\325\331z\234\203?W`K\231\337K\263?y&5\223\314i\246\277\350\001\242{\352z\267?\347n1\340\001Q\246?\225\337\315\227\276\241\227?\t\207\214$\022S\240?\366\342n\374\236s\222\277\231\222\236b\023>g\277\212V\254\271\213\337\210\277\200_\327H@8\271\277qc3M\211\342\253?\353\242\350\004Dw\211\277\257\265&\272\257U\245?x\263\274~d\351\263?\203\2002\254mj\254\277m:\371\225\027\001\255\277\032m\353\350D\246\225?\276\202\014@\242\301\204?:]\255\237\315t\266?\304Q\352)kR\247\277\351H\335a\024\221\256\2778\3534\304B\256b\277\353\003\2008t\253\224?\313\313\200\331\237\\\207?8\263\223\262\351\372\247\277srM\373\036\247\254\277\232*`\327?\311\261\277,\350pc\361Tr?E`\300\215ZA\262?B\236\261\256\030\210\267\277\364\303\354\303\346)\270\277\314\226$S\237R\255\277\267\221x\265\222\260\267\277\222\001\274\304\2623\247?\307\246\320\036\030\340\260?\345Q\325\2556\020\242?\212\263\035\007\274\020\267?\263\221\351\201BN\311\277\300\363C\342\324+\261\277\375D\334d\241+\231\2776\217\370\215\004Z\264?\303\306\256?\225\200\220\277\000y[o\201\324\245?!h*#Hh\227\277\360C%O\022\211\256?\256\315m\201\214Y\217\277Qg`q_\351\261?j\243vd\304\235\252?_\356S?a\365\216?Nt\276;o]\202?\222\215\331\252\345\325\215\277\361c\t,F\\\271\277\3611d\024H\206\202?*\350\013\273bp\201?%\377e\374\355\323\207?\230\2614;k\220\241\277p\271\204\257\371\341\221?\204\"\037\224#\376\247?\031\211\202\303\374\322\270\277\260\344\331{jr\252\277\336\3369\023\020\321`?5\365\275\376{\301y?#\263\002*\323E\242?\016\327\022\330\356\263\260\277\315N\256^\360w\246?\2120\230\276\216\026\262?\322e\277\034\335\253\236?\205C\010(\220\3539\277\310\376\201b\2056q?W\331\344\264\344\014\224\277?t\372\364\343\245\225?\313\322\203\353\0259\246?\3214\340\301\273+\240\277\204\372\3463\250\332\300\277\275\373\352\025\211\364_\277\222\333\241\021O\260\226?\262g\220\343D\343\202?P\270\260^\002\001\260\277\277pb\364^\346\214\277eV}\000\223\241\223\277\200\325\017\243u\325\220?N\302\207\354\017\200\211?\215\270\rMH\243f?U\313.\350\304Y\266\277\tj\203\254\243]\265\277\315\224\271\342\0048\247\2772\003\232\001/\231\264\277;\275~\234\316,\242?\335E\332\203 \'\226\277\010\033\247\235\021:\235?\223\265\304Zu\222\257\277=])\373\343\222\232?AS\300\245Q\351\207\277\020\310\021\243%b\221?\371\367\365:\362\310\237\277\221$%\'Pw\270\277cx>\217;\257\217\277\361\302\221\003\345\n\261?\326!\274\225\304\317\224\277\027\035R\001\314\"\241?\337\300\243DN\253\273\277\250F\370\216\263X\247?t\r\rO\313\346\271?v!\000 \317=\244\277Q\326li\330\371z\277L 7\265\366\213\245?O\361t\301o\357\243\277&\2162\342@8\205\277t\314#h\363\241\277\235\210\274-\320\002\251?\366z\374-\332\311\300\277\353U<\022\221\245\226\277^\314% \265\321^\277\253\212S\\\243>\241?j\006\315i\020\332\263?}\351\035gv\253\267?s\305\004=\030w\264\277\362\222)h\001\006\204\277\021\263\236(Hq\237?+\023\377M\037B\013\022\260?,\330\364`IK\253\277\244\235\277\305\262\340\233\277\031\010v\275GH\226?\314\265\005@ Is\277\364:\252\234$h\242\277\263\371x\254\216\020\261?\206\222\352v*\310B?@\361P\315\'\313\244\277lY\220\332R\330\246\277;O\237\373-\365\226\277\317~\236\033\036Y{?\252\310r\372\2650\226\277\332\003n\023q\032\262?\'Mh\032\266_\242\277##e\273O%\243\277\215W\244\024\201G\242?u\ne\t^\034\275?\242\234\010h\336)\221?C\'\032~\375\207\272?\345\302\232\365\354\001\256?U\270U\n\251\376\273\277\274\265KU\036\222\260?\t\321\312\020P\220\247?\267\340P\344\266\312\273?p\271\326\006\211\275\235\277\373u\301\233\213\262\240?3#\254F\313tD\277]\216\364\361&w\256?H\013a\331,\330\247?\335\350\205E/\020\243\277\037\316\261a\t\345\251?go\024\372\020\010\263?\230\306\221?\203\207\240\277K^b\031\304\356\250\277\255<\230\267\240\272\265?\335`E!w\007\230\277)\265,\203\261\356P?Mff\036\356\314\214\277{\036\210\343DX\305\277.p&\333\374\177\243?\363\343\017q\032\327\215\277v\317\205\243\357Jk\277\342\306\315\322\026>\300?4L\215\225\272\360\300\277\005\225\210\276j\023\223?\240\343\010\021\323E\250?\"P\257\324R\226h\277\246C\331\2122\017t?\014\357B\374\034\010\220\277\237N\023y\323\005\264\277p\313\370 \370z\251?&\2474\001\262\365\221\277\021\361\236\370KM\242?rm\362>\216ky?\026\'_\0172\024d?\234\177\017\323o\t\223?\302v\013\241\324\020\241?\312\177\035\370\010\272\262?\232\355\303\007i\006\232?\376?\200~\367\315\213\277\356\262$\022\377\357\202?\367\374\307\263\'\345\206\277\330\222\250N\3266\232?\270q?\236B\342\206\277\327\030\201 \206b\203?\223Pa\" \261\256?I\346\021F\'\372\260?\010i\325Y\214X\250?v\277X\205;\244\226?\303e$\221\256h\251? v\260V\233\036\303?[U\305~e\013\204?\333N\201\306\333\'\263?\236\033\352\033\212\265\302\277\371\010\003\375\271z\260\277\305\300\220\271\203\014\260\277m\020\361\212$\352o\277\340Fg\206\371\364\240\277\233nUg-\177\265\277\221\350\232\346:>\230?\364\213IF\241\027\223\277:\246\362W\230g\224?\227~\311+\321\177\220\277\206\272\322Dv\371\207?\317[l\367r\216\267?\345\351N\000A\226\232?\351\3202_-\364\245\277\200\200\035:\305\245t\277%S\247\370\202\354\252?\267y0r!_\231?\257]\241\374\310K|?r\224\020e\301\027z?\357#\322\332G,\233\277\031=)\212\260\277\236?\246\202o\257\002\003f?pW\273\357\227\007\261?a\0018nG\177\246?\206@o\346\251\310\221?\373\214b\020\306\352\270?\311\034j\275\005[\220\277\342,\3723\240m\241?Y\265M\251\333\211\264?\232?\010I\331\rd\277\352\300?\t\023\367\220\277\330\370\313\340H\246\270\277A\261K\201z\036\200?\266)\362\357g$\242?{\323\362JJ|\260\277\'\336\217T#t\265?\201\353&H\004\337_\277\305l\201I{\310\241?E@\205\225\020\032\274?\022\0362wR\246\261?\225bv\027\257\371\227\277\224x\274\351\001`\233?xl$\226\377\307\256\277\256\270\277w\304\203\257\277\247=y\355\257\234\252\277\230\022\2726\250S\242\2779\001\315\261#\203\251\277\004d\277\255|\315\245\277\r\356\024\333\332\227\213\27749\226-D\'\256?=\033\366\004\261\335/?g\331\3545\247 \255?\312\3104{/\375\240\277\340\010\244\225o7\261\277\020)\3322\016\366\263?9hQ.\246.\246?\322.\221z\007\311s?\363\276W {\215G\277\225\36649\234\n\224\277m\263\300-\0003\251\277\244#\340a\246i\177?J\201\262f\352\331\256?\2117n\356\2141\244\277\362\232zY\030~\266?M\r\010U\306\331\242\277R\002\3079~)\203\277(\316\370\023,P\260\277\335\'\225\007\207\\\244?\031\344l\213` \272\277\021\352\274\221\233\010\211\277\326\3612BU\037\255\277\013\025\207\3423\355\271?\347\375\230\361\3779\245?)\351\311zk\313\234\277B0}\020Tb\263?\021\234\246s\271\377\240?\214!\"\022\321\311\241\277\355#\233\362\347\030\270?\252\303\372]\264g\212\277&\366\230\210.]\267\277\326\325\327\001\344\267\274\277*c\026^\266\354\263?\177\004\352\242D{x\277\376\366\320\325\235\020\231\277\031\010\014\2623E\201?\033$+\326\036\205\300?\023\321n:9.\261\277U5K\3122a\261\277c\363\303H\256(\204\277\246\237\t-\356\372\275\277i yq\177\025\203\277\273\307\302\345\264\354\252\277\265\257\010V\352B\230\277U\307m\212\211\263\270\277\230L\366MQ\300~?\223\232Ie\324[q\277\326\007mP}1\235?`\354\376F\030a\272?\217S/\035Z\356\255\277x\177\272\236\353\014\233\277\205\260\251\335P\352\255\2772hl\223r\000\246\277sF\215X!\303\266?\374\340QR|\251\243\277T\254\302j\222\317\240\277q\305\210D\177\324l?&\rEw\246Y\262?\313\273U%H\342\262\2779\3503\202\315\t\253\277h}2\021\311\033\266?3p\n\324~\301\242\277\275\223\343\031],\216?\005375E\375\271\277\222\010\024\261\305\204\\\277\325D\353\307E\254\231\277\243K/\337 \202\220\2774\343=e\201\354\236\277\313\035\320\232\221Q\272?K\303\271>\301\250\227?\000\005bK\007\235\262?\033\256\204\027\212\r\222\277*\307F9f\010\274?&\244\214\017\247\337\246?\014\020]7\371\366\254?\346H\330\262Fq\207?g\267\317\022\207\240\247\277\223\353ac\212rs\277\331\234z\261Se\263?\220\356\326\334\017)\232?\023\232M\243\316\251q\277n\356E\263\271\225\221?P\010\301\365EH\241?\245\273\320\220-K\242\277\002\\Jr\355\350\273\277\360c\022\032\315\'\207\277X\343M\006\301\246\255?\365\371\200\306\034\272\301?\366*\216Z\316e\230\277\214c\'\233\361\261v\277\337\220\230\317#<\237?\244A\374\316\203da\277\t?\034\343\007\265\242\277\241\277Z-9\034\227?b\310:\333JG\222?\267`\307d\356\033s?\364\034\304\355a7x\277\241\351\342\231/\354\224?\020@w\004\220c\240?\\\242\t\345\266u\234?)rU4&h\262?\300\177GW\260\366\243?\343%\337\346\231/\241\277\251\035\366\020\022T\263\277X\002z\224\264\250\261?\343mB\310\307]\270?B\213\325\222M\006\247\277\245BO\000 K\265?\225\371\320\214\317\005\260\277\004Gma\340\005\272?\265\000\337\003z\353]\277\254\345H\377\226/r\2770Oq\2214V\256?T\033\231\371\265\351\260?X\237\367\257\262\365\205?\252m\201\333Oi\270\277\344\205Fk\2061\236\277\032\020\311\266\361\321\263\277\225\336f\305|\321\277?P\354\031\304\243\201\206?kJB\037\327\001\254\277\202\031\210\035\366\323r?\037\357\336f\253\240p?\370zI\273\267\010\245?\021g\307ya$\266\277\022\020\206\033\013G\244\277\315\220@\335\326\317\216\277\3362#P\334S\257\277T\006\330\331wD\231\277\037\263\000h\033\203\241?\265\325\307|\203\265\225?b\307#\327\0027\264?\010\230U\374!?\263\277\221\376O\205r\201\253\277\265\030\324\n\302K\275\277t\t\244A{r\237\277\233\232{6\237~\244?5\2206\360x{\241\277\377p[[&K\302?l]*\351\206M\203\277\222\2728\366\353,\260\277\362}J\363\204\371\221?\3506h\273\374\034y?\002}Qk\2129\245?\177\260R\020\374\306\234?z}dC\327M\261?+O\251\330Z\021\230\277\346.\036s\366\356\241\277\301\376\022Fo\252\250?\014\243\'g\306\363\251?\031\031,nq\t\274\277\257\355\320\374\262\253\231?\331lsL\317\222\275?\2501n^\276&\237\277(R\nsR\n\235\277\307@v&\216\"\256?o\224\273\254\214\255\305\277z\235\335\255k\226\230\277\261\010\372\361\255\221\226\277\301\271\254tr\235\256?\3335\201\366\213n\272?\301X\237\332\255d\246?\247\025\302V\nz\223\277\210(\277\037\223\226\211?v\243\232rM,\246?\323Y\301\243\210\232\213\277)!YJ5\013\256\277\005\020\003\377\351j\265?r9\257\370\034\346S\277Ol}6\232\216\223\277v+z\200\256\215\261?bV\021B\374\331\252?\371\r\366Xd\232\257?N\312\337\003:\177\206?\0255\024n\262\277\236\r\030\332\000\214\243?\250\231\213\267\232]\205\277\213\374\271F\201\244\253\277\300\207\355\3213\236\221?\212\027x \207E\230\277\376\200\324\306\016Q\254?\265 \n\030f\177\301?\300\033\013\rrz\252\277\240\341|\022\326\321\220\277^\004\257\251\250\235\266\2774\303R\320\2319w?\366\256\005\330\365\344\245\277q\0312x\r\026}\277\212.\325\304\222 \261?e\262o\272\t\227H?\255\273v\020Ij\247\277\326\324\2470\253&\270\277Ix$\013\313\267\253?oT\363\253\262\260\212?\026\336\241\027\003\216\231?;\225\246\327W\311\241\277\332\261\212\201\201s\236\277\272[\030\300\000\020\250?Is\250\226\365\032\177\277\315J&\034T0\233?\027\2330\3268\306\245?\200\310A\036A\215\210\277\342{\363\013\032I\264?T6\370\271_\325\233\277K#\014\215\211H|?\343S8[54\265?\340\235\001\331\314\022\273?o\243\251\324\211\350\220?\275\020_DZ\320\303?\330[s\301\017\017\243?\034]\377^\347d\230?\022\026\313[\342\345\221?\001\267\235\251\276\270~\277\372wt\325o\233\202\277(\021\020\3613\315\225\277\357\000\200S\006Q\204?q\201HN\201\001\214?-\270o8Z\254\240?\325\353~\022\353\301|?d\031\030\002\341\303\204\277g\034\216Q>\000f\277`a\340\344?\225@\277XGE\332\t\211\220?\273\370\265\035O\001\261?\357\214\177g\240\264\201?+\004\356\306\202\370\262\277kx\014\317\303u\265?\304\3749(\013u\223\277\214\357\275\230\007\277t?T3\277%\250\017\253?;_g\"v\325a?\200\307\307\3572\371\246?*\010\337\025(\254\254\277\034\255;y\304\274\242?&V\032V\304i\264?n\311\246q\331\207R?>\203yn\210?\232?\304qi\0251v\243?;\007\360\271\256J\221\277\031\341;\270\276u\267\277\323\266\346\014*H\253\277\265Y2q\300\214\262?\320+\'\226O\333\237\277S\\,x\300\273r\277#K*C\206\007\243?\216\030\266\026\332\010\256\277\316\351\220\2114V\233\277\377H\236\303-o\245\277M\371\314E\2469\220?c\026g|KX\260\277\276Y\365\272\317\366\216?A\360\254\235\352\350\210?\214\206\224A}\255\213?v\354\250\244|5\220\277\275\267W\000\035Q\243\277eY\346F\243\010\246\277\023\343\331Z\302b\260\277\260X+\376B4\215\277\023\337\030\333\375fk\277.\310\365\373\025\204\274\277W?\030!\235N\203\277\013\355c\030\320\330\220\277m\3447\342&\347\252?\205w\032]\271\306\262\277~\276\326\363\237\361\213\277m\326\220\257J\262\260?N\262A\325\317}\222?1n7I\205\253\230\277\237\327 \226\010\347\266\277\371\261\321\014I\312u\2771\212`\3272gn?A\273\325\274O.\261\277\275\361\205\203.+\247\277\275b\354\331\354l\263\277K7#\031\365\234\204\277q\261\345Gk\265\266?\242u\007>M\025\254?\230\222\345\252\003\206\257\277\276\334[\254\367\306\260?\304\262l\311]\n\201\277p\253&F\035\"\267??E\3040\265#\202?q\356C\037\014\266\217\2773\005\331\230\\\346\260\277J6V\202\370\022~\277:\374\374\206\266m\264\277\253\314\221\375E\343\274?\023\360\256\")\"\251\277\006\216\343\263\313\372j\277pu\000\206\344\206\227?\352z\344\021\371\003\233\277\355\235\206b\315\227\232\277\231J\232\266\304\030\247\277\270\316\257\367]s\267\2773\215\'\263s\r\254\277\rh\021u\215E\233?\204Uw\025H\037\210\277\313\220\373\264\020.\272\277\320\324`8\205\347\221?`\261}\241\254\346\232?TU\260\370\022;\244\277\221\2548J\212\354\250?\371\302\223\002\276\314\241?\317\300\007Z \277\242\277\225k70Mo\257\277g\016\347\353\327\370\252\277+\345x\030\177r{?<\\\242|#\304\310\277\366\003b\374&dq?\264.\367\002\333\026\255?T\027\346\345\254\027\256?\307s+\210\3467\255\277l\005w\\_\026\240\277 \272\335\020\257\366\260\277\200)i\333\003%\217\277o!n\002\255o\241?\2416\363\332eC\251\277OA\344Y\010\277\270\277\235\346y\370\270\231\220\2778\033\267@\216\027\246?P\235\305L\311\323\240\277\254\025\377\353\360?\265\277\201j\216\253\353\231\240?\206\007\342\257\262\321\262\277\340@P{\376r\261\277=\260\337\360+\301\242?\254\332\260\224\207\301\307\277\234\025\r\022\344w\275\277\352v\325&\"kp\277\263\256\375\220\370\356\257?\265\252\244\345\023W\210?\'\264yZQ\263\264?\302[\212\373n\363\252\277\374\2270\201D\203\251\277|P8z\325f\251\277\213\035\316\252\363\\\206?\272$\337\325\001\316\253?\031S\220\203\017\344\241\277&\010\013P3\315\207?\207\210\245\305\353\224\251?n\022}s\231)H?g\035\310\2311\252\255?\001\225/\226\366\316\243?Pm\250\'\302r\241?\325Oc\303gB\264\277\362\224\227G\207dz\277\337\376\003\223\361#\276?\203\026\243\342y\330\202\277$\300,>\212\375\303\277\240c\303\273\243\033\262\277\323\346\211\001\317a|\277\024\375GPb\256\244\277\324v\350\317\315a\236\277\"\266\032 Z\250\227\277m\334\301\037*i\236\277\352P\317\304L\333\300?\353\233*\304\332\271\211\277p\312\206\264\375\002\264\277\340j0\215\315\365\234?\3431\001\2532\261\225\277M|\367\037\247*\266?\265G<\300H\231\241\277\001\3045w\300\003\250?n\254/\271 \352\244?F,\337\300\0258\263\277\333\270\266\275!e\234?\213\270\264\033\177m\270?\277oYU}\205s?d\204\007\021\034H\261?55\220\217\276\345\214\277\020\227\262\242*\266\245?\203\3518\235\351|\210\277\272\204\357\263\004\373\267?Z\250\036\014\353L\260\277\257\333\312X\334\234\263?\336\220\262\234\304>\220\277\211Po\325\t\335\242?M\224\351\312\013\006v\277\261\330\337\231E\210\247?\022V{\034\302\370\207\277[\037Z\260\371\007\263\277\220\204+\350\320\250\274\277\0352T;\253\241Q\277\210i\353\256\001|\212\277:\236m\217q\201\254\277\244\207D\021\331\304\263?\207\300!^\371\350\253?\311\217\244\233Z\022\252?\027\n\251I\343\\\275?\007\006\021^e\235\266\277|\204I\335\202\034\254?\306\357\324K\243\231|?\264=\333\245\021\332\230\277\177\334\211\226\320K\256\277:,\236\326u\264\261?(y\340\361\0047\227?O\233\2547\022\214V?\350\354\303\352\313\023\255?\227(\363[\322M\257?\255\253\241hR\355\233\277}R\231\306v\274\244\277\214\200\272\330bS\241\277P\2036\364;Yz?\251\253U\373MH\244?\231f\037\352t\373\266\277\017Z\245\224q\345\244\277~\352\357\373A[\276\277Yk\326r\205\234\232\277\326\227\305\305\241\022\257?\236\330\331\260\003\225\255?\276S\"ap\316\245?\375\333&\366\252\224\257\277\212\2502\236;sE?\331?\342\031\221N\255?\220\004E\225\373\307u\277B\261Z\251Q\201\234\277\206\254\211\010\006\327\262\277\242\371n\2065PB?Xy} \374\330\236?\202\371t9\220n\274?\343\254\250[\202H\223?\364\310a\240\212\213\267?\227\007\204|\025J\274\277\260\215\337\014\025\377\251?\211\311\2659\261\363G\277\347X\372u\333r\212\277\203FI\326a\020v\277\334\000\300\377\016\343p\277Rm\351O;\023\250?\321T\322=A\234\272\277\230\342m\231~\254\270?>\016\033\t0\005\267\277\216\370#\\\334\263\262?~\355P\223g!\232\277j=\223\tG\370Y\277\356t\247?\226L\222\277\331\257\262\323\365\033\267\277\300\315\000\221\274W\273\2775\021\263\350\026\326\256?\261\243_\345\002d\224?\270\3053\254\035\360Y?\345{Y\270n\"\245\277\252\004L\037o*\257\277\224\340=X~Uz\277p\005\271\362\375b\233\277\354G0\304\014\r\275?\230\006\323~BH\271?:\001\025\255\244Q\223\277g\204\262\305\317m\300?R\215qRM\302\267\277u\220grDd\240\277\261\007\311\340\001\370\206\277\003\250 \207\223\244\205?\366\271\360C\177\372\242\277H\261\226\325\246I\213\277\216\336\320\375\000\344\261?NW\342|\311\211\206\277\007x\016\277\031g\265?\n\203\2468\3744\233?\335\252\025\361\266a\206\277\366\017\371o`m\232\277#~\n\033?\376~?~\262P*\247\335\244?\320\025\310: 6\205\277\332n?\344\275\303\234?\206\335\210O\306\002\231\277\305\227*\275\234\364\261?\230\371\3104.x\245\277\310J.\372\277Z\240?.Z\270X\301\356\256\277W\260K\262/\323\233\277Ws\352k\017\014\225\277K\250\277vU\250\224\277\227\343&u$Mi\277\\\302\204\364+\247z?\300\027\274Pw\331[\277\340\320\300\322\266\341\243\277%r\322\342&/\221?\333i\301\003\032\217\266?\006\006\013\3274\014\256\277\372\215u\333\201\364\240\277_\002\256\312\273\225\266\277\300\025\233\311;D\236?\261\\\374\020\204\253\254\277\010\"\373\374\225\331\225?\274H\2333\001\257t?\013\262\030Q\211\205\257\277\006d<5\014\016\251\277\344\277\\D\006<\260?\'9>\032\215U\243\277\322\334\226_\364\032\250?H\314\016.3\352\256?\243#A\342T\327\300\277\2772R\026Db\261?\265\013\214\255\231J\213?1\240o\327\224\274\260?F+PuG\261\267\277<\247\016\203\210F\247?\024zu\331\251r|?vQ\002;{\r\232?\010\367x\375wC\253\277G\266gS\005\227\245\277eq\200\224\306\204\224\277\264\236\241#g\241P?\005Q\r\031\331\311\270?\001\351\273F\366<\263?\327\341\266>\277\304\273\277\272\374\244\3335u\273\277\361\275\351c\277\377\230?\326\306\252K\220\306\240?\206\030F7\t\027z?\364c\334O\010\007\262?!-\346\37377\260?\274Oq\2512\013\206\277\273kJhXN\244?\313\365g\007\374\354\212\277\002(k\236\247U\244?\324\330l\354\357\365\244?-d\257\214ta\255\277\333\212^\305>\033\224?P\357\rBm\236\256?\203\351\341\027\373\225\257\277\3056K\220\314\337\266\2778\032\237\3662\270\230\277\362\351p\n\357\245\266??\223\330g\206\000\215?\340\'&y\240\272\242?/\031Q9\030\256\235?a\347\035LO\327\246\277\344\321vMEz\264?\233\310\270]\336_\253?\335\207\323\273\202\253\266?\024\004:\377\223M\261\277\354\n\006\016kd\231?\002\374}\222X\017^\277\n\327rM\036^G?M\334<\025\305\031\247\277\233\375\332+\267]\252\277j7$ b\247\232?\201\273X\327\232\325\206?\004\230\267\210/\036\221?\343d\372*L\r\267?R\273\200\374\362;\276\277\333\317\234!\342y\200?\220\315\225t\027\365\240?H\035K`\014\246\\\277:S\372\255\270H\241\277H\336\005\022\346k\243?\356\252wB,\231\236?b\203z\244\014\207\232?2\275$\232\344\253q?\306\357t\376\302D[?\234f\217\304P\030\267\277\355j9\315\2057\240\277\010E\251\304r\215\213?\227]\0076\r\226\236?\024\000`\223Ha\262\277\016X\n\3519\306\247\277\276\320\235\270i;\263\277\220J\236G\240\256\262?.\n)2\344r\255\277\245\350\262\313\036\'\267?\"\264\025W\020\027\227?\177\234\372\016\267?\211k\246\316\337\to?\322\026\005\2578\211e?\324\266\326\010T\020\227?\027=\271\005u\021\257?-\365T \241\230\203?\"\230\346\324\205\352]?\262\212j\003\022\027\254\277\370\264Pz(\026\235?Ym7\343]w\265?\033\014\021>i\203\202\277\374\013\236\316\251.\230?\320\026\000q\343\321\273?\347\354\343\034tF\240\277\336\212&\320Ut\247\277\207\030\263+<\350\266?\020 c|\353\325\203\277)\302W\201\335t\243?\225\0026\234\321>\236?\363g\177\267\022F\262\277\247\333Q\2675.\223?p\367\252j\201\370\205?~\004\243T$]\230?\331\026\371Hc\\\242\2774\037\331\361\243\306\201\277h\320\031\034\257\341\264?U\353D\n\377\032\226\277\276_5\227\301L\237?X)x\276^g\274\277\032\324\344U\377\304a\277l\357\304g\212=\217\277\000y4t\363\001\254\277\031\210\323\213\301L\271\277\277\325\304\245-_\242\277\214\261gH\311\254\223\277\3058\317$\0262\243\277-J|\027\352\224\257?I\222\"\370\210\216\221?6\257\002j\226\244\205?\036\211\311Tko\213?6\223\315\210\302\022\246?\302e\367\322\202m\260?>&\242\334\226\340\242\277\322\006\017\206\234O\203\277Q\242R7\250\360\270?\261\000}\321-\216\271\277w\023\310n\330.\232?zZ\020)\250\262\247?\230\222\033\211\036\374\243\277\337{{\371\343\277\236?\227H[\246\3466\270\277B\237\324\2148\007\226?G\200\276~2\300\256?#\311\002y\257\005\250\2770\321<\256\216\260\304?5\355#\031\264\n\257?\363Y\224,z\023\267\277\337;\0319\255\377\263\277|\n\325\232\212\351\216\277\225\034\361\317dm\252\277\371\031\253BW@\241\277N\272e>FGI\277\225\246`\241\257\352\231\277\204\255\244\232\330\324\210?\003\275[]\035F\231\277Z\210\347.\372z\251\277\031\355\303C\250a\267?\236\310\232\000\217\206\224\277\212\235\177x\310:\227?yU\311\315\230v\257?1\240^\361\177+r\277n\327\263\3759&\252\277\n\225\323\263?K\351\266A\271=\201\277+\001\356gP6\253\277V\264\3113>p\242?r\254e\340\310\374\260?pI\371\206\367\206\262\277vH\261\255\352\'\253\277\255\335\302\000\255\r\226\277B\005\374\343<\266\221?\332\337V\021\367\373\250?\261\304\317>\264b\216?\206\3549!\023\273\245\277\307\317:\353\343g\261?i\275\r\017o\263\266\277s\020I\002\366\270\271\277W\223\365\254\206\177\244\277\021\267\n\213H\360\251?\001*,\023\361\205\222\277$#Cf\230e\272\277b\036\345\233\354\351\244?]\245\251\353\277\323\226?\301\313oo\371v\261?\211c|0\232\312\233?\204f\353\210\377\000\232\277\217\322\335l\216N\230?\325\200\374\217h5\253\277n-,\206\302\337\265\277Z\'\032\240:Y\273\277\200h+h}b\256\277\261\315\275\225\327@\250\277\236|G\027\261\201\232?z\257\222\311\023x\261\277\007\324\346R\300\215\213\277\247?s(\362\242\264\2775RRndV\261?\265\314\243\260V\006\256?f.\224b\323\240\242?E\362\260\317\\X\245\277\t\301\201\202,\232\270?\324\300\372=\007c\261?\t\'K\256\242\246\210?4\367\'n\357Y\240?_.\3770\254\250\275?lUS\304!2\240?\035\211}\260\217\376\224\277\234\241\022+\010\312\264\277\276\363\030\206\255W\264\277\337\244\254\275\027w\273?Y\367\211\363\2175\244?\025,\237\2756\237\221?\376\272Y\013\004\332\246?[\242\027\214$\234\200?\357+\367D\372\n\242?%g:\235\021\025\260\277w\272m;\020\216\177\277X9\265\325\365\316\263\277\3775\256)\022\276\261\277\204O\321 \353\220\233\277)\rB\032\031\253\253\277\220\331\343uY\274\227?\331\220\372V6/\254?gM1R\234P\274?\335\020\244\212&\350\220\277\223\\\206\t\217\022\223\277\377?\t<\234\331\220?\220wO9n\r\203\277\316\307E`\026\267\271?\201\314B\267$\014\251\277\t\203{\320\224v\214?\346\006\rK\244\013\237\277\304+\226\261\217\001\207\277\270\034\270\275Q\003w\277\250]\024d|9\262?\267\224\216G\261\222\201?\267\320\255\201\013\334\261\277:\330\362Lfh\262\277L#}\r\271\210~\277\371\022\364\314\010\354\240\277\371\320\360\234)\355\236\277>s\274]\356\021\255?\2727\377\017\360\223\212\277\250\232\024\212+#\254?\0320\022\315Ul\266\277\316\325\003\0022(\300?c\214f\300\0328\251?0:\254@\315i\232?m\307S\213\005_\215?\310\"\220\003dY\241?\3236\200\246\256\232\230?N.\213\263<\220\224?\223\315c\262p\364\242\277\025BQ\353j\340V\277\316\205\354\356\027\003\302?\342\352a\0029 \233?\360\233\236\022\336Ld?,8\335/\023\031\205\277\256PI\2422I\222?\224m\032\203\346M\236?\340\213\345 C6\252\277=\242W%\275w\272\277\007=\007|Q7\223\277\305\214\362\315I~|?xK\210Yc\006\265?\364\247\345\200|1\202\277\217\367\027\213\037H\216?\334\036t-Yk\245\277\350,\375\305\307\035\210?`)|\311)\032!\277L\211\355\337\232\016\211\277\203\021RF\347~{\277\304\013\030\327a6\210\277E\364pk!\252\250?s\252\021W\233X\261?\257m\365\036a-\250?\227yC\277j>\250?\272P\017\222\r\006\235?\373i\377\212\332\233i\277\241(U\215:j\233\277\225x\343MD\326R\277\242[\030\261\r\267\247?\301`H7c\356\272?C9\034\335\250\344\274?\355\314\273Yn\230\202\277\204\336&oj\025\252\277\222\242\271\270\347U\243?\327\301!\225\016\033u\277\274\"\340\217A\225\246\277\330\234M2}\367\260?\256\352\262\325\247\200\201\277-\020\231\007\357Y\264?!]\322+\364\215w\277C\242\257k!u\231\277\021\201I[\256\271\223\277\203\216\3678x\016\225?!Q/\204\3334\225?\304\270\362\235p\376\250\277\004\215l\217\003|\270?\351\211\302\261\313B\247\277\200\230\006\254\336V\217?\177\337\317;\212\366s?\377\365/\016\"\000\202\277\036\257\006 \211\336\223\277\356\237\342\364\255\226\262?\270\260\005v>\036\233?f@\001\240*\3276?\2273\336\313K~\215?\275\"\200\256\266W\254?O<9\357\306\325\207\277\307Y\235\223\356r\235?\t\316%\342\317\262\215?\374\036\250\243\261\031{?\036\267 xoP\216?x\'\325\324$E\265\277\276\000\007\330ua\252?\2167\2544\357e\261?\360C\347:2E\271?I\314\007EM\343\265?}\356m~\307\365\226\277\220\327\377l\327\363\221?\217\336\002h\336w\247\277\251\336u\231\314\355\222\277%X\315\362\334\034\204?\326s*\332\365&\271\277\270\244El\356\254\253\277{k!\262)0\301?3P\3215\245u\220?\332\331\334\r\236\021\241?\017\313\242\254\325\317\\?\214N\026\253_\335\221?\226 \237\032\341\n\260?\261\331\232q\207\033\265?\225\023\3227\261?@I\376y\251H\276?u\026\266\006\220\263\261?\\C\331\267V\023\206\277i\375\240-~B\255\277\303J\263\023\353\225\257\277\t\037Y\347\320;\034\277\301)\234\340\341\242\224?\204\306\024}D\340\262?m\205&w\345\016\215\277\215\217\342*v\037\264\277\271f\272\0228,\177?\221\227\037A\332Y\255?\341\264ma\234p\226?\332k\036\361\355\272\260\277\247SIs\362\314\211?\306\330E\344,\032\275\277\226D\262\2506\325p?\353\227\265\341\020\335\252\277\316\360}D\325\256\261?\240\204J\244m\ru\277uA\220\340\354\333\267?$\302h\341\277\213\206\277\321\344\204B\010\212\247?R\302wg\261\032\261?\325l\256\364q\352\250\277\nN\t5Q\350\252?\017\263\227t\323\213\244?\265\272)\330B)\230\277\004\372\344\0376*g\277\372\331\371\353\327\242\220\277\270\340\017\271\250\014\216?^_\302}z\240i?\376p\214\240i\303\232?\350\364\362\373\262\002\274?K\263Xu\337\210\207?+\210GnO\257\255?zOw\214<\223\251?%\242\3200\376\247\234\277\n\336Dj\315\310\264\277~\t\022\007\036M\203?u\225\273\376\212\211\220\277\3115[\361H(\205?\261\"\210jU;\235?6\225_\313]\235\310\277\270\334\336\363\242r\205?\306\001\346T\372\364\227?Zt\303\365ju\276\277U4\235I\264\300\233?m\036\244\004PZ\235\277\362\350\346\325\n|\263\277`\005y\313\010\332\223\277\025\177\021S&\004\210?,#\005\000\024\327\263?6\307\267\212\032K\265?\215\213NG~\017\303\277\000\274\273\244\231\257\242?E\262;\314\035DR?<\271\'\341\202\231\241?\365\230\nW\371\372\225?8v2\305\316r\266?\354z\371\330F\212\253?\344\240#\021\266\365\243?\2266,\254\370s\257?\022\217k\246\207\202\232\277\306\222\326\356\'\222\263\277`\245\304\217R\027\223\277^D\232\200Ye\253?\367\356\014\302\025/\245\277\316\356\037gy\233\221\277\246\231\362\247WU\300?\231\232\026\373~\020`?\323\317\327\003\036\021\211\277\t\324 \353\320\256\234?\013\001\373\204,h\211?=\2127Y\377\272\204?p5\001\241\234\233\236\277;\306\304\374\266\277\202\277\267\251\032\254}\203\262?9wb\200W\240\272\2779N\201\372\257\202\262?\376 \361\325=g\257?\303\356`9\236|\241\277\337\n\332\225\365\006\230\277\346\275\275K\360\213\277\277\016\245&\031\016A\265?\002\010\302@~\321\260\277.K\013\034=\343\231\277\031a\265\257\302\030\243\277Lg\237\'1\t\243?\031H\362?(\250\262\277=\347\202\310\r\305\244?\312;\276\304,\222\266?g\3037\255\223~\230\277r\345;\241\017\211\265\277\214\306N2\256f\234?\276\033E\365 p\223?\336\265\016\243~\274\257\2779{\316\372I\254\200?\025\002\027\336\347\246\231\277\302,\026\350\331h`?_\315{\366\337\372\215?m\305\316\020c\025\216?\245\004T:V\024\237\277\216\254\220\007OJ\243?M\205\245\211\252O\234\277k\235(\306\340\276\253?qu\211\031\221\231\232\277\340\222z\274\007\213\231\277_\016\367\341\233P\220\277\3526\232rn\225\255\277\251:\016\304\021\257\237?\365\034g|\007\257\254\277e\020\327:*\006\202\277\035\376\331\000C\037\253\277\225 \264 E\332\270\277\322p[\025\033,\221?\311\235\336\233\r\226\220?\256~\35280m\273?\246\236o\207\366\212\241\277D\274\344\027\327\241\243?\316\005=\2428\006\262\277|\237s\343C\241\222?\354\320k\356)?\247\277\213\030:\275\370|\243?\216tP\315R\276\222?\346\351a\244\003\210\254\277b\3725],%\216?\270\224?\365\341\354\237\277\036\262\342\211{U\256\277\3504Ih\013\227\261\277)\317\206\355\327\371\236\277\201YR\024\321\221\250?7\"\276\225\tQ\225\277\031\364\002[/\305\265?\3432\356\357\356\216\247?\234\251\301VG@\202?\"\272\251\235\242\005\222\277\360\310\023\204q\323\252\277\nX\357\373B \266?\021\267H\027\242Y\262?\325\016\337|\376 \241?\000v\010\017\375\372\203?`i\3700k\210\246?\364N\334R\340p\203\277\017\352l\215j\r\251?\220\331\3274[\036\272?3\240K\365X}\234?D;\340^\031{\244?f\343L\252\335\226\227?q\353K3\016\360\231\277\233\263\242O;\211\230\277*\215\314\335\227\225\241\277\320\037I\350\265}\242?:2\2179\024j\220\277\277\231\304\354_\323\242\277K\364\232\322\207\016\261?\303~\270\367.\245\244\277\231d\265\324t*\225?\315}Yxi\247\263?\035\314Q\305\246\244\270\277\330\031\321A\221\370\265?m\007)\301\253m\230?\336\277L\201~\031\254?\365\211\216\223\321d\226?\016\361#\321P\004L\2773?\350&A\000\202\277\223.\230\r\353\350\210?\327s\267\204\235\217\261?\224Ju\242\224\347\252?\335h\3146\352f\216?\210@\030O:\333\241\277\2511\320\253\031\217y\277V\032\335\305B\006\243?c\327a\312\324o\243\277{,\031\221)\241\242?i3\274\005\004\307\241?\'\2206\003xv\216\277#3\226\273\0163\216?3\000ImZO\222?B\323_?8\022\260?($c\27250~\277\247\373Lw\003Xc\277\247\312\227\224t+\223?\323\335\3516i\363\204\277>}`\227\265\'\256?q\363.\2412\030\261\277\204\301\337\216:\353\266?\234\312\372\334d q?\231W}A*\373\252\277\235\377=\367/\321\244\277\370\276\024\313\203\276\250\277;\025y\001\320\357\276\277\257\303\374\354p\376\240?\n\007M\256\025bw\277\217N\241=\035\260Y?\335\366d\367\273t\243\277\306\336WMn\317Q?CC6A+\214\262\277\323\341w0\037&\265\277G\345n\0341\037\244?59\035\231\3364\271\2773w\267\323\014{\241?P\214a\221U\335}?x \230\370=-\244?\303\335r1\357\342\250?\350hX\377<\353\237\277N?\270\n\301\252\246\277\023/\347\242\021~\245\277\371v]\376}\242\236\277\021\322\203\235#<\230\277\356\373t\004K\035\265\277\027\257\333\207?\377\246\277If\304\333\202\365\276?\363\'\005q\244x\230?\214\245\235\033\332\230-?\270\330\2229X`c\277w\3378\207E\221o?\227\272\260\207\3022\222\277\nm\251\230\010\002\203\277\tU\261;^\250g\277i\022sj\365\314\265\277XO/\274\036\000\177\2772wF\314x\177B\277\0347\320\305+\335[\277\234\366\357bm\213\222?\277\236T\002j\340\247\277\031\267W\213\345\340]\277\361c\227\307`\023\261?%\031\013a\364\307\206?\206\346\343tb\331\250\277z\345\340\030Hd\264?CRRX?\331\207\277\210\246S\320\254\240\232\277S&\0041\342UD?.\353\315\212\234]\262\277\342\2416i\013\016\254?I\2668.?\246\264?#%Q\360j\344\242\277>\212\252\321\203\353m?6b\376/\324\013\253?\354fA\234\252j\254?\372=R\276\364\006\223??\201\366\316<\350\257?\037Q\227\274\346\347\242?\021\227\213t\355!\216\277+^\365o\'d\257\277\325Y~\0328F\246?\357\334\302f@\366A?<\303\"\2267\335\262\277\027\021\202\n)`\235?\006\214\260&\210\177\227\277\273\210\355T%\327\234?\244\255\275+@\020\251\277\215(\226G\316pR?mf\322\003!/\232?xf\330\243\316\005\201?\254\300\025?@[\207\277Qx\251=[\034\243?\261\212o\263\367\310\221\277\n\'\013^U\231\241?\005\247\351\004:\224e\277\373^\230B\002\211:?\212GpXi\037j?p2x\032S\216\242?\321\260S\267\276\343\220\277|\202\247\207S\345\251\277l\003\273\177\003\352\261?T\321%h\370O\272?\021\251m\\z\"\260\277?\254\332\213\226N\230?\372Q\227\2634wz?\033\335\177q\273\264\240?\214\356\303\373z\251\307\2771\305\222\211\326\340\262\277e\260\020Ng5\246?\016\205\314AZq\273?\334\301?d\205WG\366D\226?\243C\267~\032o\276?q\276\237c\333\260\240\277\024\235\224\300\276h\273?\362g\356\317\245\347\266?\367\302<\310\217D\242\277J\351CJ\'\005\257\277W\017\2425\002c\257?%\334U\006\242f\226\2774:\274G\006\230}\277\376\313R\026\024\262\261\2772PK\247\000\340\237?U\207\010\035\306v\237?P\271\213\330\326)\235\277\177\235\030X\355\300\254\277\264\242!\020vV\227\277p[\210\366\332xY\277\325\314\326\t\304\320\222\277\214\247c*\363\306h\277\374\343\" p\376\234\277\266\307\024\274\215\013\244?HP\273s\254\t\242\2774\360\032\2764\010\226\277\225\024a\031\0327\263\277\270.~\214\266G\204\277\304\231\026/\315\365~\277\210\223\277\257%\264b\267\301\253\277j\253<\354\346\335\262?`\232\256\344q\355\240\277\303V\364\342\313 \264?\020\322w\030b\275\301?\200\216\361zU\260\260\277vfN\252G\203\246?0\207vVSo\301?\332q{\223\'\025\220?\371\005\n\267\257\005\272\277\375\331e\266\321\375\266\277\n\007G\314\020\010\266?4\007\354\246!\177\267?\031a\004\241*\344\221\277\007\232\263\030\216\205m?3\022(\036\270\260\235\277\245\261\356\033^K\261\277\200\003\351\246W\364\264\277\222\270Y\344L\267\225\277\007\364\033M\232j\212?\370tC\360\250n\220\277%>\237\244\374=g?@*\270\260\037\314\302\277|\363R\244\3061\205\277q\272Q\372\304\357\271\277z\373\024g\370\024\272?\017\023\232c\367\\\205?1l\244\371\244\300\207?F\347\313\201.\204\230\277\377\332\211;_I\246\277a\224$\010\225s\213?=Be\355`y\242\277!\014`7\007\'o\277{%\242\256\252\205\245\277\205\336\000\235)\364\274\277Pf\223\372\305\333\203\277\0224\244!4\225X\277\236!z\225:r\252?\202\344\002\341!\037t?\2161_H\201\236\226\277\010Z\334[b\202\211?\"*\271\360u\213\263?zV\222Y\020(}\277`\013\035C\242w\243\277\\QT%\273g\255?j\r\354\370[Lg?Ii,j\022Z\260\277\336\326\212\361b\213\207?5\\\370\n\263\021\202? !\024\370w%\261?k\033d\361S\347\210?\257\374(zyX\254\277Ww\375\017i;\263\277\025P\016\215\377F\256?.\032\247\035\2756\243\277S\034-\r\233\332\301\277\321\244OG\341\317\225?\271\020\271\225\267\361\254?\037\206\360C\370.\255\277\n\t\207\364e\313\230\277\004z\206\3613]\273\277\324q\324\341a\245\255\277i\357\334\205\r\271\300\277#I?\254\364\302\231\277\312?ok\364\233\242\277 \277\301\242\244\234\261?T\327\032P\336o\260?\313\264M\376\032\357\200\277\"\262\307W\252^\257\277[\252\2602\246\302\220\277\237\014\214E\312\007{\277l\226|\037Kf\201?\244\n\222w|\311j\277\365\215\227\203a\266\236?\026\033\365 \010\177\220?\307s\027$\374\274\255?\344UK\220\034\275\264?\323a\233\204/um?\257\024\001\241L`\263?EW/\267\211\360\260\277\343]\267r\2431\262\277\220\252\034\270\224\270\252?\223e\2137\231fu?\266(\000\017\332p\226\277k\250\204\256R\223\207\277%\304E\237W\343\257\277\350j\336\263\332&\265\277\301\267*\310\256*\235\277mE@\357\347X\230?\223\001\213\366\253\254?\204\326%\177?\332\255\277\347\2167\370z^{?\036h\210\253\364gY?\312U\370\223\217-\225?;a\341\003H\330\240?_0\356\357!\247\247?\303\351\326\332g\267p?\037x\035\rE\246\217\277\016\021\004\324({\263?J\200\007z\357\227O\277\234A\013~\240\233\261?\244QJ\333\034\377n?\001Qf\200\347\375\245?\220\001\257\332\260\024\243?n\347\226\013\224v\2778\036,I\372\340\224?\365\014\274\322\265\003z\277\234;9\241\275+\245?O~k\016$(`\277\375A^\312 m\262\277\020\005#\010\341*\234?\350\355\303\207k\232\274\277\357t\245\222\333\t\252\277K3\354\222b\245\267?\366\244aO\334\237\211\277\257O\365)\326\376\252\277wA9l4\204\206?\025\314\372v\323QZ?\027\370\307/t\016\246\277{J27\004!\217\277\272\034\216x\037\353\266?\020\025a\377\371o\266?\332\337.}\253\215\236?\241.ZDr\034\227?\326[\n\014\270\370\231?L\351\025z\222lt?)`\001\245`R\217\277&\224G8\223\275\262?0)v\214\027\356\246?\210\346\271!M\375\237?\"\257f\032\032\366\251\277.\345\023\260\037\235\240\277qn+\331\340J\225\277\177\255\241\035>K\273\277\301i,O\366:\243\277d\246\271z\033\006\212\277\3548\266\340\024\351\250?X\025\330\1778\347\207?\022C\025\373W\222\224?a\031Q\307\222sf?\244V\350Wd\n\215?\240@Z.\013\251\205?e\027[\026w\206\257?=\230\010\277\010\226\216?\024\345%ch\251\252?\354\250dL\354\350\212?\326\354\035H\002\341\224\277iM\373\320\203\372\246?\336\221\031\362\037\263\220\2774A\200BZ\355\245?\036\371\205\227\177O\244?\367\0277.B1v?\\\266\r\277\234Z\241\277b\332\252\2414\373\226?f]\022\233Gx\240?sG\304\177\234\264\253\277\2550\010\256\005\337\260\2775\333\323\034D2\272?\322\024\026\344\367\365\247\277\177\351\305\370;\014p\277\215\014\217\304d\014I\277=\225\016\324\203\014\255?M\221\370\221\363z\302\277\246dW\036\352\310\233?\227\"\265\2238\347}\277pwc\250\026\016\214?\304!\202w^\240\256?\2541\256\033h\370\220\277\216\2361X\221n\214\277_\307\347\334\353\\\232\277\365\273\00071\252\240\277\310xE\264\024-\267\277\254^\003\270\256(\233?\316\334~`\032\241\253?\325\352\350\242\264\365\035\277Q\301(!O\237\256\277\373\311\314\017fn\254\277\323\371\367J\255\312\260?\364\201\242\345\260h\243\277@*\207\202\026\177\247?\324\266/o>a\214?n\322\004\031\200*\242\277\344\217\237S\255k\233?\325%s\037\034\312\263\277\317E\346\352X\374\257\277\204\2644\215\264\261\256\277\242\303\2430\221\334\300?\204\330K\t\312}\236?B~\271\007G+\240?\341\352\214\314/\256\222\277i\366\377\317\216\253\224\277e\243\242\237\263\025\300\277\340M\\V~\263\240\277B\244\010\252\037_\245\277\321\355\202H\204\241\206\277\327v8\ta\276\220\277\255OOl\204\333\251?\315\250\203\202\202\336\263\277\226q\364\3334\273\222\277\336\362\353Pu\n\217\277\315S\337\t-\340\271\277>i\246\3159\253\265?\215K>\037T\375\204\277B\236G\025H\215\202\2779\361$+\250/\257?\341\266\324)@2\233?m\025HN+\005\212?{\227\302h\0072\257?\033\325\367\222j\317\233\277|-\322\342g\016\245\277\026\244\254\021\344\245|\277\253\3557\352|uj?H\037&Q\"\245\200\277\272\274; 0J\273?\335\031^7?;L\277\r\316\007\312\342\207S?\241\306\206CZ(|\277\027\322\232>\300\364o\277bAtc\216/\270\277\274\004iS\376/\251?(\276Z\242\177\306\265\277\354\271kL\032\201\254\277\327a0\014\241\303\214?\254\327\371)\313\\\230\2771\351\357m\234b\214?\262\020\273h\344\360\230?q\\\245\034=\360\247\277t6Ky\205\010\212?T\002\263qD\353\241\277\234Hq\217\222\206\247\277\273\335\230\371\010\302\247?\353\0323_I\273\236\277\253\210\nO\360\022\251?WM\001\243y0\251\277B\177\036\225t>\260?\0109\233/\366\273\224\277\000\207\0041\001\276\242?\001\036\346\2644\356\232?k\314\215\325\276me\277\230\264\010 %%\277\277\307\22467\226\250\236\277BV.#\270\037\256?\302\232\304*\226\022\205? xZ\3318\356\253\277\260R\006\217\243\350\234?2l\337\240\"\335M\277o\277U$\321:\262?\026\231\224t\246\373\245\277\272\342\244\322\177\236\256?\274\005\367KF\273\237?\203\177\266PX\321\233?wz\202\035\202SX?Vx\204\027\354\230\216?\367Er\315G\036\207?\300\226\251s\026\307\224\277\267)(\202\370\245\256\277\251if#\215B\202?\346\331\nY\370/\242?\027H\370\2732\364\256\277{\277\n\226\262\363\243\277>\2113\216]\352\201\277\362,\240\370Q:\256\277pIf\201S;\240?\363\023s\245Q\307\271\2770\302\032\210IN\263?\247\366\367\370~\013\260?\013Q\307J\234/\273?\331{\375\207Vx\225?\023\201\360y\214x\261\277\037\024\335\312\266o\247?\026i_\316\370\235\247?%\235\231\213Rs\252\277\343\254\257h\364\217\242?\027\237\323\300\274n@\277\374\367\250\312\'\275\242?w\245\332\201\305\354\214\277\337-\017\236\355K\300\277\245d\005\321]\024\220\277`*\337\226&\177\217?\203K\216Fp7\201?\300\322\037\231_%q\277\n\204.N\242\177\242?\274\224a\272\000\326^?\231V.\207\265\023\215?;pS\325{\343\222\277\203M]\022\340\316\230?P\326\341W?|\241\277\2278\241\355\324y\264?\005\005A\266g\377\245?\332\236B\252\307\236\213\277k\021\370y\376l\213?#\005\206`\360\354\233?^\233\2339?\307\200?\005M\257\200\364\324\261?\262\251\365\\[}\265?\277\275)\256\274\345\220\277\314E2\026\255`\244\277?QE\336\344\261\277\277\205kJ+\351\032\247?M\221>\226\373\247\256\277.\301\010\232,\365\235?\255\264\346%d\364\247\277\364\010\263\304\232Tz?\263=\266\326:Y\246??9O\317\3576\220?\255=\240\331\324,\245?\247z\234\025\304+\216\277.C\334\326\266\212\223\277\n|I\376\204u\240?\232\220\266y\222\225\245\2776,\205^\234\031\254\277\256\016\237u\240\216\200\277\362\373\032\354\222\246\260\277\240>\013\n\224\342\257?<\203\017)\251M\222?\027PwY\313\314\242\277}\030\220\225\312\376\236?\231\331\002\200\343\245S\277\235O7s\005\332\005?\363\227\276\3614*u\277\207D\033\277M4\232\277=gh72\352N\277a\314\034\017B@\261\277v\251!\237\327\227\235\277Z\277\215\'\362\270\202?\276h\202?\345!\261?l`7\331\215\377\242?7\303\215\303\233\260t?\251!{\266H\370\260?N%*\251\315\275\251?5\323,I}\257\235\277\334h\352\325v\332l?\262i#\030\271^\220?\211\206J|\270\231\262?eH\307\216q[\240\277\305\313\376\202\276\311\244\277\036\357I\250Ym\271\277tQhrTZ\230\277\261\273\206)\221\206\226?P2\250 \355\340\234\277\222\347v\341\235\267\261?=.\033+\362\310\242??\016L-\263\377\245\277\252?*\301=\355\231\277\212@m\311\315\253\244\277\2311U\233\254js?\366\211\303\362\200\371|\277\366\333\036\212mS\207\277L\300Ur\031R\211?\222\270\300\305\031\027\223\277\010\245\240\256\257\212\274?\351\330w\247\027\357\267\277\201em\253?\305N;\342\270~\242\2775\364\376\277\0264\266\277\225{\317G\323\250\242?kZ\200,\027m\254\277\313Ni\377\277\270\302?\367\361\330\253\225\322\234?Z\'[\263`=\256\277\212J&\207\256\310\255\277\357U\220\303u`\225\277\300~$\2357\021\273?\314\021\334\202\322q\240?\302\341g\r\246!\256\277\260\006\305\n\265\236\223\277\202\372n\200\340\370\256?y\037BT\372\213\236\277g\244\264I\026\276\267\277\200oXZ\243\370\213\277{Q/`\273\314\234\277\216\355\363\"\266y}?\304\270\032\307g\010\276?\322\251\331\211\364\337\271?\372\212q\354\211\235W\277|x\311\013\367g\243\277*\213\204\352:\233\256?\255\347\241\243\271V\210\2771\215\311N\205D\205\277\214G\027\354\301\217o\277\\9[\025`\203\206\277{a9}\202\036\234\277\231\246\36144\322\253\277\225Z`\244\030\360\256?j\355(\036\311\245\233?\255\372\3707\244x\264\277\243\244e\201\213\000\242?\307(\331MA\207\260?L\303\305\024LV\260?\242K\372\274W\277\260?Ge\376\370P\306\260?\374\342*\275[j\255?\251Y\035\356\177\010\240?\200\rd\270\247?\200?\036\367\243\371\262\277\263\277\020Ec#\270\305\221?\034\002\264}\177f\264?j\301\274\177\304z\220?z1\271\235\372G\243?\322\356\325\352\364\203\251?\367\315\354R:5\224?\234oW\020#H\234\277L\331-AF8\255?x\300\237g\216z\264\277`F2\330&\343\214?\333\301>y>\317\223?\237\226\222\271\301\276\241?\221s\013\001\274b\242\277\270\006x\005\376#\215\277\323\214\345\'\271\010\216\277D\213r];{\256\277\343\257u\350\233Z\246\277\225\342\306\030\014\377\264?\244E\023\037\rc\245\277wM\317\325\036\235\224\277 \035\377\215\260\217|?\331\231\321\220\250>\214?A\221p\327e\317\177\277]\220\211\317MW\264?J <7\217\204\266\277\302\t\257\335\325S}\277FA\3012\2437\261\277o^\361\214\233\200\260?\303\340qo\032\324\203\277\035\374H\2103\232\235?\234\211\371+\207|\261?\032L.\323=B\241?;H\325v\351\343\250\2774\324G\211\376\201\250\277Y\036\221B\301\016\247?\255\0246\364\225\260\273?\362vo\020h\332\223?\341\362A\377\342\253\230\277[\242\376#\364\352\223\277O\375\276j\205\325S?\300$\207j\326w\261?\362\034)\177\226\"h?\301d4\377\255\345\252\277jd\326\377T\357\215?S\334\025\370\275|\237\277T\365`t\346I\224?\2524\264\210g\273\241?\302;\245a1\033\264\277>5\360\371H\303\246?4;\356\0270\331\217\277\001\276\001\3646\204\260\277\004z\342\033\216^\274?~u\007\347\351\321\241?\231\226\033\312_\330\224\277\326\375s\330\255\351\231\277xcyD\376\253\223?\267?\030\357\363S\226\277\312\341\373\007k=\253\277.\324\353i\247\341\261\277\324X\211_e\312\241?\365.F\306*Ky?\344\027J\312\346.\215?\022Iw\272\323\'\267?\246F\320\272+H\272\277\377\243@\273+\315\251?9\2531\271\177\376\"\277\200\304y\r\335\"\254?+@\024\027zn\264?\341\324\276\255\007r\252?-\251B\372E\346\245\277\2607\354\253\nPo?\223}\306\342\366\276\256?\235\003\007\000\006\0016\277N\'\347\256\020\232\224\277/\311\245^Z\031\245\277\313\376K\313\251\210\244\277m\016v\356j2\240\2779\331\320\000\'\373\240?\201\204\026UF\273\252?\276a\336\010\334\010\245?\231\22088i0\247\277]\231\336\241\207\251\270\277,7\034C\273\365\257?\374\354\362\036\270\353\267\277{_6s\363\tt\277\271h\316\351\364Q\226\277^\260\014\255\336\246c?\250\237\236\365\300u\226?@5\177\376k/\227\277\241\316ka\334u\246\2774]\211\005\240u\224\277>\206\271\203\300\370\213\277\224t*X\272\200e\277\322\036\277|\360\206\263?\002w\255\255\362C\232\277[\333NN+n\205?\267\350JM\2447\254\277b\032F\276f\025\250?tA\217\371@p\242\277K\304Poa\016\254?\003\301\267\307\206\231\256\277B\366\241\270A?r\277\367\332\004\3720\036\250?\273\0377\357\317t\271?\224\274\210V%\267x?\017\313\215N\214\210\265\277\204\'c\232\225f\263\277C\ng\207(\034b?\207\202,\300\352Op\277\352\030\373\006x\r\265\277\\-\336\0148Y\213\277x:T\360y\235\216\277E\261\027\337{\004\245?iD\2366\r\275\271\277\375\204h\334\221\202\302\277\207EjB\"\231\264\277\326\310\2242\271\360\230\277\300\214\014\221\245\177\270?&\353X\313\034\014\242\277\331\306h9]\367\212?\212\354\022c\374\n\260\277*\245\014g\305\264\244?Y\301\030Tu\010\221?\205\225\351\003\265$\261?i1\034\021\033o\232?dK\321^\332s\260\277\200\nE 3\n\215?\326\300\331bi\305\226?\363\277\237p\257\\\247?G:9\'\201\362\245\277\314\360a\033k\374m?2\203\207\220\271\253\233?S\006C\026\024\026\206\277q\213\\\344#\334\230?\376\02012pd\233\277U\277\220e\225\206\250?\230\317\0355)\214\263?\032OYk\242\027\261\277\035\212\260r\030\216\241\277r\222\347\330W\013\267?\376,N\202A\274`?b~\326\345\232[\276\277\352\014\377\036\352\305\201\277k\335x\326\003\037o\277e\356|\004\356\354\236\277\231\303\333vF\010\214?>\002L\343\002\t\237?Hg\312\007\246\302\240\277V+\014\215B\347\260\277\232J\\\037\321]\221\277\010a\361\260\327^\245\277\327\366\250\362<\314\267\277\344$\013\207\261\301\212?\371\221K\345%\240\231?\367p\255]\003\305\224?\332\374\177\244\274a{?\377\226-v[\360\215?>v\3502\026m\233\277p\356,c\260\304\233\277\201ig\232\322nw\277Y\312q\261U\n\252\277\323\377\250w$\247\247\277\\\326\312\013\341\204\260\277\210\257_,\373\t\236?\033[\315\177\204\333\254\277\251\030\037_\351\366\262?\265\345\034\265F\327\234\277C\221\201\310dy\240?\224r\226\311;)\202\277\233N\321~\321J\242?L\001\334@pW\262?\017\245\316\221\316/\226?)\211t\271Wj\226\277\334\222E\221\364\351\247?E\207\260\361<:\241?\315d~\207\263\303\266?n\233\177\212jF\260\277\367\355\310i\323\036\313?\030\014)\220r^c?\026\211\362-s\016\206\277\307\'JM\374H\222\277vw\351\001|\271\226?\272FM\310\342\277\252?\326\'\341\344K^\232\277:\340\225\210l\350\240\277\233w\034w0\263\247\277\227\360\335\340\036\003\265\277\234G\366\334\367^\233\277h@\221\205-\304\224?Eo/\324n\236\264\277\215\031~\340\\S\253\277S\333\332\204\233\346\222\277\3530Vh\241\312\241\277\272\"\034\"3\023\300?\243|\0049\220A\261\277\351f\003\330\251\354\217?\220\245\352\'\230\265\247?UZ\375\210\373\334\242?&\0203X\376\001\200?\200P:O\344\244\235?5{>I\370\n\271?\336\234!\372\016\247\236?\352\221\004\336\306%\201?\215g[\\\210\267\223?\tIH\350\307\361\230\277g+\204\002\025\303\265?\001\211\204c\014=\245\277\0074v\224\203\037\262\277\247\303>\032\245\177\226\277\335\037\206\360\317\216\261\277\220\207\363\371G\027\233?\004\236\233\252gh\222?^\234\205q\021\245\245\277\'\'x\243\334\032\237\364\247\277\026d\'5O\244\243?mW\023a\272B\267?O\307\3026B \210\277>U\310\356\355i\260?31\337q\314,\236\277\212\327\233\261\201\203y\277\262\366J3<\274\261\277*\001B\375\227\363\243\277\352\n\372\020\325\232\240\277\205O\366\213I\323\251?\346]\rN6\026\267?m\255\315\021k\263\260\277\023\"\362\027\307c\217\277\211$\245&\254%\266\277\002\352\204\204\031\264\246\277\005\207\024\216\356\260\204\277\262\337\334\202\020\344\270\277=\226\0068\006\337\235?VH\374K\243\260x\277\236\363\362S\350|\221?\371\316\3532:\243\207\277\202\231\025\264\337s\301\277\004\206Q\372\333i\177?\372\364\273\364\206\261\270\277\372fl\002\224\334B?I\206\036\251KE\231\277$\251P\364\360\251\266?\177Wn\260i\344[\277\334\307\001~{\207\202\277\3241\323\207\336\214\275\277E(n\226\222\036\270\277\225\027\377\230\276\334\220?\212\312\251\223\253\017g?\001\200\236\014\010\025\241?\244IF;[\317\254?\267\177\320Z\361\r\232\277\t\247t\360KOB?`\366WQ.Z\251\277\035\321\310\023\026H\222?\026\224a\001\272u\300?\200\233\335\306\004\205\233\277F\327\2476\032\253\222\277\n\t\026\235xC\211\277,?R\343F\024\221\277G\2661\340\371$\265?3\234\3401M\030\234?\302|g!\227\256\203?\251\2027\331\017\336\240?\"jg\312#\317d\277\304M\370\230\301\306\243\277\344\333B\264\277Q\235?\216\371O\256\177D\262?X\311\3163 \n\203?@5\020p\370k\265?\255Kc\243JS\244\277\362\254\206\\n\324\223\277\3452~x[o\274\2773\334\311\010\203\177[?\363G\037\341Y\272k?Ve\311\004\3152\230?\010\024mG/.\223?#\262\2138\253\227\210\277\346\271\324\271Ap\265\277\035\224\250Q\n\336\247?\247\353h\305\005<\245?X\035\203g\224\031\210\277[\232\020\361\367\324\263?\350\210\326\2566i\242?\352\257\236\257\323L|?.%E\037m\270\270\277j\266\277%\306\024\257\277\000\361\255\234\313T\205?\203\020\365W\336\030\262?\352A.@\003\\I?\220\224\207\250\0321\264?\010\331o\322?\246W?M\272\002k\371\301\235\277t\002\325e\305\322\241?\311\353\260\267\363;\237?\313aj\224x<\276\277\262\263\207\265\325\036\213?\331\212\307\332l\202\271\277\003U\234\361A9\223?\363-\304\341\304\314\277?\257\260\372\006\014:\227?\n\307\314\226\0206\257?\230>\343\275\264G\241\277\"\233\247\002V\364\242?\242\000_\212\265|x?\371\345\236C\206\351k?H\210B+\372Q\244?\nK\001\346%\030\205?k\335\237_\217\210\210?\226A\242HP_\232\277Z\262\027\235\217\254\261\277o\025\002\313eu\250\277\274\376i\036\017\345\236?\000\030r\233\353J\243\277\026\272\240@\242P\262?\242@\314\274\337\363\251\277\367\372\003\260d+\212\277N\203\372b\313\365}\277y\362\017\r\2567v\277\n\324\206\363mL\272\2775\261 \274\272\206\222\277?_\363\205n\362\243?\273;#\237&]\210?\nx\315\252cJ\233?-<\004\240cJ\223?\3432\315j\312\252\270\277\324=X\311\205\021\275?_\260\325\007\342.\260\277\251\354\342\010\202=\227?\370\315Lp\017\366\235?\236\264\274~\243\271\261?\303\202\272\376*\376\245?\266O\333\036b\227\244?\200\207p`\215\236\263?w8GB\252\300\252\277\211\022\203\342\035\271q\277v\006\320\037\311\226\247? |j(\202\336\241\277\002M#\004\315p\257?\373\245\360\200\'\375\253\277\230\362\345qg^\264\277\3663\3161\346\250\222\277\251J\212W\244\033\202?\2543b\030^\213\225?\375D?\357\262\037\202\277Y\260\270\375\024\244\220?\371&\335\377\002\210\261?A9\363=\220\242\226?\246E-\003\204\323\263\277t\201;\276Am\203\277~\307Do\000\037\254\277S\3236\331n\337\261?q]t\337\366*\233?H\tx\'\022\223\310\277z\3062]\261\203y\277\3153\372!\206L\267?\202n\301\360zH\252\277W\321\346\347 z\255\277\031&P\356;\231\262\277\013X\017\266\263\373\226\277\377\334@\234\316\313\260?\302\344\003\313\322y\200?B\312\345\211k\207\245\277\025\304w\237\363=\273\277U\205\375IF\027\245?\niX\357F\332\246\277\236\227\245\227]f\272?\235\323>\2618\367\243?>\256\242\265\320\247_?\002\223\347a\231O\253?\354\347\272\307?\323\205\277\032#\341z\214;\267?\340\247[\207\321f\211?\340\365\357.\360\307\227\277\303\270Q\313\220\003\210?\377\202\202\342\\L\213?\251\n\321\326\202\241\220\277t=\304D_\023\260\277d\032\271\243\3679\274?\257\225\315\234\336d\223\277J\004r\312\237\177\240\277B\327\364@\207\334\207?\202\316o\013&\335\227?\2218OY\022O\236?\3725\017\351\345w\203?\211\020\331\'\261r\231?\351\217\223w\372\242\242\277\253\235T\034:.\260?f\270\200\223\222\240j?\243\020Qc\323O\220?~\251+\340\276H\252\277\027Ky5\355\037\267?\222i\330qlT\221?\027\342\274\313\256k\226\277\321\010\345\002Q\231\253\277R\227\362\230\"\001\223?\243\022\230\217{\311\234?96\246\270\024\036^?\223te\365\355R\214?\351\3166\017\262\202\237\277\367q\361B\3001\244\277\226y\311\271\"\r\225?\356\324\305uQ\221\230?\331\342\275r\236\030\262?/\2778\257)]\225\277\n\242 B\353\274\260? ;,\333K\263\206?\334\333\256\300=\343\225?\3076\240\325\223)\257?\223DE +\332\267?\211\230\351\340\370\324\244?\352\327\2422\004\261\242\277\350\277\221q\225h\220?C\'\320Il\\~?\316\243\276v\313\375\220?\253Q\215~\341\245\211?\217\262\363\222L[\246\277\244\236\336\217>\275\222?\\\026\251\376\223\n\215?\341\001\\\3147\221\237?%\003om\256\356\272?t\217!!p\340\224\277\222K\273\025\267*\264\277\336\344\221\237P&1\277\036-\374)v\233\300\277k\0141\305\267n\252\277\212\270F&.31\277/U\352\211G\005z?_\"\352cx\n\246\277\254\353\200\304\342O\242?\264F\2443S\327\227\277\264\035\333(+\211\267?\245\304\210\266\272A\242?fT\200\240d\206\253?>\302\340\324\216\341|?3\036U\340\274\240\203\277\023\001\231*%h\223\277D\372c\212\246b\241\277\374V\333!\273#\223?\307i^\376\220\352\242?\272f\017|)f\262\277|\224\362R\367\262\243\277\223\304}\213\205\243\261?\3725\2417\230c\266\277 n\336\247\334*\245?FU\016\323\334\333\240\277\307\364[\000=\020\303\277yws^\016l\274\277_\307 \365Lb\245?\342\236\351\"\201E\226?\261\215\035-\347\314\261?g\233\243\344\364\025\273\277\374\265:\273\330\014\232\277(\220P\265Z\021\222\277\370\003{\375\301\306\255?S\353\305~\300\376\262\277\365o:\334\014r\262\277Q\215\305\005k\'\265?|\253=e\371\"\272\277Lt;\216\341p\230?\227\306i\020\340{\225\277\357\004#\253O\032\266?n\014\225\262\231\217\241?\243\270\377:D\361\256?\003\334J;\031\001\230?Xi\234\372\377l\251\277\206\312\317F\'s\226\277\346~\350\312\262\226\261?\006D\342 \325\311e\277}\276\355\303\354\313\254\277\237\347.v\t\367\225\277\277/\024b\306\240\256?\267\233s3\360w\207?{\016\243\352\253\360t\277~,\337\225\264\240\204?\200\205\304\215\317\235\273\277x\022M\216\303\275\242\277H\315\017\211_\210\252?m\351\007LPZ\246?\346\220=\307A\216\201?\362\234^E\'\202\244\277j\252\030/\344#\264?\323\255\351!^\316\250?g,\221@5\024\251?\324\3160\335\322\315\262\277\325h\341\335\311\204\253?\340!\357\315>\334\207?\031\027yK\365\256\304?\345\307\007:\321+\245\277\347?\350\003\267\344\266\277k\033\304\362\345\010\246\277\313B\025=\220\301\240\277\365\203\235\3109\355\266?:\215\022\001\304/\203\277\307\211\013\237\357\027\210\277}\372\n\331\026\272\210\277\374\337)\200\366,\252\277\354\013hn\375\021\207\277\027\rNT\t%\260?\2240\343\335E\355\270\277\201\225\033\364/\355\246?\243Z\310\004\3736\231?\372P!\274\335\330\231\277\373Y\\\274\275m\237?\3125W\267d\244|\277\210\323\261\225*,\261?\343o\317\020\204}\211?s\321\024\341-\277\233\277\272\007\273\0046\231\264?%\236\227b\223z\277\277\323(\001v\232l\253\277\317\006?\355\351S\233\277\355~\321\346\240i\221\277\214c ]\376\200\210?\260K\326\274w\306\265\277\363e?\001\222v\222\277\371\2174\013\010\251\271?Bv\274\333Yn\217?\353\210B}\r\204\263\277@a\037~~\315q?\3708\324+[ \255?\026\006#\255?\016\232\211\234b8\256\277D\365#z\004~\241?\235\021\322\321>\203\254\277\315r\233\246\263\027\244\277\330zsBgh\230?\352\341\'\027\331f\243\277\247\230\272B\000%\265?\226JP9(\004\244\277pAaY\242\030\225\277\001\024\240\367\013!\241\277\241\303V-\006[\213\277P\177\355\304\247\323|\277\306z\276\243\363=g\277\367h \014\377\200\254?G\343Q\'\207n\236?\032\234<\347\305o\300\277\237,\033H\3735\253?o\220\242\333s\301\256\277\337\251\277&\341\360\234?\026)?c\010v\231?\211Q\240\372dl\233\277\330Ek\257uA\250\277\000\262E\265rp\212?\215\n\036`\317\256\267\277\373.f\036\263\262\245?p\021\260\264\367\325\217?E}p\323\270M\261?-\302\324\361\364I\201\277\211D)\362Y\244i?\361\224|}\276\013\255?!\246\035\332\324g\266?\000\343\302\250\262\266\227\277\223\236\033\303l\224\261?\3210\265SO#\215?\240\214\014\204\374/\252\277\304(=\306b\264\264?\271\363-\006\273\016\235?\267^\212%\245]\247\277\352\t\227,\2444\246\2771\202\177:3\343\225\277\030c\237\354\036?\254\277\207\201\000\335^R\244?\361VN\216\335f\226?\220\030\264\356\214\207\245?\363\030\275\201\214\013\230?Xc\036\265\203\302\255\2770\244\264\314\330R\200?\313\003\304\034\231\027\234?3\360\213\237\204\373\220\277\374>\311Eg~\221?u\302\267.\340\376\242\277\322s\353\246\247\262\227?Ac\021-\0223\207\277\007\023\024G\001c\253?\342\360Or?\000\244?\304\232\ny\257\302\210\277\231\013E9\351\276\230\277\221?\215=q3\230\277\367\314OT\306&\277\277\224\236s\245\260\276\224\277\232\374<\202\351\374\205\277\252\321B\355\307\346\277?\ne\206W\253~\262?Q\022\246k\310\255\247?\217\000\311\316\273\223\246?\000\326\3449\226B\264?\371u\313\3461\232\221\277\211H\376\345/l\263\277\271\322M\033\224\014\260?\275\0066)\360\233\\?4l\217F\2434\252\277\200\375.\274\207\264\245?\230\267\"\355\353A\244?\336\274\372\321\026\260\237\277\177\372L\244\361\315\241\277\371\303t*!V\241?\303\177\001\264\314\374\261?Z\361\034R$\202\266\277\333\320[U\002\342\255\277\361\253\333\3659\365^\277\355\301`]7G\211\277\216\302\355\371\375\274\220?\265\260\363\273\017\002\230\277\020\252F,\337\376\241?\265\237H\004[\312\242\277z)\231\262\014\310\254?\254\370,\255\217\250\260?\340\317P\004\353x\253?\255\303\344\207O\205\245\277\340\260\347#\205B\263\277(\224\034L\260\272\241?\006\031qe\331\'\261\277\177\374\335\204+\270\242\277\245\355\207j\274\034r\277\357L\205\2621\006\254\277r\271\244h\272\277\262?\342\361\274u\020~\213\277\t\252\315i\037\312\241\277C\003\2200ZL\231\277@\032\247\314\2344\207?\362\2620\216X\303\205?t\213\364\374}\360\260\277\322k/3\362&|\277O\311\202\272s\031\262?\300\270\270B\216?\217??c\227\222,\301\260?\257\030\247\234\177|p?\314\232\274\204\327\302\227\277\202\242`\271L\217\301?~\036\021\242zS\244?\215L\327\"\247>\221\277\351[\205\367xI\226\277\222\324z\371\325\326\246?1J!\376\334\001\240\277*S\225\205\006D\254\277~\337\016P\007\336\211?]\037\020\307;\342\261\2777&+\357KV\222\277\266\334Zn\346\365\177?\221\340\r\303\352\310\240?\034\344\317\301\005\311t?\251\031#M\256\375\262\277\307\371\333\255\342\304\207\277\307\345\302KK\347\246?\304\254p\236)\034\200? \304\221\317\005\273\246?,;OI\003$\250\277s\230\351\321\364\005\213?Q\351}\220\352\316\276\277\177NZV\303d\202?\327D\325\235b-\256\277YR\362|\311H\250?r2\247\226!\022\253?\3644\211\300\300\027\247?\375\305\004\332\300\312\237?5\341\257\225\035=\225\277Z\275M[\032\347n?x\312\023\220DU\203\277\225\201\331\361\261A\251?vb\201\2578`\260\277\t\375\010\270o4}?\014n0h\253Bs\277vb\030\301_A\240?h\377\034\224\024\302\223?T\212OEa\3328?\022\337\223\253\306.\216\277r\3662HF|\262?\022zf$\343\257\226\277S%\373r\002\003\253\277\312w\211\303\236e\230?UB \234u\227V\277\223\363\377\314cf\224\277\370\326\224\352\234\300\266\277C\017\246O\376\272\243\277\302s\no\003a\260?\275\222SZ\243\210\203\277{\025:U\010\360\177\277\347\305q\255\221\000\257?\342\342/k\324\300f?\374?h\220YM\271?\241\330\014(\377\362\177?PYi\020\371\021\204\277\350\0221c\\:\240?/\223\255\033\325\266\243\277k\217s^\363\336\246?L*Z\216\034\322u?\251\273hO\220\256\223\277+\200\211\310\347M\232\277\260\352\263\336xt\255\277>YJJ\033\307\257?\275-\253a\313N\266\277d\244\3377jr\222\277C4\234\007\376\201F\277\333\214\206B\213d\212?\026nG\002\244\366\207\277`\262n1\335\337\231\277U\246s\206\311\n\225\277\344|\257\347\022V\213\277}k\372\337\307\357\276\277\031\227\277\304bo\257\277o\034\003rZ\315r?P\372\327\275\\J\243?d\231\271\305MO\251?\251C\316\221\257\304\246\277\211\233C\276\000Y\254\277~2\200\366\323\333\236\277d7d\010\2443\234?\353(J\314P\251\231\277R\317\007I\026\361\262\277\220O\210\270J\025\241\277\330\034\240\014\356G\264?\0027 \333\235R\271?\313$\315\255\227\356\222?NF\270w\243\364\220\277\334M;\352r\022\220\277\324!\337X8\351\223?x\264#\243}\336\224?G\246\322\252\360A\266\277/\277\335\336uB\243?z\353\226\202\326\205\257\277_\013\032\033T\374\246?\376\300\231\357\002\376\260?\024\263\326az\tf?\035\365Y\"\251\233\216?\242+\375\0307\253\231?H\246\341-\247\014\251\277\315\\\001\354\224@\261\277\0144\215R,fw\277\254\262\302\210\264\323\243?\217\212\331\326CB\207\277y\347J\317\371N\250?/f\020K\247\220w?\226\326\323_\370i\247\277\027 \345\357\232\313\252?\177y\212N&5\246?\\\024&\365m\330\204?u\030\"Q\024\\\253\277<+a\334*\223\257?f\030\372j\037\r\224?\234\315\\\342\227\346\227\277\034fo&\353\253\262\277.\261#\207\316^\234\2773\017g\275\\\321\241?\261\260\034FE\203\267\277G\3327\210W\316\250\277\267\325\031\346o\022\301?\204\2646\022\270\030\262\2773k*\177\005m\231?,\233\374\025(_\204?VMJ\273\275\360\264\277\003OE\271G\303\203?a\241\211V\2257\260?\251\005`\312/\226\226?\\\307\271\372O\027\243\277\206\313k\013\234\241e?\334\020\344H\214;\242?\000\237\300\266\243u\241?#>f\252\333\356\260\277X^\207s\224$\266?\3520-\325j1\256?\231\372J\235\036\304\277\277\n\014\306v\322x\241?\312Z>\330\363\264\277?\226[A\242\355\210}\277\313+W\020c\376\261?\340\020&\235\222\226\222?\262-\313I\272#\246?\276\3204\346\210\202\250?\204R\347{D\354\251?F\261\324\310R\246\204\277B_\205E| \245\277.c4\266/%\232\277X\n\177\246\257\007\237? \177EZNU\207\277\004\242\211\360\266>\231\277\300\273\327\210`\374\271\277\311v\333\\t\250n?\025\tk~V>\221?\360\013\306K\001\"\261?Nz\376\311wt\240\277e\320\'%\002$\242?\2068Z\366O\315\260\2770H\006%\231`\222?\217u\307:\331$\216\277\376\310\363\375#x\253\277\220\220zgj\000\265\277\233\017w\236\177\326\204\277?\022m\035{\224c?`B\344\303y\302\270?p\243#/\311X\240?C\007\271]\305,\212\277@\212\001\323\024\211\274?\201u\r\027\272\005\245\277\242:\306\017\264\347\237\277Q\216\213\373\347w\246?\243\212\026\235\213\300\254\277\255x\251\002O\227\235\277\271\000Oi\033\266\206?[\304\r\210\202\314\223\277\302\351\377,n\233\246?\245\341\247\347\266]\260?\202\367\342(\251\360\222?\001\3579\005X\206\236\277L\347\035\313\272n\261?\027m}7t\007\262?\341Pm&1\207\262?\200\207o\374\231\211\233\277\227N\340\215\225\372\241?>P\367\241S\214\210\277f)\033\310\305\333\221\277\342\004V\205\373\345\207?b\333&X\314\301\261?[c\265hO\315\205\277\375\210%\252Qc\263?:\354;<|\277\214?\265\311\223\203\013H\255\277\370\275\337\342\274\235\247\277\215x,\027\227\310\223\277\021}{\217\254t\240?\336\234\262\351\335\021\260\277\035\226M\332K\335\231?\022\273\256\364\304B\270?\323\037v4-\361\225?\301T\371T\332p(?vk\t\217$L\271\277\024\2454\'\247\207@?\340N\204\3661w\215?\030DyY\302\014\230?\'\302\341\231\375?\223\2777\222\232H\232\223\240\277^\323\241NJ\243\201?\035\217l\254\371J\232?f\357\343)\001\355\232?g\302\253\252\222s\251?b\232E\013\3756\263\277\337\\\234\354\217U\236\277\230\224\005a\264\310\272\277\"3\301\265\321\270\273?\317D\205:\235\201\213\277Z;3y\203\320\264\2771\366\2038+K\246\277\352\307\274\025\0258\264?i\362Dp\220\371\224\2779UC\006\016I\245?\301\237P\365\331\317\275?u\222x\n@\320\262?8\326\226\222\211{\243?:d\203\241\201|u?\3347w\252\277\326\232?D\325J\242\226\263R\277\373\364\222\334\270\005\267?!\302\332\320gN\234\2773w\321\016\335A\230?\270\017~\260\303x\263?)d\251w\030\224\241?\370\032\251V@\363\221?\210H\251t\367U\241?\204\026\316\301\023v\215\277\n&\177\210\247\213\261\277\266}\017>u\264\254?\005\241h\';\254\243?9o\023\r\237\231\210?x\31627OG\261?\3621\006\361v\022\237?p\241@b\255\013\247?\327\342\262\316\224\336\303?\362\331m\240\241Z\243\277?\002\260\275X\273\262\277\310nO\351`+y?bo\222\252\035\037\272\277\030\314\r6U\246\270\277\262\321\232\242iT\237?+\262w\013\251l\241?\361\037\360\227\334\027\220\277\206\312\310\313\0323\270\277M\027\351x\322\334\206\277\203\206n\313\343\333\245\277,\236\3151\316\205\224\277!/\354_\273\340\262?e%\374\333\213\243\243\277\354\021\267,\235@\242\277\340S\350\252`\377\222\277\256\\7x\264 \257?y\256y\256\214\317\200\277\302\341\221\271\327d\247\277/8\355\327\251N\222?\273}\014%\"0\260?\322WB\351\255\222\260?\323\315\177\343N\375\250\2776\226\221\336\227\252\277?\351LOr\"X\274\277y\217Rk\362D\256?\020G\221\203]\340\244?\375\330KM\276g\224\277\300\016>.\241#[?\031>w\255\2028\250?\375rD\242\366\255\250?zU\372f\215s\265\277\335\2461,\327\305\233\277\200H\307\202\273X\260\277(>A\201TC\263?\013\0377\002\246i\242?P\027\'\375C\325\254?\201\033\307\310B\315\255?l\342\024?\342\350\273\277a:\352\234\255\320r?\022\233\330\370\245\235\256\277@d\017Xh\022\261?n\261Zb\247G\260\277\377\211\315b\363\'\263\277\362\\\337\010V\250\251?\036\202#\325\305\212\274?\257\035\221~24\267?\n\365\017\177\243\217\243?S\231\311\\\374\251\242\277\374\257\343\236);\227\277P\205\310\232\034\306\227\2773&D\236\224\222\240\277\335\010\273v\250\017\266?I\267\224\004d\305a?>-\262\0209\'\255?&\037\2222\324\370\244\277\271\000AN\034\350\260\277\236t\237\372~\022\234\277>\0107\342\321-\257?xA\211\356\035\263\243?\322]8\001\267\355\207?F\253\037\363\016p\233?0dxf)\323\221\277h\206_c\232@\227?\227\252\014v\271<\253?\'C\010\031 X\265\277\007\236\336\003<.\203?\267\276\\\204\312\364\240\277\267\347>q\246\262\256\277B\262\204l\341\203\252\277J\321\341\376\374\014\262\277t\002L\344C-\227?2\364\343\372\031#\247?\022`\323\256\003\215\235\277\3430\327\330}\376e?\312pG\362.\021\255\277\352 \216\344\361B\275\277]\362e[\0324\200?/\363l\t\240c\244\277\021RPOh\347\276\277|\351[$\334V\264\277\"\031\344F8rs\277A\202\263\212\260\251\246?\247\314u\223=\037\301\353\250\277\265\316\264]\374O\273\277\231\354\356\257|\302\245?\273\320\374\267\347\021U?\367\366f\'\227\255\267?LC\270IS\203\217?\234\242(\351Vk\263\2771d\342\2167\241\251?\022\330L\333\354\273\253\277\010\313\333\010nR\223?\037\036\241<\274\200\270\277\325_\216+\317W\243\277K\340\253\032\301\273\245?\264a\251^\267\000\242\277\246\345\265\314:n\232?\246\257v\353Kr\220?\314\0146\r\022\347\271?\216G\271\231\341\353\224\277\367\351\3634\305\246o?\223\375\365&\353&\262\277=%\210$$-\254\277f\253(\270~\301[?\355\316\341\004I\316\266?)\373\277;o\351\213?\203\014\337\"\266\232\253?B-\255\341\004j\222?Q\027\264\004)\372\247?}f}\234_J\255\277\332\336n\346\200j\241?\327N\213\206\341\266\265\277l\360\352C\213\361~\277\236\016\\\020\305\277\200?&D\n\262y`\260\277J\'\263\316\256I\226?\232Go\307p\032\204?`\3215\243\034l\226?k\031Zt\237\"\272?\246\321\342\316u\314\200\277v\255\037\000\302\354\252\277us\264\320\027?\225\277\250P\225\362\273\211\215?\\\344\033\314\224\275\221?\254\0308Nf(\206\277!\\Ry\027\273l?\363\244FX\014\322]\277\177\237^\245\250\261\265?\220\2666\374\006I\263\277#q?\003\331v\301\277\005\253\tJ]\364\256?\213J\000\302\320\217\251\277\240\350\361\374\361\304\231\277\343T\211\211\225t\224\277\014\322\232\377\277\'\253\277V)?cK\305\236\277\247\370\263\264\234\327\227?p\375}\023\372\252\244?{\301\223\021\014\322\270\277o\336\305\226\203\347\235?G\257D\363\346|\266?\022\241(0\006;\250?-`\234,\3721\242?\361\300\311\232\031\353\245\277\221\241\213\230\036Hw?\n\202u\342u\271R\277\033Fp\366\230\355\273\277\314\346\210\255\221\212\203\277\024\261\343\020O~\263?\026\373&0M\241\260\277lo\313\342\366\322\241?B\262\307[\265q\260?\210i%\365\325\215\251\277=\026f\245v?\025\347\317\275V\205\216\277\274{p]L\035\213?\202\002rG\003G\252?\367\363+7\224\357\252?j\310\306\302\206\345\231??x\277\213\332k\212\277\377\030\374\360\032\270\214?\005\344\354\000:\266\241?\377\351\213(\300\311p\277\350\304\360n\214\013\247?5\303\327\243SL\227?\241\331n\352\335\267\252\277\252\251}\026\266\332\220\277\264\230\331\375\271\301\177?\373\220\037\372=\000\222?\351\305\227\017\301M\203\277\350\010\r\242&\310c?s\220\352\374\255\261\232?\300\r`\376*\341t?@\247\213\304\032\241\256?\215\213\315`\213\371\300\277\261B\207\244$\361\276?.\266\002\036\001\177\251\277\331uV8p\263\263\277\307\256\273o\345\360\226\277n\244\350\235\216\213j?\027P\271p\202m\256?\025d\312\020\245\327\243\277\264.\005V\'\'\241\277\315\014\2154\265\216\266\277\3037\000\025\363\003\240\277\202u\246\233\271\325\034?\310^\334:$\246\242?\212\305\037\024\220Y\215\277\267\242W\257\246\014\226\277y\001\254Kf%\221\277\252D\210\r\303\005m\277\257E\333BOO\271\277\223\032\363\361\010\031\241?\361o\005\362\324B\222\277\342\025\272\317\367\231\265\277\377\214\305\205Tl\237\2774V\"\233\270\361\241\277\254_\"?\313\250\246\277\002\222\024\034\033\202\303?\365\033F]:\364\264?JQVA\360\304\225?YqK\003\253\353\253\277\324\354i\312\036\260\222?\233*\226\230\335\202|\277^\377\264\033\375#\203?\034}\277\025\260\340\245\277\215\353@\024uf\247\277\346\023o\310<\321\272?H\341N\360Z\001T\277\265\215V\2710\016w\277e\227\033 J\367\227?\217\026\251+\016Z\265?Y9\266)\\\312\353\263?#\247\224\230Y\210\234?\225\271\357XR\223\253?\340/$\204v\235\241\277\021\322\013jq\276q?\216R\262\306co\210?\032\324\\U\312\306\242?@\354J\\\212\317\247\277u\314z\254\342o\201\277\262T\033\224\361\\\244\277\314\031-\302\272\326\275\277[\343\331\202\t\265\264\277\327~6\241gj\237\277\2605f\372\272,\226?-\212\340\255\0313\224\277I\023N\216Z\376\205?\323\016p\372\241u\275\277\306\224\303!@b\225\277\335Q\312\301\352\360\227\277\017\270hbRj\233?\026Lm=\256\256\250\277m_\362\201\200\204\201?\365\370\243^\214\212\253\277\242e\345\252`\353\233?\256`\267-c\014\241?$\017\374n\222\212\224?\237\316SQ\324z\252\277h\310\245H@\276\226\277\344\357H\312as\234?\346\206x\230\337\267O?\311\253U\213\212\324\246\277/C\032\260}%\225?\020\221\233\354e\026\221\2779\301\351H#\243\253\277\312^|}\341\032\256\277\214\264v~\336\263\231?\001\\U\262\031 \250?vM\022\331\234\370O?\201A/\363\267j\226?\334\266\375b6\332\236?\216v\241\330\324\022\230\277\250\243F2_O\243?\236\372\222\274\364\222\235\277\332\3262\343\010j\262\277}:\300\330\350\017\265\277\232S\314\247Ac\256\2774\271(\246\341\236\204?\n\000\204\353\342\013\227\277\002\360\372`\264\305\230?o\251K\303\026F\206?\361\354F?\020\303\251?\340\320\277@\226x\302\277\211\354mT\222\301p\277@\022\027\301\000\376\262?:\224\263\365\251\n\207?\006V\253\271\325\t\244?\033\tj\3049\373\215?\230\205^\305\303\004\210\277Jc@\n\0072\251\277{)\205>\2308\247\277l\"\311\003V}\221?\317=r(]\035\227?h\234\034\t%\377\271\277A*\212\273\306*\242?|di9G\272T\277\232/Z]\364=\257?\377X\225\211\355\n\244\277F\357\023:U\343\267?AV\214\024\307S\262?\206\021Q4\204*\260\277\261\256P\322\003\025\257\277\350\354]\000\004\277\244?\001\377\225eN4\220?\264\332\017\3028)\261\277^F\314\260\2329\254\277\320\327\021h\245]\244?HR\353>\252\310o\277\320\344\035\016j%\267?h\341,\'\002\247\254\277\250\323\005x\375\256\213?\234(\316E\342\203\250\277n\257y\302\204\274\227?\252\220\227g\273R\213?\321`\314\201\024X\237\277\275u\333\361\035\232\242\277\377\322\213\222\n\255\246?\rb<\316\332\314\255?\226UW\302\021\207\260?r\003}i\0313\245?\364f\207\321\026\370\260?\352N\034\3203\271\250\277v\322\256\341U\204\254\277/HGH\033\353\277?~\'\267Y\'\316\225?v\t\374\177\201\037\257?\014\274yT\351C\256\277\013\017^\316;/\232?}\357\326l\337\\\235?\037\356\017\233cf\260?h\200Wp^a\237?H6\036\344\373\t\223?\240e\320\354t\206~\277\243v\324t\026\313q?\235\205:9\230\016\260?\352\320\253\255\311o\244?\332q\270t\345\033\201\277\276hFU+\355\243? \013\374\265\235:\246\277\255Wz\326i\332\256\277\223\030\254\215\247q\246\277\273\255^p\260\201\254?W\217\240Y\205\013\226?\n\214\260/>L\225?\000\211FiI\303\255?I2IGl\317\212\277\307t\226\004\266\213\245\277\273\323\367\311\335\263\221?c\364\2362\023b\234\277\375\302\312\355\314\364\254?\220,f\272\025\t[?\005\177\266\343.9\250\277*\031\245#\357\037\243\277\347:\367\324\321\227\262\277\336\345\243:t\376\260\277 \215\354h+e\250\277\'\373i\323\204\233\257\277%\356Z\210^\202\223?/\353bP\225^\202?\006\361-\272\362X\217?\326,\363\033\'\372\240?\030\351\304*\220\214\234?I\246\026\345:u\210\277\276\031\"Q\014\033\277\277p2\003A\023|z\277R\226\243\274\372\034\252?\201\"A\340\034\326\201\277`W\306m\310]\264\277\033o\236k\265N\242?\300\025\333\255A\245\263\277G\213\321I\003\314\214?EVBI\270\216\212?\346\035\273\254m{\254\2771\002kt\335?\221?#en\221\333\364\250\277\332$@\271\340\026\301\277\224+%H\037\006\235?\022\3067\020\362A\301?\014`\223\t\210\242\260\277\257yAa&\034\247\277\236\251\027\206\366\332\266?x\360H~\357K\262\277\"(\355\t\276\265\216\277\370_\341\020S\010\221?\246,m\321\026\315y?CV\030Vs\264z?,\356\255\366S\365\263\277,\364\340\345\027\334\257?\033\022\027\222\244\n\267?\177\245k[\276{\246\277\'\342\205AQ\354\214\2774\3746\035Iq\212\277P\323\326\263\252Y\272\277H\'3\237\355+\275\2773\301\325)\212\244\232\277\211\242(\026\341s\301?\316U\202z\235G\255\277ze\241\263\245\346\253\277>\304Fb\334^\251\277\033\330\310~\335\204e?b\307\256\336F\226\271\277\310>1\010\021\272\241?\202\254_\255\3674t\277s8\254\341\370\036\264\277G\021z\2019\240\261\277H\r\\\001\305\217\263?\022\265\313^\357\350\266?EA\272vM\'\204?\363wHy\240\253{\277H7\305Y\235%\245\277K5S\341\037\263\246?\205\3169\331\255uy?b\027\270\205B\\\250?\251\034\364\261\200\262\247?\276\031g\337c\266\262?\024\023a\002\327\262\256\277\326\331\216\tc\\}\277\336\306\'A\356\226\262\2775\356\247+\345@\300\277Y\013\304o\323/\240\277\253\301S\264#J\267\277A\311\207o\232M\254\277/\272P\203sg\240?\366\003T\210\310\372\265?;7w\264\355\276\243\277P\251\266\363\376H\251\277\013q\371L\233!\236?\301\231\235\220\250\353\210?`l\335U\251\362`?\335JK\343O\031\300\277A\347\211\214H\022\236\277\267#\304\2050\344A?\352\024\033#f\303\270?A\021pV\266\334\227?^2\0353\362S\216\277\237J~r\276\373\300\277-K\324gb\325\246\277\010\345\371^\371Z\206\277\271\342\213$<\233\221?\304@\235N\330\375\252\277\010\025Gz\002\341\267?\255\364\331\nZ~\240?}M\206\311\207\342\270?\212\310\245\001;u\300?\030\n`1\334b\262?\363\205_\273\305#\200\277\037\031X(\315P\252?XC\334R\210\250\247\277P\263\210\272\242\364\262\277^\355\0061)\342\245?\327]\2151\3457\245\277\206\252\030\261cH~?\t\375O.\307O\236?\266\240-\000P\270}\277K\030\032\014\030\020\243\277H\r/\236\213\005\224\277\346\025\263<\300\342\227\277\024q\202\010\n\200\275\277\332#c\226\230\347\243?u\020\374i\334\276\226\277\t\341\323\304\316=\240\277\244\217o\244\005L\237\277\233\350\237R\243q\263?^\0041\272L\375\255?I\312zB\024\226\245\277M\324`\004}\220\253?\005[\"%Z\224\247\277x\035N\377\214B\203\277znH\322~W\202?x\336\352\212\266\204\242\277\212\n\2505Eq\254\277\\-\366\243Z\325\260?\330\276\360%\235\\!\277(\272\325r\333\307h\277v\366\266\202\026\264\250\277.\005\261\312=E\211?\302P\\-H,\260?$s\253j\037W\301?y\272G\231\340#\271\277\310bVNVe\234\2774v\357\030d\223\247\277\247L\0024\315\212\207\277\271|\n\250U\r\250\277\311\232\006\237D\374\241\277rf\357O/\312\273?_\357!\244\351{`\277x\330\323}\260O\234?\360\220>\357\032\205\245\277\346I\003\374Q\224\230?\'\225\325\037\350\375\214?zW\231#\327&\241?\350\277;\031\002\305\214\277\327\002\035\034(\376\213?\334\334\316;\363y\243\277&\230,\006K/j\277\016\t\273\316f\001\273\277d\224\372\026\367\237\222?\305`\221\031\371\201\262\277\251\035gi\001\205\261?\335\002\317\033\266\223\270\277\310\013v\3105\266\240\2770\331\247%\216\221\236?\023,\352w\3145\235\277\036\341C$\374#\224\277[\263}\266\275bw?\177\306\245\251\003\t\260?\203*\350\271\337\255T??\002\311\273\231\014\234?\204\225I\000\340\260\240\277&\353\251T\201\377\226?\017\031\347\035X\033\201\277\372\000\265Xfq\275?w3f\\d\276\223?\262\317\214\351\214\r\234?q]Z\242\020\030\260\277\242\220\024F\344\340\221?\232\331\336\365\315\023\300\277+\213\261\021\005\202\202\277\231\212)_\036\275\262\277?w\253\353\244F\264\277\347\227\313s|\322\266\277_\032BR[i\247?\310n4\240\365.\227\277E\356!\237-\253\267\277\244\253NJv\264\247\277@\373|\255>\312\240?\233\000\253\247\327\303\271?\374\314&\275q\372\250?\306c\263\001\025D\243\277.\215\373\272L\274\227?\224BX\275\300G\246\277\364\240\303\244#\014\241?\023&\224\023W\026\221\277\262Q(\205\367\334\252?}y\3706=]\254?\246N\252{\253\331\264?\033\233\355\361\036\335\241\2771\341\227\330\266h\255?\002\033t\216\345\016\214?K\356cx\226[\253\277\354J\351xT\366\241?\310\005\335\303k\226\251\277o\216`\265\002Mp\277\216X\224\313S:\215?\226\003\036\r\263\246w?\304o\2366\360\300\201\277\233\271\216wi_n\277k\267\313#F^\255?\025\'R\366:\346\263\277\310\'\216\367n\247\224??\022\002\261\303\350\222\277\222\370\326\256\003?\250\277\372\226\007\226F8\236?\306K}1\3003\262?\362\367\261\214\2103\243?H\374H:\306B\223\277r\244L~\200W\262?\003\022w\220< \260?\264\330\344Rx\212\234\277\021\253\001\204\221\332\233\277\000\225\203E\214\350z\277I^Y,\301\335\236\277{L\231X\312o\262\277\363\356\260Y\237P\246?\321M=\330\224d\202\277\350\376\233\026U/\206\277{x_Ak\301\177?C\231}Q\363\031}\277\365\233S\306\235:\246\277Z\2042\r$\316u?\220\345\322\0173 \256\277\320\371~\306a2\266?m\033W,\340\274\245?v\3719\230\211F\224\277\024\311\3559{:\267\277\347\025\n\254\016\034\270\277\017$\245\376\206\366\264\277b\372T9\354\264\231\277\034\272R\007Jp\223?\0059pFn\250\243\277zl\020\206\234\363\273?\326\r\352\311\304\357\206?\0142\313\362\256\023\254\277\214\351\006\0326%\254\277^\001\024J\203\355s\277d\271\317\300\264\367\260\277\373\034\306\354\363a\245\277}\017b\312\216\230\240\277\273b<\2128X\253?\007,{X\270\017\252\277P\237\232\342\r\227\261?\275\355\004z\325\327y\277iD\360N\324\371\211\277\361>\276\017\340\005\277\277\210)\207r\022A\203\277\351;\325\315\007\223\201\277\005\037\211\352\221\342\253\277+z\306:\366G\240?\364<\367:\235\233h?YC_&\206\217\241\277`\241\351%@\204\263?\215f\2619\374\307\236?\010\372\232qn\260\207?\330*O\377\261\007\207?x\271\200\274c\242\261\277)\375g2\021\033H?\320\362\017\030\263\266\222?F\215\320\013\317M\253?\027\243\245\204`Zg?\016\372\345w\225l\264\277&\330\275u\020zj?\326\361\340\365R\020\235?`\240\363\035.\036\224\277cQ`\245fFx?E3\262\023\202n\247?\304\020~\n\237\254\274\2776\0030\267\344+\266\277\032>\372\201\340\272\226?\021\251\236j\345\320\277\277vh\317\235\210\334n\277\255\247\301\324\337\363\247?1\032U\003\211\000\242\277B\023tM\322u\226?()\326Da\220\256\277`%\221c!\270\261?\027\000\265\355t\313\232\277\010\"aN\357\017\247\277\354\336\034c\256\341\247\277(\r\332\365\212\027\225\277\3209\2161\316c\276\277\326PU\367)\376\242?\313\234\226e$\313\220\277\215\336\240\331\306\311\225?\355\274b\177\025\200\247\277\222g\261\346\331Q\240\277\262K0 \217\000\205?\264\365\023Q\315CV\277\316\377-\323\356l\227?M\254n\000\025\304\233\277\251~\353_\304 \240?\271\332\332\271\037\261\260\277\255 \211\202\242E\204?3\343\372M\007\367`?8\312TF\320\310\243\277nsS\3129\355\250\277\023\366\337\221\362\'\234\277\025\035(\215.F\242\277\001Y\247\344\003\020\247?\2737\330R`\'\262?\361,\2457AG\250\277T\376\037\025\360Z\220\277)\250\351#\037`a?\202#\013\034\224\347\265?p\202\236\227\021\017\251?\251\242\020\255He\235?p\2328\".\310\230\277tZ\261L\271\245\224\277\266\210&\263\263\260\250?\311\241)\251\324\257\243\277\005f\340\316\337\300\244\277\353\346j{\036\n\256?\312\003y\013g\337\241?i\262X\370\367\034t?\2718\241\363\302\360\266?\220\324}\177\2214X?\202Hh\215\363ry?6\344\260cJ&\261\277@1\322\342\033\235M\277;\307\354\025\223\254\246?\274\276M\302\370\005\207\277\353&c\346\034R\260?\231\331\255\332\023\250\243\277\321\014\312\266\007\037e?[\203Y\326\003@\231?_\244\202:.\r\263?\244\2370\262\310az\277\352|\254\377\373\274n\277\242\350!\204J\220\265?29\032p\020\274\247\277\316R\247\344\007v\270\277.\005\031>\202\350\261?\252\033+\001\"\224g\277\252\016]\326\356R\246\277, \254%\303!\233\277f1\320I/\243\225\277\363N\313\344Bs\235?6\317\260\220\241\361s?\307\346\241\315;i\252\277l\350XT\342\320\260\277\353\224\270\364d\177\222?O~U\251\224b\257?\003\301\034\237\200a\276\277\332\221A\322JH\233\277\017\024\310\031\003\336p\277R\226g\300\273\272\262\277\2771\330B\023e\215?\310o\357#~[\237?\365r\303\3372$\263\277\203N\246\335\022\263\241\277)\276\003RZ}\243\277\005\rZ\277\355c\253\277$\2754\353\335r\255\277\3132\016e[P\240\277_,g1x@|?\301\243\242\367k\241\221?\314\216$\032l\366m?CB\303\310\022\017\246\277\311\224\242\324\271\257\243?q\211\031Wk_\227?HUX\240/F\271?g#\374\342\002\037s?\245\240\240\024\247L\243?\366\342JW\240\260\232\277M\366;r#\314\256\277\311\006\017\357\'\335\261\277hA\007\315\016\363\255?\376\306\3308#\232\177?\321\246oX;J\254\277\177\361W\025\002\334\227?BzV\207-5\246\277\350\241\t\017\014_\230\277S5\216\246\\\337k?\367\033\"\221\361\332\246?D\322\3368\263\366\226\277\305\216\373\026\270\274\241\277\005\362\211L3\r\265\277:\273\367\354\350\343\242\277e\264k\302/\331o?\340P;\030N\201\243\277\017\033\025\346E\245A?\025\323\257T\211W\263\277\261\0232\n\220\356\263\277\017\023\312\250\355g\204?\354irF\251\'\255\277M\270\341\210Z\361\257\277\322\340\376@\324\335\242?\3024X\363\203\300\261?\265\375qja_\212\2772\177\2359\326\004\244\277\n\005\022n\217\206\260?,\373\037\'\035\331\260?A\375\205\237\375\222\227\277^C\374c\340\362\207\277\345.3\372\271\0303?]\351\252^\021/\250\277\211{\323s\322\316l?\230\252DE\t \273?\233W4\326\333\021\254?\301L\016\242\227\247\217\277}\031p\312\272\203p?z\023m\253e\017\230\277\2549\r\216@o\241\277s\025\322\n\214\206\231?Uh\33521{\244\277\216\021\256\245\356\363\224?I\226\366\2137\215\221?\201F\255\262qzk\277\344\256\024\344\\s\265?\221\325x\303\375\262\244?a%\223\265\305ps?\315G_\343\225\035\240\277@\242@\316\373G\260?p\363\275\276\364\364\250\277\237M\356\000\266\337\207\277\214\006/\301n\240\244\277\013\377\365\214\3424\252\277\301\252\310\376F\257\\\277W\006\336\222\314U\237?n\007\265\205\340J\241\277\303\362l\306\315 \242\277)\017\324\1771\243\266?*\315[/\373\257\242?\353\365/\341oy\250\277\005\3477-7=w?\377\221Ckt\345\213?[\376\2547\301\252\216?[J0={Hu?\224\323\"\001bP\276?\007\367\036\3606\321u\277i\250(\365\250M\256\277\244\203e\361+\361\240?\371v*\026\253\327\273\277_\314\203z\0362p?4\'\344V6`\263\277\330\361\223\213\266Dz\277*\310\r2\361\332b\277\r\306\034\257\364\245\223?\031V\236\254\204>\240\277_j\265\014\253k\272?\211\367\1771Q\242n\277\245\024\230\216\315\307\261\277\362Wa\021\261\203\247?\007\236\365\345N\276P\277\261\327{\255\027\322\214\277e\373Q$x\321\257\277\321\247.hp\026\250\277\036Bb\325\023\352\250\277\217\266\202\323+3\241\277\207\177\227~\333\230\212?\323+\357\'\273\373\262\277<\225\006\017\311N\204?\241\315\320\206\231\361\263\277\315\036&\303\341\220\237?\272q\010fM\366\227\277\343\334\002s\241\224\231?\373\303\000\210\313\236\236?\337\034Jl\0223\202\277\302]\310\341\262u\264?T5_W2e\231?g\n\346\242i\302\234?\230\r\251x\036\237k?\2725\207\260\200\217\250\277#GT\302\r\274\247\277\376\267Y\374\254\321\254?{\250kt}\376\251?\326\324\370\tZ\214\246?\274\020\030\"\266#\250?\346\211\237\2151aT?\246.t\256_\264\233?\010\264\265\351\"\006\236?9\270\206\333\334\207\240\277\366\220E\"4\202\217\277KX\250\262\376\323\203?&K6\251\354r\266\277D|\250k|\337\220\277I\370j\230*-\207\277z\270\252/\346\321\303\277[\262\371\375\021\202\224?^\233o\332\004\353\233\277\336\230\225\035j\310\242\277)\\8\301\276^\225\277C\255O\273\277\323\273?\017\010\256\252\226\254\261\277\274\2235\377g\225i\277\034T\306X\271\304p?\"\205&\234\3303\250\277h\371\246d!\222^?\241}D\347l\205\244?\251\r\344\243P\001\235\277Y3\327\274\270\000\233\277\013\213+\2238Jt?\340KAo\366x\300\277_\013\204\005\232\331t\277o70B<\206\245?\334\375\350JO\032\212?_.\034\035\305\322\260\277\256j\014\270b\225\240?P\226\361\361\330g\276?\031!x\331\362;\251\277\020\016\366\342\376\303\226?\331q\223\201k\231\242?\246\360;bWT\201\277\206\r\273\005\211&\225\277z\200\235*\343\237\300?O?I\273\014\007\250\277\227\204\002\017\240?_b\204\351&\263\250\277\020Z\027\240\373\224k?\214c\253\023\325\326\224\277j\257\204g\273\233\210?\241J\310\177\343u\216?}a)\346/z\262\277\271\321\2444\320+\251?$\371\273\371\220i\230\277R \324,\341\030\224\2770\246g*6\345\223\277\312\336W\325Ye\240?\312\364\022\271L\014\210\277\376rp\206\006Y\250?\241{U~\201\323\253\277t>\036o;\304F\277\243y[:\362\334\264?\343\204\266\316EY\274\277\353H$\277\245\024\254\277\005\027D`\267<\266\277~\017\356\213?\201\252?+\253\\\354\"Z\241?G\243\250NMI\240\277\354\241g\0026\037\254\277#\252\202X\204\014\246\277\253\241\313VV\222\270?\302?i\361\252V\235?\370\222\177-\241\030d?\331~\210@\375d\256?\014T\276\325\315y\260\277hZ\'\014~\315%?\374\312\'8\226op\277\350\356\364\337\257\361\224\277\tQ;\020\247p\246?\352G\355\276(=\261\277@\331pv\332\361\261?\306\346\237pP\032\230?\332\023il!\032\263\277\320\311\245\236=#\276\277\2207\2659.\354\247\277\212\037\320\033\020\223j?\013\275\2111\313x\206\277\030\"\203\310\3552\223?\373\341\354(\302\352\243?\316,<\362)\006\257\277\370\257y\200.\305\271?\253\204\227\331&\372\244?\347\222\022\342\274x\250\277\244tnE\227\314\203?z\'\255\260\033\376\272\277\225\307\213\341\025\031\246?\363\222\277\244o\234b\235\221\227\277\210\303\235h\237\200\241\277\322UM^\":x\277\010\335x\016\001w\201?g1\021\335\254\307\240?O<.H\371)a?#ykj\222\235\250\277\365\365w\374\0274\241\277\325?@6\023\026\244?p\014\370\332\026\254\200?@U\372<\000\340\225?\017\354\007!\346\367\252\277\000<\303K\266\007\254\277\352zq\004\316a\230\277i\\\262k\020Z\261\277\201fc\377\304`t?\344\033\220M\304\n\243\277\334f\306\243\177\311\241\277\226o\305\333\222\264\205\277\014\227^\035\372I\217?\342\323(\250\252h\302\277\264\225\3106\254\033\255?x\222\202\310R\246\257\277\035\220B\215\361U\252\277AI;-\002Oi?\306\365\rY\212\202W?\'\372\212\0033\301\265?\327-|\032\rP\243\277\332JmjTx\274\277?\222\257\030\237\030\263\277\270!\266\031a\214\236\2778\343\315$\226\225\261\2772\\\200\252\016\341\240?lE7;\265=w\277\267\023\306fu\034-?\376he]\020<\242\277.\340\243\210\311t\264\277\264\001\255i\275\333\255\277\373`h\267s\024^?d|M\0244H\237\277\3118\021\277\023\204\230?\243\274\246c\017\000\213?\3043\030\021vM\217?\222h,\267\326D\222\277:>\223\3104\022\220?E\036\0034\225U\215\277\243\373&K\312\201\221\277\322\031\317\031\223\202\235?\0310S\343\325P\204\277\376\326\276M\274\332\267?\342\327\357?\022\262\200\2777\007X\327\325^\225\277\371m(\365F\025\252\277B\035w~\tq\252\277\023\020\314\356{Y\205?\323\341\227\225\303\321\247\277\014\"\207\326\212\341\262?\0229w\020\235\205\246?\206\016\237c\303N\240\277\344\237r\005f\256s\277\307Vw\253\337\354\200\277\305\327;:?\305\257?\247\366\365g\037\036\250?\257-\357\322q\324\264?3\341GU\355\323\240?\206\245\227\250\253\312r?n\3272\350\315\320\216\277dWu\020~x\255?\201{~8\337\"B\277\220M:W\365t\237\277\351\314\024\316i\252\227\277\264\365\276\236\226\232\240?\275\255bA\177\261\220?~/k\\\233U\202\277\267\212A\306\355:\255\277\034T\034n\010\001\244?\237\351\300\241*\346\225\277\031\214\227X>\303\247\277\366\343\300p\316\323\204\277X\217\230\306A;\261\277\303\340\351\277 \226\254\277u\242\214\310\355\220\275\277m\274\267\006\217y\225\277dg\023\312\351f\300\277\3136M\266\275U\230?\352\227\236\200\\\326\203\277\350O5?MP\271?\335\027\362OvN\243?\323\375S\250\215\360\250?y\214)Z~\216\234\2772D;c\264\231T?#\230\320\206\214 \235\277E\177\263=\t\r\247\277\2715\354\025\320\346\241?\325\233\275B\254M\247?\356\264Q\345B\024\251\277\274\017f\251\3178\234?D>\tL\255G\240?\301\261\236W\203\355V?\367\007\276)\203&\240\277\330\220\332\330m\362\250?\346^`\253\347\317\262\277\347\200u\252\334\357\201\277}\321\\:\3008\243\277\000\013}\371\002+\301\277\001o\332,Onw\277w\020\244\313t\374\223?\236X\327\2279\023\266?\214\366\2201\320\023\252?2\021\303mCC\301?N&\235\277/\271\244\277\331\3323C\006~u\277\376\235I\246\266L\220\277_\010\331\023\237\304\246?O\256t\253\005\275\225?\246\321\027:r\022\227?\214\274b\314\234:S\277t\206oC\235Bz\277\020\017Q\255aZ\222?\243|}\240\\\366\221?x\003>3f\201\256?D+\312\242\317\331\300?\251)*\202\273\021\274?\210\342\356\023\350y\204\277\353\214dV\314,T\277\263\312\327\330\302\260\243\277;c\177\217\357\245\227?h\224h\254\2322\277?\330\246\323\343\307p\242\277{\301rX\330\210\220\277^T\010;+\350\251?\215;\236\373\005\343\260?\254K\303\r\255U\225?Dh\213\r\000G\275\277\355x\023\2556\236\220\277\322\270\021\r\0030q\277\036\243}\330\316\037\253\277\366\021H\007\372\310\222?4\024\320gV\361\265?\315\362\357\257\303\257\234\277yC\273\252\353\212\220?\013\330R\321\325\030\220?5\264\3760\035\355\302?r.q\201\314L\204?\000b87\213\310\245\277\343\331x\3712\266\215\277o\036\357\363\343\315\262\277-GfV&;\247?O\317\n\332g\363\260\277\255ty\272\277\014\260\277\224&T\010}\346\245\277!\025A\250\311\253\245?\216\310\243\227\312\223\266\277M\374\261\370\"I\271?\271\247\214?\343\331\363\267`\002\227?F\027\\&*#\266?\017w9e\240A\231?\273\313\215\303\245\266\273?H\365\332\271\327\035\261\277\207p\360\274\320\005\243\277\037\357<^|\273\273?Td\016>\025!\267?OGU\017\360\345\263?\210J\256\rYR\260\277\027!\304\\\354\363\247?\234P\215pj;\251?\344\224\324=\375\275\226?\272\252\216\265\010b\265?\234q:q8G\227\277r\333_Qr\331\264?:e\227\211\323\223\251\277\205\257\336\246\321P[?@I\"\225\"u\231?Q\271\353\2312\366\257?\037L{1\341F\217\277\032H\265%Q\211\250\277-\204\227[\234\304\240\277\273\213\324\335\333\032\232?YV\022pw,a?\324i\350\220a\320\251\277S\3102\034\0148\266\277\237\374\326\023|\367\202?\310\304\267\370\241;\245?\236\014>4s6\220\277\357\317\2453\t|\235\277_\226\224\274\354\200\262?(@B \203\010\205\277&\321\034\265\374\357\226?\257\311\353b\241!P\277\253L\370\024c\006\255?\275\357\255\210*\275\233\277\265\322\004R@v\245\277\265*\252=\023\223\251?I~\211\213[\240\247?\2200\323\000|~\246\277K\342\001\300\301V\222\277&\204\244X=\206\266\277L\276u\272\222_\253?\306\233\265c\341\035\303\277/Z\266\020B\203\246\277j8\330\240y\361\177\277\023vC\034\022\357t\277\033v[jY\257j?\205\212q\205\nP\301\277\364\226\277\277\312\327\251?1\212D\224X\232h?\022\350X\020\0343\260\277\3464\273\017\351\227\255?\373*\310V\352=\250?\036\030\253Ea\013\237?\277\357\324|\201\006\244\277\'\\d2Z\221\272\277&JX\364\000(\261?2`)\344\315\247\247?O\222\331m\036\240\275?8\273\376\264\274\270\217?\177\255\033\343\200\244\203?\271\236;\275]\221\243?\027\022\224c-\\\245?\304i\330\030\'\013\207?\213x\336\303w\265\233\277\2670{ROA\245?\2658l\346\031\212\214\277A\354p\315\014Y\227?\231%\034o\340\036\217?%]u\321]\254\242?q\026f\256\234?\251\277\334\325F\266\237|\206?\277P\301r\355w\257\277\356\205\003\035}\231\246?[\002%i\345\214\261?\3151qpL\320\231\277?Ly\236\246\324\241?\237AE\261\370\231\242?l\252\242\2605#\267\277!H\226\247X\364\241?\025\323\007\224\316.\243?\2316\255Fh\377\256\277(\262\006\221\346\345\225?\356\267\0229\312\277\260?\226\207\014\250\037\363\230?U\353.\007\336DZ\277\377\246\263t\375\345\224?U2\032\313$f\267?\0219\336Q\336\013\266\277lc\003\366\357\271\253?\211\027\305\"\263,\271\277#9\342<\300\007\254?9\335\256\371rKy\277YDi\004\212\346\237\277\245Y#*%p\260\277\251\260\022\2716\364\260\277\213L\232V\226\002\235?L\022#\001\247\027\261\277\237(\3512/\306\264\277\'\361\036A\253x\271?O\267#\361\305\361\266?\035x2\377\227U\227?\201\206p\335J\325\225?\264\265\351N\336\211\214?\353\032Dd\240\367\266\277R0!{g\r\245?\334\210.\'\r\341\254?\221\347\246`\034a\206?\257k\013\241\257\274\233\277\002\215\366^\301\013\225?\rS\024\\\326R\226\277a\005\376k\341\355\204?\270gL|1\362\250?\336c\326\177\323F\255\277b3\326.\200\035\245\277\236\020*\214e\227\237\277D99\254;ju\277Y=U\224\256\253\261?s\024\362:\376\216\205?P#C-\016\033\262\277\350\351\360\021\257T?\322\254+\323\014{\267\277>\364\245\256\246\204\254\277a\314\316\257\306B\255? \025\327\212UJ\306?nu\177\2250\237\262?\242\3006%\331b\212?\222\265eg\356\263\236\277\014d~\271\346\204\201?\266\355-\007\201\221\272?P\242!\032\337\004\221?\030{\010r\363\035W\277\324H\220fd\177\224\277\215A\343oe\"\245\277?\351\332k#\021\254?\313\026\360\235D\n\242\277\276\204gF`\361h\277\354\3072^\354b\203\277\317\333@\310\221\241\206?\226\017\345\"\221\261\235\277E\261\255\245\233\330\225?\345,\\\257$\023\267\277\203w\004\005@\364\224\277\316C\325\032\336N\271?,\300{\242\204\216\260\277\375\344z\002A\302\246\277K\374\025\030\337j\240\277%y\003\354A\356\246\277\365\360\207\202\010-\246?<\311\320\177\001B\230?\376y\030\311(\265\242\277#k\3442\236x\230\277\265&5\213\2211\212\277\023s\336\017Uh\226\277\352\343k\307\271\205{?LGd\222 E\264? Sy\322[w\267\277T\237\230:\340\010\232\277,\200z\'{\013\241\277\361\375g\010\374 \277?\204\315\255\305\036\'\260\277\321\306\323l\017\274\244\277`\013\264\355/Z\245\2770\331]\270AA\246\277l)\013\004[\245P\277\000R\210m\000\205\265?\004m\327\246]\310s\277\325y\357\341\327}\234\277&\341ng\210\270\275?\206y\271\342\235\177\252\277\317\036\214\030\205\345m?\034\315\341z\327\264\225?\0044\222\334\302A\207?eT\3514\252\017\233?d\001\320q\265(\210\277X7-d\365\261[?\303r\243\375\177o\220\277\344\337F\352C\035\250?\245\340\002|\222\233\217?9\231\202\355\000\243v?OF\222 \226\247\240\277|(Z\247-6\214\277\222\n\263\327\273\265\224?_\366\255\244\361\374\246\277?I\215ll\001\221?\264l:\372\256\177\261?\034\225\346\237%\204\216?\'P\321\341\253\261\272\277\261\273Ji~\254\223?\226\256^(\200\210\232\277z#\237\204e u\277s\261\024T`\275\245\277\236%\352\247&\312\220\277+\367\016\'\377\3024\277l\306+\027\'\213\233?<\241b\001\217\343\260?\022\027LN\321)\222\277\224\316\245\222\377\267\201?@6U\375n)\255\277\255W\002\371\263\362\254?\323\352\032\'\016\362\222?=\205\222\340\324\343\231?+67\303\213*\256?\335V\273A0\345\234?\007\206Y\210\352\037\240\277\343\0026o\017Y\253?#\343hz\231p\267\27720\034\253\260\301\225\277d\003\233\010\021\213\256?\300\303\223Wy]\247\277\247\3348h\001\364\246?C\365R\224W\315\262\277\217\342\234\362&\236\255\277\332\357\376Ln\300v\277\243@\234\270\227\315\236?\021\307\325B\026\001\235?d\315/\035\312\305\261?\265\244\373\0228\206\247?j\217+\217;\355\267?\344]pm\266\027\222?\2310\023\304\272]\216?\226\030n@\315n\232?@-\207\366U\356\264\277\375Y\372\013\037<\216?2\227=\t\267\023\252\277uz?\257\306\241\270?\003\t1\234\035\243\210?\314/C\374\356\326\255?s\313~\240\257M\236?\016\217\360\312\003\030\242\277\267|e\234\001u\251\277\375\321X\177\331\\\262?\311\3641\262G?\260\277\337\247M\202{\303\221\277@\215F\245\020\205U\277\003rBH\2139\246?my\0033\3342s\277\233&\340k\362\261\275\277\321$\265\271\3300c\277=[\031\315\305\263\301\277\210x:\230Z\245\240?Pd\257r\314\221\304?e\035[\031\352)\253?\032\226\215\300w.\254\277B\250\373[\030\344\242\277\201\n\001.\253nj?\242\000\322\313s\330\244?\267z\2216\020I\253\277\303G\267\341P\214\240\277F\263\032\2065\243\267?\201\033J\272A\231u?\001\304G\024\313\216\223\277d\330\265\200\366/\243\277\034RK\331\205i\277\277\352\247\227\247\032A\255?\260\253YZa\271\201?;\233\370e\210\227\242?0!t\261\014\340\262\277 \2236C\315W\251?J\020B\244\010\207\222?\213\031\203\352at\261\277\025g\213`\345\n\220\277\374\301\263y\037\362\272\277/\233\210\003\201^b\277i\316\360\270\026x\262?U\377\214\366\375\246\254\277t\232\243\264\343>\212\277\271Z\036P\007\372\251\277\007f\027@\366l\265?\036K\372\300o\242\265\277\301N\022T\307\221\242?\246*\2303\255_\247\277\271.Ct\377\252\257\277S\177\355\236j\257\256?\231\220dA;\217\234\277\273\377\315\272\361d\221?\303\t\234\034\363k\251?7\311]B>O\266\277<\024k.\224?\277@\330o\353\230\201\277\243\221\2427b\277j?\265,w\3739D|?\305\3071\222\347\323\247?\221\212\321\035\"q\273\277\002\251\312W,1\266\2775\370f\227^\315\226?\024\351\273\003:/\251\277\204+#I\344~|?\263\345z\217X,\244?\241\0135\235\242\251t\277\276]\241c\361P\237\277\371\343I\334\020~\252?\265S\247\033V\360\253\277_y\032\371\374\262\243?7^L]\207n\262?J\300\262<\327\312\232\277q\002O\341*e\240?\261\351\003\216w\200\275\277\363\307\004~/\364\203\277B\247T\317\033\200\210\277\345\375\320]\326\343\204\277Y\034\227&\232\356\252?\253T\322y\244\203\233?x\210\361\211\003\271\222\277\205}*\203\326\334\332\271\277t\030\330z\222>\253\277bNA+\202\207\207\277A\321\213\224.^\241\277\376\313\236\340f\035\307?\376\334S\016E\003\227\277R\326\tH1j\254?\354\001|>\335\342\200\2775Y o\324\362\263?\005\335\007\034\\\022g\277Q\362\320\352\035\022\250\277\242\265[k\020pu\277 \372VXw\374\236?\020@\336\371\365S\242\277U\301z\002(q\211?d^<4\246D\300\277V&\327\204\365\332\242?\3648D+A\250\261\277\330\263\002D:u\242?\027\261\310Z0\332\231\277\277\276>p\030r\224?\305\3723?\234\251\267\277/\370\345\001w\302\223?\360\307Y\006\334]\231\277\316C\315\351\322\275\242\277\001\032?|\363%\261\277S\374\025O|;\242\277\321\323%\212\214\331\214\277\351\313\233\320\311\336\235?Ql\000\017\235+\241\277\324~\236\300;\000\210\277Um\014\2038\234\227\277\343N\340\030\312\274\223?w\261\261P\221\021\245\277\236\256\263%v\030\264?\020Q{\375\345\362\243\277\0034\310\330\337\031\301\277X\302\223r\025\034\244\277\365\260F\255\311\"\222\277[\314\362C\375\207f?~nU\233.\201p?\267f\341\367q\227\260\277\276I\301\314\362\231\202?\374\243\0213]\035\211?\255\013\r\010\245\361\272\277\031\177*\303X*\240?\207?\025\315n\373\203\277[2\354\027\336\207\243\277I\311\257Y\233\214\257\277m\032\3374\032\312\224?\347\314\230\022\r\353\221?\316\211\005\211\345\245\233\277:w\022\265j\004\303\277\267\333\262\266Q\004\224\277p,\265\235I\224\252\277Y/\303FsN\256?2@t9\211\330\262?<6\207=\310\305\265\277\242\021\340\2355\026\246?\220\361\030;/\361\227\277\236\263\023\254>\177x?\223]\310\243\264\313\246?\352\351BO\255\367\272?\223\0305\273r.\230?\247\305\177O-\220{?\253\007\352\235\3240\255?\025\254\354_\337\030\230?\325\302Y\202\206\014\200?\333\251\223Y\362e\260\277\207\3473\373\346\220\200?,I`\367dt\207\277\267\356\037&\266\371\240\277\337\261\276r}\026\234\277\262\314+^\214M\261?p\200\210@\366#\233\277o/\324g<}\302?\034\347\016(\264\245\272?\324.\320\226\211l\243\277\233\023\210\232f\306\206\277\002\252\325\201\375K\256?\342\260\3662\305\200\265?O\031\205\360gQ\274\277\325;\330[\034\275\242\277\307\266W5Sd\253?\275\020v\367\3146\250\277\005E\'\020\277^\225?\371\277\264\217NP\252?\037\214\200\327\335\026\220?\177\236\360n\034\n\223?\001\r\274*o\301\244\277\317\222\305\264\017\354\261\277-\210b^\035\302\262?F&gw(\226\235\277\022\010\0204H\236\233\277\320\217\017\205\262E+?\3348\261\337\026\030}?\224\272\310V\250w\255\277\311\335KLy}\235?x\025\316\220\305\202\267\277\r\271\216q\026\204\222\277\232O~K\221\226\237?\274`\216\302\342s\240\277:\035\010\306\203P\275\277\352:\212}\273`\244\277e\303K\t\320\361\201\277\207\301{\323\013\005\243?ivD\026K\343\252?\234\210\2250r\217\235\277q\221\371w\016Z\244\277b\216\220\335\241>h\277\354\354\350\342\253!\266?K\272\3317\367\305\217\277c\033q\364\367\236\237\277\n\207\013\2404X\254?!\376~j\354\036\247?\271\237\031%\373M\237?\260\027W\014\037l\260?\222i\310,}\275\244?4\266Z\236B#\234\277!\322\262\233\312j\263\277\236\317\030\225m\205\244\277\366-\246\360jz>\277\003\371$\305\nP\200\277k\377\"\375O$$?\360,\033\371z\037\232\277\r\3311\362\307n\266\277\021\002\212S^H\227\277\010\237\205u\027\313\255?\371\t\275O\262\227\252?H\220\203\325&\367\247?!\2459|\030\205\274?l\026\342\257~\230\200?$\355\000(B\355\246?.\230\374\010X\363\301\277\213J\302\201R\206\232\277\206Y\255\2472\212\300?\214\352~\255\016\246\242?F\353g\363\322T\233?\017\220\327\244{Or\277\254\326\332k\236\200m?v_>\001O\336\223\277Q\352\215q\202\207\250\277|8\243\364sV\241?\363\\\252iT\200\203\277\374\276\032a\320\036\265\277v\207k| \220\242?\362.\221\361x\020\243\277S\210P:\230\002\224?7\357\261X\345\267\253?E=\213-! \267\277\211[N\322F\356\237\277\265d9s\311b\240?\000\206\204\310\316\000\223?\r\377\023\016\210\327\215\277\223q<\343\\I\243\277\305\226\263\274\253\023\272\277\260\206Km\010-\246?\226UI\362\000\356\240\277f\022\257X7N\252\277\324\374\225\3273X\247?\216\346M\377#\004\247?e\255\023\203\337\312\242?\345\242\330\344\217A\247?P\212,.\343\307h?\236\210!<\212\375\254?\3753\233\036\225N\223\277:\3109\000\342\311\234\277\\L\371Z\307!\246\277\263\177\377-s\003p?\303#\263U0\177\253?\230\t\216\006d\337\257\277\343\303\331`g%\266\277\24448\344\233)\202?\020)\271\037\022\333\221\277\233\343\210\312\257\231\226\277\026[*\355\320-\206?|\361\321+P\204\201\277\366\251\255\363b\354\260?\027[\3371\360\245\221?\375&=\311L]\240\277a\t`/\304\207\226?\271E\366[\221_\261?4rm\206?G\240\277o\360C#<9\267?\365\177\023\371Y\335\233?\266\225\\\233.\301\206\277\213K\311\016\206P\223?\343\'\337$I\226\263?\001\301\240-\362\022P?\332.\274\230o\267b\277\327 \336\321r\367\203?\002\240\2105\360\022\251?\231\346\342%\365[r?\247\335S\370}\304\177?D\302\031\322Z\357\201\277#\237\221\274|U\247?!\361U\211\367A\260\277[\234\n\313\361t\227\277\255\255c\316\320\316\224?_\264\027\263\227\311\200\277\223*\300\2308\363v?83\360M\023v\262\277\362\230[\332\030^\200?\323\311J R\325\260\277\254\007\034\017\274\022\226\277\000\034\231\233\242%\241?t\255\334\277\227z\222\277\374\253%\346]\236\274?\241;L\250]A\255?b>^\262\274x\263\277\034\032\365\001\276\036\247?\2029#N\303!\271?\253\266\000\375U\246\260\277\004Y\346?g9\236?\357\305\211\307\031\016\236\277\000&\350\254\\A\234?\366\221h\t\346\253\213\277\262\312\356}\306T\213\277\242\r\020\253jO\234?T\312\025\034\022\264g\277c_[\002\222\304\216?\342X\r\320\361s}?\242l\223\263k\033\221\277D\244\370\300\224<\252\277\033\024\216\376\365m\244\277\240\315\014\335Fw\243\277\331\341\256\276\262\275z\277\0216\247\234h\352\272\277\354\036\356\234\224)\243?w\346\242\362}\214\271\277j\2729\033\226t\240?\336\264\264\267\tS\222?\036\305\037\330\300\007k\277\243!\353vkV\215\277\314K\025\231Q\377\234? \333\250?\303\313\226?k\247\316w\260\365\236?rfL|\333\253\275\277u8F\353\007\366\217?\337\274\235\216:\005\246\277\027}\307\002\225\306\241?$\356\205\020\264\017\261?m\234\230\r\363\344\261?\220\023V\375\235,\302\277J\253o\240\316X\234\277\346\224\3441\341\241\214?\331G\022\306}\002\216\277(O\025\034\247E\227\277\372\t2\224\373v\271\277(\002\r!\0075\221?\334urx1|\244\277qQ\002\260\323G\216\277\261\262Eb\n\274\251?X\000\367Z95\260?\r\360\307\372y\360\241\277=\272\021\231\3667\223\277\206\3511\362\277\241\266\277<\317p\006-\335\247\277zd\324\307U?\222?M8Qw\364\255\234\277\372\210z\007!j\247\277\034\346\t\010s\265\252?\255\030v\3629\201\271\277j\244~4^\005\265?N,\236p\202\002\261?k\253?\244\202r\267?\204\217\027\004\223\177\203\277\375\343\301^\206\t\207?\276\001!U<^\220\277)r\207\321v\345\220\2775\241Qg\323x\252?\244\227h\365\245KJ\277\223\254\317\006\216\234\242\277\372\216\264d\r\333\240\277\260\310r!\304c\300?\236qB\034\277\367\227?\334N\222\232\365\232\240?\032s\211\255\313\316\230?f\344\n\005\357\226\261\277\305\314\030\242\252\205\254?\271\021\374\000\002\274\246?E\311\312=\177\326\213\277/A\221Z\324\342\255\277s\263\253C\277\032\250\277T1Rs\\+q?\276\207\014Pv\377\256?\007\310 \242\310\'\300?{\300\375\201\322Bz\277I\236m6Gn\273?\365\206\360v\277\353\240?\327:\265\255&e\272?J\377;\\M8\224\277\037\352\006\305\n\210\254\277\320R\231\231\363\263\215\277\202\255\361\027\026\240\263\277\310\315\36755\330O\277G\262E\271S\033\260?\271\215#0E\013\303?\024\277\257s\256\276\252\277-\271\323\325\016\353\222\277o\361-\252\237\356\253?\007u\242z\3060\200\277\001.\2040g\200\242?\3377\322\305dd}\277&#\253\246\321\344\270\277`j\247\305\370\231\254\277\212\374\322\232\271W\267?\020|\006\223\263R\213\277k\013\203^\327\270\233?+OjO\246\032\247\277r\327\256\26774\304?=\020\225\230\206)\257?\362\240\027\205\323\346\264?\247,b\246\201g\266?\322\373 \367\037K\224\277\276\352\343\372\226)\232?\027\231\376\311\2707\240\277\020\321\035z\303:\227?\250yX?\327,x?w\320\274\007\273\320\255\277\253\337L1\355\310\277\277\007\215Hg\344\375w?\377|T&\\\223\220\277\2228W^9\310\252\277\333\366\260\377\376\334\224\277\3325\254\n\224\316\225\277\226t\341\365\'\360a\277\356G%8L\221`?:\307\376R\024\n\226\277\257Vd7\325\354\213?z\300\245\235\256U\232\277N{\021HdW\244\277\251\205\351\267\313\251\250\277\276\253\232\234\t\035\242?\266\'\226\014\276\352\233\277b\"\362o\355\366\243?m\214\242\344\307\367\231?u\376\342\027\313\233\220\277U\221\314\245s\026\201?\237\222\351,\234\026\270\277\361>\325\356T`\247?\347\241\371\345\307\260s\277\367Ak\003\\\235\216\277fg\r\354\271}\243\277W\331\005\317E\251\242?\021\362\204\202W\244\271?z[\007\234Hbc\277\235Z\204\342\234\335\226\277)\032\202\230G\305\260?\324\243\202\364\340u\207?9\260\265\214\000\267\250\277\243o9\307\367}\300\277\204\177\307\272U\030\222?\266\333\330r-\013\260\2771\016\266\374\352\024\247?f\234L\301\360\005\237?\036\017\266\326\277\036\250\277iks\035\226s}\277hA\365=2\247x\277\026\343\273\364\210\225\243?\032\"\324\234F\007\251\277\220\315\330\202\372\001\220?_\334zL\265\033\217?M\2539\0175\262\261\277\344@\335\270!T\221\277\302\202\354B\235\376\236?4\2601J\347\035\236\277\261\313\272\371\272\177\235?fT\356\362\"\260\262\277j&\262X\000\250\240?\302\204\002\251Tk\252?t\006\254dP}\265?P\336\236\275\327W\220\277\030\007\025\301\001\004\250?\270\333\3330r\222\261\277 \036T\2274\002\267?\036z*CY\314\233\277\256/\265P*\357\241?\321Z\331Y\251y\230?Ye\214\342\243)\264?[\236\010,1U\230?\"\213\315\350\267!\225?\272\017\347\222\266\013\251?\030\035\316;e\214\241\277?\224\274Dv)\257\277A\004w\304,\301\245?swb\023\326P\245\277\275\t\\3ah\223?\331\361BX\273\263\262?t\240;\025\335\207\254\277\027\007\177k\202F\233\277oI\0229S\t\236?\336\037\320y91\242?c\rM?\325\023\260\277\260\021}\034\000J\240?o\365\360\225\205\244\301?)\034\2511\262i\271\277\375\3309\357\342F\244\277\347in\375\031\n\231\277H]\013\007$Q\276?n\303\223\020\240-\216\277T\010y\373\"\274z\277\203\273U\244\236\200\230\277V\243\254\231{\027\222\277\371v\020[\020i\241\277f\276\317\343\246Y\236\277\357)\253\344\373\374\254?Y\305\263)\370\361\242\277\221\207\025\267\031\245f?\207\370\213g\312\332\303?7\345\363\"\023\016\277?q\203\365\363_\324\234?\341\201\n\355\225_\245\277\017\215\035Cv\001\206\277\237A(\177\021\300\276?\3215j\302\005\342\261\277@I\302UOE\253?\210yL\2544-\243\277\311\\Gh\"\372\203?\351}y\200tj\273\277)\3337\321\221\031\262\277\346\365\2748F\274\240\277\212\246Rq4\314\300?\307\374\364\362\323%T?\251n\003m\314\223\201\277\023\000\304\374Dh\254\277\237\350[\241\370B\203\277\372:\205\245\222\203\270?Dyk~Rc\263?f]T\026\021\325\246\277u\014\303\300\233\222\206\277\214\315N\010%/\262?\211\276\306\"Phq?K\265\004\377\017o\242?\r\265\r\226\301\272\240?)\"\336\205\231\336\263\277$\010\211\005o\273\240\277$\006\353VvP\223?\320XTs.\035\264?\r\224RD\346\362\240?Q\207-=\262A\222\277\201\000xp\360\205\243?\376:\347\235U\376\215?F\253\24200\240Q\277\232\n\307\005\010\375\270\277>\231\335\016Z5\233?\347\334\263\332\033\353\244\277C\302\336!\233\271~\277R\271M\237Q\005\253?3\233\244\364\361\224\225\277\235\017s.j5\240\277u\316\216\025\n\372\271\277\007\207\275\022\252\022\306?\254\014\034\362\0034\227\277\013\313\3235\226~\204?0\264\020\330\263@\261?o\034I0/\021\223\277|\251g\204}\355\224?\323=o\030\3376\253\277\236\216\177\374[\037\254\277C\362\301H\310c\205?%\004\301\211\020\344\237?\214\306\270O\236\004\264\277\211j\206\341\360\203\230\277\347\311\355\267\347 \211\2773\300\345\342m\260\255?\323t\351\362O\213\241?\027\324\r\302\261v\241?\271\177K\243\2618\222\277\2520\310(\342\336\261\277&\252`\241\334m\242?v\376\026F\203>\220?\014\327\216|\017\223{?|\3245\n)\355\247?\360\216#\026\342\261\231\277\177\324L\353\240\214\240\277\305\364`\310\361b\270\277\315o}C\211\304\225?\006=\366P\307\270\246?\320\002\376\362\311^\244\277b\344\211\263 /\250?\255\3652+}\251x?\250\215\005Z\257\353\270?\004\243\375y]\362\225?p\326@\221\001\270\200\277\270q\311\264\026\035\224\277\310\345\210T\262\034\262?<\201\n\316\307\300\233\277\211\362!l\374f}?\036\352\220b0\035\266?\362\250^]a\347q?\330a\322N@\226\265\277\350\013\016\314\335%x?\006\026w\214\364v;\277\330\002\226x\014S\201\277Qn\341Jju\270?\034\267\311R\033\370\263?\275\022\004I\002H\264?\\#\350V\033\224\256?\200\321\034\246v\\\222\277\236`Np\245$\210?bX\303~\303\231\227\277\024\221\356\352_z\263?\\\242\024\32004s\277X\330\025\r\335\254\260\277! W\356k:\263\277\001*\344Y\252\230\254\277\201G\227\207`\036d?\355\224b\000\037\375\251\277\350\202\013\375\256\006\260\277\305\221\273S\367\271\202\277%\000\240\343M\025m?\302\032c\2615\327\225?^\"\033\213\006M\256\277\000\235\2330\347\271\237?\246kXE\341\353\262?\310tw\302\272X\226\277S\242\246\020\220\261\224?=\021~~<\375\263?|\034\240\031\214#\224\277\3028\270\355\363\362\244?\006\307\373Rbt\261\277lV\223\310\031\334\207\277\354\313^\276u_y\277\236\230\307\214ee\252\277\357\037\335\364\277\313\245\277w\273\320\312i\377\244?\224\262\237~\204\242\241?o\254\020C\336c\247?\031\227\327\000\251\227p\2773.\220\326\270\366\270?\tq\270\304&\035l?@\316=\375\342\'\222\277:\236\270\267 \225\240?\243\026(\201T\343\266\277\371i,\367[\302\214?\2333\3121$\305\201\277@\277\307\314k\252\274?\257\3247\371\3316\275?\205saV\204d\251?h\301\020-lc\220\277[\033\241\325wZ\251?\327\370\306\300\263\271\224\277\320F3\026\357\354\260?\350j\241wX\030\237?\234\020\016\277\234\211\240\277\251\340\274\007\312\023\240\277,\250b%w[\260?Q,\251\204\202\267\260\277y\006\331\222\014e\273\277\0103r8\'Q_\277\223\366\344\315\023e\212\277\264\331\343\213BB\204\277\331\343\227:\373\242\254?:\316o\262\260\030\205?\324X3\255ST\237?r?\007\206$\301\302?\265\356\207 \006#\200\277\360\371D\302\371\327\262\277\035\037L\2458N\240\277\0321\234\244v\362K?\220\214S\302-\344\277\277\241\303\036\213\213\303\273?\370\200\365\246F\352\225\277%\317\010b\237Y\251\2772\024\260P\202)\205?2\013ZA\240\367]\277\334\373\314\354-\302s?qv\327\013ug\234?\325\360V\214es\253?\333\005\355\253\033\362\270\277\346,u\346\222\016\252\277\007H!\357%t\237?m\266\225\305\004\207\247\277\377\265\035\272\212\006q?\201\235\247\237&\022\252?H\346\260\271\\4\217?\3118=?\335\363\261?\"\\\037\230,\361\267\277\272\3152\351:\366\270?Av\203\177\243\225\237?x\'\234\210\2442\346\244\277\273\034m\241F\021\241?D\272\265\343\220\005\246\277\314\204\362Y\237\333\260?2t\003\013+\250\216\277(v\000=%\210u?\000g\035\276\256\340\231\277:\000V\334\310\241\264\277\204\354\330\264\023\230a\277m\260\034p\200\343\264?\367\177\311\202\315\357\300\277h\245\222\353kx\256\277L\320(nD\330\240\277\245do\324^3\206?\316N\375ln~\214?\036\223s\217\376\301\262\277\023\324`\n\023\373o?,C;V\375\331\246?\342~\020|\265\302\261?A\273]\304\211\212\263\277\271\261\344\266\366\201\260?\301Hc\261\2238\247\277G_\227-\232V\260\277\340#\005?\334\232\232\277\247Y\302Z8\243\230?\343^\305n\343\256\273\277K\210\374\214\374\030\203?\202E\022M\231\006\216?RF\213A\r\275\234?.00;L~\263\277)\301\236\'q\025\253\277\263,N\303W\252\304\277\304\'\364f\030\002\213\277\247\022\027\376T=\244?\304~\343~\000\\\217?0\200\034\014f\344\247?m\233\036\036\357\244\254?!XDjtK\241\277\272\302/X\"\336l?\013\202\321Q\277g\233\277\377\370\230\021\2472\244\277\254\254?O\344\\8d\004\266\277\233\234\036\301\364\344Z?N\010\314\317\215t\233\277I\001\026\274\270\002\264?\233\\\337\305sN\201?\005\250%\217Z\302\211?\321Bz\00724\220\277\207TT\303)\340\272?\250\371\360\302\300\246\217\277\351,\344\220\0005\260\277Y\254\2775\327\301\274?\370\216\360e\365>\247\277\331;\201jK|\261?\343\025\024x\321\262\227?k\235\265+\355Ov?\205\022j\357\2677\262?(\025(\320s\204\247?\346\270\355\245\260\014\251?$\177\373\203\215[\241?>\'k\252\177V\243\277E|\245\244\270s\202?tb8^\t\017\221\277\240\005\024Fol\212\277\277O\336\0222\177\272?P3\025\327\335*\260\277\213\345\026\367\010q\206?\033\037\221exz\252\277\232\031}\000;\252\254?!f\024\365{8\216\277\016\230/\304}H\214\277\207c\350\356\334\036\247?\037\"*\2016\332\257\277\317\361\335w6\025\245\277\264%\001\206\370\311\254\277\361x\332\203\027a\210?\355\347\3701\325z\243?\035\237@\306Pz\233?-\376\307\273\245\263\276\277l[\325\240E\237\242?\234\332_\333\327Q\245\277\314\000\016\337/\307\204?\274r8\254D5\240?\344a/T\217\225\267\277\205\000\034B3w\243?)\305\343N \350\260?\201|!\366j\366\271\277Ov\264\026\214\240\261?\223j\221\352\362\224\265\277g\030J\204\2066w\277\313\261i\351\361z\256\277Y\314\033=\037s\301?\352\026F\255\266W\277?\th\201/\254\210\264?\211\361X\260\355\004\270\277}\265\226&\344$\263\277\373\300-\256\242\316\242\277-\320\310\3418\211\177?\221zo\001X\212\221?\223\213\321@\374\260\270?\243\226?\301\344\302\201\277=j\212\330r\020\241?\307\030\345(/\241\212?MIk\354NJ\263?6\365g\315\347\264\240?x\255\345jCE\205\277b\232\037/\232P\240\277\240\3079\0176\007\261?F3U%\300P\242\277\363`\355\363>\230\267\277\324\3058\024\246R|?\314a\320\351\204\302\244\2770\t\365\264q\352\265?\000\217\275\340\014:\263?\277.=\347!\232\260?KY\324px^\255?j\335=\303v\037\240?\253Zw$O\207\246?\324\216!+\020\250\247?h&\263\2140b\237?]r\210\317\026\243\232?\352r\001\017\177R\240?X\263\325\020]\'\212?!Kr\014\2646\221\277\223\241m\333$J\271?\216\002l\356\242\234\233\277*\013\260\2723\342\267?\340\236oLI\034\263\277&R9\311o\370\305?$\367_\312\330\036\243\277\322\036f\350\033\271\244?j\243\325\220z\300\262\277\272=\020Z\321\r_\277\033)\252.\212G\206?x\251Q\340w\311\255\277\030\216\210\275\353\310\256?s9\334\355\234\n\272\277\277\r\207U_\236A?3!\014\211\031k\206?\0145\375<\225\031\231\277\005 \003\234\212\256\264\277I_\275\230\221\227\224?Q\312`M\016Y\242?\275\213\332a\334\262\241?\240\372\314\014`!\262?\260`]}Y\272z?U\352\260\022bC\213\277\361\314iVC~\260\277U\324\214\206\312Nt\277\317m\213\212\375T{\277\376_S\014\263\203\243?\356\311.\027;\230\230?w}\3746\275v\241\277\201\245\245\342\224\'\245?\235$\331WE\362\273?dG\003}\354r\254?k\321vq\177<\265\277\320\246:]\374\260\204\2776F\347\025`\256\244\277O\251\344\240\302\354\231\277\036_[rC\202\215\277\236`h\250 \t}?C\201P\365\204\244\210?80\216m}\013\235?\017/\267R.\360\240?w\035\316C\235\364\267?\350\000\276\340\2017~\277%\217GR\n\346\216\277b\330\366\372\275\354\272?\0362\236\230\245#\302\277\260#9\257\0106\220\277m\276\351\362Ik\256?\315]\376@\266\231w?\350y#\231R\037\241\277\253\335\260\345#\320\241?\377bzr\226c\271\277\032\234\347\262\204\357\225\277~G\221\346\350\235\236\277^I4\013\240\005e?\245\347\033\030\001\332\245\277\273\255i\037\024U\236\2771\304\325]cY\224\277g\370\024|\352\017\250\277[\274\306\264]\\\274\277\206LW\311?@\210?mG\220\240@\202\260?\265\241\271w\373\221\247?,\001Dd]Ho?e\t\005\330\313\223{\277\340}\r\346\021p\243?\00122;a\374\240\277\270\240;h\'Sw\277\367ie\326\216\033\264\277\371\357/\240\372,\205\277\250\03747\010\315\217?.f\275\177\244(\240?<\247\037\346\236\242\205\277\352\314\247\271<\312\253\277\177\317\233U\003\177\260\277iYK \366je?6\007\214\207W7\201?\315\207=PK\227\255?\363^\274\020\237\000\232\2771\237\310\343\373\216\267?\300\007A \353\255\261\277\261\032kl\332L\255?mV\303r\247\206\210?M\355P\274\004\200\267?\325\217I\251\014\371\241?\004\0140\305k\000\177?QK\237\031A\343\257\277\314[l\244\336\350\265\277\243\255~\360\3160\261\277\276z\256\023e\252u\277#\277x+\306\020\201\277\0318\304\205kw\220\277o\351\250\310\203\323\205?\347H\255\235\344\216\253\277\261\331\270\2419\242\275\277T\341\024\361\262\312\247\277R\224\201x\t\347\246?v\214)\351>\240\233?\312\321\341\022`\204\240\277\273V\n\267\317\277~?F\271\354j\334\036\225\277k\237\267\367\006P\265?m\027PU\230\033\242\277\177\2118\220\330\025\223\277oT%\230\202\317\254\2774Mj\210\366*\251\277\026\207\345yN\251s\277\222=\035\200\227\226\220?\262P/\350\200\036\300\277\247\332\"b@Xw?\316w\300\001Q\001\257\277\341]a\257@\271\236?\233\261\245\364\'\370\214?=\220\004\342s\255\215\277q\220l\351\211\217d?\252j\275\304#_\260?\247,\264\203\3153\255?\025\331\032#J\371\226\2777\231\224\221\to\247?\316\013\t\371k\360\264?\264a\036\213T\350\256\277I\372\'5k\006\240?:MpZ\367O\267?\276H\311\214\305b\266\277\365\357\240Q\227i\225\277\354\305\213\376\377\022\250?k\332q\320]\313\207?\035\032^\"\233\326k\277o\273t\266\366)\253\277\345Ll*\210\013\253\277\273\363\222\2166\n\226?\372`\343#$U\264\277\251\341\313\360\027\t\257\277\255\021\312l=\377\245\277\331\337\257tp\306\240?-\177^\251\\\254\267\277-2\0353&\270s\277\000\366{^\2460\223\277\340\031e/\242c\240?b\323\253U\033\006~\277\001\033XE\353\326{?tTP\235\370\003\256?v\355+}Ur\202\277\325T\321%\377W\202?\275\211\334b\227g\270\277\242\325\300f+\023q?\263\306\351\033\257w\255\277\320^\222m\213\210\237\277\303\321\257\230\2079\263\277\213J\037\007;\322\203?\311\361\213\032\277\321\244?\324\231nW\241?\240?\034\375\2130\246L\241\277\023)\236\003x\231\263\277\363Jc\221\370\335\212\277c\356\214\363?\375\236?h\024uE\212i\234\277b\022\024\331\036u\205\277H\026\321\206_\250\244\277\355\005\355\321F\224\234\277\177\320-\226\266\200\301\277}\216\"\350\255\230\245?\201\027\014/\266\253\264?\217<|\333?\360\203\277\001\342\233m\224\311\257\277\023\261\326\002\261\230\241?\275n\r\324jn\254\277}H\020\244\311\274\222?Ll\005~x\030\245\277Q\207\374 \214\273\241\277Y\251\315\002\034\340\217?\002\340\352\230\264u\266?\234\214\354\373\366\271\215\277ZF2\307\275\313\205\277\302\230]\232X|\270?c\367\030q\036\"\270@L\261?<\360f\r\351\217\256?I\t\224\020.Z\274\277\217\3642\332\\j\256?,\226s\226\233Z\241\277\236\211\251\356\207Zv\277H\266\325f\361[]?^\260\230dr\366\246?\311\026riK\200\241??\377\013\212\232\341\245\277\326\370r\247:\304\262\277\201\2167r\255\253\215\277\207M\0036\236\201\260\277\254\322\263\217\370\217\240?\323\212\026\031q\265\254?\207F\346\213A\030\233\277o\306\224\356\000\024\264\277\212@7;+\203\245\277\204\265\271\376\010\350\254\277gn+=J\235{\277\233ogqU4\224?\017\371\017\352\317Y\260\277\255\243A\010_}\270\277. \264\002\223pU?\032O!\253\306\341\255\277\227\360\013B4\355\224\277q\016}\014\331\016w?\337op\033\274\261\207?\270P\2521FS\272\277?\322\227}T\rj\277\207\363p\331\304\240\222?^5?\025\305\303\222\277\322\311G\313\232\342\241\277\2770\255W\265ms?\312\320C\353\333.\233\277bj\000\372\014\n\260\277\350)\026\020x\206\240?wE\307\2309i\257\277\001\257s\240\370#j?\212\354\374\341H\324\257\277\261N=\006\306\261u\277\352\323I\335\240\273v\277_r\274|\305\364\243?d\245pY\021NC?\344\236\035,\374\324\231?x\000\315\007\224\201\232?\245\216<\013#\252\243\277\303v\352r?\023\266?\356\257\036\312\200\343\204?U([`\2201y?\326v\224\224RS\225\277E\271\237\325MqP?\307@\365P\025H\254?$\376YDk\246\264\277r@\300{<\376\221?\365;\322\243[\237\222\277\217\277\265\246!\202\304\267y\277\220\'\320\000\342S\226?\370\014\267\'i\005\263?\225\374\206\037\365\335\257?\270\344\265\007,\251m\277\371\301\r\236\022\201\225\277\374\370/\007v\273\226?\3239\374F\224$\255\277\335\357\335\022\265\312\254?\254Q+B\256i\252\277\0359\354\340\342\263\245?\203\232\354\361\211\247\204\277|\271\356\327\347\354\253?A\036\264\360>\260\230?L\272\0015-\375\263\277\270\017\026\365f\217\225\277\000\233\220\024\214\254\242\277\3228\304\007\345\371\227?\021\302\212\017\224\026\310?FZ\n\014\273\322\222?{\347\343\212uc\260\277\244\016\374\256\232\201\257?B#\037\324\360\031\262\277\340\333Sc\003\205+\277f\360i\245\036w\243?\351E\242~?\025\260\277!\216\210#\377\222\232?vses\361\\\261?\265\246oNr\310\242?B$q\326\265\333\252\277\341L\327\250p\371\246\277|\3776\270g\355\256\277M\350\211\005Hd\202?\267J\222\0331f\270?\021\274\325\002\271\243\223?\224)\th\3546\230?4\346\342\r\321C\203\277\226\251\300UGk\233?\272\306-p\225z\264\277\374z\320\265\023\033\227?\231deC\177\020\223\277\340g=\274\202\020\220?\001\360V\037J\360\241?H\256(\3001\311\241\277\206v\330;g\013\230?C\372%\2569\007\227?!Q\'\222}(\262\277\026\305\352r\264\355q\277\356H{I\253\271\222\277\206\362X,\333\313\260?\363\013_\210,\200\265\277:\365\240\217`\265\224?\362\251g\245\240\213\372\234?tw\035G1\321\233?\236\355\353A\231\266y\277]\332Z\340\275Jk\277FK\225\225\312\347\261?\001X\024\256\333\325\227\277\034\036<\007\321\334\244\277\217\242[\347/\206\252\277\204\322\\\327\330\260\243\277\003\360\to\336\261\263?D\364\202\202P\\\245?\210W2\247\3346z\2775\354\300\364\017\341\254\277\351!\251\375\'\\\266?;>Bl\177\021\246?\002\346A\353\231\036\204\277-{k7\272\014\246\277n\271%K\336V\252?\34791l\341\374\220\277\261j)\326\254\373\235\277@Y\34258j\256?\332\031\367*\312p`\277\315\2128\301\250\315\271\2771\236R\346\245\311\301\277\352\214\023\222uU\247?%\234\365\321\273[\224\277\332~Q\316\257:}?\367,6\260\026*\243?\257\340c\244\304\367~\277\256\035\344\3403?\261?\255\3330\251\366\205\205\277\006|\254\317\3157\236?\003s3\331\354}\217?\221\'\200\007y\252\251\277s\251Lh)\265\266\277\367\031\200\031H\226\263?{6dD\204\223\237?\3153\332\256W\337\270?\037W\272\302f&\256?\345\235\346\370\341\373\211?\372d\337\305Z^\260\277d\324\\\025\227t\263\277\270J\224\333\276\352\263?2%\024\004i4\215?]\300)\252n\005\240?\"\316\370\346<\247f\277\324}\255\315h~\234?\353\211\223\322\265\013\264\277\232\023\305\310\376,\232?4v\020\230\005\t\242\277\034\223\277\027T\225\261\277\024\307\030\035\265f\272\277\265D\266\\0Q\262?\333\204\245y\322\250\214?4\207\207`\232S\204?0r_2\\\335\242?\013@fw\027\225\301\277\367V0\301v\363\211?\202\233\244\233\353\337\264\277x\201\247\371\344q\226?\257\342\027}3p\217\277\325]\003%\2578\232?\013\327a4\3451\225\277\010_\214\020\345\000\276\277N)\375I3\266\240\277o\203I6Ra\265\277\217/p\030\376y\265\277\312\005\020\262\205\277\263?\232qy\010{\225\215?\244\267]i\037\021\262\277\345_-U\361\001\204\277\177w\t0\257\241\263?\261\231V5U\313\200\277Q\252\373\200\377I\271?\366\215o\010|\200\240?\024\014\356\034@ \260?\243R{Bp\036\223\277\251\344\3536\327.^?\032\206\246\254\303(\206\277\322\242\251\013{\256\257?>\2010\010\252h\257\277\315\345(\235\247`\266?\360\002\004\031\340^\260?\253;\275\020N\267H\277\257\213\305\353\033o\241?\330\377\0177\334\211o?\261\257\257\217\374\307\215?:/\0100\036\263j?;\26636d\237\260\277\305\251\237(]\007\225?\272\336\216,9s\211?)\374_a\332zg\277\350\216|\2566n\213?\267\272F\361S\327\235\277D\367\341G\265d\254?\3154\237\215\006x\255\277\250\235aL\033\203\204?\330\304\245;DS\263?\340E\334\017\'\021\252\277a\311$P\373\325\220?\247>6\212\354KK\277\030\216\225\341\023,\303?\234L-a\300\264\261\277Q\334x\205\265\200\256\277\305Wl\022 \321\257\277\275\213\226?C7K\277u\021\311\211\241\013\240\277\245\205\231\023PP\227?Dn\224\t\360e\246\277\373\223<\355\\x\220?2*\241/\330\235\247?OH\031Y\036\243\252\277\300~Tq\336%\263\277f\376\322\226N\277}?\200\322;\027w\316\235?-\203\3473\3700~?\312\216\r^\200g\231\277\301\371\314\004\250^\200\277\205u\325f\206N\227\277\310\335\037A\333\347\237\277\263\235Q\0236c\230?\002\316e\305\351\374\266?\212\250\352\213\224\270\241?p\r\345^j\260\261?\221\221L\240\313Nz?\256vQ_\312\t\263?\223\205*\253\314\033k\277\367\r\0379.g\207?$\311\306q\322\037\224\277j>\027eg[\233?\345\201f\242yz\246?\r\205{\017_\325\264\277,k\234\361z\234\252\277?\350G\206\014k\212\277\220\365\030H\235j\212\277\010yX\272\001\364\227?\013\252\253\020\344\334\215?\357\336)\370\274\327\226\277\360\030X\242t%\265?\007K\n\026Z\307\250?a\377X\315\237\367\275?\001\"y\001\037\035\237?\374\232m>\362\241\265\277\023\333W-%\340v\277e\032L\203\2462\263?xv\242\001\2028\300\277\274\333 \367<\221\225\277A~\271\232\310h\225\277\314\270:\026\316\201\253\277\231\216\213\016\207\243]?\3762*\304\1777\251\277N\302\033O\024\251\253?7x\234\246o\357\247?q\303<\217\t\223\256?\250W\245\224\020L\211?)\"s[\314\267\213?(\t!\373\223\342I\277\271:\205\371\226K\263\277^\372\017\217\240m\261\277\332`\275\3400\036\256?\247\344\244E\247\213\267?\376\364?\367(\221\236\277\310\245SM\344b\207\277\211\316\215\375,\322\255\277b`T8\234V\265?\000\374\216\2329g\224?\235h\200\032\021Y\254\277GH\0332\"\312\253?d\230S\345\202_\206\2776l\004\325\225\n\201\277B\377\341\355\305M\245\277^_\020]\376\357\202\277\264\334\367MD\302\214?\"\363I\334\1777\235?q,/\010\177\320\304?\275H\310\375[\260\260\277\332$\343\344i\315\204\277\231A\2016hf\215\277\253U\357\362qB\242?\364\234,\316X\312\253?\\e\354g\3511\207?XZ\305\357\3277z?o+\006j\033\271\224\277\374\206\027QE<\247?\375\320\271T\0214\245?\221\245M\021\337\r\230\277\023\327\257\374-\202i?\t\367|\232\243\201\260\277\263\303\353\033C\361\257?\233x{[,x\253\2778\313\227\303\326\006\225?x\252\'\223\313\023]?\271e\233\377\276\344\232?\031&\206\363\3263\260\277\n\235t,\241Y\232?\363\370DR\232\033\235\277\213\342\301\354\261\350\245?|\353\215\337%\261\223\277$\210L\2254?\245?\376\205d\207)|\265?d\233k%\214\345\224\277\020;\';\216\242\264?\3026}[\325+\220??\241p\25709\253\277\016\257j\357\033{\234\277\036\247*\343g\325\225\277\270\033rQ\243\322E?\037\256\004\245\313\344\263?T\037{\301\000o\253\277k\254\315:\014m\221?\2078\270\263\017\315\242\277t\272r\224S\001\214?\310S\303\334o\262\255\277\211\267R\202}\237\265?X\353Pk\361\272c?@\216\347lCV\266\277\020\352~\031\n\317e?\372\'\3658\232\036\255\277r\000\261\316\350\234}?:\206&\334\034\377\260\277\262\\\311\203s\320\242?yzK\0211i\244?\307\252\023\005\334\314\250\277\007\022\250\007\202\341\226?\230Z\"\203|E\240?\355\010\207\250\275\000\262?R~q\"\233\322\211\277!\306\210/w\315\242\277k\271\267\033G&\242\277\264\356\357\326\207\354\240?\3434|\265\276\246Q\277\302\006L\212\310\336\275?\014\241d\000\260\330\240?\303\245`\221(\240\251?\244\036\227O\355\333\240\277\267\272\337n\365\237\206?\2028K\266\021%\207\277\270\213\304\215O\317\203?\002\030\nP\016\312\253?\312\3632\351LY\236?\"\372a\335\335\311\265\277\247\005\333 \347\267\277\277\0178\264\371\016\201\267?\200D\rK\355\r\220?\274\206^a\022\337\226?)\372JM\305\252\234\277N\311\253\344\3479\250\277\216\036\273I\247\200\220\277\234r\262\272\361\256\241?4<\220\337\353\033\227?\306+\252\332\267)o\277\006\007\326u\211T\261?Q\344\343\3161zm\277!O\223+)\245f?\223t<\0316O\206?Ri\274Z>\034\251?B\301\202\002\032\014\254?\316t\2649\372\203\245?\2476\3216>U\240?f\213:\2661`\277?2:Nz\207\021x\277\367\375\336\361\3577\223\277\000T\344\352\004\227\231?\340\036\253Bn\212\262\277\306\341\344a\255z\225\277e\020\273\001\332\257\254\277T\301\351\231+\262b?=\230\223>\335\233|\277\204R\007T\016X\245?\235\363\376\353\232\365\231\2771\016\003\265]\256\233?e\303\037\032\220H\265\277\222\3422\025\277f\201?\343\030!\2425x\272\2776\3747\235\016\327\242?\243\306@\374\316\014\221\277\3038\250\376\007\021\306?\346\030\3259a\222\233?\316T\244\024\037\251\245?C<\311?\262U\212\277\361\302\346\352!\375\244?\257\355\354\367\0019\265\277\245\333\305d#2\262?\277\'\000L\365]4?\244\204<\215.\256\207?\204\347+=\321B\220\277f`\305+nm\227\277dX7\006!\271b?\357\251\202\010lo\262\277vU\306>\232\367\241\277\\\311\231\376\0208\243?_C\224\254z\271\241\277\216Y\211.\001r\250?2\351J\376\313$\252\277\240\014\340\370\227`\273\277$)\272\241\304{\253?\277.R\306\377x\210\277^\231\300\221Un\243\277#\201\027\032!\027\232?\235\361\007#5\005\265\277^\363\370\362\034\235y?\350$d\313\204\247\257\277\227\347\230\2434\263\252?\351\034\205\2544xe\277\037\252\253\213r\263\265\277\222H\317\216K\246\261?C1j\252\360w\206?8\310}\252\233\001\266\277.\230\342\003)\370\244\277\321\321\004\222\224po\277\234\024\321\235\203\'\261\277A\243ws\003Y\230?K\016\031\034\010\304\231\277\211\326\271\334\254U\270?Z\332\306\225\013\\\222\277\200<\237\020:A\263?\303\201\005p\023\371\242?\300\330Q\240\333\ti\277\350\254\320\353-F\243\277j\003dS~#\210\277\332t}Y\t\236\203?\t\314Z\353\331]\216?O\271\224&\303\001\177\277\031_\202\203B\312w?\352\ng\306\264\r\222\277b\323\203\034~)\230?\226\372a\'\002Rt\277e\223?Qs\316\215?\363+\376L\331\323\226\277\313\351\245\220\374\367\246?VJ4\243\177\254\257\277\020\216\206\313q\304\233\277\370q\237\016\327\244p?u\276\177V;\352\261?\255\373\203\342,{\201?\\L\23562?\266\277J\376\026\032E\022d\277\265:\336\010\221\352\265?\3251\256\263\014B\234?\344o\222\003\332\r\257\277\257TR\263\031\005\252?\336_\370\374\240\225\254\277\242&\361\2457Y\263?\335B\353dzD\262\277W\326\373gE\034\304?\023\002\001\241i\331\241?U\205\023-\333\235\224?\352Ch\211\327:\245?\365k\035[\004\216\215?\353C\236\236\237\312\272?\263\215\236\253\306\270\256?\207v\251\273<\252\205\277\303$.\361\211\256\233?\3665\213\n\212\277\265?\362I\253\207\325\376k\365\205?\007\343\3529\213c\224?\352\365\350y\274\n\254?\340\362@=\370\205\224?a\013$bX\312\300\277$\322\275\0219\017\243\277\277\264-\311\0074\304\277um\212\340\030\314\242\277\016#\223\024\204@\237?\247\242\207\325\330\006\303\277\326\300|\232w\370\277\277\345\356\3136\244\370\222\277\252\033bo&\374\265\277 \221\036\004\224\033\276\277\342\232[\333C\334\222?\320\257\214\245\360\237\235?\341\254\327\244r\303\224?s\363\344\251\253\247\221?F!\221m\235\325\257?\263\031\247\257]\340\207?\226\202\303\327v\307\223?\332\035\205 \254o\247?\204Z\254\021\206\270\233\277\036l&M#\014f?\351}\305\003/\026\271\277HESE\363\350\265\277\2055\365\2273\355\262?\302gaF\250\'\240\277\030\'\253\217\304h\270?\003\224\367<\250d\221?=\027\251\364\213\340\255?\353Da\242[\262\243\277\307\322\177n\305\241\224?\363\013Q\246\351\302\240?\205\263;\361Y\357\201\277\353q]\022\004\212\255\277\330I\223\206$\\\256\277\305q.\247DF\257?Q[\335DL\033\262?\207p!\212_:\272\277m\257\260(H\t\227\277\321\366\210\227Nua\277\355\236\nd\276\306\221\277\254>\"\275\256\364\203?a:\341W\366\322\257?;\302GOG\340\264?q\004k\tz\226\234\277\216\355\357\375x\304\212\277%\234b\324\375\312\261\277G/\251\327:\000\252\277\353\274w\354\263v\243?Y\016>\260E\216\220?\375q@\227W\216\250?\264\274\304MY\241\246\277\324Ex8\010d\277\277\326\244\262\034$\177\226\277\303\337\374\233(|I?\234\236\312\352rr\263\277/w\352\004AQ\245?\"\375%\226\303m\260?_6,\335d\221\242?Dq\0250\222G\247?\016\246\212\323\2627\263\277\356?\225\004\305\226\217?`\330\017\211\373\232\236\277\307\005\277_;M\271?\237\243\252\247\322\217\227?oD:=\267s\271\277 \001/\330\211\242\207?\223\022\254\235)\375\240?\353\2754\337\032\206\227\277u\327|\324\345-\254?\027i\257\250}\301\275\277w\216\343\262\364\244\253\277xwn\350\351\350\253\277\236\0205\251`\244\234?\243s\235>\216\013\201?\363\"\205y\024\224\243?zR\351\241Z\330\260\277<\353\264\247\370g\206?\202\302\211p]@\252\277\232\245\253\354\017H\254\277\263Y#m\221B\247\277\244\333\340\270\022\353\247\277z\240mO\267\201\274\277\323\236[\210\307<\227\277\242\036\032\024\010H\241?\244\260\344\0218rs\277\241|V;:\212\213?*\247$\231\t7\253?\234\343n\265\277\031\215\277\206)\030\307!\035\247\277$\006\244\275T\374\257\277u%\304\035\272D\205?\221D\356\251e\277\243\277\353\230\032\265*}\225?E\343O\230p\201\217?\010&\351\333\245\336\251\277\361\026\337%\3170\205?\256\264\277\252Tt\372o\241\266?^\020\363\315m`\252\277F\322\257\365\227K\204?F\262\216VR\032\246\277`\364\324r\216F\251\2777\246\024\233\032\312\200\277\'\221\342\252\324\026\222\277\367k%R\312\241\253\277\343~\300\017\200\322\224?\\\006F\250\306\340\252?\"r\346\326\220\367]?\240l\305\246\032\262\262?\022M\374\023:H\207?\324\n+\303%*\240?Ta\321\221\366j\213?\t\016\2133\235\317\262\277\217D\226\210\362A\256?\361\203\374\222\206?\240\2770\276\203Ss\223\246?(\211\326Z\330d\267?}\3565\367\007\323\241\277##\310\317`\226\267?\243G\315j\370\312\240\277oo\376\331p\266\260?\261\216\236+\003\322\214?v\177\331\3342\017\254\277~!\340\272AC\256?\216\352\361^\273\016v\277\240\251\203\351\362\t\206?\250K\232wVA\260\277m\031\005\037\373Q\236\277$\241F\003\226)\266?\026\376\\_\216\257\251?&\350\270l\345\306~?\327\2439\201\035\350\265?.X,E\211\234d?\2150?\354\030\244\262\277\362S\367\216\350o|?\371\301\262\336\'\200\271\277\333\306\370\323\214\241\256\277\315\345\315\227\222\277\241?\375T\361I\371n\216?ez\000d\374C\245?\264kf\226\220,\200?\\;\265\325\223)\214\277X&\203\034\315}\260\277~\241\337\016\227\'A\277\023\331\224b\000\332\240\277Q\344\236f\363[\276?\210\371\007\3623\225\230\277A\313\264\345\023\035\260?\036MOz\376+\241\277\237Pj\232nt\201?oB\331\273g\361\244\277\031\"Td\006\"\227\277\352\3454\005\220\354\304\277B\314\202j\215O\222\277g\024\357|C[\230\277\354\247\315\"\277\271\230\277gE\223A\262_\260?\200\261\031\024\006\235\222\277b\226\204\324\270\361\223?\203\205\207,\313\\\263\277(\033\361\246\357\004\263\277p\006\tA\033b\264\277sz\323\037\356\331\256\277\025\2559i\245\257\253?h\211u<\0204\245\277\314[VG\306[\264\277\315\005\004\266\000Vz?\374\210\376w\365{\253?\016\032_64\274\265\277p\001\"\032\235\247\200\2774\365?\377\314v\262?\300\034E\304E\324\261?\365[\014\030Di\235?\202B\334\313\376\277\262\277\351\037.\024\351\024\242\277EB\024$\206\260\261?bk\203\214n\357|?\253\272\316\267\000\275\264\277]\377[M\345\266\230\277\301\007\235\205\350c\237\277\337\016Q\215\327\"\261\277\363\310\013,O\337\242?\264\356p\031\363|\242\277\025\277&\326\334d\237\277\007\220\321\351\004\332\255?(n)\201x\331u?r\241}\355\226*\232\277g\004DN\360\322\244\2775\323w\177~\307\217\277\374\303$\0305\323\245?cv\005\350\225\311\240?\203\240s\221\367\252P\277\262\255\235\276\247xy\277z\035\014\t\255ph\277<\202\373W\315\315\260\277\360\247P\322\212\237\245\277\220\220`2R\235\273?t`\202\346<\330\261\277\237a/\331e\245w?AX\235\002\001\277v\277\372\260Z\344.z\260\277\330\317\300A\242\215t?\001\3057\004\232K\204?\205\210T\355\023&\271\277W\320\034\356\037\275\254?\246\224F\373\0031\262\277\016kRU\300\003\250\277\302\337\235\003\3513\261\277y8\234 \340\201\251?\350u\361B\305o\262\277%\313V\261\246[\221?P\3714}h\243\241?\023\321\004\367\231\317\300\277\335\201b\355c\377\252\277*\'(74}\257?\314E\274\256\207\203\241\277Y\361\301\313\302\332\242?\364\366\233\035B\302\210\277\371\322\347\013\010\360\214?\321a\262Mq\340\260?\332\261\200{\002\354p?u90\344\276)\262\277\346\306\343R\207\025\250\277\366\313\2773\315\234\254?z\233\322\3758\242\242\277\242\331\271\317c\340\255?\212a \227\\\244\241\277Y\345\336\2115\025\222\277\251(}\355\333\013\275\277\024k\013|\377\361\214?\336^\010\232bB\232\277\210!\301\202b\210\200\277\300dI8p\021\202\277\257\206\303\216*=\224?\234,\"d\216\215\256?r\021B\035\272H\240\277U\2054\331k\231\204\277\263\201\313\320\021\266\251?Z5\277\355\021\314\235?,PA\335\224H\201?\2179\264c\017\275\240?\263x\365\316\302$\243?\213\031\265G\265\200\252\277\200#WaB\036\240?\373\203\321\014\331gp\277\322\244%\004Q\263\265\277\266\324gH\001\226\274?\036\274\2468\016\227\240?g\223\261/dXy?\302\306\344!-\331\251?\263\324\365\230ox\233?m\212\316\376/\342\250?U\322GdW\013\260\277\214\007\347\375;o\272\277w\204\366\364\325\326}?\317\222u:f\n\222\277i\037\333u0Z\242?\027\225#m\3136\263\2772\266J\r\217\263\250\277=!\333\037\313\205K?\276\361S\306\353U\244\277\340@\2469\347\205\235\277\256\224\262\262\276\312\003?\302\266:V\327g\263\277q 9\216Kt\251\277\003z\321\305\306v\232?\352O&}\251\214\241\277\032:~Ea\007\266?\360r\250\002\260M\244\277\177\350\'F\017(\252?Ad\367\212\021e\205?e\240\"\255\216\017\233?X\377\210\202\331~\226?\372\257@\016\347R\230\277 \240\333\347\314x\211?T\002}\257\246\364\246?_<\016\277)\374\247\277\302\373@\310|?\250\277\220?\266\211\374\220\237\277\030\010\210\226\350\235\245\277X,(\345\200\242\253?\023\365R\256e\264\260\277\344\373\343f\316\265\240?\030\232\320\335\215\332\266?\335\024\026\215\r\313\233?F\315\257\257\346y\210?\340\340\010\347nU[\2771g\343\376U\n\220?\245NH\201\261\210\264\277\007\224m\356\356\343\311?\314\215\307 \255s\253?\210\0078E,\211\275\2777>{s\333\005\254?dU:H\226\271|?\243\340~B\035\377\253\277\365\314Z>\303\3675\277\204\267\302\\\r\013\262\277l\003\354\006,O\237\277\034\214\214_\177\272\241\277J\267\345\340e\262\237\277\245lc\214,\307[?\265\332\305kI\276\252?Pl\264\276\233,\265\277\234\337\205f\2150\230\277\nE;\017\202\310\254\277Qi\272\013)\342\264\277\312\265\326t\324\214\232\277%su\0166\032{\277\236xB}\352\177\246\277\203Y`\225iAg?{\262\342\036\361\202\227\277.p\335#b\343\244?;\310;\023\"d\256?5\261\010_\227\003\237?%\247\332\037\333T\251?\027\221*\033\023\226\276?N%\242\242\026\221\243\277\217\371\0226u\254\256\277)0q6\022Y\245?km\320\277\236\244e?O\035\225\306\276\332\226\277\334\202[\327%Nk\277[\3334\\\2153\211?i\2627\246.\222\235\277{z\3721\356\035\245\277X\033\206\230\031z\242\277\001\206\2736\033\016\263\277p\335\324\'OT\236\277\251\\\375\345\001\002F?\247\311\267\351\215\211y?\335\302^E\265\024\207\277W\3170\370%\267\264?\316`\351c&N\242\277\037\222u\246h\302f?\350aD&E\326\265?\301t\202D\331\203\222?\266\237\2568\310=\245\277\376N\212\21543\250?\207\251k7\336\\\274\277NrCaK)\252\277\316l\217\246t\351\236?5\332\235J\222H\223\277t\345Q\342\036\314\254\277)\334F\342\301\2763\277\014X\t9Sk\240?\345Y\313\351wA\252\277\315\331p\"$B\265?\231\3345\177\271\366\205\277\317!ZT\035\233\204?1\353\262\275\3423\221\277\037\r\200\006\343\272\217\277G\331\263\354\013G\225\277?\252t\345\177\001\255?\355\212\365\242EH\247\277#I\005\266\007\373\232?\333q\361\241\327\236\253\277\010\374\017*\243\341\270?zq)\237\001\361\230?d\3649oo\247k?R[\235rGM\240\277\216p@6%1\260?\\\245OZ\331\376\273?\360\203\261\325\275\t\243\277=D\\\334\232@\261?q\355}6\004\032\244\277[e\363\2231\311\270\277\312\230w\010\013\320\233\277\366A\312\311\307-\226?\250a\263\n,\262\262?\245&eH\251|U?h2]n4\353\223?a\024-\361/E\260?\025\370\003\221\nq\265?\306*U\250\3524p\277\037|\311\322\247\034\300\277\n{\240:\013j\251?e\303\'\233\217\007\224?\020\252\350\326dq\257?\352\213\266\223IT\244?\270\313\246\005Bd\204\277\222\244\270\325hn\210\277!\244\3055\212b\252\277)K;A\357%\265\277g9\342\201\nL\243\277jA{[\214\223\251?\243\246C\230\216\230\262\277)F\237\236K\270\277?v\237\347B\200\323\260\2776\3752\0167\231\245\277x\274z\267\253)<\277w\330]\177\366\332\235?+\211\036\243,\334\274\277\331\351\274@Fc\227?\320e4\323W\241\236\277e\213\326/\201p\242?\2556\352\220\365\373\261\277Y\364\252\306^>\204?jg\326\272\373\302\252\277\240\335\307R\211Et\277\376=I,\274\252\276\277\327\203y\255z\007\232?~l{\244G&\234\277\255\203\201\330\311\030\263\277\203\242\227F{\331\247\2770\255E^\326\003\215\277D\027\266}M\r\265\277.\370\037\277>\372\265\277\203\313\030L\007\014\263?\217\327~\254\234\326\264\277\027\rs1D\276\237\277\275\243\2406\317d\241\277\350J\217\303V\240\247\277\364Gx\361\311\032\241?\261\177 \327\006\237\262?\246V\253\325\206\214\233?\237\352\314\376\2317\260\277TE\225\314Z\212\224\277Q\351?\361\242I\223?\026\001R>\243\336\266?\342@\177l\022\327\237?\223\206G\332\350\203\212\277\203\337=\036\232\310\251?P\337\234\362v\177\235?\022\267\227\235\273\241\252\277\014\214\267O\261\342\272?\247\036\'eH\374\240\277\215\034I[\312S\221\277\332\270W\327\206\211\247\277x\010\'\216\341\263\221\277\310\001\316\252\235l\202?C\361(\323\215\216\226?\032\203\025\326uI\265?\331\017\212\035\001\212\215\277\013/\300\343\203\217\255?\232\267o\351\311\353\244?\255\263|\360\310\215\251\277 D\364\253\266\204\272?;\266\241\316\376\360\225?t\237\322w\220\356w?#\007%M\000\006\227\277\024(e\345\354m\300?1\224\312\224g\032\300?\246\r\271\r\351|\213?\354k\337Ts!\252?\347\250\242\022x\265\224?\252\'\300\372\343\317\254?\373\026\315\331\3357\177\277k\371\2764\267\232|\277I\367\203\273\017$\232?A+\375\375\276\242a\277\226Z/^|Q\225\277\371 3\334\225\221\251?T\212n\016*\325??`|\211\324,\267\243?\341b4Np1\234\277\214\337\337\'\230x\245?\307\022\363\216R1x\277F\240E3`K\262?\235\331#\264s\371\257?xW\347\277~\312\223?e$B\n;\202\261\277\363&%\263}^\246\277\204\322@8\317n\243?e\314\226E\347\367\206\277D\300\266P\323\373\260?\371\357%\306\377\221\220\277&\254r5\001\305\252\277*\362\261!\202\005\255\277\311\027\326\220\261\374\227\277WA\022Oa\377\200?O\210\376\226\271.\234?r~5\210i`\277\277\376\307\254H\321\201\260?Yc\377\3774\022\254\277\245\324\330Q\337\325\245\277F\251\007}\263\004\230\277\343\275\236G\n\246\262\277S\332\0147b\314\246?\257\335uB\252\244\243?D\267\206*\355\'\241\277\r\25784-N\241?wa\367q\2341\240\277\010\227\324d\316\t\253?\351\220.6\337\002\226?CJq\177\276#\250?\035\0014\307K\361\236\277\262\372\324\227l\245\266\277rR\014A\260P\275\277b\374\304\2471y\263?\314\376|\215U\324\301\277eM\202\204\372\233\273?]\027\003Q\225\342\241\277\303\005\331\375_\000\220\277\007\026\225s3\365B?\252/\312\257[*\253?.\310\026\272\346\356\245\277\204!c\237\374\273\223?d\236\r\"\307{x?\242sZ7\3177\223?8\"\356\220\322\010\235\277N<\377|\242\367\240\277t\017\323\20490\271?\347\224\244\020q9\251\277wN8\212\3518\234o\244?Nn\236@\266^\275?\313M\267=\217\023\245\2779\213\020\000\024\271\220?\330\371\231\226\337G\271?R\323\2207Qe\223\277\241p\211\352#~\223\277\250\370j\210)\211\264\277\374\365\366\206\217X~?\2224\377Gv\321\245??\337V=\002H\242\2774\357g\275d\260\245\277\017\333\203\\\242\022\245?\267\3573\\w\365\242\277\'\326\0359&B\205\277+\n\243\211\240\346\246?\001h?\2478g\257?\2405\304\213\344\267\213\277\021\334D\214KP\255?\261\324\277\226{\223\237?\262\006\301fdW\255\277\000\254(\255\177+\247?lQ\031\316\331\301p?\312)\017\3675h\223\277F\022Y\254\246\002\240\277eX;\322%\\\262\277\001\254$\320\233\301\222?\233;X\250\023\207\254\2776\253\242\267]\330\270?\246$\337\003\360#\205\277\322g\367\2663f\206?\220\025\262IJ\021\261\277\024\024\241\241t\374\245\277\337\001\271I\355\223\265\277T\303\353\366 \273\250?\321Ctgr8\225?@o\237\237Y\316\203\277\343\303\306\355pO\243\277*\316\321\004\000h\264?P\0242w+A\230\277\367S\201\362\232\032\264\277\247S\3644/\327\263?\345_\310S\362\326\236\277;\307*;\275D\224\277\247!\330:qH\215\277\337L\206[\276|\254\277c,\260\367\215\n\216?\223}\242\337\010\233\200\277\324\365\355\"\343@\255?W\270\214bHr\247\277\334$`\225\264G\266?\301W\005I\346\207\257\277\310\200*A\314\013\212?\301\006c\037\347Gk?\344KI\240\267\324\240\277\353!\371\356\254\336\201\277\201\3523\377B\317\301?$\353\rD\232\205\226\277\022\275&\271\223\370x?\234@\256\241i\363>\277Ja\330@IYx\277\316\234\026o\2567\224?\312\\\005\214\230\324\260?\007\226\252\222I\323\231\277\340A\322\347\256+\302?\\\241\312We\267\227\277\tE\353\2355W\254\277\221ak\033$\376`\277s\236\224yr\210\255\277\353\364B\365\324\270\240\277\276y\236\203eg\217?\221B\305\222\252\254\220?c_\352\321v\254\224?Xq\0321/\277\200\2777\276\364J\373\343\254?\3603[\327\2020\303\277+lO\2011v\214\277\260\032\253I\024\274\237\2777\266\005<\301\t\211?d\305\227r\250\200i\277\370\225\020\004\273\356\223\277\250\244\250\370\021\241\263\277\177\356\252\376\2660\255\277;\253\332\273D\271\222?#\376I\231\037f\236?\236\341\313~\311\277\243?\r\315X%$\017\250?\013r\307\271\345\200\245?\030\263/\277\367\332\233\277\301\237\333\231\037,\243?\375~\036\027C\324\222\277L\205\321oR)\253?c_\260\007\342\271\212?\251\225JV\365\307\233?cF\351\364|k\231\277\351\245X\364\355t\261?\036\'\3451k\\\237\277X\316\254\357NK\227\277\232\330\n\233*\207\237?\320\347\200\352!\375\217?\223F\252\026\251\207\241\277\021\373\230\260\245\376\254?\211\"\025\302\251(v?P\255c\032\367L\263?\317\256\305d!\272\270?G\337\257U\337z\255\277\271\217_\005vF\234\277\262}\357\324\013\326\211?\230\207\264\337?\304{?\247\035\034\333\223c\260?\312E\302#L\370d?\261\213\355\372\223\215\250?\367\361OY\252my?\334}\006\345\234\331\207?\001y\304J\032\326`\277\311\023\321{Xj\240\277d\240\341T{\320\235?\255\360\327\032\223:\224\277\0341\255D\265\261\263?s\016Z\323K\035\261?\276^\221\305\212R\217?XS\336^\211\317\256?\246}\264\363\255\323h\277\005\006\377\351&8\253\277]\036\177\242v\026\302?o\'M\013\355\265\265\277%\t\334/\222\254\241?\360q\026Wa\3234\250\277\234\354\314\035C^~\277Hh\206a\212\220\250?\210XZ\026\032.S\277\371N\267\2752\023\231\277\357\244Q;\250\336\263\277\223\253\337\006u\321\242?(q\315[\336\372\271\277J\372\317\324\211\220\204\277j\340\330p\213\266\273\277\265<\t\243\324z\205?1i\331r\n\023\276?rpig\320\036\225\277\341\357\027\323K\323\262\277\200\355\324\277\177\334s?9\356Rw\312\226\222?\374\252\370+/\345\262?\372\000xr8:\220\277A\317\256\346\010\270\217\277\330\275\221bf\037\260\277T7u\035rt\225\277\217\272%\353M\317\230\277\331K\003\305\241j\210\277\3511a\303\003\017\177?d\241J.\343\326\230\277]\273\326\262\344\023\244?u\004(\263\005\314\257?%3.\0023\320~\277T\236\332C\321\207\250?\002]\'p\214\033\260?\346]\0223\360\351\252\2776\260\004&\344\333\257?\362\221\307\324\277\365\222?\357\322\353\tx\314\265\277\303\345\301WP;\273\277M\236`\2738\016\260? \3765\005\\R\263\277\351\265\230\217\243\361\260\277\344\241Z\025\271\337\253?h~\253\366\261+\254?\016\027\231\336\300\216\261?\032\275}`e\224\233?\222#3\026V\373\250?\005\326/\212M.\227?W\276K\354\305Hj\277\005S\306T.\252\213?\341\215\336\010\023\355\213\277\014hF\215\002\251\200?/v\270\237\366\301\261\277e\353:\354K\262\264?\017\357\024\335\256D\221?\217\326\266,<[\223?\227!\355}P:\231?\241\233\325\316A\022K\277|r\254\347\243\222\261\277\340/&\216\232\276\244\277\337\367\'F&L\244?\265\251\302\017g\277v?\343z\224\305\221\342\213\277\256\250\311\255\014\006\274\277j\255Gy\202\035o?\231\021\230\221\216\230\270\277\363\241;n\357\021\244?(\214\357x\361\270\263\277\323\245\277\343\213\203\240?t\262k\006\221\342\264?\224\254\210\365Jg\223\277@ \3657S\222\237\277\010\263\003z\242+\252\277\326:U\030\321\331\272?\027E\332 ;L\253?-\\\313\367\313\005\257?M[\020m\240v\261?k\323U\343e\026\265\277:\032\2059\244K\262?\252\337\036*K\362\200?X\216\304\355L\207\244\277\263\376\225{83\235?+\321m\254W\335\261?{\'7fQ\032Z\277\3559\351\336R\262\264?\001\001\232T\270|\246\277\364<6>k\207\254\277d\3038\215QV\260\277{\225\252\235[\221\256?oU,\245\322\003\255?\2419q\007\320\225\244\277F\276\356\032#g\225\277\270\270\272\360L\237w?\036\0052\377\264\270\224\277\365\377o=\216\210\260?\325\303\225\300B\313\224?\202\0025\341f$\260\277o\225\210\245\312\253\216?\236\350\000\320\027\267\255\277\321\252\237\340\217.\301?\271Z\031\214\253~\242\277\303SA\347\022\227\260\277+\336_\252\307\031\235\277\334\337\341\236\272e\260\277\257<\372b\241_\266?\366k\336L)~\241\277\246\224;]\263h\222?\365\022Y,v\201\233\277\035\360Q\000\311`\270\277^\300\002\367~kl?\313dL\363=\253\206\277\020\273\220\270\002\017\233\277\351\353\234\032[\305\267\277\246\224b\357O\225\205\277\370\233\333I\342\222\265\277[\206\242\326H\220\270\277_\240\235\320T\027\246\277\020\322\274g<\331\266\277\231\304\273\267\270\310\244\277\213]\346j\266\307\210?C3\370\037\037C\250\277\304(\233\247\245\034\245?\357\330\352I\351Q\261?O\033\273\213N\266\261?\250\326\357\266\371\367\210?>\351\315\374\264\351\261?\237z\354\\w\215\214?\306\306\000\370M\310\251\277\373X\001!\251L\220?\006\200]\260\376(\234\277\354\233}\312\002\252\230\277\nP\203\'B\311\265?\200\250\363M\205\271\223?\351\315\326\033\003/\241?a\3127G\025q\261?\375\3615\267\231<\230?N\n\277~(\215\220?\005{\245q\277{\264\277\235\356\343\016\225\255t\277\337y\"Wr=\266?\207R\306\007Y\272q?S\310m\260HM\245\277\014\375\352&s\376\242?U\315\'N\263\331\260?5\313X\356\336\334\230\277\356$\033\33344;?y_]\224xH\221\277\027w\377\365\024\234\233? \r0\237\341\373\234?\001\035L\236T7\262\277s\231\314\007\325\316\224\277\355#\314%\017\325\252?\225\217\264;kg\264?N\317\377\352RE\303\277\342\006\332=o\314\220\277\312\263\022\272~\367\215?O\324\320V\236Z\244?\004\301\317\225A\233\257\277?\000\223i\010\200u?\004\020\321\260=*\230\277S\242!\236r\"\264?\255\240\361\005_E\201\277\"\347\030&f\277\202\034}\324\203\322\245?cvL\211\266_\215?\365\257\211\303H\356\247\277\313A\2306I\342\'?k$\314\001\316J\265?v\264rT\0229\241?\001|o$Hr\260\277\316[\313`\336\r\247\277M\222\032\350m@k?\316\023\353|\256$\277\277|-\257\266\013\333\247?\312\211:>\212\257\263?>\010\202-\027\202\244?\3732\265\2160\337\261\2777G17-{~\277\235\177Aq\324\003\227?\357\324\322o\340y\241?\010\030\325\205\001\361\255?\300#\273\232\347\322\251?\265~~\021Fx\242\277\306\276\365{S\307\263\277\'NP-\234o\224\277t:\240+H\031\202\2770?\304\321\241\377\245\277\331f5]\205&\237\277+l\014d(\004\265\277\360\\\354b8\030\260?:\325\243]\\S\270\277\306\201\001pu\341\254\277\273\252j\256^_\260?=\246\020\242\3556\271\277\363U6\364F\013\247?\265\220hQ\221\301\263\277\357\225\203\305\330R\232?\265w\337tj\351\243?a)/.\332\260\257?\213k\311\331\034\350\213?\032\000\300\261\241 \260\277\2613\237\010\335R\277\277\266U\227M,\261\241?P]Lah7\223?\216\244\323I\231\241\201?\2735\220\305\203\206\243?\255\370\316Wl\256\242\277\364 \'\000\363\354\202?Ml\"\331Ez\221\2770\264\274\212\344DZ?=\324\370\373\265\261\246?8r{k&\301\243\277b\245d\001i\303\275?\271KY\262\377\205\216\277\211\330\210\224\275-\225\277\272[\271\245\337\302\260\277\341j\256_\300\021\222\277\243v\376\201\002\317\275?\332\221~\352M\231\276\277\330%$4z\370\262?\236*Nj\243\371\241\277\360c\273\324\037\204\255\277v\351T\320z\224\265?8\325\016\2146\330\245?\372\322\274\251\\\341\244?\253\217\205\262\276\341\246?c\022\270P)\215\265?$a\020iA\201\251\277\245zs:\306\322\265?\'\344\344\231\315\272\245\277\200\255+C\322\214\250?\375\037\244\261V\354\252\277\244\0017maP\205\277\014\370{\003\237{\242\277\225\302\264\342\213l\220?\256Eh4g)\242?!vl\233\241\316\227\277\362\336\300\326\266\204\240\277\221\300,y\225\210\247?\365[Dp\306/\241?\345y{Q\275R\265\277\004*R\253\033e\266?\3703\207\340\301\272\206?P\373\000\nq\366\216?\374\211\024@4F\253\277\256\330A\377\317o\241?6\307c1\360\004\244?b\247\177W;\355\250\277\031J\246)]\026\267?A\240>Q4C\220?\0103J\360\245a\261?\252\007p\225\317\021\232\277\310\325\026w\277\004\221\277$t\267\336\234\352\243\277\364\023\302$\355A\240\277-\272\002\320HHu\277\255F\017\3019\202\250\277=\315s\271q\205\221?\345\331\321\000\2526`\277>\221mO\360\332\261?t\230;\230h\026\250?\300\362\005\016\031\205\271?\366a#x\0272\270\277|B\023z\265\252\255\277\334\307\304\243s\366\223\277\373Dt\022\377\243\246?\244O\237\335\227\317\260\277\325|\036\346\355:\243\277&\260\316\306\206\310\222\277\204\324L3p\373\232?\3309\352\3708\004\200?\336\t\336s\0162\246\277e\351\245\033\227\222\250?;\262\273r\235V\214\277\266O#\023\324\264\250?R\363c\216K\272\210?\000\037E\'\314\022\230?\n\230\260\332\342\271\215?\266\241\244\273C\331h\277\032\2209\213-dp\277]\3005\023\274\261\237\277{*\207.C4^?C;2`\326\201\224\277\006s<\201m\200\244\277\301\316 \331\'\364\242\2774Y\r5\210\014\203\277\277\'\346\024\343\333\264?`\310\305\361\253E\220?7\271\215\216\234\305\227?@\270Iq\346\317\241\277\3277r3\311\356\216\277a\3525\242(\240\265?\3636\"\2473}\251\277\215iW\276M0\224?xH\204Ba\375\223\277\362\227\361\217\317\321\220?\225<2uG\257\270\277~\311\032\312\327\276\253\277s\316{\246A\005\227?i\321d\243P\271\250?F\204\240\'\322 \206?\032\366C78\355\251\277\222}N\ne\341\251\277J\230>!\007w\204?_j\231\270\334\030\267?=~\255\222AQ\255?\350\004&8\205&y\277\033\302\301!ZK`\277\264tQ\030e\244\300\277\374\265\302\226nV\246?E\315\324\264;w\236\277o\r\250~\207i\223\277h\032\233\027\356\001\227\277\237\325\307\323\'\340\264?<%r\346\254\367\262?c\027\017\244K(\220\277As\361\240W\327\302?\344`b\220\2466\222?T/1\317P\210\213\277\273$\0020\321\032\235?\311*i\377\255\301\262\277\356\317\277Yh\024\240?\350,\277\374v\ta?\300\307B\375\223\222\273\277\344\356\373\235\000c\272?\314\0002\370\001\005\253?1\257t\004*n\255?\301\311-\355\032\035\242?\274\366\322\235\013l\246?\037\326\201{\2460\262?kw\031\000\371\'|?xZ\231\006P\336\222\277\216\347\"\237gN\232?+\316\230Qh\271\255\277J\376\373e&\313\236\2779\237\371\377:\004\265\277\332/\345Gbe\243\277\244\201I\324\323\244\224\277\302DA\367t]\212\277\021\275\301\231\3665\245?U_\330\365aV\257?[\330ww\256wu\277\r\367\025\332L\026\235?4\371a\207x\232x\277\260\003t\335\333\023\234?\351rv\005\233)\231?\206\026\016AS1\260?|\335\262\353p_\257?\273\033\371}\311\317q?\361\344zL\272\322\242?\006\010\3239\007`\242\277\026\037\036\356\007*\254\277\233:\0072\2172\230\277\340\3175\003\277\302\271\2776\354\250\364\nf\240?u\231$\344\351v\246\277\354\326.\212y\262\243\277b-\344a\342W\244?\235eX\0133\007\233?\224L\373\301\242o\240\277\006\330\346o\224\337\244?!\321\245tG\033\263\277<\003;\256\373\371t?N\341\261\273\316L\260\277\242\263\024\236K\211]\277\3255\td\265P\224\277\274{\205\212[\367\254\277s\343\001\315\222\247\242?B\240\304z\300\363\206?\025\370\235J\275`\236\277Iv\307Z$\324\244\277\350\025\325\241%\253\207?\331\312\377\373]\320\210\277\317\020k\002B\007r?\302\363\324\207N:\276\277\361\215\232B\005\236\261?\202\352\243\213y\300t\277\342\231W>\330\031\243\277\246\220m\341D6\237\277]\220\262\223\247\257\252?\256\201y\204\030n}\277\265\"l7\205\255\261\277e\256V\353\203b\234\277\231\204.\300w\233\264\277\332KA\271\220r\300?\213M\327@\020\376\235\277\325\003/d\016\255\254\277p\377j\266E\231\271?\231\246\021\267\237|\240?8\331}\220\023o\262\277\270\275\214\022\r\326\252\277\246>..C-\211?\rr\331eoY\224\277\221\341\261\246\357\354\261?\031U\203\212\313\314\250?\374?\372A\364\004[\277\357\350f\222\211=\251\277)6E\020\354p\264?\352GbIW\221\223\277W\356e\363\371\340\271\277y\302\270u\304\'\233?\347E\311\031\252\217\236\277\255\2721\335$\241\217\277\203\366\003g\036\215\246\277\236O\361\330j!O\277\337\270\177\020G\010v\277\304\016%\333\204\326\261?\300o!\372\317\306\217?\017#M\365\315=\261\277\214\362\207\262\r\304\255?\t\245qP}\311\250\277$p?\265\002\n\217\277\360\303\223\220\212Z\202\277\317\353\311 D\343j?,\350\365{Ja\220\277m/\376\214:\315\244?\314$\360su\320\203\277:a\3264\374j\264\277\330pb\342\262\322\223?\276\325\363Y\346\275\207?&\210\276\302\212;\222?8\231_\\X\343\254\277\234\366\340\014\033\222\230?-\372\347\200\277\242\275?\341\273.\225\260L_\277\210k\033O\271\357j?\253N}\014\027\372\246?\220\321\250Y\037\204\240\2777\025\304\342~^\225\277Y\\\036P\370*\261\277\316_%\301`\327\257?\000\004\312e7\336\203?\345P[~\224\221\255\277\356}\003HQ@\265\2773\217\303\374\217\334\264?\346\255\373\367\336\343\200\277\314\312\3039k\346\253\277\267\232R7\027?P\277/>\267\273}9\230?\336\030\004k\017\304\246\277>s\266^\326\020\243\277\033\357\032\241\264S\247?\274\002kv2\240\303\277\325bB\321\t\367\253?\r\240\364\365\257f\231?\263\tU\013\305\332f?:\r\363Q\331\302\213\277I\224\240L\354\320j?\307\320\022\270T\t\244?\217\253\034\337m\007~?_\335u\366\223\\\263\277\241\364\337\240\232X\222\277\312\203\353\347%;\261\277\203BPtl\177\252\277\277A\271\256?S\224&\313HL\243?\271\211Wg\020t\237\277\n\035\204\224p\001\264?#a\334\310H\002\203\277\356\370\350\264\215\213f?r\224XU\204Z\226?\311\034\216C|\335\223?k\302{\267\031\334\206?\345\235d*hN\241\277\245\025\372\316H2\245\277\037\340\255\365{\025\262?\257m\177\212b\355\243\277\273e\026\335d\234\261\277!\361\003\335^\266\226\277\266O\253\346/V\225\277\3268t\235g\214\223\277\220B,\237\333\203x\277\225r\350\265\n \247?\212q`\265\376\351\274?\241\235\373\361/\222\257?l\365^s\351\206\247\277O\213\016\n\006\250\256\277\347F\274\317fXb?3td\345R\321\261\277n\002/\027`\320-?\363\322\271!?P\232\277Y\016\363Q>\233\243\277\014:\304\026\377\253\257?y\032\036\277\276\341\236?\241\220\002\342\351E\245\277\034\311\025x!_\201\277lG\000~\312&\226?\214\212\227\266%a\277?|\031\007\320\360\276\254\277V\246\277\017\204\305\243\277\236\265\023c\377\212\257?\337bw\202\246\210\236\277\300\326\267\356\356$\241?$\211j+%\373\266\277~\035\211\014\177;\264\277\262ST\260\266\201y?\357\322\260\254\t\256\245\277\375z\370\017.A\302\277Xgm\370\020\353\243?E\222W\216\304\220\251?\361\024:)\375\235\262?\335\323\240\337@\311\240\277\372\361[\277\366\205\246\277z\214\006\200G\272\252\277\325tS\207\001\262\255?\364Q2\257?@\272\277\350p\3171EN\205\277\366n\214\271\212\231\212?V\030Y\342F?\252?f\373\246fY}\222\277\272\035I\261\016\013\232\277\321\210\243\017K\271\262\277\242\210^\223\rz\262?\364\340\232[\031\026\203\277SU\222&p\025\261?\300r$W\244J\233?\311\336\260\204\033\266\226?\252cF\207\302R\250\277}\227\rg\2278\215\277\241\003\310\301e\347d?\370\tc3\357\024\246?\321\363\020\340\337N\236?\010\350\260>_>\237?\255>\270\004m\026\227?*O\220~?\243\235\277+\252\005~u\254\217\277\010\036\253\243\305\367\240?/\200o5H_\220?\323\n\005.\001\237\245?\325\215S\"\013\270\236\277\002&\337q\256\325\235\277\322\311p\235\006,\200\277^\323s]V\355n?\222\245\370\356V\230\202\277\n\031\367X\311\224\267?\277\324@(\'\006\246\277N\353s\r\233-v\277\306\310\216\333\032Q\224?\022J@\357yj\235?\256Z \006\0013\223?\351\232\262\351\r\336\253?[\276(\204\307\336\252?\343\200F\037#\305\251?\355\305*\271nH\273\277\014G{\252\336\352\262\277;-`\276?\373\227?D\216\337\'\021\036\266\277\322\305\206\336R\362\226?\271\\\235r\222\341L\277\rB(\344\212\021\244\277\252\237;\304i\301\255?\256\'6 =\251\205\277*\220\"\301\251\236\244?\023,\004f\017\317\245\277\017\340\357\343\002Y\252?&\344\031\016\303\252\236\277\330\353\373wlg\261?\001\'\376\2226\214z\2777\016\274\264T\345\301?\200\364\314\023\230P\267\277\207\222jF,[\204\277\305\255\305\215\343\000\236\277\350\253\224\220\225\326\261\277\323\333o\226\220r\224\277\t\324\341\310c4\214\277\215\rZ\325j\311\250?o\320\206\245q\305\265?J\274\023\'\231\256\236\277e?\247[\373\306\237?\246\n\236\317\240/\257\277~\232&h\262G\235?\003\341\356T1O\255\277G\r\260\370\202\023\230\277\226\332\275\341\223m\241\277L\314\"t\237-\210?xbw\321\274\007\205\277J\227\035(\276e\260\277\'\224oY\323\006\274?\2141\367N\232\203\245\277\252a\221R\357T\243?VI\204\376\216\002\240\277l\251\376!\200\273\230\277\345\321D\320\266\316\246\277X\341^o:\226\230\277\314Vv\\\313_\261?;\\O\357\233E\264\277\260V\204gnL\243\277\315\323Fo\360\033\235\277\207\217\'/\225e_\277a\247\'\261@]\227\277\026\006\232`\017^e?r\366\"\315r\\\212\277\202\327i\\Xk\245\277\0310\221\317\361\237\227\277\243\267BYlb\222?\214\330\266}\324\360\227?\333\353/O\210\017\255\277%\255w\232Y\306\236?\017P\236t\031\010\214\277l\265P\020\221f\252\277\302\275\214mp\222\234\277\323\227\t\347\240\324\241\277\37711\257\007\355\231?\264Pc\235\320C\260?\366e:\310z\352\242\277\232\304\216\335\236$\256?\027\373\200\334\312u\203?\003jN\325g\301\242?B2\350Ph \255\277ugCK{\033\224\277\t\330\227j\251|\245\277\333\205\271\212T\003\250\277\244]\016M\247\315\260\277\035\"\007\201\217\357\223?\"\340e\371\330,\201\277\275`\033K0\337\241?;/n\336\035\266\204\277y\0220\221e\257\266? V\020A\215\236\262\277\372\215\030\342]1\244\277`\017\301\225lN\300?\257\272?G\261\027\246\277\262\220m=\025l\234?R5Yw\202h\300?F\346\305\321EA\262?\304\310\210\207\232\340\276\277\010\035y\252\312\300\225\277s\370\266\000\3515\200?\027\306\002 \241ev?\246m\336\304\177P\264\277&\362\004&t\235\207\277D\237\234N\322O\244?\347;\377AuU\267?\342\230v\004[\303\266?[!\207\326F\245\263?\236\371*n\034\344\205\277j@l\006Z\337\220?\016Y=C\204(\226?_\'\017\240^\226\237?>ur\360\302\013\241\277\032\202]i\001\245\257?z\224\200d\231\000\220?\032\241\215\003J\033\251?1\257\372D\277~\241?a\301\376\226n\354\203\277\240\247\3752g\210~\277\'\241\017\014\034v\245?\202\340E\001&\301\246\277\233\274\274v\\\352\220\277\234\010J\336!>\205\277\371k\312\375\371\026`\277\254\207\"R#Y\240\277<9J\2673_\247\277%\032\030t\262\305\235\277\'\235H/L\310\254\277\354)SS\221\236\243\277\3134\357\206\373\223\233?\231\032\253-\357\215\264?\267\361\022\000\233\013\300\277eL#bw$\230\277X\304\223\236\211\200\251\277\363\320\265#\215\367\261\277\300b\256\314\236\355\200?\372%\333\377-f\210\277\331\240\256\215;!\255\277\367\321\311\274\252\277\262\277\311\357\364\241\350\231\267\277\272&\034\020\362\220\250\277\022\'JL\230mp\277\322\321f\007_m\263?\356\352b\010\216E\271\277pjH\333\247\256\272\277,\237\004\240B\346\213?\000X8\332S\255\203?\317\"A\"tS\246?Iq`\362)\267{\277\223\355\227[\357\300\236?\334\310\247\034\004\376\177?3\302\006\341\334\245\273\277\274gF\3472\233\211?%t\233\244n\034\240\277\026\235\017I_\026\251\277\022\"\255\240N\301\236?\313;\337k\032\243\260?\025\327\252\215\0032\220?~\346\334E\354\261\263\277Hl\334\351d4\254?\235\330\300Fd\013\245\277\260\227&YD\256\201\277\225\375\256\257,\275\260\277SS_\325Tn\254\277\256\235%\302\031\336\210?\020\252$r$\366\256\277#\030,\366\032\306\300?\303\363T\025\360\222\254\277\237\213fG\324 \264?\254O\333L\352U\227?\n\324(DM|\204?\342\332\256\356\273\025\241?):\246\274\332\312\264\277\271\000\306\231\310\311\250\2778\020]\222>\301\260\277\017\320\322\206n\377\247?A\312\365\301\364\305\271?F\221{\311\272\337\263\277|\200M\r\341\346\237\277\"J\335\001d\312\240\277\364\275B.\r;\242\277\255r\270\245-\274\206\277\337\352\374S\013\336\266\277\213\343 \274\030Z\262\277\274\367[\275P\370\235\277Ir\306\322\037\344\215\277\276\261v\177\3113\261?\345c)\271\243\352\215\277\354\305\235\335\257\316\203?\225{-\026 \226\225?\327|\266\263i*\261?\377>\227C\226\014\263\277\3579\000\377\220r\222?\215-\343\350A\321\230?i vA\213\277\236?,\276\004\356\310@\252?8uh\376\220)3?\277W\354\275m\364\236\277f\343mfh\246\257?^\211q]/\024|\277\232\302\0313\302\355\266?z>u\031\236\010\273?\341\322R*\020@\262\277G\014\222\335\254\202\246?\010\302\037\023\250\242\224\277F\372\221\226\303\251u\277\3426\372\225\024\222\273?Bv\027U\211\306u\277`Kx\251\324{\272?zc\333\356rF\204?\366\247\310\226\267C\250?\214];\346W\332\222?\211\222\027-\353\371\271?2T^\273\304\316\243?|\022#G\320C\211\277\220\002\202\374\334\215\241?&\037\"\237Y\236\240\277\361s\027O\336\302\270?\375\006\003\2127Q\205\277\213<\265t\270\353\241\277\354_\366\3369\351\250?\021\353\355\255\347\226\230\277oi\207\234\3710\253?p\253\370\037\353H\210?-ie6\214\322\250\277d\264\207@\353(o?\350T\262O\230W\242\277\361\250\241lIR\271\277\264M\326j\330\341\227?\341\"\367\336\373\352\211\277\031\326?(\024\257m\277\366\346\276t\241\257\245?\242|i\216\003\373\231?;\325\003S\035\375\243\277\371P\017\274j\206\206\277Qr^\224\224\371t\277_\363\242h\356\352\241?\354\266S:\002E\232?\302\274\324OE9\254\277$WF0\334\025g?\305\375\200\343t\335\254?<\367\022^o\224\262\277\027I\330\215\314\372\253?)\333\010f\274\020\252\277\221$\030L\323y\207?J\336\366\360\211W|?u?\031\375\025\350\237?\240\317!\212^o^\277\021<\266=\330_\245?\207\2521|\317|\202?[\002O\300_\225v?[HF\355\177:\241?\232\311u\315\342h\257\277L\353\333\320Z\211\267\277,{\003lK1\250?\336|4\344`4\232\277;u\304\357\262\311\245?\307f\003\312\2017\243\277*(\210P\355\275\237?-\365\017\201\271q\251?\324\370B\030\242h\225\277K=\027\270\315_\245\277\247\343ct[F\235?\000[\3270\225\017\234\277\350\313\350\017+\331\276\277\220\017\017\357[\376\216?L \304k(:\236\2777\305V\035\030\237\211?\246\237R/\354\000\263\277\242$\373\037F\220\267?\216bT\315\310`y?\'V\357y\002*\220\277\261\305}\260&4\241\277\200\356\332M+\311\251\277\255\303O\336\261H\256\277\244q\226\030\227\333\265\277\215\256z\032\242\'\267\277\035\244$\200\034\312\206\277\022\237b}-\"\303\277-$esV\305\271\277E\310\326+\255\321\272?X\010+\222p\205\227?EEHi\356\374\242?\322\315\260\243\263H\201\2777T\252\272yt\250\277\245C\030c\033}\242?\022Q@\305Y\240\223\277\245\362~\\\311\\\251?\304\035\277\213\034\266\226\277\305\020\256.\357\266\254?\306C\301\213\034\212\224\277\200`\353\2263\254\261\277\343)\331\376\2515\251?\006\200(5\267^\223?\343\347\330\222\215\375\265\277I6\242\334qi\220?8\302\234\035\247\014\267\277\332\336\261\232\t\246\224\277\300\255\032\000\t\273\262\277\376;\371_i\267\235?A&\177a@\345\252?\257\302\027\265\371\341\270?Bu\025\220\300^\247?\271\200\237M;\372\267\277\337UI\210]\227{\277\016\205p\262\037\276\267\277\331\226l\253rv\261\277l\255\355u-u\265?\230$sg0$\271?k84C\326\270\234\277\200\346L``\332\245\277\211\357\266\235\227\222\220\277\316Y\016A\322\177\234\277\033R\2671\271\217\224\277\231\010\361\347\263\375\207?\241\r\263%\273U}\277\017*\370\355\237g\234\277\2251r\3755\000\275?\252\303\354\241\301\350\244\277Q\322\013\227\255m\265?-2\325\030\275\362\261?rq\260\354o\200\256\277\316E\rbnL\247\277(K\374\2726\t\211\277\353\360\006\335\363\221\260\277\367Yz\254\215\241\252?9\207\r<\034\356f\277\255\016\352\202\'\005\240\277O2\375\215w\254\270?\365\267\352-\217M\237\277J\206\017Dw\343\273?\r\364 \327\300\000\274\277D^\271\347\344c\261?\262\020e\275\266]\246?r3\017\366\234\360\225\277\"\004c3;\214\277\277\323D\202\342=*\266\277_j\353\203\013\307\235\277\234n\006O\026\017\222\277\331cX\223\334\364\202\2774\233R\253\240\300\234?\200\200\365E\202\177\261\277\27701P>/\254\277\013\257\tL-\363\220\277\266akD\324>\243?\021\310$\307\243a\264?r\022\326\337ox\202\277yD\207\236\255\341\276?0\230\212\260\232\254\245\277\341\242\306g\316_\263?\231\'\006\274\025\273\273?\031\273`\264\253\033\217?cJ\321]\317n\233?\243^\366NU\350\225?\345\2026O\320t\216\277Gn#r\n@\260?{\251\240\252:\236\254?\216\354O\346\216:\247\277,\027\356\\Dk\227\277\345;\314dL\321l\277Hy\246\314I\2372\277\362+\212\364p\210\266\277\260\247\245:y\004\244\277\204@co\023\222\260?\273\243\2317\3653\220?\253\240\0348;\332\301?=\266\225\000\002\n\266\27707\356XB\037\243\277\337\0351\323\214X\236\277\312\3006\332\235{\271?\014\027\201.\353\210\260?\262fu\327e\220\220\277\370d\206\272\274\023\256?\310\305_A\0064\252\277d)U\257\307\262\256?/\277\321\021\255V\243?\320\363Y\313\312\177\262?\213QR\320\001(\265\277\347\304\200\265\020\205\205?\251p0\333[+\260\277\r\255\004w\312\237\252?P\236\304U,\276\261\277]:\230h9\025\261\277\005 \2607\223|\246\277o\032\324\'ce\211?R\232\022\002\240v\204?,\207\236\336\357\323\244?b\260\311\225\3672\227\277\003v7v\344Z\243?\276Y\266\332\341#\222?A_\330_\362\262\203?+\236\274\322M\330\201?\354\352\272\271\206A\255\277\215\305R\342\276\026\223?\341\004\376[k\261\216?\253\370q?C\331\224\277\213\353@9\020\211\224?\014\021\342\3629\254\265?\235\322\321\345@\374\263?\245_{\023\307\020\204?V\027t\311h)\276\277\223\372\020+\356\320u?:A\345,\007&s?a\025x\313\217]\274?F\"I\275i}\275\277\0225\030h\013\322\264?w,\203\000BM\242\277\305,Hm\014 \201\277\222M\023\244\210\206\210?X;\327\037\200k\241?\\\327\225\243o\377\253\277&\307\336\344\301\350\233?\210\227\364(\203\323\251\277O\267\265v\225\025\256?\327\346\352\302\342\024~\277\214E\333r\004\211\263\277]\275R\333r?\300\277\346O\332f\325a\277?\335\233\347\357(f\243?\341T%I\270\326<\2770\265.\n\351\206\264?\300\037p\033\'+\247\277\270\267$\'\336\344\232?\371\217\346\000\360\366y\277.r!\345\302F\261\277\016{\226\206\2153\235\277\357}\304\214I\252\254?N.\232GAB\262?F\016\277\361\275\271\260?\003\000\206\332(\240\227\277U\217\264YCL\254\2774\000Z\310AR\267?\301\364\246\010\025H`?g\225\375\250\370N\177\277_f\354\343\353U{\277z\206EP\376}\234?=\321\371/\324\272\250\277\213\006d\224\264\264\237\277\313K\210\000\311\232\220\277\345\3515\277\334\211\243\277\016\324(zy$q\277&\253Z\353\233\257\271\277\373\215\032\377R\335\200\277\365g89\217/\220?\314\334n\317pg\225?\203\304\275e\253\230\243?+\224\234\323\337\"\250?\025\222R|\327\000\261\277\332\313\265\245\007\224\263?T\332X\344K\275\236?U>\261\257\205M\245\277Rx3\212D\212\224?oj\367\376\207\025\260?\340\250\246\334\214\347\242?\367\002\021\327\275\024\220?\374\005r\242\004x\255?\327\337\357\227\264K\267?[\333\261\246\211z\242\277R#\035\301\332\037\245\277\270\3709$B*\242\277\240\\\253\251\200Z\266?`\345v\252\342\333\240\277]F\226N\302H\227?W\333\373]\261\276\245?\320\276\364\010\252?TC5\275\022\234\235?\207K\236\030\177\361\252?u\223\355\253\303r\231?\210\330s\326\026\005\231?\004\017\241\370\257\002\237\277y\266O\352\000\214\245?\035\0219\235[\275\227?\033\265\2727\347\263\226?8dE:\262E\244\277\327Tp\345.\341x\277G\205-\321GZ\247\277A\221\002E\307\t\241\277w\220\213h\377q\302\277<&\210\277HX?\006\013\306\3043\316\271\277sBiT\034\026\234?\267\335\202G\020\207\246?\230\376}j\313>\215\277\232yj;\305\023\301?\303~\314\023\225\267\251?\003\364\037\215=\337\203?w\267\276\262]\022\250?\305\236\322\223\211c\215?\031X\215\347\354\020\207?+\315\245M\341\t\223?^~\311d>Z\266?\345zz\0272\332\243?Bi\235\027\375\225\233\277\" Z/\221N\262\277\247\010\330A\365i\266?\323\354\252wjn\243?\037\226\373\246\335V\253\277\370\002\025g\244)\255?\324H#\227\221\004\215\277\310.\2509\202\235\243\277u\200\230\177\300\320\231\277\273Q\221\263\270\273\261?\214A\214\353\221q\262\277\332\373m\3674\255\254?.R\177O\376\234\257?\263\030l\345d\323\246?Dw\r\315\313\221\255?\203b\014\314\035U\233\277=h\230F\332\030\231?i/\365\245\207Y\207\277\352K\275`A\t\240\277!\300\320\035\250O\247\277\303,mx\364X\243\277\373\344\374\rW\305\265\277#\244\347\304\365\035R?\210\222\345e\007\374\251\277\211\232\'\310\261\346\246\277\3334g0\205\026\253\2770k\201\345\370\236\240?N\211d\277rj\242?\225\0106R\214,\251?\223\252\251\031>\033\253\277\307\241!\t\003\271\225?\323\207\267\312/|k\277\321\\\301\313m\356\262?\251LEK\231\261}\277e6X\005\177\306\267\277\351\346\036\206\265q\220\277\022\244\'\211}\025\226\277\2708}\356\331\331\266\277\327\222\357\314\224\316\255?4Z6;\214<\231?\236\346\233\373\334\314\264?\022\t{]x\251\207?\033\017+\262\351G\237\277iM3\371 \340\243?x\024\306,6ag?Y\300\031\332\001Z\263?R\026.\371\024\365\203?E\013\274rr\204\200\27718<,U\351\275\277\r)O5)L\177?\003e\331\203\333\323\270\277f\341\243\006\322\'\260?\336W|\353\254\354F?\277\202\327\215\320\354\264?\300L\225\037\343\312\266\277Yh\247\257\216\237\253?\264\354\213\323\356\212\223?(\3041\227U\320\177?\317\273\251 \3268\257?\377\265Y\213|\305\231?\366\002\177\255\321\037\244?N\364\227\177\203\333\234?;\307\246\212\201M\243?A\014\314\377g\'\231\277\227-k\250\223}\223?\007\350\364r\005\301\254?\"z\236e\036\330\204?\274[\360\354\177\360\251?W\t\217\330\\\260\240\277\220\001\233x\214\013\257\277\013f[\r\252\333\254?\264\022\314\335\264\204\275?\313B\025\242\021$\222\277\251\007\222X4\302\212\277\335>d\247u\332\245\277\025$\252\241\211}\242?\304\033L\233\031f\224\277\337W?A\304\225\261?\033\267k\261\0373\251\277Z\314\344f\274!\233?\214\232FB\315\320\251?\216\254\027s\345\377\253?\210\005\341\024L\223\202\277t\345Z\223\033\240\255\277d\277\036\032kx\243?\016\027\247 ,\'c\277\301\030\257\244\326C\260\277!)\320\010\370rp?\303\377X\255/,\247\277\027k\344\227\222\001\270\277\224\3457o1\363\302\277\316\367@\234\212\205\265?\255\270\241\365\204\313v?\2222\3049\226\217\256\277,\'\226\376\337\272}?\027\037\'\252\030T\267\277\243\006\211\336\306!\260?@\010\236\214\234`\225?\026\352G&\341\372\246\277\352n,gs\266\200\277\322\n\332P\374\264\261\277\354\230\220M\335\243\217?a\235\024\020\227\260\221?\376@\376\344\032\373\234\277\255\0310\363#s\264\277C8\377\007\233(\245?\001\231\314\335\303\331\240\277q\"\243\254\315\014\275?G\205\0149\034\370x?\200\344d\212\334\364\210\277\337\207\023\220\237\353\237?g\214\260s\t\326\303?\222\017\2164k\254\240?\256o&\371\306\002\253\277\226\334\027\232\024\360\214\277\234f\355\260\025\262\204?\341\224\215\336W\251\266?\360\"\022U\013\365\226?\240\330\226Y\314\316\225\277d%2\234Y\253\256\277\220m\267\361\305\231~\277M\336\325tW\225\236\277\371\rJ\320\271\274\254?L%\337\002Pm\210?\034_\201\301;!\202?\006\327--(F\200\277\266\267.K\316\010\243\277\202\363sA>\305\261\277\376\357\320{\251\331\234?\304uT\013\273\254\243?\365%\034\350\030\033\263?*%\220I\304\355_\277\202\200\275\260\037\354\264\277\360_(\254\203\363\245\277\232\016\372d(\177\177\277V\304\320\031\273!\252?\306c|\\\020\302\213?\273\036\2411\315\350\236\277\217`\210\272\005\316\263\277\013\003\025\'\246w\256?#\302\234\031x\311x\277\010}?\212\223\257\241?v\345\351\272W\245\253\277hcd`\260\026\261?\017\331 0\250\270\265?\303\222\363%\023\324\243?m:\265\263\177\255\270\277\304\301\371X\335\234r\277\t-o\\\032t\225?\311\031\200H\265\270\263?\221aV\035\226\216\231?\273\363\201\016\261\306\252?\344\221B\013K\316\265\277\244\250Ed{\241\274\277\347\017\322\211\240S\252?\211\034\022\\7D\243?\363\206\260\014PH\222?4\2541M\256$\205?4),i`\031b\277A\362~X\315]\216\277\347CH\362\376\3501?\271\022>qe\"\221\277\323\313;VGh\241?\357\301\\s\010\231\203?\351\253\335\373\374\013\204?y)og\214\006\233?j\227\363 \311S\235?K:\025%|\262\242\277bqG\302\212\nx\277cN\334.\010f\260\277\261\265{\022\272u\260?\014\354\271\202r\010\276?\nh\256\006S\260\260\2773\367\250\263h\020\243?tBC@5\027\243\277O\361\305@\345T\221?\017j\344\006;G\246\277~BtT=\334|?\326,\376\001\215=\265?\324\030]\363\207M\242\277|J\000\223o\244\253?\323\352\340\331\224)\210?\277\201\253w\211\263\267\27718\333\314\374%\242?b\373\031WO\035i\277\334\3763\277\254m\263?:\2352\036\025\222\250?F/\203>\246\272\256?\334Sv&3\232\262\277\373\276\273\247\237Pz?\235\332\016\363\366\343\233\277\214\347\0107#Mi?\200\017\034\373~\310\240\277\236\030K#I\220\260\277^\025\320u\303j\243\277\036z]~\271p\262?HV\361\207R\010\256?\r\367\211\201\235\234\211\277\256\336\023\2051\334\241?\213\354\271\240y\344\241\277\222\372\022\336\234\177\242?\377\203\036\256\246\013\230?\356\357\336\331/\177\217\277&\231\256\203y.T\277[\321\203\330\245v\261\277^xNJ\301\340\252?\214\304\332\363)\333\225\2776\\\343\010:s\230?@\265P\367\356$\216?\325\030\226\r\000-\217?\355\'\035\352c\010o?\021\316-B\210\307\250\277\3618\270\0343\200\265?\213c[h\021L\223?\271\000\')\177\247\224?\322\354\374\357b\251\264?\320e\274\312Gg\244\277L)tO\306\na\277\202b\323\330\177\331\273?\177e\226\355)_\261?\343Qj\236C\217\213?\177Wa3,t\235\277\nki@\220r\205\277\352\317\207\230\010\326\226\277u\2644\0247\263\200)\331r?\240\237\266\312<\017\274\277p\r\344\250H\370\271?7\345\215\320/\245\241\277\014m\372\234?\031\262\277\2262\335p\354\252\223?\324r,\031\217\354\200\277\001\313\317\tct\300\277M\236o\177\215ho\277\311\342@\275K\316\205?)\002\032\333\347\317\241\2778\227\013\022\366\364\215\277nRXk\366U\270?_\201\343\205\'j\260?\302\272\n\020^b\262\277\004\022]\341O\364\205\277S3\265\223\334;\203\277\035\274\317\313V\020\251\277X\323\327\035\1771\251\277\316\320\366 \262\235?\374Y)\272(\303\240\277}B\361\371\240\354\272\277ZD\316\003\214\365z\277\366\362\270\347\035\242\243\277\232#\364%Z\320\243?\266rI`\022\313\275?\355\231\231\177\317\004\227?w\255h\t\2272\206?FQ\031O\305 \273?8\211\321\214\227Gi?\277\204*\325\306\352\276?q-\026>\035l\263\277G\232\246\3775\353\260?\n\331\372L\372\277n?\2117\226\365\254\267\260\277=\034Z\312\"\225|\277ZU\024\014*\344x\277\232\312\034\233K9\261?I\254\327\366\332,\224?\2623\254&\247\320\232\277\2210\027\224\317\310\246?\036\311:L\253\316M\277\257\344\264F\003-\264\277<\363 \010\245q\265?\354F\353d\022x\232\277\363\006\201\321\n\346\243?g\325\347\215\223~\256?\262%\r\036\241Q\272\277z\241\333Y\271O\277?\334\374m\005|\212\227\277\220BY;\231,\207?gK\202\303\232\320\210?\374\244\323\274~\345\242?\014\207q,\035?\260\277M\205\266\266\377\037\251?\203\247\262J/[\245?\003O\314he\333\261?K\304|n\3412\247?\024\335\323\274\311\002\271?\003\352`\354q\336\260?E\332MG\t\230\222\277>\237?(\306\014\226?\026\t\314h\306\333\241?3z%M\322\336\222?\033\267QT\014y\240\277t%\341?\222#\236?\306\264oD\320\360\263\277|w2\372L\032\240\277\177\324\304~R\212\267?\266\221\220\351\327\r\224?/#\177\021I\245\271\277\250\2756\024\014}\206\277:\254r\237\321>\205?\004\223\217\217\234<\205\277t\213<@\233-\270\277\247\306;%\314\363\257?\304\316d\027\263\347\233?\214\'\247\310\2629\246?9\303\301Aj\347\207\277q\323\377\232\010\265\255?z(\232\316\272#\200\277\256.\010\212\013\256\220\277\'\306\022\2774l\202\2778\303\331\303%\371\262\277\272A\333\331y\207\263\277\005\267%N\242\337{\277B\027\031\214/\343\215\277\363\207!\205\034;\254?Y\376\227\373o\nV\277\307&\215\300O\312\222?Xs\307\264@U\253??\377h\264X|\211?\306Y\330\246u\244\306?~<\320E\311@\233?\367L\206\261\n{\251?T\227{\200\334\232\214?r\232Fu%\007\227?_I\376\314I%\254\277R\253\317F\322W\236?\010j\005\206\364\236\234?\\Z\345\276\371\216\252\277\335\225HI\352\370\236\277\273\2156\203t\216\263\277\304\370?\201\340\377\264?\300\246\231\347\221\021\243\277RNu\240\333k\254\277\304\221G\023\1778\241\277\222{\322\3462+\241\277\242<\344\300\202\363\253?H\217\302Z\233\227\234?\246!Mx\372\260\206?\022R\232I8\261\247?\313\334t\206\246r\262\277\253\253m$\034\220x\277V_z/\014\336\247\277\305\275\375\276\245A\246\277\330+\372p\320\007\266?\2772$\223\014,\272?J\010@\nW\221\241?\337\234\034[\376V\226?O\255\377\2775@\273\277\033\256\277 \216{\236?\016\356\346K;\210\243?\277N/\260\243\232\201?\304\203\3233I}\227\277\020\023\306\253E&\252\277\014F\356\316\016\264\241?\344\325r\215EV\224?&\251\241\255\365Q\235\277\245\374`%\315\342\225\277\356?T\252\315\236\267\2779\2254\302;J\233\277\005>V\037z\266\236?\355\017\337\rB?\235?$\244\205\343\245\300\242\277=a\003n\312\302m\277\373\027e!dg\243\277\263\224p\314\002N\220\277\014\371d\253\000\031\273\277\255\300\346LH<\265?\037\216\033\352>J\261?\357\252e#\333\234\264\277\306%\017R\334\224\265\277T\002\3538\316\323\244\277\367\361\030\003\010\330\244?5<\347R1\324\253?\314d\352\272\355\200\217\277\202\316.\3566\241J?\0057$\010F\332\257?U\370Q\250\333\031\231\277\002 \362|e:\202\277\342\265YX7\276\300?\326\252;\210\000\220\270?\276=\025\005\023?\272\277SgD\207\013\023\261\277AC\377\2133\032\214?reTqNI\265?\361Q\020\2715i\265\277\321Z*\263\220\033\257\277\222\266|\347{\300\250\277\320\021Gw\212e\226?a\247\033\211x\355\240?\361<\220\247X\355\270\277\017\343\257]M\004\265\277|\327\322\262C\201\261?k@\r$)Z\262?\265G\232\364J>\220?\212\333)\261\317\276\266\277\030\375}\333\2054\270\277\227\340\332\314\364\365\253?a\275\233\022z\017\242?\301\226\355\223\200~\253\277\367\210Hd\036\t\262\277la\211l>Zh?&w\377l\2114\254?\346\3331\335o_\243?uX\361\2756~\212?g\314{5\222\014\304?i\002\375\n\2612\267\277\274\230\302\347\2540\237?\007)\361\324\'\232\265?\230\213SrCK\204\277\221\020\261\204P\016\260\277r\\#\356\225\271\263?\213\200\304\354\3343\207?\273\330\207\220\222\364\240\277sb\307)\002\375\240?#\003F\010\252\177\250?\222\275x\301\207\373\246?\006\354\304\345\363\221a\277\275\241\265\255\346\212\203?\031\273\365=\2108l?\343\373\353D\344\264\232?$\r(!\036\203\265\277x\211 \234QY\244?e\274}\346\261d\267?>(\262\\\353\031\244\277\304-\307\006\035\013\231\277x.\267\365\314B\261\277\017*\006c\332\266\253?R\243j\274\343\216\243\277\254\022J3\'\241\204?z,\213\013\031\240\234?p\204\352B\212\205y?\261\375\235\353\353b\215\277X\236@\333!\343p?5\037\317wJ\274\254\277\342\211\375\245\001\302\264?D\357\300\273!+u\277\014\312\336\335\r\303\261?\333\225\017\034\246\341\200?@\323\227\035\370\267\247?\342\001\007\216l\224\250?lfs\373\026lv\277b%\033\241w\n\262?\256qJ\362HZ\270?\357\257\2219\371\350\277?\361\206\344\000\330L\203?\244\306e\204\'\261\225?^\020\364W:\323q?\007\306Q\273\032b\215?\336\226~z\210\031\245\277r\377\270Z/\210\246\277\325\351[\353y\205\240\277(@\370\323\231\201\251?;\177\277\352:t\215?}\006\023*\325*\252\277\270\311Q\003A[\244?\235[D)\352\276\221?\263\235N\231\361\037x\277\375\006\316?\321\353\260?\250[\202OZ!\263\277\222\037+\302T\354\265?g\254\223S\006\301\300\277\014\266g\230{O\253\277\203\312\347\263\0352\260\2771\2753\031\201\373\253?\016\321\234\3739\327u\2770^~\232\013\215\254\2778\200B \037\306\223?P\263\365\375+i\234\277\360\223a\372\375\370\241\277\231\356:\321\036\205\235?\025\322\322\031\2441\236?\2607C`\204\234\270?\306\334?\210\206\025%?[\325\300\225@\303\240\277\332\243K\236\272n\262\277\245\3734\332\323\350b\277\370\260\244\231I\377\233?\230\216\223\335\326\236c?X\250&4\001\374c?\022&\253\267K+\274?h#{d\201L\211\277ta2\217K\312\245?\216.\304b\324K\227\277\234\224\240\014\2033\236?\232\345w\022X7\223?\277\316hx\017o\265\2772*\303A\013\006\361\276\261\224\2446:\\\253?\302\244\0271\314\030\225?\006N\350inp\252?\226\214\250\342\2565\236?\202 )\033\373w\263?\003B\035\210\272p\255?\335\343\265Db(\223\277\001\375U\273\001\274Z\277\271\225\242\224\025U\252\277H\352\213\231\2403\271?\3747\357\244S\362\230?\376zrb\023\2578\277\003\027\372wi\317\250\277\222\212/\351\340\343\265\277Y%\035\266\035\302\201\277\317\250\010\324\241\204\204\277Y\362fd\326!\210\277\212\211\005\335:\223\264\277\316\363X\301Q\"\230\277\217\305\305L9F\242\277D\324\206\351\"H\234?\264\207\232r\221\365\241?\234\\\376\221\237\010\267?\366\365\0337\227\022\213\277\202\006-\017\341ra\277~\353}\350\'s\224?\317k\3449\177q\272?\221]<\023\360\236\233?^b\016`\033\021\265?\360x\207c\024\305k\277\242i\342\023\n\373\253?\363\220\372[\017\264\275?\232\265+I5\361\240\277\321s\004\250z\003\240\277H\370\013\003\240\003\252\277L\243y@\240i\225?0K\004\030\017\307\254?oW\310\254d%\267\277\023\314\323P\365\033\203?\352M\004\227\202\324\256\277\341l\340\242\357\3374\2773\314B\206Q\206T?Wo\220\3551\332\266\277*\260%M^\376x\2771\227\205+S\343\220\277\3736\231\231Q\271\217?t\"\264\204\r\237\232?t\232\022\034\244\261\253?\035\234E\305\207G\271\277\334\237\206\010aX\225\277J\\`\020&\330\227?\270\255\334\376\203\226\234\277p\217\236Lk\316\224\277\304?\234\207\256\237\247\277\206(&\325A\210\277\277\374S\325r\320\367\205\277\312\235$\243\326\375\272\277L\272?\223\014\032\260\277cR\321\224Wlh?\r9\252\203\\,\204?\377\242x\200P\257\255\277B\366\r\302G\312\245?B\254q^\037\221\240?\001\341\360\201^\211\252\277\273L\036\243\364\370\243\277\330\306\225\237\377\300\207\277\223\024\227w{\314\233?\247/&\302\322\247o?\nJe\224\274m\255\277D\324G=()|?\335Ai\251\277W\215\277\031[&\325\216\212\261\277/w\231\022\004p\263?\325\341\273\336\006!\200\2779\220\030\037\203\302\222\277\235\330$>\034\236\242?\314z,\207\203X\257\277\260hk\203\003M\277\277\032\2420\337L\274\206\277\223A\226[\027>\277\277\033\017\315\t\030\233\251\277\222,\001-l\016e?\254\007\302}\037\034u\277>\352\006\263\327\270\233\2777o\323\213\005\302q?0\\\263l\304\377\250?\005\323\207\351\2614}?\372s\037nO\325\222\277a\003s\253F\246\236?yMOw`\361\207\277\035\266f\335\205\026\210?\337\313j\233\275\342\227?\206\370\013,|d\232\277\327@\317S\372\037\201?\317\022o\264o\214f?\235\220\\\010\370!\260\277a)V\210]%\241?\232\225A\300d\016\250\277oGAi- \276\277E\312\213\254\241u\242?C^eP\356g\272?\350\202\203\224S\355\245\277\372\007\277\333u\264\264\277@\252\370e`\315\246\277/\30166/\355{\277\031\0101$c\351\234?N\216\253\356j\266\240?}+}g\347\355\224?\301}K\241;\272\245\277\005\361\326)\367\251\246\277\347\023\003\215t\034r\277\263(c\220ya}\277\007\017\026\212n\021\262\277\34786\300\256Q\226\277\n\365\313L\001T~\277\243s\037L\203\336\265\277\314\004\242\224\350\276\222?*\317\014\232\3161\262\2773s\201\353S\202\247?x\304b\271\344\343\264\277h\002OP\262-\204\2770`t\363$\273\256?\235\336q\370J\260[?\313\205\217B\262\373\240?\234\353\200\304\327O\212?1\326\270\031\230\261\234?1v\021\006\264q\254?\222\340\262\210.\256\252\277\270\243S=U\313\222\277\'\206\302\377\362\266\220\2779\303\253<\372(\262\277\002\351\267P\273\275\233?\306\316\030\337s\276|?\021\030\225K\325_\266\277\341\375f\274t\220\233\277=\226\334S\024\033\252\277\364R\262v\275/\226?QB\210\270\267\242\254\277E\337\357sK\206\264?\365\330c\275A\205\247\277\330\345\333\354\177$\231\277\326\373\224\242\360]\227?t\347\271\211\213\326\262\277\216\233\r%\360\327\264\277v^\335\'(\237\253\277\265\347\337$\351@\224\277&\006t\222\037\215\246\277D\374\370w\351\177\226?\304\374\237\345k\270c?a\363\362YJ\346\230\277#\006>\267\t\351\252\277t\361\206\303\\=\202\277\376z+0\303\272\242\277p\035L1\236\345\232?x\305\316~Ah\253?R\231\022\207\263\365\243\277\233\177}Pq\365\222\277X\205\317\262\014[\240\277\234<\320`8\035\261\277\240\341\350\307\2752\300\277\262\377T\212\263\347\221\277o\353\315\230=\320\244?\247\332\021\246$\373\252\277\'G+H\037\'\271?\370\245Gg8\214\303\277|\342\202\n1\305\244?\204n\247i\277;\270?c\241YR\022\222\223\277\004w\216\353\374\022\257?\tp\312\030\020\007\216\277\314I\014\266\002=\224?\031M)\005\246L\211?\376w\247\334\"~\244\277yX\272\033\017)\227?d_yWK^\253?}H\260X3\237\251\277\307\371\204\006Q\253\252?\275\347\377\224gJ\261\277\t\304\216\351\337\202\255\277\231\035\217\223\316\203\251?\343\310N-\005\357\254?o\001\242Sn\026\214\277\346\330\016n\304\262\243\277\334\"6\033k\266\272\277\365\223{\307i\253\262?\217E\200\321\272\335\215\277\342G\025.\231\230\233\277\261FJ5\203\324\230\277R\214\205\265\204\255\241?\246!T\354\362\010s?\315\0171\025\206\362\222?!\317\262\244\nt\251\277\nw\321\202\376~\242\277\367bv\273\006\036\262\277q\200rx\272 \277\277Z\n\256\272\241\nd\277d\223<\031\232f\302?\367U\252\375o\257\260?:L\353\225\342w\267\277\362\217\325c\223\277\260\277\0306)\032zD\242\277D\177\264\300\235,\245?\342\032A\262\217\343\246\277\364\351\000M\223\334\245?\325\334\332\0227\232\204?\230\317\200\353\264\335\230?\n\207X#\022\000\267\277\361\022\212\211\311I\243\277\237\264b?;g\250\277\nW\032*U9\270\277w~\277\350\271\366\242\277\350\010\305\311c\242j\277\311\002\027\026\276i\223\277\232dhwRz\231\277\035\375\203\357\201\'\234?\267\r\r&\263\373\220?\272\0308\306/\337\221?E\312\304\243\327*\261\277\277\335q\212\322\224\225\277\266\210\233`]\363\275\277\220\226\262\235f\227\233\277\213C\322\203\236\244S?\304^\335\3213bl?\364\321z/\006U\246?\267g,\307\225\301}\277\013y\'\311\032q\217?I\300\363\213\221F\257?\225@\313\227}\300\251\277\340\371V\250\326\277\260\277R\010H\201\2557O\013\232?\231\266\347O\342\322e\277\337\307\274\245\253%\240\277\255\232\301R\371F\230\277\325\233\320\342q\317\202\277\267\235\256^\003-\200\277\325\212uA^\355z\277\370k\327\257\232\246\300\277\323?\355\350\306g\207?\377*=\250N\251\247?\337\371\017\r\023\360\233\277\322\262SL!Lt?\035\020\025\375\362\013u?\220g\202\240\216\221\263?\321x\374\362\305\013\225\277#\222\311\365\262\363k\277\364kXu/\217\300?\376m\033\214\033U\264?\346\232*Q@#\204?\013I\257\374\375?\217\277\276\234>\307\021!\245?x\241\222P\016\277\210?!\244\264I\234\340m?4\345H\030\251\215\226\277\353\212\026\326]0l?B\034\251\333\216 \240?\224x\375\007\312\232\246?\201\037\212\326\030\205\221\277\032\027\340\022\252Y|?\204\232\034&\341\267\236\277\035\243\024>T\007\231?\204\235\257\331\237\312\246?E\347\351L\366>\232\277\300a\231E\357\370\242\277\230\247\370\243~\000\201?R\001\010rP\241\234?\272\036E\n\302\017\203?\346\343U\241Rw\242?0O>\316w\224r?\264Z\277\353T\230\250\277n\221\300\036\306\244\224\277\211\355\2577f/\273\277\223|\364\212\347&\232?*`\214\361-d\221?\212g\212\005Z\365\261?\356\325\002{Jy`?\316hz\310\311x\257\277\243\301\006[\231\215\206?\006\370\345\003\201\366\260\277 \031\203z\t\211\271\277U\356Q\244e\022\265?sPD\227S\355\244\277A\252\2307.\013\265\277\250\204a\355\210U\305\277i\327\260-\030d\237?\336\356\361\341-X\242\277z\232{F\315\270\243?\027]\027\343\313\253\225?\353\304\307.\177\306\223\277\335\376.\353\332\250\271?\253W|j\003\244\252\277\310\235\315\356P6t\277\260V{\200\256Iz?\317\303\241\010\033\366\213?U\370\346\t\355k\221\277pa\313\331\254b\264\277\321}\266K`\'q?\300\371\257\270\264!\257?LOy\305|\302\262\277\263\035\263^\255J\242\277d\324W@\216y\206\2778\205\325\250\016\302\256?\343Xd\357\016?\204?\372\235\235\233\240\252\242?2I\035\233c;\207\277\2710\201G\326\365k\277\rt!\313\273\007\300?\303\021\00527\335\230?q\367r~\331\342\224?\272\242Y\321\326L\267\277\223\372\003\215yx\237?\237\021W\306\320+\260?\266\025\266\264\264Q\221?\357\331|L\211V}?\005\267 c\033\212\262\277\013eC\2356\203\210\2770g\242\311j\272\203?y\361\002\225\'\007\257\277+\242\375\033\357\214\267\277K\227\370:Z\271\232?\231\277\323A$\021\230\277:\211\"\2075\'\234?U\367w-\344\204}\277\016{\025\364\261$\254\277\200\226\204\321;V\245?\373OA>|\242\267\2773\030[\313N\231\253?\263\276(p\013\014\232\2772\221zqQW\267?\334\366\241*H<\261\277>X\252T\037Q\223?)\357jg\305\323q?\222\275\353\376QB\213\277C\262y\247\n#\240\277\021|\223\341\r\315\244\277\022\013[\207.\030\220?9\216\004\017\323-\251?\023\3142\021\004\301\260?t\2475\366\270\316e?\374\354\261\240\302\"c\277<\230\234p\030\253r\277\021[\333\201\232\tp\277M\003\242u\013Q\260\277\346e\005\334Oph\277\317\276k\376\204\377\246\277\277\277w\366\227\223\240?\350\207\002\255\2517\254\277Q\352\315yq[\250?\325A\266\023\312\206y?\245\245\245\277&\350\217?i\321\'#k\377\246?\323\n\246\204Wr\243\277\252\354\334\312\003i\223?`\006\214w\333\215\255??\352\226\365\236\200\240?\003\3021^A\016\246\277d\tD\214\372\242\303\277s\030l\230\222W\260\2776\210C\306i\352\277?\221\251\354c<\215\262?0\312b\213.M\213?\311\003\n\324|P\262\277\342\222\377\010\267P\266\277,\301$\325J\010\212\277M\267hX\236\314\247?\274 \261M\336\242N\277b{ \237\261z\305\277\025\204\312o\232v\021?7\310\312\362\262!\261?\037Ti\220\270\231\270\277d\335\252\364\320\007~\277f\341\202I\013<\256?\n8\323BI\323\252\277\212\303\252\272]\332\261?\300y2k\370\271\256\277nn\034\257H\230S\312\260?g\007\004w\374z\245\277\367\222\327\317k\256\300\277\201\357*\373V\371\271?\256\025=r\t>t?\226vA\023i`t\277\220OI\006s\332\252\277\236\363-\375\250\333\301\277\023\345\376\226\365\310\242?Q \036\342\363~\264\277\037\"O\214\270\316\265\277\252\000O\366U\374\220\277\271j\224\227U\240l\277\355W\252\267\246kd\277Hd1\365P\254\223?fg{n\240\006\250\277m\342i\353[\225\225?\205\2739\275SM\267?\364i\370\377|\264\210?\01328K7=\205?\365\224\007g\207$\265?e\371,?0\257o\277w\3135\001\271\343\243\277;)\"\354\327\021\252\277\235\242\215\362\000\354\263?\334\256\317f\243\324\262?\347\031\232d\035\344\247\277T\247.J\210c\242\277_\200\237\'?\226o?J\323D:$\033\231?\242m\315:\274\033\202\277\260\335\2616W[\241\277~l=\3120\270\245?\255\251\306\004;\355f\277\270\335u\345\362\204\222\277\030\273\245\037\233:\223\277\025B\250\177\237\323X?\234\300\335\356\325\343\270?\244\r\326\177\366M\255\277\r\325\013\t\006\343\257?F\241\316\260\221\r\277\277\276\251DSr3\251\277\206ZcC%\275\225\277\372)\210\317\251\272\247?A\005\203\017\360\246\300\277\232\333z\000\263\026\236?\034\014\212\223\223\274\256\277\233\017\360J\301$\300\277\263&C\246\217n\274?;\231\301\306\352[\235?\341\003\2573\300&\262?J\305H\352~[\233\277\322\366Y&>[\225?~\252\247\357H\322\213?rO\2516\3441\264?\\\212\243\006!0\224?\356$x<\n\374\265?SI{\353qX\266?\351\377\247\215U\303o?\333E\2146\326yf\277\362\206a\"\020z\263\277\352\267\307eXY\243\277\003;\274 !\007\231\277\354\371b\353\316<\255?\310\340\364\305\253a\261\277\026\267\342K\231\024\217?\260l\030, v\266?\313\027\2357\026\030x?P\277\277\236\326\026\230\277\025J\257X\303\036\245\277\343\313P\332\342\035\221\277\236\362\220P\277\323\260\277\032\2335D\374\365\276\277\250W\220(?;\265\277EZ\202\340g\252\247\277]A\342\241}\030\244\277\300\204\202qnW\253\277\354}\026\341J\206\260\277L\2625\225ZU\250\277\257\326\3352\256\326\202?1\035\214>e\024\225\277\351\330\350;\027\301\264?\275\363\226\200[\352\222?\356\036\233lt\014\253?\350\317\251\356\331\034s?>\331\232\315%\\\261\277y\343k*U\336\271?\344\304\020\307\301s\255\277ZL4\305\365\n:\277\340JC[\026\334\203\277\206=\271,z\036\300\277\312\227\013I~\177\242\277K_C<\024\224\226\277\360\316z\245\310\000\274\277\206\220\317\242\303\216x?\"S~)dU\301?\312\330\322U\334-\223\277P\336\001\363v\232\233\277\2560\001T\361\361\265?\322o\3662\312\210\263?\t\311\250f\242\227\201\277\375f\023\021Q\244\263?\363\327s\217\014`\260?Xs\262BYj\203\277\343\363\327R8\200\247\277\023>5y+E\237\277NVZ\337e\205\300\277\312\006_\322iJ\207?|\327\357da>\241?>\351\354[kB\260?\277B\360\030g[\246?\007\243\375\356\362W\260\277UM \225\263\032\201?\014\232\264\322\361\240\261\277\231sj\003@\371\244\277j\264\311\330\"D\223?\322h\373m\261\314\274?g=\260i*\032\264?\014\314\313\311x\212>\277\t\235\352]WV\253?,i\3759,\344\274\277\216,1\243\007\270@\277\305t\323d\271\333\255\277x0\371\005\213\262\241?a\005\230\362\314\203\273\277\375\307@\325\374Z\273\277Pyn|\371\227\251\277\370%\314@]\236\272?\315$\255EfM\203?\266\365\356~\n)\231?\271,6RX\227\240?y\rbM\300_\231\277\341\321\227-\255\342\270?*\241Y\360\273\032\252\277\270\007\3200\256eb\277\t\360j\242Se\247\277sU\032\357c\333\225?\324\346\260v\241\372\256\277\353\005\350L\203\021\247?\315M\271\224k\304\240\277\200\207\036\014\377\234\244?\236g\206{\325\362\207?M\317\034\317[=\242?\250R\261\301&\341s\277\236\2509\364 J\253\277\007 2\336gw\256?2\226\335(\255\344\261\277IJ\371\270\007>\250?\372\277\221\203\372\036{?\205@\303\036\026\343T\277z\323\260B\003\003\244\277\3679!#Y\336k\277\'~\371\252\273F\260?\370\362\237R\315\205\273?\310\006/\217\355\005}?e\026P\200\371\356u\277\005T\325\333\322\355\261?\177\370r\033\025\233e?\304\301Yr\005B\234\277\234\316\207\222K\n\222?7\337\202Q\306\307\262?-\177\024`7\260\263?!\366\031GE\006\226\277q\303\364e\255\257\255?Xy\26029\010\210?O\362\010\324qp\221?\000H\030\234\030&\215\277\300\216GC\275\264\271?\243\rR\364\272\331\207?L\223\37499?\244?\267o\301a\330\010\243?\255\006>\261K\362\275?\333\245\313\317\322v\266\277\354m\271\254\305\233\246\277\210\231-NS\213\265?.\003\216\303\257\315\222?\025\320\343M\276\000\256?\312+\364\314\241\221\302\277\t\331\376\334\241?\024\216\243\366\203]\262\277&\306\272u\303\203\221\277\256\006\016ij\004\241?Y\241\306t\364\367\260\277\233\236\224B\000\235\205?\265\026_K8j\216\277\3467\374\t\235}\272\277j\211\327\n.E\255\277\251\264\333\ta\005\201\277Tm\006M\255i\222\277\0319;/J\222\241\277\277\310\265\222\302\242\262?\320\021\273\263\276\315\206?]\225\205\265\t\363\242\277\307\347\363\0147\315\262\277X\351\275d\350X\227\277\350c.\345\225\362\242\277\276\204\033\\\030\376h?\035q4\301\342\363\257\277\320\235\250\360\202\361\223\2778\357r\251R\201\226\277\242&\311\212Zh\252\277t\200@\253{\357\252\277xm\335Ms\032\227\277\377%D\213~\344\252?\301\206\215\032MGi?\r]\332eo\274y?\306\302\234\031\256\315\225\277\366\220\r\007I\t\224?aX\272v\3053b\2776jo\375#\240\251\2777\r\225=\344J\246\277\260l\370\007\262!\232?\256\327\033:\346\376g\277\210\376\217W]\345\222\277v[\357,\345Wl?\320\242\241\366\377\026\210?/\010hQ\310(\270?B\203\204\337\240\261w\277,\325Y\317\207\332\204?]\213\226f+\356\203\277\340\241\013\315\225\264\260?\373\004\210\377\346\177\252\277i\364\301T\216\267\245\277\344|\243\242O\320\240\277*$\2460+}\226\277G\020\0162/b\231?\2262\316%\212\024\242?\364\177\322\'\034i\200?\t\336aM\351\323\245?\327\266\317`\241y\276?m\300\233\003]yr\277\372J\003\371\337\255\302?d\230l\034t\270\201?\355\315@-\314\010\230\277\207:\370\326\240hm\277\242\254{\'\240\264\222\277;^C\226S8w?\034I\344\336$\314\214\277Qv\347\326\260E\256?\247r\252\'t\275\236\277\037\244\277\246\226\361\234\277\276\225\220\035\200\212\261\277\334 \250V*\372\246?6\022\221R\373A\233\277y\031AX\001\214\240?9\205D{\204\225\224\2772\033\207\250\376\236\216?\347\344T\201\321\017v?\023\017,\310\204l\303\277s\347\271\336\266\013\263\277\305\333\262\251+\303\306?\313\001\231\340[\231\242\277t\264\253\345\020\341\242\277%}\356\205\200j\252\277\031\222\257\030]0\243\277V\347k\317\236\017\241\277\030\203g\246\206\250\204??\373?C\316\031\220\277\002\367 J\243D\245?\304#\004\262\"\002\211\277(\350\240F\200\326\256?\316\"u\366\031]\264\277\226\032\232>?\262\241?\360\255\241[\377\217\245?[Y8*Xg\251\277\312,\024\000 q\222?F\317\033\200\244\345\262\277\021\310\264\236\355=\241?\033\367\243\372\257.\264\277k\304\010\033\\\221\230\277\003i\326\341\255J\217\277\336\016T^\022\010\237?6\264\t\347\300\342\206?>\236s6t|\252?\326,1\263t\254\234?r\265\334\201\216q\266?\036\204\240\316\202\342\206\277\010&\035~\341w\226?\321J\370)\215\344N?\227\272Jz\243`\213?_y_\253^\222\241? sp\033)\367\266\277o\232\036\2623|\252?v`z#UI\254?Yn\353\tz=\242\277\t2\362/\341\234\240?\t\310\311d\317\353\245?w\271\226$(\271\263?\325\262:\324:\202\300?D\022\317\365\361\360o\277IGsSey\235\277\231\301\377\350\033<\227?\237\260)\246\026\242\202?\250\223(\"D\n\206?\023K\333\214\237P\277\277\210r\301\273\371D\253\277\257\275\242hy\265\253\277\177\r\030P\363_\245\277\016.w\371T\320\213\277\367\256,\217\260\235\301\277\334\270\326\204\246=\236?\003R\210\037Bk\260\277\014\327\247\005\371\242<\277\025\366,P\365\271\244\277J\344W1\234\204\242?\215\333\306\217\233C\253\277\352\365\212\270\321;\244?J\225\211\347^V\246\277\274^X\355\021\034\224\277\026&\363T\306<\360\276\007\217\241\246\346=\246?/\204\2437\376%\257\277\214\321Rj\262\275\252\277(\343\205\2124\204\262\277X.\303OOM\233\277\000\335\252\'\016k\230?1\265B\364\001Y\254?\243,\263;\2634`?H\014\212\363\227k\243\277m~FK*\315\243?T\375\261\214\217,\257?N\376\002\343f\243\237\277\324`mQJ\222\246\277S0:@\322w\244\277\257\261\202\361\341\217\253?\240\373\361\350h\210\260\277\361w\271\202o\001\220\277\341@\257\027\343\'\255\2778\252\261\326\324\261\264\277I\346\031z\274\204\252\277x\356\226\261\2452\272\277q\024\264\263$\324c?`/\275:~\t\254\2778\244\270\010\356~\243\277\314\343]H\233\315\263\277\035\212\347\310&\336\270?$\007\307\220J\251\252?7\025L\031\256-\216?\021\300\034\032X~\240?9\304~\220\221\237\226\277\3207Y\263\335\013\200?\037\323J*\254\022\241\277jf\333^V\350\274\277\006b\376O\361\266\252\277\312\326n\301t\241\260?\024\201\335\345%S\251\277u\001\245\242o\032\256?\354\344\352\262&S\241?\367JL\214\377\010\244\277f\267\214\014Q?\232?\272\336\025\024om\261?\177\022\311\272\226]w\277T/=\036\360\346G\277\243\024Z\257\303\210\253\277h[c\027\331 \227?la\210~\334\236\241?-\206\355h;\316\220\277F3\254g\275\376\245\277I\203\345\'\210\000\246\277\270\333\235f}M\257\277\005N\026\377\222y\206?\025\311\204r\362\355\250?\325X\006\221T\307\224\277\365\233\177\215\303\305\243\277j\372\340F\226\224\263\277\265\230p^\021\034\221\277\342\202S\204W\304\223\277\370\326Q\311\305\037\245?2\240h\255\376\003\234?\014\351\316\375r\007\233\277\306s\264e\254\324\236\277j\365T\007CR\212\277\023\306_\302\177\266\266?}\327]\231\267y\262\277\037\326\337\025\351\265\241\277t\234l\177\006[\230\277QO\272\347\006\004\263?#\265\346[\035tr?\306\007\226\222\223\252\251?\206\255F\306\332\305z?l\251\307\311\2003\272?\333W\014\213\217\023\233\277L\333\"\313\255\374\235\277\020\207\303\352\310\254\217\277%\353OU1\373\265\277Nb\234\240\032$\274?w\004_\201\255Y\263\277\305\301\032\323\254\335\246\277l\264O\2235\253\217\277\203\023\3144\317\037\262\277\245\266\244L:\245\273\277\322\317\037\004\373V\273?\277\016\264\266\262f\202\277\352\2364\001\276@\243\277,\264\336j\247>\247\277W\301\222\215$\372W\277T\0373\001\276I\243\277.\005\371\3545\352\251?\341\003*)\220X\264\277\322\371\023\'\352`\240\277\360\022G\"\377\022\261\277\240\216\211\204{\nM?\023\324\014\326{\365\270\277t%\364/\216\373\235\277\251y\354\343\317\271\246?\310U\r{G\365\246\277\244\331\257\373\215G\200?\347\337\365\2107S\261?\033l\347\376\304\300o\277\025D=\247\365\202\216\277M\325\272s\031\206s?\000\222t\034\276\334\246\277`=\034\236\277~\261\277\341Y\036\304\353Y\227?\365\210\007\273\037\021\254\277\333\360\365\013?`\262\277\260\021\262\3408\330\246\277\032~BIP\213\246\277\342\254\227\231(d\234?\021\025\022~\302g_\277Q\337E\203`[\264?\275:\307\204\311-\224?\217^N\\\236=w\277,?z\200{\377\242?\240\255\373>\242Q\220?\205\021V\201b%\225?$\325\'\213\272C\252\277\245.}\253\305\025\240\277\334\2004\022\313\033\223?=f\250\2702\010\217\277\362UZ\321\213\274\237\277.\270\306\305K\326\243\277\000\211\300\356B\037\236?u\342u\200.%\245?\214[O?)H\277\277\333\017)\246RJ\214?\350\326\232\330-\010\301?\355\r\016\316m\204\267\277f=\306R\331)\244?B\343GU\241t\252?\320\"\010\033\357\377\263?^\035\335J\351\201\264?\037`r5\"\231\260?\237;\275\205\243m\236?\364U\345\230\364\205\211?\261\026nV\207\303\225?\247\267>\244\266X\265\277\003|\360\223\341\363\204?\022qQ:\365\002A?\023^\224\017v\207\231?\034aY\332\301\366\302\277Lc=N\246\316\214?\332\302)\031\006\000\230?w+\020\035\245\352\244?_2\240\355q\350\257\277\017\010\203\210\030\344n\277+U\322\371\253\216\270\277\313\335\3255+\005\276?\263J\324|h\243j\277#[/\311<\013\207\277\273\251\273\202e\246\261?\272\3012\343\377\225\201?c|]X\213\342\260\277WN6\253\035\"\275?\017\260W \3379\246\277\354o\344R\226\305\200\277\350G:}(\234\260\277\200X?v~\235\303B\317\240\277hjf\260&\337\254??T\"\274\256\302\302?_\374\342\354$N\225\277\255j`6\362f\227?\272,\211\032\276\321\252?\2675\326\374\351K\242?5\034\016\376\343\035g?\225\340\261\321W}\223?\004\324\231\354\377\265\263\277\373:\317V\370\231\263\277\204\224\nT\247\320\222\277\261}\004\017\211\225q?\'\374\3240\006\333+?sM8@t\314\250\277+\316\276\342\'\224c?\316\313\357Ht\202\240\277r?\342\263*a\275\277:\207\022\n\201\237\265?\247C\254\223\221\346\275?+\241\2128B\252\233?\272T\230M\325\250\277?1\006/u\300l\243\277V\033\232\\\230\017]\277?@{:Z\267\273\277\231O\023\244\352\004\263\277|]\240\235\351\372\250?\322\344\230mMF\303?\361\3139\354t+\272\277\275$,R\305\017\212\277\204EjnV\003\216?N\226\373k\243\330\256\277\211\314\004\002\002\335\244?\306i\027\\\177h\227\277o\025\tT\006L\240?s\237\277\351+\357\216?\304^\251D\201q\264?qS.\000\002\005\213?\221\035\361\367\tP\261?\365GH\033,;\234\277\357\304\310\003\252\210\200\277y\000\237\374\006\237\263\277|F\016l\201\246\247?;\264c\350\344\231\236?\370\224\317\332\225A\266\277/\350\352b\241\214\246\2774\240\317}\364\270\226\277\022\022\267\233\003\024\245\2776\324<\207}A\241\277jM2\013\215d\220\277\310^\272\251J\357\233?\231\233;0\273\316\243\277\2550{\316\242\'\253\277:C_d\301k\260\277\222\364K\260c\035\261?\021:\276G\354\200\251?\353\372\3103\234\024\270\277\352\273Hjk\335\252?\271\214*\266\007\316\270?\261A\237\202)\357\261\277x=\312\302um\304?\263\343\322\262>P\256\27792\273\016\356a\226\277$\037d\n\r\345\224?#\270\260\364y@\263?=7\313\337\207\327\235\2778d\006\177W\177\224?F\003\234\003C\n\236\277\202\032\016\250\330o\235?\305\315\273~$@\260\277\375\212\210\233PR\270?a\013Z\240\352\322\244\277\207\016\300\227\304`\240?sb\0142\304\305\227\277\263\2232\247\252\356\240\277)v\200\245O\366\220?\230\203Q\334\354\275\257?\306\333\370\223]\204\231\277swf\271\203\035\240\277L {\373xX\261?\030\324unx\270\262?Q\032\375{u\233\262\277\013\007\263l`\340\226\277k<\004\203\'\355M?4Y[\301\0173\265?)\010\303)\006E\267?p\376\372\343|W\244\277\256F\217\204kP\204?)\345\345c\327n\231\277\315PIz\323/\214?\227s\252\035\366P\265\277\\\037\360\rW\316\255?\3626\221\005Q\367\226?\302\\\344@\033\034\275?\363\275\033\306\354\310\263\277\370!\246\325E\377P? \341u\361\337\301\262?\350\231\370\205\220\366\207\277^\272\364u1\036\260\277\270O\357\342s\342\232\277\213\300\217\265w\n\260?\222\026\r\376YT\224?\201T\235R\351\247v?m\022sQCf\257?\374\366\\\346\377\004\247?\001\362T4\361\227w\2778fL0\247\177\242?\271\364\320Yc\316\271?;\233\0054\2603\212?\310\233G\216\313\315\226\277b_\316\327\t_\256\277\240\240\222X\237\344\222?\007\0353\256w\036\272?\371\013b6Bn\245\277\322\344\221\033\000=`?\332\r\373v\201y\273?\271\005$\373*v\261\277C\347\'\000\336\326\246\277\252b\3601\333-\241?\346\275t\002\342\324\232\277a@\016Q\356\241\244?\325\302\264\372\3242v\277vNV\266\203\301\233?\025\321#\337z7\250?\020\026\275v\021|\260?0\024\251\376U\363\261\277\232\350\356\3327\330\245\277\277\351jvb\377\253\277\346\252B\217\3624\241\277\255\307\227\001\265\026\223\277i\256\371\306@w\250\277%\341\035(\020\273\234\277|\340k\257tk\244?]b\016\345$\325\257?~\026\247\320e4\254\277\315\007\273[\345\230\220\277\341\256\233\330(\320\231\2778\210\341Rda\266? \250\307\021a\317\264?\233\334L\334\355\232\212?\335C\220\266\030\002\246\277\237\024\216\343H\360\262?+\373\200Au\007\252?\tVN\243\375aL\277\336\3410\325\003\264\262\277\2100Q\362x\010\245\277w\274\005\242\277\021\3401\260(\302R\277\236q-\3116\242\222\277\206\006*&g\313\262?\255\272|\343\370\226\244?\211\250\217\214)\371\226?\230\370\376\t\202py\277#!\"X\034\370\356|?\331\365/\316\024\344\240\277\016\270\304x\337hu\277\301\223\311T\003x\244\277\337\300\001\342\263w\252\277[\314|\'\206\245\241?\362\376\263\212\271\'\201\277\361\204\216\265\367\023\230\277F\236+\233\363\203\217\277\342r\204u!\007\255?o;\240\225\017\373\212?\224g*\370\271\330\236?\356\222\331\266k*\242?\331U *I6\236\277\\\222\001\027q\202\246?\021\260Z\372]\203\255\277?,\207*J\202\227\277\260a@\341yY\223?\362V\250\334P\237\221?\322\032\313\325\016\364\256?%\305\311\352\025\006\225\277\241X\313y\220\203\250?\233\000\375[e\327x? =.k;\311\264?f\364\332\262\2022m\277\035\031\215\320\345\363\265\277\275W\3173\003\003\256?U\274\347\333\301\254\255?\232\037i\372\0064\t?*\023 csg\211?u\336\354H <\237?\3111\r\203R\203b?\266\307:\007\260Q\257?\225\266C~\332K\243?\\\276\347\0174\253c\277dfmO\007\323\277\277\351\237Q-_l\251?\"\275Z\221\r\030\302\277\303]\376\r\375\346\222?\267T\215.\277\013\263\277\273\013fc\027\004\261?\022 \233cf\316\232\277e\372Y\371\263\351\272?DA\2779T\276\240?l\247\205\312\232\372\260?\302\247^\264\215\363\226?(>\276\034\220]\261?\001\330\204\311X\351\232\277\245~7\300z\250\262\277\333\001\350\206K;|\277\362\265\365\324\213J\261\277\323\200\371\007\320A\250\277\2446g8n\215\214?\312%\311\202\027\351\263\2771\000\276Uzj\233?>`\225\366\312D\222\277q\230\353\243\271\037\201\277i\322\305Zub\257?\374;Fy\221\312\204\277\355\227\016)\023\324\006\277\222{Z\2443\302p\277h\255\276!\247\311\256?\333\204v\325^\300\270\277\005\2634\022\n\216\221\277\005\373\3111v\263\231?\214\372ch\377t\243?*\254;4\335T\260?\000\211\'\252k\320\261\277\367\355j\035@\363\257\277\003\2778%\301}\260\277X4\226\250\227\000[?\373\302\027\307\241\322\210\277e\'\240J\001\014\244?a\314\223\315\301<\304?<\332\223\236\225Wx?\013\257}\373\253\014\201\277\265R9c\372\262\254\277\336\333;\036f\226\223\277\337\362S\362z\272z\277\277\367|&17\213?R~C\323u\020\227?\3235\341\204\020Q\177\277\374IE6uT\224\277\271\033`\310Y\016\242?X7q\363]\020J\277\220\004\251f\206a\253?\362v0&\323\201\244?\215m\033\205\307\315\300?\310E5\215\346\323\254?\205\363bj(,\272?\215\375]\317\271\260\262?\270\201\03751\266\274\277\037\363\351\020\350\004\252?\216e\376W\251\010\231?\232\225w\372\276\226\254\277\'\237\006@D\245\260\277\264>\201\327\335y\254\277\352\207RU\'\334\252\277\330\212\241\030\211+\264?5fW\005$$\260?\272\0362\317]\212\212?\215]H\200\331\347\203\277\334\311\263\360\243\307\246\277\202\177\263\301\270\274\257\277\207\003[\010I}\225\277\205\274\215\302\203E\220?\247\352\362\333\023>\260?9\232M\313\010\302\250?\341\251T\310+\226\250?)\250\362\322\004R\271\277\2439\036+\023c\243?\303\342\344\016\375\014\250\277\334\253\335\220\026\246\237\277\t\004\275LW\023\254\277\004\344-\323Es\266?\305\"\363\021\332\252\212\277\241\253\257.f\244\243?\241\360i\373\255Q\304?\177%\237\037\367\342\213\2779\364\363\303\n\013\260\277\332\331\333\337\264\231\224\277=8\247U\340\343\267?\272d\030\237p\216\250?\335w:\212k\330\224?}\232\252\342\341\032\203?\036\254\3222I\032\251\277\300\333\343\216\310\233\253\277\230\230\316\030\036\r\222?b\223J\\\222$\242\277\006\246\252\315b`-\216\277\024\253+\211\312L\201\2777\372\202i\024\345\262\277\344\362\\\271\004\354\265\277\343A\361\331:\241\217\277\253\203\305\222\260\300d?\261\365[\024\306\360\230\277B\3410\021\002\226\242?A7\316\323\027c\177\277\017Doe\313\362\205\277\305\0227\260\314B\255?(\302\332\311\276\374\261\277\245Tq\310\375l\253?\004\352\345\t\356c\301?\3524l\255\316\274\252?\372]s\214\313 \267\277\251\336\035\356@\372\220?F>\234\260?\255\263\277\250\377\035\255\374\254\240\277\207\271\007M\342\215\200\277,o\024J\321!|\277\003*\204\204)\250\213?\251\006<\203\212\216\227\277\372\347!\222O\347\215?d\262\343\374\321V\263?\005\242/\0344\217\251?\201\340_w\004\026\217?\377M+\224\237\217\260?\361\323\365p\266\236\251?\303\210\001J\325@\247?\301\241\312\035\337t\301\277A\246\365\362g\244\245\277\013\250\033\356\030\236\270?x\243\373\230\341;\260?O\021\337\010\322E\226?\276\231^\366_\260x?\002\374\262\321@f\254?\374\032\000\265\020\372\204?\302g\216\323#\'\236?\253\320\316\242V\002&?n\365A:\215\224\250?KK-\r9\301\237\277\225\366YVpf\202?\035>\254\177\0356\252?\306qJ\336\302H\243\277\312s\010-\244\274u?\345\302\262Q\242\322\242\277\314~\236j\\\237\266\277\226d84\227\332\222\277Jo\207)\221\277\300?\031\370c\td)\253?25 \222\242Y\245?:\037\265\373\334\275g?\270\367\nOh\244\263\277)\307\277\277-\207\257\277\357]q\370@\340\273\277\257\rv\261s\216\236?\336\270\260\222\354\201\263\277\237\362\017\014\006\032\232?\3229Q\001\225Z\231\277C\345$\272\312B\241\277\336\275\022\343\215}\257\277\337\005\264\262\036\201\260?a}ODA\037\220\277@\3054\\\007\322\217?\027GFG\2369\254\277\3068`\321\231\034\222\277_\004\210RGw\234?\\%V\340\213\216\271\277\364\232\026.n\337\271\277\\\236\233\021fW~\277`\024\205\2413\032\251\277\246\370\344\010\027\007\247?\213\354g\353\275\223\210?>U\306\272\257\340b?\320\340\024\036\340p\250?\331R*\252v\034\215? \266\270\244\345\370^?!\314/\241\225E\222\277\227\007.\253\206}\272\277\342\310\264\240M+\243?/\307\004\262h\263\262?\312\005x\360P\010\200\277\254\030D\326XK\255?B\023\250S,G\251?\320G\"\252\333\224\235?\277\t\207y^\267\247\277\034n\"Z\207\376\270?^\243W\250\232\037\272?\203\371u\000\273\204\241?\361z\302\207\177f\245?\317\374\224\t7T\244?\n\t41?\214\227\277C\351;:\240\211\210\277\222\036}Y~\215\272\277\260H\336\035\361\023\214?o\374)5\213\030\267\277\241\352\314\206\343\373\276\277\326\246\337\326\244\306\247\277\2620\245\221\022\013}?\020\255\205\376\230\352\226?Sm\375\006\242\325\230?@\177\223 |\331\264\277\203\214\363\260\206a\305??\227\341\004n\026\223?\354\210\000\232D\212\270?\200\214\271\372\255w\302\277\213\2123\247\224\326\260?\224\177\304\006\275*\235\2775R\003\244=\346\200?g\240\223\202|\325\265\277\244\251m\031\243;\251\277\365\020\265,YQ\265\277\265B$[\2213x?5\370\336\233\231\325\264?\031$\322\320\235\313\253?\231\356y\023RN\251\277R\r]\330,\302k\277o\233\344\327\336\320\250\277\n\313\372\'\355\224\247?d\205\"\252\035e\255?G\205\026\030\010x\262\277Tv\221?V\224\273\277DXpd\010a\270?\264r\261\316\337!\303?\001`$\006\022\360\255?w\242\302\247\036\227o?\261\354\203#\351\031\235?\320\325\306\265\244\274\242\277\342\372\222\217\215M\257\277\016Z\347\306#\262\230\277>\347\301\331PV\247\277J\362\312~F\024\215\277\326\r\"T\344[\252?9\3277\030\301\260\265\277\"E\001\n \310\232?\335,\334\004K\244\224\277\356\317?\037\222\207\226?\t\3560\257(\356\267\277U\"\354\354\203\357\270\277\240\241w\336\234A\273\2779\327\266V\277\342\364_f\206X\244\277\200v-/g\205\244?7\2767m7\213*?\260\345\032\177\232\372\243?`\017\346\332O\310\300?\216\253\\\336~\204p?\006 R\035-Z\251?\333q\334\225\357\216J?D\303*t\032\311\247?F0\275$\001\375|?\356\352\216!\226@\212\277Z\'\022%t5\245\277\370s\307\341\375\201\242\277;\3422\306%*\236?\361\363\314<\220d\237?X$po\\\252\251?\255\304\306,\312d\225?l\305M{=\244\247\277hu/\240\024X\220?\306V\254\300\240\336\235\277@G\221f\217\346\301\277\023\335\205G6\302\243\277D2\301\003\345\031\236?d`\344u\021I\276\277\001\251\307\236\317\263\221\277\306\255\035b\0005{\277\316\242\303b\373\274\227\277\2036\242\264\234\'\275\277\177e\374\231(E\253?\254\366G\264>\271\230\277z\324\256\254\277\264\241\237RV\263\272?D\025\230(\177i\206?\2678,\214\005\022\226?\245/i\177\035\207\233\277|\223\020\007\324\332\243?\007\211D\364%\005\250\277\206\213\314\343]p\240\277\334\201Y\003\224\250z?s\321+\347[\263\230\277R\311\344K\253\306\255\277\031\313\343\324\340\246\204?pF)\253Q\215\302?\221R\257\273\rx\177?\005\272r\373\227 \260\277\344i\210\340\037\214~??p\321G-\271l?\177\313\035x\352\223?o\206=z[\315\243?\301o\231\\q\344\240\277\276\246\273Je\213\232?s\272\250Y\024m\262\277\256\305\375\314@\360\271\277\"%\010\355\355\020\260?A&\221[B\320\222?M\tM4j\264\301\277\337;\3327(\001\245? \343H\227\316\013\215?$+\367\367\202\217\250?^P\300\225\215\246\250?qh\267\023\216\257\242\277\000\271\336Fu@\230?-\370\200\242?\006\260?\205\310H\037\254\031\244\277\251;\243\025\271\004\270?\334\337|!\373\330\260\277K\035\265j/\255\252\277c:\235\340\202-z\277\217\002\347LUe\261\277\306P$\026V\307~\277\301\320,\t>x\250\277@z|\035\326q\255\277{\257\2641\367\215\210\277\036\226\214\316\000\250~\277=g\247\362\323\037\242\277\332\325\020\030\205F\203?\2321%\216Wk\275?A\326\271\033\214$\272?\n\303\313b\024(z\277!\344E\"S+\232?ny^\r\337W\262?\003~\374\331\205\377\220?es\324\206\377h\226\277\323\325,j\017\302\240\277\020\026\262\352\215\230\226?\217M\366\344\362\261\230?\031\204\301\255w\343\277?g\240Y?\331/\237\277\323\2132g\266T\247?Y\354\205f\352\257\244?\340\372\033I\'N\210\277~\336\304q\275\266\220\277\327\016\234|\320\241\246\277\377\224\233\203\203\333\260\277V\336\217\264\253\247\207\277\013\244\366i\034\246\243?\023\025\375\310\215Q\240?\261\214\377\006\317\265\265?" + } + } + } +} +node { + name: "layer_0_type_1/matrix/read" + op: "Identity" + input: "layer_0_type_1/matrix" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_0_type_1/matrix" + } + } + } +} +node { + name: "layer_0_type_1/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 20 + } + } + tensor_content: "K,\357\235K\360\367?\247\001z\226\326\222\374?`U\203\241\250\\\356\277=d.\323\021n\340\2779\217\224\303Y&\360\277ym\215MV\007\360?1m\000\230\005G\337?\370\361\007\034|\031\300?5\320\206\354\233\352\311\277\250d\222+\363\352\362\277=\242az\255\343\327?\203\001\253\017Ur\355?\356\262\374\277:;\357?\212$\266N\255q\306?D\2435\003\255\267\374\277\210\224**\307\342\252?2b\266\201\004\203\356?\2317V\250)b\237\277\377\374H\214\227\370\345\277\234\311\334\345\257T\337\277" + } + } + } +} +node { + name: "layer_0_type_1/bias/read" + op: "Identity" + input: "layer_0_type_1/bias" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_0_type_1/bias" + } + } + } +} +node { + name: "layer_0_type_1/MatMul" + op: "MatMul" + input: "Reshape_20" + input: "layer_0_type_1/matrix/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: false + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_0_type_1/BiasAdd" + op: "BiasAdd" + input: "layer_0_type_1/MatMul" + input: "layer_0_type_1/bias/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_0_type_1/Tanh" + op: "Tanh" + input: "layer_0_type_1/BiasAdd" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "layer_0_type_1/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "layer_0_type_1/Reshape" + op: "Reshape" + input: "layer_0_type_1/Tanh" + input: "layer_0_type_1/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "final_layer_type_1/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 20 + } + dim { + size: 1 + } + } + tensor_content: "\322 [g\237\312\327?)R\313\'8\355\224?\253\274\220//\\\321\277B\257\363@\372\n\306\277\240$\252\345\321\335\245?\366\372\366\215\300v\304?\253\010\277\257W\235_\277L\"\026s\267\322\235?N\010@\\@\276\313\277\254\364O\245\307n\306\277\344:4\307p\203\315?\301\221\213$|r\312\277Gc\375\314\177#\330\277\243^|\2658\301\242\277h\331\215\230\210l\250\277\026\r.\256H\345\300?\n{\005\313\204\037p?\032\276\230.\000s\250?\307\225\267\033\304\262\313\277\320\371\345T\264\'\247\277" + } + } + } +} +node { + name: "final_layer_type_1/matrix/read" + op: "Identity" + input: "final_layer_type_1/matrix" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@final_layer_type_1/matrix" + } + } + } +} +node { + name: "final_layer_type_1/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + } + double_val: -0.06734894508541613 + } + } + } +} +node { + name: "final_layer_type_1/bias/read" + op: "Identity" + input: "final_layer_type_1/bias" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@final_layer_type_1/bias" + } + } + } +} +node { + name: "final_layer_type_1/MatMul" + op: "MatMul" + input: "layer_0_type_1/Reshape" + input: "final_layer_type_1/matrix/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: false + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "final_layer_type_1/BiasAdd" + op: "BiasAdd" + input: "final_layer_type_1/MatMul" + input: "final_layer_type_1/bias/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "Shape_5" + op: "Shape" + input: "Reshape_14" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_22/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_22/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_22/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_22" + op: "StridedSlice" + input: "Shape_5" + input: "strided_slice_22/stack" + input: "strided_slice_22/stack_1" + input: "strided_slice_22/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_23/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "strided_slice_23/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "strided_slice_23/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_23" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_23/stack" + input: "strided_slice_23/stack_1" + input: "strided_slice_23/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_21/shape" + op: "Pack" + input: "strided_slice_22" + input: "strided_slice_23" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_21" + op: "Reshape" + input: "final_layer_type_1/BiasAdd" + input: "Reshape_21/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "concat_3/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_3" + op: "ConcatV2" + input: "Reshape_19" + input: "Reshape_21" + input: "concat_3/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "embedding_lookup_1/axis" + op: "Const" + attr { + key: "_class" + value { + list { + s: "loc:@fitting_attr/t_bias_atom_e" + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "embedding_lookup_1" + op: "GatherV2" + input: "fitting_attr/t_bias_atom_e/read" + input: "Reshape_17" + input: "embedding_lookup_1/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@fitting_attr/t_bias_atom_e" + } + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "embedding_lookup_1/Identity" + op: "Identity" + input: "embedding_lookup_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Shape_6" + op: "Shape" + input: "Reshape_14" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_25/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_25/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_25/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_25" + op: "StridedSlice" + input: "Shape_6" + input: "strided_slice_25/stack" + input: "strided_slice_25/stack_1" + input: "strided_slice_25/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_26/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_26/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "strided_slice_26/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_26" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_26/stack" + input: "strided_slice_26/stack_1" + input: "strided_slice_26/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "Const_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "Sum_1" + op: "Sum" + input: "strided_slice_26" + input: "Const_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "Reshape_22/shape" + op: "Pack" + input: "strided_slice_25" + input: "Sum_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_22" + op: "Reshape" + input: "embedding_lookup_1/Identity" + input: "Reshape_22/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "add_3" + op: "AddV2" + input: "concat_3" + input: "Reshape_22" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "mul_3" + op: "Mul" + input: "add_3" + input: "Cast_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Reshape_23/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_23" + op: "Reshape" + input: "mul_3" + input: "Reshape_23/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_27/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_27/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "strided_slice_27/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_27" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_27/stack" + input: "strided_slice_27/stack_1" + input: "strided_slice_27/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "Const_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "Sum_2" + op: "Sum" + input: "strided_slice_27" + input: "Const_2" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "o_atom_energy/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "o_atom_energy/shape" + op: "Pack" + input: "o_atom_energy/shape/0" + input: "Sum_2" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "o_atom_energy" + op: "Reshape" + input: "Reshape_23" + input: "o_atom_energy/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "o_energy/reduction_indices" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "o_energy" + op: "Sum" + input: "o_atom_energy" + input: "o_energy/reduction_indices" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/Shape" + op: "Shape" + input: "Reshape_23" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/grad_ys_0/Const" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 1.0 + } + } + } +} +node { + name: "gradients/grad_ys_0" + op: "Fill" + input: "gradients/Shape" + input: "gradients/grad_ys_0/Const" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_23_grad/Shape" + op: "Shape" + input: "mul_3" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_23_grad/Reshape" + op: "Reshape" + input: "gradients/grad_ys_0" + input: "gradients/Reshape_23_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/mul_3_grad/Mul" + op: "Mul" + input: "gradients/Reshape_23_grad/Reshape" + input: "Cast_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients/mul_3_grad/Shape" + op: "Shape" + input: "add_3" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/mul_3_grad/Shape_1" + op: "Shape" + input: "Cast_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/mul_3_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/mul_3_grad/Shape" + input: "gradients/mul_3_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/mul_3_grad/Sum" + op: "Sum" + input: "gradients/mul_3_grad/Mul" + input: "gradients/mul_3_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: true + } + } +} +node { + name: "gradients/mul_3_grad/Reshape" + op: "Reshape" + input: "gradients/mul_3_grad/Sum" + input: "gradients/mul_3_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/add_3_grad/Shape" + op: "Shape" + input: "concat_3" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/add_3_grad/Shape_1" + op: "Shape" + input: "Reshape_22" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/add_3_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/add_3_grad/Shape" + input: "gradients/add_3_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/add_3_grad/Sum" + op: "Sum" + input: "gradients/mul_3_grad/Reshape" + input: "gradients/add_3_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: true + } + } +} +node { + name: "gradients/add_3_grad/Reshape" + op: "Reshape" + input: "gradients/add_3_grad/Sum" + input: "gradients/add_3_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/concat_3_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/concat_3_grad/mod" + op: "FloorMod" + input: "concat_3/axis" + input: "gradients/concat_3_grad/Rank" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/concat_3_grad/ShapeN" + op: "ShapeN" + input: "Reshape_19" + input: "Reshape_21" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/concat_3_grad/ConcatOffset" + op: "ConcatOffset" + input: "gradients/concat_3_grad/mod" + input: "gradients/concat_3_grad/ShapeN" + input: "gradients/concat_3_grad/ShapeN:1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "shape_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/concat_3_grad/Slice" + op: "Slice" + input: "gradients/add_3_grad/Reshape" + input: "gradients/concat_3_grad/ConcatOffset" + input: "gradients/concat_3_grad/ShapeN" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients/concat_3_grad/Slice_1" + op: "Slice" + input: "gradients/add_3_grad/Reshape" + input: "gradients/concat_3_grad/ConcatOffset:1" + input: "gradients/concat_3_grad/ShapeN:1" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients/Reshape_19_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "gradients/Reshape_19_grad/Reshape" + op: "Reshape" + input: "gradients/concat_3_grad/Slice" + input: "gradients/Reshape_19_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_21_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "gradients/Reshape_21_grad/Reshape" + op: "Reshape" + input: "gradients/concat_3_grad/Slice_1" + input: "gradients/Reshape_21_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/final_layer_type_0/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients/Reshape_19_grad/Reshape" + input: "final_layer_type_0/matrix/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: true + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/final_layer_type_1/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients/Reshape_21_grad/Reshape" + input: "final_layer_type_1/matrix/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: true + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/layer_0_type_0/Reshape_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "gradients/layer_0_type_0/Reshape_grad/Reshape" + op: "Reshape" + input: "gradients/final_layer_type_0/MatMul_grad/MatMul" + input: "gradients/layer_0_type_0/Reshape_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients/layer_0_type_1/Reshape_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "gradients/layer_0_type_1/Reshape_grad/Reshape" + op: "Reshape" + input: "gradients/final_layer_type_1/MatMul_grad/MatMul" + input: "gradients/layer_0_type_1/Reshape_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients/layer_0_type_0/Tanh_grad/TanhGrad" + op: "TanhGrad" + input: "layer_0_type_0/Tanh" + input: "gradients/layer_0_type_0/Reshape_grad/Reshape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients/layer_0_type_1/Tanh_grad/TanhGrad" + op: "TanhGrad" + input: "layer_0_type_1/Tanh" + input: "gradients/layer_0_type_1/Reshape_grad/Reshape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients/layer_0_type_0/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients/layer_0_type_0/Tanh_grad/TanhGrad" + input: "layer_0_type_0/matrix/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: true + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/layer_0_type_1/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients/layer_0_type_1/Tanh_grad/TanhGrad" + input: "layer_0_type_1/matrix/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: true + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/Reshape_18_grad/Shape" + op: "Shape" + input: "Slice_3" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_18_grad/Reshape" + op: "Reshape" + input: "gradients/layer_0_type_0/MatMul_grad/MatMul" + input: "gradients/Reshape_18_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_20_grad/Shape" + op: "Shape" + input: "Slice_4" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_20_grad/Reshape" + op: "Reshape" + input: "gradients/layer_0_type_1/MatMul_grad/MatMul" + input: "gradients/Reshape_20_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_3_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "gradients/Slice_3_grad/Shape" + op: "Shape" + input: "Slice_3" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_3_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_3_grad/stack" + op: "Pack" + input: "gradients/Slice_3_grad/Rank" + input: "gradients/Slice_3_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/Slice_3_grad/Reshape" + op: "Reshape" + input: "Slice_3/begin" + input: "gradients/Slice_3_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_3_grad/Shape_1" + op: "Shape" + input: "Reshape_14" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_3_grad/sub" + op: "Sub" + input: "gradients/Slice_3_grad/Shape_1" + input: "gradients/Slice_3_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_3_grad/sub_1" + op: "Sub" + input: "gradients/Slice_3_grad/sub" + input: "Slice_3/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_3_grad/Reshape_1" + op: "Reshape" + input: "gradients/Slice_3_grad/sub_1" + input: "gradients/Slice_3_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_3_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_3_grad/concat" + op: "ConcatV2" + input: "gradients/Slice_3_grad/Reshape" + input: "gradients/Slice_3_grad/Reshape_1" + input: "gradients/Slice_3_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_3_grad/Pad" + op: "Pad" + input: "gradients/Reshape_18_grad/Reshape" + input: "gradients/Slice_3_grad/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_4_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "gradients/Slice_4_grad/Shape" + op: "Shape" + input: "Slice_4" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_4_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_4_grad/stack" + op: "Pack" + input: "gradients/Slice_4_grad/Rank" + input: "gradients/Slice_4_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/Slice_4_grad/Reshape" + op: "Reshape" + input: "Slice_4/begin" + input: "gradients/Slice_4_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_4_grad/Shape_1" + op: "Shape" + input: "Reshape_14" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_4_grad/sub" + op: "Sub" + input: "gradients/Slice_4_grad/Shape_1" + input: "gradients/Slice_4_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_4_grad/sub_1" + op: "Sub" + input: "gradients/Slice_4_grad/sub" + input: "Slice_4/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_4_grad/Reshape_1" + op: "Reshape" + input: "gradients/Slice_4_grad/sub_1" + input: "gradients/Slice_4_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_4_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_4_grad/concat" + op: "ConcatV2" + input: "gradients/Slice_4_grad/Reshape" + input: "gradients/Slice_4_grad/Reshape_1" + input: "gradients/Slice_4_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_4_grad/Pad" + op: "Pad" + input: "gradients/Reshape_20_grad/Reshape" + input: "gradients/Slice_4_grad/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/AddN" + op: "AddN" + input: "gradients/Slice_3_grad/Pad" + input: "gradients/Slice_4_grad/Pad" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/Slice_3_grad/Pad" + } + } + } +} +node { + name: "gradients/Reshape_14_grad/Shape" + op: "Shape" + input: "o_descriptor" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_14_grad/Reshape" + op: "Reshape" + input: "gradients/AddN" + input: "gradients/Reshape_14_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_12_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377@\001\000\000" + } + } + } +} +node { + name: "gradients/Reshape_12_grad/Reshape" + op: "Reshape" + input: "gradients/Reshape_14_grad/Reshape" + input: "gradients/Reshape_12_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_15_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377\024\000\000\000\020\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_15_grad/Reshape" + op: "Reshape" + input: "gradients/Reshape_12_grad/Reshape" + input: "gradients/filter_type_all/Reshape_15_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/MatMul" + op: "BatchMatMulV2" + input: "filter_type_all/Slice_6" + input: "gradients/filter_type_all/Reshape_15_grad/Reshape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: true + } + } + attr { + key: "grad_x" + value { + b: true + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/MatMul_1" + op: "BatchMatMulV2" + input: "filter_type_all/truediv" + input: "gradients/filter_type_all/Reshape_15_grad/Reshape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/Shape" + op: "Shape" + input: "filter_type_all/truediv" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/Shape_1" + op: "Shape" + input: "filter_type_all/Slice_6" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/strided_slice" + op: "StridedSlice" + input: "gradients/filter_type_all/MatMul_6_grad/Shape" + input: "gradients/filter_type_all/MatMul_6_grad/strided_slice/stack" + input: "gradients/filter_type_all/MatMul_6_grad/strided_slice/stack_1" + input: "gradients/filter_type_all/MatMul_6_grad/strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/strided_slice_1" + op: "StridedSlice" + input: "gradients/filter_type_all/MatMul_6_grad/Shape_1" + input: "gradients/filter_type_all/MatMul_6_grad/strided_slice_1/stack" + input: "gradients/filter_type_all/MatMul_6_grad/strided_slice_1/stack_1" + input: "gradients/filter_type_all/MatMul_6_grad/strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/filter_type_all/MatMul_6_grad/strided_slice" + input: "gradients/filter_type_all/MatMul_6_grad/strided_slice_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/Sum" + op: "Sum" + input: "gradients/filter_type_all/MatMul_6_grad/MatMul" + input: "gradients/filter_type_all/MatMul_6_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_6_grad/Sum" + input: "gradients/filter_type_all/MatMul_6_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/Sum_1" + op: "Sum" + input: "gradients/filter_type_all/MatMul_6_grad/MatMul_1" + input: "gradients/filter_type_all/MatMul_6_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_6_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_6_grad/Sum_1" + input: "gradients/filter_type_all/MatMul_6_grad/Shape_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/Shape" + op: "Shape" + input: "filter_type_all/Slice_6" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/stack" + op: "Pack" + input: "gradients/filter_type_all/Slice_6_grad/Rank" + input: "gradients/filter_type_all/Slice_6_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/Reshape" + op: "Reshape" + input: "filter_type_all/Slice_6/begin" + input: "gradients/filter_type_all/Slice_6_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/Shape_1" + op: "Shape" + input: "filter_type_all/truediv" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/sub" + op: "Sub" + input: "gradients/filter_type_all/Slice_6_grad/Shape_1" + input: "gradients/filter_type_all/Slice_6_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/sub_1" + op: "Sub" + input: "gradients/filter_type_all/Slice_6_grad/sub" + input: "filter_type_all/Slice_6/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all/Slice_6_grad/sub_1" + input: "gradients/filter_type_all/Slice_6_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/concat" + op: "ConcatV2" + input: "gradients/filter_type_all/Slice_6_grad/Reshape" + input: "gradients/filter_type_all/Slice_6_grad/Reshape_1" + input: "gradients/filter_type_all/Slice_6_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_6_grad/Pad" + op: "Pad" + input: "gradients/filter_type_all/MatMul_6_grad/Reshape_1" + input: "gradients/filter_type_all/Slice_6_grad/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/AddN_1" + op: "AddN" + input: "gradients/filter_type_all/MatMul_6_grad/Reshape" + input: "gradients/filter_type_all/Slice_6_grad/Pad" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/filter_type_all/MatMul_6_grad/Reshape" + } + } + } +} +node { + name: "gradients/filter_type_all/truediv_grad/RealDiv" + op: "RealDiv" + input: "gradients/AddN_1" + input: "filter_type_all/truediv/y" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/MatMul" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_3" + input: "gradients/filter_type_all/truediv_grad/RealDiv" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: true + } + } + attr { + key: "grad_x" + value { + b: true + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/MatMul_1" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_4" + input: "gradients/filter_type_all/truediv_grad/RealDiv" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/Shape" + op: "Shape" + input: "filter_type_all/Reshape_4" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/Shape_1" + op: "Shape" + input: "filter_type_all/Reshape_3" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/strided_slice" + op: "StridedSlice" + input: "gradients/filter_type_all/MatMul_1_grad/Shape" + input: "gradients/filter_type_all/MatMul_1_grad/strided_slice/stack" + input: "gradients/filter_type_all/MatMul_1_grad/strided_slice/stack_1" + input: "gradients/filter_type_all/MatMul_1_grad/strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/strided_slice_1" + op: "StridedSlice" + input: "gradients/filter_type_all/MatMul_1_grad/Shape_1" + input: "gradients/filter_type_all/MatMul_1_grad/strided_slice_1/stack" + input: "gradients/filter_type_all/MatMul_1_grad/strided_slice_1/stack_1" + input: "gradients/filter_type_all/MatMul_1_grad/strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/filter_type_all/MatMul_1_grad/strided_slice" + input: "gradients/filter_type_all/MatMul_1_grad/strided_slice_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/Sum" + op: "Sum" + input: "gradients/filter_type_all/MatMul_1_grad/MatMul" + input: "gradients/filter_type_all/MatMul_1_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_1_grad/Sum" + input: "gradients/filter_type_all/MatMul_1_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/Sum_1" + op: "Sum" + input: "gradients/filter_type_all/MatMul_1_grad/MatMul_1" + input: "gradients/filter_type_all/MatMul_1_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_1_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_1_grad/Sum_1" + input: "gradients/filter_type_all/MatMul_1_grad/Shape_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/MatMul" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_8" + input: "gradients/filter_type_all/truediv_grad/RealDiv" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: true + } + } + attr { + key: "grad_x" + value { + b: true + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/MatMul_1" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_9" + input: "gradients/filter_type_all/truediv_grad/RealDiv" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/Shape" + op: "Shape" + input: "filter_type_all/Reshape_9" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/Shape_1" + op: "Shape" + input: "filter_type_all/Reshape_8" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/strided_slice" + op: "StridedSlice" + input: "gradients/filter_type_all/MatMul_3_grad/Shape" + input: "gradients/filter_type_all/MatMul_3_grad/strided_slice/stack" + input: "gradients/filter_type_all/MatMul_3_grad/strided_slice/stack_1" + input: "gradients/filter_type_all/MatMul_3_grad/strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/strided_slice_1" + op: "StridedSlice" + input: "gradients/filter_type_all/MatMul_3_grad/Shape_1" + input: "gradients/filter_type_all/MatMul_3_grad/strided_slice_1/stack" + input: "gradients/filter_type_all/MatMul_3_grad/strided_slice_1/stack_1" + input: "gradients/filter_type_all/MatMul_3_grad/strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/filter_type_all/MatMul_3_grad/strided_slice" + input: "gradients/filter_type_all/MatMul_3_grad/strided_slice_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/Sum" + op: "Sum" + input: "gradients/filter_type_all/MatMul_3_grad/MatMul" + input: "gradients/filter_type_all/MatMul_3_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_3_grad/Sum" + input: "gradients/filter_type_all/MatMul_3_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/Sum_1" + op: "Sum" + input: "gradients/filter_type_all/MatMul_3_grad/MatMul_1" + input: "gradients/filter_type_all/MatMul_3_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_3_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_3_grad/Sum_1" + input: "gradients/filter_type_all/MatMul_3_grad/Shape_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/MatMul" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_13" + input: "gradients/filter_type_all/truediv_grad/RealDiv" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: true + } + } + attr { + key: "grad_x" + value { + b: true + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/MatMul_1" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_14" + input: "gradients/filter_type_all/truediv_grad/RealDiv" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/Shape" + op: "Shape" + input: "filter_type_all/Reshape_14" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/Shape_1" + op: "Shape" + input: "filter_type_all/Reshape_13" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/strided_slice" + op: "StridedSlice" + input: "gradients/filter_type_all/MatMul_5_grad/Shape" + input: "gradients/filter_type_all/MatMul_5_grad/strided_slice/stack" + input: "gradients/filter_type_all/MatMul_5_grad/strided_slice/stack_1" + input: "gradients/filter_type_all/MatMul_5_grad/strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/strided_slice_1" + op: "StridedSlice" + input: "gradients/filter_type_all/MatMul_5_grad/Shape_1" + input: "gradients/filter_type_all/MatMul_5_grad/strided_slice_1/stack" + input: "gradients/filter_type_all/MatMul_5_grad/strided_slice_1/stack_1" + input: "gradients/filter_type_all/MatMul_5_grad/strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/filter_type_all/MatMul_5_grad/strided_slice" + input: "gradients/filter_type_all/MatMul_5_grad/strided_slice_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/Sum" + op: "Sum" + input: "gradients/filter_type_all/MatMul_5_grad/MatMul" + input: "gradients/filter_type_all/MatMul_5_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_5_grad/Sum" + input: "gradients/filter_type_all/MatMul_5_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/Sum_1" + op: "Sum" + input: "gradients/filter_type_all/MatMul_5_grad/MatMul_1" + input: "gradients/filter_type_all/MatMul_5_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all/MatMul_5_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_5_grad/Sum_1" + input: "gradients/filter_type_all/MatMul_5_grad/Shape_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_4_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_4_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_1_grad/Reshape" + input: "gradients/filter_type_all/Reshape_4_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_3_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_3_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_1_grad/Reshape_1" + input: "gradients/filter_type_all/Reshape_3_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_9_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_9_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_3_grad/Reshape" + input: "gradients/filter_type_all/Reshape_9_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_8_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_8_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_3_grad/Reshape_1" + input: "gradients/filter_type_all/Reshape_8_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_14_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_14_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_5_grad/Reshape" + input: "gradients/filter_type_all/Reshape_14_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_13_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_13_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_5_grad/Reshape_1" + input: "gradients/filter_type_all/Reshape_13_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_2_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_2_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/Reshape_3_grad/Reshape" + input: "gradients/filter_type_all/Reshape_2_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all/Reshape_7_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_7_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/Reshape_8_grad/Reshape" + input: "gradients/filter_type_all/Reshape_7_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all/Reshape_12_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_12_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/Reshape_13_grad/Reshape" + input: "gradients/filter_type_all/Reshape_12_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all/Tanh_grad/TanhGrad" + op: "TanhGrad" + input: "filter_type_all/Tanh" + input: "gradients/filter_type_all/Reshape_2_grad/Reshape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients/filter_type_all/Tanh_1_grad/TanhGrad" + op: "TanhGrad" + input: "filter_type_all/Tanh_1" + input: "gradients/filter_type_all/Reshape_7_grad/Reshape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients/filter_type_all/Tanh_2_grad/TanhGrad" + op: "TanhGrad" + input: "filter_type_all/Tanh_2" + input: "gradients/filter_type_all/Reshape_12_grad/Reshape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients/filter_type_all/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients/filter_type_all/Tanh_grad/TanhGrad" + input: "filter_type_all/matrix_1_0/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: true + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all/MatMul_2_grad/MatMul" + op: "MatMul" + input: "gradients/filter_type_all/Tanh_1_grad/TanhGrad" + input: "filter_type_all/matrix_1_1/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: true + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all/MatMul_4_grad/MatMul" + op: "MatMul" + input: "gradients/filter_type_all/Tanh_2_grad/TanhGrad" + input: "filter_type_all/matrix_1_2/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: true + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all/Reshape_1_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_1_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_grad/MatMul" + input: "gradients/filter_type_all/Reshape_1_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_6_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_6_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_2_grad/MatMul" + input: "gradients/filter_type_all/Reshape_6_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_11_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_11_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/MatMul_4_grad/MatMul" + input: "gradients/filter_type_all/Reshape_11_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/Shape" + op: "Shape" + input: "filter_type_all/Slice_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/stack" + op: "Pack" + input: "gradients/filter_type_all/Slice_1_grad/Rank" + input: "gradients/filter_type_all/Slice_1_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/Reshape" + op: "Reshape" + input: "filter_type_all/Slice_1/begin" + input: "gradients/filter_type_all/Slice_1_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/Shape_1" + op: "Shape" + input: "filter_type_all/Reshape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/sub" + op: "Sub" + input: "gradients/filter_type_all/Slice_1_grad/Shape_1" + input: "gradients/filter_type_all/Slice_1_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/sub_1" + op: "Sub" + input: "gradients/filter_type_all/Slice_1_grad/sub" + input: "filter_type_all/Slice_1/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all/Slice_1_grad/sub_1" + input: "gradients/filter_type_all/Slice_1_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/concat" + op: "ConcatV2" + input: "gradients/filter_type_all/Slice_1_grad/Reshape" + input: "gradients/filter_type_all/Slice_1_grad/Reshape_1" + input: "gradients/filter_type_all/Slice_1_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_1_grad/Pad" + op: "Pad" + input: "gradients/filter_type_all/Reshape_1_grad/Reshape" + input: "gradients/filter_type_all/Slice_1_grad/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/Shape" + op: "Shape" + input: "filter_type_all/Slice_3" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/stack" + op: "Pack" + input: "gradients/filter_type_all/Slice_3_grad/Rank" + input: "gradients/filter_type_all/Slice_3_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/Reshape" + op: "Reshape" + input: "filter_type_all/Slice_3/begin" + input: "gradients/filter_type_all/Slice_3_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/Shape_1" + op: "Shape" + input: "filter_type_all/Reshape_5" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/sub" + op: "Sub" + input: "gradients/filter_type_all/Slice_3_grad/Shape_1" + input: "gradients/filter_type_all/Slice_3_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/sub_1" + op: "Sub" + input: "gradients/filter_type_all/Slice_3_grad/sub" + input: "filter_type_all/Slice_3/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all/Slice_3_grad/sub_1" + input: "gradients/filter_type_all/Slice_3_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/concat" + op: "ConcatV2" + input: "gradients/filter_type_all/Slice_3_grad/Reshape" + input: "gradients/filter_type_all/Slice_3_grad/Reshape_1" + input: "gradients/filter_type_all/Slice_3_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_3_grad/Pad" + op: "Pad" + input: "gradients/filter_type_all/Reshape_6_grad/Reshape" + input: "gradients/filter_type_all/Slice_3_grad/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/Shape" + op: "Shape" + input: "filter_type_all/Slice_5" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/stack" + op: "Pack" + input: "gradients/filter_type_all/Slice_5_grad/Rank" + input: "gradients/filter_type_all/Slice_5_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/Reshape" + op: "Reshape" + input: "filter_type_all/Slice_5/begin" + input: "gradients/filter_type_all/Slice_5_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/Shape_1" + op: "Shape" + input: "filter_type_all/Reshape_10" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/sub" + op: "Sub" + input: "gradients/filter_type_all/Slice_5_grad/Shape_1" + input: "gradients/filter_type_all/Slice_5_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/sub_1" + op: "Sub" + input: "gradients/filter_type_all/Slice_5_grad/sub" + input: "filter_type_all/Slice_5/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all/Slice_5_grad/sub_1" + input: "gradients/filter_type_all/Slice_5_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/concat" + op: "ConcatV2" + input: "gradients/filter_type_all/Slice_5_grad/Reshape" + input: "gradients/filter_type_all/Slice_5_grad/Reshape_1" + input: "gradients/filter_type_all/Slice_5_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_5_grad/Pad" + op: "Pad" + input: "gradients/filter_type_all/Reshape_11_grad/Reshape" + input: "gradients/filter_type_all/Slice_5_grad/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/Slice_1_grad/Pad" + input: "gradients/filter_type_all/Reshape_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_5_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_5_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/Slice_3_grad/Pad" + input: "gradients/filter_type_all/Reshape_5_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Reshape_10_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "gradients/filter_type_all/Reshape_10_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all/Slice_5_grad/Pad" + input: "gradients/filter_type_all/Reshape_10_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/AddN_2" + op: "AddN" + input: "gradients/filter_type_all/Reshape_4_grad/Reshape" + input: "gradients/filter_type_all/Reshape_grad/Reshape" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/filter_type_all/Reshape_4_grad/Reshape" + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/Shape" + op: "Shape" + input: "filter_type_all/Slice" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/stack" + op: "Pack" + input: "gradients/filter_type_all/Slice_grad/Rank" + input: "gradients/filter_type_all/Slice_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/Reshape" + op: "Reshape" + input: "filter_type_all/Slice/begin" + input: "gradients/filter_type_all/Slice_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/Shape_1" + op: "Shape" + input: "Reshape_9" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/sub" + op: "Sub" + input: "gradients/filter_type_all/Slice_grad/Shape_1" + input: "gradients/filter_type_all/Slice_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/sub_1" + op: "Sub" + input: "gradients/filter_type_all/Slice_grad/sub" + input: "filter_type_all/Slice/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all/Slice_grad/sub_1" + input: "gradients/filter_type_all/Slice_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/concat" + op: "ConcatV2" + input: "gradients/filter_type_all/Slice_grad/Reshape" + input: "gradients/filter_type_all/Slice_grad/Reshape_1" + input: "gradients/filter_type_all/Slice_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_grad/Pad" + op: "Pad" + input: "gradients/AddN_2" + input: "gradients/filter_type_all/Slice_grad/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/AddN_3" + op: "AddN" + input: "gradients/filter_type_all/Reshape_9_grad/Reshape" + input: "gradients/filter_type_all/Reshape_5_grad/Reshape" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/filter_type_all/Reshape_9_grad/Reshape" + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/Shape" + op: "Shape" + input: "filter_type_all/Slice_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/stack" + op: "Pack" + input: "gradients/filter_type_all/Slice_2_grad/Rank" + input: "gradients/filter_type_all/Slice_2_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/Reshape" + op: "Reshape" + input: "filter_type_all/Slice_2/begin" + input: "gradients/filter_type_all/Slice_2_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/Shape_1" + op: "Shape" + input: "Reshape_9" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/sub" + op: "Sub" + input: "gradients/filter_type_all/Slice_2_grad/Shape_1" + input: "gradients/filter_type_all/Slice_2_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/sub_1" + op: "Sub" + input: "gradients/filter_type_all/Slice_2_grad/sub" + input: "filter_type_all/Slice_2/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all/Slice_2_grad/sub_1" + input: "gradients/filter_type_all/Slice_2_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/concat" + op: "ConcatV2" + input: "gradients/filter_type_all/Slice_2_grad/Reshape" + input: "gradients/filter_type_all/Slice_2_grad/Reshape_1" + input: "gradients/filter_type_all/Slice_2_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_2_grad/Pad" + op: "Pad" + input: "gradients/AddN_3" + input: "gradients/filter_type_all/Slice_2_grad/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/AddN_4" + op: "AddN" + input: "gradients/filter_type_all/Reshape_14_grad/Reshape" + input: "gradients/filter_type_all/Reshape_10_grad/Reshape" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/filter_type_all/Reshape_14_grad/Reshape" + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/Shape" + op: "Shape" + input: "filter_type_all/Slice_4" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/stack" + op: "Pack" + input: "gradients/filter_type_all/Slice_4_grad/Rank" + input: "gradients/filter_type_all/Slice_4_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/Reshape" + op: "Reshape" + input: "filter_type_all/Slice_4/begin" + input: "gradients/filter_type_all/Slice_4_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/Shape_1" + op: "Shape" + input: "Reshape_9" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/sub" + op: "Sub" + input: "gradients/filter_type_all/Slice_4_grad/Shape_1" + input: "gradients/filter_type_all/Slice_4_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/sub_1" + op: "Sub" + input: "gradients/filter_type_all/Slice_4_grad/sub" + input: "filter_type_all/Slice_4/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all/Slice_4_grad/sub_1" + input: "gradients/filter_type_all/Slice_4_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/concat" + op: "ConcatV2" + input: "gradients/filter_type_all/Slice_4_grad/Reshape" + input: "gradients/filter_type_all/Slice_4_grad/Reshape_1" + input: "gradients/filter_type_all/Slice_4_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all/Slice_4_grad/Pad" + op: "Pad" + input: "gradients/AddN_4" + input: "gradients/filter_type_all/Slice_4_grad/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/AddN_5" + op: "AddN" + input: "gradients/filter_type_all/Slice_grad/Pad" + input: "gradients/filter_type_all/Slice_2_grad/Pad" + input: "gradients/filter_type_all/Slice_4_grad/Pad" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/filter_type_all/Slice_grad/Pad" + } + } + } +} +node { + name: "gradients/Reshape_9_grad/Shape" + op: "Shape" + input: "Reshape_8" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_9_grad/Reshape" + op: "Reshape" + input: "gradients/AddN_5" + input: "gradients/Reshape_9_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_8_grad/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\320\002\000\000" + } + } + } +} +node { + name: "gradients/Reshape_8_grad/Reshape" + op: "Reshape" + input: "gradients/Reshape_9_grad/Reshape" + input: "gradients/Reshape_8_grad/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape_7" + op: "Shape" + input: "o_nlist" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_28/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_28/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_28/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_28" + op: "StridedSlice" + input: "Shape_7" + input: "strided_slice_28/stack" + input: "strided_slice_28/stack_1" + input: "strided_slice_28/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_29/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_29/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_29/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_29" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_29/stack" + input: "strided_slice_29/stack_1" + input: "strided_slice_29/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_5/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 720 + } + } + } +} +node { + name: "mul_5" + op: "Mul" + input: "strided_slice_29" + input: "mul_5/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_24/shape" + op: "Pack" + input: "strided_slice_28" + input: "mul_5" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_24" + op: "Reshape" + input: "gradients/Reshape_8_grad/Reshape" + input: "Reshape_24/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "ProdForceSeA" + op: "ProdForceSeA" + input: "Reshape_24" + input: "o_rmat_deriv" + input: "o_nlist" + input: "t_natoms" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "n_a_sel" + value { + i: 180 + } + } + attr { + key: "n_r_sel" + value { + i: 0 + } + } +} +node { + name: "ProdVirialSeA" + op: "ProdVirialSeA" + input: "Reshape_24" + input: "o_rmat_deriv" + input: "o_rij" + input: "o_nlist" + input: "t_natoms" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "n_a_sel" + value { + i: 180 + } + } + attr { + key: "n_r_sel" + value { + i: 0 + } + } +} +node { + name: "strided_slice_30/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_30/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_30/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_30" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_30/stack" + input: "strided_slice_30/stack_1" + input: "strided_slice_30/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_6/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul_6" + op: "Mul" + input: "mul_6/x" + input: "strided_slice_30" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_25/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_25/shape" + op: "Pack" + input: "Reshape_25/shape/0" + input: "mul_6" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_25" + op: "Reshape" + input: "ProdForceSeA" + input: "Reshape_25/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_31/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_31/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_31/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_31" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_31/stack" + input: "strided_slice_31/stack_1" + input: "strided_slice_31/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_32/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_32/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_32/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_32" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_32/stack" + input: "strided_slice_32/stack_1" + input: "strided_slice_32/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Equal" + op: "Equal" + input: "strided_slice_31" + input: "strided_slice_32" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "incompatible_shape_error" + value { + b: true + } + } +} +node { + name: "cond/Switch" + op: "Switch" + input: "Equal" + input: "Equal" + attr { + key: "T" + value { + type: DT_BOOL + } + } +} +node { + name: "cond/switch_t" + op: "Identity" + input: "cond/Switch:1" + attr { + key: "T" + value { + type: DT_BOOL + } + } +} +node { + name: "cond/switch_f" + op: "Identity" + input: "cond/Switch" + attr { + key: "T" + value { + type: DT_BOOL + } + } +} +node { + name: "cond/pred_id" + op: "Identity" + input: "Equal" + attr { + key: "T" + value { + type: DT_BOOL + } + } +} +node { + name: "cond/strided_slice/stack" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice/stack_1" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "cond/strided_slice/stack_2" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice/Switch" + op: "Switch" + input: "t_natoms" + input: "cond/pred_id" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "_class" + value { + list { + s: "loc:@t_natoms" + } + } + } +} +node { + name: "cond/strided_slice" + op: "StridedSlice" + input: "cond/strided_slice/Switch:1" + input: "cond/strided_slice/stack" + input: "cond/strided_slice/stack_1" + input: "cond/strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "cond/Cumsum/axis" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Cumsum" + op: "Cumsum" + input: "cond/strided_slice" + input: "cond/Cumsum/axis" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "exclusive" + value { + b: false + } + } + attr { + key: "reverse" + value { + b: false + } + } +} +node { + name: "cond/concat/values_0" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "cond/concat/axis" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/concat" + op: "ConcatV2" + input: "cond/concat/values_0" + input: "cond/Cumsum" + input: "cond/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_1/stack" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "cond/strided_slice_1/stack_1" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_1/stack_2" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_1" + op: "StridedSlice" + input: "cond/concat" + input: "cond/strided_slice_1/stack" + input: "cond/strided_slice_1/stack_1" + input: "cond/strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul/y" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul" + op: "Mul" + input: "cond/strided_slice_1" + input: "cond/mul/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_2/stack" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_2/stack_1" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_2/stack_2" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_2" + op: "StridedSlice" + input: "cond/strided_slice/Switch:1" + input: "cond/strided_slice_2/stack" + input: "cond/strided_slice_2/stack_1" + input: "cond/strided_slice_2/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_1/y" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_1" + op: "Mul" + input: "cond/strided_slice_2" + input: "cond/mul_1/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice/begin/0" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice/begin" + op: "Pack" + input: "cond/Slice/begin/0" + input: "cond/mul" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice/size/0" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice/size" + op: "Pack" + input: "cond/Slice/size/0" + input: "cond/mul_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice/Switch" + op: "Switch" + input: "Reshape_25" + input: "cond/pred_id" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@Reshape_25" + } + } + } +} +node { + name: "cond/Slice" + op: "Slice" + input: "cond/Slice/Switch:1" + input: "cond/Slice/begin" + input: "cond/Slice/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/strided_slice_3/stack" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_3/stack_1" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_3/stack_2" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_3" + op: "StridedSlice" + input: "cond/concat" + input: "cond/strided_slice_3/stack" + input: "cond/strided_slice_3/stack_1" + input: "cond/strided_slice_3/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_2/y" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_2" + op: "Mul" + input: "cond/strided_slice_3" + input: "cond/mul_2/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_4/stack" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "cond/strided_slice_4/stack_1" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 5 + } + } + } +} +node { + name: "cond/strided_slice_4/stack_2" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_4" + op: "StridedSlice" + input: "cond/strided_slice/Switch:1" + input: "cond/strided_slice_4/stack" + input: "cond/strided_slice_4/stack_1" + input: "cond/strided_slice_4/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_3/y" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_3" + op: "Mul" + input: "cond/strided_slice_4" + input: "cond/mul_3/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice_1/begin/0" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice_1/begin" + op: "Pack" + input: "cond/Slice_1/begin/0" + input: "cond/mul_2" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_1/size/0" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice_1/size" + op: "Pack" + input: "cond/Slice_1/size/0" + input: "cond/mul_3" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_1" + op: "Slice" + input: "cond/Slice/Switch:1" + input: "cond/Slice_1/begin" + input: "cond/Slice_1/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/add" + op: "AddV2" + input: "cond/Slice" + input: "cond/Slice_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/strided_slice_5/stack" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_5/stack_1" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_5/stack_2" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_5" + op: "StridedSlice" + input: "cond/concat" + input: "cond/strided_slice_5/stack" + input: "cond/strided_slice_5/stack_1" + input: "cond/strided_slice_5/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_4/y" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_4" + op: "Mul" + input: "cond/strided_slice_5" + input: "cond/mul_4/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_6/stack" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_6/stack_1" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "cond/strided_slice_6/stack_2" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_6" + op: "StridedSlice" + input: "cond/strided_slice/Switch:1" + input: "cond/strided_slice_6/stack" + input: "cond/strided_slice_6/stack_1" + input: "cond/strided_slice_6/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_5/y" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_5" + op: "Mul" + input: "cond/strided_slice_6" + input: "cond/mul_5/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice_2/begin/0" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice_2/begin" + op: "Pack" + input: "cond/Slice_2/begin/0" + input: "cond/mul_4" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_2/size/0" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice_2/size" + op: "Pack" + input: "cond/Slice_2/size/0" + input: "cond/mul_5" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_2" + op: "Slice" + input: "cond/Slice/Switch:1" + input: "cond/Slice_2/begin" + input: "cond/Slice_2/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/strided_slice_7/stack" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_7/stack_1" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_7/stack_2" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_7" + op: "StridedSlice" + input: "cond/concat" + input: "cond/strided_slice_7/stack" + input: "cond/strided_slice_7/stack_1" + input: "cond/strided_slice_7/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_6/y" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_6" + op: "Mul" + input: "cond/strided_slice_7" + input: "cond/mul_6/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_8/stack" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "cond/strided_slice_8/stack_1" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 5 + } + } + } +} +node { + name: "cond/strided_slice_8/stack_2" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_8" + op: "StridedSlice" + input: "cond/strided_slice/Switch:1" + input: "cond/strided_slice_8/stack" + input: "cond/strided_slice_8/stack_1" + input: "cond/strided_slice_8/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_7/y" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_7" + op: "Mul" + input: "cond/strided_slice_8" + input: "cond/mul_7/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice_3/begin/0" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice_3/begin" + op: "Pack" + input: "cond/Slice_3/begin/0" + input: "cond/mul_6" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_3/size/0" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice_3/size" + op: "Pack" + input: "cond/Slice_3/size/0" + input: "cond/mul_7" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_3" + op: "Slice" + input: "cond/Slice/Switch:1" + input: "cond/Slice_3/begin" + input: "cond/Slice_3/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/mul_8/y" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 0.3140456936484258 + } + } + } +} +node { + name: "cond/mul_8" + op: "Mul" + input: "cond/Slice_3" + input: "cond/mul_8/y" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/concat_1/axis" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "cond/concat_1" + op: "ConcatV2" + input: "cond/add" + input: "cond/Slice_2" + input: "cond/concat_1/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/concat_2/concat" + op: "Identity" + input: "cond/mul_8" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/concat_3/axis" + op: "Const" + input: "^cond/switch_t" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "cond/concat_3" + op: "ConcatV2" + input: "cond/concat_1" + input: "cond/concat_2/concat" + input: "cond/concat_3/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_9/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_9/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "cond/strided_slice_9/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_9/Switch" + op: "Switch" + input: "t_natoms" + input: "cond/pred_id" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "_class" + value { + list { + s: "loc:@t_natoms" + } + } + } +} +node { + name: "cond/strided_slice_9" + op: "StridedSlice" + input: "cond/strided_slice_9/Switch" + input: "cond/strided_slice_9/stack" + input: "cond/strided_slice_9/stack_1" + input: "cond/strided_slice_9/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "cond/Cumsum_1/axis" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Cumsum_1" + op: "Cumsum" + input: "cond/strided_slice_9" + input: "cond/Cumsum_1/axis" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "exclusive" + value { + b: false + } + } + attr { + key: "reverse" + value { + b: false + } + } +} +node { + name: "cond/concat_4/values_0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "cond/concat_4/axis" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/concat_4" + op: "ConcatV2" + input: "cond/concat_4/values_0" + input: "cond/Cumsum_1" + input: "cond/concat_4/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_10/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "cond/strided_slice_10/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_10/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_10" + op: "StridedSlice" + input: "cond/concat_4" + input: "cond/strided_slice_10/stack" + input: "cond/strided_slice_10/stack_1" + input: "cond/strided_slice_10/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_9/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_9" + op: "Mul" + input: "cond/strided_slice_10" + input: "cond/mul_9/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_11/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_11/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_11/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_11" + op: "StridedSlice" + input: "cond/strided_slice_9/Switch" + input: "cond/strided_slice_11/stack" + input: "cond/strided_slice_11/stack_1" + input: "cond/strided_slice_11/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_10/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_10" + op: "Mul" + input: "cond/strided_slice_11" + input: "cond/mul_10/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice_4/begin/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice_4/begin" + op: "Pack" + input: "cond/Slice_4/begin/0" + input: "cond/mul_9" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_4/size/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice_4/size" + op: "Pack" + input: "cond/Slice_4/size/0" + input: "cond/mul_10" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_4/Switch" + op: "Switch" + input: "Reshape_25" + input: "cond/pred_id" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@Reshape_25" + } + } + } +} +node { + name: "cond/Slice_4" + op: "Slice" + input: "cond/Slice_4/Switch" + input: "cond/Slice_4/begin" + input: "cond/Slice_4/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/strided_slice_12/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_12/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_12/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_12" + op: "StridedSlice" + input: "cond/concat_4" + input: "cond/strided_slice_12/stack" + input: "cond/strided_slice_12/stack_1" + input: "cond/strided_slice_12/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_11/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_11" + op: "Mul" + input: "cond/strided_slice_12" + input: "cond/mul_11/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_13/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "cond/strided_slice_13/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 5 + } + } + } +} +node { + name: "cond/strided_slice_13/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_13" + op: "StridedSlice" + input: "cond/strided_slice_9/Switch" + input: "cond/strided_slice_13/stack" + input: "cond/strided_slice_13/stack_1" + input: "cond/strided_slice_13/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_12/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_12" + op: "Mul" + input: "cond/strided_slice_13" + input: "cond/mul_12/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice_5/begin/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice_5/begin" + op: "Pack" + input: "cond/Slice_5/begin/0" + input: "cond/mul_11" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_5/size/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice_5/size" + op: "Pack" + input: "cond/Slice_5/size/0" + input: "cond/mul_12" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_5" + op: "Slice" + input: "cond/Slice_4/Switch" + input: "cond/Slice_5/begin" + input: "cond/Slice_5/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/add_1" + op: "AddV2" + input: "cond/Slice_4" + input: "cond/Slice_5" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/strided_slice_14/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_14/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_14/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_14" + op: "StridedSlice" + input: "cond/concat_4" + input: "cond/strided_slice_14/stack" + input: "cond/strided_slice_14/stack_1" + input: "cond/strided_slice_14/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_13/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_13" + op: "Mul" + input: "cond/strided_slice_14" + input: "cond/mul_13/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_15/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_15/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "cond/strided_slice_15/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_15" + op: "StridedSlice" + input: "cond/strided_slice_9/Switch" + input: "cond/strided_slice_15/stack" + input: "cond/strided_slice_15/stack_1" + input: "cond/strided_slice_15/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_14/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_14" + op: "Mul" + input: "cond/strided_slice_15" + input: "cond/mul_14/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice_6/begin/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice_6/begin" + op: "Pack" + input: "cond/Slice_6/begin/0" + input: "cond/mul_13" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_6/size/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice_6/size" + op: "Pack" + input: "cond/Slice_6/size/0" + input: "cond/mul_14" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_6" + op: "Slice" + input: "cond/Slice_4/Switch" + input: "cond/Slice_6/begin" + input: "cond/Slice_6/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/strided_slice_16/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_16/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_16/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_16" + op: "StridedSlice" + input: "cond/concat_4" + input: "cond/strided_slice_16/stack" + input: "cond/strided_slice_16/stack_1" + input: "cond/strided_slice_16/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_15/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_15" + op: "Mul" + input: "cond/strided_slice_16" + input: "cond/mul_15/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_17/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "cond/strided_slice_17/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 5 + } + } + } +} +node { + name: "cond/strided_slice_17/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_17" + op: "StridedSlice" + input: "cond/strided_slice_9/Switch" + input: "cond/strided_slice_17/stack" + input: "cond/strided_slice_17/stack_1" + input: "cond/strided_slice_17/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_16/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_16" + op: "Mul" + input: "cond/strided_slice_17" + input: "cond/mul_16/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice_7/begin/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice_7/begin" + op: "Pack" + input: "cond/Slice_7/begin/0" + input: "cond/mul_15" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_7/size/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice_7/size" + op: "Pack" + input: "cond/Slice_7/size/0" + input: "cond/mul_16" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_7" + op: "Slice" + input: "cond/Slice_4/Switch" + input: "cond/Slice_7/begin" + input: "cond/Slice_7/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/mul_17/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 0.3140456936484258 + } + } + } +} +node { + name: "cond/mul_17" + op: "Mul" + input: "cond/Slice_7" + input: "cond/mul_17/y" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/concat_5/axis" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "cond/concat_5" + op: "ConcatV2" + input: "cond/add_1" + input: "cond/Slice_6" + input: "cond/concat_5/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/concat_6/concat" + op: "Identity" + input: "cond/mul_17" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/concat_7/axis" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "cond/concat_7" + op: "ConcatV2" + input: "cond/concat_5" + input: "cond/concat_6/concat" + input: "cond/concat_7/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_18/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "cond/strided_slice_18/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\001\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "cond/strided_slice_18/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\001\000\000\000\001\000\000\000" + } + } + } +} +node { + name: "cond/strided_slice_18/Switch" + op: "Switch" + input: "Reshape_1" + input: "cond/pred_id" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "_class" + value { + list { + s: "loc:@Reshape_1" + } + } + } +} +node { + name: "cond/strided_slice_18" + op: "StridedSlice" + input: "cond/strided_slice_18/Switch" + input: "cond/strided_slice_18/stack" + input: "cond/strided_slice_18/stack_1" + input: "cond/strided_slice_18/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 2 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 2 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/strided_slice_19/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "cond/strided_slice_19/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_19/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_19" + op: "StridedSlice" + input: "cond/strided_slice_9/Switch" + input: "cond/strided_slice_19/stack" + input: "cond/strided_slice_19/stack_1" + input: "cond/strided_slice_19/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/Const" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Const_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_20/stack" + op: "Pack" + input: "cond/strided_slice_19" + attr { + key: "N" + value { + i: 1 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/strided_slice_20/stack_1" + op: "Pack" + input: "cond/Const" + attr { + key: "N" + value { + i: 1 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/strided_slice_20/stack_2" + op: "Pack" + input: "cond/Const_1" + attr { + key: "N" + value { + i: 1 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/strided_slice_20" + op: "StridedSlice" + input: "cond/strided_slice_18" + input: "cond/strided_slice_20/stack" + input: "cond/strided_slice_20/stack_1" + input: "cond/strided_slice_20/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "cond/UniqueWithCounts" + op: "UniqueWithCounts" + input: "cond/strided_slice_20" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "out_idx" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Cumsum_2/axis" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Cumsum_2" + op: "Cumsum" + input: "cond/UniqueWithCounts:2" + input: "cond/Cumsum_2/axis" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "exclusive" + value { + b: false + } + } + attr { + key: "reverse" + value { + b: false + } + } +} +node { + name: "cond/concat_8/values_0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "cond/concat_8/axis" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/concat_8" + op: "ConcatV2" + input: "cond/concat_8/values_0" + input: "cond/Cumsum_2" + input: "cond/concat_8/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_21/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "cond/strided_slice_21/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_21/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_21" + op: "StridedSlice" + input: "cond/strided_slice_9/Switch" + input: "cond/strided_slice_21/stack" + input: "cond/strided_slice_21/stack_1" + input: "cond/strided_slice_21/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/add_2" + op: "AddV2" + input: "cond/concat_8" + input: "cond/strided_slice_21" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_22/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "cond/strided_slice_22/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_22/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_22" + op: "StridedSlice" + input: "cond/add_2" + input: "cond/strided_slice_22/stack" + input: "cond/strided_slice_22/stack_1" + input: "cond/strided_slice_22/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_18/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_18" + op: "Mul" + input: "cond/strided_slice_22" + input: "cond/mul_18/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_23/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "cond/strided_slice_23/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_23/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_23" + op: "StridedSlice" + input: "cond/UniqueWithCounts:2" + input: "cond/strided_slice_23/stack" + input: "cond/strided_slice_23/stack_1" + input: "cond/strided_slice_23/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_19/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_19" + op: "Mul" + input: "cond/strided_slice_23" + input: "cond/mul_19/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice_8/begin/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice_8/begin" + op: "Pack" + input: "cond/Slice_8/begin/0" + input: "cond/mul_18" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_8/size/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice_8/size" + op: "Pack" + input: "cond/Slice_8/size/0" + input: "cond/mul_19" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_8" + op: "Slice" + input: "cond/Slice_4/Switch" + input: "cond/Slice_8/begin" + input: "cond/Slice_8/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/strided_slice_24/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_24/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_24/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_24" + op: "StridedSlice" + input: "cond/add_2" + input: "cond/strided_slice_24/stack" + input: "cond/strided_slice_24/stack_1" + input: "cond/strided_slice_24/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_20/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_20" + op: "Mul" + input: "cond/strided_slice_24" + input: "cond/mul_20/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_25/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_25/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_25/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_25" + op: "StridedSlice" + input: "cond/UniqueWithCounts:2" + input: "cond/strided_slice_25/stack" + input: "cond/strided_slice_25/stack_1" + input: "cond/strided_slice_25/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_21/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_21" + op: "Mul" + input: "cond/strided_slice_25" + input: "cond/mul_21/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice_9/begin/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice_9/begin" + op: "Pack" + input: "cond/Slice_9/begin/0" + input: "cond/mul_20" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_9/size/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice_9/size" + op: "Pack" + input: "cond/Slice_9/size/0" + input: "cond/mul_21" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_9" + op: "Slice" + input: "cond/Slice_4/Switch" + input: "cond/Slice_9/begin" + input: "cond/Slice_9/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/add_3" + op: "AddV2" + input: "cond/Slice_8" + input: "cond/Slice_9" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/strided_slice_26/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_26/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_26/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_26" + op: "StridedSlice" + input: "cond/add_2" + input: "cond/strided_slice_26/stack" + input: "cond/strided_slice_26/stack_1" + input: "cond/strided_slice_26/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_22/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_22" + op: "Mul" + input: "cond/strided_slice_26" + input: "cond/mul_22/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_27/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_27/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_27/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_27" + op: "StridedSlice" + input: "cond/UniqueWithCounts:2" + input: "cond/strided_slice_27/stack" + input: "cond/strided_slice_27/stack_1" + input: "cond/strided_slice_27/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_23/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_23" + op: "Mul" + input: "cond/strided_slice_27" + input: "cond/mul_23/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice_10/begin/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice_10/begin" + op: "Pack" + input: "cond/Slice_10/begin/0" + input: "cond/mul_22" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_10/size/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice_10/size" + op: "Pack" + input: "cond/Slice_10/size/0" + input: "cond/mul_23" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_10" + op: "Slice" + input: "cond/Slice_4/Switch" + input: "cond/Slice_10/begin" + input: "cond/Slice_10/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/strided_slice_28/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_28/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_28/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_28" + op: "StridedSlice" + input: "cond/add_2" + input: "cond/strided_slice_28/stack" + input: "cond/strided_slice_28/stack_1" + input: "cond/strided_slice_28/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_24/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_24" + op: "Mul" + input: "cond/strided_slice_28" + input: "cond/mul_24/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/strided_slice_29/stack" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "cond/strided_slice_29/stack_1" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "cond/strided_slice_29/stack_2" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "cond/strided_slice_29" + op: "StridedSlice" + input: "cond/UniqueWithCounts:2" + input: "cond/strided_slice_29/stack" + input: "cond/strided_slice_29/stack_1" + input: "cond/strided_slice_29/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "cond/mul_25/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "cond/mul_25" + op: "Mul" + input: "cond/strided_slice_29" + input: "cond/mul_25/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Slice_11/begin/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "cond/Slice_11/begin" + op: "Pack" + input: "cond/Slice_11/begin/0" + input: "cond/mul_24" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_11/size/0" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "cond/Slice_11/size" + op: "Pack" + input: "cond/Slice_11/size/0" + input: "cond/mul_25" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "cond/Slice_11" + op: "Slice" + input: "cond/Slice_4/Switch" + input: "cond/Slice_11/begin" + input: "cond/Slice_11/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/mul_26/y" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 0.3140456936484258 + } + } + } +} +node { + name: "cond/mul_26" + op: "Mul" + input: "cond/Slice_11" + input: "cond/mul_26/y" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/concat_9/axis" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "cond/concat_9" + op: "ConcatV2" + input: "cond/add_3" + input: "cond/Slice_10" + input: "cond/concat_9/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/concat_10/concat" + op: "Identity" + input: "cond/mul_26" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "cond/concat_11/axis" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "cond/concat_11" + op: "ConcatV2" + input: "cond/concat_9" + input: "cond/concat_10/concat" + input: "cond/concat_11/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/concat_12/axis" + op: "Const" + input: "^cond/switch_f" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "cond/concat_12" + op: "ConcatV2" + input: "cond/concat_7" + input: "cond/concat_11" + input: "cond/concat_12/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "cond/Merge" + op: "Merge" + input: "cond/concat_12" + input: "cond/concat_3" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "strided_slice_33/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_33/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_33/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_33" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_33/stack" + input: "strided_slice_33/stack_1" + input: "strided_slice_33/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_7/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul_7" + op: "Mul" + input: "mul_7/x" + input: "strided_slice_33" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "o_force/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "o_force/shape" + op: "Pack" + input: "o_force/shape/0" + input: "mul_7" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "o_force" + op: "Reshape" + input: "cond/Merge" + input: "o_force/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "o_virial/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\t\000\000\000" + } + } + } +} +node { + name: "o_virial" + op: "Reshape" + input: "ProdVirialSeA" + input: "o_virial/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_34/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_34/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_34/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_34" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_34/stack" + input: "strided_slice_34/stack_1" + input: "strided_slice_34/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_8/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 9 + } + } + } +} +node { + name: "mul_8" + op: "Mul" + input: "mul_8/x" + input: "strided_slice_34" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "o_atom_virial/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "o_atom_virial/shape" + op: "Pack" + input: "o_atom_virial/shape/0" + input: "mul_8" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "o_atom_virial" + op: "Reshape" + input: "ProdVirialSeA:1" + input: "o_atom_virial/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +library { +} +versions { + producer: 1882 +} diff --git a/source/tests/infer/deepspin_nlist.pbtxt b/source/tests/infer/deepspin_nlist.pbtxt new file mode 100644 index 0000000000..d7b5e1ecc0 --- /dev/null +++ b/source/tests/infer/deepspin_nlist.pbtxt @@ -0,0 +1,22628 @@ +node { + name: "train_attr/min_nbor_dist" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 0.3999999935274064 + } + } + } +} +node { + name: "train_attr/training_script" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "{\"model\":{\"type_map\":[\"Ni\",\"O\"],\"descriptor\":{\"type\":\"se_e2_a\",\"sel\":[60,60],\"rcut_smth\":5.4,\"rcut\":5.6,\"neuron\":[20],\"resnet_dt\":false,\"axis_neuron\":16,\"type_one_side\":true,\"precision\":\"float64\",\"seed\":1,\"activation_function\":\"tanh\",\"trainable\":true,\"exclude_types\":[],\"env_protection\":0.0,\"set_davg_zero\":false},\"fitting_net\":{\"neuron\":[20],\"resnet_dt\":true,\"precision\":\"float64\",\"seed\":1,\"type\":\"ener\",\"numb_fparam\":0,\"numb_aparam\":0,\"activation_function\":\"tanh\",\"trainable\":true,\"rcond\":null,\"atom_ener\":[],\"use_aparam_as_mask\":false},\"spin\":{\"use_spin\":[true,false],\"virtual_len\":[0.4],\"spin_norm\":[1.2737]},\"data_stat_nbatch\":10,\"data_stat_protect\":0.01,\"data_bias_nsample\":10,\"pair_exclude_types\":[],\"atom_exclude_types\":[],\"preset_out_bias\":null,\"srtab_add_bias\":true,\"type\":\"standard\"},\"learning_rate\":{\"type\":\"exp\",\"decay_steps\":10000,\"start_lr\":0.001,\"stop_lr\":5.92e-06,\"scale_by_worker\":\"linear\",\"decay_rate\":null},\"loss\":{\"type\":\"ener_spin\",\"start_pref_e\":0.02,\"limit_pref_e\":1,\"start_pref_fr\":1000,\"limit_pref_fr\":1.0,\"start_pref_fm\":10000,\"limit_pref_fm\":10.0,\"start_pref_v\":0,\"limit_pref_v\":0,\"start_pref_ae\":0.0,\"limit_pref_ae\":0.0,\"start_pref_pf\":0.0,\"limit_pref_pf\":0.0,\"enable_atom_ener_coeff\":false},\"training\":{\"training_data\":{\"systems\":[\"../data/data_0/\"],\"batch_size\":1,\"auto_prob\":\"prob_sys_size\",\"sys_probs\":null},\"validation_data\":{\"systems\":[\"../data/data_1/\"],\"batch_size\":1,\"numb_btch\":10,\"auto_prob\":\"prob_sys_size\",\"sys_probs\":null},\"numb_steps\":10,\"seed\":1,\"disp_file\":\"lcurve.out\",\"disp_freq\":5000,\"save_freq\":10000,\"save_ckpt\":\"model.ckpt\",\"max_ckpt_keep\":5,\"change_bias_after_training\":false,\"disp_training\":true,\"time_training\":true,\"profiling\":false,\"profiling_file\":\"timeline.json\",\"enable_profiler\":false,\"tensorboard\":false,\"tensorboard_log_dir\":\"log\",\"tensorboard_freq\":1,\"opt_type\":\"Adam\"}}" + } + } + } +} +node { + name: "model_type" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "original_model" + } + } + } +} +node { + name: "t_box" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "t_coord" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "t_type" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "t_natoms" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "shape" + value { + shape { + dim { + size: 5 + } + } + } + } +} +node { + name: "t_mesh" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "model_attr/tmap" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "Ni O" + } + } + } +} +node { + name: "model_attr/model_type" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "ener" + } + } + } +} +node { + name: "model_attr/model_version" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "1.1" + } + } + } +} +node { + name: "strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice/stack" + input: "strided_slice/stack_1" + input: "strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul" + op: "Mul" + input: "strided_slice" + input: "mul/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape/shape" + op: "Pack" + input: "Reshape/shape/0" + input: "mul" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape" + op: "Reshape" + input: "t_coord" + input: "Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_1" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_1/stack" + input: "strided_slice_1/stack_1" + input: "strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_1/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_1/shape" + op: "Pack" + input: "Reshape_1/shape/0" + input: "strided_slice_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_1" + op: "Reshape" + input: "t_type" + input: "Reshape_1/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "spin_attr/ntypes_spin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "spin_attr/virtual_len" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + } + double_val: 0.4 + } + } + } +} +node { + name: "spin_attr/spin_norm" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + } + double_val: 1.2737 + } + } + } +} +node { + name: "descrpt_attr/rcut" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 5.6 + } + } + } +} +node { + name: "descrpt_attr/ntypes" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "descrpt_attr/sel" + op: "Const" + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "<\000\000\000<\000\000\000<\000\000\000" + } + } + } +} +node { + name: "descrpt_attr/original_sel" + op: "Const" + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "<\000\000\000<\000\000\000<\000\000\000" + } + } + } +} +node { + name: "descrpt_attr/t_avg" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 3 + } + dim { + size: 720 + } + } + tensor_content: "\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\216{\001>MW\307?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\021\016\321\241\265\275\305?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\331-\372\336\377\350\306?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "descrpt_attr/t_avg/read" + op: "Identity" + input: "descrpt_attr/t_avg" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@descrpt_attr/t_avg" + } + } + } +} +node { + name: "descrpt_attr/t_std" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 3 + } + dim { + size: 720 + } + } + tensor_content: "\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?\004\326\021\217{\341\313?\006\023\300fA\376\304?\006\023\300fA\376\304?\006\023\300fA\376\304?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?{\350\361v\336\033\302?d\034V\204\nV\300?d\034V\204\nV\300?d\034V\204\nV\300?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\204K;\367F\032\314?\346\031\375^\371\356\304?\346\031\375^\371\356\304?\346\031\375^\371\356\304?" + } + } + } +} +node { + name: "descrpt_attr/t_std/read" + op: "Identity" + input: "descrpt_attr/t_std" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@descrpt_attr/t_std" + } + } + } +} +node { + name: "strided_slice_3/stack" + op: "Const" + input: "^descrpt_attr/original_sel" + input: "^descrpt_attr/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_3/stack_1" + op: "Const" + input: "^descrpt_attr/original_sel" + input: "^descrpt_attr/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_3/stack_2" + op: "Const" + input: "^descrpt_attr/original_sel" + input: "^descrpt_attr/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_3" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_3/stack" + input: "strided_slice_3/stack_1" + input: "strided_slice_3/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_1/y" + op: "Const" + input: "^descrpt_attr/original_sel" + input: "^descrpt_attr/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul_1" + op: "Mul" + input: "strided_slice_3" + input: "mul_1/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_2/shape/0" + op: "Const" + input: "^descrpt_attr/original_sel" + input: "^descrpt_attr/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_2/shape" + op: "Pack" + input: "Reshape_2/shape/0" + input: "mul_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_2" + op: "Reshape" + input: "Reshape" + input: "Reshape_2/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_3/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\t\000\000\000" + } + } + } +} +node { + name: "Reshape_3" + op: "Reshape" + input: "t_box" + input: "Reshape_3/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_4/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_4/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_4/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_4" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_4/stack" + input: "strided_slice_4/stack_1" + input: "strided_slice_4/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_4/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_4/shape" + op: "Pack" + input: "Reshape_4/shape/0" + input: "strided_slice_4" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_4" + op: "Reshape" + input: "Reshape_1" + input: "Reshape_4/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "ProdEnvMatA" + op: "ProdEnvMatA" + input: "Reshape_2" + input: "Reshape_4" + input: "t_natoms" + input: "Reshape_3" + input: "t_mesh" + input: "descrpt_attr/t_avg/read" + input: "descrpt_attr/t_std/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "rcut_a" + value { + f: -1.0 + } + } + attr { + key: "rcut_r" + value { + f: 5.599999904632568 + } + } + attr { + key: "rcut_r_smth" + value { + f: 5.400000095367432 + } + } + attr { + key: "sel_a" + value { + list { + i: 60 + i: 60 + i: 60 + } + } + } + attr { + key: "sel_r" + value { + list { + i: 0 + i: 0 + i: 0 + } + } + } +} +node { + name: "Reshape_7/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\320\002\000\000" + } + } + } +} +node { + name: "Reshape_7" + op: "Reshape" + input: "ProdEnvMatA" + input: "Reshape_7/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "o_rmat" + op: "Identity" + input: "Reshape_7" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_rmat_deriv" + op: "Identity" + input: "ProdEnvMatA:1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_rij" + op: "Identity" + input: "ProdEnvMatA:2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_nlist" + op: "Identity" + input: "ProdEnvMatA:3" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_5/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_5/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_5/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_5" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_5/stack" + input: "strided_slice_5/stack_1" + input: "strided_slice_5/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_8/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_8/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 720 + } + } + } +} +node { + name: "Reshape_8/shape" + op: "Pack" + input: "Reshape_8/shape/0" + input: "strided_slice_5" + input: "Reshape_8/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_8" + op: "Reshape" + input: "o_rmat" + input: "Reshape_8/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_9/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\320\002\000\000" + } + } + } +} +node { + name: "Reshape_9" + op: "Reshape" + input: "Reshape_8" + input: "Reshape_9/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Slice/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice" + op: "Slice" + input: "Reshape_9" + input: "filter_type_all/Slice/begin" + input: "filter_type_all/Slice/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Shape" + op: "Shape" + input: "filter_type_all/Slice" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "filter_type_all/strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "filter_type_all/strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice" + op: "StridedSlice" + input: "filter_type_all/Shape" + input: "filter_type_all/strided_slice/stack" + input: "filter_type_all/strided_slice/stack_1" + input: "filter_type_all/strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "filter_type_all/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape" + op: "Reshape" + input: "filter_type_all/Slice" + input: "filter_type_all/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Slice_1/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_1/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_1" + op: "Slice" + input: "filter_type_all/Reshape" + input: "filter_type_all/Slice_1/begin" + input: "filter_type_all/Slice_1/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_1/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_1" + op: "Reshape" + input: "filter_type_all/Slice_1" + input: "filter_type_all/Reshape_1/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/matrix_1_0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + dim { + size: 20 + } + } + tensor_content: "\357\223y\021;\222\224\277\033\371|\001\007\350\311?o\026\343\227[\251\327\277\350p\000\272\311xt?\211\321:5\274\034\313?Y\305_\375\032\032\273\277\215\270\025\360\214\254\321?o\227\253pv\201\272? CJ\353\225\304\323\277\235\r\205\354em\213?\000\026m,A\306\312?\000v\2779\252\315\265\277\233X\332\311\372\206\236\277\"\002\206\327\307>\262?m\306\025\311\352\377\224\277\231S\257\251\245!\326?\370\317\233\350\020\033\245\277\237\224\033\236\211/\320?\253\352\0334d\324\304?I\347p\017*\231\312\277" + } + } + } +} +node { + name: "filter_type_all/matrix_1_0/read" + op: "Identity" + input: "filter_type_all/matrix_1_0" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/matrix_1_0" + } + } + } +} +node { + name: "filter_type_all/bias_1_0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 20 + } + } + tensor_content: "\013\036da]\351\335\277\340\tY;\367\177\000\300_\242\327^\375\350\347\277\270\215\300\321r\211\327\277\311\313\310\003r\270\360\277U\225\352\210\261\205\000\300\321\371\032j\221\373\351\277\316\207\013\275\340\353\364?\'\353\221\341\270\005\324\277\240\020\244H\345&\204\277t\271y\200@\004\342\277\212\337W\254\347.\321\277\007\310\265\220}\273\341\277V?\331\3028\244\346\277x\275.\320\022\000\335?\332\232=\367\250<\001@\250M\210\031\220\303\350?\305\331\341\232A\246\362?\"\3034J=\232\366\277RAT\226\364\250\377?" + } + } + } +} +node { + name: "filter_type_all/bias_1_0/read" + op: "Identity" + input: "filter_type_all/bias_1_0" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/bias_1_0" + } + } + } +} +node { + name: "filter_type_all/MatMul" + op: "MatMul" + input: "filter_type_all/Reshape_1" + input: "filter_type_all/matrix_1_0/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: false + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all/BiasAdd" + op: "BiasAdd" + input: "filter_type_all/MatMul" + input: "filter_type_all/bias_1_0/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all/Tanh" + op: "Tanh" + input: "filter_type_all/BiasAdd" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_2/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_2" + op: "Reshape" + input: "filter_type_all/Tanh" + input: "filter_type_all/Reshape_2/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_3/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377<\000\000\000\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_3" + op: "Reshape" + input: "filter_type_all/Reshape_2" + input: "filter_type_all/Reshape_3/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_4/shape/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 60 + } + } + } +} +node { + name: "filter_type_all/Reshape_4/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 4 + } + } + } +} +node { + name: "filter_type_all/Reshape_4/shape" + op: "Pack" + input: "filter_type_all/strided_slice" + input: "filter_type_all/Reshape_4/shape/1" + input: "filter_type_all/Reshape_4/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "filter_type_all/Reshape_4" + op: "Reshape" + input: "filter_type_all/Slice" + input: "filter_type_all/Reshape_4/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/MatMul_1" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_4" + input: "filter_type_all/Reshape_3" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "filter_type_all/Slice_2/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\360\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_2/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_2" + op: "Slice" + input: "Reshape_9" + input: "filter_type_all/Slice_2/begin" + input: "filter_type_all/Slice_2/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Shape_1" + op: "Shape" + input: "filter_type_all/Slice_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "filter_type_all/strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "filter_type_all/strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice_1" + op: "StridedSlice" + input: "filter_type_all/Shape_1" + input: "filter_type_all/strided_slice_1/stack" + input: "filter_type_all/strided_slice_1/stack_1" + input: "filter_type_all/strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "filter_type_all/Reshape_5/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_5" + op: "Reshape" + input: "filter_type_all/Slice_2" + input: "filter_type_all/Reshape_5/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Slice_3/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_3/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_3" + op: "Slice" + input: "filter_type_all/Reshape_5" + input: "filter_type_all/Slice_3/begin" + input: "filter_type_all/Slice_3/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_6/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_6" + op: "Reshape" + input: "filter_type_all/Slice_3" + input: "filter_type_all/Reshape_6/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/matrix_1_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + dim { + size: 20 + } + } + tensor_content: "\2242\263Q\210i\326?\317\363SPrm\257\277mp\315Go\207\300?*6\331\3717o\333?\376\037E \356\300\256?+<\375\345\262\346\305?\032\271\037S\270\200\334??6a\036\340\253\310\277\360\301\260\r8T\304\277\304\3474*H\006\316\277\331\244\200V\003e\321?\035\\Be\234\320\307?\363\006\274W\367+\267\277\203\254\177)\310\322\335\277W\326\365\331,W\312?\244Pk\211\365\226\251\277UF\232\242)\215\334?\030\366\023\001\023C\264\277\004<\376p\007\257\320?\237\354sF\2418\255?" + } + } + } +} +node { + name: "filter_type_all/matrix_1_1/read" + op: "Identity" + input: "filter_type_all/matrix_1_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/matrix_1_1" + } + } + } +} +node { + name: "filter_type_all/bias_1_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 20 + } + } + tensor_content: "\244\273\223S\332F\310?0T;\363\325X\351?\300~\276\326\026+\367?7$\013\006.:\331?\343\375_Oq\032\374\277=Q\304\243A=\363?~\210 \204\211\331\370?\306/\364\177\216{\342\277I\260\"B\227,\324\277\255#\273\313\345\261\321\277k\3569V\022\234\274\277\035\276p\2004\225\347\277\355,/l\340\330\376?\200X7+=\311\246\277\017\250\225E\243\243\326\277\243\231\277e\300(\320?\271\3239j3\310\341?\340\257\351c\265\224\363\277\231\0056\003\036Z\360?1V\0246aJ\364?" + } + } + } +} +node { + name: "filter_type_all/bias_1_1/read" + op: "Identity" + input: "filter_type_all/bias_1_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/bias_1_1" + } + } + } +} +node { + name: "filter_type_all/MatMul_2" + op: "MatMul" + input: "filter_type_all/Reshape_6" + input: "filter_type_all/matrix_1_1/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: false + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all/BiasAdd_1" + op: "BiasAdd" + input: "filter_type_all/MatMul_2" + input: "filter_type_all/bias_1_1/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all/Tanh_1" + op: "Tanh" + input: "filter_type_all/BiasAdd_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_7/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_7" + op: "Reshape" + input: "filter_type_all/Tanh_1" + input: "filter_type_all/Reshape_7/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_8/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377<\000\000\000\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_8" + op: "Reshape" + input: "filter_type_all/Reshape_7" + input: "filter_type_all/Reshape_8/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_9/shape/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 60 + } + } + } +} +node { + name: "filter_type_all/Reshape_9/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 4 + } + } + } +} +node { + name: "filter_type_all/Reshape_9/shape" + op: "Pack" + input: "filter_type_all/strided_slice_1" + input: "filter_type_all/Reshape_9/shape/1" + input: "filter_type_all/Reshape_9/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "filter_type_all/Reshape_9" + op: "Reshape" + input: "filter_type_all/Slice_2" + input: "filter_type_all/Reshape_9/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/MatMul_3" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_9" + input: "filter_type_all/Reshape_8" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "filter_type_all/Slice_4/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\340\001\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_4/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\360\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_4" + op: "Slice" + input: "Reshape_9" + input: "filter_type_all/Slice_4/begin" + input: "filter_type_all/Slice_4/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Shape_2" + op: "Shape" + input: "filter_type_all/Slice_4" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "filter_type_all/strided_slice_2/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "filter_type_all/strided_slice_2/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice_2/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all/strided_slice_2" + op: "StridedSlice" + input: "filter_type_all/Shape_2" + input: "filter_type_all/strided_slice_2/stack" + input: "filter_type_all/strided_slice_2/stack_1" + input: "filter_type_all/strided_slice_2/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "filter_type_all/Reshape_10/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_10" + op: "Reshape" + input: "filter_type_all/Slice_4" + input: "filter_type_all/Reshape_10/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Slice_5/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_5/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_5" + op: "Slice" + input: "filter_type_all/Reshape_10" + input: "filter_type_all/Slice_5/begin" + input: "filter_type_all/Slice_5/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_11/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_11" + op: "Reshape" + input: "filter_type_all/Slice_5" + input: "filter_type_all/Reshape_11/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/matrix_1_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + dim { + size: 20 + } + } + tensor_content: "-\363\345\213\274\250\316?\\\355g*t\037\316\277,\0321SE@\314?\346\331\026\326m \336?3\370\026\213\275\276\241?:\366\332\246\301\316\267?\223Y\031\250\341w\316?vs\224R\355\010\303\277\311j\021|\245\343\321?\332\343\274)\375\321\266?x\306Y\213?\177\343?\376\\\374|\002(\331\277\362\205\366~\341R\341\277c\030_ \274K\241\277\017\227\216\267g#\330?\241\2769J\356\n\327?+0\206\202\377\035\307\277M\356\223U\310\003\301\277e>\325X\017\243\272\277\341\254v6\327?\323?" + } + } + } +} +node { + name: "filter_type_all/matrix_1_2/read" + op: "Identity" + input: "filter_type_all/matrix_1_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/matrix_1_2" + } + } + } +} +node { + name: "filter_type_all/bias_1_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 20 + } + } + tensor_content: "\n\232\245$\245\302\351\277\210\307-;:\316\311\277\266\237P[\3235\274?\327\001O\022<\201\374\277AW\203\246b\017\272?7\232#\262\370\262\362?\264\356\253\202#\320\333\277\001R\'\224\020\003\367\277{\275\321m\302\377\360?Y\005\216\311\250\227\353\277m\374/\026\276j\370?<\374\020\273\336\376\370\277\350\001\2226\265\205\351\277\217\024kO.L\340?\216\324\275\367\201X\325?\377\235\010E\324\325\351\277\257Q\300\360\302\177\351?6\rj)\224\363\357\277~\n\240\307\255\025\354\277_{\2079\022S`\277" + } + } + } +} +node { + name: "filter_type_all/bias_1_2/read" + op: "Identity" + input: "filter_type_all/bias_1_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all/bias_1_2" + } + } + } +} +node { + name: "filter_type_all/MatMul_4" + op: "MatMul" + input: "filter_type_all/Reshape_11" + input: "filter_type_all/matrix_1_2/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: false + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all/BiasAdd_2" + op: "BiasAdd" + input: "filter_type_all/MatMul_4" + input: "filter_type_all/bias_1_2/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all/Tanh_2" + op: "Tanh" + input: "filter_type_all/BiasAdd_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Reshape_12/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_12" + op: "Reshape" + input: "filter_type_all/Tanh_2" + input: "filter_type_all/Reshape_12/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_13/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377<\000\000\000\024\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_13" + op: "Reshape" + input: "filter_type_all/Reshape_12" + input: "filter_type_all/Reshape_13/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/Reshape_14/shape/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 60 + } + } + } +} +node { + name: "filter_type_all/Reshape_14/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 4 + } + } + } +} +node { + name: "filter_type_all/Reshape_14/shape" + op: "Pack" + input: "filter_type_all/strided_slice_2" + input: "filter_type_all/Reshape_14/shape/1" + input: "filter_type_all/Reshape_14/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "filter_type_all/Reshape_14" + op: "Reshape" + input: "filter_type_all/Slice_4" + input: "filter_type_all/Reshape_14/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all/MatMul_5" + op: "BatchMatMulV2" + input: "filter_type_all/Reshape_14" + input: "filter_type_all/Reshape_13" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "filter_type_all/AddN" + op: "AddN" + input: "filter_type_all/MatMul_1" + input: "filter_type_all/MatMul_3" + input: "filter_type_all/MatMul_5" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/truediv/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 180.0 + } + } + } +} +node { + name: "filter_type_all/truediv" + op: "RealDiv" + input: "filter_type_all/AddN" + input: "filter_type_all/truediv/y" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/Slice_6/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_6/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377\377\377\377\377\020\000\000\000" + } + } + } +} +node { + name: "filter_type_all/Slice_6" + op: "Slice" + input: "filter_type_all/truediv" + input: "filter_type_all/Slice_6/begin" + input: "filter_type_all/Slice_6/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "filter_type_all/MatMul_6" + op: "BatchMatMulV2" + input: "filter_type_all/truediv" + input: "filter_type_all/Slice_6" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } + attr { + key: "grad_x" + value { + b: false + } + } + attr { + key: "grad_y" + value { + b: false + } + } +} +node { + name: "filter_type_all/Reshape_15/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377@\001\000\000" + } + } + } +} +node { + name: "filter_type_all/Reshape_15" + op: "Reshape" + input: "filter_type_all/MatMul_6" + input: "filter_type_all/Reshape_15/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape_2" + op: "Shape" + input: "Reshape_8" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_9/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_9/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_9/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_9" + op: "StridedSlice" + input: "Shape_2" + input: "strided_slice_9/stack" + input: "strided_slice_9/stack_1" + input: "strided_slice_9/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_10/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_10/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_10/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_10" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_10/stack" + input: "strided_slice_10/stack_1" + input: "strided_slice_10/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_12/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 320 + } + } + } +} +node { + name: "Reshape_12/shape" + op: "Pack" + input: "strided_slice_9" + input: "strided_slice_10" + input: "Reshape_12/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_12" + op: "Reshape" + input: "filter_type_all/Reshape_15" + input: "Reshape_12/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "concat_1/concat" + op: "Identity" + input: "Reshape_12" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_descriptor" + op: "Identity" + input: "concat_1/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "fitting_attr/dfparam" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "fitting_attr/daparam" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "fitting_attr/t_bias_atom_e" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "d[\236\207\317\263\033\300d[\236\207\317\263\013\300" + } + } + } +} +node { + name: "fitting_attr/t_bias_atom_e/read" + op: "Identity" + input: "fitting_attr/t_bias_atom_e" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@fitting_attr/t_bias_atom_e" + } + } + } +} +node { + name: "strided_slice_13/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_13/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_13/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_13" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_13/stack" + input: "strided_slice_13/stack_1" + input: "strided_slice_13/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_14/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_14/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 320 + } + } + } +} +node { + name: "Reshape_14/shape" + op: "Pack" + input: "Reshape_14/shape/0" + input: "strided_slice_13" + input: "Reshape_14/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_14" + op: "Reshape" + input: "o_descriptor" + input: "Reshape_14/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_14/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_14/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_14/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_14" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_14/stack" + input: "strided_slice_14/stack_1" + input: "strided_slice_14/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_15/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_15/shape" + op: "Pack" + input: "Reshape_15/shape/0" + input: "strided_slice_14" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_15" + op: "Reshape" + input: "t_type" + input: "Reshape_15/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_16/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_16/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "strided_slice_16/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_16" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_16/stack" + input: "strided_slice_16/stack_1" + input: "strided_slice_16/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "Const" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "Sum" + op: "Sum" + input: "strided_slice_16" + input: "Const" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "Slice_2/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_2/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_2/size" + op: "Pack" + input: "Slice_2/size/0" + input: "Sum" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_2" + op: "Slice" + input: "Reshape_15" + input: "Slice_2/begin" + input: "Slice_2/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GreaterEqual_1/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "GreaterEqual_1" + op: "GreaterEqual" + input: "Slice_2" + input: "GreaterEqual_1/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_1" + op: "Cast" + input: "GreaterEqual_1" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_BOOL + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Reshape_17/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_17" + op: "Reshape" + input: "Slice_2" + input: "Reshape_17/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_17/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_17/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "strided_slice_17/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_17" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_17/stack" + input: "strided_slice_17/stack_1" + input: "strided_slice_17/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Slice_3/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_3/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_3/size/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_3/size" + op: "Pack" + input: "Slice_3/size/0" + input: "strided_slice_17" + input: "Slice_3/size/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_3" + op: "Slice" + input: "Reshape_14" + input: "Slice_3/begin" + input: "Slice_3/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Reshape_18/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377@\001\000\000" + } + } + } +} +node { + name: "Reshape_18" + op: "Reshape" + input: "Slice_3" + input: "Reshape_18/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_0_type_0/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 320 + } + dim { + size: 20 + } + } + tensor_content: "\233n^\240c\344\206?+\261\344\023\331\221\221\277\357\031BVM>\250\277\003\255_3q\360\206\277\271\365\3501\332\006\273?\272:\232d\344\212\226\277hd\335\014<\010\226?\200v\016\357\260Q\267\277\271&\2074\223)\241?Z\345\216\367\300\347\243\277\350P\007\3506\\\206?\223\340`\250-\034\255\277y\354J:N\307a?%\306;\311\333\232\265?s\330\3218\226\036\244?I\0253dhK\226\277\344\010\006l\363&\256\277\300\301%\365u.\301?S6Yr\210\r\221\277\005\371\035-\037G\232?\234\372\274\221\"\024\244?5\355\2266\\$\237?N\314\033\005\354\334\213?\013\211s\260\177\027\226\277R\2236M\216N\231\277$\275\233h^\340\211?;\341\262\307\272_\365>f/\001\017\033\263\275\277\315?\211\231\322\020\255\277\305%v|\177j\236\277p\221\250\027\010\267\247\277\257+\221\223T\240\232\277$\241z\360\317M{\277\321\000\345u\276|\256\277.`\352\225 a\245\277\204y-\310\217\245\222\277\340(K\347 _\256?\326\330\275\360\311T\210?u25\210\014\224|\277\275\025\177\177\365L\245\277\334n^;8*\243\277\355\t\210j\246\253\272?\355\317\352\326\031\327\261\277\256x\320\367\236\241\220\277R\330\307\020\027\243\244?\311g\005x\0310\241\277\320\236x\330\377t\262\277\310\330\313\266\253\332R\277\326-\234Y\224h\177?|\240\341\002\214q\260?\001r\300O\363\027\221\277\234\3616\376\203\264\226\277Hc\212\'\243E\270?%\370\251\005V\014\202\277<\273\320\251\214\371\300\277\"\035\362\306gi\237?k\022\020L\244\236y?\356\232\247:\263p\223\277.D/$\204K`?\370Ysr\363T?\277\222\261\206?\000\332\254?\310\262JC\305\032z?\303C\375\033\234\255\262\277\345\033w\234\345\220\221?\264\353\371UZ$\237\277.\316JT\233p/?\263;\177\257\307w\234\277\330/\027\224\373\231\264?\246\320\007\334<\200\202?\252\237\nP\300\346\274?N\024)(\227:q\277B\014\217go|s?\364\266d(\212\016\231\277\370\254T\034\215\261\236\277\3433X\260d\312\252?&\020aA\273\266\204?\205s\300\302\252\311\265\277r\377\331\236\323\272\261\277\267\273\357\245\312Y\263?;\020-i\375z\250?\nR\007~\377\321\250?\007^H5w\037v?\300d\235\177\205\026\240\277H\357\226$\347\242\243\277\236\303N\216\242\345\251\277\227\027\264\345\014\227\277?\263]/s\026\002\242?F1N\310\211\231\213?R\3211\260\266\364\272?\312\245pj\024f\265?\226~R\236+\347\246\2778\030\034-/J\251\277\241\207.\360F\217\224\277\371\334\205\245 $\202?\203_\313\372N;\214\277\305\277E\310V\000\252\277\377\267\351&\331\025s\277\034\355-\247\266\177\254?\n\371\321\337j*\262?\036\333\2407\221\031\256\277\017\347=N\224\375\237\277[%\2714\240Q\256\277\260\n\332.\r\322\235?\225\246\321\036g>\217?3\376/\230\267:\\?\274\227>\016\331\366\245\277E\027\221(=\'\252?\177\362Cl\303\300\262\277\036Qv\031\244\351\241\277\3754E\355\350\253\266?b\006\266>\314\200\256?\352\341\205\023\004^i?\250\367\273O\327\352\225?IR\272\000\374\375\246?\026\251\242\315\276\010\261\277aPZ\375\0057\272\277\022\256\211\2626D\221\277\3479 d\306l\206\277\3666hA\314\356\236\277\252 C9Qb\246\277F\013\006\252)N\240?\304\316[q\374\303\231\277\214\215\271\306\016\276\241?w\364\256wq\337\202\277\304\3752\034\244K\260?<\225 \3774\216\232?n\374\324\343I\003\205\2778\357\200\373\262\270\246?\326\232#\355\250^\222\277\244G\302\206\235\023\220\277\"\365!8\032\374\213\277\001n\322\261\322\037\225\277p\346\200\312\366\030\232?\217\207<\236\253\202\220\277\324La\201\344\026\242\277\317\307w#x`\222?\276\202\236`\252\352\271\277HJ\267:\273\364\234\277\257\225\340\314\327*\263\277U\335\342D\330\017\215\277n\265\377\"\222(\254?\2370\024W\035\203\260?\245[\371/\2450\264\277\226\340\216\370\301\323\262?\214\314\216O\034\2406?$RP_J\255{\277u\r>]-\001\245?g\223\346\323N\014\224\277\031\351\211\003t#a\277`>\022=d\210\243\277\364\345e\231\007*^?G\327f\275\272s\204\277Jh\213C\177)\263?\246\035\224\346\214\212\247?\027\007\340C7\022\264\277\202\310\303\237\326\217u\277\270\215{p\020\227{\277\254u\210V\316\244~\277\331\373\335\001p\241\267?\\\220Y[\306>q?\250\304\213\250\210z\265?s\361\332\242P\350\251?\226\373\303.\354\316\220?\335\2465A\377\204\261\277\036Qw\001gK\265?j\346\006\177\027\311\201?>\363\\\213M\261\267\277\213{\254\213[I\240\277\025\370&;NT\246? \202\343z\270\272\273?\025\246\317a|D\211?+\237\316\351t\356\221?\363\035\202S\347\310\267?y\271\326\340X\374\235?X\227\220\345%u\224?\325*\233\222\357W\272\277\3320\254\246\352\331^?(\262\343bW?\223?8\207\t+\352]|\2770S\261[\326\355\251\277+\305~\030\312R\246\277\233\020/\227mg\252?{2\347\375\027m\263?W|*T8\332\226?\334D\271\207\237\341^?\215$4\013\315,A\277\216\024\302>\201w\265\277\314q\264\304\255\360\260?\271\000\334h\233|\262\277[\023s\273\237\356\257?\247K\033\350\026l\234\277\310>\327N\274\357\242\277\321\204\222W\177\t\262?\316\263\216\304\036\264\212\277o.\270!\247M\271\277\037\351m\3736\245=\277\324>?\337\376\254\214\277\036\363\302\272\034\237\257\277\377Z\275JP\020\212\277\271\007#V\262\306\260?V\343\234\232\271\273\223\277\361M|\256\326\372\224?gV1\216I+\261\277 |\254\237\013\216\236?RP\307_\251E\261\2772\352\035\217\3461\251\277*\327r\322\3577\205?\317L\\\342\322w\212\277\343\301\022nl6\261?!\357\rE\2567\261\277y\001\234=o\302\252\277\310s=\355,\240\240\277\324E\227\304\305\212\260\277q\305[\225\351\263\237?S@#L\023\240k\277\323/\021\365\377N\235\277\225\262\225/\363\256\242?\313[\351\023 \377\252?\020,\220\250\345A\225?\t\321\013\030\246\245\253\277\307\027\363=\022\321\264?\035\335_X\335\036\260?\"\t\213L\023\272\252\277\301\264G\335\275\t\245?\234\325\352D\320\253\243?\222\222l\342\235\246\264?\344 U`A&\233\277\3340\261FC\014\261?\222A\272S\315\375A?\324]\206\020\t@v\277\230\234\357.pZ\243\277\231g(U\273\007\256?\273\271\363\260\022\266\240?$\2151\262\3115\221?Y\344\356\353\363\350\256\277\230\235^\360\313\025\223?^\212\366\332L\025\243\277\366\232\030\235\t\321q\277\034\206[\356!\315\240\277M\006\002%a\337\240\277\322\226\025\237\240\275\240?\343\',\334\230\247\242\277\265\241i\345x\324\234\277\270\204\275J+\211\301\277\261\303*\346l\n\241?m\37639\177\n\260?5$\361+\330\327\243?\356\312\321v/\032\246?a\251\231\022\366\016\244\277.\341\240\212#\326\261\277\224h\013\213E\264\254?*\313Q!\326s\260\277\rP\333E\206\217\267?\223\346\313\270\031\276\255\277&\207\335\374\"\246\234\277gLn\322\033n\243?\202\301t\243?\343\317w\0204\023~\277^\200{\016\247P\226?\360,-3\361\213{\277\236\350j\310\332\372\235?J\262\325\331:\344\260?x\014\352v\335\244\242\277\336S\335\334\227=\224\277\332\034&\373\267h\262\277\301\263Z\233\323\007\210\277v\000\036\264\202C\245?\340\024\317v\032\237\200\277*t\010%\313F\251?_\211\356\346!iy?\3337\253E\252=t\277@\313=J\313\360\206\277\330\226D\214Q\255p?\210\342\351\350\233\323\236?\265\030\330_\216\276\233\277\300nI`\343\273\255\277u\357:V\000\310p?G_\300\2062\325\257\277\343_\021\236\275\202\235\277\216\367[\013\302{\235\277\255,\237B8\323\203?\376\255\223n^\236\264?d=\225%[\036\263?\246\005\223\350\276&\256\277\204=W\205\007\305\262\277S\274^\362\300\375\213?af\372_\373\343\274?K,\331 l\307\227?\270c\013\260\250\n\241?\177\027pyk\002\230\277\252tt\215C\332\205\277\034\367\260BQ\355\223?\261\3306\330\262s\242\277Nh\302C\026?\246?m\026\242\006\336\t\215\277j\267\305[\366\333\251?V\341{\277\357\207\200\277\250\261\336\\\024\342\240?\261\006\020\341)\262\262\277\005\014+\207\213\303\300?G\271p\206\314\231\266?\202\226<\021\204\314\252\277\020\306\266s\236z\234?\245\317\006\n\275~\230?~V\250x!g|\2774\035\376\232\241\024\215?Q\236\325z6\307\242\277\006\r\356\320\001^\230?(\353\327\257w\334\222?\272\3126on\237\276\277<\217$l_G\222?\013D\2747+\216\264?\02204\252\"~\227\277`U\363\275x\255\240?\021S\t[$[q\277\244\206b\177\\\327\230\277\225\330\242\264\227\276|\277w\220\3716\216\324\203?\371]\315\370\364@\262\277m*IM\221{\255?\343K`\236\265\351\220?&\332w\314w\314\273\277U\325%oc}\205\277hW\177]\234\266\242\2770\264\311\221\016t\263?\336\301\365\251\304~\246?zt\202<\275E\244?\245\372Vq\363\311\273\277\013w\367\375\345}\211?\301\017\311\221\3704\231\277L1\003\337t\347\242\277\377yh\0051\344\235? LOY\265q\220\277\360\201:\204\367\250\236\277\362@\020^\270Ui\2774\213\356\253A\'\203?(\370\267\017\240\213\241?\262(k\036\352\003\303\277W\347$\272\260\343\263?\004\325\177\306\025\273\235\277\266i\255u#[\260\277\006\300\033XL\340\251\277\034\3479\333\337\204\226?kB\200\217\220;\241\277\001\303JuL\001\263\277X\020$\213\342\336\242\277\000\\\351\030m\200`\277\355\267\356\271l\206\275?\213\216\242\r\225[\227?w\344\371\0245\033\245\277\2057\334\366\216\332\225??zj\006\366\254|?\246c\270\365\245\355\245\277\233M\230n\224\263\252?x\377\227\371\267\251\246\277\220\334rN\000 \243?tG\354@e\267\265\277\2633n\263*\211\262\2777\2345}|\034\233\277\254\"\246\310\220\372\240?[\334\3753G2\262\277\337#\354T\020\303\256\277\272\236\243%\357\337\226\277\234\001\353\264\316\371\235\277\3054$\207\364(\210\277\277\275\262[i)\254?\253t\034\366v\361\244\277\364g\362\343\035\006\232\277\277x\316va&\263\277\n\375\225\331\2468\230\277n\032B[Rb\220?8\260\245\355\016m\224\277\326\233k\t:\253\266\277\224q)\324\3454\300?\333\r\225w\300#\252\277+/\237.r\033\251?\301J>m\231F\214\277\265\211\265\013T\207\225?V\245$^r\352\200\277\346{\030\275\3573\226\2777\350h3\277p\232\277\231J\301\330E\201\252\277uN\200\345\342r\255\277\366\267w\270\253=\263?\340\304\350\360\214\215t\277@\014\311\003x\320\272\277\203b\016\346>{\177\277\302\006\370\306\274[c\277\320k_\344\344\220\211\277oK\316\376@,\261?\014\310\000\365\323\377\255\277\221\212\340\r\014\364\250?\371\213>\322\250\231\251?\270\255\374S\335s\231?\252\277`\260\374\205\227?.\260,O\265\240\251\277\254G\215\321Q\207\234\277\355\343\264\355W?\267\277\355\257*\246\363\213\246\277h\3170\317q\014\214\277h@\004w\373\027\237\277\227\321\037]\"(\223\277\327\024\351Y\031\374|?\236\377\222\363\223U\206\277\020\234\224\212\211\024\224\277\2525A\260\227\307\241\277\023\244\235\027\t\362\222?Ec\025\2513\301\243?\n\266\260\334S\314\240?\261j\265-/\220\247\277\016\230\234i\344k\244?\215\344\3007\206\371\252?\270\'b\343O\037\300\277\302\345\355\2304k\233\277\353\035D\017\370\322\261?1C\030ya}\253?\200b\032\260s\202\254?\334\025\022? \035\266\277\334\271\354s\224<\252\277W|\002Q\014?\227?\243\004\263`\213\024\253\277>&\203\230\005.\266\277iP\017?\227\332\227\277\034\034\370r\350\257\212\277M\316\227\"\377S\244?\032\313T\nB\234\273?\263\216\324V\256\310\250\277_x\256G \207\227\277\213\244\362Z\n\304\270?\325\\!\200n@\252\277\270\354\024\361M\262\251\277I\356i\334\237\275\231\277\310b\277\023\023\007s?\274:\301\r\322}\243\277\324\3374\256\036K\206\277\231\226\016<~\r\217\277H\2552\300\nw\261?L\002\004\261\350\022\241\277\002\013\326\354cEy\277\010\332s\031Y\313\211?\344*#[\r\262\275\277\204\225(C\017\215\245\277\254*\177\221\354\024a?\246\031\326\301oT\224\277\306\n\363\021h8\265\2778\013\336l\263&\252?(\242g7N\220\265?m\033+\037L\304\255\277h\323\025\314l\331\254\277\336\362/c\251\313\217\277\005}\270\020V\256\220?\033\200\213\237|\330\254?o\351\345\264m\014\246\277\337\330\222\332`\r\215\277\005\344\333\244m\026\240?W\262\305\235\261\344\260\277y4\220\220\235^\302\277Q\311Td\'o\216?gC\2114\252B\260\277\355\212\005\264\301\254\243?\024\307\205\r\356\014\246\277\253\346\201\013\273\273\230\277\026T\302\312y$U\277\265\251\006\232\036s\243\277\330\2646\336\247 \265?\352\027\014\272\371+\275?\370\316\320d6j\244?\001p\244\224\256\316R\277\234\314`\323\302A\267?\230\013[[\201\304\244\277\345\247\242\271\352\335\260\277\364;\367`\0077\232\277\3712\234>`\370\230\277\240\266{\304\001y\222?\003\242h\330+\266w?\314zt=\351=\304\277%\3266\302\220\260\271\277\230kp#\"a\226??,y\3177\233\241?n\335%\212\024I\246?\037h\'[|{\202\277h.OH\271\322\243?\314\033sC.&\272\277Z\252\3242#\215\266?!(\334\377}8\246?\304e\r\010\276~\224\277a\302\313\337\255d\201\277\343\022\354\337S`\230?\021\243eA\254\023\246\277\334}V]\207i\256?X\013\236\325\305=\253\277\273\036\"\313\215\025\240\277;\271\311ci\307\223?3\014\243&\277Q\250?\\4\025\003\345\350\247\277\272\221?}\237\313\254?\315\275\366\253n\307\221\277\205\343>\301\323\203r?\254\223\230uW#\245?\341h\211}\346\270\241\277\nX\247\336\005\361\201\277Pjn\301)>V?\240$\205V\361\207\244\277\305\244p\202\351\033\242?\002\357\3762(\356\200?#\004-B50\241?9\215\243\"OmR?s\rw\3308\257\233\277\204\031\305w+\037\301?\306\310DnA\177\226?\226f\003\204\254n\245?\314+\256B=\265\253?\245\202\032^\330(\243?\235\334*\256\330\036\266?\240u\242\314\0055\240\277\235\326u\001\216R\220\277\233[]\346O\026\212\277\017\024\226\220\234\303\242\277\227<\367\260N\323\264\277\216\375\024Z\207\223\245\277\242\312\216v\264d\213\277\314\371=\273v\317\204?\332\\C\013\206\032\246\277\003k\346\233LJ\257\277(k\016@\206/\255\277$\305^\001\3620\276\277\266\351\317\3673WX?\366\312\211\020\234!\260?W\311\354\206\214Q\244\277F\022\305\233\320\355\261?8\204y\353\026U\244\277)\205\215\323\321\007\211\277\321\314n\372\325J\273?A?r|\260\322\247?s\261\203\017\372jP\217\200?\032@\"W\332$\263\277\232\370\023\252\246\317}\277\314l\212\267\034G\256?\336V\273\234\006\320\212\277;Zp\"\205\313\022\277\024\345\264\003|i\264\277x\233\214\327\035%\236?X\221,\030\323\331\263?G|X\331r\264\267\277\337\332[\223m\253\202\277\226\234]\230\010+g?\353nov\260@\255?H#+\276\355\326\255\277#\340e\312\366\006\302\277&1\022;\365\304\301\277\306\375q\323\032\262\241?\274\224\223\032\251\231\262?\355\"\302:C\203\263\277\315\2110\tI\201\222\277_\017\001\224\343\035\257\277l8\207\206\334\234\205?\033^\005\253x\"\250\277HX\005e\260\234\210\277-\201\2614\232\r\270?\343\276\300b*\275\201\024\201?)1\005\277^\230\205?\306/)\375%yx\277\373\212F\007\240\232\224?\207:\307\tF\033\267?\327\303\264\016\307\211\264?R/\254\360U\307\233?\344\235\313\220\361^\241?:Q\262\234\254\310\255\277\350\207m\217\201@\306?s=\027\300\275Z\244\277\350Gv\367x\303\245?\204\204\367\240\031~\236?\211\2760\224Z\035\233?\375>\203\030\250\034\265\277i\312\260.uW\240?KM?};\005|?\311\210y;&\212\261?\211h\373\332\025J\253\277\324a\324\026\354L\207\277\354\364\206\230\334V\212\277\304Ck\375\325\034\233\277\363\210\262\032\3224\237?\266b\202\306\035\222\226?\201J\247\246\361\333\215\277\\\342\346\266\276\377\234?\030\245\032{^\343\266\277\212F\255\2173\265\267?\233\'Y\204\024\342\203?k7g\372\240\021\217\277>\014\306*]\t\263\277\"\270_\230\231P\221\277\3616\304\021\307\241\246?#\027\001\207\332\350\223?\310\237RhCT\272?\020\311\260\232\262\256\256\277\323\353J(\243\361\223?\341\357\"6\341\320\225\277L\0276\337\0172j?j\002hN\370\240{?d\'\314\217\213\302\264\277\274\231\371nG\216\222\277\346\243k\322\200\230\224\277\371}\236\333\\t\263\277\3130-\370.\361}\277_\214\002[\243\265\207\2774\226\303\242\0270\202\277X:n\0168\312\244\277\007\211\312A\261R\243?\223\262\334\257\202y}?\227\017\2353\215\257\252\277\277D\366W\314Vr?\371\225\037\206\021B\234\277[\020r\025@\257\273?\002\264\037\271\277L\261?\356\312\222n\220\333\262\2778\356\332\343\2037\276\2778I8\343\n\334\246\277\367srW\017\241\242\277\263\341\220e\224\275\242?9\027R\357 #\237\277\223??\226\321\n\272?\344\232\266\tL1y\277R\237+\221\254\264|\277\036\211{\017\222\251\314\233\277~\363\305\372\020\211\264\277\023\035\016\206M\264\223?\261\351\210o$\363\271?\343\341\225\356\225ws\277\021\226Y\360\330\273`\277\257qo=\003\006\200?T\376\221.\215\272\236\277Y\240\251\337U\027\250?\320y\231\nhx\247\277\211\004\202z\311\210\250?\016\213\333\374\371\373\260\277\277jpgdo\255\277\r\317\313\234\320\267\224?\366J\225,\004g\250?8\261\201\236:\245\241\277\300Wo\247\227\354\263\277\270(\376R\266gv?M\014\300*\256\374x?=\016}\357\331\247\226?\371{\263m\251\242\202?\213\267\361!\252X\256?\247H\231\321\272\354\273?<\000^uJ\341\224?r\024F\017\311M\252\277o\031\215f\260K\245\277\220\034\227\312c\372\246?\020O\004\361\021\023\243?\277V\033F\333\232O\277`\325\212\372\307\257\247\277\317m\032\366$\260\230\277\007\213L\362\323\326T\277[?5\002m\n\247\277\274\225L\200\244F\236\277a\260<\206M\261\251?5\353\2042L\007\241?\007\303\034\254\237\352\257?^\277\301\0315\"\276\277\355\263<\r\366\372\264?\030\274\335(3\010\235\277oIM\271E\032\227\277\003\006\030\273\263\r\212\277\201T\235T|\371\265?9\222\036\277\217Y\250?\026\224\207\0004:\223?N.44\246\226\204?\361\017\210w\201\350k\277\260*\302\027\215s\240?\362\000\372L\363\226\220\277\021\220\033\357H\303\220?\263w\353`Gs\236\277s\001\240\375\001*\212\277\020\317\315\330&\327\213\277\030x\020\033\017-\243?\007\273\013b\306<\265?\257rt\233\332_u?6=k\2432\360\256?\231\324\247\250>\323X?\260\315Z\256\ro\302\277\033\026Y<\272\207\256?U\316|\215\352\225\214\277\270p\321\201J\324\233?\337\0037\316z\353_\277r\036\276\334\212Y\235?\250\026\334\345\305\317\252?@\212\300\246K\277\225\277\3150\222\272A\266\242?\337i\232\022\203\202\262\277\265\335\352\313i\262m?2\325\232|\364\352\246\277Y;,$\307o\223\277\031\212\005\347\234}\243\277\362\2635\307L\257\240\277\233\023\233{\315\232\202\277\034\261\317\255\010\210\300?\305\224\360\211\213\266\234\277\216^>\275\274Z\202?\344\342\212\320\231\022\256?\016\0026\002\010\021\300\277\001\262\256\257HC\241?\307\270\231\310\214\030\251\277Y\325l\216=\246\235\277\365\366b#=\303\227?g\351\366>H&\235\277jPL\330g\264\264\277\304\245\t\251\3576\261\277F$\246\274Q\376\262\277\025_-v6\372\250\277^k\226?:d_\277\203\321O=\247\247\207?> \321T\376i\234?\356\232@<,J\250?Z\264\3313\006\317\237\277*\327\304\266\006e\223?\231%zG\242J1\277\357\221\2331\"7\262?{\377\030-\244\366\241?X*\265\215\252L\243?\211\376jJ\203H\236?Lmd\251\303\211\256\277-\213y\036\214\357z\277\014\027\265\260\003t\217?\261 3\320\\\244\214\277\3025\261E\263<\264?\331\233/\037;\267\204\277\363\213\222\332\244\226\265?\354\032\210\233w\250\251?\207\371\340\310\201p\227?\337g\252\000\211\337\201\277\326C\274\223\"\351\230?G\371S\205\332\023\267\277SE\264\\\326\267\242\277o\300\265\376\365\375\204?\2625\225Q\r;\272\277tGp\213\217\276\231\277m\314FA\317W\237\277\201K\316\221U=\242?\033q\364Be\257\243\277VsF\000\360\360\270\277J\244.\302\233\243\261?\016yQe\354\345\241\277\204\230S\246\355s\264?\0261\366\215\316W\264?\257\245d\337\241\372\217\277\234\355)\223K\016\230\277\177#F%\372:\237?M\255\230\363\233}\215\277A\326#*\372z\240?\314V\216um;\232\277j\327\2343\244\356\240\277\305m\215\265{\267\200\277<\017\356#\223\006\230?r\336\313\010\317s\260?\314\336YU(\355\233?A\235Y\337\335q\231\277\346\344\245\307\221\233r?\327\027\256\027g\241\271?\223\337N\353A\030\241?\370\006\235\366\227\361\256\277_\305]\364\322/\243?>\362\207\013\232\031\235\277\021\200}\032\275\207\260\277f\'!\300\t\217|\277\243w\240\242g\010\240\277\252\322@5Q\254\216?eZ\332H\215<\271?\324 \273\232\212R\247?\023\202\231=\016F\235\277V[!\345\000\237\204\277\024x\203\344\304>\300\277\367\362\302FN\214\301\277Q\330q\3469\252\250?\027G\016\233~W\205\277\252\035,\240\346n\224?\230~\030\266\314Q\231?]\253w\353\321vh\277\311BZ\035Wy\252?\216\225\341\0043\207\227\277\224\215\2544\377\024\236\277V4\331\347V\335\222?\221\231\177\312\364\316\302\277[#y\'\316\343\213?|\2765[\344n\243\277[[:\257\213p\276?\255?\326\321b\240\277\216\020\207D\016\333\260\277\214I\232\377E\307r\277\223\351j\340V\211\266?\264\232\263\000\026H\237\277p\'\346\230j\016\302?\313A\022F\265K\264?\266\373\267kn\202\227\277\363\257eZ\353?\242?\361.\032b\333\370\243\277v\275V\'\na}\277Q\360\32032\323\240?cH\005\376\313\217\266\277\276\r\221\300\251{\\\277\227bD\001\2163\257\277\023\312\316\210\363\024\221?\3066L\202\2456\244\277M\232mP E\231?\0148\212\237\001t\246\277\017\376x`\335\n\247\277\333\256U\331\253\375\220?_\'\352\315\351\202\212\277e\357\227\025\227\310\224\277\357\326|\273`\335\242?\214w\240\314V\211\254\277\004\\\264\360\311\361\264?\032\365T\245\316\211\203\277\205z&\222\036r\206\277n\305\233\255Q\247\272?\3652\227\267\302\001\223?\314z\337\236\365\377\205?P\270\324\246\313\374\232\277v\"\227? \372\264\277F\237\246@\024D\265?\260\275#\207\362\267\243\277\035/\210\372\303O\215?\206\273(\022s\\\275?\315\310\273\267Y\276\250?\021\013g\232U\033t\277\326\244\3745#\017\246\277@A\234\354(\347\211\277\356\260|\003\223\217\246?\343K\242\005\234\336\223\277<\031\301\257\312\302v\277\276KBrAn\245\277#\345X\332(i\257\277\326#51\346]\243?\033^\000\030W\220\273?4\t#\342\302\037\262\277\226l!\023)\025x?\273\215\256\336\315\317\301\277\200\273o\360/\227\233?.p/fv\277\245\277\335\265\270\264\315\227\241?\270\274\2468\303i\246\277>v\321\240\037\203\265?\223\244Jp\230\205\204?_\362\251\006\275\245\226\2779\242I\332\304\352\233?\000;[+z\372\263\277\001\010<\255\207\231\220\277\201\273\222%\326\232\267\277\303\023,a\340M\262\277\017w\355D\213\335\265?\305q\016H^d\210\2777\212\247\357\213\000\243?\225\2379euK\260\277\372:\333\2653k\213?\367\322\370g\252&\213?\2510\255\246e\212\235\277\263G\035\231\276\364\242\277\341\346M\032Z\030\222\277\252\002`\207\277\036\247?\355\356\247\017d\373\237\277M\336\205qi\021\251\277\242\333\354g\346`\264\277zw\360\230K&\303\277\031\332\347.Ff\241\277\353\242\372\007\311\221\211\277\3207K\204*\302\262?Iq(kj\005\300\277\321J\237t\220Tz\277\237vz\201o\242\257?{13\301V\376\261?\324j\2354\224\206\207\277w\230\214\247H\205h\277{\235bq \r\240\277i\301\371\010\330\033w?\r\346\347\314\300\325\242?\315\343\361\2168:\204\277\332\253\352>L\235\214?=f\301\352\014\221\266\277\354N\275\np\001l\277\205\226d\352\013\351\212\277\177_,+\304\233\247\277!{\256J5k\212?\021\273\367D5\364\220\277\314{?\314e\326\204\277\365\200\333l*\221M?\006x\034\2238\272d\2778\031\372\334g\016\240\277\001/\252\"X\374\206\2776D\317\t\345X\275?\220\354O\'\376\032\255?\004\320\335!*\035U\277r\017O*\242c\262\277\212B\247\020\362\326\263?e\202\013\013\007\224\236?\331\216\325\213\004\230\234?\342@\013/\224\255\260\277\212\206%x\245\022\213\277\0326\357\014/\366\236\277`G\363\336\267\247\224?\01401\255i\253\247\277\270\206\377i\235U\226?j\'\201\357\242z\273\277\207\177\035\326\3379\243?\233H*\004\3710\216?1)\254}\305\323=\277\2452\221\364\023\213\232\277\233W\225\020\266\336a\277\"g\237y\340\244\246\277&\216V`\355\230\256?\351\314\224\233\3750\200\277\200r\033;\245\371\274?\220\242\375\":\313\266\277C1]\276\307\231\260\277e%i\237\014\371\264?`\271\315\224\310\307\244?\222p\262\013\244\355\274\277}1\026\256H\245\243\277JY\216\\*\\\251\277 {yV\330\377u\277\327\251M\016>\264\273\2775\271\360\210\336\325\212\277\232\345c:\177\031\223\277\020<\211A\247\264\241\277H~\256\207\366\320\250\277\203\345\000\332\367\337\272?,\266\231\222\277\r\250\277\323\247DI\306\325\226?\211\033\371\224g`\246\277\331\261\270\251 \233\265\277\334\337\021\343\222\211\251?\323\033\267\304\302M\206?\227\370\327\230\254\347\261\277\220\207\204\005\006|\276?\207Q}\314<\r\232?\376\325Z\022x\335\201?\250\r~\375\200\374\244?\356\364\331\226&\330\216?\316v\377/j\347\225?S;|\226\370d\225?\007\222@\374\020\224\243?\366\r*O\226E\245?DG\314\025\010\327e\277)\323b\20131\224\277\267$\032\356Q\344\242?\352 \277\013VK\261?\002ZY\022\351\247U?t\341\220\002\260L\277\277\255\211@\270\337&w\277!M\022\353\020\000\250?\250\177<\374XaM?\217LJO\211\373\177\277\244\344ww\307E\261?\020\256n\363\362u\246?\371u,c\342\307\224\277J\177\364\002\346\016\226\277?\331.\231\207R\273\277\306l\352\252\021\t\246?\036/\240\257\312\250\231?V\314\365\205\357\231\270?\006\347\325\031\203\005\255\277\230\300XS\032\304x\277\202\352o+\252R\230?\003\377\205IVT\261?\247h\031\255\365\"\240?`!\220 {\310\263?l\231\354\301\365B\260?\256[\033t\253\262\252?\025\303\331Y\240;\231\277\320\332\0337\201\354\266\277\235H\206\275l\204\234?\220\303\022\373\024yt?1\354K]\004\375\250\277\371\360\367\200\264\256\242\277H\027\r\303\3250\233?\373\273\257\353\215\215\222?xn\234\334\213\232\260?\322O\271?c\353z?\322\236/?\374\301\261\277\3116-C\214R\207?o\301x\205l\253\266\277%\336\t\212\341\376\265\277\257\336D\315\233?w\277=\271\3206~1\203\277We\351\264\351\020\271?\237\274}\240\311\030\220?\342\022x\263\346\227\244?\216\177[\010\200\256\177?\350\362\376M\033\037\203\277Xn#9k\350\230?\365AW\260\233\302\251??\375%\313\260\206\207?[\347\316;\260\020\232?\234\356\246\271\334n\222\277nIY\234\006\274\242\277/\320p\267S=\235?G\313\014\332r\375\204?\271\003@%\242\341\232\277\370\254\240\253\340?\235\2777\313r\352IE\222?\300\231.\010.\340\245?2\030,\000/s\250?\343\005\306{Kc\254?\323Q:\217\3315\243\277b\":k28\266?,=pB\260Y\241\277\227\226!\202(q\251\277\330\370\031\352[l\233\2779ozkF\"\220\277ND\036\240Z\205\221?\251T\321\261\323>\222\277h\004\026\021\376\031\241?\336\033\"\232\\ju\277K\363\350k\267\027\232?\327\361\217;\346\211\213?\"\360\226\205\3431\260\277i\227b\005s\236\245\277~\242\225\241\342\\\224?]\346\t\373\233%\224?\237\234\257\231\272\017c?m\312s\210\201\"\274\277\004R\316\346\210\033x\277?\271S\333+J\221?\312\013|!w\037\211\277\210Tv\252\257\370\200?\3734J^\214\t\246?;\274\021\300u\201\261\277\013\205r\267\232b\246\277\226iT\0072<\262\277\2123\237\375\021q\241?;\035\t\306\251\031\210?\204I\346%M-\202?\331G]\300\241a\206\277\324\234\277\033\357\346\231\277\347\3471\004\274x\247?\340\271\010g\362\336\236?-\215!\n\236u\255\277^\026\000\276\325\'\262?\211\356\306]\001\311[?\303\017\024\325\364L\260\2778\207\"{b\010v\277\257\024\225\265\262\316|?t\235\351\013\266\016\227?CP\020K\231\240\265?\001seX\006\322\237\277Zu\\\264s\032\254\277\027\230\205\266\024\032\225?\341\ni\027\342\245\222\277Tr\t\000\273\355\234?\240\222\261\207\314n\264?F|r\325\234G\256\277\013}O:i\261\262?\343\347\025\215\333`\247\277Z\356\330\200\334\205\217?\231\007\337+\365\250{\277\022\373(\307\242\333\301?\272We\013\021M\223\277ZL%\254\326\021\210?\021\304\320\276?\322\264?\312~\037\300\320k\253?\374\221zU\016@\257\2776\366\311\334\355\363@\277\026\016K\351\240\031\202?\332\276ke=\206m\277M\353\300h\305\037\252\277e\200\345\r\211t\251?\353\373\215D\350\247\237?\332\313&\261\260\276\217?}\361\n\242\277\252\277\220\271\020?R\003\257?\t\212[\210\345\210\244?\255:\253t\313\022\240?\036*#\212\206\316\246?\0210\215\350\355B\217?\253\000\341\020{p\223?\025\326\275f\306\256\273\277\251\303Qq\242\020\271\277\233:o\241\225\277\205\277\223\233TW\344j\221\277h\326P\336\004\016\224?V\025\271vR\023\273?\375\221~\010\247\273\264\277\013\275\2660[6\257\277\227\010 \224\352\377\215\277)\251\213L\302\265\203?\237&\005n\363\305\263?\201y\355\327)*\251?r\341Et\256\236\223?\303H\003\2154a\243?\0209\214\211\030\371\200\277\257\227kl\241\213\272?h\305h3\237\207\256?\202=?\374\3511\235\277\004S\201n*\367g\277\031;\365\3645\270\255?\002\331@\205s)\251\277\017\273\237\237:\364\264?\2075!5t\335\251?N\350\371\362\304|\234?\273R\003)3\345\262\277\220\265\255u\014\033\265?j\246\301RT\261\213?\375\026\261\245\265J\265\277\203&L\016\272\306\252?\240&c\301\332\360\274?/\313\203_|R\226?\322#\223\241\265\030\260\277>\223\003\256?\206\256\277 \307\257\234\3060\251?OG\003\345&\\\260\277\035\026{\247\021\007\231?Cy\330\023\331=\260?\337\366r\003\242\212a\277\372\205\252.\360\232\204\277\236\322\020\361\234\220\237?\0205T\006\212\276\264?\235K+\032\277\260q\277\235\016\332\224o\351R?Y\232\304%8\002\270\277\3508aL\354\272\223\277o\364\034R\220\241\244\277\002n_W*\225\261\277\323!\307\006\311\217\243?\017\334\346\362w\330\237\277)\255%\272N\003\243\277\220D/\342Y\205\231?\204\217h\360=\344\271?xn\220\262\203\217\223?t\260\225\0104\304\244\277l\371>\277\315o\245?N>\206\223\000D\253\277I\276\322\241r\250\221\277\303RP>\314\237\021\277\003\341\225>1\354\276\277\261\353\313\3355V\241\277\227\337\237,=\242\231?\224\374\253\177\350\032\225\277Z\317\223\210z\006\260\277HRk\241 .\231?L\337-C\226j\261?)\241\203\252w\021\273?X\213R[M\353\245?l\316(\025\006S\235\277\263\353\2541/h\232?n\302c?\234S\234\277\377\357\365\2439 \223\277R\334\274\031\010\004\246?\207t\231\013\"\213\220\277\315\331\027\256\260\r\242?M\256*!:\367\217?\036.\351\270:\302\245\277\314\353\260\246\007\374\226?yEqE)H\255?\205*2Y\007\363i\277b\210\364Q\316\217\260?\\\032y,X;\267\277\214h\034Wx\230\251\2777c\325\266\307\310o?P\312\212\341\0014\204?\327\204\02617cy?x\244\316l\232\237\247?\350A\220s\332D\272\277\027[$\374B\345\243?h\330\320\261\212+\254\277\334\322\004\217P\007\252\2770g\306\215^\217\234\277\037\004\3372\354t\252\277\340a\231\206\215\372\255\277\007\014\353#\231D\255?\025\245\036\321\253\214\223?\357H\262\341\216\005\236\277\356E\006\276\222\360\226\277s^\233\333JJ\244?\212\366\232\270\241(\253?\033t\264\354\265\342\203?\366\313u\260J$\263\277\330>~\307_\\\240?\261\221\016\202\rP\254\277\327\277\017V@\031\266\277\363z\332\220|\316\276?\373\374\243\020h\215\260?\353\307\372EJ5u?\2172\255p\017\222\262?\022\\\262\r[J\214\277v\350\030\276kg\213\277\275\277\267fasWO\246\277 \330\334qCxv\277_\324\266\211\247\203x?hn\272\237\347>4\277l\345m\324w\024\200\277\307\330r\374p\251\223\2772\253\267h\273}\267\277*\214\255\246\300\337\274?\220Z\312H\213\322\231\277f*\337\354yi\201?}f\365\310lJ\234?\302\254\350\312\346\314\245?o\306\211X\246\004\224\277>\217\000#\317[\265\277\370\020\363h\0167\267?\241N\027qbA\271?*\356\355H\260\016\225\277=0\355Iug_?\264\220\341Y\305-\246?MA\233\220R\030\264\277\307\231\277l\206\027\256\277\322\303K\310\222\347\263?a\261J/5\317!?0\224D\271\345\031\236\277\370iq\363\263\223\207?X\010\371\347\3562\262?\223\220\324\306W\253\227\277,\350\005\275\357\266\235?Q\nj}\366\004\272\277\274\220\257\223k\035\266?++\345f\030m\252?lI\0229\335%\224\277i:\337d\\~\212\277\201\034\245o<@\232\277g\036K\313^\372\247?4\353M}\302\313\247?\301\376\0177\017\027\273\277\300f\321T|\177\260\277d\232\375U\202\006\264?\204v)\234{V\255\277/:\"\016\001\303\206?\210bD\321\244\323\243?\n\201\277\316\2113\266\277\314\211\342Nm\345\250\277\363\341\230t\303\223\256\277\276\303E\026\363\310\271?\2761\026\362\025r\241?z64|\215\306|\277\305\226X\204^\343\265?Q\221JUW\220\241?\251)\202\302S\373\243?M0&\t=\322\232?\3346\205\300\033\020\251\277\372\027\177\227N*\272?C+\3156.S\241?\301\352\262\342\215C\201\277}\237\331<6h}\277\325\\?h\004\322\235\277\247W\226\350\177\341\224\277\364$\317\357\2757\216?&\237\263\276\360\030\275\2775\201\326\225S\001\252\277i\316\377/\024)\232?e\267\276\216s\240\207\277`.\352\230\230A\236?\212pb\353\231\336\260?=\020\262j\3253\216\277\225 l\335\363\007\260?l\246\307\352B]\222\2771 B\251R\321\261\277\305\377\205\221J{\250?\355=N&\272\352a?\227u\177\001\345\226\263\277\362\271\020Y\351J\251\277\344V\311\327\366\250\254?2F,1\356z\262?5o\332\212\235\246g?\037i5\301\007\272\261?=2\334g73\205?\007\207\331y\272e\256\277:zZ$lHp?\246\352\016R/\373\301\277#\3415\354v?\240?49)\343\020P\260?\t\2047\246#\204y?\324\327\221\242p!\203\277\177\371\337\251\227X\245?\356\204i\303V\305\267\277\367/gs\204J\240?\332\362\245P\227\223j?\033\265>1^\261]\277\325\226\305\371e\'\240?\035R\207\331\376R\233?\217N\257\027\327Q\267?l\213G%\002\256i\277!IA\211\025e\242?]\324\356.e\023\264\277\016=\016U\272\277\255\277\036\210\010\002L\361\262?o\037\252\276~\021\246?\220\207\260\000\343\373\223?X4\213\246>\312\231?\351\233k\216\2333\260?\360x\3050\236\354\250\277\302G\301u%J\236\277M\332E\203\264\030\300\277VM.\036u5\260\277\\\005\234Q\271\335\243?.#\273\353#\215\257?\350\314X\246\n\336\215?;\275\327\246\014\247\250\277Z\235\301ETB\267\277n\022\330\366\001}\265\277x\345\212\233\221\321\241\277B5h]\204!\250\277X\"\330\255\337\317\270\277J\356J\236[\032\260?o\030j\006Dm\262\277\213\0320\177\234\t\275\277\365\343\021S\243\031\241?Z\263!\363\241h\261?\366\2650\213\2243a\2775\254\010\204\332O\245?\000\214Qw\263n~\277o\227\367\231\231>\243\277\000\210\245\250\363b\216\277\333\273Q\013Bx\267?{2C]\207m[?}s\2376&R^?x}\250\2110\375~?\206\014\312\272\223\356\254?\343R\242\3213U`?\013\355\020\203?\tr\2777r\253\217\247E\222\277YB\236\020Z0\271\2774\355&\351\344?\206\277k\322\334Z\272%\267?\217\033\220c\342\323\244\2777\230\022[\315\333\277\277\346bZ\234\007\250\261?\316\367\345Y)0x\277\351\010\267Cz\247\204?\211\325J\207\232$\244\277\006T\276\300K\377\226\277u\3352\033OX9?\212\206\276\ri\331\263?B\271\353#\332s\261\277\223\226\241\025\347\302\245\277\244\301k\271\257\352\215\277\245U\325\2560\234\242?o\003\275\025\312\340\273?o$\271\216\235\234\274\277OQE\030\202\302\241\277\336\177\307\347[\217\300?\252up~!\240\253\277\264\037{\025\263\377\210?v\024\007\033Z\375t\277\022\024\226\r1\364\300\277\322\254\361\016\240\204\220?\275M\226g\316\031\264?\214\217\320\332\260\307\265\277\r?\241\337(\013\205\277\2254:\250\326)\254\277\222*\231;},\240\277i\277\341\020\326\255\276?p\372r\336\204\267\255\277/\235\276\315\203\236\263?\231\260L\363\265\207\302?\2046,\227\352i\202\277\244Ih\371\214\216\206?o]\362\336\177\017\253?\\u}\031\225\375\266\277pm\331\354\216\364\237?1\003X\227\235\271\234?\332\031\366g\345\035\231?\000N*\272\355\004\205?\310&\320\347\271\224\226\277\r\271\312\304\"^\240?\241]\035\331\216\300\247\2778`\234\001\351\245\240?.K:vF\251\222?\026\334\232\314^\237\242\277%\0031&q*\236\277\376\3650e\367\246\247?\224P\t]\346\340\227?\375\370\227\355\313\210\227\277\264\002\333J\205\343\207?AVL\006\016q\270?\250\030\264\210s~\263\277\311\020s(G\250\215?\245\337}B\371k\236?\020\271\340\2434\375\223\277\355\004\321\202\333\211\237?\267\020\236\223\230\245\230?\345x\331\255\272\252\265?\214?m\016\020)\260\277\212\330\n\357l\354z?\311\335\311\370a\341\241\277C\351\223\304x\304\220\277\211\"!\034\343\3367\277&\312\374\316\221F\202?\266\211rF\035\256\242?\337\307\020Bk\356\261\277\271\025\272\305\242\247\250\277\210\253\312\023f\235\214\277\263\3446z\202\267V\277;\3324\214\312\321\251\2772\nH\247\376#\217?\263&\036\303\237{\265?\314\371\231\021\'\353\246\277\204\033\027s\\~s?\252h\266\007)f\244?\306\211\344\257y%\251?\273O\202\242--\222?Z\272\273/\203(\242?%Hn>\200G\250?n\r\345]\\\327\303?\357\016\002\300)\000\212?75n\305n\376\242?w\taA<\023\251?uTco\243|\251?\303lb\274\274)\246\277\240b6&\206L\275?\247\"\014T\375\016\254?\2452t\000L\321\256?3\367Aw\025\373\262?K\210Enm\212\277?\227\236+Df6\244\2773x\217=d|\261\277\264\264&K9|\213\277\376\376\247\032\3217`?\275\316\031\034`\237\231\277\376\262\332\371\014\344\261?\275D0\357W\364\243?5\213\261H@7\262?\004V\222)\3452\254?\335\314\313\200;\341\261?\000\024?h9\341\233\277\226+\004s!M\224?_v\251\251\352\025\260\277y3\267\311;}\250?\370\014$\201\346\211\212\2778\362:\375\333e\240\277e\306,\322\3625\270?8\277\2164\276\245\236\277\301\322\325e\320\022\270?\357\367&\315\216e\265?<\005\025\201\262\2321\277\265\264\326G\024.\236\277\252\365\225\244}\353\260\277\255\246\355\"\350\213e?\037\306\356\230]\214\205?\301\277\016a@u\256?\341\305?v\353\006\235?\340\250\265\310N\320\250\277\332sn\325xV\236\277t\335\n^\007R\245\277\321\273\200=\243\336\254?\212\t\037};0\264\277\376\270\342x\274;\202?\331\247\363\301\261\013\223\277\272wZ\321\257.\206?C\272\177i\246\320\245\277\264\356\367L\312\326\270?\325\257i\367]\253\264\277k\004D\372\221{\300?\315\300>\231U3\262?\360\276\337\343Y\211\223?\2453z$)9\277\277X\023\203\n?\036\225?\305q\2158\000\340\261\277\301/;\377,V\254\277R\300\'\016\214\021\270?\366b\210d\260\271{?\276\303\372A;\352\225\277\313\234\n\311\206;\201?\245\327\243P\2245)\277\r\301\027bvy\255\277}\241\372\223\272\331\262\277?\240\224*\373\256\262\277\316~\214\030\201\016\306\277s\332x\036\001\263\224?\017iW\211\337\252\206\277\007\370\364\327\222\216\226?\017\357\207\326\372}\253?\205\023P\215K3\271\277\231\016]\030\310\370\263?\032\252L\0161\204\231?\2214\211\020|=\264\277\205;F\336(\374\205?0\335\222w\376h\261\277\350XP\256\315\222\265?\335\245\312J\010\006\207\277?\244k~\316$v?k\223\317w\324\271\247?\230\217\307\236\242\322\242\277=\351\036\202ji\270\277\335\353fE6\250\221\277\317}\363I\021\221\267\277\260ivwu\212\250?\016\346\244\013\023\324\226\277\242\257\025\024\225\252\222?.\321B\237\273x\217\277F\026\346b\030\205\253\277\360\016&\345\240D\212\277\356\212w\307\322\260\227\277_\373\006\ts\346\220?\333(\215g\363\023\247?\026n\202\2637,\276\277\036N~\016\340\320\207?\204Stb?a\202\277\"\322=8\233a\246\277!\302\0225\361\033\204\277@\345/\370\026\005\220?\201\263\206\016\2568\256?.b\250\216\275\355u\277\002\364P`\353\313l?3o,\261\004\003q?\223\243hl\327^\236\277\017\225tm\361\345\221?\315\343G\000\330\327\200\277\026C\360\321\272\021\257\277\272\273\327\351\335t\227?T\262\253\356\303\037\255\277\216\301\2625\363\332\216\277]\324\344)\362~\262?\021\253\325\253\032\254\267?\346\351\344\336\r\326\250?\327\2703Br\214\200\277\027r\253\277\031>\220\277\331ya^{\r\242\277g\253#\223\273\205\236?\376\373T*\304\n\262?\334An\037\335\327\273?S\216\031\255\'y\277\277\003\235Y%\002\354\243\277\230G\214\350\326\005\261\277\376`~\277{\220\204?Vc\023\251\304\026\225\277]\352\233<\364\022\247?5I\016\0013\371\260?\010\023\224\377\337\354\240?|k\320E2\007\275?\335Z\333Ga=\233?\363\006\362U)7\254\277\231\r~!g,\243\277\"\353\347\314\243\271\220?\030\226\340\206}3\253?C\027\372g\201\234\260\277\364\252H\277\030e\241\277\036\206T\014\331f_?3,\205\373\000\213\256\277\331\234\231\225\007\363\245\277,h\324D\251d\255?\265X\301\214\005(\231\277j\362\246\220!\230\260\277+\377X\376~\307\233?Sm\304\313\031\021\252?fk\212<\322\'\255?\352\326\335e\2309\231\277\347\\\240\371\0035s?\332\036\020\366\251\200t\277x1\233Dq\003\275\277+\336\215s\2637\271?f\236\324x\300-\211\277\360\323\352\013\202\312\212?$j\014\237\237f\264\277|\351\t-g\306\222\277\332\322\007\246\343\224\252?8w\333\233$b\261?\026W\001\220M\333\252\277\024H\365\206\316W\257\277\200\267\231\351\007\365\300?\341\257\256\224L\t\212\277o\204\312\353\265~\204?\220js;\177\363~\277\201\240o\217\215\332\226?WBc3\025=t?\253\347\242\374\205:\276\277/\013\333\365Y\220\200?\304zi7D\241\177?x\207\275\231\013\324\241?\273tJl\325\033\201?@\300\243>\366\372\261\277\3323\341\226\243k\257\277\320\334gp\371[\255?Y%l\351\363\325\234?\337nI>\022\370|?\217M\303\372p\004u?A?\305K\023s\234\277wD\010\005\324\223\302?\357\313\tr\334n\264\277\226\014\200u\030|p\277I\345\327\372!:\242?\210Z9^\345\350\206?L\310L\227{-\256?\326(\301\034\212\375q\277\216\303\206q\037\300\261?\\\255\221zm\336\260?2[\210\244&\352\235\277\251\321=-\014\014\220\277|\211v\"\246x\207\277\020\323xU,\213~?\322\3278i\336w\241\2775\r\t\300&LP\277\nf\016\363$r\270\277\335\223\230\3046K\257\277\312\025C\275\317\261\223?\204]L:\177A\212?\361**$\273\252\261?\242\232U\262h$\251?\221\332\232E\235^\255?v2%\377m/\211\277E\311\035\021\020\363\225\277b\036\032o\252\240\252?\305\365;\2241T\241\277U\034\037B\263l\237\277@\001\376\240\250H\300\277\222x\200\267C\304\271?aL\210\033\331\351\221\277@o\346T\212\201\235\277\271\325\340/v\317\253?\346\353\267\302\004Y\230\277\204a\376H\335(\264?1\235\250\343Li\271?y\214\226;\320\223\225?\177\005]\273\213\263\233?\244\245]]j\314\270?\027\365Y\377\3008]?\247\342\231\222\250e\260\277\367\025\337a\216\010\231\277\362\364\370\343\270I\221?1l\252\2371\315\266\2772?\256\366\036\021\240\2777;\257\3174[\263\277>\005\014n\217,\241\277:0\306\244\030]\225?w\215!\224\351(\253\277J\347\224\031\223~\251?\364\357\331A.]\242\277R\356\211\021r)~?\362\243\0365\372\027\260?0*\003Y\361+\226?4]\0011\037\273\230?\226\374\243\300u\241\263\277\033\'j\322v\252\256\277m\236\337\356\'O\273?\264\225\275\002\367{\233?\365\273]\376\367k\241\277\016H\301q\363\035\261?{\357\373J\201\315\220\277ZB\267\362\376\031\267?\3647\304\317\246\266\252?\330\270\355\301\n\305\227?\315\024\212P\251n\224\277%\340cu\271\375>?\310)\037z\323\251\301\277\n9\370\310\262Oz?\024?._| \263\277\351[\363G[\271\231\277\320\241\302\353\017^\221?9\373>s\321\212\264\277\246\0351\241\321\237\275?\210\351\202V\\\360\242\277e\346\373\262\260\216=?\350\017\373\217:\243\252\277\026\321\270\3722\330\220\277t\370\277yV`\260?\226\000\306)_L\215?C\370G\221\244w\245?\034\207\311G[\305\252?\350i\235\020\347\227\243\277\023\251\000\227y\215\241?~\374q\ru_\222?\205\251\260\356~7u?C\231PI\267<\276?\231\036d\242\271\302\243\277\024\304\321s\272\033\276\277b ~\205\014\315\261?8\264\353Q\004u|\277\342b\324Zm\016\243\277\260\014]4\t\341\204\277\326RHJ\340z\256\277h\235Wt\357\213\203\277FD\336\347\251]X\277\004m\3671\260\022\240?\314\314\324=w\036\261\277\3672\360L\277\202\273?\335\344\251)\021\304\242\277\266+\217\2466$\214?!\254\036\311\341\001D?\3137\340}\371\020\255?@\0340\355\3367\225\277R\375d\272\'t\241\277\023yD\304\231\305\200\277\304\027;\311\010\323x?H\276#\250\021 \247?&\224lH\355\233\202?|\034\257\260gg\247?\034\356\230\006\2145t\277%\321\205B\373(X?\225\362\210\n\305\250x\277\036\344\203\250\010\234\246?T\002H\302\374\004\233\277d\301WM\343\235\264\277\314\276\031\212E\271\237?\033ao\272A3E?\377$\274\025TI\251?\025v\311\004c\252|\277\335\201\206\020W\346\214?Y\352B0\371IV?\215\361\361\200;\226\262?\3437a8\224\305\232\277\022\334\034\362\032h\252\277\020w\212b\231\322\216?\344\004tT\333\237l\277\035\210:\367WQ\253\277\331\010\251\323\206>\203?\364\023\0170\300\205x\277\324\323\372n,;\246?v\250\266\372\235m\270?\000\271\033\266\315,\234?\000\212\244\371D)\262?\255\225;a;\216\255?\210\350\314\320\321\232}\277Vf\177\371\322@\257\277\330}N0\007\240\260\277\266\275\362q>\324\264\277\0149\364m\371%\235\277\216\357\252z\231\367\223\277^s\344&e\2170?E\225O\022z\203\277?\262c\364\272:\265\251?\245\323l\345sc\215?\245\306c\365\324t\272?\307\314\205\234f\355\260\277t\266\314\033\310\353\225\277\256\205\327J\360\254\223\27789N\266\336\001\272?\367\202X9\037\010\245\277v\274p \037\373u?Y\367\035X\247\237\204?\001\tb\001va\201\277&\361\254H\003j\256\277\231\n\307LA\031\233\277\200Q\275\304\262$\257\277\327\214\217\215d\"\234?\333qz\t\314\177\254\277\300\341Y M\023\241?\205n\027EFa\234\277\211\331\211G+&\264?\220\303\231\005\030\310\214?\277\232\215{\326\004\250\277\275\004l-\2070\227?j\221\030\257C\343\300\277\337\235{+\247$\240\277\315mf\312|d\242\277\320\210\224\331\363\013\277?v\334\255Ak\255\234\277P\366\221\001\005\010\230?Q\2313\247z6q\277v\021\264\257\350\230\261\277b~7\243\014\207\243\277\024h\366\307(\237\250?JRu\211\021\007~?`Cc.\257\322\246?\344\214\231\260Q\225\244\277jMB\373n\037\200\2779\321\362*\027\'\234?\233V\326]\000\314\261?\303o)\233\250b\240?\255\035\244Kcm\263?#\211\340\367\346vZ?#9$\371\242\254\262?\341\010\370:\331r\246?\322\373&\333\252U\246?[\262\261\333\0212\216\277\374\025\267<>5\240?\265\376\204>o\033\237?GT\\\013\375\214\267\277\344b\0257\330,\256?\276a\312\276 5e\277y\263^P\2302\265\277$\210p\243?\304\206\031dG\264\256\277\211A\277\025`C@?\032M<\335\231_\251\277\302YH@\217\257\270?\333x\2136\374q\264\277\262g\315\013G\360\265\277Z\247-Z\001\007\230\277 \302s\273N\206\260?\3557\376\326\032\022\264?9\221\330\300\324\305v\277\276\020j\"\252\006\231?M\261\3401>\264{?\336A\265?M@\231?\227\332\006#]z\246\27756\210y\023\327\255\277\214l\037\n\310R\266?L\201\257H`n\242\277ROjw\337\360\217?\354\226y\201E&\220\277\364W\345\303D\270k?8\345jz\020\211\200\277/\026R\303\216p\271?\274`\254\327_l\204?\035\327\244\261\225\343\206\277Na$\272U\000\200\277\\n\017&\r\272\245\277d\027}\257G\340\207?\022G\307\221`\025\260\277c\277)\"\266\017\263\277T\265\331\202\302#\240\277&\332\342\362\327\316\241\277>\010\252\370o\320\301\277\313\003\310e\331\347\264\2771\277\307\023\221m\213?\277\226\207^\322\361\260?G\272nZwPM\277\264\276i(\243\003\213?\302Xd\301\017\033\256\277\020\034d\355h\316\264\277Pe\245:\3604\300?\2529hTp\232\226\277\302\361-\020\357\377\r\277p\266\203\3531\277\244\277\203g\024>Z@\240?k\013kg\350C\272\277&L\267/\353\006\257\277\217w\242p\0138\234\277\177\313\326\036\350`\274?\0137\036E%\204\265\277.\256\034\356}\216\222?\312\210\240\230w\274t?\315\353\302\335\\\251\240?\367\203\277\360q\250\261? C\222\340\302jy\277\260;\233\361~q\250?0\242\007\342\226s\244\277\323s\344\233\036b\206\277\225\373\265\356Z\213c\277+\213\304\313\007n\244\277\037\224\177;x\243\263?\332\265s\002\022H\233\277\203\217\336\036^\200\264?N.cewW\205\277}Zq\'P\002\257\277\0010\006L\017\224\220\277Eb*!\'\266\236\277\344\026-#!\030\276\277\nv{G\005s\253?\022\01026l5\302?\322\354A\235#\001\233?\t\363J\301\023\324F?\252l\315\333Sp\264?\242A\264\201\201\271\275\277\251\362\376\223\233O\223\277\341\232\3441\227\210\243?P;\204\321\250\016\260?\317\310\247\324\372\n\266\277\025im\037E\037\247\277\367\r\0264n\224\210\277\325\024\234~\017b\245\277\3770\216\210x\371\236?\322\235*\222\031Z\255?\352\366\215*\323\000\204\277\246>#\001\211\"\245\277\356\206[\223\250\260\260\277RP\266>$\230\236?\226\200\014\244\326)\261\277x\323%K\372~\245?\203\3121\020(\242\242\277\271\355\273\375Y\313\253\277K\325\253!8\265s?N\004\327\202\347?\241\277\253-\217t\363\377\221?)\337\277\032\200\231\261?T\371\003\002>G\277\277\364h\245\234U\331\263?\022\202\033\225v<\245?\352\246(\2258&\246\277D\274\322I\010\257a?\300\304\217\354\037\372\243?A\243\373\202S\376\201?\265+\311\256\376\361\201\277\220:\256g\036N\216\277JI\014\013y\364\250?\303k\363\250\216?\203\277S\035\271m\002\036\203\277E\306\025\022(\007\240?\220<\037\240\257\311\234?\323\331l\333\n\034\221\277\240JQ~\306\347\244\277\264\324\241(\257\351^\277\004\001\326\177\226\314\247\277E\356\316\363\027\217\272\277\374T\230kt\026\243?\275u>\263`6\217\277\361#f\360h(\224?\364\370bh\362\257\242\277C\035\315\037\2609r?N\242S\223\236v\241\277M\217\255\262D\312\232\277\352w\332Iqg\203?9\014\210)\345\200\210?&\307\337o4\022E\277\253 G\000\320\231\201?\005<%E\356\315\211\277\347\021h\000#\004\250\277\344\033\320\3425\203\243\277\242<5\323\003\272\264?\242\377\rNF\010\257\277\276.j~t!K?\003\276l\335+\322\253\277\365\200nS-l\252\277I\220E\275f\223\332\276\376\347\232?{\225,\350h\236\303?#\323\260s\354\376\222\277\213\027\301V=\240\270?$\245jD\344\247\261?\246\005c\214\212\203\240?\324\207\205\257\2234a?\275\333\027m: \263?\250\240\220\310\361\324\241\277\364i>dSC\245\277\335D\343\346\336/\245\277j\260\332~\027\362\211\277\003\035\302\364\022\227\240\277\274\313\333\253\312\351\210?\003\227I\263\3715\216?\344\335\353\304\365\014\213?\367\275x\340\020\177\206\277\212_/\335\272\353\257?\360\345\2637\212p\261\277j\307\n\027\334d\220\277^$5\314\345`\212\277\2574\341N\305\337\214?(\320\0169G\222\255\277Z\237\266\025^$\236?FU\271U\021\312\266\277\361H|j\306\241\202?\334\022\213q\343<\267?\010\202}\250\031\235\250?\177Ct\036P\003\236\277L\240\334\265e\016\275\277\371\r\260\317\216$Z\277\202\3423X\234\321\233\277\231\213d\2100\010\204?\336\243\366 T\250\216\277\221E\242\234\321\rb?\205\006\266*\275\321\205?LQ\314\'0\212\301\277\267e\341\021\306\307\300\277s\010Nh\362\002\210\277\216Zgu\036\265\221\277\007\301q\3043\017\247?\2234SX}`\270\2773U\374\227\331\3175\277\333\254\031\351\246\242\235\277\234?5067m?a3\3665;q\257\277)\027]1\340\273\247\277\007\035\0277\010\323\266?\276A\243\357\362\203\264\277\333&\330\345\314l\234?m\346\2351s\002\267?\217H\3418\376\342\224\277\277Z\033\357 \323\202\277\260b\014\205\000V9\277\324\036\206,\016\217\250\277)1\357 \210\231\246\277B\225\034H\346\203\245?\371\215\323p\277\r\267?\201:`\231N\222\224?h_\220\024\'\\\201\277\217Zr\276\334;\242\277\301\222\326g\323\310\246\2774g \237\277w\367\240{\315d\243?\215{\267C\313Y\212\277c\t\030\263?\372\264?n\277=\366\232g\242?\201\242\354\241W\375\274?\236\020a(*\340\245?a\222\306\371F\377\235\277\023)\375\024K\253\266\277\337\226>\340Y\277\247\277\203#\310\337\013C\227?B\370\023\202}k\254?CO6]\3432\226?6m\351\220W\341\271?\301:\036\353\216\337\203?\267\325\374\232A\355\261?\020H\237\226\326\360\245\277\266\25362\201\374\251?,g\007\177E%\265?\275\013\3202\217\331\204\277S\004\306\264\205\331\245?(9\211\317*\237\233\277e>\374C\331\224q?\222\266\2657x\363\243\277\014\\\216\3453\357\251\277\0340\014\247\016T\262?9\2072\275\177_\216?%E\347q9\303*?\0068\330\2032\366\243\277\021 r\274^\255\263?\3779\353io \233\277\355\324\203\353\304\364\243?\337\256\245\002P\030\243\277\022\010\205\002s\024Z?\257JTY9\005\222?\224\0069\2627C\221?\325+#\234\315\251p?3\306\016\177\262\305\261\277b\313qb\210i\261?\271\227\373\r\014\327\244?\365\204*^\000\r\275\27771,\335\317\034\240?o\n\340\225!\353\244\277\344r\215\261R\317p\277\251\\l\356\305:\233?\017\313\203\232\326\035\236\277c@\335\371\024\204\260\277\177\3704\003\337\300\261?\311\004;\324\013\321\240\277\264%\267V!\255\255\277vR\242\201@d\255\277\263\017j\367\025\351\256\277`6\364#\243\200\241?E\305\004\336u\017\271\277)Z\315\275\324\320\215\277$E\266c\021\240\263?={zZ\231F\240?\245\357\323\207w\014\234\277VU\351p\r\030\216?\374\344o\345q\241\300\277I\rf\035\003_\266\277\357*\357&_\035\245?\222u\013\341\\\340\216?9\273\242\221\317\361\221\277\232(\0222\373\355\263?\372U\246^f\'\260\277m\232\033\205\306.\262\277Bp\026SU\030\241\277$\2277\333\213\232\243?\220\341\246Z\2624\254\277E\241\214\001\006C\255?\n\275rw*k\245\277\275\224:\t\355\364\267?\336S\241\346^\032\301\277,\363\202U~;\264\277\001\3028\335j\303\265?\242\243/\032\010\354\221\277\2274\200\242&\023b?\252`\201\021O\t\224\277\037\035\t\366b\006\264?s\2263\340\224\344\\\277g\030\032\217|\036\241?\351\"\224F\2770\241\277\371\240L\327z\216\263?7]\261\270W\216\241\277\352\021\330L\232J\222?\331\332\241Km\245\262?M]\367\365oJ\274\277\235\370=@E\374t\277\217\230\362p\"l\262?@\r\036\213\272[\261?\326\212\276{\202W\256\277WG\274T\303\257\226?\237\013\003\206&\206\265? \023\226\237\010$\210?Kz\370\347\023,\252?\224\236(\337\024\200\232?\017y!1K\360\223?\211T\240=8C`\277\353JTm8ij?\354\325\232\260eo\224?\221_N\354\267\340\261\277rc\014\275\342\201\251?\023d\005\267\205\364\255\277~:\002\017\035\002\223?-=_\232Q}\300\277\334\377\337\327\364\211\224\277\251\217\374\352\211q\224\277\340\205\324\246\"\024\261?\214\240\221\242+8\253?\311#\242-3\317\177?\263\211\377\207\261\262\225?\220\240k\322>\r\276\277z\332\344\236\010\031c\277\244\315\340O*\355\250?xCS\276\307\210\235?\002\247\377zoj\245\277\302\026\355M\257\212\220\2776\274\2232\306\004\240?\357d\374W/\rQ?%W\216&\365k\241?\322\315\221/\213\340\260?e\265\335\306\337\201\223\277\202\206\277\253!p\263?\374\007\022ioK\227\277T\027\"m\362\\q?\355\000U\035\374 \223\2772\205ssC\321\254??\004\023$\264\324\212?\2127\273\013#\364\272?\014\326n\034\023t\242\277\233\277\360\027\010\317\262\277\022}Co\240\033\301?\246a\351j\253\274\177?\276\323zp\325\024\222?\275Z\355oef\250\277\311L\224,\340\313u\277\200\010\216\365\0246\264?L!\315,\232\351\223?Y\256\324\271\035\327\241?\344\254\366\300\357;\205?\237\204\030\321\225\"\270?0\231\003m\010\271\220\2771\370\237*oK\213\2779\005\021V\343\333\301?\250\026\242\022~\023\217?\3465=\345\316\212\230\277\336\221\362\344\n\265\201?\330+\317\206\340\357Y?b\344\270\360\274\370\242\277Ce\205\277\220\257\202\277\350C\340\227i\n\210\277\260\230d\210\343\222\250?l\332ME\375\370\231\277\335A7\300b\315\267?\336`*\001\035O\227\277\327\350\344T7\037\255\277L\202\264\342\276\206P?V\204\3723\373\n[?\233\264\246+)\321\220\277V1\243\351\033\260\262?%\276Z\215~>t?\254\206\375\252S#q\277\206w[f\330\027\247?;,z\031\332\307\264\277_\270\347\301\210Q\250\277}\235\035\310\007)\203? \005d.\200G\250\277\030\202\231#W\245\262?@\365\344\351M\375\203\277\261\307\256\331\237\340h?a\234\255\257\204N\244?\220\260\361\324a\242\250?F\300Z\265\372-\263?\265\3567\005\010\224\263\277e\360\365Z\231L\256\277\352\000\303\305\'\340\240\277\361\014\035*\223\201\227?\356\226\267\013+\'\211?\344\347c\000\026\233\226\2773\276\006]\244\273\251\277\027\250\373\326\210\244\263\277(\217i\033\224,\253?\216\306\317\331r\034\221?T\277\230^\331\033\265\277Le\327\201i\240\250\277G\253?b\313\002\n\326\242\254\277 H\362\314\226\226\270\277\360\2573\355\230\230T\277\335\324:\257P\034f?\250\256\261)\371\017P\277\324\353\021\036\242\236\264\277%\312\214\241Z\344\214?V\201\337Z\366\220\263\277\014\351c\201\2039\232?\021)[\243\261\212\270?\312\312Y\366{\006\242\277\307\000\276^\207\246X?\272\212\343K\271H\226?\317\020gQh\006\264?\262\370\n`\235W\246\277\306\215\272\202\004\351\242?\232\263vI\365]x\277G\233.\001JP\267\277\006\353\222San}?\024\'\227\361\277\337\264?\351%[IW\221\260\277\271\363\345\274\216\325\264?\357\265\216\000$\211\262\277\330\"(\202\013^\261\277)$)\371\303-\206\277\314\013\204\346O\250\205\277\300\020^u\0317\263?\346\020\242\320\262\376\255?r\275\366\000X\301\271?`\317Mw2\211\240?\312\267\237C\354\033A?ZJC\264\226p\262?H\'Jd\353jy\277\271$l\220\307\207\252?\315\254(a\360\035\245?\234\325/\253\347\212\261\277\233\322v\326\3337j?\003\231(\207\202\022\242?\'\346}\347\'\216\260?\310\226\313$\341\221\265\277\254\263 eD\006`?)\000\267\036)\302\263?\346\372\010q\217\277\211?\245b\016H\026\304\232?\272\230\205\232\237\004\300\277\3422Q67Y\224\277\241\213$!\005\354\261?\261\002\001g/\033p?\331\217\212\324\'A\222\277\021\275\323\346e\373\272\277\002\312\336R\340\256\210?\003\322\235\325\201k\266?\030\261N\017\375\034\205?,\253\271{\377\022\244?\3761\233\364\003n\206?\357\004K\322\000\201\247?\314L\214\3216\t\242?\373\263#$I,\273?\206\206\275\036\035\263\257\277<\013tv\021h\213\277\364.\022|\0350\244\277\374\'3\002\224\343\265?\204\020\227\332$\256{?W\323\322\n\272\237\216?\361\200\331\260\201\246\272\277q\342\257\n\032$\217\277\356\014X\341\001\371\263?x\t\223\212/\020\260?\343\003N\220\032\024\246?V\030\321\235\371\301\272?\301\231\035\266p\217\230\277\301\204\213\373\376J\243?q\037\234\364\350\254\265\277\005\n\253\004?g\261?\311\353\017n\305(\250?B\317Z1\3606\302? OI\253.=\222\277<\221\313e\251-\263\277&m@\251#\211\235?+^c=\rp\222?\377\326\351\001\021\364\225?\314\363\321=\177Em?\021\371\260\006\244\315\243\277\036\016\036n\257\313\252\277\224\314\257+\376~\264\277\033\351\344\\\r\254\251?\347\253Id@*\257?\324\234{\241?\332n\002GH)_?\254xb`wG\244?>D\256}\300\033\241\277G!`b\233\357u?\2727\010\021\020I\246?\243\357\036G\377\005h?A\341\263\360\365\244\247?2\255\034\2009\003\203\277T\037\3448Tq~?\024\250\271\215>)\214\277\234\2151T\262r\220\277\200\243\020\235xx\220?:}\270\275\000\360u?\242S\312\373\273\255\252?\305G<\335>\307\241?\345\013\332\371\323\303\257?\222\331\272,nH\254?\331\223\316J\215\232\240\277K\005\310\006\020Y\253\277%y.\235U\267\256?Y\177\031\257\363d\272\277\373\216\264\367\365>\223?`\246\251\037\324*\251\277\3140\037\013\3578\216\277w\032\361Pe\274\224?\002\'\366\340i\300\201\277!\276P\242\362\225\244?0X:\345\032\220\234?5\317\026\300\330B\200\277?\313\017\323\"V\260\277e\3351\005\331\243\300\277Pv\267X\0228\255?\013\350]\212\266\306\215\277\027M\262\'cs\277?\335\206\326+-Y\230\277\034X1\317(g\263\277\355\216\361\202$:\223?t\313\004\325\275:\245?u@\347\240=\317\265?r\035\234\265\366\343\216\277\204\350\'\311\270_8\277\256\024&\305J\251\230\277\032|\016\2314t\260?\253\302#l\030\333\257\277\0305\347p\361O\261\277\034\205d\300\221>\252?\227H|\302\204\363\221\277\252e\310\205\242u\246\277\322\337\315)\357\365\227\277!t\224x\177!h\277<\336a\3514@\212\277\016\306\271!\234\275\270?\321&h(\336]\256?\322I \\\305\204\260\277\210$\r\377^\372{\277R4\033j\374<\244\277O\r\215\322\266\277\326\372X\271\341H\205?\366\221\324\177\367\332\264?\306 \207\256\264t\246?\004\337\263\177*\324\271\277P\267oQ+f\242\277\000M6\276\273\032\250\277P$fK\260\t\225\277\202\t\'\033\204\n\253\277\260\361[\203\320g\237?\243K\030\222bN\235?_vllT{z\277w\026\313\337J}\221?z\n\205\322\236s_?\t\203;*\237\'\244\277\n$\353GE\r\227?\324{\'\021\305w\217\277\215\243\262O0\351\234\277\3314\275\310I;\261\277\220\2076H\246\320s?%\312\212\004\\\222\263\277\330\312?G\357\302\266\277\227=\026<3\313\255\277f\333\300\311\200\335\261\277\362r\203(r\320\202?\227L\031wM\204\264?6\343\310&\023\022\244\277%/\334)\310b\245?\r\312\326?xl\300?\233\261\n\3150\232\252\277\212Z\2479\241j\256?\215\031.\200\315\325\215?\247\234\003>\303(\221\277\212e\257V\224\311S\277x-3\345\021\343\242\277z\251*Q\220\346\215?\n\235\302\312AC\263\277\341\250\245\213\233\'\277\2777\016\321\236K\212\231?\265\305\245\230{\244\266\277\200\331\\9Rj\250\277\014\266\257B\315%\250\277\310\331\215\342\260\004\254\2778A\221\336\n\374\257?\322\336\211X\341\254\231\277\363dO\263N\276\220?Y\342R\252PH\237?\3424\342>\\\013\270\277\004\314\310fx&\246?\230&\256\010[\222\256\277\364>\223\276\237\271\224?\367\343<\363\277\006\312?\026a\233\032_f\250\277\020\271\222\344\315\023\201?\2156\263\270X]\246\277\022\355r\342\371\331\227?\211\037\0136\301\216l?\t\232\007\346\371\033\240?s\340\0217\001\037\230?\022\005\273\204\202\222V?\007|\245\263\005z\255\277\0177g5Q\242\261\2778\304\000\350N\270\246?:\343g\331\2678\243\277M\242L\275\361\321\227\277\337\006xz\206\177\227\277Z\245\016\306\265\352\206?k\344v%M(\251\277\275/ \360\224\240\204?\257\337L\371\272p\232\277\344\200\235\235\0359\264?\2342\372w\217`\267?3!\241\367\022N\222?\303\362\322\t\"1\264?[\276\267Yt\256\223?\'\372\262\357=8\223?\0277\177k[\365\233?\221\322\325%\336h\300?\216\223\tc%\353\252\277\312\\\026\332~\020\267\277t\353m\010B\274\202?\036f\034\240\204\027\262?\302\313oJ\244\223\244\277\235toi\211`\261?{\031\0067Hg\262\277\017\363\006\242eJ\311?\250\260\005l\034\021\273?\025\271\365/\301\306\260?V\253\315\033\313Q\245?\307\266\000X\272\303\264\277\210\275u4\350\357\244?Hl\313\342\307\254\262?\257Y\331\307\022~\233\277\002\034\265\t\322\010\245\277Ib\005\247+\020\265\277\251\225\330\024\244\241\247\277\374\353\0051om~?\344$\010\364\263I\242?\'8\215`\233\257\265\277y\205\347\224\020\247\214\277\213LC\026\363\356\261?\017\267\005\217y\236\237?s\255X\374\220(\245?\376\311(\356e\246\222\277\271\226\246\020\030\304\245?1/\347\363)\004\245\277t\307\222eX\210\247?}%\177\336\255`\247\277j\373\241\034(\330j?\233\345\002\033\342{\261?f\225G\327\252\366\233\277a\203\321\247^A\242?H\345\002/\272\330\254\277C\330W\351\260\014s?O}\244(og\252\277\021\'\267fW\031\266\277\313\216+K^\217\270?\244\244iU\242\240\241?E\016\333\251\014\017\242\277\017\222\352\221:\261\266\277tL\3400\351\017\232\2771\232\024d\276\253\242\277*\343\225\353O\345\241?+\211\337(G\353\245?y5\215S\352y\242?>\220Y\203\2777\242?\275{;\211\255\254\232?\213EOq>\372\264\277Q\3017\233\241w\240\277^=\260\2755=\251?\326;\332E \022\261?\207tq\3162\372\261\277\374!Np\036M\236\277\261\344\022\265\312\204\246\277\276\330l\1775O\255\277RVY\255\241\317\267?[\277\3004T\325\260\277`\026\233>\362\215\265?28\264\217\340\373\177\277\211\226\356\252\371]\253\277\333\032{\352\342\'m\277,X|\264\321\336\217?9*J\306FL\254\277\302\321\314\244\233z\221?\272\014m\250\010K\267\277\224\0356\354\006\265\261\277\226N\332\267\214A\231?\303\331\205\264L7\240\277\353\353\033\332w\r\223\277\273\207\344\241Z\244\211?tEe`w\300\265?n\223\257\245tmz?(\343~\235\n\367\214\277B\264\372\341n\200\234?\324\345\010\336M(\245?\257z@\216\0234\224?~9c\022\243\260\226\277\'\375\260\266\361\004\255?\213\375*y\330FS\277)\215\233cG\030\251\277t7\264h\3231\267?\016\363F\275\271G\300\277\001\350\346\235\235\216\242?z\373\361 \035D\263\277\327\004\325\331\221\264\234?\007\377qH\2413\263\277aQ\30175$\240?(\322\310\353\367\371\221?\034S\357\221y\227~?\'\217%@\322\022\270?M\212b\007~\232\257\277X\345\266\334\034\261\310\2773\020W;\035m\240?\225\0057\304`\375\275\277WMN>+2\212\277\037\266\251\340x\022\250\277\215\241;\001)\016\272\277\342h\212y\'{w?D\261\310\262\230\372\220?o5\246\001\277\370\242\277\342c\210\260;?\247?\007p\r\355\2365\251?\r\363 \375\023\264\253\277V\tA\365\352\361c?h\346\232\230\220G\300\277\314\255\206\360n\203\241?\276\306\024\000|Z\254?\366\n\"\334\305\t\263\277o\357o\252\024[\210?}\010\244w\261\276\246\2773\316b<:\354p?}\201\311\023\243D\270?q\351\001\336\232]\244\277s\214\204\320\301\263s\277\371\037\370\017]p\273?\377\2034\375\035r\241\277\350\257*k`\253\261\277\322\341\326\354?1\264?50\t\215\346\260X?\276{\213\264\004Uz\277\312\231\366|$\315\236\2779\250\254w\226{\257?\031mS\357\027\267\272\277\261\r\253\334\225\t\221?\371xhu\352`\265\277\212\325\177\312j\026\232?\017j\243\341\361\374\215?\261\300\236|\332%\245?\215u<\267F\331\215?\333\354m\257\032\227\177?\344\316Q\007I7\262\277\340t\3019\024U\227?wmP\235\327]\261\277t\213\027Yb\251\242\277\241\265\016p\354\366\245\277\203x\3107UP\252?-I%er\033\256\277~\351\362\245d\373\256?\211\335ImN\331\215\277\020\2124n\262\360\267\277Qi\033\215\330\212\231\277yc\255@k\234\245\277\350^-*\021\230\241?\234\343\013\246i^\261\277\246\0144\311\014x\301\277\243\246\211\345\216\210\250\277@\024\330\201\350:\235\277\304\311\013\025\275=\261?o1\006\301n\231|?O6\326\330\213 \211?8\343\314\233S\251\220\277\343`\316\350\365\242\300\277\356E\335\004\210]\262\277\301\004+\021\323\325\201\277\223\023Eg\356\372\241\277\323G\037\237\311\344\262?g\0057\367\031\313\243?\\\205\210\330\246\210\245\277\272\261K\034x\205\225\277\372\366\206\3516X\260?\032\255\333\366\247\215\241\277)\361x4\024r\263\277)z\031w\310b\222?\323\364\243\346E\013\245?|\327\001R\021V\262\277l\311\022\240\376\243\244\277\001\221\2342\305\236\261\277Ts\340[\035U\247?\177\364ud\333!\267?&\242\337\245\004K\271\277\351\\d\010\260\272\271\277\373\237\314\343\363\002\243?\231\332\253\253\214\027\261\277\003\006\0278\315`\225?\324\\#\362\222{|\277\375t\311r\344\254\261?\254Pfi9\034g?\347\010\322f\365\317\261?$\026\nS\354\247\245?\334bCf\025\202\300\277\345:\357\277T\314\246?\272oe\363\270\354\242\277\326\367\320\240\'\t\271?e\343\236\177\376\303\233?\320\t\367\221#\224\254\277\344\352\030\320I1\306\277\246Uc\212j\273\202\277Ur\0136\202\352\242\277\027\214%%vD\252?C\247y\324\210\371\223\277\365\243\014\327\361\225\206\277\356p\023\\\320\001\243?\260\372\345\322G\013\265\277\324\245X\372\026\353\211?\223\0255.&\255\201?I\357\256\036\2631\227?\204\346\346\027\222;e\277CJ\021\365\177\277\241?\371a\341_\'\264\264\277Ql\333B\177z\251?\372\3112i=Q\254\277\361ge\r\027@\241\277\331\001\030\025X\217\253?\264f\367f\205\204@?\255\326\351\252\035B\222\277\330d\351\323\261\316\302\277/`&n\340\370\212?o\257\246\'\273f\204?\223OC\324\304\342\261?\225\367I\321?D\263\277\303M\314\000\2651\241\277\227\264\207\270\211\363\267?y$\203\274H3\201\277\261\0238\t;\225\267\277\370\351\240\323\310t\272\277n\376\262\202z\030\177\277r\250\277\346X\335\261?\214\237\030\373\2223\242\277A\227s3a`\250\277\234\352\003_p\021\272\277\014z\214;7\317\255\277\313\202\010\374\201Y\262\277E\021&\334T\241\225\277\364\026\303\201RR{?\246\037!\"\242a\266?\230\2605\307\362\323\235\277rb\333\256@\200\253?\354S\272\312:(\231?\222\177\304l_a\265\277[.\307\232LQ\236\277\2340\035p1_\241\277\354e;SVJu\277\310\220\341`\267\221\265?\'\204\3507%\004\224?\273\004!\223\310~t\277\177\373?\304\354\325C\277\\\004\026\365u4\252?\004\316\265\257B\373\216\277_\363p\374\025\334\245?\005\3609#\235nq?\343\t-\320\367\322\215?\010\371\344\237\261C\257?s%\036\362\255x\214?\005u_\305\370\237[\277\374d\004\344\303_\260\277\374\274\005g>\236\200?[u>\023+g\260\277\361\215!\204\017/\225\277\326\323\310\305$\220\246\277p\177\373F\234\267\254\277\363Bz\223\251\245\234\277\212l\013\244\232kE?N\200Y7I\\\246?=\013J\262\346\317\261?X\323\223\3346\036\214?\274~2y_s\262?\311\343\310KHr\235?\233\0337\263\227\004U\277+\032B<\323\366\306\277\033\2604W*\037\237\277A\366\272\220\217~}?\216\273\340A?\250\263\277\311hK\241?\252\220\277vO4\361\010\324\265?\227\224\003\344:u\225?h\207\234mZa\240\277\362&\215\"\266\032\251\277t\231m\3329\271\210\277\2377\305i\362\334\233\277o\205\2341k\372\224?\351X\250\264\306 \264?\037\321}gQ\263\240\277+\374=\241\"\000\266\277\030\025\246\023\273\271\227\277t\022`\035\346^\231?\342\'\013q\276\225L\277\t\2454\032\241!\205?U._\222B\265\264\277#s\210\221\301\345\212?x\035\257O0\r\254?k\035\247\264\352\336\212\277M]\214\322T\017H?-\275\360pNL\257\277\221\037\235E5M\263\277=\363\344\266\305\230\234\277\0054\000\323\007\265\235\277\016\312\3638\0204\246\277\233su\317?\037\223?\301\366\312\206\360q\272\277y\033\003\305M\245\306?\276\023\252\347\264\032\251?\035\371\251\017W\225|?\335\366\217\\\265B\253\277\221=5?\240\031|?VV;f\211\352\257\277\361\354\360\226\347y\267?n\247\031\265\256\337\270\277b\366\030\204\222*\234\277\027\\\211\266mc\205?v\225\2360O\021\265\277\017\024\250\r8b\224\277,P<\330XY\253?\365s\265#\2265\224\277O\353\263\030s1\227?\020+:M$X\272\277%\231\010\351\317\235\214?\301\n\230\255\r\216\230\277\342\254\016\177\021I\237\277x\344\271\370W\221\255\277/\323\276x\2148\255\277J\023\320R\005\326}?b\277\230\310\370\362\254\277\'=\036nZ\326\237?\216\327\006\253t\353\225?\027\223L\254\246(\234\277\327\213@\n\226\024\201\2772\214\033G\367\306\245?Y\262\025\002\014\030\301\277>z\234\332T\037\222?O\300\2414\\#\243\277@\267\324\202\254\211\266\277\264\257\020\033\363+\240?H\304t\256\025\245\204?\nFg\345k$\263\277\343\001\\\016\213\010\245?f\303t\356\213\316\261\277\030f\3362]\030\213\277\231%\313\312\324\007\203\277\305.2`0\217\231?\350\017\240\201o\341\235\277\237\210\202\205j\177\233\2771\360\276\363\364\302\247\277\204Q\312\032\220{\243?-\215/\227%o\244\277]M\235\341\373r\262?\371\214d*\257M\263?\'\026\213\350\257\321\261\277\304\343l\233\262\242\222?LO\235\"\252Y\221\277_!2{\214\004\235\277\206W\302\304\251\007\231?\212b\0242\252\346\233?HclKd\010\242\277F\246w\233\337\336\233?EB\211D\363\255\260?\326v\270\342\'\257\260?n3\222r\010\313\234?\234\3643\343\335\214\271\277/\346k<\"\214\235?\037\221\253\"3x\271?\036h\240\237*0\225\277\232\277\020\375>\321\200?e\375\251\301\303/\261\277$\347\"\310\025j\267\277F\306\036\263:\354\266\277\311\036\342\272U/\244\277T?\033\271h\254\260\277\'~qsu\337w?\317\2105w^;\265?\330\355T+\360A\271\277\254\200^\353\0062\276\277\327d6\327\207\312\213?\275\340=;\361\327\233\277}g7\252\203\311\263?\313\001\214\033\177\356\274?\336p\253\007\016\365\204\277\242W2%xg\216\277\347\256\225\244?\205\270?fsP\270C\232\262\277\220}\240#\200x\242?\371q\323\0206\347\267?+H\271\312\301W\210?\265\213?\320x\327\264\277/\217*rw\300\261\277i\241\330S\307`\204?!\205\341\324v\t\261?\021\202\325\352\221\237\244\277\033ks\030\306\323\241?Y10\317%\276\250\277\214\226R\313\\h\264?{\251G\305{\357\220\2771\016}\326\312\250\210?Q\311$\352\213\354\245?\313*\006\217\375\340\244?T\241v&a\253x?v\371\030\326\312g\245\277.@dBF\024\222?\226/vi[\320\252?F\323\306\013\346\027\262\277\257\240\255xnT\262\277+On!\024r\245?y\235\241Y\2755\220?\014\024\237&\"\253}?\374C\273\211\250_\252?\033\266\371\367\337\235\250\277\355\347\353\3443e{?\317\254\316~\214=\255?\024UH\226\\\004\242\277$\230\302$\275\202\222\277\205I\272\020h9\266\277\206aA\353q\217\225\277\311\266\265\273\274\223p?\3738\222\227\366\256s?F\t\032\261\013\005\263\277\021\316\274\2639\213\257\2771\355\032\364\ns\245?\372\301\025\014c\356\247\277\335T\355\035\275\306\230?l\r\303\230rf\206?\263\352\241]\375\001\203\277\304\232\325\347\321\365\250?\332nd2\\\312u?\367\0046\022\334R\246?\032]\314\'\340\361\242\277\214E\265\001\036\035\244\277\346\370\217\224\205\376\200?k\201\020\2379+\256\277\336f\354zj\310\245?\351\340\335\377\263\371\264\277m\374\232\327\354\032\252\277Db\032X\334q\274\277\000\255\233\n\350\'\250\277\376l\032n,\220\260?\331\"\244\337\317f\274?\267\371~9L\345\232?\240\215\342\325sa\275?\253\004\321h)\344\272?C\327\241\372\247\334\234\277\352\027\337<\212\221\234?P8\024\363\371\372\253\277\270\330\225h}x\276?\364#\254e\006\304\253\277zqd\360?\322\241\277s\241\\g\343\337w\277\023\245e\2124\203\240\277\323p\024\025\270H\245\277\346c-\347\027\003\274?\252I{\177M\346\262\277\302JI\203 -\233\277\315Y\t\210R\361\215?\022/\346[\220Uo?\375\343\355\377\t\271\207?\274\254\246\244\337\220\263\277u\177#:\263E\274?\001c0E\357]\263?l\215%\245\324l\237?\366\345b\014\204!\276\277\245\023}\372\017\214\203\277_5G\000N\010\242\277\232\0138\202\344^\225\277\002\211\261\234\241\235\250\277\034([\362Q\205\276\277O|,Tx\225\263?\366\031\377&\370\226\225?\330*\'0\264v\224\277\360\215\200\027\265\252\241\277\271\037\033\2447\365\234?\241=\231\343\336H\261?\356\250s\"\222$\264?\206\241\271K\207e\204?6\211\344\221\020.\203\277\314G\346\246b\370\247?\264$r1\271\204\255\277\367\302\207\264.\235z?\360`\366Q\305:\254?e\204:\373%\323\217\277%\2559d\310\261n\277\217\303\223\221\276\377\236?.\001r\321A\241\263?\273SNN\211\357\225\277\276v\010\204\022/\233?\373\312\013I\206B\226?B\235\252Bb\301\256?S\213\336r=\255X?<\331\002;?\365\240?j\203\357\267K\300}?n\252\356\326\021\256\244?3A\307\"o\317\220?q\312\314\023e\276\244?\027\327I@m8\242\277i\343cJ2f\230\277\022\311\270W\263\005\252\277\221\354\256G\032\232\262?\343\206w\r\355\013\235?.\335\224g$\250\246\277\237\341B\006^\340}\277T\021D\350\016\226p\277\200q\373\321\013-\265?\312\0148\240\246)\263?F\336\323\356}\225\272\277K\223}e\302\236\257\277\247\355\246\237\235\201\256\277\026\311\004 \020\326\272\277T\333|\023uU\235\277\376\252w~\230\370x?xG(m\026\341\251?mE\265\364q\001\223? \345\017\020\007\t\253\277\024\375>\252\222\333\235\277\314\004\265fem\206\277Q\275PS\223vu\277\022\275U\250\236\231t?\352pTW\367\304\206?\3759)@]\340\242?\266U\007\207N\266\301\277v\003\343\245\214\\\306?\301>\204;`\004\241\277\nM\223\010\324\036\251?H\311JlTO\273\277&\232\022\276\212\304\201?\321n\247\202\021\305\250?gI\337\316\"\335G\277\0239\036\225\246\034\265?i\341QEG\221p\277\037j\204|>\276\200?`3\335\247?\246\314\373\020m\t\265?\014\005\020\361V\273\265\277\312\361V\324\211+\227?\322\317\374g\347\221\220?\\\320\300\255#\223\270\2774\010\3644\325\267\261\277#\215\000\032\203\364\271\277\334\251\224<\204\016[\277B\024\215W\324X\241?\241\372,\346\373\357\240?S\317\214\316\231\324\211\277`\320Y\016\241\006\263?\312\021>v\2735\260?\227aZ\263\0209\255\277!\027]\010\023\030\264\277Q\305\016 \022\037\261\277#\242S\232\224\267e?\314]zu\206/\177?\034\230-\363\350@T?P\232~.\310\202\251\277\216p\217\2612\314\226\277*\341?\250\301U\256\277\260\253\336\310)\237\243\277\234?d\350N\360\245?z\240\325\232*h\260\277\331u\2345\362R\222\277\237z\\\256\014\036\260?\017\301\002\0317m\211?\034\351\010\302\260E\260?\001\265\247\247\277e\214\277\177?|pn`\223?P5dm\037\221\204\277e\251Y\340\347\007\255?\311\270\361\241\313E\223?C8\036\272\003\216\263?\210\361w\347\373%\252\277\r4\237\223\037\026\224\277\303&[\325V\230s\277]\203+%}\312\264\277\206T\224\236`\317o\277\021\260\266ga\361\247\277\333\347\333\026?Z\256\277\362\250\241\347H{\230\277e\022L5\263\376\251\277\r\257;j\336f\217\277\032\305{\325\340\270\207\277y|E\020\237\340v\277I\342\307Q\253\270\302?\305\201\215\373\211\257\263\277\371\247P\315\211\214\243?Bug1\313\304\260\277\266\331\367\244u@\243?\222\271\334\341>|\202?\250x\266\214\304\363\227?\333V\273\225*\326\276\277n%\301\277\3767\303?=Rc\223\251\340\243\277\320]p\364#J\250?\372\030c\337O\026\261?\233\246\355\236\260a\264\277\364\004y\316t\256t?3\2326l\214\372\240?\276\20101\345\303\233?el\206KE\014\216?\330\347]&j@\224\277\333\0306(.\367\230?\217\316\005\365\247Q\271?\361\020\246z\360\365\257?`\275\353r\244\031\240\277\t\204I\311v\206\203?\325\341t/n\346\204\277\2011\"\237`o\267\277q\340\313\342\010]\231?\006\235JD\261nx\277}7\311n\225:\205\277\306\206\n\257\023\017\225?\371r\242\340\345[\241?\200\275\376\031\363\215\267\277a\323\361\310\001\005n\277\030\234$\215\357\320\255?\t\261\3056\024\342\227?O\243\272\371\315\200\222\277:\277\3033t\r\256?-\215;\300\230\025\237? 2~\'\2357\300?\263 /\245O\362\267\277\034\331\354d\357\265\251?\364\007\n\240OF\200?\326\350CX\220.\262?{\231\025\315\340\"\262?\246\374\014h\014g\255?\324\206\210\235`\025\260?\177b\204\310\363\335\277\277m]\352P\211\336\253\277\230\327\314x\364\370\221\277\315\254\301\302\200K\240\277\036o\231\345on\260?\217\006\233\253\304\006\244?J\221`\2479`\240?\260\r\333\224}\376\246\277a\205\321\2537\365\246\277\257\315\223n\223bX?\370\216\250\241\277\372\223\277\007\364/v\237m\234?\214\033\266\"\\\336\236?\333U\260\250o\240\207?P\344\320\205&s\221\277\220\227~\376\304T\206?\327W\307\333\343+\242\277\222\276\300\360\341\212\301?n\200\017\230ui\227\2777\375P\270\030F\234\277\036-\'\334\251\204\265\277\261GA,\345\376\254\277T_L\004\370S\305?\037\233#\361L\366\237?\335D\177z8\257\225?\356\343\263@Z\021\255?Q\255\000\245a\244]?\235\216\347=p\005\241?\271\304\230v\2616\241?\361\331$m\240e\242\277%\002\331\324$\326\231?\305ht\346A+\214?KJ\270\016\270X\235\277Z1\2134\203\325\246\277\323\000Y\271\232\361\265\277\002\267\225t\372j\265?\177x\311 -\213\245\277a;@\227\'\031\302?c\260\274\217[P\244\277B\t\316\352\232(\244?0\316\035\364\2670\235?l\3275V\0000\254\277\362=\234\211``\271\277\007\326\007\320:\263\260?l\203\370\350\346\230\213\277\301(S\242v\026\235\277~A\246\363\303\344\204?\373O:\307 \233\260?\235\277\231\232\200\227\274?A{\255\231k\203\253\277\246\231\361\000\274\316\210\277\225}\341\324|\267\252\277p\273\220\345I\347\271\277\221nN*\0253\240\277\n\374\372\350\302u\273\277~\275\t\337\253\206s\277\250I\213\334hd\234?\204\t\013\234\333<\250\277\234\310\006N\370Q\264?\035\333\277\216\037s\220\277$uE{\211\333\247\277\313m\r\221a\243\232?i\342Bi0a\226\277\311\"\220\246\205\261\261?T+\2140m\217r?\353\367x\363\367\r\245\277\206\263\202\356\212\217\241?e\t\306\247r}\200\277\334\223\027[\007\363\262\277\313WrX,\031?\277\326\312Q\203\027\343\265\277\007X\263\321\333r\241\277\322\331a\034X\376\256\277\265p\234\375\371\373\211\277\374}\314UZp\257\277\233L?\376\273\366\265\277\334z#\333g\202^\277B\016\221S\342\272\301?\037\365\330P\003g\204?f\377L\2749\022\256?\276\201H\256\016\333\236\277A%\\j[\362\256\277\306\207\363\0020\223|\277\247\255,\022\211\367\263?Y\246a\356m\252\244?\255%aJ\323\200\265\277\035\377\022d\022\274\241?\236J\252\017\272#\236\277\027\006\252\374lL\236\277\001\361\335 \322\240|\277\307\244\226\031\252\251\273?\205\204\376U\3165\255\2772\222\316\206%\332\240?\021\230\020\233\330\214\240?d~m\251\006\007\220\277~\317\003N\313\262\300\277\375U\002\371\202\344\243\277\210\354\252\270\215\010\260\277L\273\r\221\235v\201?\315D\311\306\312\010\226\277\275\3258Xl\027\243\277J<\301\2400=\262?\017ol\200^\016\311\277\364\224\177\312\2050\244\277\331e\250\232F$\226?g\005\371f\010\231\242\277{<\236\n]\272\263\277\217\000X\024F\226\240?\021\360\335y\363/\230?l\362#\254\214_\223?}y\250\300\307-\234?K\006\004\004.=\256?\343\332\252\312\300\201\246?\301?\177\304\034Y\250?mZ\353~h\364\202?\026\352,C\2228\264\277W\247\030\270l\031\204?*\216QR7T\260\277 \256\211\317\251\367\236?\321Q3Y\374@\267\277\301\271\335>\215\250\221\277.`\024\353\200Y\214?\253\275\246\277\036\036\247\277L\220\006\206\317\215\206\277\201\215\342o#\337\241\277RNL\274\374l\243?\317\345\177\231\022\262\230?\335?\304o\\F\247\277\034\223\244\000mt\243\277\033\021\\L\273}p?\232\022\334\367\364\260\214\277%\241Ktq\221\221?\337\204\267\r\263J\253\2771\310\22082Y\265?\251\332\255\202\006>\243?v\353\333\030TX\252\277\352\236z)\272\013\262\277i\333\251\337\233\317\236?\256\360p\303+\r\244?\317I\263\332\360\234\243\277\373t\221|T\370\240\277\316a\271\002 \301\210\277\013T\274\001UK\235\277\270F\256Rm\351\231?\237yE/\n\324\261\277\226\246\342\3112\271\227?\263x-\260\223,\215\277\306\302]\032\\e\233?E\206^~\334\317\203?:\356\004\205\255\207\227?$\217IJ\351\234\250\277\260\206\330\373Y\372\213\277\331\tM^\377\276\245\277G\202/\226\357\027\253\277\336\210\225\023\376\322\247?y*s\351\357\006\252\277k\211\375\236fw\236?\241Y\212\227\010\234\223\277\364\312z\251N;H\277\357\240\024[\373\213\257?b\262\216\317\024+\232\277)\304d\007\241\nR\277T\032\321\007<\355\236\277A\006\341\312s\333\242\277\356U\004\246z\311w?\364Zg\207\355\377\302\277\252=\226Wt\271\210qr?\255\376\210\177\273\263\242?>G\007c\032\022\232?h\270t\310\024\305\231?\253\2103\346\377\001\253?\374\251)\254\341U\236?\360\247\230U\031t\304?9\333\322UO\333h\277\230|]\376z\316\204?E_\010\357\027X\224\277\247H\316\215J\312\257?\374\004\031\325\371{\233\277\316\236\t\363\267\372\215\277\n\032\356,^\351\253?\337\326\266D\225\276\265\277\245\361\276c \264\303\277\020\350 ;\225\365\231?\265\254\325\'\332%\240\277\243]\237<\016\240\225?\200%\337d\2368\244?\272\360\033)g\320s?\247\376\260T\323\'\211?\256\235N=\356?\226\277d\001\r?\"!\240\277tk\315\373|\330\237\277\312\"\341\350I5\250?\267\240\272]\r{w?}\305\\\335\253\356\234?\307[\r\220\240;\226\277\004\3643enN\200\277lR_y\346\327\270\277\262)\371\r\333\370\237?S\253\024+$\377|\277\004\005\344b\026]]\277|v\001\246D\306\262\277\311\225\263\307\017k\263\277\014\215\311\344\356B\207\277\323~\351L\013\202{\277\260`w\310\245\245\306\277O\247\020K\363\265\232\277k\276\226w\004\232\201?v\300\n_n\216\252\277\244\225\204y[J\246?\221\245n\315\273\307\247?\352\200\016\026\307\375\213?!\360\345B\337\211\262\277\271\302\203\2544\004\231\277\037t5\362\350?\206\277\371y\345D\215\343u?\221\235\035& \266\270?\321\260\374\263\003\235\223\277\021\315\273\240\306\270\254\277D\237\201\274\277\340\243\277~z\227\336\326\006\262\277\2067\300\275\314\343\270?\347r\350A\2766\207?\241LN>\201\245\272\277@>$]*\027\260?\003\367c\2507\360\244?\316\272\370\246\344\372\251\277_h\313\013\272\306\212?\001\212\022P\221U\252\277\232\031\300=\013\300w\277\373\242X\247\231\351\217\277\251Uri\002Y\223\277\r\217a\007\275\210\233\277\005P\275\340\010\270\215\277 \243\210\366\344\327\206?\346\345\r\350\257\020\231\277\235\246@~\337\013v\277\211nV\347\355%\214\277U\347\240w~O\224\277\266\2539\016c\335\260?\025\000\233\346>\265\213\277\340[\034\252C\312\252?,\326T\022wE\214?\302\372\304I\215\362\266\277x\230q\214\257\347\242\277<\033{s\016S\230\277W\274m\277\'\325w\277\370\014A\206\220\002\246\277\207S8\220?\334\240\277\325\016\225\220\346Z\253?{\257#\324\323N\230?%r\343x\n\210\250\277~\303\267r\362\347\261\277\013p\203\322\337\271\253\277{\232nN\356\375\243?\344\275\317s\033\313\277\277\224\007\336\213\342\312\244?6\257\312F0\351\223\277_\332\003\324\225\203\212?RV\025\227)a\216?6b\036\260Im\271?\344\267i\266gs\241\277\212\033\034\273\007\255s\277/\336}\245\365d\267\277)[r\006\250i\233?A\030p\223\t\367\216\277\303\306\036\242FV\237\277\372\377l\265\373[w\277v\035 \r|\303\244?()Q\321A\345\235?\216\307Apk\027\267?\214Y\336\213>\351\242?\016\342\235\030\n\341\221?\250D\013\031\252\375\202\277\313Y[^\236\014\216?\250v\255\307\031\326\262\277p\277\226\315\246\227\223\277\367q%<_\244\231\277\307\256m\347\232e\242?\214\231\250\035\373U\232?\233>\252\007\262\261\231?\246l\242Se\344\266\277zu4~\200\272\251?D\3012\364d:\256?A\"\016s\375F\273?\003\236,:\345\236?\356&\354W\3702\267?!\000e\243\367\377R\277\nB\0337mO\234\277\250:]R\254C\273\277\363\345\0367-\006g?\010m\324\366w+w\277\t\373\366\231W\241\231?L\311\353\325\216M\224\277wK\025\311=\301\251?\246C]:\307P\245?\001\014#\245\271\263\252?\216\246\'\345\"\024\242?.\374\007\201+T\264\277\235\377\277\360\203\246\242?G.\026\350Z~\264?z\034\177\352\371\357\204\277\253F\203\346\032\017\304?\362\006Y\027\270!\246\277\212\343\334\263\r\210\244?\022\274\325= \346\242?+\356Q\266\245\261\220\277\273\201\221i\3527\242?\200lR\354\020\024\217\277\247\377\020\231\330\257\224\277\246\226\246>i\322\200?\267/K\0211\016\266?\312\316\027fv\373\250?\211\302R\200=U\204?\313\346\365\006\364T\270?e\304.\274]\354\231?\3758\017\311\227\260\232\277\3066\335A\305\006\213\277\206W\225p\254\260\245?\207\372\263\321\365\014\246\277V}G\244\351\330\216\277\272\221\365t\251k\206\277\253\376\373\303\333P\261?_\277q\2672\361\261\277\254\355n\206\276=\246\2779\366\224C\370\343m?\363\250d\206R\242\212?WyZ3\271\357\255\2779tc5\213\037\205\277\217%\204x\253\265?\372H\"\372#\313\251\277+\025\240\365\266y\207?{\030\305\352\272\013\273?\326\240\325\367\362\321\231\277\323=7\211\211J\265\277\\)\212\315o\004\265\277\346\362\301\326\2030\202?\301+\224G\020\336\274\277\001\004|\263\311\230\240\277\200\374i\330\335\301\251\277\2061\314\347\217k\244?\n\222\255R\242o\214\277P\304*\365J\377\274?0\"\"\236ID\242\277`P;\023\001\305\262\277Q\345^\010!\\\233?=\314.\004\360\261\204?\314>\203\263S\005C\277\314\203\357\004\325+\211?\243\345\313\226 I\265\277-\367\026\254\314\355\252\277\321~\014\235;\237\273\277\031\266Vy\250x\234\2772s\005\017\206:\243?YB\246\247\330\033\261?\013r\037SC\257h\277\333{\326\0053\304\234\277\263\222\220O\241\325\242?\374[]\305\312b\244?\000\225O\276\373\020\252?\302\324[g\246vP\277\"K\243\365\017\377\254?\320\3762t%\270z\277\373\346W\013\354\206\273?\006w?\000=\307\300\277D\006\313 \177\260?\253f\014\251{\207\253\277g\022\253<\231\036S\277{F\307\002\250?\n?\362\372\305\242\332$\255\277\334q\306\246@\373\250?\220\241\326\271\030\367\245\277\232\344\301\0163+\240\277\252Z\t\307\215K\277\277X\211\334\303H\241\263\277\007E\014\177#&\222\277y\340\227\212\232\n\261\277\302\343\266\020f\213\235?E$\023\024\234Y\265?a+\266\032,\202\242?\0166\177\311\212\033\261?\323\201GYV;\275\277\230\256\352]vJt?se\035\337V\311\240?U\307\365}\265\335\205\277\276\322\326\377\200\230\265\277\366z\"\313O\t\212\277\313=\370\210\003\177\252\277\247\226\234\360E\311\265?\352\246\033\365p\332\254?\005|\216\313\271l\254?4\031\344\341&p\255?\\\237/\t\333`\220\277\276\rk\024\002\t\245?$\344\340\372\005]v?M\214\2702[\370\177?\025\307\231\231\247\212\224?\373\013\242\352\"\266\253?\370iC\230\246\353z\277\234\277\227_\235\304|?\024m\303B\333\027\246?\010\356f G\360\213\277e\002\340\253t\237\257\277\233F\2764\333\027\255?\275\257\003\362X\312\264?\322\347\373o\231E\264\277K^\352\265\244\254\255?\214|\207\321\343q\236?\310\271c\33151\260\277\313c\3458Y\375\243\277\362\027\030?\177\316{?Ph\311\033\357L\227?\245\355m\300E\r\221\277\001\2069\203\326\263[\277R\234\257\375\001A\241\277\212\326|\376\347d\246?\311\007\261\231,\223\222?XU\010\266\356mq\277\r;\352n\203L\241?L\264\252\353\246\262\275?\211q0J\022\014\256?\376a\245\212\212\031\244?`3\030f\222\033\206?\200Bm\345\013\317\251\277K\024\324yg\000\257\277|\272\007Jp\270\246?\363\330\345g\355W\250\277\261(\357gY\357\263\277\013-\317\322A\310\233?\027\342\214s\tK\271?\032T\335p\274b\271\277\246T\303!3\264\222\277\025\264\205f\254=\237\277\220*g\0134\003\225?\374\030P\326s\037\265\277\010b%\336\021\242\241?\352\361}\247\254\214\225\277A\276\255b\236\241\246?p\310O\036\354\354C\277f\220\2762\270\277\252\277\314C\302=_\030\203?\014\365\tn\343\257s\277\332\024\342\216\276\r\241\277\352\027:\250\212\241\236?\275\336`X\221\035\234\277ww\001\030\372\265\256\277%\305E\370i\037\272?\274EP\331\340r\260\277\010\314\377.\314\312\240\277\341\377\177\376\267\355\226?h\357\332\300,\250\267?\261\264Ka\325\271\246\277\021\263)g\373R\235\277m\252\311\230\013\227\201\277y\312\254\365Sf\305\277\313(\327\317H\322\243\277\243\254\243\271\364\370\250\277*\367\336\200\240\245s?\333\035\020A}\301\222?\024\363ZBW\346\272?\007\'\353\361\330<\260?\206\376\340\270\306\215\210\277\311\020\302A\341\367\267\277\326\360\204L1\331\211\277K\371\027\270-\022\217?\017q\222A\272\204\242\277^\316m\264\276\341\211\277\025:\331:H&\263\277hT\236\317,E\243\277\352\201\215\352\217\024\275?\205\305\227\t\244k~?uR\351\3038\200{?\262\223\246\n\345\353\247?\316\216\344\3613\313\255?\206\300\335\021\311\303\256?\216\242\244;\366\243\243\277\314\260\t \200\257\201?x\033F\234on\240?\2360\3443\251\352\251\277;\336\366sn3\260\277\017\004!\254\225\007\254?\034\3771\370\032\004\262?y\022k\235\353\321\257\277\357I\237\036\333=\274?#\023\313\3271<\260\277\352\001Pq\033\"\245?0\271%\313\n\232\213?\300\345\253\361lQ\257\277\200OI\013\220\366r\277j\031\207\357\242\254\216?#r\365,V\331\264?\222/$\314\020F\232?\324<\325\376\336|\243?\363wRA\t\023\200?\301\252\241ebC\023?v\216\3000:\343x\277\340e~^\3029\246?\346\014\002c\010R\223\277\217q\304\031\213$\240?D\"\020\227\037H\253\277_\305\343\2543\327\211?o.\215N\361Yi?\305\037\272\274W\356b?a=\312x\022\006\262?\275\260\220d9.s\277\230C\237\230k.[\277\267\320\346f\021R\271?b\320#\335v\254\223\277\311\327\365b\204e\256?\370\003\237%PG\241\277@\003\237Lk\233\226\277\264\014\264`Q\033\264\277\370~\025T\213\307|?\304\225e\007\255I\273?\274Q\001\376[\021\252?\203\374\350\322\273n\211?\342\340)%z\354\263\2774\247\037\241\241\334\241?}q\200a\272#\225\277\007E\370|W\241\247\277q(\374\254\251\357\240\277\263\244\034|\366\305\216?F\270^\304x\006k?\264\300\305W\331f\232?\356\202\332\216<\222\300?|\237j{\335u\201?\305\351\261\030>\251\240\277\322\201-S\273um?-S\252;\230\321o\277^%5\031\021\263\242\277W\212\325\334z\250\235\277r\355,\234\204n\300?\026\240K\257\3702\243?\322>vly\371\212?\036wsH\327\207\273?\004}sML\322\241?\335\330,\210\237\367\252?\030h\345\0005q\251\277`V\304\353\204\r\263?\260\246\017V\010\215\251?\375\251e-2\035\272?\316\271\377\205\025\311\264?\325~\375\014\217\353\232?\371\267p\321\232[\262?\345\363\217\0141\235\251?nd\'\255S\272\243\277o\315\004\013\300{\247\277\351\256u\361\357\005\225?\262$\2724W\313\260?\263)\325P\024\372\227?O\341\013(\217aj\2778\343\235T\261\372\262\277\016c\250M\313\003\250\277\227\022\220\247(\263\235?\005\342\357\224\336\276\246\277\035\323Q\201\307\375\226\277\324\n\263w\233\275\261?P\020\203\2245?\261\277\365\276F\342\345o\252\277\277\036,O\341\361\202?\024F\343\307\356\227\256?K\314\315\242\342\366\215\277F\343\212\342\375N\225\277i\2452\366\000n\251?\332\016!\345q\365\251\277\307\031\267B\364U\231\277\027\'\255X\204\362d?D\321\322\222\035\350\266\277a\224a{\'el\277S\n\233\330M\317\300\277\002>ii6\326\267?u\303\230\261\2039\241?\314\000y\244\351\014\220\277\327A\031\236M\355\251?\031\005W\212\306\356\245?]g\274G\021\002y\277\346\"h\004\016\276\200?MJ\014\316y\200\222\277\347\212\371\312 \021\273\277\01433m\001\240\222?\005(C\204#\017\246?s\337\355\220\212\237q?\327E\342d]\'o\277\223\351J\r\024\250\216\277B\350\323\007\326i\263?A\234p\032\302\277^\365\256<\262\315\242?\203\374\005\253\000\232\226?Z\360\261\024\026\206\252?\203U\370\300%\343\264?X\200\"y\307\361T?\253\264D\240/k\206?\036\251\333\177\253\244\253\277~H\213\025\344\304\254\277U\344d\023\311\036\240?\316a\020\016F\331\261\277\013ajLUz~?\202hj\207\360\337\222?\375\251\271\236\326/b?\347\214\374X\217\307\261?\036_^\274\036\354\261\277\344\236\376\021\274H\242?\307\004\332^xEg\277\256\206\324\254I/\277\277\304\267\333XJ\237p\2772~\330\235\004\2114\277\331\300a\340\207\003\247?\2447\210\210\035\344\245?\305\010\\:\220\335\262\277\345\361y:a\004\223\277\373`\200\002\3661\270?\231]\210k\005\264e?\260!v\305eX\272\277q\360#\025`\360\266?\311F\206w\262\353\243?\243am\346\227\310\227?\005\177\316\212\346\020\273?\314\213s\307v*s?\241\344\321s\020\235\205\277\032\272~YSE\221?\025\010\225A\351\370t?Q\341A\032\034x\253\277\357\225\025\345\353\214\227?\007\343G\210\014\023\300?\346\303*\324k\013\242\277\352+XT^1\263?\022\341\230\310\303\321\\\277\262\212=\326\202\245\202?,\246\344\000\201`\223\277V\245\272\024\255\001\261?i\331\255\350i\030\261?\217\335\277\314\254\247\227\277\315H6\004\270\021\221?\276\237\365\235\223C\220\277\365\020\265A\375v\214?\320\'IrG\342\252?*\335\341u\202O\253?^\027\031Z\036\216\253?9\377\260\351\327B\274?\274\240*2\304\212\261?\353\026*$\222m\247\277\353H\265\262\241\202\216?[\374Ql\247,\264?\200X\324\014\332.\264?P\312\200\3702\026\222\277\233\261\r\335\335\026\242?/\tV\357\267\355\265?\365m\320?\243\236\233\277xg\375U\004g\246\277\013\005&N\021\312H\277\220\000\311H\231\352}?<\273\265\304R\354\272\277\026w\006\216\312\227\233\277=\302\216\234\343\343\260?\370Eh\031\264X\227\277\251\226\rc\366\220\254\277\265>R@\346\001\224\277\327*\356\020\253W\247?\217\371Q\262\003\370\245?@\351\300\030\2767\224?U\223\016\236\372]\240?\331\207=\016\033J\220?\371\030\316\005\377\247\230?DL\263\261\215\261\264?\327L\243en\332l?\230[\0050\213\244\260?\205W\002\253\321%|?\241\372\002\324\356\031\234\277\263V;+\023\200\245?\035R\035D\211\273\245\277\270p\016\031\216\301\220\277E#\304\206oq\251\277\347\262@)\021\301\177\277\236m3G\246\026\214?\266\271U\0309\335\213?\352G\227\315\343f\250?\241:\346)\274\240\256?\022\353\337D\312F\243\277\r\267q\0027\253\213\2773\243\255\236\030\'\253\277\263\245\003>\201{\263\277)7u&{\220n?=\2710D\353\016\265?6u>\327\3614\241?G~\340\361q|\270\277\371\345\315\201\340f\267?b\310\224g\315U\263?&\251\240u\274\273\242?\207\2512\361\020T\241\277FR]0d~\221?\331\216\335\377\334x\230?\034\324\311\337C \244?\256\307\242\245E\235\243\277\267D\313FY\035\246?\344v\313\3503\344\273\277\323\315`\215e\256\215\277\311\312\346\2736\022\263?\326\210E\355\202\361\225?w\232\213\255sO\244\277\034u\234\341Sxv?\231\r3\0179\202\242\277t>;\327\202\226\227?a\331\200\221=+\236\277\310\2102Pf\376\240?\354\354\333n\334V\247?\223F\367M\023/\232?!oI|\352w]\277\347\303I\352&\214\256?q\022\252\275\321!\263?\365|=\037;}\211?1\022\007\205\341\353\222?QT\'\216v\365\242\277\341\211&\251e\350\270?\355[\251\374gI\251\277\310$\\\243i\350\236?;\030\356\320\236\303\277?\027\007oD\302(\220?\3418\235\214~R\212? \335\360\002\330\025\270\277\"\356/\303\216\317\250?\201\370^\r\376OP\277qh \351\236\226`\277\337_\203A\000j\232\277d\030\206XU\261\260?\245\361\277\264\267\303\263\277\344\226?\352\034ep?\202\350k\353\337\030\207\277\303?`\206\222\367\224?\322\006\272\271r\214\263?n\356h\233\010\343{?V}\373\350$\010\250\277ZIu\0177\317\245\277\324\314\365\250\215P\267?I\362v2\234\013x?nHM\333\330\232\247?\024\205)B\321\320\245?\244<\020\206j\006\243?6\204\017R\215f\251?^\355\325\265\245\003\277\277nY\275p\033{\245\277r\202\014\336\272\355\234\277\310\226\315\234\341b\242?\340\255\315\201\023\022\220\277S\305\323E\373n\300\277\304i\335\313\235I\220\277\267\024F\242\246\300\241\277\265\247\201|\224\336\270\277\272\342Y\330MT\247\277MD\014\231\033z\256?\346\252VB<1\267\277\372\217V\373_\347\250\277\330\241\233:x\001l?\n!\001?j\372\301\277G\375s+\005\345\244?|\021)\351\341@\211\277\265\232\360\243\326\300\256?&\217\270\316h\352\242\277\223\212\r \310s\245?\341\254l\251X\364\234?\244u\030\023+J\211?\347(_\361\235\346t\277%i;\253\232\\\277?P:\250\305\260x\241\2778\322\005\263\034;Q\277?\236\001].r\223?M\225\030;\257\264\232\277\246 \024\345\013R\234\27787\317VA\\u?[a\310\346\263\260\267\277\231\002\305z4\276\277?@tc\256\3007_\277\302W.\245\020\375\231?wt\301;\032\031\246\277@\251M@\205\342\273?\250K\255\002W\010\250\277~L\273\336\'\327\203?\302D\304X\200\313\266\277-\021\323\303\227\271\244?\3018\246)a\332\261?s\330\242\3268\270\252?\352L\361\001U*\203\277\306\362\221\005\223\244\245?\007\247\272\203j\\\216?r\241\345\232%3\227\277z\3415\316\264y\254\277o\032Zv\267\034\230\277\240\274\354\315p\366\242?]\22252\372\004\236\277\206\372\254SGF\210?\227\303r\254\240Y]\277\306~\300P\266\227\221?\321\177V\354\216\377\271\277\206\266d\303r\'o\277\204=\0163\376\240\260?\353\256\004w\033\n\234\277\302\226\377W\274.\205\277\311\t\310\'\325\3540?\321\373\312\375\315\026\203\277,j\300<)\262{\277!\323\336\307\035\001\264?K\326\372|P\014j\277h1\356,\374a\210\277\370\017\321Vn\355\244?\343\252\252\373 C\246?\357\301\242\232\246\242\204\277\"x\356I\024\010\243\277\330m\032Wo4\223\277\334b\233\241\036F\243\277\243\313*\227\337\304\222?x\205\247\274bu\261?,\216[s\2360p?R{3!\263\371\257?k\241\374\210\000\034\211?\200VW\341\270\304\251?\341\271\305\336\363r\267?o4S1\316\207\272? .K\274\n\246\223\277\341\213\214\301\0077\234\277\006\266\327\353\252h~\277\212Nm\304g\321\266?l\314\252\257\207\022\243\277z\007*s\262Z\226?\303i_\212\001$\212\277\373(\324\375|\220\266\277\212-\236G\237{\236?\004\304\201l:\377\263?kb\334\314\256\226\260\277\010\024\363\363\242\016\266?9\363\033\332\231\304\257\277\262\'\366Qly\205?\374\252\034idT\233\277w\223\277\262CTi?\331B)\336\342_\243\277\013\024\355C\217\207\225\277\"n\360]\216\303\260?Q\2765\242\314\367w\277\236\032\214\252_\325\257?m\310Y\374\257\004\205?\251M\341\371\364;\276?t~\000\325\341\361\271?\323oT\305V/\262\277\335g\254\245o\367j\277\021%\340\323\t\021\230\277\234A\025\213zW\201\277AL\336F1\233\241\277\272\006\330\t\244\216w\277D\010L\030\272h\033\277\031\351;\312\353\007\237?\037\231Nlf\345\275?\260#\315-\265<\260\277ru\302l\336\307\227?:\035\343:\036\376\247?\025\321\265]\247\222\272\277D\313\271x\221_\221?\237\235a\230\201\372\241?\267\315\217\303\254\224\227\2779\244~+\r\r\261\277\344@Wro$\240\277\t=\231osV\241\277\201x\0025\254w\225\277\010y8\355\212\326\256\277!iS\251\2241\220?\014\002A\225\216H\271?\342\3476a\376\200\240?T\206\225\246\365\266\240\277\260PT\306\330\322\221\277f\3338\346\027\220\260\277\007\001\253\232\036\250\274?\360\377y\302\032\324\214?\003\372\210Q\327\223r\277\013\252\375d{\276\264\277NW6\247\354c\254\277\215\226W\320@\312\266?\000\261/\372\003\322\263\277\217\264w/]d\211?f\212YE;\343\263?\245\373\224+\275\032\224\277\250\030\334\014u\243\234?m\'\"m\351\033\262\277K\237C\272\006P\247?\325m\271\177\276\204\235\277\331\265&\276^\017\243?\2021\343^\006\031\271?\277\272\2768\350\000\215\277T\2639\335\026\003\246\277\037\024\344\005\344x\260\277\335\035\331\3341>\300\277\377F\\\243Bb\245?\302k\244[\001V\205?\361\236\245\201\005\216\215\277\370\215%\316h%{?\312\363$\002\243Sm\277\037F\361\273\274i\210\277\232P\2256\321Y\256\277\036\270\000h\r\377\270?\327@\177\347(>\232?\355\366qA\317:\267?U\021\311\270\266\035\240?\2619\316~\275`v?ibr`\252\017\261\277\t\227\241\266?\311\267?p\024\266\363\342\206\262?z\261]Kl\330\222\277K\260Z\000M\347\230\277g\347J\205w<\222\277{\211\233\026V\264\256?m\233\232\202\253\214\224?s\026S\021\307`\242?\000\274\000\367<{s?P\322\2075\264\026x\277\253\230*\350q\254\227?\372+\215\256l\013\265\277\321\257\332\216\2734o?E\002\335\004,i\255?\235h\357[`\372\241\277\353$\312,\225j\266?\330\257\201W\307=\255?\332Eh\255=\262\251?nAx+^<\235\277\306\025He\233\264\217\277\034|\313\344\341&\257\277\221u\363\354p-\300\277o\245|\324H\032Z?q.\036\007\373\032\261\277\236@\335B\204^\256?z\241\233\007w\334\226?\374;3s\005u\261\277?\222\000\225^\262\310\277\360\307I\362\231D}\277\334\351\035\002\341\375\265?8\304\005\017\313\256_\277i\033\250\245\266\253\246?\301\306\311^c\333\260\277\330\003\007\210M\220\241?#\226\341\031\250?*\342\371.\r\214\256\277\323r\305\377\007s\264?\377\334D\363U\334k?\320dQY\254|\220\277\273\241\246\267\\\311\271?\261\013j}\230\247\246\277\254&[\205\346@\217?\305\3118\213\262.\200?\025\233\302\020\210:\240\277\203\272\027\340m(\261?7\000)s\316y\241?!\035\377o@\037\237?\302=\336\253\322\331r\277z\365\204\n\032\315\231\277\267\200\207iO\363\261\277$}\001\224\2344\211\277\315\252G\220\203P\246\277\357p<8\303\320\245\277\361\331S\225\335\263X?\002h\311\361\370\256\234?\244\206M\233\333o\260\2779\301\010\2116\331\252\277\204Y\242\311_\t\224\2771uEt\345V\226\277l\312eO~\\|\277\000\023-[K\263\203\277\274\262W\244\240\213G\277ID;\374[\"\241\277H\320l\334\337\310\213\277-$\374g\016M`?*\306\325\3660G\263\277\302r+G\202\024\220?\037u\262\021(h\254?\005\357A\276#\355\261\277\205\345)*7\363\207?l^\241Y\263\212\234\277\365\305U\2030\213\245\277\326]\360\270\222*\265\277\343\003\326iT#\234?\204\235\362\332\312\356\216\277\022Y\004\026\303&\300?jt0:Mu\221?\026\010W\253\372\341\240?\241h\203!\306|\221?l\371\370{\365A\206?!\274\227\365\354\006\240?\324\252\2742\271y\244?\373\237V\330a\353\243?lh\224B\303x\260?\372+\245r\rJ\215\277\022\240\206\t&\214\204\277\313\0108\261}\026\234\277\356\342\323\266%\273\252\277Av\324\204mX\226\277\260\361Su\004tk?{\324g\246\030\357S\277\266w\0260b\356\222\277n\330lZ\210\357\265\277H\233\331\026\354\317c\277e\346\310\037\300;\265?T\377R\256\357X\224?Y\334!\211RC\231?\341d\274\220\376\331`?\334\200QJ\223\317\232?\356\322\262i\221\204\215?m\004K\230\005\220\262?\351\321\316\033\023\211\205\277\"\321`\271\272?\264\277\007\342\2216\034+\273?\326\224\237`\360>}\277D\254Pg\t\366\265?\214\262#\300\037\333\231?Z^1\001\234\206\206?=\3126\324i\353\244?\351l\266\363\314$B\277K\257\231\263W\201\234\277A\033\210\333xY\205?m\010\273fi\026\272\277\253i\263\237\023\255\275\277t\255\316\226\246t\255?\2536\347\n\336S\240?l\225\321(\271\315\263?\314\244\032\226\274F\220?\346\336\357\263-\220\260\277\200\244\350\301c\214\250?\224\220:\002l3\244?\343\300L\214\265?\254?_\022\336\262\211\022\243\277\003\206\256\316\210\230\236\277 \010\t\340z\361\232\2774\313xg\2576\303\277g\001\230\"6\341\257?m\271u\2747L{?\342\340\273\252%\202\234\277\nD\336\364\330p\275?p\327\224\245\344\227\255?cb\320`\371\274\214?\024\362\311\177\273G\232?\26069\227}\036\226?\177\305\264\251Q\250j\277\245\177_\001\t\355\234\277\224\t\311\312\"+\213?\0009;\324n\241\265?U&\032s\025\366\250?\356\017\226\324\326\252\255?\246p[\214\366\314\266\277&<.d\354]q?\303J\n2nT\275\277KA\236Yq)\205?\260\036\325HX\"\233?\347\177\232}e\\\215?\201\333\014\032G\262\252?N\351!\313L\231\272\277\350\342\227\345\0012\254\277\234R\250o\030\002\223\277_\007!=\324\300\250\277\332\375Zp.\n\274?\362E\250\207\037\022\231\277\233\010\240\243\367\333\211\277\216\325&h\351\263\274\277\275\014\221\245\311\215\247\277\375*\255\276\026\303r?q\352\237\217`\224\224\277\025\260w\327:^\206?2\354\016}\266k\266\277\272\224z,\262[\211\277\347\270\214\0134\224\253\277,\246_\264\322L\264?\2255\340b\000\n\243\277\330\234\370\315t\354\267\277aT\302\3020e\220?\263o\312\340D-\206?o\243\345<]\354\260?\340\367\325I\213\330\247\277\246GQ\262\340\351\234?W\315\357\312|\220I\277\220]b+\376\177\201?K\034\tg\013\335\266\277\030\322~\264\355\261\243\277qlXe\210\255\256?u/c\340y\303\301?J\205\211:2\033\226\277\014\260\363 \365\275\267?\363>\034\361\373\216\303?\245Y\324OZ\375\231?D\274\017p\345-\221?T\201\211\330W\271\201\277\225\'$\377\310\250\260?R\226\241tw\030f?\003S\353n\200\017\225?\344\263\265\235\225\204\234?\315ea\337\224\203\225?<\224\331\355\n\005\263?\222$\312\n\320\356\266\277c\177\340\332\354\204\244?\tA\000X\035\313\234\2773\017\272>\347+\275?HY\233W\217\244\224\277O\376Kk\245Y\233\277\243\010ROF0\245\277\340\300\242\217\224\355\266?Z+;\351\312\336\206\277l\376\376S\257\351\247\277\370\202\025W\316As\277D\237a\332\353\261\226?\035\375z@\353D\200\277\337a\251-k:\200?\243\264\366\"MQm?{\t\'&\261\201\244\277\020];\341\256\267\245\277W\317\345W\333\314\242?1\265\365PQ\211\270\277\345\210\210\374n\274\300\277N\202g\205a\333n\277P\377\263\366\213q\254\277\t\177}\301\010\323\260\2772L\327\036C\315\262\277\265\264DH\332c\244\277\025$g\241\013\243\245\277\003\306?\365\027\225\275\277!\340\0070I\'h?W\333\n\324!\265\261\277\350\363\340\2306)\263?\260p_w\314\273\235\277x\032\275\344\237\240\237?\275\346\301io*\263\277\241r\353\\^\014\241?\352\244v`\0324\247?\241\002\235\250\367P\266\277k\352=fy\261\177\277\206\352\025I)W\250\2776)X))b\301\277l\003!Z\260;\253?/D\036\032X\204\240?\222\215q\266$\035\262\277\014bL\232\233*\261\277X\260$\217\245w\272?D\315\253\0270U\253\277IMl\332.]\260?\205\275\364d\367\345u?\303\311\247\203\006\341\264?\341_\013\373#\210|?\201]\\=\340\344\236\277.\021\302\t\213}\202?0{P<\203\272\233?ib\303\213Lh\231\277\204A\255\036\010&\220?H\310\344\353\013F\254?\320M#\203\033\263\233\277\001\232\335c\247(\220?\030h/j\215\203\265\277\307\025\001\304\211s\227\277t\346A\036\264s\222\277\216\326F\255\036\036\245\277\276\334x\201\227\352\203?T\033HP\360\367\270\2772\340\370Q$u~\277\0165s\334\251\250\207\277wke\324\354\354\202\277\305\332\225+]r\251?\030\270E\t\035\256\220?\367BV\n\207\274\270\277\005\227~\267\361)\271?\3105@\212\021\023\253\277\230\245\r@\007\210\254\277\\P\231nP\270\262\277\232\031\210\375\327\315l\277\334\004\247\037\031\214\246?WV\312d\367*\250\277\376GD\201\216\270\230?6\374L\005Rf\270\277\263\030\311\024\207\322\261\277#)\017\356\250\266\255?U\324\177H%\265\252\277\264\320\277\023\037\211@\277\005\te\"\251\316\247?\232\014\210\263\205.\204?re\206\251V\004\275?\357\362\001\007n]\256?\031/\032\330A\305\245\277\265zwp\200\316\262\277\322\033\225\213-\036\244\277\2638\034,\323\211\256?\350\370\226\032\033\266\265\277b\315\217\005N\315\271\277\033\001V\005`\025W?=\321*\253%\206\302?:\253D\023\346\324\253\277\021\232\352\263C#}?3h\007\322\341\370\257\277\265\2235\002\350R\241?#\320\315a\302M\242?i\345\033\323W\302x\277R\242\230\245\016Z\232?4\343\035\217j\371\243\277\321\232\367&\232\316\222\277\254\352\314H\361\360\205?w\222\003\242\253\264\204?dG\352\270}#\206?\244\366\234b\250\304\254?MF\311\356da\241\277l\211\t\023:\262\204?\356\365\360B\313\210\251?\027]\330\230~\370\260?\224\341~\027;\315\260\277\024&HN\356\332\243\277~~U5.W\253\277V\027!\037\263\316\245?n\305\n\276\367i\237?\353u\350\266\037\201\227?\234\342\212\252\025p\266\277\2470\002^Kx\240?\243\367\324\367\313a\233\277\251\347\013\234b\360\262?\340=P\356Y$\255\277\t/\025\204U\024\213?_?\231\036.\245\266\277\001zc?\262\034\252?d\2606h\246\360\263?h\341\036\326rr\223\277\025\255B\214\235\310\262?\366\035\n*0\300\222?G\224i\013*\024t?\010[\345\256xl]?\330\346\322\247ov\216?\362\313\336L\014t\246?\357\222\365\211\016\t\237\277\030\330\315!E\265\252\277H\276\\-\010M\214\277\247D\\d\313\346\256\277^?\037\373\2542\216?\267\377\003\365\366\214\267?\034R\014>@-\246?\276\260}G\245\026\260?h\'8\224sV\270\277K\212\353\315\336>\222?\335f> VV\212?m|\313\321\216\030\277?\220\205/\203\\\006\253?Y\355\355\273\347\266\240\277|2\016zZ\370\251\277f2L \037*\247\277\360w\311\223\365\300\251?\207\\I\035\3678\241?AB:\034\332\304\263\277\314C\210m\025\272~??\002\342\262R\027\275?\227\021\027\351T\220_?;\275:\204\220|\263\2771-;\'\371\201\253?Fo\327\344\350\213\263?\016\325o\237-N\212?\016\261\242w(c>?\322_\210\272\276$\250?*\235W -\244\261\277\341\366\220k\244\004\236\277i=}\321\001\014\261?\315e\'\271r.\300?\212\251$\316K\222{\277\241B\021\235h\303\271\277\337\352\355A]9\225?P\377\354\364i\214\243?d\352\272\266\202\275z?\215b\337\305!\235\250?\020+g\236\320\334\252\277\013\216\242\360\004\251\233?NN\362\306\260_\256?\356E\312\327!O\300?\367\232\010\337\302_\242\277-\206~\272u&\223?w/\244o\331\231\262\277\374\351&\332#R\226?I~\343\247h\273\275\277\264C\215d\006$\270\277\216(\261\271\277\226\265?)\373\330\t\237o\260?\017\250\243u\351J\234?D\216\177:MXc\277G\243\273\310Rh<\277\251\032}\2673\276\216\277\2012\023\327?\321\260\277\246}\010\"\263\247\244?\335Y~\264+\202\241?\322?]\371\303h\224\277\007$g\031a)\210?\352\333N=\027\r\207\277\346\260+m\272\005\232\277|e\353\207\211\374|?+\034\352,Y\367\246?x\341\321\304e\276\266?f\224[>\0365\224\277\177\312\214\227\234\222\242?\365\372\001\275\032\265\265?\371\350\321\017\232Qt\277o\033\207&J\\\261?\311\245\026\255\274{\240?]5\304\260\251b\220\277\205It\334\005+\215?\225\010\357\242\253\222\265\277\220\231@\021\274\226u?r\317\370[;-\234\277\312\246^d\275u\263\277E_\237\024C\326\213?w\250\377\001\2606\246?\264r^5\014\313\211\277\247n\263\360\376k\201?\030\220\022\340\352\370S?\305\247\001\2432Y\244\277\372)\332\356\177\234\243\277O=m[\251)\221\277ST\376\336\316\326\262?\225b\273\333=\252\254\277\016I\347.\027&\271?B\013Z`ur\224\277\253\"\017\204\3645\254?\225\025\243\324\377\276\252\277\033\036\316\214\363\250s?QGj\245V\021\266\277\214\302\372\254\332\366\304\277\204\243\311\354\354\206\270?\247\266e\207\006\364\267?\376!\253\n|#\211\277\261\025\256\331\264\233\246?\365\272\2015\320(\241?x2\243\267\347R\201\277\350\351\006\376\325?\206?\242\250\206Q\305\304\233\277\r\036\2215\231\243\272\27719\312\356)\341\240?\343\311B\302( \277\277\222v*Z.rG?\213\177X\010j,\242?\221$J\352\322\030\252?\336\017\025@J\006\242?4\311\277x\231=\240?w\037\206\247U;\252\277\007{\3532\373\005\270?\266\017`\250\336T\177?\001\230\215\037\204\356\226?\235e\332\347\000\374\246?\362\216\306\262\256\333\223?F\377\312\317\204ix?\221\306\375\272!9\237\277\225(\232\020]\351\276?bPX\343x\215\251?\032o\177,\022\006\216?\333\3528l\271\000\251?\311\371MK\010Ni?\004\004\241\225L\026~\277\017B\202\215\266{\250\277[\017@\177\275\234\005\277\274\324\350\272A\337\246?\324\353\345H\225\037\230\277*\311\223\307\221\274+?\245S\203\316\302z\231?\304\007\016\324\246\213\224?3\030\326\371T\337\243? \260%U\336\202\270?!\010\227\005m\250\261?\336\001\216\3611*\226?/\2121w\305\260\240?\014\001D\317^V\267\277\226\020\317W\236\260\271\277\310n\201\216\ry\207?M4\321\263x\256}\277\365\242;\"{1\252?P\225\211\037\001\311\222?h\263\017\3231\313\242\277\274\357y\230\204\r\241\277\337\217\253b\335\002\273\277\305A\345\\B\246\207\277\375\366\377\355\023_\261?\341\341V\352\205 :?U\266\200\354h\247z?\346\177#\222\021\205\217?\272o\210\021\247R\272\277\332\260\274\202\017x\257\277\253\230\034,D\016\263?\007uz\227_\305\301?\316\342#\344C$\232?BR@i\002\256\261?\022\311\365\346\306H\222?\235\330\022=\241+\242\277\222\251g\275\263\321\235?r\322\330\301R\036u?\227#\225\220\003p\202\277\361\374-\366\231\002\266?\353\256\321C\257\000\253\277\"\265\177`\377M\201?\315\224u\210\312|\255\277!`\020\211A]U?\023\223R\255\206+\267\277N\266I\310\342L|\277@s\326\332\023\220\272\277o\355\3340\370\310\205\277D\016\255+\016\214\244\277w}\023\024\215\200m6\216\231\230\244?IK\244rhU\210?O\2428\211\252\t\251\277\337@\271\361U\227\242\277\302.\244\tp\251p\277S\310\220\352*\252\263?\264\217@\ny<\271\277\347\242\3649\347}\204\277\306\262\246\270e\307\261\277n\014$\003g\317\246\277&:\2710\323\336\214?m\013.\212\263\357\227?N\037\361\325!OB\277/X3\025)^\225?\r!cZ#\350\254?\t\332E\377\001\036\207?\321\240\374T\230U\250?\343\316\304?\332\353\223?wz\211\247M\302\235\277\204\367f\340~\352\270?U\324C\007\216\272\266?<\032\002\346`\271\251?}E\215Kh\324\216\277\263}\374\0257\213\245?\262\\\000;2n\243\277 \206#\330\321}\243\277P\224\037N\315j\230\277\203\355\302\250\277\224\227?\ne\335\354\017J\260?\3730\272Rf\275\260?5O?\334f\215\252?\356\034x\213g\021\274\277}\017\250\276J\360\245?Ml\227\360U\306\232?\2677\302\2536x\233\27789$~\240`\240?\232\372\003i\375_\205\277\223\347\303\345\261\032\240?\022\031\024\007\246\021\264?\213z&\270\365!\244\277Y\374Z\007\232\370\243?\267\027l\277\211-\244\2775\"?Z\2576\266?\003\275\022\336\362\273\250?\024\345@h\240\272\206?\376\300^\250?^\366\254\346\360\256\232?\271\003\247\021\256\233\205?\266\343\243H-\271\221?\2738\323\215\231\324\263\277A\006!+\"\n\252?\322\343\2343~\016\244?\241\270\262\375\021\310\244\277\201\225\217^u\363\244?h\221y\373]\233\256?\216\231Y\002\373\253\260\277Am{\302\373*\212?\217]\311\031*\204\276\277\217q \006\202\313\234?\246\327\004uv\364\224\277\016\014\020\353<\237\250\277\333\007|MCU\227?\301\353\204<5\304\241?\357h\257\'\346I(\277\300^e*\273\271\263\2779\373\014\261]\304\241\277>\342,\256\266\270\256\277\221\316\304\036\346\366\233\277\253\207\024\256\354J\266?\032o\250\021\001\262\247\277\002\320\314\212\204\'\241\277\213\376@\303\365`\266\277\037\010,S\002A\224\277\240\267I\223\367\374\237?\342\320\333\267Op\264?\260\333+\241b\305\233?\332\010\243r\3463\237?\363\314\230.d~\241\277\024\034\345\365\277\007\303?\217M\345\357_\247x?\244\344\240\024\304\026\261\2771\234T7\261>\257?i\3365\257\212\346\202?\007\316\324\005\234\362\244\277\376N])\340\n\251\277\262D\025I\350\222\251?j+\315\227\263\254\224\2770\245\271\320\341tv\277\033\032\243\340;\253\261\277\327o/T\303q\233\277\243\221\215u4\334\252\277\360\252!\014\251\303\227?8\230\364x\366v\265\277\035\315\265\310\375R\261\277\230\341mH\312k\222\277-\273\250;\230\241\227?\360\240\310\\3\261\231\277\332{A\333{,\264\277}p\227\314K\205\250\277}\207\006?\273\236\261?\203\207\020\240\271P\240\277\331\236Sh\370d\216\277\336\374\275\212Y*\274?O\377q\366\212\267\277{Kv\354\372C\247?\020\234}!~!\300\277\021\371\257l\250w\220?;1\236\024v\244\256?\'}\006\032K\272\213\277)\327\021H\263\243A?\363\322\201\242\201\212\270\277\227\250F\254\306\261\245?\335\276\213\350]x\241\277\321Yg3\322{\217?\256\264Va\302\234v\277\'\350\013\206\216\001\200?\374\373\177\033\342\244\207?\023\275\327\332l\330\234\277\206\264e{_\242\226?\350\367\374AK\212\240?\204\216S0\370.\247?K\217z\035\242N\271?\315\272h\254f\000\261\2772\005S\017\3735\237?\276\007\267\013\264\271\226?\372\375\337\233\312\036\240?\357`\357;\205\332\251\277?nU\006Y\206\245?\217t\253\347\025\243\230?\224_\230!\343\357\254\277\254v*\233\375A\277\277\177\361n\226\276\302\240?\nn\"\303\251\267\213?\245\022\345\363\354B\267\277U\226\032\207\255\000\253?`t\006\264\020T\257\277$>\310N\364\354}\277\005\247& \312?\270?\207\357O\030\272a\260\277\253\022\370\224\001\270\253?\324\376\035T\317\037\267?\256\333\321\013\n\214\220\277R\310\361\374\177{\246\277\371\033&-1\030\230?\003I\204t\213\376\233?\315\2079\n\257\001\250\277\207\235Fm#h\211\277P\203\217_\237\266\262?\254\254a\206W\367\257?\233\363_\257\215\336\220\2773\200,W,M\254\277\035S\226&n\246\253\277\016\376\017\253\'\000\252\277:\\\353RGPP?.V8\005\226o\263?\225y\245N\312kr\277\234\367\0220\2709\250\277\207\243\333\013\233\361\274?5\215I\261\367\235\244\277\275\232\310\221\013^\242?\006b\024\264x\264\236\277\317s\362\374\226\266\265?\275%Aq@z\224?g\3568\374>\360r\277\356\271T\203\222E\264?\215-z\177\320\244\263?,\333^DU\367\260?Ds\245\260\336\213o?\257\226\"\026n\310\235\2777i\267\2525\206y\277\322\374\206\237O\312\301\277\010\033<\200\210s\251\277\225\370\232\244I\033\253\277\240\257#\226\020\r\300\277\013]Tk\314u\223?\307`+gz\364\224\277)\244N\324\250Rr?Z<\300\361\023\343\277\277\257\351\230O\215\244\240\277\234\220\266\233*\321\225\277S\346\202g\006\343\300?\034{\341z[\317|?\020\016\353\257Q\275\263\277\266\027yw\370\035\224?\222\017q\213=\241\255?\276a\005\305\233\357\215?.\027\327\037\026\347\241\277\357Qi\262\335\201\230?5K[\252\270\353\262?\022\0050q\004\325\225?\221\211\245[\007E\242\277y\234\230\337\261#\267?\240\220\367\2767\222\263\277{\303R\370\037\320\204\277\256Gi\226\013w\254?vW\305\352\251%\252?>s^\224\243a\272?\300\016\353/N\375\262?\032\re\r\323\317\204?Q2\345\377\"\327\244\277\333,\032a8$\240\277\027\024\035\310\032\263\275\277\227 N6\331.u\277=\361\0162\330\265\237?\220\257iO/m\220\277v2\333\357o\262\254?F5\307\352\244h\260\277C\374i\272\213\236\223\2772\341o\331\223?\223\277\313*\212$\354\340\232\277\232@\355A\347\340\221?\221\265z/$*\250?u\365\375\020&y\267\277#\014\257\245\261\035\242\277\340\317A1Q\206\263\277\'k5\236\243~\264?\232qCM\221\261\264\277\203\245y\304)m\240\277\334R5\177\204\341\200\277i:\027\240\2074\271?\304\337!\006\254\352\265?\276:\177\326\231\354\265?\353\320\023\244\373\254\221?\'\3330\313)\202\224\277\211\\\247\327~\370\253?2\01052J\035\215\277hY\327\370\307T\241?\346\025\247\nU\253\254\277\355\260\273\357[\002\227?\233\036\326[\374\316\250?\272_[PN\334\244?K4Z\210\241\030\261\277\3632\034\242\246\224\271\277?:K\206f)\200\277b\220\031\335\\\031\240?z>\240\000\024k\257\277\274(\005\214\337B\242?\336\001\251B\356\321\204?\360\330\2038\306\361\200?\016?\010\360\030\017\263\277\031\266~V\003\324\253?1*\217\263\034\216\261\277\360\035%\177]\243\262\277\237j\200\020\303\304\243\277\033BMQ\326P\266?\371\246\2311\210p\240\277\203w\220\2134\373}?T\0160Oa\300\257\277\302\310\035\311\234\352\233\277\r\336-c\210\026i?\336\033\222\005]@\264?\262\224\355\024\310\270\253\277o\352o\226\273O\241\277\254\374\347\002Unt\277\2475\026$\010f\272\277.\270\244\355\244-\250?xq\000\233\023y\204\277\267]\026\303\2738\262\277\221\305\366n\267\377\300?}\256H\374E\260{\277\342\260L\256\275\373\241?\340K\362\243J.\226\277\232!\021!DV\257\277u\345\213}r\002\240\277\216\333V*\027\322\255\277r\006\035\336L\214\240?\213\376\262\274\335\202\255\277\004\356\033\347\334\251\245?G\345\332\331\302\335\222\277\341\237\236\253a\261\272?\030\273\270\202\026\254\223\277iy\356xj\234\224?\344\252@\003\016\371|?T\200CDS\323\247\277\346\226\355Z\307.\266?\252\274Z\222\221\246\230?\327%\362\363.\007 ?u\032\017\213\250\005\252?\210(_\250b\\\226?\376\232\242vF\211\236?\317V\014\034l\337\262?\347k\016\347xCl?\243?\014\332\345\233L?]\346J=N\001\220\277\277\303\205\347oA\213?\345\177,\231\210z\263?$\204\271\257\315\266\223?\307L>x\220\204\217?.+\251\364\006M\253?\356\rL\rw\206\223\277R|\251?Sk\261?$<\267S\265+\260?\037,eXo\260\265\277\232\354\030-\223nB?(\235c\324\026\362\234\277\"xY\227,\350\262\277\377\337\026\014\315\267\242?\010g\271\257\230r\275?\356tl?xP\234?gp\2318\322[\301??\242k\357\025\257\251\277\246\324\227\315\253\350\230\277\327\273n\304&\221\217\277\263R\0367\272Nq?\255v\214-0q\241\277\373\237\373V\216\334\214?\255kF\342h\355\202?EO0\307\020\037\225\277\347\333\310\213\243G\232\277\322~_z\371\013u?\347:\245X5\021a?9\353T87\021\245?\252q2E7\275\241\277p\237\366\247\341,\256\277\304\351\007D%\021\221\277\024\322c4\361\330\253\277\322\212\332l\036t\267\277\005B\233\233\234 \241\277A>\340h\364\301\304\277\305\277p\233\2405\201?\324\325\277\264jV\270?p\005\030c\210\305q?\214\315\ncrq\214?\217\211\366\301M\352\221\2770\214\221\027-\223\245\277\273\212\030\321\251\375\244\277\022H\3438\233\343\235?\354\335\000\031,\342\240\277\225q\r\304\207\246\232?\017t\353! \325}?\377\312\320\253\221h\265?W\277\360|\211;\245\277\304\357\347\307\241\363\234?\330r\227Ch\030\177?m\362\017\177\300\220\273?Y\200\217\216\365\341\306\277\363\317%*\243\323\223\277\002c\\a\225\240\233?\231\327#P:\003\231\277\233:\332\363?\017\227\277\\\376\027\'\253\"\223\277m*N\3343\003\217\277\325\233\267\303\362d\260\277\353x\315;\370j\250?\3669T\314tj\230\277\006\333D\355df\217?2d\343c\325\342\253?\206\001\245\014\363\353\266?\374\031\343\276\021`\254\277\262\tm\274\237h\265?\033v\027\375\361\210\273\277\036\223U\315s\326\262?\267\010\362\370:N\231\277\246\324\030\330Ij\226\277V\301D\201\364C\225?Y\030U\236F\220\234\277}\317%T\251\017\237\277b\014\020\235-\256\245\277\357/\343\336\022\212\004?\347\003v\212\223x\212\277\302\303`\214\257I\206?\254D\3213\002T\274?\221\225`\307N\2139?T\221l\226\323\370\204?\262+\326_\273\345\204?\250\333bm\"\366u\277\302FZU\010B\267?\264\371\323\246M\332\261\2776$\201V2\255\254\277\370\223\273\007\302\260\202?k>\332_UI\210?9q\257]v8\225\277\200\370\020 B\350\255?\375n\210,\341\210\240\277\313\247\373R\213\272\275\277\034.x\301\334\245\237?\370\214\036\310P\355\303?y \326\215\206\n\206\277\306\2130\225\271\204\234\277\206\323\365\351\003x\241\277\375\212z\360\213\305\242\277\323Y/\270u\307\277\277\264\314C\210\313\301\254?\304\246u\320\317\335\201?c\303\212\2624\214\243?~\r\327\366\'\341\244?b\231\tA\207k|\277\271\215i\270\351\004E\277\036\371\301\234\327P\236\277f\330\332\233\251\336\261\277k@C\243\265\242\265\277\022\272\247\n\374\302\253\277\363.ck\306E\260\277.\0237\315,+\207?\014\355\365?)Nv\277\0354>}\266\265\252\277\037\356\267H\225\217k\277\376C\221\313)1\214?\340#`\177)\333\251?\225\363t\346\334\300\264?1*\363\317H`\245\2778\016\224\303\336\300\252\277\032\205o\3272+\202\277\367%\363\363\355\260\200?\245:\0259<)\247?\025\237\023p\306Dv?Yy\237\001{\000\227?O\254q\010\010\014\220?;\306\342\373\335Ir\277@Y,\255K\016\200?>\322#z\372\304\257\277\322UDr%\007\270\277\224K\320\307\333\363\252?N\312M\004ex\253?_:j)\362\024\224?\303y\250\3102E\271\277t\232\331knj\261?5-\261p\207\262\254\277\270\000Js\353\036j\277\351S\270N\n\236\246?N\315\213\211\346\005\227?\225\010\220P\241L\256?\224\006\321\246\266\256\241\277\374\226\207\217\272\013\266\277F*\321\225\000\356\261?{[\025;\354\002\243?t\254\031\336\314\370\260?\215*\336x\013\200\253\277c\r\003*\224\220\272?\3477 \212\2251\257?\016\001\366l\013N\200\277\317\003@\246\215\276\300?\301\301\223\007\355\327\266?>\333\004\311\301i\253\277\177\267\3222\302\376\264?\211\222\220L\372\220\262\277X\361\307\376*>\271\277\315\324\177\244\'\177\256\277BZ\371\014\214\313u?\213m\312Oz\010\271\277?a\032\351\177\203\276\277\373\320\3653\317\306\244?>\263\230\032\204\\\266\277\337\300\277\241\323\036\261?\035\240G\337\304\334\235\277^QR\316O3Q\277\305\251]`ei\260?\3123\315\2166\275\215?\273=\361\304\375\265\201?\200[D\331\227\316\237\277M\364{\222\003`\232\277\223\370:\310\237\022\233?/\002\000Q\236\301\242\277\217\301\2054]2\243\277n\0146\311\225)\233\277\022\335\273!\322\037\235?+\367\014\352\255\376\224\277\347\242\241\033z\262\260?E\262\036\232\271\t\240?F\0135m\346\356\260?R\307k\023\265\313\231\277=:\231\227\022\300u\277\377\337\017\206G>\246\277t\374eS\265>\223\277\3514~\242\224\025\254?\206\r\310\030\005g\244?\275\307\006\341G$G\277\306\306\231\232\3557\243?K\031\nY\251&\234\277]\205\037\232G\265\225\277\314\344\362\020\201{\234?\213\376K*\250|\220\277\003\211\335\224\244\031\255?o\345\330\343\331\273\230\277\007a2\277\330\270\252?\340\271\377\374m\320\222\277-7\202\020\027\236\236\277\370\30340i}\231\277M\340;pI\366\215\277\303\242\202\202\032\316\270\277F\264\315\2033#\211\277gN\347j\3215\241\277\253\r\271\352\007\222\253?!s\347\217\271\241\253?j\007\217\264\277\214\266?k\315\243\3427\230\243?\342@c\275\301\213\261\277\201\315\335m/\344\300\277\320\251\025\t\374\016\237\2779\301\375\324\306\261\251\277\022\200\244L\225f\260?\255\326\300\335\201f\247\277\250C,\266\020\232\242?N\306\242Yx\240\203?\r\340_]\037?\263\277\223\362\020>py\256\277\335!\355\21757\271\277\r|\323$q/\300\277:\201\337\255\376u\202?\323u\213k\364\350~\277\014\231\376u$Z\254?\277\013\367\254g\031\245?\002h\005g\301u\267?\352\213O\325\312\021{\277\210sy\334\017\016\264?\036\353\027\335\2120\265?f%\341\224V\344\225?\t\230(\231(|t\277\207\245T\271\262\331k\277\255\375ZJot\262\277\016\r\245h-\014\265\277\362\351\305bY\250\251?\037\231\315\037)f\301\277z\006U\230\342\010\247\277\341\026\331hO\315\253?\220\024\\\0209\221\263?R\247\235\240.\351\257?\366\3652\323\311\220\206?$\264\2511n\300y?E\230\3109:V\244?\373\222Y\236K\367\240?\336\206\013\252\303\010x?FQ\250E\t$\226?\016\261W\244*\305\227?\265\201\252\231d\213\263?T&\"\265\337\335z?\274\250\313Sl,`\277tL\251W\215\366\260\277v*tW\362x\261?\245\272\023\256S\322u\277C\305\316T\273\323\275\277\027@\335[\373\375g\277\214\332\261t\223\333\260\277\002\243c\375%\'\227\277\274\274\227\330z3\244\277 so\036\230\217\205\277E\024\261O\031\344\243?\312a4_\236\365\233?\365\022O\313\205\033\216?\362.\221\352\031\301\220?\347\025\372\242m\274\264?RB\230\364J\023\214\277\203+Ux_\271\260?\020\212\361\007|]\255?\276v`&T3\272\277\006\242Ye\255\253\240\277\345|jZ\206G\257\2770oi\370\013\276\243\277\355Yq\261PNp?\302p\326Zg\270\241?fN\031q\334\023\227\277k\225\344\234E#\250?\014\016\263sVKs?\247\033JpN\001\220?\352\356GN\234R\266\277E\333\313\303k\245\260\277\374\265\253\026\353\335\255?D\034\362\027f\265\241\277\326\"\024\277B\257\251\277\006\316\264\3123\263\233?N*\226&7\335\226?\370}\206\277(\251\263\277V\'\233&\332y~\277\314\303B\271\233\277\273?\236\214\027\222\321m\230\277\014\305\236\303\355X\250?\204$\030\257\031\220\210?\337\356\244\347\2763\236?ThV79\330\252?4\265I2\324\367\201?\375\310\250\234\306\240\260?^\320kJ\357j\261?\343\222\025\r\227\"P?\022\\\221B|}\226\277\177H\005\370P\310\254\277F\313\257j\306\357K?\350\\\261G-[\262\277\024\215\351\3267\224\257?\311xw<\253\020[?\003\007\316e-\177\243\277\242\"%r\004^\224?7\334\"m\330\034\262\277\311L~\274\306)w?\232\304\220\234hG\245?Z\264l\326\216d\263\277\273\000\204w\333\017\265?\000\360\371\r7 \274\277\\=\224y\003\nw?\243\261\r3\251b\225\277\312\260e\322\334\220\240?:\261\031\347R\274\223?u\340\306:$b\262\277a\r\246\332\343\376\237\277\361\250\030\334l4\235\277\360.D\037\217\203\260\277\232\'\0035-\330\246?b:X\336\300E\242?\034\2744\033\032\322\221?&N\274F\272R\242?qaP,\323U\206\277\231\264\255\334\217Jx\277\314\275\235\353\207\276\261\277Q&\220CSpq?\265\266\0106\024\300\242?\272\2241\312\342\301\275?\251\212ut\016D\260\277\230\035W\335\241,\222?-\323\010\017w\356\210?V\333\366r\226(\243\277\252\310\370y\347Y\233\277\303\001f\242\253\302\246?v\253}\241\003\303\245\277\362r\363\347\177i\246?\270\305\336WX-\230?\253\257\374\301\243\266\260?\366\003]\347n\3728\277\221\356\026$\264,\261\2779N\362\267\310o\247?g\240\257\336\366\203\210?\017\341\314%?\212\222?\331\243\216f\013\233F?\rm\351HT\226\262\277\270\236*\225\363:\242\277\2030\252\nV7\236\277_t\321hS\022\265?O\025\007\250F!\242?\225h\234\344\'\342\241\277/\350\352\221r,|\277\225Yc!\376\225\302\277\346\272\034\233\320\223\220?\213\'\010\361\t*\264?\"&\275G@\311\227?\302\222\304\257pi\255?9\356O\2109\225\252\277\030\277\201j\220\010\230?\035\267jmV\326\262\277T\213\3360m\027\211\277\350\n8hMR\221\277\261uO\345O\347\274?\323\314\326T#\345z?/\010\345\313\346\201\240\277^^\222\006\360A\221\277\021S3\266\350\315\262\277==oD\251\327\213?\250\201\320-\000\202\252\277AU|\243\324\307\221\277\310\034I\315\023\325\251\277U\2566qW\355{\277bL?K\363\025\241?\274\373\210\221\372e\257\277F\376x}\000\215\224?\275;M\340\204\236\250\277\243A\255A\375\350\273?\007\\H\341\202(\225?\240\236\357R\222\230\231\277\376\361\231~p\324\254?\215vKZ\326\221z?\266\370\020h\216\365\252?\336qXr~\205\220\277\346\367\200\007\214\332\224\277\335\205\033.i\032\210?A\240\343YtR\242\277\356\353~\237\213\205\231?\352\374\260\000\377\257\244\277=\264\306\256^O\217\277\230\227\364\355\353)\251?\224}bM:0\255?3\002\312\324\266J{?\375\227\360\032q\261\251?e?\r=\350\317\254\2774\201\034\310\360\027\237\2773\255v\035\301H\263\277\364\223\013Q\001\244\220\277\237\325\003b\222\247i\277\371d\363\024\205\225\230\277i\017\364\345\212p\262?\266\303\324Fm\016\225\277\321\277E\001Z\377\261?4\235m$\312\013\235?lC\2757\034\246\243?9\275\317\200\035\037\231?\025\037x\342\025\270\250?\372\216\231+}\347\224?\222{Z\010Z\253\234?\017\216l2\273\302\230?uqz\357\363x\242\277\217)C9\3560u\277\255\376\0172\254\033\266\2773\2308s\023Nn?\334\356\324Y\316\020\237\277v\270\252\233W\360v?\350P\021\211\267\027\255?/\305b\316\364\007\261\277:\215\340\365\013\"\264?*\214B\373\347\223\211\277\232\336\371\222E\243\237?\266\207h\374\262~\247\277\007\264\270&;\010\242\277\0215\030\346b}\267?\232F\2130*\n\225?7sxa[\021S\277\032\206\317\350l\311\264\277*\3414\213\316\216\244?M\202\242\337\334T\206?\200_:%\023\306\262\277\253\236\"*\305K\204\277\361\347LO\351\221b?\3034R\332pY\255?\264&\3320s\237\216\277^\3166>\272\217\264?Y\231\032@\211\246\243?\0135\203\214a\376\247?\364\335\267P_I\227?\332T1.\212;\265\277\260\005\365\210\370\325\223?(\201\234\324\343\221\223\277?:\311\250q\246\264\277 &_\341\332^\235\277\371\027\304p\217d\246\277\334\215o\322\207Q\275?\262\217\240P\266\305\243\277d\334_4\366\241\234\277\021\375\r\232H?\253\277\274/%\2000\336\267\277\251\272\222\305\302!\240?t\t\300J\212\273\262?\360\347lH\277\236\262\277\273\302\241\024\226\336\241?\225\306\256\016\370]\261?\310\257\305\3672\212\230\277\300\2439\202H\346\220?\317\006\013\277\000\221\263\277>6W\016\255\340|?`\273\317\005\320\257\246?~-!aa2\220?\267\\\216\343xm~?{\264\370\333z\325\254?Tn\204\033\252M\253?@\016y\267\246\324\226\277D|\327\255s\352\251\277\256\022\301\211z\367\236\277_\244\271\233dB\232?\346\302\202\253\256\306\256?\242\025\025\3471\030\260?\264\353^\373\206\372\211?\370\355\324\177G\254\233\277[\'\235i\3007\255?\311\345\320\377\010\301\244\277\324\t\027\324\347\271\245?\013\204 \212\214\335|\277\260\266\321,\274j\212\277\005O}\225G}\265?\034\024>%\375\324\200\2775\304\300\225z\303\222\277&\nN\211\031\304\250\2778v\331\3649\251\225?\224\341\036\223\3169\213\277;\244\206\274\227j\245\277+6\2220u\227\240\277\271\236O5I\252\263\277(c\356\303\335\236\241?\221\353\202\r\273c\232?Q\264X|\324Y\236\277\t#\341!\356\n\250\277\355\3419\331iI\233?X)\245HX\215\271\277\350S\370\247$t\274\277\235\353G\t\303\275\234?]\247dS\243\332\253?3\025\235\004J\201\272\277\002\253\264]\003\016\237\277\233\340\021d\377\232\275?\377\\\342\361O\211\242\277\360\340\035\324)e\257?J9\273\244Cu\227?\230n\376%6\031\210\277l\031\217\253eo\247\277k\377\036\272\356K\247\277\305}O3\253Zm\277\257}x\372Bw\267\277\323l\013\220(\316\261\277\005\362\223\t\273\311q?\\\263\362\201\274\255\251?\tYC\341\001\221\222?j\2620\024\374Wu\277\'\350\013\351\345\204\253?\202\263\206\220\207\267\243?\323\220\350\341\327\001\221?\367/\305\031\240b\247?#\340j\252\260\365\215?\312C\315N\237L\244\277\274\373\203\031:\"\211\277\246a\366\307G\261\300\277\223\307T\344*=\227\277\270\306\\ \306\343\263?6qk\217\312W\246?\350\350\202\205\331S\230?\314\331{0C\273\227?\\\242\3015d\362\177\277&\242\023O\2464\237\2772\232\356\261\205|\263?\341\037\303F\345G\211\277 \350|\000\201\371\246\277ao\365\244\004A\244?\325\334\272\026?\213\264\277R>\220\201\303k\241?\370\265y\347\033/`\277\0237\235\235^\237\275?\24775\262X6\265?p7\301\354y9\210\277G\356\270\003C\361\252?[\r\246\361\253]\244?\357\250\31610\'\275?\276OQ3@\177f\277\230u\326P\320\036\223?\302\373\313H7\232\263\277\345\375\037\360\306\\\222?\tUM[\314\365\276\277\375\023BA\r_\250\277\022X\3061k\221j\277H\314\303A\377\356\271\277:CM\016_\201\255?\016nPW\326Y\216\277\356\227\335\271K\357\226?\004\363W4X\323\216?#\037\006\361S\036\302?\276\014\376\034\207\231{\277\253\231\311\3764\217\213?\001\010\326\202\352\330\237\277U\370nY\264\002\242?\301\037\005 \225\345\266?EC\242\206\243\352\251\277\234mqj\300H\203?\202\216\3638a9\220\277\010\321\341\205\t\321\276\277\222\320yDz\243\207\277\300\330\037\330%\310r?\264C\247=\377\356Q\277\033\"\362(\374I\222?\243f\257\305I7\237?.~\266z\215\346\024\277\303Z\232\343\016\211j?\014=\355\033\332}\252?\357\334\305\244\317\312\223?\356\242\035o\357A\264?\334u\366\265i\200\244?\201\2146\322:\"\226\277\013:C\367\210p\222\277g\241\005\222\2037\231?\367-,\242-\256?\236\301\231H\352\275\303\277to\206~{\364u\277c_\277:\345\356\265?\373\030\036\373\016_\222\277r\000\357\373u<\232\277\273M\276~\350\000\253?V\r\005\327\322\320Z\277\"\362\3608\244~\215\277(\256x[\360\202\272\277\346\031I\347\334B\251?p\337\216\271M\254e\277\263#y\211Ns\235\277h\215\333\372\373\227\265\277`\323\233\273A\371y\277{U\375\225\313`\221\277\215\020r\344\034\024\232\277\200\347\003\250\022:\237\2776\225\030N\'\263\216\277z\311\267w\033Cw\277\031o{\222\311i\260\277Y\332?:\354x\255?\372\352@N\362\242\254?#\3647\216Ws\250?\n\334\247G\3230\260?E\336\024\214\265|\264?[\370\3231\005U\227\277 @\243\036\037!\271\277\234^y\2137$\224?\366\273\003\177/M\243\277O0/[T\004\231?\036\227\267L\272\263\302\277\332\3365n\n\370\261\277\372\363\034\317\252+\263?\312^\374C;\352\243\277\304A>\206\241\222\260?\340\201\233R\026\037\202?\207\2650zt\213\256\277V\230!o_\355\210\277E\214au\274\003\236\277\330\307\223h\234q\264\277\257#\224Gx)\266?\346\262[A\344\022\236\277|\177\331\r\370\310\220\277w\030f}l\206\235?^\027\035B-\024i?\217<)\345\2032~?\263k\232,\340\374\267?\244\346\307\370:Tu?\350q\010\3677&\237\277\312\350\337\231\316\034\262\277\270\224\003=\031\350\232\277\212\\a\022J\276\\?\032S\341\357L)\257?O\313\300\227:e\215?G\254\314\203NU\243\277\335\037\322z\333 \212\277\335\220Lf\214A\247\277\023W\232~\273u\257\2777\034\367Q\014\034\221?\370\301q\247\246\206\223?/\227_fu\314\226?\306\272^<\242Z\231?\242K;\235\300\212z\277\252\235Z\250A\354\207\277\224.\305\376A\247\240\277\335\364I\303\370}\242\277\001\266\265\314\272;\221?\374\031\256\272\321Z\227\277J\245^\304H\016\207\277\t\314\341qLu\251\277\034\261\000<\306J\210\277}\2008\341\273\237\266\2772\370\\\314\335\037\241\2779|\362oyga\277{\004;9\366\317\305\277\335 \213\363\206#\247?\240\036\231\005+\310\247?\313\321H\247\317u\260\277\354]e\245\347i\221?\342\237\"F\323Q_\277m\323\r]\330\215\256?\025\320\277\035\000\254\207\277|\311\302\206/[\254?\007o&\211Lp\270?Sp\222\316\326\013x\277{\214\267@_\212\235\277-\004\343Q$\315\243?\254b\306\031g5\272\277\211g3@\367\r\254\277\377s\0143\021\002\260\277\365\225\006\317\234_\270\277\031t1\222\302\241\241?G\247\001\007\275&\207?\316R\311I\272\335\232?\336\336\302N\344\363X?A\371]\372\306\372\251?\"4\376\246\240Y\177?,V\350\335\250H\272\277\237\223\355\302\346L\241?\205\034\200\177Q\027\207?\307k\036~\021\256\223?H,R\213\212\200\251\277Q\236`o\023\232\265\277*\307\312\236\357\000\233\277\001.\372\304\014\273m?\321H\002\270W\027u?\276\364W0\363\027z\2772n\254\227\233\246\267?\3178\334\325\374R\241?\317\3226\2519I\266?\022\336\256\325\tc\254?\256\252[\346\245;o\277\244R\242<\314GZ?\014\014&jg\376\217\277\\\002\272~V\226\230\277\337\252\244m\251\\{\277*g\023N\242\025\231\277\006\036dJ\371\330\222\277?\345\341e\024\004\227\277\3131\007\351\312\341\246\277\205q\2134\301\255\215\277\273:C\033\224P\274\277\016\323\2357\'N\264\277\030\271vJ\002\322\241\277)\332\003,61\200\277\324\223\276\370\262\220\246?\223)\365\273\267k\257\277?\235\230\3714H\251?~\345\3705\235\273\246?\323\301\325\361\000\207\241?n)f\273g\021\266\277\301\303\250!\021Ax?\375\367A\376\345\016\262?\020\\\340\275ve\217\277q3\260tp\236\246\277U4>@\277\274\231\277\3028\350\004Qo\210\277s\010\224\\Y\271\261?\357[J\n\203\340\206?\333-\354d\255\351\221\277Z\210\350\371\340\273z\277\255\320F\001\374h\254??\355\254\377\371\222\262\277\334\306\342\325\317\265\230?\355\'\243\020\323\314\245\277\340\352\0359en\227\277\t\3641\316\001\206\236\277g\210x\312\207\322\206?\250\372\302xYq\252?\305,\225\272\200$\275\277\354\242j\322\200\231\241?\026}\366&\214\352\265\277\300\2309p\362\\\220?ac\031\330\334\244\201\277\006\021^\370 \017\240?\372~\372fw`\231\277r\0038\004\200\253Z?>\332\366\306\300\341\224?ooc\014\t\017p?Bu\026\232\332$\252?\204\336\347\273\203\332\233?\313\377\357\364\000yp?\264*U_\311\263\207\277\341\266G\025\207\373\231?\242\315\336;@\270\266\277\362\264\216]\004=\247\277\316\014\333\237\263\220\204\2776\321\2770\376 \272\277\302\331\001\223Z\304\264\277\247\014\260\203\273\364S?T\302?U\246R\257?\236\036\237\032\203\336\235\277\276\204\344Lk\273\216\277\251\324#3]J\300\277|\357\036Q:\336\250\277\353\342\010X&u\\?\033G\246\323\231\361\264\277Z\253T{\310\"v\277\261[\002E1\024\300?}\033\250uh0\260\277\314\244c\216A\210\210?E\324\243\345\231\232\205\277\257\226\032\342\360\261\242?\3768(\036Oz\233\277\202\361$N\361\201\210\277\3056 ,\305\331\243?!\2771E_\377\204?\306\274-y\264n\273?\021\204<\241\267\321\272\277\267\332\240\301X\305\267?\364~\311H\252\n\240\277\270-B\323^$x\277\320\256c\004\313\217\265?t\344\260\303\272%\272\277(\304T\303\210\372\244\277C\273\315\361\020\220\250?\023\333\236\244\340\t\272\277W\347\020\030c\272\275\277r\206tp\2152\247?\305\016\347\312t\332\225?\224\017\333\'\027\337\227\277\230\236D\024n\235\250\277\314\004{\350\350\201\246\277\210Nu\357c\214\255\277B[\340\364\332N\256\277U}\255G!\354\225\277|\177kB%\000\262\277\317U\031\207p\255\211?w\326\226\363\273%\217\277\252\346mR\234\025\246?P^-\3715\261\265?B\364]\332\241\026v?\\l\331\337\354\241\203?@\'\013\206xa\263?\342*|\215\337{\246?\345\252\373]\357\"\220\277\212\225N\261\344\212\260\277\360\250\325\336\034A\256?\320\254\343\007:Fs?\016\353\301_\334\342\275\277b\227u\273\002B\263?\304\3765\277\211\220e?_t\332~\241\260\301?\031L/U\307\251\301?\377\345\006\340c\251\264?V\205h\232\345\211\242?1\257\367a\275_\245?\016\204\322j\305\302\265\277?\265\317fh\356\001?\014\t\336\010\240\206\212\277\351\354\021L\360\327\227?\021\037i-{\267\251\277f\330:\347\265\310\212?Z\353\310\260P+\275?\213*\006\024\227\001\263?\262m\306\312\262\006r?\225\204\036\224-\211\241\277\236\200zX\220\210\254?\031\030\272\321\021)\236?\310e\025\016\245\211\262?z\212x\200W$\251?8\177=\267_\251\211??\327\376\202;v\241\277\261\237b\232\371\346\220?\307J\006\225\356]\275?\2761\031A\000\254\241?\246\216c_\316\035\261?\225\235_x.f\241\277\275DW\237\354\032\272?\356\267\333\372\364\233\272\277\301\303k\024ks\232\277" + } + } + } +} +node { + name: "layer_0_type_0/matrix/read" + op: "Identity" + input: "layer_0_type_0/matrix" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_0_type_0/matrix" + } + } + } +} +node { + name: "layer_0_type_0/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 20 + } + } + tensor_content: "\240c2\304\374\267\364\2774\231\252\202\0207\261?\236\254\2429\210\243\332\277\332\340\227\244\014\316\334?\353W\365\014<\230\332?V\'\355T\212M\322\277\275CFn\301\364\352\277\330\031\2146\341\300\302?\225\370\223M\330\217\235\277\223\343W\215\324\344\377?\0047\251\357\327\265\335\277%\270\260~\333\311\233?\344,\201\022\244\312\334?}\226\374\n7_\360?\263/\350\327\230\177\367?\031z0\345\220\275\360\277\377Z\230\234\342\264\347?\027\224(\334k]\342?N\362\252\265\031D\320\277\237\210I\350\177\024\376\277" + } + } + } +} +node { + name: "layer_0_type_0/bias/read" + op: "Identity" + input: "layer_0_type_0/bias" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_0_type_0/bias" + } + } + } +} +node { + name: "layer_0_type_0/MatMul" + op: "MatMul" + input: "Reshape_18" + input: "layer_0_type_0/matrix/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: false + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_0_type_0/BiasAdd" + op: "BiasAdd" + input: "layer_0_type_0/MatMul" + input: "layer_0_type_0/bias/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_0_type_0/Tanh" + op: "Tanh" + input: "layer_0_type_0/BiasAdd" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "layer_0_type_0/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\024\000\000\000" + } + } + } +} +node { + name: "layer_0_type_0/Reshape" + op: "Reshape" + input: "layer_0_type_0/Tanh" + input: "layer_0_type_0/Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "final_layer_type_0/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 20 + } + dim { + size: 1 + } + } + tensor_content: "\322y\300^\000\275\226\277\344\272\226\205a\345\311?\3627=\177Ne\327\277\321\303\311\321\306\027|?*\321{\t\217\020\313?:\"j1\030\032\273\277\007q^\340\314\257\321?\222\361\2456\367\200\272?g\235\211V\177\306\323\277\026\320w\205C\301\202?2\350\373,\007\324\312?W\001\261\301B\323\265\277\300\337\214\014\245\301\236\277=\305\304\273[\006\262?{^\250\227\247\021\223\277\267\337\007\246#j\326?\241\376Y;\315\240\245\277W\261BL\305(\320?\273\241\217N6\361\304?\325\374\016\023\022\215\312\277" + } + } + } +} +node { + name: "final_layer_type_0/matrix/read" + op: "Identity" + input: "final_layer_type_0/matrix" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@final_layer_type_0/matrix" + } + } + } +} +node { + name: "final_layer_type_0/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 1 + } + } + double_val: -0.4676774651676667 + } + } + } +} +node { + name: "final_layer_type_0/bias/read" + op: "Identity" + input: "final_layer_type_0/bias" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@final_layer_type_0/bias" + } + } + } +} +node { + name: "final_layer_type_0/MatMul" + op: "MatMul" + input: "layer_0_type_0/Reshape" + input: "final_layer_type_0/matrix/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "grad_a" + value { + b: false + } + } + attr { + key: "grad_b" + value { + b: false + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "final_layer_type_0/BiasAdd" + op: "BiasAdd" + input: "final_layer_type_0/MatMul" + input: "final_layer_type_0/bias/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "Shape_4" + op: "Shape" + input: "Reshape_14" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_18/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_18/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_18/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_18" + op: "StridedSlice" + input: "Shape_4" + input: "strided_slice_18/stack" + input: "strided_slice_18/stack_1" + input: "strided_slice_18/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_19/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_19/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "strided_slice_19/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_19" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_19/stack" + input: "strided_slice_19/stack_1" + input: "strided_slice_19/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_19/shape" + op: "Pack" + input: "strided_slice_18" + input: "strided_slice_19" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_19" + op: "Reshape" + input: "final_layer_type_0/BiasAdd" + input: "Reshape_19/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_20/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_20/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "strided_slice_20/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_20" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_20/stack" + input: "strided_slice_20/stack_1" + input: "strided_slice_20/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "add_1/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "add_1" + op: "AddV2" + input: "add_1/x" + input: "strided_slice_20" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_21/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } +} +node { + name: "strided_slice_21/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 4 + } + } + } +} +node { + name: "strided_slice_21/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_21" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_21/stack" + input: "strided_slice_21/stack_1" + input: "strided_slice_21/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Slice_4/begin/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "Slice_4/begin/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "Slice_4/begin" + op: "Pack" + input: "Slice_4/begin/0" + input: "add_1" + input: "Slice_4/begin/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_4/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_4/size/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_4/size" + op: "Pack" + input: "Slice_4/size/0" + input: "strided_slice_21" + input: "Slice_4/size/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_4" + op: "Slice" + input: "Reshape_14" + input: "Slice_4/begin" + input: "Slice_4/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Reshape_20/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377@\001\000\000" + } + } + } +} +node { + name: "Reshape_20" + op: "Reshape" + input: "Slice_4" + input: "Reshape_20/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_0_type_1/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 320 + } + dim { + size: 20 + } + } + tensor_content: "\016k8\324\237Zt?\006O\313\033&\201\236?J\212Mx\252\354\256\277\324\334\307\005#\035\240?\300\177\345\332\272*j?(\236\323\026+v\273\277`\250\000\270~\337\250?wF\031u\301^\212\277\336\307\0021\3530\240?\032\352z\350\362\352\266?\013O\211\023s\"\272?#\276\016Y\'\213\241?\223+\007\177\377o\271?\n~\314qc\230\274?\217\271\377\233\216\251\245\277m\264\202\332\217\025\205?\371\226\231\340G\330\241\277,\001\367\210\244\333\241\277\206\324\306\003\337\237\270?g\374T!\365\253\267\277N\3046\224\265[\233?\3148w\211\007\255\230\277\227\362\273p\013\377\233\277[~b)\312\016\236?k\255=\025\324?\244?\261\013\014&Hf\262?\350\310a\234\2414\271\277K9\307^|\325u\277\264\223IG\244\256\264?\273\316\2573\317\351\264?\3761\235\025\014\357\213\277AL>?\246j\271?\272\000>\252HU\243\277<\317\325C\335\357\264\277\320\335\371\336\320\271x\277\317\t\266\032\247{\200\277\223\374\365\337s\371\217\277)\327c\203x\365\276\277\207\335\376\034\271\3004?\0028\257\037\000\203\254\277\017\313=\361_\377i?\301\265\036C\320\315\251\277H{\205\275\242\014\222?\223y\372z\250s\234\277\030P\367\2410\235\261?(\234\324\377J\006\243\277\\1\263\372\334M\241\277\376\301qHa\352\250\277\323\352\345\3429\253\260\277B\253\300lM\272\242\277\346\357\24310\325\227?\273zl\306\032\210\211?5\235C5\345 \265\2776\227$\274\222[\260\277\217\301~}\345\262\204?0t)4>\361\242?\345o\231\204\243\230\210?\021\230\245\2702\037\221\2779-\\67p\244?\212\367\235\252\235\000\207\277\210\177\355{\320s\220?-G\310\361r\253n\277\232\356?\261n\341u\277\005\024\343\300|\376\274\277\203R\025\231X\273\233?v\323\272\250R\252\263?.\226\004\014\316\326\224?\343\265\r\242\320\206\251\277\027\311\3209\215\031\243?\244\336\221\364\223\262\245?\316\361\312\036X\375\254?\236\022\354w\332\021\240?aP9\353\033\377i?\025\311\246\035\321I\261?\321\312\022\332A\272\257?G\322\202\317H\243\301\277\341\004~YP\026\260\277\247{\332\312\315\217\205?\376\342\320\252\016s\267?\377\210\0232\250Y}?\n}\377Z\257\034\241\277\353\245g#:|\230?\2511\302\270;\311\276\277\361\341\021a\356M\265?\355]n\272\361\247\231?\220`\002\035\223\360\225\277*:\237$\322J\237\277\035\371_\363u\355\237\277\375\237\004\256\333\007\224?<\240b\r@\007\242\277\272G\367\357E\265\261?\254\013\253\373\302\236\243\277\323\202X\271Dw\224\277H\251\3329\275\255\266\277\241W\2154/\207\263\277\275\\Di\224\215\222?\312q\215\004\374a\220\277\2013\204\333=\314\275\277\000N\356\305\t4\222?\364\263\354~g\314\215\277\211\241\301\316\260\275\217\277Q\005~\2465~\274\277\r\211^,\205c\240\2776\365\\7!t\203\277\004\361\335C:c\246\277\371\215L\204w\t\234?h\205\373\305\356m\223\277Y\010(z@<\220?\200~q\233\343\030\253?\227\246\377\317\236\305\220?{\\d\245!\231\246\277\255\215\356\314\246z\225?z\357&Vr\330\275\277\035ZUj\010\373\223?\276\237A\245\340Jj?Z\253o7\006\263\242?/{z\254\226\376u\277\275\366;9r\025\260?g\353\333\250\271]\241\277\221$\036\006\363z\257?\214\342\317>\251\267\253\277}kc\204\335u\225\277`lB\276*\247\262\277\203]6\247q\000\230?3o\231\222\334\322\246?\371W\334y\377p\261?a\333;\256\310\216\273?=C\360\337\321\301\225?\222\242P\201W\217\212\277\363\351Y\245\2640\257\2772\201T\372\232\312\256\277\211\232\201)y\227\206?f\346\361Y\367\364\302\277\312q\342\264\367\204l?\030u\233a|\376\267\277\367\312p\0067o\242?I\362\023\376:U\241?\256\214\323\307\355\005\255?\205\2759\235\203\253\244\277l<\010}\3062\213\277\023\326*P\357?\251?S\277\033]\347V\255?\324\321#\343.\266\255\277\260\244\374L\223T\235?\352\264\247i\300\254c\277i#\001\256p\237\263\277\325\225s\231tg\243?N\2723E\353\312\244\277\032\374\240\260\305h\266\277 \341\034\r\305l\255\277\024\301\262[G\270\200?7Y\267\207\253\231\245?\260\321H\037q\371\235\277`\207\377\3224\210\255?[\033\362\333\363\002\246?\241h\"\257\212\234\253\277\364\313&\020\211h\214?\272\000h\006\245\225\214?Y\302\231\007(\334\272\277yj1\201Ec\237\277\234jN\300\315\342\267?\215\216\314s\325n\237\277\333B\206\242\355\022\202?\337\212\255Se\227\243\277;d\310\376\004\215\254?\2458\242\376\007\340\240?\314\354\205\021U|\271?\2475\313E\341\310\227\277\203\207\310@u\210\256\277\246\206\200\250\3001\264\277\346$Y\031\2202\233\2773\305\002 g\353\260\277\'\2268\344\014\340\263?\004\250\311y5\265\223?F^\200:P=\254\277\255m\036j\355\321\254?H\331Dv\261>\226?\370\317\245\357@$\227\277\360\357\225\202N\032\222\277\006\035\204\273\200a\225\277Q\260\237k\200\266\223?\276\013\373\3241I\222?\213#\027\023\340(\242\277\231q\330\013r>x\277\326Mh\210\354\276\213\277\355\334 \212\310\361\260\277\302 \020\324\036\036\226\277\345\240\356\336G[\200\277\353\202<\t\010\313\271?\234\226\016)\317\035\230?\273q9\014\316\323\270?6\355\243\304\257\005\243\277!\357\302\254\222\334\210\277\372\364\262=\201\351\304\277\253d\017r\246\324\253?R\314\364\362\325\014\203?V\001\021\006\353\272C\277,K\223\334\320\240\221\277Cz\024\356\226t\250\277_W|\374n\245\274?\2125\215\344\305S\224\2774\330\013\324\316\207|\277\334\347C\016\215\"}?\303\320.\311\200\264\252?\241x\257\274\217\234\257\2776\324\003\312\353\253\247?\244V\007N\377\"\245?vg\230?}\035\254\354GC\270?B\224_\2701\257\211\277\326 \031<\305r\262\277_Jn\352+\324h\277\323\357%Vg\204\251\277u\370q\037\214\024\252?\211\364\253\004\342\221i?f\317\305\244\232\235\225?\244m+\343<\033\225\277\32172\305*u\236\277\342z)\255\226Y\243?*|\024\256o\266\262\277Vw\322\351)\316\266\277\231\330\313\017\217o\225\277\316\330\242\027\004\235\223\277\303\246\270$\246\247\232\277\307\220\202\t\273\026\271?\\8\277D\214\215\204\277\372\351\356\013\374\252l\277\203\273>\310+\010\265\277\017\306\365/\004\030\232\277e\244\n\270Cl\262\277.\346\035\211\003\032\241?\356>Z1\3217y\277\243Dp\004M\314\201?\220\221\242\213\332\361\242\2774\'\013?L\212\245?\200\314\253\013M \262\277\302\000\255Rs\3506?\221z}\230h3\262\2770\344\244\247\371\274\246?\251\255\315wW\\\236\277c%S2\304\373\205?\305\027\026k\034\331\223?\324\247\244\232\253\317\"?\3259\216\263?\302\261\277~\021\375\202/\377\217?\301\205\247I\356S\240\277\212-\260\336`\024\261?L\342\355\303\310\321\236\277\352F\317P^\316\274\277\341\257L\252\351\347\265?\030\2176&Q\223\261\277\027\345l\300\213\322\275?sY\274FHs\254?n\350\336\r\303\376\225\277@IE\220u\r\203\277\202\006.\003\251\367i\277\035y\025;|\264w?\340\227\346\277\261\354\225?\376\367\310\312\304\007U\277\332\2706-~o\232\277k\221Bz\n\306\210\277\266\264H\212(=Q\277\224\270\225\266`\344\240\277,\t;\230 \373\214\2772\324\276_\302\325\266\277#\323\352\\\t\221\220?\303\243K\007\315\333\236\277\344l\270np\224\300?\'v\237\036\212u\223\277\335\027\036\260\303P\252?_\036|\263^\360>?Z\272\251\027\367\000\263?\306\367[\364\211\273\240?m\311\223\300\334`=?;@\3631\363\037B?b\377\001\215v2\227\277n\310[o\340\362\225\277\224\233\277\020\301%\247?Co\243\270Z\020\222\277_\351\325\331\336A\220?^.A\235\245$\234\277\304%\202\341V\025\244?1J\266Q\001\344\300\277\204l\325c-\360\266\277\325\211\274\'\035\224t\277!\351Y\306H\r\245?{\000\262\345\n\243\265?\346\r\323\270\224B\244?\025n\006\207V\330\256\277OU\306fc\327\246?%\306TR\013\341\261\277\231\256>LA\331\220\277\231\025\003\210Z\013\252?E\276:Gv\240\231?\262@h\244\322\"\265?\203*ci/vf\277(\245\332\031y\342\243\277\004b\243\032\210\212t?\326\346s\333\262\271p?~7\344\346J)\254\277$\272\213\326\212\201\246?\330*zY\335\355\204?\003\023Q\313\253\275\202?\261m\243\225\334^\220?\261\212)\276\027\250\261\277\270\027\224z\271\242_\277ZP\032\347\3406A\277\217\342O_\270A\261?\315E\304\013\320\357\223?\357\"\220g\242R\252\277\347k.\356\3060\240\277\206\262\375B*v\254\277\320T4\275\202\376\232?\310\300Hj\037\222\237\277\247\327\203\244\325t\251?\013\332\256+\246h\260\2772\331\367\315EH\230?\231\"k\211\237\324\205?\020\252IG\217\276\206?\355\027\333r\031\315\251\277\233\215\032@\325X\261?D\372E\246\r\217\205\277\345\205{\356\2136\246\277\026\327%_|y\254\277\037\202Y|\r\307\210?k\361\205\273j\316\252\277\377Qy\225\245Z\241?\350u2\306\313\262\251\2777y\277I3\372\230\277\030\271\257\264Q\254\262\277\241\347^\255\016\335\277\277N\242\277\345\325\220\223\277\202*\252s\206[\261\277\365\332\257\341\034\237\224?\206\203\246P\301\261\244?\331\342\234\267^%\277?q\0033\277\343\314\231?\233\0078\216\343\370\241\277o\254\367p\232i\251?\224Z\304\262\362\241\253?\242=\317\220\300\343b\277\324 \353\004\3231\264?@\373\227z\256\232\242?\301^\265\010\223\023\240\277\224\202\'M\245\216\250\277*\276\033.\205?\252\277w\217\333\317o\270\264?\215\2049w\303\n\225\277\222\245-\315\223e\265?JP1>\337\342\222?Vs^\304\331\205\247\277Q\004\020\350\272E\221\277\321\014\264\255T\"\261\277%\024\035G5w\202\277\024\226\241\217\027\216t\277\266\304\277\244\230\033\221\277\016\223\2606Zy\212?\372v\221\206\213\004\270\277R\247\006\243\301|\267?x\3400\26055\264?\350\371\315E\000\234\207?\234\026\1779\241\271\251\2771\364\365svu\256?\221\337\200\245\351\270\231\277A5\360\3114)\274?\231\373\317i\\\227\221\277\242u=<\345\252\226?\233\276u\343\235\022\235\277:BG\306fl\240?\030\"\331\306\346\354e\277\215\030I^5\356\250?\344`[\213:\270\261\277%&\230\247\375R\177\277\277E\353\033\007\257}?\364\277\352x\263\213\200\277\241\301B\310\305\375\242?\177\212\252\312@\205\254?-$\320\244\227R\240\277\306\215\305\007\275\217^?\026\307%1\310\362\241\277\313w\037;\264\330\237\277E\257[\352\372\324\241\277D~\345\342\243-\262?\332[2\177\2426\240?\353r\213kY\n\256?\000h\037\'S$\202\277=WD\212\024\005\270?D\232\245Y\017C\221\277:\247?\370l\217\215\277\205\374\336f\330M\267?\316i\356\225\022Z\240\277\003\230GK\203\313\244?3\237\201\300\256\325\261?\301u\224`\227\n\224\277\272%d\262T&\275\2778ZM`\001\222\245\277Y\017\325L\3446\273\277\030>\017\026\207;\253?>\033\315w\205\255\237\277\206\3745\214a\031z\277Wgn\362T\250v\277\344\246\315C\246\021\251?M\372\356`\206\271n?\207\263\275h\363b\243\277\202\230\032Y;\242\253\277\\\024;\303\237\352\251\277V\371\030\254iy\244?\024}\337[\240\262\302\277\361_\265\242J\237\254\277ruo\226\312\020\255?{6\357\271\367:\225?MF\340\336\354\245\250\277\230\n\250C\001k\244\277\33215\312d\216R\277\267 \265O\307\007;?\265\261]-\354\327\263?\214\312\215\362=fD\277r\272,\221g\010\275?\313IS\201\225\372\221\277\200\351F\310\211U\242?\206kmA\315\226\220\277*\217(T\345\007\234\277b+\273\351n\303\240\2771\r\277\021\031\222\220\277\333\377 \004G\217\256?\263b\212\346\315N\240\277\200ej-\316d\221?\2714\232Mb\230\210?\352\366`\253\224t\217?C\0371\356v\373\212\277K\207\210\341XC\250\277\326^\020\360\210\014d?\350L\214\017\304l\253\277/\354D\353\365!\250?Ew\250\"\350W\262\2771i\345\251\214\200\237\277\3731\007Q\222\334\177?\324\251\177\322{\342\251?o\022\226\nE/\230\277\301\341\201\017\347\330\271?pn-u^R\257\277\301@a\257\313_\247?\310a!\226(\337\230\277Q[\300\313\305\311\261\277e\226LI$\022\202\277a\215\333\036q\310\265\277\006}J\377\252\030\273?4l\277v\315\206\263\2770\213i\006lX\244?\n\334\336[\231F\272\277\025\232\255\'\317!\241?A\244\266Wd\221\253?\270\262$\017\231\254-?$7#r7\326\247\277r\2163\016\035\323\262\277\236A\246G\365\210\235\277Q\350\004\305\244\262\204?\320\177\007Vo2\245\277\342H$\336\314\003\302?\372\177\232`v\r\237?\035\312\303{\0333\274\277\211K\356\261\331\305\273?2lhU\374\351\210\277\021mI\224\014\200\246\2777\267:*\"\266\257?\314\333\304\237\334\005\203\277=\313\3703d\354\250\277la\372\234\025\245\257?\rJb%\346;t?\203D\266\376r?\222?\227\2704\341Gr\260\277\301\377r?\265\222\246\277\230\000\320}\266Q\237\234\277Y\371\300\234\217e\230?[\317\242X3\227\240?\235P\033\032D\313\273\277\202\256\221\017px\305?\003\017\205B\355g{\277\327!\354\361~E\241\277\363\322\2130t\036\261\277\267\224\215\215\352\342|\2779\322\335\014n\302\252?\216-\260X\030Q\264\277\233\236M\035M\321\276?\013\032\245|\223\341\222\2775;\223Q;\346\265\277\251\026\254\3617\317\265?|\326\223\027^{\214\277\303\374v5\317\202\262\277\013\263\220:\177\222\263?\346\013r\205\004P\226\277\346\300\250\222O\177\242?\337D\256\222\352\210\254\277\336\010Mv\250\322\264\277\210\016\311\311-\033\302\277\2749\244\010\'\360\202\277\226.\212\306!\177\251?\354(\257\003\316:\253?-\306\365\320\373\311|\277\210\314C\342KV\266?\37731^\225\204\257\277\241\212y\t\025U\221?\023\220\340w\201x\265?i\361\377`e+\242?\325&\351WSM\235?\267\331\002`rke\277\306\240\372.\212b\220\277b!B\311\177?f?\232\230\261>\304\357\243\277\245\023\234\'wr\220?^\004\350z\343r\243\277\364\210^\245\203a\252\277\3247q\323\004{\247?A0%\257\234\351\177?\372\346^e`\213\261\277c\037.G-\337\234\277\347\220\2027H.\260?\262\241\000\263\264\362\264\277gray_a\264?\215|\374\366\241\322\233\277-\007\261)\200\260\216\277\301-\330\316\014e\236\277\273\306\000\211\022N\222?BM\361V\355\206\243\277\327\2107}\377\032\212?\305v\363\206\221\010v\277Sb0\216\\uy\277\3146D\306\221\310\204\277\277p|\3032\003\274\277\373!\214\200q\230\232?T\024\n\245\"%\266?D\373\237\2208\267\261?%\373\301@\252\335\223?\233\230Q\242\372\316\263?o\220(\010\267#\242\2772\375\036\200\227i\231\277+\036\\70\306\261?\263VD\227X2\243\277\022,\202\234\256\322X\277L\206\333\265\r\201}?\212`\266|\331;~\277]\326`Z\364\026\256\277\241\240yrL\224\266?!\266\374\2514\263\270?YG+\270K \227?\3206\325P!\317\247\277;\252\225\235\277Ft(\351a\027\204\277?\273ZX\306v\261?\334\321\220\"r\332\231?`\367\013h\226\352\246?2-\tJ\275\347\277\277\255v\251%\251\261\257\2777d\256\210\354S\225?\253S\267\274\207\340\264\277\365\337\230\020\246\312\263?\272w\002\006\350\010z\277X\231\346\360*\306\241\277\272G\023e\224\350\264?\177\312\344\245\024X\201\277?\374\343\024C[\260\277\241\350\276\226v9\210?\305\327y]\350l\275\277\357_\267@\212\251\256\277\233\033#2\305\251e?\346\326K\353\253\026\262?\243\363]\315T\272\201\277\346u\315D\355\020\264?yg\263y\202\271\244?f:\361L\034\230\214?\352i#m\353\331\252?9a\035\010D\336\220\277\352\204\245\220\305;\177\277\202\257`\227\366\331\235\277\032&8\035B\275\272?+\005\335\014C\035\266\277G\001j\023h\215\240?\366#3\022\204i\236?\355\336u\3671\330\226?\310d\305\024t\235\263?\032\356\030\3740f\226?\330A\340N\376\301R\277\005\320,\274P\265\200\277\235\203\311jc\240\306\277CeYVe\304\243?\010l\035ymw`?\335\243\365\017\356Z~\277wx\022U\022\354\211?\323\363\036Y\303\333S\277m\031\236\\\210\355\235?\271Y\345\234T\237\217?J\007\330\276\241\023\263?\246\350:J\273\377\246?Y\234\262\340\246\243u\2774\373\301\341\222\307\266?b8\222\017z\r\253\277e\263\240\274\0058\260?NK\257\367\340\020\265?\221\220\315\302\341R\230\2773\1778\236\177\034\220?\350\233\210\031\273\215\263?/\257\010\314\230\322\r\277\222\335n2\262\374~?\201\332JV\216\212\261?\247\367y\267\033\262\237\277\221\007o\377\214E\266?\340\321.\232A\223\243?\244\326\346\272\217h\235\277\271\247\261\030\235\233\253?3\037\220H\232g\251?)\200\265a\3207\227\277\225~>\265\306\254\242?}\007\343\366\ry\202?`\341\242\234\364\200\263?T\r\367\344\223=\255?=\323\244\001R\301\305?\t\0309\337\332\211\262\277\365\035\374\245\353/\226\277F\253\323 \014\214\237?Cn->\305\362\270\277NL\000\217\263\343\230?\346\030\226e7\225\220\277\223\216{\2021k\226\277z7\303\216\207\363\243?8\000\200j\0206\251\277\354\344^\365.\222\230\277\220\241\257cl\016\233\277\302\246\205=\354\227\210?\314\235\343\002\022\r\244\277$\312\237\rX\232\247?\203\332\276\321\344\313v?[\317\247\034\264r\213\277\244\202i\343G\233\272\277\370$\271K\004\315\240?\340|\352`I\362s?&E\220\203Y\217\265\277\336\003\033\033\342z\265\277\342\272\276\261\226K\262?\221\017\333\265\001?\217\277]\227\214\230\3703\240\277g::k\333\370\247\277\177\0307\026\265U\225\277\025\024R\013\231\313\261?\307\206L\345\261\233\221\277\371p\014)\226\r\256?HX\036\336\307y\214?Y\260{\306\323\247D\277\327\006\263<\314\265\246?Y\341 (\3102\245\277a\'`\334\260\255\260\2770w\226\027\306b\254?\304\216&\374M\272|\277\270\303\205h;a\201\277,\310e\320\233\352\234?\360\330\224\347\373\242\261\277\337\223\nD\032,\266\277C\357\2212A\362\206\277\201\201\270\024\276^\241\277j\177J\252U\264\226\277M\372\202\261\034\311\243\277\224b\032\337\313\200\247?#\352\270p\351\374\236?\375\305\256F\210\234\223\277\227\3660/\014}\263?g\301\304\0055]\223?\366nQ\335\204\270\"?\335\377J^Sm\252?\247_f\255[(\250?\317]o\216$.\236\277\255\266}\027\214\366\253\277\226@\320\204\343x\206\277\236\2430\345\306\227g\277\366=\t\342\374\003\206?3\364\275\030Wn\203?<\006\005g\366\350\245?\343\033;\035\332F\214?\341\037\006\202\022\334\244?\3716\\\323\204\271A\277\365\245L)\036\006\260\277\177\270\205Ag\000t\277s\244\331\302\313 \234\277k\305\337UF\253\255\277E++\317\366\240\222?\356H\2343\314\270\231\277h\020uOS\226\260?\226\035$\204\\\262\241?@\344=k\323\350\251\277LJRv\307\307\276?\244\304\005\000\366\201\242?\363\235\247\325jy\251?\313\277;\243\325\315\236?\014\273\334B\001\275\226\277,\270\323\353\335\206\250?\016=\303\250\272\235\224?\313\357\237\340\036D\254?9\332\220\"\235\021\242?Ry\334\251+K\221\277\243h\371\213\213\205\271\277\252K\265\304\0133\225?\373\237\253I\"\017\261?\321x\030<\321\320\200?\321\250\211_P\275\220?\234i\001\274\032\313\233?\2464\001\331\205W\244?Y\030I\363\302\354\260?p4\313\312K\222\203\277\252\346\377\021&|\267\277%\301j\301Y\276\242\277\036\266\257\277wx\256?\'\227\360U\231\237\221?\276\265\344\213\363\036\264\277\3556R\035O+\264?\0007\231\312u\202t\277em\2207\216\031\207\277\203\301Z#\322\317\201?\232\3159xv,\244?\326\342\306Vu+\240?\327\221\021\3634\020\245?\"\034-1\2446\255\277\232\237\203\346JV\252?\333m\253B&(\226\277\345\3425\201:\206\260\277\242|\213=\020\t\200\277\316\250\006\252B\340\261\277\014\221\226\223&\217\207\277\300+\231\212\016\3456?1\334\376\205\375\265f?\037\201\032r\352T\253?AX\357\004U\000\257?\237\210v\270\315r\260?\221\266\316z\347\236\231?[bH\022\302x\271?\327\350\035h\262\360\270\277\223F\363@cG\242?\315\234\014\253[v\220\277\241\312\320Wn4\257?N\263\371\3021\343\267\277\207\013C\333U\307y?\364h\300\017\263\222m?\003\232\267\202\366B\241?\272\345VA5H\254?\025\367\027\275\312\301\227\277\006\372\330\327\240%\262\277\240\375B1\231\027\270?\334%\r6.\351\254\277\346\255!MX\207\274?\353\261$\345]\276\246?\246\320\370m\370\254\241?\215\320\030\361P\355\267?\210\242\253\034\205\000\236\277\220\214\037\032 \234\225\277\355z\326O9\261\\?D\221\035\217\363\232\270?\214Q\242\030C\221\242?\'~\230M\\\'\277?\253\307\362\347r\260\223\2778\003\255\016e\321\257?\003\0074\364+\360\261?\203s\334\2240\263t\277]\327\024\213+w\211?-\251D\303\322\374\301\277F(d\035\234\354\260\2778\276\265\257\210 \232?+#F54\371\221\277n\003\212-D\320\261\277\004?\346S2\300\255\277\203\035\306\211\003D\263?o\024I\255y:\234?0\002\220H\016\315\227\277g\036\275\341\211>\265\277\274,\212;]\236f??\035\230\036\306\225\276\277G}\321kU:}?\343\034\257\275\346\331\217\277\254\346\264\022\354\250u?\373\226\270(]\212\243?\316\022O\222\303s\270?\212\360\261\207\340\224\227?\023\211V\265\312\364\252?\251\264\224\225\t\352\246?\033;\005\021w\200\245\277&\240v\232\022\265\275\277\265G\326\264\002\376\271\277UI\034\336L\260\266?\257++\353n\020\252?\343\306\316\352[\356\215?\311o\237Y\"F\250?\215\252\270\301\326\236\261?\246t\2354\272\032\223\277\374n\322H%\200\247?F8\006\266\272\337\256\277\242Y\006]\247\232{?\010g9Z\301(\266?\224h\377\277o\252\252?\334\320\023%\3609\255?6\355\233\356K\243\247\277\366\005\221M\261\213\265\277\322.\030\206m)\304\277\230\242\223!\232a\252?\007uD\330\324\344\243?w\364\016\277J\264\234\277\nK\270\271\320$\257?\\\275D\323e\304\271?\272- \354\302\203\231?$H\347R\210T\247?\372q \340\243$\260?\017\360\353\273\007\250\243?\3716g\272L3o?Rm\022@\263\026b\277\260N\035\'Wi\211\277f\330\204/\341{\214?\213\221\010j\202\022\205?v\345;\314$a\355\276\337\261\304\354\003\231\231\277\203\326\203\037dT\264?\353\n\037\204c\252\206\277\354mK\217W\253\247\277C\226\243\223\364\230\226\277\327\240k\234\202\347\200\277\357\242\245m\203f\265?-\210\303\032\224\347\265?Q(\230\362\016e\241?\373a\215g\r\354\233\277\243K[u\3322\230\277\021C=\010xT\235\277\330>yy\244\246\263\277\345\272\303\016\375\250\236\277*\300[\313O\250\276\277\213\272v\023\005\"\231?\021\341V\201\207$\261?\302u\350K\346\257\265?O\277}\337\231\236\226\277\022v\036\201\211\324\226?6\257E\307\241f\260?\374\204H\272o\240\275\277Pw.@\260\210\220?~\334\200\t\344\255\263\277C<\217\024\350\306\210\277\3643.\372\300\263\246\277^\343\016\221\344%Z?;u\226\324?\207\252\277\322-\271F\346\201\252?(\262M\302\370h\203?!\350\373J-\353\222\277\317\240\313\357\302D\245\277a_\235\204\251-\257\277\"\3623\311\337\303\250\277\014\032\177\242o\002\301?w\374\326\002\315\345\246\277.\223,\207\306\327\256\277\276\316\336R\031\222\224?gw\213\205\312\276\300?\217\3516\022*\273r\277\013$\260Qc\361\263\277\3703\267 \330F\247\277`\026@\030\227\253\244\277Y\275\266\354Bi\250\277r\264\330\214\334\314\207?V\350\023\246\260B\300?\300\351\031\326o\206\223?~E\326C\334\302\246\277\3371 Q\305\017\246?\335#\305q_\314\252?\001\352r\356\225\334\220?\374\365\033\256\232\227\266\277\032\305\3018\327\277\210?\036ZR\031$\356\253?\266\260\242\261\324\007\266\277u\345\\z\210\020\237\277\361\337\027\030M\026\270\277\n\340\340\246<\223c\277\260g\307\337_V\276?\234\027\262\267\365\234\237?\273\035\264\316\007\346\252\2776\375:?\256\025\202\277\357\252#|\221\003\273\277^@\205L\342\327\265?:Yu\205y\205\212\277\336\204\035E\374\026\224\277}\036\262%x\352\221?\344\261\025\t\003\026\256?\216\215`\231\233P\223?\236g\236\246u?\247?\225\260\342\316\253 \244\277\204\207\335\2467\373n\277\326a\334\241\031\023\241?\240\\N\327\272>\241\277\216\344R\332\355\302\246\277<\305PKz\252\224?0\352F\363\314i\241\277q\351{\277\370Rr\277\303\224Jw]\331\262?8\224\240\251>r\217?\3474h\227fr\244?\217\300\247\214\332\255\255\277I\025\333\334\255B\212?\203\317\334I\373\234\245?\211\205\320\377.J\233?\252\362AfO\336\252\277P\027\351)\316\260c?*\\\222m8\337O?\023\225\353P\\\211f?\r\351Ol!I\237?\t{kG\205\320\220\277\273\215\177\361\254c\242\277\272j{<\265E\237\277_\021\241\033\250\022\212\277\334\354\221\227\246\243\221?\215\003\035I\023f\223?\222\325\355\250\3778\236?\003h\326\364\253\312}\277o\277\002%\204\237\244?nh\032\247,Cz?M\326\214\320J\035\255\277x\364\032\212\001\227\222?\337iDU\336G\243?\250\263\350\207\216n\252\277\326z\265`\337X\261?cl\325\276\257\253\274?\372#\237\323\262\264\266\277\344\032\241\220\247-\242\277\007\033\377\027\270_\233?\201\200^\252\351V\227?\377\230\027\237\304B\262\277\277\035\237\203\366\254\262\277/n\336\302\241\314\241\277\327P\274\344l\235\264?\235\220;\336\317\214\240\277\226b\304\211\327\255\223\277\322/A\215 \214\265?\016MU_\271C\242?k\230j\014\376+\220\277\313S\035b,\372\270?\252?\t\351\365\'\265?\2474\177\255\300I\247\277}\202C\261\245?\260?\213#:\005\240\235g?V\217 \002\260j\255?\230\2012iv=\266?\3337\324O\234\267\263\277C\234\214\306S\203\237?\270m\351\201\203\300\245?\247\031\231m\304\300\225\277\217}\243\337\017\010\255?\017\336\027#\352\032\271\277Z\220\225&\374\301\242\277\303\027\343\277\205\266\204?\360{o\342\246\r\202\277`\336^c\325ca?m|\343\300%\263\204?u]\034q1\035\204?\232P\034\037\343^R\277\270\027\314\261\366\227\257?^\2742\276\335\303\211?_\227\221\370\252\234\254?&\221\322?&=\251?\375\364[\275W\363\221?\322\312G\302C\213\301\277v\334\200\255\236U\227?\263\2137\241Q\315\252?a\242\204\312z\211\225?1ywn\250\240\261?\313\243M\365\320\307\222\277Ye\267V\261=\232?\010]\031\214O\230\260?D\002\016COl\272\277\377O\007~\3410\236\277\244)\200\234\313\034\233?y\365F\256\332\001j?\001\227\237\224\300\331\262?=6\007\227D\215\274\277\021 \217\274\023A=\277i\222\3134\257\310\255?*\254@\375v\017\213?\'\370P\242Ii\236\277\324\206E\031c\204\266?\326D\337\277N\036\254?\314\257\265\210\222\370\260\277\305\375\333wqM\300?\207\320t4\365[\241\277\010A|\317\341_\211?q\204;\013\273N\225?\334\337\345Ab\277\233?\036\235\311\244\251#\220?\231S\245e\213\344\224\277\317\226\273\342\204\025\271\277D\251d\221[m\252\277?g\371-b\314\264?\244\037\202\201\007!q?\334k\246(\221s\227?\323d7s\214\272\217\277\370\204\351\246]\335\242?\300\"J\363\2560\240\277\240\354\345K\246J\265?\005\'\336\234v\005\237\27784\365{*\016\246\277\271\214.\341\236q\242?\377tN\010UK|\277\233\225[\330\003\025\264?E\245#]/\370r?\3410\220\246\331\314\271\277\361\177\037i\337\242\266\277\'\346t8I\202\252\277\020Y-r\322\223\254\277O\177F\315\315\014\250?\303\244\006\374\241\373\254?\345k\006\\\271\025\253\277\343\377\265\263\207<\204\277\256@\354\327\2638\231?Y\\\037\261\3026\256?\370\213\351\203J\367\257?\271\237\216\205\366:t?\303\343|\273h\273\202\277\265R\365\230\234\034\212?\211P\023\001\374\226\203\277\037\017[\340\316\321\214\277\226.\271\331\335\261\200?\313\002\005\274\036\002\237\277\307\222\037\271\205yV?K\262a\254J\306\222\277.\351\013DMJ{?\0367S\362c\363\237?a\\a\t\032\211u\2770\204\261\266\323W\262\277Y\351\336BNp\260?\216\034%\251_B\243?oCto\373\334\242?\247\3745&4t\243\277\230\367Jf\302p\240?\355\305F\035]\t\266?\342Z](\325\373\220?\232\017\375\204#\276\247\277\366\034\210L\007$\204?\353\241$\003\311\201w\277/\265\252\332o\036^\277\'\037\313g\032[\256\277\244\206\010K\317\303\210\277\345x\341\022X\030j?~\343J\022*K\267\277\315\014\371/\2163\242\277\217\022\370\017\002\316\241?\351>\233I\211\244\223\277vW\213t\243\t\247\277\021UJH\035+\245\277&\366\2009\374\352\255\277\253X\355H\373\332\223\277\331Ai&\t\354z?\375\r\254>\242.\254?\364\326\034yX\250\213?hy\346\376\020A\220?\032bb\213\364\233\203?.u\346\221Us\242?\304\366R2\302\035m?\233u\362\027T8\223?UK\274\205\355\242\235\277\030\237s\223\375\305\263\277\202\240\017S\037\342\225?\321u\225\335k\340\243\277\232\263\215i\344\257\266?c\215\3760\354!\275?\371q63\347\243\230\277C\221A\347\016\363\245\277\232\351\"B\030\204\230\277\350\021f\224pr\227?H,\300\371\247\010\224\277\020@V\301\233\330\260\277C\276\372>m\210\261\277S\016Q\253\243\237x\277w?\267\376\224\352\275\277\212&\346Q\334\024\204?\031\306\247\356y\266\270?\366\247q\013\0056\230\277\221\355\346\026\344[\255?x\265\330S\262\032\264?\270\031@`\n\'\260?\220\007\3441\315\030\224\2778\206?~!P\243\277\032\263)\"\245V\201\277\266\233D\033{\356\246?\017p\266-\261\333\230?B\023.\232\267m\252\277\310V\314\205l-\263\277F\365@\200\312b\265\277\216\206\305h\277z\265?\002Y\037n\210\023\252\277\251\363\271/O\300\252?\177\251\342\321\243\307\253\277\207\255im\232\022\260\277B\214\211\372\237\031\247\277#\301\027\303}\274y\277\212\222\362\231b\346\252?\260\n_\375W\245\205?\333\226\272\205q\362\260\277\2140\177\304\313\257\262\277\224z\273$\rd\204?\027\362.\337\340\243\244?\220i\225\002\006\304z?M\270\223\244^S\201?J\330\210\336\350;\241?nH\350g\273!q\277f\340+,_\252\270?\360\212\222\010\330\243\255\277\264V\023o^v\255\277\266\035\222\010\275\206\265\277\221\367\270\350,Gj?#?\016z}\002\266\277VAMoz\177\255?\241@\272\304,#\253?}s\231\"\226\347\261?\334c\301B\n\331V\277M?y\337_[\262?\274\332\020\262rC\263?\342zbz\350\021\226?\363\026\307_Gh\203?WY\312!T}\246?9/q\025\331k\263\277\0200\214\252\246\370\255\2777\345\024\247\304L\205?\325:\347\373\310\367\255\277g\265~\333\360\313\214\277\027F\007t\2778\300\277hp\246\202\006=\240\277_\326h\004\031\'\252\277{\n\313\254\024\331\243\277\337\033.i`&\260?\254\326c\026?\344\254?\034\247\312fo\204\217?\005\324\207\276\027\250\245?G[\314~\347\233\243\277\014o\247$\235\205X?\200?7\261\370\005\254?\353\317\324\210z\023\210\277U\005\273\020\216t\251\277\340\273\333U<3\221?\331AC\324t\362\261\277?\304ZN\270\344\247?\264\024\270\326\233\255\250?\205\322\204t\335\372\260?W\312cyP\347\263\277NA\351-.F\241?\'\3049Y\371\257\223?Gf\316{\257w\213\277\262\270\206<\262\312\246?\007\253s\260:\201\260?|\363f|\355\007\233\277oN@\014\315r\220?\021r\036\016\314\220[\277\343w\355\212w\370\266\277.\315\377\264z\356\255?<\337\255o\336-\251?\245\377\212\361\307\335\257?\306=9\r}r\231\277\240^\221rCU\236?\216\033v/\035!\270?&\255\356M&e\213?\205\2112\346\276-\253?\252\265\003\340\223\226r\277\024\302JJiM\263?}\323\254\323\235\223\254?8i_q\303\017\233\277\200)\006\275\016\"\260\277]f\024+\000\250U\277\226\315\211\323\271\332{\277\"\317\372h\376&\204\277+\261!\365j\016\241\277\241\314\000|\017\253\205?\334z\352\220E\271~?+\326`)\030\305\200?\352\251\220\247\323\247\204?\325e&\265\030\205\257\277vX\030;\336\323\242?pH\345\366\203d\267\277\342=\375\r\310\271\226\277z\360\375\224\333\320\235\277\247\023\'\273\274\013\243?s4\002[g\250\226\277M\315\212S\224\256\261?~*\370J\243\223\233?\362yXn\204#\224?F\016\251\363\277f\271\277d\344i\216\024\332\262\277\320\037\310\302<\007\240?\360\221A\366~H\241?\205\204U\243\2309\246?\225\312\272r\367O\227\277\225\002\314\30644g\277\205\307\321\320f^\265?J\225v\242_i\240?\220:\240}\250\223\246?\277\026\266\013\342\355\256?\354T\256I\321I\207?\354ET\214\241\246\200\277\236\367\r\342\354=\260\277\227c\315,\325\263\220?\206 \240\033\346\216\232\277\305/\"\227\336I\220?\212\234S\001\233\302\272?\320t*\202\260\203\241?\354\243\254\256U\336\274\277p\261\211\325\353\r\231?\317OsH+\314\201?\331z\345\344\225\273\262\277\314,n2U@\236\277\363\332\346\340\023@\206?\007\333\367\212\231\003x?\320\263\322\3269\326\250?${\3741o\231\231\277\305y\177\025\207F\203?\363\216\260\206\311\t\270?\021\252\221\031AJ\273\277:\006\222\265\321\300\271?\032/\310|\2520\245?>\303\010\330\007\323\261?\177\201\0343\262\320\256?\014\247\333\241\302f\254?\020\017;Za\034\207?\311\350\371\363\020\027\243?\036(\260\033\262\027\266\2776\221\013\203\232\030\263\277\031\177o\325\210#\242?+\363A6+A\251\277x\343r\022\332\200\246?\361\357b\\\231%]?\251\344\377#\247F\302\277\202\373\200\256\004\027\230\277\242\250wh\3746\276?uP4\364ED\266?lD8h\240E\246?\177O\023\355\251\357\242\277>\214\262\014;lf\277E\206\340\342\370\312\213?~@\033y\202\244\250\277\016\021q\242\242\337\241\277\376\201\265\003N\023\256\277\306=\226\252\'\177\261?\300 \003~-v\237\277\341\376\240\220\336\036\206\277\213\035\006\'\264\276\231?\226\006\374.R/\225?4\257=T?\231\241\277\\\246\302F\033-\266\277\340k\006\375\264I\177?\r\354\032!j\344\220\2774\314\250 \030\245\260?,\327\224\030\232\345\243\277^\252\002\027\323\304\221?cAs\036W\276\275?0C3Q)\327\245\277X\006\322=m\360\246?2\255\'\364&\244\246\277{@\025\264<)\243\277\235\306\377\366\322\300\227?\001\366@\n\214{\221\277_o\033\026\335\354\246\277})G\345<\251\250?h\251\346K}\212\233\277\361\023\3708\"\003\265\277\006\232P\325\352\240\246? 3\213\342\267\323\276\277\263Vd\316\204u\244\277\265GY\033\234G\261?\312\273\005\316i\034\236?\016*.\005\231V\266?\363\266\367\307A~\253?\231\330\021D\314\231\264?o\026\001\200^\024\241\277\017\247\334\333\234\235z\2778\326z\250\362\331s\277\2317\302\334\024*\271\277\273\355LS\350y{?\241\t\024\006\034\177\233\277\233E\261lX\252\266\277\276\000\361\261m\013\240\277\210\377\233\245\034\350\207\277u\337tx\027\322\261?R\322\300\022v\n\256\277\267\260Cajs\252\277)<\364\026\027*\205\277@J\306\304\372\267\223?\344\332X\027\325\270\242?\240\245!\022\234\223\230\277\"lf4\231#\274?\211<\005\350VV\251\277\207\024H$ B\223?t\373\217c\"Eq\277\221\211\030\023Cb\223?\212\341\3362v\316\206?q\217l\256\325\323\301?\206\323t_]\335\242?\200T\036\210\340\316\252?\013\260r\277$\'\250\277\232\010z\003#\362\263\277Z\211\376\241\226\200\230?sz\205\236\251r\255?[,\226\227\226\014\246\277,:N\335\366\237\230\277r)\256\"\016K\231\277\214+#ZVuy\277\366\336\021\216e0\243\277Y[\216\217\256W\214\277;7\325\352\261B\271?Z\364\014\026\314\243\270\277\017~\223\316\301/$?\200\355\177\302\327\336\242?\224\\(V\345_\214?\257vH\210\261\000\250?\233iN\320\375\345\253\277\250\314\321;\250\250\234?\'o\3274?\245\263?\006\037P\265l\246\260?\007Q]\\\r\022\262?\274\235\376x\300d\202?\365Y\230\367p\210\266?sB\255\245\010\322\225\277\304\t\023h\212B\242?b!g\364\251\276\266?\351\266y\354\024\240\247?\362C\030\340pZ\256?\306\025p]w\316\231\277h\034\215\205\233\217n\277b}\301\0179\013\263?\372\177\251\370f\205\262\277\350\2240\304\013\305\202?\000W\342\247\027\013\251?\325\326-\350}k\247\277Y\025L\341Q\227\207?1\233\344\267\210\371\247?>\255gf\017r\273?)\332\212`\312\244\250\277/\257h}\361\374\246\277\'\202[\271\016;\273?\243G\340\024\265\367\206\2772\221\364\214\325\212\264?\254\370z\365\343eB\277[0O\340\373w\250\277\2769\366\236\240\277J\330US\2056\200\277\335K\263\370\363\325q\277\271\263P\024\373\t\231\277\024\352\201\251e\"\261?I\370\243)\341\016\255\277\202\306\272\273\300x\241\277F\205b\371$E\263?x\337`\361\243\241\213\277\354{\242\324-\267\255?\342\233\232\353~\337\207?\237)\004r\370\356\267?\356\"fZ\246\314\270\277\312\264\033\010\263m\254?L\372a\221\236\302\253?01\"&\331\256\242\277\"\304l\305\214<\274\277\026@\314\373\217\310\250?\030\257\\I\346c\232\277\365pO\344\203\302\243\277\377z\302Vx\240\262?\n\304\326\253\036$\221?RH,\306\275\226\227\277\242\336\301\005\"N\250\277\005Q.\325\251\210o\277%\0208+\354\032\236?\222=\336_p\020\221\277\271:Y\013$\006\301?E\254v\207\310\236\204\277\005\265\347\307\374\276\300\277\031\371\315f\272Q\235\277J\206\346u&2\227\277>x\312\343\205\216\240\277\004P\343M\252s\246?s\300EI\321d\257\277>\202\030Um\334\252?\204\307q\022p\274\276\277\274L\212N\014&\242\277\213\177\240\363\273\217\256?\342\205R\r\364\375\243\277\207f!>\261\341\221\277\214\302\361(\227\330[?\n\232\303]PC\272\277\303\355\227\316\305\331\217\277\204s\335\234\301\354\243?\246<[\210\177V\240?\003\371\330\267\352\025\221\277\'\026\307\"\214\266\222\277\337\247v\267\3324\245\277\331zT\365I\246\257\277\270\262c\354\026\217\200\277J\2667\261\'t\241?\321\036m\301\032z\243?\033\376\300<\263\017\221\277\006\210\267\332\203_\255?E\204\271\264)\r\254\2777O\031\022\324\035\267?}.\345OM\"}?\343\247\036\317\333Z\267?\277\232Z\261\307\241\254\277o&_\364f)\236?\271\274\270\244\013\312\221\277\000G\034\340\345\030\267\277lTl\330iP\267\277\007\020\200\232\335q\231\277\344\217\342\317\362\373\256\277\\\'\342R&>\201?\223#_\234\257\203s?\021\312\204$\000\216\263\277\323\3102\350\021\264\223?\343\236\232(\264\204\262\277i\213\0279\365W\252?\256\032\353Z\355p\264\277\004\237M\241q\332w\277\024/\001\317\002\341\272\277\313\307\231w\242e\243\277F\"\262\202X\313\255?\005\001\3065~\"\261?\005\013g\300\324\244\226\277\375_\331\326GV\246\277V\242T\344Ch\263\277\031\245\255\005~\245\247\277\261%\356\2374\246\227\277\017`\226\324\322 \227?\325w\354\031J\233Q?\204\357m\344^\357h\277\211\307+\205\305\203\221?\346\\\374\320\031W\236\277\347\2125\301\220\250\232?\224KJ\216\013\201\233?\367\002\353\372\263\211\236?X~\307\256\207\245\260\277Q\245y\361\254\253\201?\366\244\024G+9\245?n9u\271\002}\242\2773\032\222[\r\242?\262\271\260$\001G\212?i\204\270\213\255\357P?\306\201\3455N~\234?\203334\306\270\205?\324\214\237\331R\212\264\277\037\'\021\315\002k\235\277MF\010\372WF`?\013\327`\036\332\256\245\2771\334\\\215\223\204\222\277 *\243h\321\025\253\277`\332\257\007FA\261?\035\257\275\354\014\251\264\277R\345\350p\347L\226?\007\3612@~_\247?\336@ce\375\242\211\277\314\252-\224\035Z\225?T\351A\312\267D\266?\331\201u\272\377tx\277\313Ys\331\244\365`?\303[Lv\261u\241?\244\355g\343\215\352>\2772&{\263\013\241\226\277\264\232EW\352\251\243?\227\227G\030\340z\257\277(mO\252\231bp?\334\n\310\231!\362\245\277\377\224#a\016\037\243?\202~\340\231\207\307\230\277\201\244\021mQ\036\250\277\215\357\221(\362\350\236\277\244\001Ah\343\321y?`E.G\340\365\246?T\363\237\243I\240\250?\227\307\203\336S\363\261\277\300\340\217.\371lD?\335\2373T\271\037\232\277\355\312\313\027\330\344\236?\232m\020\241\336\374\253\277\365A\215N\352\346Q?=\3511\321\351\361\254\277{+\n\303\307\313\270\277\233(\230\223\023\356\224\277RC|\261})\207\277A.\235\314\014\036\240\277\371\262\033-\3156\235?6\257L\\I\001\271?\345/,p\010\327\245\277\244^\277\267\342I\257?\301P4p~Z\227?\277+\322\'n\375\251\277\323\264\213O\350\235\213\277\033\236\002m\n\217\254?A\332\352\233V\321\240\277\236\020\265\375\377\0333\277u|\2775~x\261\277#\251\033D\271\206\255?\302*\202t\\\357\203\277\0144\215[\'>\277\277,\033\031\347\315F\217?\022\271Q\005%\300\233?\322O\274\027C\336\256?F\333\343\352\363\302\230\277\344\230`\2753\323\241\277xT-0\255VH\277\301k\233GS\223\236?\210\263@\033\000\203\220?lY\215\005\033\367\226\277\354\305\261\202.2\243\277\000)\244\371C\337\255?\"j\302\251&\327\271?)X\352A\213\352\212?\317qF\023\326\371\247\277\310{\234h\241#\201?\206\325\256\217\364d\264\277.BF]t\340\200?\303\331\263\337w\330\227?M\272\271\003YY\260?\250\376\306\353\207\201\220\277E\257\365o\311\032\247?8?\007H\3443\260\277\344\007>R<\331\236?9w\210j\274C\243?<\243\310V\n\355\234\277\261\244\351\037\327\247y?a\3539\210\2377\260?\337\255\245&l \302\277\353\211b\202\277`\245\277%\351\n\261\344o\266\277<\341\003\372\025:\215?\343,UC\210\247\257?\217\224\342\307\000\323\301\277\337F\253\237I|\301\277\302\000\235\263\030\035\261?\274\340?4\002\264\257?\340\"+\026\322\214\275\277M\250W\206\030\253\230\277\304v\351f\267\265\247?\320\364\216\n{\241\257?\2753\013[vD\240\277/\360\331\224\361\021\246\277\225_P.B\217\242\277\021&\260?\332\252\014E=\321U\2776\200y\333I\236\217?\030\251\271\344u)]\277\305\007\354\347\204\010\260?\366X\031D\274G\246?\375\377\221\304\201\037\244?\213\221TH\010\243\207\277\"\337\217\266\266{\202?\004N\232\316^\347\245?gy\202\003\241\007\261\277\353\370\017I\035l\274?\242\331\266v\037X\245\277%\335\230\237\274\217\250\277[l\"<\373\365\210\277\202\303\216\261>\034\251?\350\232\210=d\345\255?1\272\241\016\207\237\233?r\327?\177k/\242?\241\220S\352w,\257\277\246\346i\312\n\275\263?\331/\272\352\352\207\226?\231\230~\255\361w\302\277\256\215\003\316\223\377\211\277\341o{?0\352\222?\321\r{T]\232\234\277\n-\223\251^\177\220?\256\317\021h\346\367\210\277p\355hGz\336\261\277_\222\2436\317\227\177?\003\272x\332fj\263?,\221H\rd\303\254??<\251\277\372\335\265?\224F2\361\355H\267?\304k3\370C\034\255?FL\241\201T\016\251\277\n\205\245%\246e\255?a.\032\334\226\\\243\277\225\210\n\031t\343.\277\262\003\376q\005\034\263\277\021\260\221\234\216`\250\277\r\353\237RW\303\257\277YJ#\261+S\244?y\216\320\"]f\262?\205\215\2003q\336n?\177}\207h\3417\250\277o\034\236\376Y\346\243\277j\"4\001\007\031~?\030\367\365\270\216\307~?\037\253\223>\336\341\231?z\361)*\217\013\236?\311Hl\023\356\334d?\330\213p\313\250\327\222\277\000\214\365vX\211\216?\r\266\333H\232O\241\277\003\210\236\306\300\034\247?\373\225\304\024p\240\252?\275\'\333D&x\262\277*\377\235\204G\364\273\277NK&F\264\030\274\277\031\003\253t\377\212d\277\275\373\276\022 \250\241?\212\240)8K\262\277\277\002\024\3347\200\023\256\277\207(K\202\275v\203\277\033\242\030\226\032\002\270\277\372P\325mc\266\264\277\211\207\036\245\350[\241?p\2314m\370\346\245\2771{\253H\251\033\264?j\2656\002\207\233\177?G\267Rk\207,\271?\\\250\354$\200\375\243?\317-\222P\317\021\251\277\204\340$\005\267\366\232\277!\263\013.\216\037\201?\236\340\353\244cM\271\277\306#\352\335\023s\300\277\276\222lL\344\241\265?u-/\2368\212v?Ly\023u\374\016\266?\222\232\334/\001|\260?\242\217\3316\004\263\236\277\033&\301O`\232\253?\247\007\333e\237\221\263?\367\353\252\024\\\021\226?\033\223\333\026iP\210?\236\331\334\327\005I\232\277\255O\0316v\336\300?\316\333\343D\244\237\272?\0161\202\307\274w\224\277#\r\231]\305\247\233\277\202\254\313\220\007T\260?\275\211\3133&h\262\277\226\251\022)\204M\261?\261\337~\265\346T\251\277\032\261\271\215\227\330\267\277\336\234N\020\002H\204?\005*\205\302\313\370\241?\304\177\326\"\322\234\261?\301~\"1\016~\252\277\254^\272\315\275\344w\277\203=~\272+\212\261\277\n\356K\257@\314\267?S\353Y`TJ\244\277\363\224\251\177\036\231\253\277Z*\214t\314j\244\277lw\023uU\207\245\277P\215\031\372\320\206\223\277/\376\327\360dO\257\277/z\242\374\214~\205?\026\244\266\225C\351J?\031\020\272\316\340\\\254?\205\177\271v\363(\257?1\006\n\247\235\222\243?\251%EQI\361\226\277\355\276\037\371\344\'\263\277\013\222\005!\0276\241?\"\312\3006\230\343\257?\370E\257\246\320Q\243?\374\366\312z)I\273\277\227*\354\247\t\275\270\277\245\034\024\242e\213\225\277C\357\307\312d\236\252\277\303\310\231\016\222-j?\033w\345\n-\265\206?\362H&jX1\254\277\334\3524\316G\330\203?\006\370\221\177\016\250\242\277}YG\332\023\261\267\277\340b\336\024f\376E\277\317(\243\333<\005\263\277}p0VA=z?y{C\307\270\322\231\277\245\257\302\262\'(\225\277\270GE\217\3218\233?\003\024Q(\347\227\261?\036\010f\310\265\267\245?\357\361\256\017\241\214\253?\002\031O\343#g\252\277\n\255c\2762-\250?s\363\004\206\336;\242?\031i\036\016\017\327\252\277<$6\322P\036U?\367\271\336\363\3511~\277\265Y\257\r\342\212\230\277>\010\356)O\316\220\277d\207iT\361+p\277\227\336Nl\2335\215\277 \357\333=\353\340\204?\343\320\3569\273\352\242?I\013\274|8y\244\277\376\032\335\023\315\006\212?\010\352\202\232\250\202\243\277IT\256o\211m\221?\237\374\207\341\304\341\261?P!C\017>\264}?\271C\316\361\356\030\234?\013\n\277\004\305q\236\277\335<\256g\270D\242?\353\343\330\t\216\006\217\277\331\372\211\243\222\366\252?z\230\323\262\243z\256?\307\243o\017\032\307\301?\n\365\345(\213e\254\277\354\344>\264{\345\251\277\213\243\014u\024\025\223\277\270\331uo`\344\244\277\211\253\320\206\345{}?)\220a\352\3368\210\277\247\010F\340|\320\255?\304\032x)|\023\260?\002]\t\327\014f\227\2773\202M\2271\254\244\277\362\230m\3062\tn?\3151X+\254*\257\277C\202%}\351\'\253\277T!M\304,\211P\277\022\036\274a\306te?\277b\304h\201\336t?$\2461\231Q/\243?G\314as\005\350\211?bei\356\n\n\265\277\274\256|\037\365g\263\277A\362\246@\331]\254\277\000m.\013\345\261\217?\213\334\000+\353@\261?\235\227\037[#7\235?\363\3329\321Ct\215\277M\3671\031\347\373\260?\367\034e\362\314\300z\277\317\341ZrY\270\251\2770\326\267\234i\266o\277\021rJ,B\014\246?\216\242\341\364\370\263\267?\306\227\201\006\333\000Z\277\323\264\346\255\303\034\240?\314U\3420\365\273\200\277fb\373\034\234\376\252\277\341\006`:\215\326\237?\372#8V\002\335\227?\253\244\274\324\332$\225\277r\226^&\211\324\234\277,\010\250\264\032X\200?\202]\320TNZ\236\277U\260\200\017\237\225\244\277G\246\tL\355\016\271\277\017\352\311\254RN\251\277\265\355\234$\304\364\234\277\325\344\326\\\314\272\205\277\222\355\204\250t\030\250?\0236\2415\006x\232\277\"\232\327\016V5\262\277XK\325C\037\377\243\277\254\377\177\246\006\260X\277\226\206\034\352N$\247?.\240e\351>n\202?j>\376\034\256\224\224?\326\257\237\034\232]\243?\330\246\007\303\251\206\256\277\224\020z\352\223Y\225?\334\300|\213;\317t\277P\342\334bG\300?\232\254 bs\017y\277\263<\0055<\010\266\277#\026\247\030\310=\247\277\036\210!\225,\267\231\277\247\366:g\365\232\226?*Ta\r\355\010\253\277\304#.(\212B\252?\352Sa\364\302Y\212?)\226\215v\326\300\263?\271\320\341\000%\337\240?Z\237\027x\363vY?\016#[\236\233\235l\277n\205>\332\3001_?D\276\257R\314\361t?\247J\023*\2711\300?\263\006`\354\214b\244\277\233\311C\353B\222\272?\004\304\273\350\3269\205\277\307\335\346\023\307\247\234\277U\3616\343\304\255\230\277\223\261\336\2215\023\263?\330\337\337V+\223\247\277\246\266C\306\344\326\261?\035\212\377\0227\207\244\277r\347,\253\006\221\231\277j\210.%b1\235?y\'\260\272I\335\246?@\'H6\255G\241?\353\256T\313+K\212\277\000\016#\265\240K\230?D\337E\251p\306\200?\340AId\236\270\256?%\023+\244\252\017\244?\234\'\236\350<\362\261\277`\207\032N\305]\232\277\323\371\227\307\235*\224?\201\313\275\351\264\223\256\277\370T\226\373\313\256e\277\244\001\316\203W\355\241?\'SG\323\342\257{\277d3\254\267\337r\263\277c\341\356a\177\344\241\277\363\016B\344\2375\261?\3443)\035\314\212\256?\030\23751\276\344\224?o=\320\355\320m\243\277\353\245\2212\0304\221\277\210\001\271^*\223\263?U\333\371\376\2319\262?>\371\206S\356\227`?z\r4\263\234c\242?Q\240\246|/\352c\277\004\277Z\336\266\036\200?[g&*W\227\231?^\233\002\333\261\263\265\277lS\020\216\210\376p?85\370\n\037\203\220?\277\256\220\322,T\225\277u\313\302}=\365\256?Q\223\363D\300\177\277\277\231\363\244\261R\320\223?A\336\336<`\253\242\277.\351\245F(F\250\277\221(ao\303\\\216?\317$1>\363\037\210?\204\255yq ,\262?,\027\2076\271S\254?+\372I#\201\276\260\277O\355 \315\307\211\240?T\005\311\337\355\304\226?E\321\252\211\317\350{??\231H\225\350B\240\277\262\277@\201z\245\250?\336+D\227^\350\264\277v\331\337\203\330\220\244\277\004\331\361\201\372R\242\277K!\311\266p\017n?\204)\362\355a\333\245\277\272\377\246\321\327\321\264\277\233\000\373!\344s\266?\375\337j\010z\251\271\2771N\261t\307<\271?\367\027\305\023\200`\254?\254\271\372yd@\244?-\213R\253\005\344\251?yv\205\201\314\200\247?#\3624L\362\376\216\277_\000\354\345O\013\232?\241\1770\207\264\n\205\277\3139&s\177K_\277\264\377J\317 \030\264\277\365\3210W\000\200\253?y\\` j\234\260?\016<\300\315\242=\301\277N\003\310\261\253\226\303\277\026]\204\272\211\261\242?5Z\373\251#6\210?\025\206#\335\323i\233\2776\360\205\262\276O\220\277\301`\275`)\030\247\277\322%\340\351H|\210\2776\220\216\272\214)\211\277\n\227V\'d\361o?R\330f\326P~\252?\005\245^\212\255\375\261\277\340\031\020\314\026\241\257?\216G|\377\275\370\246?\031\\\350|L\214\243\277&\307\215\212\253~b?\235\2038\037\253\232\261\277}\270\013\200\253\031:?\001\231\366[\342\373\235\277f\307\231$s\272\245?}0E\276\034oz?\313$L\317\232\323v?\300p\014n|\235\211\277G\033\020\257\013_U\277tf\216Z\334/\247?L\226\202H\251\003\245?0F\250\366u\255\233?\001\306{\352;~\302?\t\t\260rC\032\236?Sxk\023\2608\243\277f\343$\226\006 \263?\270\317\371\324\353\323T?\330\363\'\241u?y\277\025(M\005\217v\220?\362i\300>N\030\237?\0236\2727t\341\247\277O\324\307\2316\310\241?\343>T\272\010~\223?\243Pb\036ak\246\277!\216\223W\370R\265? O(\234#jV?\327\022\230\243\361^\225\277\265F\274\037\0220\223?\363\333r\324\364\333k\277\024\033\372\000\200/\272?l8h\340\357.\201\277tI\2549\2561\262?h\247\334\212\354a\264?7\234\373\252pmw?\216Ol\276\355\323\201\277&2NN\354\213\226?\375g\002|\323\242\220?\340\303VE\332P\253\277T\257\323\340[\233\236\277\316\240v\023\344\230\255?\032cN0t\331\214?b\350\004\264(%\205\277Pp0-X\326\260\277\001\321\222\200\020\300\242\277&C\267\314\347z\252\277\360\202\271W\374\031\257?\257\316\245m\212\313x\277B\330K(4H\235?\337\214k\344\263\272\250\277\327\006z}\211\241\231?>g\\\355\007\351\253\277\177\341&\344\371q\204\277\301\331\177\225\360\272\272?%X\023i\274O\243\277\036t\331\302<\327\230\277!\264\304\365\253\031\300\277\352(u\332P\256\255\277\024\211WG\310\021\247\277\232ox\366\247\301\216\277\014\325\331\257SFp\277\207\312\2323\263+\247\277\014\241\312H\\g\261\277\302K\204I\307\221\246?\274r\240?\313\210\340&9|\234?\236^\220\341\312\305\231?5/#+\322\334\240?*o\270\005.2\265?j\241\034\261,\030\277?\247\330\264\272q\224\271\277\225\240\203\036\204\371u?\334~%\274YO\222?\253\227\260\035=;\261?\363\233\205\340\311\276\244\277\244X\014}\016\252\233?K\352\033\366\315\324\262?\013&\223\314y\274\206?t\'i\2729\252\213\2779+\000\2533\330\200\2773\002\325\345{\302\262\277\340\365\366\214\021\n\221?Z\t$\"\365\356\274?\25047\325\206\224\220? ^\205~Mo{?\257\262\234\2618(\222?\010\273\261\260\332$\252?W\254|\351\254\242\264\277g\373\316E\010|\250\277\230\236`\314\213\364\264?\256i\246\305-C\252\277\253\216\326\025b\354\240\277Bg\324\336[w\241?^\307\337\030\247\273\241?V\000\334\007\226\177\267\27756\342\233\322px?\2124-\371U\323\210?\353\327w\",\372\220\277\240\005CM\2516\255\277\217\234\371\342\307#\242?\332Z1\221\014\314\306\277\017W\377\264X\363\266?J_\302%:\023C?\340qm7\253\264\230?\241\255\026}\250c\252?I\341\202\301*Z\225?\303\035\0254\365\"a?\375k\235\345\205\337\256\277\254N\223>\212\323\210?\375W%\016\223\343\245?#:\3650\362\372\240?\346\'\353\342\304\202>?8\001Vd5\333\271?\354\273\324\357.e\264?\334\317\014\341s\013\241\277V\234\'\033w\032\240?+\211\256A\353^\270?}\237JK\236\257\222?V\315;\207\314\232\204?4\205|\301\327\025\264?\277a\"g\034\223\263\277\272@\027e\0375r\277\300\317h\275\256?\250\277\303\310\302\265\201\205\241?\001&\357\266\347)\261\277 \300\327\271\r<\246\277\317\025\205\014\222\026\271\277\2527S\3375\211\247\277q\222\2633\032\032\207\2778_\271\235\237~\253\277g\373\206C\224\021\252\277\313\226\0316p\331\227\277>[\244\323\306\375\245?\346\025\341\321!\002\272?\314\227\216\345\317\315\200\277\330Z\224\277\2359d?\031\024\345\251\276U\310?\003\236\343\354\363|\202?\307\230\254@w\274\265?\240\022\3007\322\024y\277\362x\025\360\352\203s?\317\2072\232\224A\243?/\372j%\303c\277\277\256\257\345\305\212t\262\2771*\206\017U\220\253\277\nI(\001\340\235\212\277\256\355X\245L\240\210\277\256\221\207dZ\360\241?5y?\206TR\265?1qd\014\2440\274\277\003\271\240\007K\241\224? iZ#Q\365t\277UYa+\022\223\251?*\230n\311\301e\241?\035h\220\207\264\250\256\277\321+m)\241\211\220\277n\004L\232aB\271\277\344=\3612z>g\277\370;&\277+\274\266?\271\271\271\341\216\256\240\277\030\202\265\252\210\243\227?\362\214\242\324\003\354\233?t]\210\263\237\235\263?\315:\\B\301\332\262\277\322c\240\\\017H\256\277o,\264\372f\337}?\300\245\2316\345$\273?\000\327\231l\357|R\277\035\255\215\316\312S%?\024r\306]\236n\252?\273\324r\364\317\264\227?\216\273\013\355x\320\265\277\337R\030MJ\310\222\277b0ug0_\201\277\013p\t\300\231A\262?e\366\330\253\300\007\221?IqOP@\237\277\3770\027\216\2606\233?\266\364\251\337@\332\210\277`\020\216\344\3575\272?\266)\314\023\026T\270\2773%\333e\037:\240?S\332-\363\344\306\205?.v2\222F\001\206?\205\367_\266NX\241\277B\373B\252\350$\224?\363\027k\242Oc\251?B\354\21392\216\235?\320\233]\245<\227\266\277-\016\263\305\217R\241\277\234\267\311\347\376\215\232?\365\323\317\203\352\002\240\277\242\017Xp\354Pr\277e\033\350\211\263\036\220\277r\362\323\362\352\216\256\277O.e\216\355\241\241\277\354\t\020n\031\342\271\277\237\t\266]\316J\251\277\224\241:5\236\272w\027\265?m[U\302\254~\245?\240j2\221$\367\273\277k\313\302\242\036\341\221\277I\215VvO\235\263?\373\007Y\306\266\372\253\277|;\360\032?;\235\277J\270\021\253\031\372\226?\253\376a\0048Tw?\237\271\226\332\035\242\251\277\275%\371\337P\276\224?\033_\313\346\326lw?|FV\255\023\245\202?2\"\353\t\001\337\241\277\223\270\375\267i\346\243?ai\032:\202{\303\277\265\022\342\026\227\320\266?\300\371\215t\276\'\220\277\247\376\262\352A\363\244\2778X\246\235\365\364\246?\363\036\262\213h\364\207?\354\211\352*\351\247\242?\307hL\346\253W\243\277\001CIw\021/\252?\271k\264\277\267\217\256?e\244\207\202\375\242\223?\017;\311\016\354\250\225\277\3435\007\362\016{\221\277o\013\r\344\247\315\200?\237\313i\332\311G\221\277\355\216\311\216\252#\264?\236L\253\020[Y\232\277/\005H\302\0141\241?;\027\271P\312s\272\277\003\324(s\004\036\301?\354\017\026K\332\373\200\277\314\206\034\211\326TT\te\303\226?CVy?\241\227\265?\254q\257-\274\241y\277s\276-\366\350\304\271?\326X`\236\266\333{\277\224Q\336 sM\240\277\345\325\014Q\344\222\225?C~J\001\207H\266\2775\000h\320\263\'\243\277\217\267\244\"\274\206q\277\351\256\\\361\261\243\277s\206P\023\312\362\264?\'&\002\304dB\247\277^\006\264/\017,\260\2775\205 \357\314p}\277\223$d\277?\363\252\277\014\272V\026<\260\255?Z\200\373\227\237\304\250\277\222\367\243#T\300\244?u\353\313\205\300\201\244?n\375\377W1\374\251\277\3239=Y\360\263\245?F\350\357\326t\033\210?\255A\327\002N\230\245?X\034\257\275H\265R\277\214kr \001\005\272?\246\262\"\347\271E\270\277\3661$\332\307\371\220?\211\265\025\211T_\267?D0\0029\274F\263\277\231]\355\030\363\013\230\277\207d\013S\256m\250?WT*k.G\263?$v%\335\337\034\251\277*B9O\341\016\232\2773\232\215\314O\201\254\277\251K)\327\373q\223\277\305A\357\367\001\270~?9\rk\001\252\242\267\2779>\216\332\2604\274\277.<\261\241\261\376\221\277\340?H\334\201o\240?4\242\r\205\024\002\221?\273\371(\241\275\003\260?,\335\263`\014\325\253?4w\257~1\352\237?\371\365\004\333+/\260?\241\236F)j/\262?\331\225\\+\016z\230\277\005K[\215\014\206\272\277\"(\312q\036_\232\277\222r\231~D\367\300\277\262\306\374\0169\016\273\277\216\326\321\021\350\032\273\277G\025\0079b}\263\277\272\213\265\023\250\322\205?g\216\327\001=\274\243?\365\252l5iv\264?\310\323g\362`z\252?\265ES\236\342\307\\?\246\322\327\235\371ss?\277Qf~\035\263\260\277\205-;\234\037\274\215\277f\340g\211\237\243\263?bU\264\210E=\232?\247\007\024\254\344\374\244?\036\'\323RMM\222\277\376+\357\277\375\316\214\277\'(\343j\r>\224\277\363T\006\337!\223\261\277h W\316\200\225\270\277\013\007\323\221\343q\256?\r\027\371\026]\271^?\355\342\330I~nS?\246=\314\271_e\261\277\320B\351\260\363\247\265\277lt\272\347\024\304\231?\276\217\225)[\350\257\2775F\217>?\221\206\277\324\266\274\004\311\234\222?/\312\227^t\360\302?\250\\\374A\016<\220?Fr\220h\310\370g?\214\345\\\262]\270\177\277y\203V\267\212\273\223?g\021\372\026p\340\241\277\203\014\217Z/\260u\277GS\275\005\361C\200?\237\"9qp\375a\277\350F\240\315\033\373\220?\326\027\336%\036\026\251?\000\236\010+y?\252?yv\206[\300\003\231?\371\370\223x\321\r\267\277\273\273\320\245_\302\255\277\241`J\002\210\217\255?\307n\251J+9\244\277\033\212\353\\\224\327\241\277\326\301>\357 \373\203\277\010DC\177\005\260\254?5\3017U\022\310\252\277\312\223~\366\372\n\234?\365\302$o\304s\210?]\3765\371Q\377z\277\n\336\304\017\005 \257?H\344Y\336J\357\240?#&N(;6\277\277\210\344vn\257\257\244\277\305\345\200\0230^k\277d3\222\353\267\326\203?\2602\375_o\204\251?\024\327FO|\337\230\277\300+\322\375\354V\227?\030nCR\020\273\270?\302\256\367\310o\350\271\277\255;\230\242\0133\240?w\304R\210\272=\222?)\034\324n\3313\245\277\302Y0\014\252Q\244?\353\201\266\323\t|y\277\343\371:\303v\304\206\277q\243Q\302lS\221\277J\260\255\244{\256v\277\267\354\007C2\352\277\277\372\303\370\237\025\312\235\277\205\350\230K\271\364\246\277\254\235\234<\366\301\224?J\377\016\277\003\035\241?\021\313\320\322\220\212\254\277\017\000\247\323N\341\241\277\237/\352<\363y\206\277\030qt\027\314#\245?\313\240\344\201\363\300\235\277iM\222\265\024\031\257?\300\022}\372_\215\267?_-\352\377\341\352\243?\360\004\300\216\333p\264?\026\177Aa\025\010\247?\351gr:\000R\254\277F\003\2019#\375\222\277\310\014Y\300\227\035\253?FdiD\223\333\245?\300\363x\005~\332\301?*\344rg\212\267\253?5T\234\202\026\212\225\277%5\265\264\360\212\221\277\243\320\"\256xX\231\277R\217]\177\330\027\242?\373\360\267h\t\275\215?X\0359\276#\351\245?+\365\226\037\024\320\235?\370\"j\r\223\025\210\277\214+\315\261\235G\250\277\013R\2775\321\213\264?q3\233\253P\335\252\277\361\265\321+\236A\241\277\370\033\243`\2177\242\277k\334\275+u)\245\277\242y\265\257mE\212\277\007\257\360.\004\210\222\277}\235i\322\233,\245\277\241\300\'\250\234\362\250?\224C\367\005<\315\240\277\367C\323\261\252\347\261?.\341\367\312\201\'\244?\355\340\206\246\272H\247\277/Q\346\266\027\257\225?2\375\003\364}\273\245\277\325!\345\247\275\344\215\277C\355\271\225\225\225\272\277\0035\022\032\255u\246\277\240x\207\350\3164\224?\\.\260\207\177%\242?\t!\232\td<\246\277Sa|\343e}\251?p\261\346\r\202\230\224?E\275\323\223\037\007\264\277\206\036\032c=\037\243?\222\240=\371\023\310\251\277,\365\036sq\271\256?&\r\2543\025\222\231?\247\"\323\357\3536\215?\212\270\000\032\223\271\264\277\324w|\322\364S\220?\027\004\252\300\332\211\244\277\027Qu.\247Q\226\277\250\000\254\334\243\206\212\277\361X\352A4Y\265\277I\031\000\243c\253\260?\211\357\342.\006\213\242?\241t\334\331\250\202\244?\360!s\316?7\201\277\003bU\3174%\253\277\311+\346\327+\361\224\277`\376\314\224\246\254\206\277[\366\"y\356\010\207?\361\024^\244Y\274\224?\222\\L\364z\305\242\277\353\251\033f\222q\252?Ja\324at\270\252?l\177Ey\244\244\246\277\222-2\305\027\225\232?\343\017\373\232\373\255r?\351\341O\364\010!\241\2770\027M^i7Z\277=\232\013\230\240\010\277\277\234\224r\230\354\005\243\277\306G\271\256-\361\260?\030\231o\230\017nl\277\026\217\363\2270T\255\2774BM\2175\276\227\277\037\312\361\244@\001\266?0\320\211\215,\017\265\277.\033VyW(L\277+\223 ?\306|\242\277X)\346\325\323\001\264?[\253!\240,,s\277\300\311\223\000^\017w?h\324\223\254l\035\201\277S\304\325l\n\257\215?\347\3352E\201\260\267\277D\255~}nW\230?\241\201u\370b`\267\277\357R\374\324\357\005\243?\032I\213gY\353\242??cl\203\267\001\251?\330\315\210[\003\236\224?\263e\234v\033\026\251?\350{\337NyP\244?\233\206\357\365\375\n\261\277\373\227\013\006k\366\261\277\360\223\353x[\343\253\277_\003\276\263\3538\267?C\374{\n\237S\244\277\021\033\253}\035[z?\276\033\026\264:v\242?\277\254b\260\332\026\260\277\020\3054z\343T\260?s\224\232\206\324\316\222?V\016\016,\341\275\257?n8D\334\'/p?\000\341f\315R.\240\277\255z\010\362xl\266\277\2236b\275/W\224?\313Tts\340\n\263\277\274\350p\023\213\r\236\277\236\345(\326,\316\303?,\345\241\202\013tp?S\216Y\361\264\242\220\277\205\254\345\223Ss\251\277\267\2055\303K\343\246\277\021g\262Wn\273\300?\234Cw\236\n\273\236\277\251\327\355\375\250\302W?s\264\024\321\333\243\230\277t\226C/\356\227\247\277\310i.\361l\303|\277\212\351\347\261,\247\304\277\303M\0004e\263\243?\227\301g\036Z\274\225\277\313\003>\352\2436Z?N!}\264{U\201?\212\2626\205\226\\h\277Z\236\3512\020-\254?\271\261L\303/J\302?\020]\320\013Y\321~?1\210f:t\221\242?\\\223F\026@\376\207?h\031&a\225\203\276\277\235\310#P\256c\242?\2512\366Y\245\037\260\277\254:\3506\356\316\243?\007?\304*\224!\254\277ZL\223p$#\233?\330Lg\340\313Y\241\277)\202K(Y\003\274\277\267\t6C\255\274|?H\215C\223\332q\302?\240\001\255\334M\"q?\327Zlal\303z?\356\2071;\345\276\201?\205w\036\233\306\365\257?\242\240\035f\217E\257\277c\036\227\366a\305u?\023\253k\314\006\370\241\2778}c\333\377\206\260?57\250u\t\352\234?\256\307h)\314C\200\277@]\031a\215\262\253?\002\355\341\227\257\365\234\277\023\362\270O\353\244\223\277\237\312\036\022\237\035\244\277\2666\215\233. \242?\307\265\272\203\222\275\245?25#\261\330/\245\277kB\005\005\247\t\222\277\025\013l\335\277<\203?\'F\304\373\201H\261?\243\371Ca\360~\177\277\241\355\366\027\252\367\246?f\306z\226\027\366\202\277\r\t\253 \334\025\210\277\004h\355E~a\277?\2034\014d\006\375\240\277Cf\001\211\2320\220?\225\027u\251W\373z\277\216\334\306tb9\230\277\2509\005\300\241\346\260\277\032\252\035\200>\330\300?\203\351\263\276\217\356\243\277y<|\244\230\026\246?\333\365\230\325z\304\232\277\255\245\2736\245Tn?\267q\235\226\250Z\261\277\013\213\374\232,:\177?\'\321\355F\245\016\226?\022\'\216\235\366\'\202\277_\276\000N\037\346\215\277\014\205\311\\\006s\247?%\033i\322r\315\251\277\017\354\364\014\240\301\224\277b\250H\226\243h\236?\203\372\366W\307\277R\277(H\025\362\003\253\261?\004\324\214\312m\004\232?\213`\251i\3011\275\277N\377\211\246\013\236\210?\231\246c\237V\002\231?b\336\024r\000\340\252?o^\336@\016\r\241\277\317\367\312\014\354\007a?\n)\245)5r\265\277y\rL\247\010\026{\277na\200\014\251+\263\277\363.\020\036\336.\226?\206Z\037ga\336\220\2774\225\247\236\273\247z\277\305\255\231\342\363]\265?\352\243\350\353\036X\202?lS\241\324\263\212]\277\204yvvQ&\262\277\246?\310\226\352+\245\277\375\372\331Y\230\241\214?5\017\262\016\263\201\260?P\237e\331{u\220\277\000\253Xg\001P\211\277\317w$\014$\237\275\277\377\332\203\344\233n\220?\340R\213\023eY\200\277\206Mq\256\324L\243\277j\206\333\214\250\357\252?-\366N0\356s\233?V\256\277\351J\017\261?\266O\313\'\303\221\302?%\030\017\340\244\n\220?i\325Q~\177\250\221?u\210\361\206\373\331\263?\247J\2566H[\221\277\336ZOXR\275t\277\334\222\265\357~1\237?\331 8U\251(\265\277\246\233\']R\311\262?\235\341\342\273\"\010\231\277\357[\177\\\300\356\233?\037b\264F\177\336\221\277\204j\036\366P*\261\277\022\303\215\236\2768\227\277\203\266_5\332\210\262\277\002\305\256$\363\241\205?\212_h|\207\305\271\277\3613Q\360\017\214\302\277:\224\326Q\241\303\263?\222P\362%l;\257\277\354\346\306\\\036\330\272\2771e\203\235\324\'\256\277Q4Pl\220\264\227\277Spo\035Y\223\234\277:%\201\203S=\223\277\207\306\251\230[\343\242\277y?\252u\214|\220?\307!E\331\255\330\232\277\257l\031\320\347m\301\277\240\361\250L\002<\300\277\337\336X\346\320E`?t\2150\353\301\254!?\2159\202a\312\t\231?\227J\272\255\253:\227\277T\306E\010\035\352\256\277\233\301\322\201}\033\272?QY\256w\013\265\240\277\t\021)\"\\Y\257?\0035\027\343\221n\220\277\236\017\207\264\237U\302?p\356\372\2641?\235\277D\376\301\010\357\036`?\265\'\272\002\346A\265\277\314\273eWU\204\263\277O\351\332\356!\214\220\277z+\024ak\006\253\277\373\252\307\235\310R\253?\020\235-8\341>i?~;\245,\272`\247?5\322\037s\305\337\242\277\245^{%\376\330\242?\\\t\341\017\276Iu\277D\212\353%\212\216\222?\346}=\255\227\243\263\277L7\316Rw\261\215\277bx\243\371\201*\204?\265\370\017\2565\274\215?\372[\277\022\243P\254?V*\314\005\330\243\243?vH\001\337\304A\227\277\374\242\326\014?E\261\277\225\226Gl\312\007\234\277\266@\031V\021\\\205?\025\3426Uua\241\277\270\207\325\035\216>\210?B\322\261\265\331\225\257\277\323\006[\021:\350\266\246\262?Hw\344\236\033L\272\277\362\300\206\263\242\257\254?\346\033\tH\2424\214?V\240\246Hqx\247?\0022\242\021/\340\262?\340\343\366\345a\366\243?p\377\220\001\3069\235\277\"\367\364@\230v\220\277n\220\036\254\\\206\232??\323C\020z6\262\277\027\254\202xr\377j\277-\013.2\343p\242?f\230\244d\024\347\206\277\231\343\333\326\212\006\266\277R\250\204\335\"\026\266\277M\0379T\360cf\277\331\317\020\257|b\234?\3138K3mK\270?F\371\237\344\000\355\260\277{\220\212;\371\026\221?N\025\230Y\370\013\245?0\205}\334\312\270\262?\357\302\276)|*\232\277\343\004\332}\354\320\275?\301\231>\026w\275\264?\021\000H;!\027\251?m3\332%\337\343t?J]\211.\227\276\234\277;[\235G\241&\241\277\014a\243Ai/R\277\017\007\013\351kt\207?\357\355I\261\202\351\261?\233N\264\216\334\200\254?8\030\246\211h\356\205\277\323\036\213a\036<\240\277(\353{\262>\231\211?m\007\313\036\205b\251\277`\251\007\257H\205\273?(\004^\301\001m\247\277\t\334\223j\242\201\211?\223\347\3233uh\240?\210C\343\260\335\006\254?xd\272\247\324\341\231?\372\207\336 o\265\207\277C\217L\367tR\227?\021X/H\314\300\203\277Jt\356\2733\367\235?\245B\267^GE\256\277-\2474\"O\217)?\003{f\3437\027\240?\312B\322\013@\257\261?\307\0248\377\236\213\267\277\270>\364\030\351\325\301?]\267\264kS\313\260\277&\356\013\236x&\231\277\375\277\034N\217\007\236?\303\377\2479\022\016\250\277*\200\365M\311\337\233?d\333\375u\365v\244?\251\340-\322\213!h?s7-\36350\223?Bf{\034.\024R?\342\375\213\"\274\244\261?W\230c\216z\021\273\277\024\005\206\317\375<\200\277\202\223J\016l\234\262?6\347Z_S\375\267\277*J~\324\034\026\264?k\244\276h\211P\273?\354\227\225Tq6\204\277\207\366\313\310j\352\253?\007\354\367\003s\342\261?\307\025\261\303\321\002\272\277\244{m\224\nO\226?\344\305/\333\233\356\226\277\347Yv\014\302\033\244\277\352\277\200\'^\210\224\277FK\004,\312P\221?*OH\334X\345\243?\0208\025Ar\320\242\277!\230G\005\334\334\225?C\014\204\316D\t\236?\003\303\250\317*\263\247\277\334\027\226\345A\030\262\277\242\257.\024\213 \242?m\335\352xtk\257\277+Q\363\036\201Lu\277)\266O\004\220E\216\277\202\365\341\361j\375\251?\222\256r\256%7\243?\312\307i\277\026E\246\277\306}\212\340\324\001X?\264\370\337\036\200\373\247?(2\024\301\3044\272?E\025\373\307@\244\272\277:\214\217\226\270m\261?\270\207\213\304\274\227\247?\362\t\255E\326\213m?h\270\023.\270\266\247?\327=R_?`@\277z\n\t\256\373\226\266?\207}\n0\000\027\262?b8\371\327\332\027\300\277\236j\372N\003J\260\277\347>Q}\266]\257?k\265\037[-\266\240\277\026i\3415_;\247\277jp\251\225\215\335\236\277\271\372\027\3045\273\227\277\210\224^\365\332\267s?\301\003\0134\215\302\224\277\014\005\235\313py\253\277\233HD$\355 \212\277\337@Z\031\325|v\277\024\306\244\0354\254\231?\355Z\013\312[.\203?B\2123\'\265\005\233\277\314 \230\245\177G\264\277f\235d>\275\327\242\277y\217\220\022%,\177\277\022g\006e\031\212\223\277j\371\236\001\323\353\255\277 \021e\334}\215\241\277\370\3215\310\205\256\226\277t38X\373xn\277\315\240Y\220\212\264\241?\240\370\360\201\2775\252\277\tZA\323\002t\230\277\273Uq59\302\216?\200V\033\314\202L|\277\327\037\315\216\233\367\211\277?:b\020LZ\226\277\332?x\007ei\242\277Z\367\016Um\202f\277N\263\354\332\326\336\247?$\004\004\326\307\250\251\277\ti\027\330\212\363\250\277\233j\213\n*\314\263?\232\223\304\266+K\305?\314\006S\003\234:\203?\204\t#<\262\305\264? \310\215G\256\315\232?\333;o\005\027\327\252?\222yg\336\201xd?\036\333\344\263G\331\265?\303\'\301\306\3073\222?\310Mr\307\347\217l\277]\230Fo\253\344\260?\017\317\035X\217\312\213\2772\247onJ\206\243\277q^\207\347H\204\277?\315tS\372\266l\222?\333\337\353\367\222\330\261\277\277!\365\232(X\240?\242\322\224\237\006\365\244\277G\331\370*-G\240?u\313\210\005\256\215\227?\365d\"\205%\230\267?\216h\023\204\237E\260\277\214 S\375uD\264\277\377\300\216~r\203\235\277\321.\213\253\034\356\255\277\242\223\0053\343\342|?o\241\262\014\300K\226?\003\201/X\'\200\300?\220\272\355LmL\201\277,ur\207\257=\216?\351\224\304\240d\311\224\277\003\266\245\'y\t\210?\004_3`S\251\262\277\223@\0145r\"\262\277\007\370\334\226\007`\246?30}7\005\\\233\277\306\234\272}\327R\264?FOdF\310\n\262\277\235\275LfT\233\221?\227\211gi\342:\212\277\342\333D\354<\374\263\277A\330\271\214\314\336\200\277\216;k>B\036\265\277\346\220\000Ca\201p\277Y.A\246\014\223\246?\215\030\330\003o\013g\2777\255N4\372\230\201?\001X3\247\037\237f\277\275NO\010\311m\252?\374/\307\330?9\241?\215U\367v\370\220\257\277)0\347\250?\326m\277eE\356HD\211\254\277\034\271\tUc~\263?\215\334c\243\205\030\246\2773\265y\307\215@\245\277L\242O\t\035v\250\277\224*\207M\212\311\253?:\364\260\031t`\211?\035\323\034\315N=\250\277LR\344p\014\341\306\277\234Y\251\332`\231\266?)\007\251\014\360\272\240?\266_\260\3205x\300\277\253/\3705l\256\201\277ESXO\375\220\230?N\246526\007\262\277K\201\365\372^\353\263?\254z\030j`\312\244\277F\025N\237,\270\253?\\\3003\210)D\225\277@l5\317\021\035~?\355\242\333\206\256w\256\277\025g\232{\200\326\247\277\016\347A\2257\037\206\277\250\312\207.\377f\236\2770\273k\3336\342{?\355\224.\335kt\205\277\3272\337>\373\212\203\277\220\267\305\r\030P\255\2776\352c\271\252u\255?95\354\264\"\013\262\277=\354\300\213\251z\240\277\212\250\220\273\333i\301\277\245\0205,\331\026\250?\362}\227\366\274\360\214\277G\347Pd\2749\250?\330.\313\006Q\374\240?\332\321&z\272m\244\277f\332\352k0\330\240?Y\272B\026\\y\206?\274\032y\310\342\222\261\277Z%wg\353s\252\277\221cK\232\204\366\253?\352x_\302\213\003\204?\333\316\363\304\274\002\225?\273t\262\252\202\314\272\277\251\302\362\377\021k\265?\303:\344\363\325\332\277\277\035V\027\206\365\344\242?\363V)\264V\362\256\277\256\376\350\304\311\035\247?\323\345\355\364<4d?\373r\337\377\316:\241?\227\243\344\335\224\275\240\277\276\320\356\222\030S\246?\330\356\010\263\372\213\262?\302\317\232\0359\342\266\2776\221\361\301\370\\\214\277\376\225\340\272W\307\214\277e\037\261\207\300\246\204?\201\340*\371Vh\270\277\252\324;%\304\034\246\277\271\\\373bU\353\222?v\203\374\036\303\201~?\253\250\001\347\315\"\244\277\254F\225\240\325e\240?w\211\307A7\215s\277\236\261\020\004\276E\266\277\013\366i\322\332\245\236?\377\257\025^\373\376`?\260\301*\276U\004\256?\203\033\277\243I+\226\277\306s$?\242\217\232\277-#|}\332\237\244\277{F\327\230;\210\242?\234/\253\221\304\032\235?\371{\234\350\2700v?\375<\2715-X\251\277a\230\247DUd\273?\232s\325\205mV\226?\020\034k:O\203\230?\233(\354\273|X\240\277\375nX\224\341\317\203\2774\250R\000\013\233\263\277\004\rO\353>\214\230?|R\332g6:\213?\334\213\340$\035P\262\277$\350\320\317Fah?\227>\020\262\337\370\240\277\rV\206\273\307>\277?2\245}PG \270\277\360\233\037\025\2209\246?\010}\016\2009\031v?w\304\305o>T\237\277N\335FsMkb?\364>\364,\221\262\266?\337\002\255\0252\224\201?\246\363I-\263\257\264?a\203\233\246\000W\220?\003\265\231\304\210\313\271\277\304\325J\330nP{?d\000\265\367!\221\245??\3547:\'j\253?\000\240\217\247\243\256\213\277\352\350f\354hA\206\277\223\021\351\213>\007\216?\301\261\261a\357W\206\277o\345\305_\247\230\205?X\312\346\351q\234\227?\306\030\254l\200\255\207?\335\tv{d\001h\277\2344\242E\315MC?\203\2758\317L\022z\277\307\220\r\245\274\366\221\277\277\225\004F%\242\263\277?z\337w\007\250\261?|p\240}\301\252b?}q\344Y2\237\252\277\350WB\334w\323n?.4\210\343oR\252\277\006\354m\036]pt?d\254\251;\'\316T\277Qm\250\253h\213\260\277\213\376\'\rav\247?DMc}\257\327\245\277\364\305\301e\320\024\215?\301\335(C\3646\247?\'\210j\240.\351\254\277\224\365\007N\373\310\215\277a\'(~\274\030\267?\324\321 u\320\365\201?\317\006e\0356`\253\277\370\246\3469\362c\263\2776\361\033!fQ\177?v\207\313%eT\233??r\226\341\001\030\210?+Mi-N^\240\277\237Fa-Vg\226\277p\217w\273\257\354\261\277\020dk@\213\330\213?mfw\304B_\244?U\260\036Pj\004\251?\231\244%\314\327\252\232\277e\245xh\301\357t\277\301\373,\334;X\236\277\241\001\241\365\315r\256?\264b\000\230\272\250|?\013t\265U,@\277\277\013\263\204\222\000\200\221?\256\003\224\225b\340\250?\0240]\335W\370\250?:\203j\206\013N\265\277\226\362\271@\251\253\252?\276\301\t\277\222\314\231?\343\327\275]<\372\220\277\330\211\001\276\335\202\223\277\'t\215\342\366\302Q\277\000\216\272\240\214\352H\277\206\206S\2736\264\300\277-\334;\310o(\271\2770\"=\332[2\233\277 9\377\257e\304\221\277\361/\'Z\320ag?\014]\325\024b\033\242?\301\252\306\214P\021\221?r\340\242\335`\365\252?\220\223)\371\204\370\217?\365h\355\240\222&\244\277\335\204ud\321\023\222?\206\307Dd\222\374\242?\327n\022\257\341\354h?\321G\r\356Q\277\253\277\331\251)\330\353e\270?\001k\007\035)\300\253\277\270\'\227#\225_\233\277\034_/\267\264\356\t?\221\306\003\350@\016\303?\305\301\317\272\347\261\262?\301\0266\346\262/\250\277\273\221\211\3610\275\253\277\004R\213\235\345\377\262?\340F\313\231\246b\242?jL\205\375>\371\264\277\261rP\3511\324\261?\245\255\214#\n\230\270\277\261\326Y\262\243\252l?\270\014\0058\335y\220\277vbgS\221z\262?|\277\nO\313\013\256\277h\360\245b\203\034\223?\020X\373\210H\231\206?\350\0351\257\201\263\241\277\374\217\333q:\302\002\374t\306\240?\200\373\351(\217\034\262\2777\355\305\371r\016\300\277d\340\336\244c\356\221\277\211v\007Vs\211\261?7\340~g\274@\275\277\254CcQ0 \262\277\302n\t\006-L\251\277\037\272B\036\230\235\227\277i\3067@\0344\253?>G\337\242\245*x\277\377U\364\021\343\245a?xlr\226\257\366\262?E2W]\016:\252\277\314\266\237\326F\312\242?%bt\310Q\341\230\277\"\327B\360\244\256\244\277\360\336\212\211&\003\233?\275\335\354\375\231\351\230?\010\340f\315L\034\305\277\342T\216m\277[\217?\242\014(<\267\310\226?\377sv8\341\235\257\277\263\305U\352D\207\257\277\357\305\250\006\351Z\234\277DX\346\\\322F\250\277b-P\033\205\266\261\277\363\370\254\3154e\213?\020\202u\332s\265\237\277v\374\312t\372-\254\277_%\3645N\037\251?\212\306Z{pV\245?Ay\313\231\000\373\261?f\023\215\036+Cp\277\310\354\003\261\354\271x?B\013\0318Xh\271\277\375r\310i\251\372X\277\237t\330\324\363\001\240\277-\302\'2\021\270\205\277\226\003L\232\216\246\225\277 \017u\221k\007\244?j\002\314\307\273\356\255\277\346a\207@\314b\252\277\022\227U\221e\032\255?\264\033\353\253Q\234\222\277\200|\333\353R\302\201\277\201i\306\204\0178\264\277@&\264\231\026\323\220\277$M\035\342\274\003\226?\324\026\352\255}\234\221?\375\335H\260>\346\247\277\321\025\003\257\263\t\221?\351c\246\244\354#\252\277R%\213\003C\024\263?\221`\365~\205]\244?H!\177\007\254G\255\277\272\314,:&q\252\277\374Fy_\000bz\277\r3k.\202+\263?y\225\254\235\234\243\220\277.\033\230V\371\020\231?\333{7o/l\233?\3645<\354\242Ov?\030\t:\212\274w\223\277\226t\177X\303\371\216\277\333P\274\340\366\345\271?\030<\320k\001\020\250\277\022@-\2608?\262?L\366\202i\017g\222\277\360uq5i\245\241?2\302\244\232\375y\222?j\210\261\354\024L\220\277\247\264\230\'\241\233\262\277=\342\234.\367:\213?H\332\373}S\242\261\277 *\n\272O\247\257?\367A\230?[\016\3065m^c?\177Q\365\273M\234\226\277\327\244\177V7Y\270\277\222\264=\310\004K\242?\244\233Z\3331\210\302\277\343\223\216\005\361\215\236?\000\307\007\362{O\207\277\217\221\241\353\305r\245\277\032\377\"\272r\373\203\277\3242\244\306\336\300\241?\036F\032\230\343\204\252\277R\267\257\352t\243\236\277M\306(\350\")\235\277O(\014a\313o\235?\014E\022Y\320}\270\277\321\311XM\227d\235\277\302\342\033\211\355\244`\277\035 \320$\242\377\247\277X\211\004&a|\213?\376\r\227\375\232\370\276?Z\333za\2156\275?(st\004\376R\177\277\215e\te \261\225\277\204/\016\333s\370\240\277_\3006\227{\240\254\277I\251\312\007\216\342\200\277:\347\020d\326\346\240\277I\032\331\352\006\374\241\277\372nA\306\234\306\214?u\266c\235\373\316\272\277\000]ls\376\242\252\277S`\'\337\001\371W\277;\244p\274~\267\267\277\375\217k\365*\340\274?\037<\344\343\331\300\234\277e\305\361I\'\001\256?\213\301\004\235yr\200?\035\276{\004;\231\243?x\335k\246\322\305\267\277\326]\227X\322\022\265?\271\177\\\205\205\020\234?b\311\324$p\207\251?Nk\026\371\262\203\233\277W\233\342:\224\247\256?)\252\021{\006\023\300\277\003\360G\263\261\002\232\277\014b\364C_\267{?\017?\036L\305<\265\2775\352\203T\247\377\271\2776\306\016\240X\344\243\277|\312\2652(T\222\277)I\365\001\244N\262\277=\2229\227\237$\255?\353\347\2577h\343\212?\334\266\233\232\335\353\263\277HFH\327\367P\253?vm\231\034\017O\251\277d\021\266\"\314\331z?f\226`c\364\303\261\277f\"!\336\314\357\212?\315W\306hJ;\224?\256n\002j\247\374S\277\266IL/x\026\177?g\313\272\347\225gw\277\264\374\rk\352\304x?\264x\335}\342NF\2772\242M\276\223\013\252?a\22513\204\342y\277\240i\216\274\3762\241\277z\036\345O\263T\266\277\347\301S\336%\251\233?\254\352\220\016<\302\206?T\363%\237:r\254\277\022uc$\031\340\270\277f\202!qZ\211\220\277\001\333w\021\262\325\301\277\003?\312\030\371\230\241\277\204\215\341.4,\205?Au\273+n}\226\277\227\3019\222\251?\256?4\305\374eP1\250\277\266\004\010\313\312\006\240?\311\245\341cy.\224?3\227\247\177yJ\254\277\277\223a\314\022\271y?}X\324\344\250K\262\277\354\341Y\313\261A\261?fd\354\360\237t\253?}%P\205\316\233\273\277\247\343\3623\002\035\237\277\314B\253\235%\351\265?I\"N\006\271\262\221\277_\266\036\004\210t\271\277\215v\220J\245\314\244\277g5\275\210\250p`\277Um\2111--\240?h7\267\222T\275v?\317b_v\312\364\241?\311\321\327\000\307\303\207?\305\374hG9\232\271\277w\376\"\213n\205\214?\330H=\365u\275n\277\370\013\364U\025\224\275\277\222|\226\234\356\257\240?\223\352\177\212\006\316\266?\263\253\246M\327(m?\001-#\273B\363\212\277\220\344P\026Ip\244\277\365?\214e9\032\271\277rC\004\360\243h\274\277\006\342]\363;\241\245?\355\210\235\242*\215z\277\252\340B\236sm\247?\366 \274R\325\243\272?\013{/ZG\351\222?\006+\033N\357\002\262?\213\376\245\313\312^[\277!\204g.k\031k\277\353\021\316\207\326\246\204?\016\214\301A\020P\264?\333\361\272\223\000\374\276\277h\276\2450\336\336\261?\347\025K:F\215\216\277\337\353oL\354\354\247?l\320\222h8\275h\277\311\3307%\024\312\203\277\276\253\024\235\306wR?\256\303\234\024g\353\246\277\205\270m\005\274\327\245\277\337\025\246p\217Y\256?A3>\237\223\027\260?\321\005\273\274N\240\230?\362\302\261ly\242\253\277\353\014fOvu\261\277\'\350\357\212\342\342\221\277u\273h\024\370\245\226?0\375i\307(p\223\277<\366\327\321fJ\261\277\177\225\n\230rP\257?\025\256\\9,\321\233?\235\017\262\001\267\034\207\277t\334\346\367\303@\222?\303\016WJKg\271?\301\366M\367\206\374\266?\245\252\260\024;\222\270?\322\340(mh\345\222\277\360\257\324\340?\251\247?\226\007\366\265\033\334\254?\032J\313\213F\366\234\277HR\035H\316\010\245?\217\204\311_\231q\225\277\233^/\240\002\324\247?\363\024\222K\370\254\221?\277\211\037WC\r\246\277\352\316\310m\025\220\253\277\274\367M\242Y7n\277U\247\225\350\303\010\271?4g\236}W\002\260?\332\300\366C\245\210\261?(\242\211\222Q\215\231\277\'\262\274xZ\013\261\277(\010\244r\231\216\214?\021\221\326\021\275\034\234\277\317\355C\001\000\030\245?\241.\202\027\260p\244\277\331O\227\032\006\026\303?;n\225\311tx\237\277EJfl\312\013\232?\004q\326\247\253\376\265?H^V\356l\032h?\255`\002\031\231\227\257\277\374\\\004\325V\376\257\277[6\222xzg\306?\330*iQ\247\270b\277^\221Gc?\211\230?/\021R\347-\254\247?\005\"C\250\241S\242\277Z\221V\2472\351\234?\022V\035W\007\010\257\277\343n\000m\000I\255\277\026\211^\306}W\251?&5\000d\315\236\245?B\327\257_\2544\210?\221\250S2a\"k\277\240\360\035\321.\204\207\277\337p<5=n\222?\331iw\000\\\216\237\277\265\016\336\367\001\375\234?X\206\204\347E4\273?d\'\211\202(v\257?t\300H\2733\253\224\277um\371\017\223\021\263\277)C\023n\204yF\277\212\270=\027\250gT\277\265\260e\020u\265\230\277\"H\374G\262n\256\277\361?\214\231r\332\260\277DSP\014jsi\277J\231\351\211\202U\234?\357Q\312\270H^\220\277\353Q\207:\224M\253?\034\255\002\242\366\016\266?\345\330\313\267\000\242\243?\035\373\003\213S\205\222\277[\264~E5\031\233?\017\365\317\244\347,\253?\275\\\301\320\243Z\222?\026\342\246\327\373\361\233\277\204zn#pQ\235\277\314\340\004\215-\377\235?\312\205<\272\300\240\254\277\355\365\026\014\223.\263\277\316i\316p\177\202\231?3\314\201\032Xl\263\277<(\212\034\214{\275?Je}1~A{\277\374S\362\354\366\311\260?V\320\305VF\233\267\277O\3477X\317\236\\?\331\037\246\364\365\3156\277\330^3\312\004U\234\277\3467\200\223n\200\270?\266M\026\'Ln\260?\373\035\276y\322+\203?\336Ws\035A\016\254\277*X\264B-X\275\277Z\"P\032\204\010\236?Z\231\255\346H\236`\277\022&\2424y\030\207?$\351\355=c\"\245?\215\335+D\213\310\261\277\377%IL)*\203?\002/eD\214}\264\277\030\246\323\361\207i|\277y\277\327\243 \363\245\277\304\247&^\213\021\226?\365\025\241\306\014\226\245?$ix9n{\240?\315\217F\343\241m\263?w\003|{\317A\225?\353\356\347\250*p\276?\271\000g\245\002\364\255\277\204\275z\351\233X\276?p\317~$\361k\207\277\234U\246\2070\241\257\277C\006\244L&\302\244\277\314b.\237\233\310\257\277k\307\304n\006\362\251?\000- \227\352m\243\277\234m>\032r\213\270?\266\355\326\247\217\005\241?\014t\247C\331\313\264?\013\"$\361\275A\220?`\364\002\227+\230\221\277\332%&u(\000\250?\030\256q>\261E\262?\336&i\006M\356x\277\253K&\201\347\020\221?\365\026\254vr-\245\2777b\205\rH\253\270?vB`a\233/\244?\333\001\320\316\352\006\266\277\302\303uI\014\026^\277A\354q\325_\331\261?,\263T\353K\314d\277\253\365\244#\231\375\256?\217\362-\\\273[\251\277\237{2\271\006\261\267\277\266\260\215W\230\356\205\277V_\315\320\032\014\265\2773)\234c$\211\225\277\024\204\032\367]\330\254?3\267F-\257\001\232\277MG\315\000B\245\243?8\361q\251|{\247?\236\263\305\307\236J\252\277\367L\211M\367\266\206?\275\377\036\360\025)\247?\332\227\024\006\362\"\302?p\3573\315G\022\252\277\373\254`\321uY\240\277\030\222R\026D\330\210?e\261[KJ\302\242\277\3772\222\266d\215\256\277\326j0\275\240\232\247?d\260G\343R{\256?\244\221{\253\237\022\244\277\002=\036V|\332z?\265\347\322\322\244\255\273?\236\211\025\350sR\231?uYf\027YO\255?\315k\216\334K)\272?\263\200\014\334\224\262\247?\035E\332\023a\313\232?\276\274`\210/Q\201?$\233I\n\324\275\275\277r\231<\264\355\026\237\277\355c\343Y\274x\261\277j\366\014\030\035\037\271?\2129\310\273\245\217\246\277\305\301\233s\n\036\220?\304\003\351P\363\376v?\'M\353kp\024\224\277E\215\240\257\240\355\262?\264\302\256\300\250q\264\277\224\004\252@\030\277\263?.^d\244\256\312\264\277\232\256\226x\231\364\206\277\365\333\022\025\023&\223?\237^\300.\313x\243\2774Wa[L\303\206\277\245\317\020\327E\265\261?\362\221\374\273\n\260\204\277\021\2546\265\224\227\245\277xV?\371@G\231\277\002\203BN\215\376\213?R+\010M\031\225\214?[V\337\231i\237\300\277\251\342h\355\311\035\240?\262\205\363\023\261)\277\277\260\236cQ\343k\245\277Y\262\243gxRx\277&\271v\204r\035\265?c\314G\263\310b\270?{\373\214\216\2229h?kx\033K\013\354k?eDH\025nc\221\277\262&\276\216\250\213\236?z\322\344}\334\237\230\277\363\303V\241\334\371\201\277FU\303\013\252\315\274?\224C\354\333k}\201?\327O\233 \252`\304\277\203\333\226\342\313\265\240\277\365\224\211&\264d\211?l%\333\240\211\353\237\2771\321\014\000\307\260\251\277\315~v\024\017\376\226\277k\341\005q.\010z\277\246i\303\316\222\265\262\277\304(\307{{\017\220?\217\312\207%!\026C?\200\234S\277\333+\253?&7\370\267\352m \277\017\310\222:o\003\223?~\202\250/\3611\232\277\253\271?\341X\357\227\277\345\207\245\n$*\272?\276\3060\361y\017\242?\354&\246z\232G\266?\311\333\320\226\206C\242\277\224?\330\377~\311\266?\340\341\331\231`2\251?\372:j\373\270n\222\277\363\276\273\212\370W\260\277\201!\202\206p.\264?FoDE\351\215\267\277\013f\026\255XE\213\277\020mI\204\270\276\240?S\274ET\026\361\241?\232\337%\314b\200\257\277\221\216\230\025U\277\271\277\357D\363>y\rw\277\257\"+\003\324\002\262\277\014\250\354\2477\017\272?\034m\025|\264*\227?\322\323\360\027\220\342\223?\347\230Th\204\004\276\277i?@\327t\326\202?7\337R\305w\271f\277\364\336 \347o\307t\277\374\035\247\355!P\177?\307A\332\323M4\226?\345\312bM),\261?\367\'S\323\231\\\264\277R\302\336y\2611\252?\027\017\255\327n\323f\277\320e\001\027y\027\214\277o\355\004\336\220Nu\277\322\242\277:\224\333\221\2773\207\343D\224\232\220?\003%\316.\235\201\266\277@\317ZT\335\227\225\277\301\265\210\341D\363\273?\343C{\r\037\000i?>\207\256\374\252h\206?\264\t\024B\027\213\261\277\030U\304\022\337\354\251\277\014\364s&\0038\245?#\256\260\321\2642\230?\246\222\207\231\021\032\240?[\035d\037\302\010^?r1\n\t\326\353\253\277\347m\377\304\374\264\303\277+\017^\311@\357\243\277]\252\006\207\316\224\230\277\374\262]E\255w\222?\346\227w\034\213c\267\277rm=\216\003f\223\277>\356]5a\267\246\277\t\256J\362\252\034\243\277\nr\377\210\320\333\177?OmG\347K!\264?{\323\267\303v\010\250\277\222\005\367\365|#\260\277Z\235\'\330\222\207\223?\310\026k\020\303M\250\277c\003*\321\016\370\247?y\2531\201${\261\277\361\001g\373*\334\231\277\364\305\215;Yl\265?\211t\314\237\312\354\212\277\220\305\367\214\214\304\257?\275\231\003\234\202\027\224\277Q\027y\341\243P}\277\034>\003\253t\036\241\277\204R`\314\355\034\237?H\003\r\t\016\310\272?j\312\311\362M3\234\277R\363\214<\345\337\232\277\370\210\256\367\340\317\255\277rX\274j\345\327\250\277o\351?<43\252\277\324\243\032\254\3732\221?\273J_<\304a\256?Q\353:\246\027B\252\277\327\0335B\253\325\254?\243R\314\000g\216\251\277\031+\373\322\r\357\263\277\036*!\3476]\246?\021\267!U\210^\221\277\313\212\346\324\327\025\242\277\213\363\255N5v\250\2771\235\336=\316K\253\277\017\267\346\330%\216n?r\361^\217$\r\262\277\r\266\003!a\327\267?\013Z4yyb\263?Wm\270\313\256\247\223?D^\r\027\212c\245\277\272\'k\034\351\237\235\277PO\3028\371/\241\277=\224\322\234n\340\220\277%\2014\363y\205X?\034\262=\323\003\r\236\277\264M\377\241hMT\277\312\302\317hCh\240\277\014z\352\331y\341\267?\322\347\323\364\300\216y?,/SM\311\007\243?\341\200\314\256\030Io\277g\324wS\177\300~?S\362/\315\216\024\251?\306\177>\232\240\336\243?N~N!\340cj\277\266y7\240&k\247?\"\363X\022I\177\237\277x\337\317x\220\013\242\277\3721`\035\0008\204?&a\374\240\3307\210\277$g\326\026\270\366\240\277#9Q\324Dac\277\236\334;j)\333\200\277\277\207$\340y\350\272\277\273<\256\261\240\214\264\277\330\027X\346\020\030\222?(MR\317\375\250\246\2777R\330\014@\021\203\277\2069\213\256\226\361j\277\202@\244Gb%\244?S\314D(\020b\262?\343\312?\217\343\r\252\277\\S\366%J\033\255\277\201\351\271\357m\367\270?\027\307\332D\313\024\264?\0060)\254\255\256\264?\374r\001\243\211\344\271\277G\243\232u\222\372U?\325\234o\366\002\367\222?\037O=\361\276\344\220\277!\352\026\232\261|\302\277q\225\231(E)\233\277\263\367\327\215\342\337\243?\250\374F\251\017ms?\232nhLM6\264?_\207\234\013\260+\222\277\n\373C:\237\016`\277z\272.lkv\255\277\362z\205\260\313N\260?\004\334\311\004\337\266\242?\352\374\371\000\241\347[?\351_a\252~\365\232?\373\247u}>\006\265\277\374\0333.:\026\262?\177\237\002\270\331\316\213\277B\343\240Y\252\325\210\277&A\037|\225]\232\277\207\347\013\300x\273\246?S^1\353\240\023t\277\372\335\253Sa\370\231\277\221E\212\267\370\353G\277|Y\222?wmA\274\0027\241\277\354m\342/\3145\246\277\365{\217\372\004\372\247\277\314\026\221a\354\253\260?_\"\005\331\340+\236\277\311r\205\024\366\267\262?\270\245q\0377\333\252\27791\344\373\253G\250?\337\377\311\222\n[\252?/\214\217\377\360\020\274?= Sx\264Mp?\177\030U\303\3635\243\277-4\303qwB\204\277\334m\2746~P\233\277\tAw\373\021\240\232?O)\343\265\223\250\245\277\325\333VcB7\262?\303\3116D\022\245\220?\2652os\252\343p\277\377\364\357\362\246\006\217?\014\243\336\362\337\264\214\277\330aY\306a\263\262\277\341\022-\017O\262\242?0\241Rr\373\267\274\277\343\303\222G\274W\242\277\207]\315\305\313\201}?cu\341\205\237\330\246?\037x\342\341\202\312R\277]\374!\010\322\232\257?Ra\277\336O\225\223?\205\266\341\036D_\267?T\333\036]\003X\244\277\310\212x\301F\364\247?\r\022\357s\373Ka\277\306\220\272\002\2062j\277N\305fF\232\332\223\277\262\221\237\227\337@\241\277(\323\024zk\206\233\277\314`\353\302\265\023\206?\n~`N\254(\204?\210\375\021\277kS\203??\217\322G\373\376\206?\274\353\t\252\200\327\271\277P\214B{\275]\231?\033\246\363\310\310C\261\277\354\347\224Wy\203\237?\373y`\001i\344\240?\377\273\336R2\275\251?+\256\016O\202\014\224\277\276Jm\3156yz?]\334#\257R%\233\277\260a\245H\301\\\255?\272\240\320\320\365\350\262\277\003\377StG\024\222?\222x\236$\231\236\242\277\"%\251W\255\337t?\350\372\020\373v@\252?6\2653gbK\235\277\001\204\">4p\254\277\211\343:7G<\262\277+7\256\212\251\331\275?\311c\327G\242\204\253?\261\307nL\267m\263\2773\231\316\355\307\262\207\277\001\302Gn-\010\244?\337>R\021\234\215\263?Y\270\036v\374\356\256\277\350\351\223\256\355\273\260?\211\022\034\271\213\310\261\277\376q\313\342\035\r\261?;\245\302\367\310q\276\277\200x\223\206m\271\300?\313K\023\roR\271?\241\316\245l\375\325\217\277\n\222\314\212\201:\201?qQ\210\004\221\321\204\277\217\346\341\334\371m\210?\223\331\rU\021-\230?\3409j\322\314\316\261?\014>\236\234\260\314\217\277\341%\3422$\265\265?\2728\342}:\355\247\277qw\267\010\"u\232\277\347\260\341\202\301\007\253\277\250\242\3119\227\363\262?C$\310\356\010[\223\277\031\213\3702k\220c\277\3322\034\313\334\340\221\277PE\035\252o*\211?\366\\W1^\026\246\277)\314,\004\302L\222?\372n\031\254\262U\242\277\007\276*\217\235\336\225?P\223\033\021\2637\235?m\t\240Uv\020\206?E\310\277L\'c\242\2775X\200\307\'\206\220?\240\327\247ImO\213\277i\217\303\004\266Z\227?\177\346%iB\324\245?$\244a\\6\361r?z\216\031)\2252\262?\325:\260\240wze\277[\005.\033\352\272\243\277\310\3216\377\212\001\230\277\376\030\232V:f\\?=\025\321\305X*\273\277 \025\313\314\336\226x?_R\317\271\313%\262?U\033\201\373u\253e\277\225\257\025\316O:\245?\253\205b\376\rfl\277\364\353\331\217\234i\237?\004zVm!\337\242?A\305\327\301\\a}?\364\331\262X\314\031\203\277\372\273[/\253C\263\277-\213\213\217\373W\260\277GC\026f\220\363\215?%\243\317\373\025\003\241?w\366\322}\313\245\277\277\001\260k\365{m\274\277\253pH\177\344\334\242\277\333\275\022\315\204\230\262\277\355\203\233\342\334D\222\277\375\301;\332\220\276\225\277\363\232@\345>Xd?\221\302\266ic4\254\277\265S\254\341@b\250?&\025\306\202\323\346\247\277\030TU\232\037\264\235?\035r\253\336h\305\242\277\376\006N\336\260\333\261?\233`.\024\323\024L?\304\324\342\323I\204d?\355\202\214\260\234\017\270\277V\250\207\r\364\345\253?s\265\341j\370\226\243?e\021\037x\273\266\227?t\343\232\255\224:\273\277]\362\264\220+\025\252\277`\216x\241\242\242\244\277\205W^w\227\254\267?\037qN\325\332\340\300\277\276\327\327r\357Y\260\277{\272Zn\032\t\253\277\233\254\253\302;>\260?o\202.\327E\357\265\277[v\215(\365\213\246?\275\272\233\007\321\312\227?@\203\014$\204?\242?\343\321^\"\035\235q\277\365\033\227_&\036\215\277za\320\320\210\242\264?U\220\337\254\2352\277\2774b\367\254s\222x?\362\273\205u\251/\250\277(M\363\2555\025w\2776\250\277\206\272b\271?\342\241\330\324r\035\211\277;\265\373\243H\213\227?.\000I\352\244\034\241?A\206\251k9N\216\277x\'\220\314ed\223?\306\035\035:k!t\277\211\220\227\353\227\273\250?\345?\nks\305\262\277\264\240.\024\205\371\212\277Bb\330\r9\334\215?\240\365c\007\205 \252?\230\230\326\265\306v\272\277\311rA\226\'\330\303\277\261\342u\373\343\311\203?]\014\357\306yYu?3{n\007\251\205\244?\016Nl\325\235B\260\277R\331\013`^4\276\277\241\315C!Gx\203?\273\244MD\300N\254?e\302bp3\331\254\277\277\023\330is\207\274\277\254\002\027\262y/\250\277\326$\327F\203\274\214\277\037\355\360\326\017\000\276?x)\244;l\225\253\277\252\212\312\230\347\355\241?\245\341\215>t*\307\277\3531\374\034\374\342\301?N\320{?\274H\214\277\2638\266o\007k\264\277(\276\014\324\300+\301?\313\250\250\254\024\026\251?z\005\376\017\237y\213\277\035\245\351M\303\317\257?u6R\031\n\025\207?s\213\202pEv\270\277Z\205\r36\311\220?\034\032\364\227\007\334\242\277\'#2\220?\325N?\344S\213v\247\310\255?\207\211:\"\322\221\227?L\316]\365\356L\266\277\237\017\036\007x\\\213\277\323 rd\2266g?\335\240\201\370&\265\222\277\255\371b\270W\276\246?\232\322\tO\314\352p?\360\0011\227j\324\250?\205g\312\331B\360\221?\226\206\367B\010Y\224\277!\005c\240\273\224\234?\267{P\3130Xj\2776\014\271\334\037\372\261\277\302\357n_\251C\216?kr\'\237\343\001\272\277\037\303\3253\360\224\223?\332\234\325\035\264\026\265\277ld\373\275\256.\244\277h\026\3645?i\263\277\314(\342\257^t\264?\261\214S\317\350}\260\277\323\311W5\001\030\244?%\213)\364\254\341Z\277\273\r\253\344\316h\253?J\363\021_\240f\252\277\257aN\315\243\317\210\277O\370\363\350\245R\241?|0\231{\202Z\260??\357loHm\265?\031mJ\204\020!\252\277@\344\227n\227\312\252\277\372^\373]\316\240\207?\016jB\256W,\233?\"\350\277\023\337\202\205\277\3443\214\237o\004\261\277Mou\212\230\233\263\277\211\205*\326\265\202\266?\311\266\367#g`\234?>Q>F\221\226\263?-o\226i9\037\240?\324h\021\\2\021\241?\336\224o%8\233}?\021m\244w\014y\240\277p\345\223\300%\364\235?\207\332e\266\337\205\234\277>\244\273~5\013\220?_lO\177\354\364\200?\363\013\217\027\320\215\242?\004\021\024\236\355\036\266\277\232o\312\3511\307\252\277\204y8\352\257J\243?\037\373\010Y\230j\244?\003\004\021(X\021\232\277\001\257H\367\331:\270?R\022\203\264KQ\217\277#\033t\030\351P\277?\215/\315\273\304\003\205\277T\366$<\312D\226\277\035\2105{[\320\227?\317\333\177bN\337\251?\277\275\254\275),\213?\021\246e\376*\346\261?a\007\224\220\005\372\253\2777\325\253\270\001K\230?z\257\2277\006\2739\277\246\032\3674\372\212\231?\027|\216g\333s\243\277./\236\223\2310\261\277\230\363\033\275A\016\262\277MQ\261%Bf\233\277\006\373\350 \234L\204\277\211\026^\016b`\274?a9|\257\212A\223?)x\371u\237\340\245\277g\274\\\005r>\242\2770\300\0179\316\201\252?\205\310<\037R\036\227?\226\377\2422\027\210\243?S\224\344h\035\240\301?\350\343\204\206<\256\263?\346J\242\333E$\241?\254\242\\\311\274\337\236\277\024?\024\022|\311\252?(\220sB\023\t\270\277\220\354\026\022\313h\260?\360\355p\025\014a\300\277\356\327\303\375\241P\224?\034y\341\313\371l\232\277\374\254\017\330\2037\231?1\255\230\272\363\321\252\277\311\"\360\313\341\342\213?\216A\362L\275=\211\277<\225U\244\032\002\242?\364\263\025e8ob\277h\237w\224\237n\241\277\246\3554X\313\243\250?\354\323-\215&\022\254\277h1\207S\377i\234?So\224\262\226\033\223?\313\035\021\"F\234\277?\3660\356\312\3020\204\277|:61MN\202\2770\003\214F,\370\265\277\321\315km\364\024\260?\207\304\300\250\271\333\225\277\301 \363\2017\"\246?d2\2374\360Z\251\277\370\204\301\373\n\231W\277\354\346\371\3057j\260?\013\364H%\246\222\274\277t\rI\361\3231t\277\232va@\204\325\262\277\234kF\366\245!\223?]\306\302T\364\214\305?0V\210Gs\243\214\277\362;\2503\265U\260?GvC\326v_\226?\230g\312>\354A\273\277\313\0231n5R\263?\030\216\212\250\340\237\302\277\031\031{\255\315\275\260\277\265\304\202\307\276D\245?t+W\347\340}\203?D\000v\253/+\256?\277\227\342\205+\246~\277\236\216cN\020\032\247\277\304\024\257hZ\034\246\277\341-\340i\361\251\222\277\037\001\361\013\233\014\265?Pa\020\335KI\234\277\237\370#\226E\206\266\277\237\216-{\303h\241\277%\310\221\205\375\361\265?\266\210\363j\364 \244?R\362K\351\277\342\201\277\267\323\322\300\307(\246\277rh\340/g7\245?\266\221\355\034\231\001\211\277\013\241\254V\3246\251?\305\240=\341\031\025\242?\2274\374{0Y\250\277\313\031C={\314\242?w35? F\263?\210\006\001>\347F\264\277\246\010\201\314(\004\260?\023\354\300\222\366\204\262?Jl\020\330~4\271?\213\236\234\024\010\210\216?@0dRu\014\270\277\026\337.\273\220\026\224?\301\000\363\330\021\205\201\277\333,9\032\324\004\235\277\303\314\221\201\357\337\251?D\364,9A\001\264?-o0\236i\206\242\277#G\224\313\350\202\220?\274WG^v\372\255\277Q\272U\366\003\305~?%rh\n\276$\250\277\310h\355$\356&\260?\345\032R\003\252w\263\277_\027\253\031\257\310\224?\346\353nP\324\035\203?\247/\336\r\324\214\225?\370\236\232\217\n\330q?\206\262t\240\347:\220?h\343\262\322\340g\270?\332\246\372W\340]_?\2245\303\224\307p\215?\305\301b\225[\301\241\277\207\272>\227\0218~\277\305\022\272!\355\335\266?P\260\255~q\323\272?#\336B\243\031L\243\277`\3673\216T\343\200?\210<\266\377]\260\256?\204\227\002\021X\244\204?\2559\230T\006G\223?#\230\311\362@\222\262\277\241r\240\317m4\246?\276\314$\375\201\026\254?\311\257\010D\302P\276?\321\034~*\210T\241\277F\2313}\364\262\225?cz:\263U\227\230\277M`\220K\272\321\301?\036\0018\360\357-\235?[jf\360\316\257w?\323\334\220\033f \241?\326\315\0317=\023\252?\307\341\321v\023d\275\277\003X\321\344ZU\265\277\250\240K\244\200<\266?\354\212v\321\332\n\223\277\373\213\217C\236\302\263?\207\031\001+\316\215\246\277N\351\244^\232\342\236\277?K\243t\306\215\246?\352\330(\363I\002h?\240\000:V\202\213\220\277\301\241\342\013#b\245?1\276(\307@\031\266\277gl`\200R\324\264?\'\362\353A\230\233\253?W\2363\246\335\r\252\277Qg\234\246c\244\247?/mt@\243\rw\277Z\n_S4\032\246?>N,O\231s\254?\340\251\002A\2531\242?\034\213\223\255\201Zk?V\343\\\344$-\272\277w\302\'\224\277\203p\277\022\217\262z\257\232\260?\370\032\036\277@k\246?1J\367@\314\340\233??a\353~\262\246\236?\337[I\030\214B\254\277\t\301C\245O7\240\277\322e<5OL\254\277W\236Y\325\315\236\257\277\273i\314\245\345\315\256?\t\001=\003\222J\266\27720r\\\022E\254?-\017p\346uo\276\277\014\'\223\216)\357\263\277\177\215)_2\356\257?\025\341C\206\014&e?\"u}v\024\016\253\277\031+\216R\345\004v\277\214\351\247\330\031\310\211?\007;\177\354\360\264\264\277\270\367\n\312\241\232\224\277\345\243\024`y\303\251\277\217\204\305\366\307p\243\277\005\214\235\027\331\327\272\277\202]Y!\241\334\265\277d\364\377:-\304\230\277SM\217bC\230\243\277\016\273\216\345\266\'\304\277P%\301xy\003\215\277\'\230\220\023\361\330M\277\263\3413K\352\335\263?!i\375\300\231\360\251?\211F\363-L~y?f=\000d\377i\252\277\363Mg/\311\351\256\277\037\n\302\317 c\267\277\rDrNm\306\235\277\334\304\203W1\316\243?\020\323\333j\220\347`?968^!4\032\277*\250\351\364p\374\200?\033\205\346\233\003\016\210\277|g\370\365x`h?P(%t\260\023\212\277\316\372\361\307\323ft?/\262\004\016;\344\264\277%R\001\213t\310\220?\271X\356p-[\255?\244\317^F\371\\\243\277\336\3327\370\324\374\211\277Rh\006fK\330\264\277\351M_\325SY\242\277\356pBw\377\263\270\277\300M\372\264\375n\262?}\363a\337P\r\246?W\241r\3652\024\303?\326\006\352w\276\026\240?\215\000\357b\3447\207?o\241wi\211\207\211\277\024\330\365\276x\233\240?\225\212\305\266@\324\246?d\306j\030\3436\206?.f\205QR\266\234?\270\262\241\240\235m\242?\273\027R7\207\306\211\277L\303\271\312\227\335\212\277\242\353r\257\267\312\246?;UI)\375\255\270\277o\311\271\241\232\366\201?\243>\347\255\220\230\251\277\005\021\324,\261\016\232\277\037\340&\023Dw\202?\357pQ\233\261\337\200\277\027o\312p\3178\242\277\306\036\265\230\r\207\257\277\257\34284\020\324\241?Q\244\222)\362Y\232\277n\227\230\362vb\261\277\202b\302\254\212\035z\277\037R$U\021\233\202?\343\227\203C\332q\250\277\233\267\235\022\240\234\222\277\345i\330\233\345(\234\277*\324$\333\\b\232?\332\304\315\360x\206\235\277*p\022\177%\220\241?|\356\034+\020+\210\277+@q\365}6\225\277\002\301\234\006`\275U\277\204\321\3770t\036\271\277\022\344,\245\337,v?\343F\212=\037D\266\277D\223\035\365\324e\226?9OlS7T\241\277?+f\253\302\356v?cv\332\024\222\315\260?7\270\230\222\274\356\246?\342\276}\342\245p\256\277:\240R\316\361\375\300?q\310\207\335\241Q\260?\324\343\006\3765r\232\277&g\030\033\255\336\211\277SPb\211\254r\241?D\347\237\210\336q\205\277\013\356\017\334\367\021\262\277i\035\025\300\332\235\257\277[H\365\006\314\371\266?YXD\313\326i\265\2775\307E\230Z\210\241?k\032\004\324\216P\257\277\021W\033g\350\337\246\277h\332\t^\"\000a?XF\306\224\237\254\301?o\341\272\342\031\335w\277\301$\035\233HG\243\277\273\321\332\246\222\000\233\277\020\023\2476\265\240\226?\316\374uD\252I\261\2774\001\334r\251\200\240\277\340\264(\341(\020\242?\005d\220(\032\027\257?\313,}\254\364\372\262?\023\272\025\310\024\271\264\2779\000b\025Uh\274\277U\232\252\224\000\377\261?\3507\206Z\217_\253?\217\260\007\211\272\350\246?\250\247\335\002%\215\220?0`\'A\230\374\250\277\243\014nDR\274\271\277\202q\377\336\357d\300?tt\376\233A\376\246?3\365\2632\\\\\261\277\215=\010{\033N\231?\352\036V\346\275\377\226?\261:\237\027\235\353\260?R\301\374\360\330i\224?}:\320A\241l\242?\215\223\'.Q\360\246\2778\216\367\021\364@\223\277\177\377\234,m\017\254?4X\216\206e\336j? \"\334\036\r\017\237\277@aVY\251\004\262\277\321ByI\354\265{\277V\000\201\216\240l\242?\257\023\251\327\237f\234?\\\331\240\2176[\213?\031\350\222\\\314\354\254\277\275\346;\265\036\347\247?x\312\233\354\332R\232?O77\227\302\262\246?\346O\233f\206\331\211\277\305\333\355\307\317u\275?dw\211o\213\032\265?\332\304\265\000\003\351\204? \305\004\274\3617\231\277+eK\001\244\313\252\277\020O%t\310\315\226\277%\327n\345\017\020\267\277\370\350\371OR\352\222?\310aJ\261k(\261\277\247\222\336\'~F\221\277N{f\304\037]w?\336Zd\377\340\235\251\277\361\032\313Y\362~ ?\257\350t\016\315X\225\277\037\347\356/\336\243\244?9U5\\glx?\371\252Vp\231\027\234?\303Vk86/\213\277;K\222\263W\221\246\277\016\005\004\010/E\250?Qz\226x\\\023\242?\326SX\'\013\307\232?$\007\004J\267(\225\277\000\265\327#\367\342\222?\211\275\036f\337\276\247\277P\rJ\224\277\227\327P\177\267j\202?\361\344ih[\204\254\277\372\r2\323\230\005\220\277\347TW~\006\342\241?\322UK\335-\375\260\277!z\213}\021?\214?\2715\265bk\353\263?Sg\227\001P5w?\267z\356:\230\216\302?\001\257\307\252\353\246\277?\024E\020\215\021K\270\277\347Q\363W/\r\233\277\324\252G9\005\371\267? L\243A:\266_?\214\366\277\\/\361\220?\231\"k\360-\215\244\277y\255\354\356D\207\253\277\331\247\251\316\254k\245?\214u\351\245W\267\203\277\2317AL\000\013\263\277Y\304\321V\227\375o?\211\230J5Y\275\213?:@\\\223+\244\276\277\237\313]\246QV\221\277\344f\352\266\\\371\216?}\272\214\0149\227\231?h\030k\317F\373\204?8\034T\234\201\241\265\277\211\372\254-\035\321\227?\247\t\373\231j\267\273\277m\265=M-\243\271?\324\255\273\336\320]\261\277\223\345\213~\210c\210\277\325\315AD\351\226\240?\3472\217\216y\353\262\277i\177\235\202l\330\300?\010\3302\000+\232\264?4k\020\341\364\277\214?\277\303\337=\027\003\236\277\223\317d+D\207\215\277\203\016(\010\307\216\255?m\000\n\203\244\277\245?\365\\k\254\232\315\202?\326\274k\360\267\243\243?\027YR\255\246\354\223\277ajW3\231\005\244?\014\302\220\231\325\006\225?\275\252\240(\016mM\277\216{\206\336\030\334\235\277\"\313\3101\210&\241?\030\377] s\260\261?\004z\217p\253\217\224\277\306\322\'\032i\276\255\277\035\225\004\335X\023\260\277F\310[Wa.\246\277\000\330\0235F\020\246?\207\214\352\000\220\371\244\277\213*\310\013h\003m\277\316\014\274Q\237\362\217\277S/\245$`\324\242\277\225\257O\266K\211\245?\322\371\207@)\317\235\277\376\025\230Ui.\223?\202\360\265(:\207\276\277|H\002\260\332(\255?$\311\305\020\246\213\253?B\310\036\232\265\366\243\277\374E\3112\362\356\235?\037\277\333\375Q8~?aX27\332\233\212?J)\307}\314`\250\277\265}q\351z \252\277p\302\034\331]\010\222?\"3<\205C\205\252?o6\032\304W+\260?\030p\212\370D\353\221\277,z*\227\233\204\242?\326!4\271\233\370\231\277\036\202&\332)[\225\2773\031G\372vL\225?(\024\331\n\345\236_\2772a\200\204\271 \226\277\245\333l\326\037\224\273\277*\325\246\333\331:\253\277\225\370F\255&\345\204?K\210d\205\353J\266\277&\0061\033\226g\261?\017\216Zn\017\230c?G\036\211Z:m\241??\004\014\244H\302\234?B\tM\035\205n\267\277\234~\274\346;\362\264\277_\216\215\350\324\333o? 2\265\323\204R\243?\014\"\322\005[]\233\277\346\352\372\341u\264\262\277Oz\204ZTZ\215?hkvq\017&\220\277\242>E::\237\223?vwC\023\3430\260\277(\003\223\206\222@}?+y=\0257\377\240\277\036\345\245\213\214\274\247?\221\355\005\342\223Z\246?\005\035\314\200\031\013\245\277\222\377\350\202\234x\276\277\210mF\375\337\227\260?\035R\344V[\271\240?\201\201HK\264P\303?y#TY$\377}\277f\000\363\376\371+\265\277\020\205\326\305<\247i\277H\rm\326l0\201\277\033\262`\036\364\205c\277\376\001\364^\342@\247\277Y\262k?\262al?\320\356\210\326\330,\221\277\375\320p\371G\033\300?uRm\303\006y\206\277i\204\3355G\357Y\277\377\335\372\301\277\253\253\277\247\301.yy\036\254\277\365\335g\355d\267\231?$\273\217\317Q\325\270\277\311\274\020\246\353X\252?\351\247X\374\357u\257?\376\346\260\265\234\357\231\277 \275\201\263h[\210\277\034db;2\332\203?\2609\333D\177*\264?\255IH\206(9\252\277\234D\314(\337z\252?\3163\216\323%q\177?]\360=2\002\255\203\277\003}X\304R\362\201\27798\300\270\277\275\263?P\327n\202A\302\260?d\020\205\346~\311\244?\241\035i\363\333\243\242?\020\304\214\"\367D\253\277\325\245\367\251\232\005\235\277*\231e\232H\214\241?: XUD\235\202?\310\361\332V\204F\243?lr\205+\030\304\247?$\317&WT\370\271\277\352\274!\330\302]\274?ui\215\217p\326\260\277\202\200(\t\365\"\242?\366\315\260\023(\177V\277{<$ZJ=\200?|\311\364\273t\267\213?\301n\217O\037\316\227?O\300\211w/\300\263?\234\216\023\0356\260\244\277\003\232\346\034\337\205\251\277\226E\020k\371<\240?\016\017\021\302\2006\274\277\236\350\270\223\222Ar\2776\277\273n\266\304\251\277\266\245\037\3463|y\277\235.rD\313\365\223\277\241\351>\247X\322\273\277\3610\320\246\001\032\227\277\315U\264\215Lb\275\277\233\2045\277h\277\263?\237\231.\216\360r\242?\314\310!D\272k\275?\005\201\177\007\270>\262\277\233+.\351\316\266\212?\357K\211\013\264A\204?*\364o\206\357\266\245\277\237a\337!\254\353\235\277\340\274\334ybb\264\277\2271\r\267j\207\225?\334%\222[\370\033\242?\r\352ST.\243\224\277\262<\273B]\334\241\277\313\346\023\037\355\360z?\037\305\231C\244\332\242\277\230\200\035q\216!\225?\333\355\233;l?\250\277\266Lq\014dz\260\277\277\362\243O&\375\247?Hy\300\347\343\230\277\267\305\275\343_\375\223\277\030*\211\261\357\n\230?.\n;cNJ\231?C\3745\361\362v\266?\263\352)\317\005X\301\277\224\326\311xB\014\256\2772[\337\363T\t\227?e\365X[qR\253\277\354\251\356\360\003(\231?\214L\256\360\3771G?\016\320L\236\363[\247\277\030MP\300\240\333\235?\265\374^\307\247\340\200?\2068\004\371\271I\260\277%l\247\336\210\035\217?\262:\230}-\t\223?\241\361\326\321\247s\251\2770R\202\206b\217\216?\305\373%n\316h\221?\306j\350(7\245\240\277}o\277<\214\010\300?\321\020\300]\266\344\271\277\226\341\355\204\261\226y?\0342z\000{y\260\277z\233\361\300\362{\257?\344\221\366\373\334$\271\277\210TN\376\212\323\255\277\375\344?\353\275I\257?\277\266k\005\237\241\254?\214\016\235\242\260i\217\277\010\304Y\255aA\361\301?\275pO\316&5\247\277\016\350\232\355\334\036\240?\302\324`\347\361\346\266?\246\031\200\201\255k\203\277\266M\224\336a\341\300?\340\004&\311[.f?\366\034\251\206\016<\267\277\207\333\250\376\rIb\277~\270>\202\341!\226?\201 *S*,\257\277r+\237\221\274F\242?\257\304\306\316dx\226\277\313\307El\251?\242\277\"l\301\336u0\253\277ba\244<\305\237p\277\327e\023\332\337\351e?\006b\343\361\340\274\254?\253\252]\223\232\226\256\277\026$\211{l\221\230?K\310U\315\243\243Z\277\177\002\013\304]}\200?\364\005\200\307f\215\266?d\357>\017a\005\233\277\000\366Fs%]\221?\376\226\310\256\246\017\205\277SO\271\2360\026\240?\225\323\241\005\236\350\241\277y\251\353\024\255\335\262? \215\272\212\r\245\244\277\221]}X\035\010\301?\373\361\007b#\266\275\277\325\325#\2671\312\223\277\322\266\3418\307$\251?v\304\016Z\333\243\210\277\2670\"_T,\255?\332\225O\007?8\254?O\n\272\\\250\307\241\277\325\000\275\017!\363\200?\005\"\036\242h\206\262?#\315\243QZ&\252\277\356q\240uh\327\220\277\251r\327x\211\010\251?H\320dW\316&\261\277\374,X\301E\032\267?\214]$\365\327\356\200?\206\027\352\312>)k?\3014\270YW=\242\277\215[\np,\017\242\277\016\345\200\352\313\220\243\277\007\231\362\240\014+\256?\t0\305H\244\344\263\277\2337\363\250\366.\227?\263\231\320\325\335L\265?\017\254\236\363\361\204\264\277\300\031\024\256\321\323\243\277\275`#\266\371\347\242?\214\254\214\365\017\357O\277^6\201\217\227+\241\277\366\002A\350\323\034\232?rG\243\0255\335\250\277\327\352\017\360\r\256\217?\017\235\342\n\345\347\257?\326(\330\205/\025\256?w\327\375+\331\211`\277\256\226*\"\224\247\275?\257\313\030IE\036\250\277-\266u\210\217\022\260\277o\270\255\260\362\334\211\277:h\317\317\367O\256\277.\220&\373\306\004\223?\\\\\345M\347\252\263?\244PX\0328\001v\277*\255\205`~\240\245\277\376\216:m\317\004\260\277\247Kc\353\315\n\224?\241\373\005n\216U\222?>eT\242\3737\257\277\033\261\221\3631\336\214?\352\021\020\273\024:\246\277#J\027\267\272\234\246\277>?\233\360P\027\263\277\350\203+\372`\320\266\277\233ob\001\303m\265\277\312\271O\230f\253\241?\252\2415yz\377\271\277\303\236\177\021\232\025\232\277\001\337\216\354\031B\240?4\002\340\372\177|p\277A\313\244=:\201\264\277\240\031\\@\312\010\201?\006\327s\262@\224\261\277\274\314\017\244-\360\260?\236\347\240\356\266\224\244\277\260\314\203\306\360M\245\277\306\210K\221\345\364z\277\253\311/\322\033^\245\277\267<\202c,4\250\277H\343+\316+Wm?\320\324\324\2636\206\236\277\374\220y\033AC\205\277/\345:\000\r\037\260?\267Z~\371\026-\250\277\271::\313\256Y\275\277\247\342q\231\'l\272?\240\022*\250\237a\246\277\350ZD\343d=\267?\365?|>\3272\252\277\307\\\336?Xl\223?\032\310\2559\235\275\273?\332\2667v\374\021\261?\222\"\206\276_\021\242\277g\242A\227\333\234\242?\035\340\354V8\016\264?@:\333r\243s\261\2775\030\311\"?M\277\277\361iw\222<\355m\277\r?\343@\311\370\230?\355\335\363\032\337\204\210?\255i\223sCU\241?\007\201=\305\014\021\242?\023\301\335qx\303x\277\254=.\n\3252\226?\377\261>\224Z*\222?/\324\215\022o\354\217?=\022t\246\344\333\271\277\261\206\2601!N\272\277\'QG\372\307[\241\277\371\254\312%\205\371\234\277\203gq0\322\r\244?\274\021\226\037\022\374\203\277;m\257r\246`\216\277{\014\020H\326\251\261?\206\267\262u\t\244x\277\225\220l\030\227\r\240\277\273\2116\374u6\225?\004\275B\030\242\302\226\277>B\300\245\222\326\227\277\266\0105\245\2740\242\277S\325T\222\317T\216?\221\226\343j\367#\241\277P\230e>\371\236\261?\031\212W\237Nh\200?\201\0175\005G\253\231\277,LZ\203\006R\237?\310\212\335&\017\366\247\277\305\247\314\217\222\366\220\277h\335D\273\000\256\271?o\354\313\370\233]i?R\000q\256\223\364\241\277\3509\303\002~o\245\277B\360L\201\234\215\271?X\345\303\326&\364b?\300\316\027\225\252\350\214?\3229z\241\270i\242\2777\264 \377\235`\214\277\237\317\326:\230\371\266\277Yp\0275Y)\261\277\345\276\365/\320\352\234\277C\267c?~y\217?c\247t\312\377\034k\277\360v\002\277~.\020?0\2364@c\350\266?u\326#\033r\371\256\277@\207O\235v\223\226?H\031\335w\312%g?\010$L\204\364\007\212?&\221\222\007\030/\267\277XD\032\2666\217\267\277\323\206\234\343\345\211\242\277\365qu\312\272l\241?\244\230H\270s\004\213\277#\212\222\233\263t\245\277!\216w\020/\327\234\277^\014\006:\207\\\266\277l!\353\266O\272r?\275\210$\004\002M\254?M?2\254\367,\210\277N\377T\247S+\260?baB\216q\036a?\234\263\025\007_D\246?\206\337\355EI\023\257\277M5\322;c\206~?\020\2251\002\220ek?\236t\333\227\334\254\220?\233\216\341\036\311\000\254?\213\302\206@L\352h?\032\021\305\230\253\004\226?\026P\352>2k\242?\336\330\217\251\003}\240?\352.\271\224#K\235?\342&\361\007\333\211\227\277\237\303y/L\002\227?\255\241KCh^\257\277\220\313\360s4LY?-\332\336\262w\271\214\277\347;\351qEG\242?\323y\230\315\252\244\246\277\001l\313\026\361\027\265\277\235\365\302m\304\r\211?\007+\261\311F\250\237?\213C\002\277\305\300\245\277\266@(\331\026\026\265?U\204\223\027\376\007\232\277x\220,\371\217Q\221\277k\033\205\325V\230\265?\354\253P\367\245?\301\277\217*\003.|\002\221\277\014f1\2547V\300\277\326K\2039p=\223\2779^\372\311y\372\210?\3552%\377h\201\234?X\r\352\351\210\322\200?\3179\033m\010\332\221\277\263\367\022\336\351\217\202\277\355U\333w\3318\250\277\247ss\231Dlt?:\232\203@\363\362\200?\tl(\246ta\256?;%\317\320\216\375\177\277\014\0361(\224?\213\277\3019!\315\261\276\250\277\023\265\324\031\034\035j?\277\227\3212w\033\270?\303\325&\006\3278\240\277\303\304\244\322\002Cy\277K\r#\277\326\256\230?\177!\371wi}\266?\'\211\215\224\354U\262?h\364-\373\014?\252\277O1\212r\314\265\302?\342\034\363\351Z7t?\325\317\016\362\243]\216\277\036\265?\302\252P\253\277\275W\333@Y\307\247?S\245\214\375J\357\213\277\010HoR8;\261\277\304\316\272\237{6\257\277\372d\360\227\360h\251\277\351\201\340\224\034d\246?\032z[L\2713t?\242\3709Z\361\300\265\277\020\326c%\246\262\265?4\207a]i\025\234?h\325\020\250O\264\270?\035q\231\017\226\r\202?\020\3534\t=\207\254\277,<\346\336\323\035\272?\326p\241dm\022\273?\3321\336\314\200D~?8yjY\305\032\253\277\000\323j\357\271\206w\277\315[\354\336\003_\256?\302MZ\037\205\302\244\277\271\306p!\364\212\204\277\340=\005r\300!b?\007[_2\205!\263?\032Q\277v\034.\226\277\354\277\350\026\020@\233\277W9N\326\265\266\221\277\302\213\211\265\333\261l\277\2418\267\236t\336\213?x(8\356T\034\230?h>\222\350\271 \245\277f\002\333z\017\252\264?c_\256\3329\332\262?\024\323\004\227\r\373\231\277But\251(\346\247\277u\034.\361\266\204\250\277\222{{\372\216\242\216\277\253d\")\"\375\237?l\021\347\234\232\224\224?\0249z\201_3\216\277\363\327\002\232\031\272\212?\347v\001\237rh\230\277\026u.:\311\373\262\277\266\272N\352\323!\242\277\027\315\r\021q\372\220?|n\353\246\342Yt\277\343?\200\341\304\267\262?N\217?e\005\334\243\277\232;\315\240\230\\\264?\264\226\201\026\256\363\260?\013\253\227\034\036Sf?(x;\231\0051\212\277\363\256\260\364\354\214c?\344X\222\270\316f\260?\023\312\266T}5\260\277\276\215&\313\206\317x?dc\000\001~\232\227\277\264L\036R\'\213q?\356\2006\355\255C\263?sXW{\0219\255\277\240yQ:\246a\300\277\372\024\2526\203\233\213?\325\343+Hl\263\262\277\002h\264\320TY\256\277\217\347\244\n\373%\206\277\236\030\225%V\262\246\277\375\2159\006\036\005\241?\\\030\365\352P:\240\277\031W\356$\013\310\261?6\356\3428\262\311\206\277\222w\'\027\370\313\251\277\310\030&\0253\037\266\277\000h\3303u\242\230?\027\306\274w\212\377\222?\212Z}2\357\235\233?\212?\310\274a\223\262?4\017\3750!5\232?@\367sE{\211\256\277\244\231 \277;\310\257\277Z\036\000\006\243\313\207?0B\001+\\R\225\277[\265\241\312\263\361\226?\244ct\2358\025\262?Br\313f\240\313\263\277\302\004Z\003\336\003\235\2776\212^\224\266\216\261?\255\215\2252\034\r\247?o\017C\354Vu\230\277Y\025a\265\244)7\277\017a{[\371\254\260\277\330f&\277\2468\233\277\014UI\351X\210\220?x\336\376Jp\250\263\2772 \254\324\240\214\227?\364\020\225\243\370k\263?\334\254\216n\272*\261?\301\241\010\333\215\014\240\277\225\3578\320\374\207\261\277rJ\305\014\3711\272\277\016xY\246;\315\210\277\230\273\336\355*\005\247\277\301\226\372\352\355\315x\277\360H\355c\305\324\250\277\215D5\250R2\257\277\341J\335\353\325B\241?\377$\276\340P\325\221\277\206\337s\330z\034\230\277\353\212;$\255\335\261\277\260\276?\344R\275\264\277b\034\335\200\350r\240\277\345\\\373\331\016\373\244?\237Nl\352:\016\241?\330\342C_U\270\214?\346\230\227\234\354\266\206?\n\216\363\213=\234F\277\023X\233\342\364\317\244\277\337\225+fQ\371\232?\003^\245q\246\210\211\277\277wt(\367\023\263\277]\366\367F\000M\226\277\265Wi\000\013q\270?\255\276q\215\214\022\300\277@\254\315\372]Z\256\277\205e\354o\215\267|\277\215\033\030=R\212\202\277\236\233\242\230)\266l?`.\341j\253\335x\277\3234v\363\327\323\276?\020\013*yn^\231?\2426\t\016c\334\300\2777\031o\010$H\253\277YWId\215\037\255?\205H\033$!Q\221\277\r#\350\321\215\312\262?\344)\014R\034\265|?\0171\3139\204\n\223\277\332\247\302;\363I\212?q\032|*\006\037\272\277\212\355E\375\342\037\251?\377\"\022\264\3677\264?\222\036\221Y]3\277\277i\262\324\330\251q\235?%\021\305\211j\026\240\277\367f\206\301l.\242?\357V\257\"=F\227\277\306\261\313\317\024\263\267\277U\205R\345\235Z\262\277\232Pn\204\275\335\241?fb\365!s\027\257?\275c\021vci\207\2775$\226\023\244\243\264\277F\025\264I\210J\274?\"o\231\327\217\033=\277{$\000$\261p\263\277\357z\274\352\364\231\250\277\353#y\265L\204\266\277N\215\353-Y\n\252?!\020\332\256\036d\230\277~\251j>~H\277\2772\262=z\202\353\244?\032R\205\341fS\254\277\024e\023\177\232\235\251?\276\372\374\035\265Cs?\327Zj\026\267k\254\277a\323\210\265\252\266\250?\203M\316\313\033k\263\277{B\224\270]\020p\277\335\366\224\\\361@\253\277-\004\206t\315\034\243\277P\214L\'\302\316\241\277\363\006\244\001U\023`?y\313\340O1\324\247?\234-[\301(\356\223\277w\370\306.\205u\265\277\320XT\221!\"\253\27725\350\235%\217\240?C3U\261\032>\244\277q\333m]rh\267?Em\262T\374Z\177\277\264(\014\205M\013w\277\254\023{\347\nw\252\277\274\370>\353\264\367\243\277\374z\304\000\262\024\221\2778\320EI\001\353\253?\350\252R\035\263t\227?\367\237\324\025\032}\242\277W\206hT\355\276\241\277\3771\316c\275\003f\277\361\364\027\203\274\277\246?\002\255\225\025\317\014\230\277\027\0252r\316J\272?{]\\\302F\255\236\277\337\330_\027K\304\233\277\201\216\370\007\002&\250\277`\234\003M\017\251\256\277\3272\307:.\277\253?\347\243\326\325\340\243\257\277\034\200\344\204\220\324\264?`p\273\315K\247\246\277\276\014Q\336\323]\237\277\262\341\016r\033\312\260?2\242H@\265Y\227\277\335\267\221m\205\241v\277`W\273nG\230\272\277t\264\345+\037\035\271\277t\340\200+H\301\234?Z\001\330\2159\372\202\277V\003g%.f\253\277\373\232\353\317E\200\255\277s\357U\354\270\254\221?zl\211\304bA\251\277\343(\024\353\221T\270?\346\265\262\315M\314\237\2773p.\332Y&\264?\357\203\330\r\262\'\226?_N\217\032\024V\200\277\370\233\302a\030\316\237\277\260*\207\356\270\245\226?\323{\007\364\2153\246?\2618\240s!r\232?\177\253\246\034\n\261\257\277kt\342&C\346]?\t\021f\266\375\017\254\277![\344$s@\257?\021\206\252\255\2717\035\277\226\362\3020\235\231\242?\202R+\004\202U\241?\321\002.\304K\221\263?\353\211\255q\"\364\242?\266\243\177\331\033N\252?}\266\372\005Z\245\227?\324\212l\\\261\234\270?[\017)\005\244H\277?z\203!\332\315\373\266?.\2358\321\017\273\252?\002\256`\036\366\312\233?\361\234S\301\001\037\206\277\rR2\351\305x\260\277\035X>F\032\260\255\277\253\205o\247\263\224\237\277\351\252\275ca\177\255\277W\"\261-\205\000\261?\3610/<\304\346\247?\250\\\007\000\024\253\212\277\343\201JR\255X\243?\020jU\230~\334\244\277\240\250{L\3014\207\277\204\3420\020g\272\205?\320TLj\244\270\240?/\246\030\312\363@\230?\313U]\350\3176\243?S\370\003\235\223\'\237?\351h\020\035\313\251v\277m%\224\201\256\214\263?\314\206HY\266k\253\277\345S\360%}\222\212?\216p\213\325\207\231|?\017`\346\225^]\253\277\300\211\2319[\217\272?\205v\365i\3406\265\277\014\304\212UJ\352\262\277-E\030\206\341\031\240\277\223\276j{\022\330\265\277\3459>k\366\032\261?\232\2372\333\301\021\260?\316\377\273\372R\217\230\277]\255tqh\266\237\277\360\025\177\267\216\303\255?-RkfZ\211\223?\342\021\310\366K\206\263\277\255\'\341\356\313\271\242\277\273\334aP\202!d\277\245\335\350\017\240\360\215?/d\352\311s\r\231\277\260\344$\017\207\035\262?[\377\334f\'\340\246?aa\200Oq\315\246\277\212\022}\260M(t?\2075q-\217\260\254?\332h\242\241\230\361\264?\314z\357\256~R\201?\177\222\341M\317\247z?4\272N\030\020N\235?si\002F8\326\210?\336\276\267\230\223\240\215\277.\241Tv\324\242\230\277\306/Y\022O\371\254\277%kk\036\356\371p\277\t\232k\213\222\246\271\277\022\371\303Z\345\323\240?\317\223\255\177\'E\250?\274\334 \364_\351\244\277\n\265l\344\006W\256\277 \343X\030`\276\216\277\326\251W\035\212\000\222?\026@\327\275j\310\227?hU\205\213\324x\270\277)\352\324Z\306g\231\277\373\243%j\026\365y?m\3625\010\353 \222\277V\210?\320s\276\352O\272\272\277\340/_\360\014w\260\277\022\376\264\302T\347\264?\361\251\356YQ\273\266?(\316\336\342\367\202\266?\352\223\365\377\214\017\237\277\372\013\272\024\304\314\236?\376\307\002\206\2663\236?`\256<\256\332\341\243?\024\274\034oIV\245\277\365\200\334)\336\317\275?mmY\205\003\273\231\277\265\334\2440\017k\245?FC\345\t\256j\241\277\026\2210\265\320\022\255?L\013\237\342\370i\254?\353\204\027={\260\265\277:\357`\270\227\274|\277|\302\361\207\317\026\251\277\033\377\255[\023\363\220\277\252\240\312\312\257\311\252?\376\315\350\332+q\266?\032j\247&\254\007\267?\267\234\303\340\236\277\263\277\325\005[\204h`\253\277\324O\006\217z\341\260?h\214\002\247&p\224\277H\026\007R\013xj?\357\010\251\273\016\034\260?\224o\016\233\346\215\223?\272\356\202o\354c\260\277\216\335\362\003\302|\240\277\233]\230A\323\t\253\277\320\227U!t\362\250\277\3171\246\335\024\335\216\277\260\3462\366{\203V\277\345\'\"\233L\314\276?\353Ku(\340\273}\277\231iG\301I\256\241\277\2448\247Na\276\224?K\200\261\263(4\256?\307\013a\033\031X\202\277c\235\204\313z:\233?\001\r\276v\236y\271?\275\350\221\316\234\366\207?\206\253\223{\214X\261?\316n\310\373\'\247\211?%\025\032\346e\322\243?\'\333\377q\3617\224?X\360\372\371\342\327\257?\342\307\3357\0178\252?[\361\352$ZI\264?\225Bj\367\201\220\300\277pHN\233&kl\277\254\252\241\250\306\\\254\277\203\322\234-\3241\230\277o\262\256\253\237V\260?\304\317z\304\213}\276?D\177\"\243\323\244\246?\206Z\000K\204\301\264\277\"\024\237\tb\236\231?)\2172\251\210Z\230?0f\341UnCA\277\207\367-G\235A\273?\365\225\353\0356c\203\277,Y\234V]E\256?wek\245\351\251\270?w\310\364\302q]\242\277\030\342ha|$\252?\227\346\327\334\277;\227?\004s\351\262-\224\262?^\361S\206\371V^\277W\020[2\261\r\205\277\255\n\300q\363\205\267\277#\246\n4\363\340\241?\224!\340\333\326\250\246\2777\355w\244\246G\270\277\356(\212\022Uw\261\277\342\306\013\320>\014\303\277\261\337\023\023\206\256\234\277\231\256\276t\200\377\232?/\334)m9\037\244\277\310\356\374l\277F\250?\350\320\300\372\315\004\224\277\0316\230\n\022\207\216?b\217@\233\242\016\275?\200\235\r3\207\221\264\277\222\340\346\356Z3}?c\341\332\277\330S\242?\254\247D\223p\321\250?\220e\"\016\234R\205?y\304s]\017\275\224\277+\250\306k\242\'q\277\000\360-G\237\315\264\277\004\261|\255\33661\277\265\332\002\336K\t\236\277%\237\262\314\031\032\253?\227\263@\250\004[\260?3P\265u2\027\234\277S\240+\312\360\300\225?S\320\261D\036\322\264?Rw\214[\370DP\277M\314o:\314\342\235\277@d\r\261\366P\246?\343\360\262\235?\342\260?\213\235\3432cd\261\2775\314\'\236\274h\233\277\275\325\254\001\013\211\217\277W\3261\324\365\367\261?\223Z*B\007\266\213\277\314\262\376*\270S\235\277;9|y\261\336S?\252@\177st.\241?69N\030\332\265\271?\227\276j\367\273\243\246\277\2778\033\000\270fb?\367\313O\231D\361\246\277\342\225Gy\004D\236\277-\307\020\014\2724j\277\205r\370\362\334\325\202?4S@\364\247\"\231?OQ\206X\273I\221?\252/2\253\346\235\267\277\362\224\201;\357\245\237\277\244\303\262\241\243\376\247?\303\355\275f,\034\207?\347\310\262Y\352\266\262?\355\303FL\301\307\217\277\220_\021Rp|\261\277h\214)\333\0040\247?W\216\023\036\3564\231?\240R[\223\301H\240?\\\031\355\025\224p\216?$}\264\342Xn\265?W\2230k\032\376\256\277\360w\347r!\2416\277\233\223\342\376zo\230\277\241A\227\021D\341\205?\001\310\251\267\215X\267\277\264\326P\351\307F\230?\241\353\327\237\322\036\267\277\300\006\366\257_z\255?\270\032\272\024\234\320\245\277\366\336q\214\346\212\233?`\235-\314@\025\250\277qmB\237mD\210?\323\375\342\246\202\241\220?MS\n\237f\340\260?\326\300\244M\370r\217\277\177\314\200\002\020\221\251\277dULh6\036\306?\312k\266\312\246\325\251\277\363\333\\\202\026\250\262\277\371\336\224\327G\236\177\277\031de\366\2125\226\2774\311OH;\023\244?\036q\215\334?F\246\2775\244[\266\322\370\227\277>/\267\333\314\271\204\277\352$_\212YM\227\277\020\236\325\243~)\247?\302{\3709hS\232\277\2214X\322\311\240\220?\271\211\222q\332Z\262?\247t\200\213\366$\244\277\364\315&\237J\255\266\277\304\314\314\200\237C\263?\005\306#5R^\246?\203@\336\376\341\345\277?" + } + } + } +} +node { + name: "layer_0_type_1/matrix/read" + op: "Identity" + input: "layer_0_type_1/matrix" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_0_type_1/matrix" + } + } + } +} +node { + name: "layer_0_type_1/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 20 + } + } + tensor_content: "\204\343\014O\003\210\366\277P\341\006\021?-\354\277\327\363/\266\360A\317?\213hJV\017\013\317\277\315\210_\007[\334\362\277\037\344U\246\361\260\332?\005\007g\315\200\310\324?_\032\357}z\034\360?\326U3\271\330\275\375?\300!\032\275U\037\353\277N\273A45\014\345?]T\230\213$\237\354\277k\312M\237\032\373\303\277\204p\231\205Ko\312\277k\241y\346<\321\326\277\271\305n\230t\3252?<\337\365EH\014\371?\033\024\342\000 Date: Thu, 31 Oct 2024 20:23:49 +0800 Subject: [PATCH 098/193] add deepspin pair style (#36) --- source/api_c/include/deepmd.hpp | 14 +- source/api_c/src/c_api.cc | 8 +- source/api_cc/include/DeepPot.h | 20 +- source/api_cc/src/DeepPot.cc | 71 +- .../api_cc/tests/test_deeppot_dpa1_pt_spin.cc | 4 +- source/lmp/pair_base.cpp | 872 ++++++++++++++ source/lmp/pair_base.h | 116 ++ source/lmp/pair_deepmd.cpp | 1024 +---------------- source/lmp/pair_deepmd.h | 87 +- source/lmp/pair_deepspin.cpp | 572 +++++++++ source/lmp/pair_deepspin.h | 42 + 11 files changed, 1703 insertions(+), 1127 deletions(-) create mode 100644 source/lmp/pair_base.cpp create mode 100644 source/lmp/pair_base.h create mode 100644 source/lmp/pair_deepspin.cpp create mode 100644 source/lmp/pair_deepspin.h diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 260a122451..f664d622fe 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -971,7 +971,7 @@ class DeepPot { * @warning Natoms should not be zero when computing multiple frames. **/ template - void compute( + void compute_spin( ENERGYVTYPE &ener, std::vector &force, std::vector &force_mag, @@ -1105,7 +1105,7 @@ class DeepPot { * @warning Natoms should not be zero when computing multiple frames. **/ template - void compute( + void compute_spin( ENERGYVTYPE &ener, std::vector &force, std::vector &force_mag, @@ -1147,7 +1147,7 @@ class DeepPot { const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotCompute( + _DP_DeepPotComputeSP( dp, nframes, natoms, coord_, spin_, atype_, box_, fparam__, aparam__, ener_, force_, force_mag_, virial_, atomic_ener_, atomic_virial_); DP_CHECK_OK(DP_DeepPotCheckOK, dp); @@ -1221,7 +1221,7 @@ class DeepPot { }; // support spin template - void compute( + void compute_spin( ENERGYVTYPE &ener, std::vector &force, std::vector &force_mag, @@ -1345,7 +1345,7 @@ class DeepPot { }; // support spin template - void compute( + void compute_spin( ENERGYVTYPE &ener, std::vector &force, std::vector &force_mag, @@ -1946,7 +1946,7 @@ class DeepPotModelDevi { }; // support spin template - void compute( + void compute_spin( std::vector &ener, std::vector> &force, std::vector> &force_mag, @@ -2122,7 +2122,7 @@ class DeepPotModelDevi { }; // support spin template - void compute( + void compute_spin( std::vector &ener, std::vector> &force, std::vector> &force_mag, diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index a086e0eb75..9dae45eb92 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -289,7 +289,7 @@ inline void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, std::vector e; std::vector f, fm, v, ae, av; - DP_REQUIRES_OK(dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, + DP_REQUIRES_OK(dp, dp->dp.compute_spin(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, fparam_, aparam_)); // copy from C++ vectors to C arrays, if not NULL pointer if (energy) { @@ -486,7 +486,7 @@ inline void DP_DeepPotComputeNList_variant_sp(DP_DeepPot* dp, std::vector e; std::vector f, fm, v, ae, av; DP_REQUIRES_OK( - dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, + dp, dp->dp.compute_spin(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, nghost, nlist->nl, ago, fparam_, aparam_)); // copy from C++ vectors to C arrays, if not NULL pointer if (energy) { @@ -894,11 +894,11 @@ void DP_DeepPotModelDeviComputeNList_variant_sp(DP_DeepPotModelDevi* dp, std::vector> f, fm, v, ae, av; if (atomic_energy || atomic_virial) { DP_REQUIRES_OK( - dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, + dp, dp->dp.compute_spin(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, nghost, nlist->nl, ago, fparam_, aparam_)); } else { DP_REQUIRES_OK( - dp, dp->dp.compute(e, f, fm, v, coord_, spin_, atype_, cell_, nghost, + dp, dp->dp.compute_spin(e, f, fm, v, coord_, spin_, atype_, cell_, nghost, nlist->nl, ago, fparam_, aparam_)); } // 2D vector to 2D array, flatten first diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index eaf9995794..34a5f530d9 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -459,7 +459,7 @@ class DeepPot { * @{ **/ template - void compute(ENERGYTYPE& ener, + void compute_spin(ENERGYTYPE& ener, std::vector& force, std::vector& force_mag, std::vector& virial, @@ -470,7 +470,7 @@ class DeepPot { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); template - void compute(std::vector& ener, + void compute_spin(std::vector& ener, std::vector& force, std::vector& force_mag, std::vector& virial, @@ -558,7 +558,7 @@ class DeepPot { * @{ **/ template - void compute(ENERGYTYPE& ener, + void compute_spin(ENERGYTYPE& ener, std::vector& force, std::vector& force_mag, std::vector& virial, @@ -572,7 +572,7 @@ class DeepPot { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); template - void compute(std::vector& ener, + void compute_spin(std::vector& ener, std::vector& force, std::vector& force_mag, std::vector& virial, @@ -660,7 +660,7 @@ class DeepPot { * @{ **/ template - void compute(ENERGYTYPE& ener, + void compute_spin(ENERGYTYPE& ener, std::vector& force, std::vector& force_mag, std::vector& virial, @@ -673,7 +673,7 @@ class DeepPot { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); template - void compute(std::vector& ener, + void compute_spin(std::vector& ener, std::vector& force, std::vector& force_mag, std::vector& virial, @@ -773,7 +773,7 @@ class DeepPot { * @{ **/ template - void compute(ENERGYTYPE& ener, + void compute_spin(ENERGYTYPE& ener, std::vector& force, std::vector& force_mag, std::vector& virial, @@ -789,7 +789,7 @@ class DeepPot { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); template - void compute(std::vector& ener, + void compute_spin(std::vector& ener, std::vector& force, std::vector& force_mag, std::vector& virial, @@ -1105,7 +1105,7 @@ class DeepPotModelDevi { *same aparam. **/ template - void compute(std::vector& all_ener, + void compute_spin(std::vector& all_ener, std::vector >& all_force, std::vector >& all_force_mag, std::vector >& all_virial, @@ -1189,7 +1189,7 @@ class DeepPotModelDevi { *same aparam. **/ template - void compute(std::vector& all_ener, + void compute_spin(std::vector& all_ener, std::vector >& all_force, std::vector >& all_force_mag, std::vector >& all_virial, diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc index d69e749ac2..4afdf6442e 100644 --- a/source/api_cc/src/DeepPot.cc +++ b/source/api_cc/src/DeepPot.cc @@ -136,7 +136,7 @@ template void DeepPot::compute(std::vector& dener, // support spin template -void DeepPot::compute(ENERGYTYPE& dener, +void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -155,7 +155,7 @@ void DeepPot::compute(ENERGYTYPE& dener, } template -void DeepPot::compute(std::vector& dener, +void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -171,7 +171,7 @@ void DeepPot::compute(std::vector& dener, false); } -template void DeepPot::compute(ENERGYTYPE& dener, +template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -182,7 +182,7 @@ template void DeepPot::compute(ENERGYTYPE& dener, const std::vector& fparam, const std::vector& aparam); -template void DeepPot::compute(ENERGYTYPE& dener, +template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -193,7 +193,7 @@ template void DeepPot::compute(ENERGYTYPE& dener, const std::vector& fparam, const std::vector& aparam); -template void DeepPot::compute(std::vector& dener, +template void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -204,7 +204,7 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam); -template void DeepPot::compute(std::vector& dener, +template void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -301,7 +301,7 @@ template void DeepPot::compute(std::vector& dener, // support spin template -void DeepPot::compute(ENERGYTYPE& dener, +void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -323,7 +323,7 @@ void DeepPot::compute(ENERGYTYPE& dener, } template -void DeepPot::compute(std::vector& dener, +void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -342,7 +342,8 @@ void DeepPot::compute(std::vector& dener, ago, fparam_, aparam__, false); } -template void DeepPot::compute(ENERGYTYPE& dener, +// nlist, no atomic : nframe * precision +template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -356,7 +357,7 @@ template void DeepPot::compute(ENERGYTYPE& dener, const std::vector& fparam, const std::vector& aparam_); -template void DeepPot::compute(ENERGYTYPE& dener, +template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -370,7 +371,7 @@ template void DeepPot::compute(ENERGYTYPE& dener, const std::vector& fparam, const std::vector& aparam_); -template void DeepPot::compute(std::vector& dener, +template void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -384,7 +385,7 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam_); -template void DeepPot::compute(std::vector& dener, +template void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -475,7 +476,7 @@ template void DeepPot::compute(std::vector& dener, // support spin template -void DeepPot::compute(ENERGYTYPE& dener, +void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -494,7 +495,7 @@ void DeepPot::compute(ENERGYTYPE& dener, dener = dener_[0]; } template -void DeepPot::compute(std::vector& dener, +void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -510,7 +511,7 @@ void DeepPot::compute(std::vector& dener, datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, true); } -template void DeepPot::compute(ENERGYTYPE& dener, +template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -523,7 +524,7 @@ template void DeepPot::compute(ENERGYTYPE& dener, const std::vector& fparam, const std::vector& aparam); -template void DeepPot::compute(ENERGYTYPE& dener, +template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -536,7 +537,7 @@ template void DeepPot::compute(ENERGYTYPE& dener, const std::vector& fparam, const std::vector& aparam); -template void DeepPot::compute(std::vector& dener, +template void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -549,7 +550,7 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam); -template void DeepPot::compute(std::vector& dener, +template void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -658,7 +659,7 @@ template void DeepPot::compute(std::vector& dener, // support spin template -void DeepPot::compute(ENERGYTYPE& dener, +void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -680,7 +681,7 @@ void DeepPot::compute(ENERGYTYPE& dener, dener = dener_[0]; } template -void DeepPot::compute(std::vector& dener, +void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -699,8 +700,8 @@ void DeepPot::compute(std::vector& dener, datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, ago, fparam_, aparam__, true); } - -template void DeepPot::compute(ENERGYTYPE& dener, +// nlist, atomic : nframe * precision +template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -716,7 +717,7 @@ template void DeepPot::compute(ENERGYTYPE& dener, const std::vector& fparam, const std::vector& aparam_); -template void DeepPot::compute(ENERGYTYPE& dener, +template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -732,7 +733,7 @@ template void DeepPot::compute(ENERGYTYPE& dener, const std::vector& fparam, const std::vector& aparam_); -template void DeepPot::compute(std::vector& dener, +template void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -748,7 +749,7 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam_); -template void DeepPot::compute(std::vector& dener, +template void DeepPot::compute_spin(std::vector& dener, std::vector& dforce_, std::vector& dforce_mag_, std::vector& dvirial, @@ -1121,8 +1122,9 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); +// support spin template -void DeepPotModelDevi::compute( +void DeepPotModelDevi::compute_spin( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, @@ -1144,13 +1146,14 @@ void DeepPotModelDevi::compute( all_force_mag.resize(numb_models); all_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii].compute(all_energy[ii], all_force[ii], all_force_mag[ii], + dps[ii].compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], all_virial[ii], dcoord_, dspin_, datype_, dbox, nghost, lmp_list, ago, fparam, aparam_); } } -template void DeepPotModelDevi::compute( +// nlist, no atomic: precision +template void DeepPotModelDevi::compute_spin( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, @@ -1165,7 +1168,7 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); -template void DeepPotModelDevi::compute( +template void DeepPotModelDevi::compute_spin( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, @@ -1240,8 +1243,9 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); +// support spin template -void DeepPotModelDevi::compute( +void DeepPotModelDevi::compute_spin( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, @@ -1267,14 +1271,15 @@ void DeepPotModelDevi::compute( all_atom_energy.resize(numb_models); all_atom_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii].compute(all_energy[ii], all_force[ii], all_force_mag[ii], + dps[ii].compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], all_virial[ii], all_atom_energy[ii], all_atom_virial[ii], dcoord_, dspin_, datype_, dbox, nghost, lmp_list, ago, fparam, aparam_); } } -template void DeepPotModelDevi::compute( +// nlist, atomic : precision +template void DeepPotModelDevi::compute_spin( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, @@ -1291,7 +1296,7 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); -template void DeepPotModelDevi::compute( +template void DeepPotModelDevi::compute_spin( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, diff --git a/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc b/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc index df325ab5de..c2cb01f6a8 100644 --- a/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc +++ b/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc @@ -125,7 +125,7 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist) { deepmd::DeepPot& dp = this->dp; double ener; std::vector force, force_mag, virial; - dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); + dp.compute_spin(ener, force, force_mag, virial, coord, spin, atype, box); EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); @@ -157,7 +157,7 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { deepmd::DeepPot& dp = this->dp; double ener; std::vector force, force_mag, virial, atom_ener, atom_vir; - dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, atype, box); EXPECT_EQ(force.size(), natoms * 3); diff --git a/source/lmp/pair_base.cpp b/source/lmp/pair_base.cpp new file mode 100644 index 0000000000..e98a4f09f5 --- /dev/null +++ b/source/lmp/pair_base.cpp @@ -0,0 +1,872 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include + +#include +#include +#include +#include +#include +#include + +#include "atom.h" +#include "citeme.h" +#include "comm.h" +#include "compute.h" +#include "domain.h" +#include "error.h" +#include "fix.h" +#include "force.h" +#include "memory.h" +#include "modify.h" +#include "neigh_list.h" +#include "neigh_request.h" +#include "neighbor.h" +#include "output.h" +#include "update.h" +#if LAMMPS_VERSION_NUMBER >= 20210831 +// in lammps #2902, fix_ttm members turns from private to protected +#define USE_TTM 1 +#include "fix_ttm_dp.h" +#endif + +#include "deepmd_version.h" +#include "pair_base.h" + +using namespace LAMMPS_NS; +using namespace std; + +static int stringCmp(const void *a, const void *b) { + char *m = (char *)a; + char *n = (char *)b; + int i, sum = 0; + + for (i = 0; i < MPI_MAX_PROCESSOR_NAME; i++) { + if (m[i] == n[i]) { + continue; + } else { + sum = m[i] - n[i]; + break; + } + } + return sum; +} + +int PairDeepMDBase::get_node_rank() { + char host_name[MPI_MAX_PROCESSOR_NAME]; + memset(host_name, '\0', sizeof(char) * MPI_MAX_PROCESSOR_NAME); + char(*host_names)[MPI_MAX_PROCESSOR_NAME]; + int n, namelen, color, rank, nprocs, myrank; + size_t bytes; + MPI_Comm nodeComm; + + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &nprocs); + MPI_Get_processor_name(host_name, &namelen); + + bytes = nprocs * sizeof(char[MPI_MAX_PROCESSOR_NAME]); + host_names = (char(*)[MPI_MAX_PROCESSOR_NAME])malloc(bytes); + for (int ii = 0; ii < nprocs; ii++) { + memset(host_names[ii], '\0', sizeof(char) * MPI_MAX_PROCESSOR_NAME); + } + + strcpy(host_names[rank], host_name); + + for (n = 0; n < nprocs; n++) { + MPI_Bcast(&(host_names[n]), MPI_MAX_PROCESSOR_NAME, MPI_CHAR, n, + MPI_COMM_WORLD); + } + qsort(host_names, nprocs, sizeof(char[MPI_MAX_PROCESSOR_NAME]), stringCmp); + + color = 0; + for (n = 0; n < nprocs - 1; n++) { + if (strcmp(host_name, host_names[n]) == 0) { + break; + } + if (strcmp(host_names[n], host_names[n + 1])) { + color++; + } + } + + MPI_Comm_split(MPI_COMM_WORLD, color, 0, &nodeComm); + MPI_Comm_rank(nodeComm, &myrank); + + MPI_Barrier(MPI_COMM_WORLD); + int looprank = myrank; + // printf (" Assigning device %d to process on node %s rank %d, + // OK\n",looprank, host_name, rank ); + free(host_names); + return looprank; +} + +std::string PairDeepMDBase::get_file_content(const std::string &model) { + int myrank = 0, root = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &myrank); + int nchar = 0; + std::string file_content; + if (myrank == root) { + deepmd_compat::read_file_to_string(model, file_content); + nchar = file_content.size(); + } + MPI_Bcast(&nchar, 1, MPI_INT, root, MPI_COMM_WORLD); + char *buff = (char *)malloc(sizeof(char) * nchar); + if (myrank == root) { + memcpy(buff, file_content.c_str(), sizeof(char) * nchar); + } + MPI_Bcast(buff, nchar, MPI_CHAR, root, MPI_COMM_WORLD); + file_content.resize(nchar); + for (unsigned ii = 0; ii < nchar; ++ii) { + file_content[ii] = buff[ii]; + } + free(buff); + return file_content; +} + +std::vector PairDeepMDBase::get_file_content( + const std::vector &models) { + std::vector file_contents(models.size()); + for (unsigned ii = 0; ii < models.size(); ++ii) { + file_contents[ii] = get_file_content(models[ii]); + } + return file_contents; +} + +void PairDeepMDBase::make_fparam_from_compute(vector &fparam) { + assert(do_compute_fparam); + + int icompute = modify->find_compute(compute_fparam_id); + Compute *compute = modify->compute[icompute]; + + if (!compute) { + error->all(FLERR, "compute id is not found: " + compute_fparam_id); + } + fparam.resize(dim_fparam); + + if (dim_fparam == 1) { + if (!(compute->invoked_flag & Compute::INVOKED_SCALAR)) { + compute->compute_scalar(); + compute->invoked_flag |= Compute::INVOKED_SCALAR; + } + fparam[0] = compute->scalar; + } else if (dim_fparam > 1) { + if (!(compute->invoked_flag & Compute::INVOKED_VECTOR)) { + compute->compute_vector(); + compute->invoked_flag |= Compute::INVOKED_VECTOR; + } + double *cvector = compute->vector; + for (int jj = 0; jj < dim_fparam; ++jj) { + fparam[jj] = cvector[jj]; + } + } +} + +void PairDeepMDBase::make_aparam_from_compute(vector &aparam) { + assert(do_compute_aparam); + + int icompute = modify->find_compute(compute_aparam_id); + Compute *compute = modify->compute[icompute]; + + if (!compute) { + error->all(FLERR, "compute id is not found: " + compute_aparam_id); + } + int nlocal = atom->nlocal; + aparam.resize(static_cast(dim_aparam) * nlocal); + + if (!(compute->invoked_flag & Compute::INVOKED_PERATOM)) { + compute->compute_peratom(); + compute->invoked_flag |= Compute::INVOKED_PERATOM; + } + if (dim_aparam == 1) { + double *cvector = compute->vector_atom; + aparam.assign(cvector, cvector + nlocal); + } else if (dim_aparam > 1) { + double **carray = compute->array_atom; + for (int ii = 0; ii < nlocal; ++ii) { + for (int jj = 0; jj < dim_aparam; ++jj) { + aparam[ii * dim_aparam + jj] = carray[ii][jj]; + } + } + } +} + +#ifdef USE_TTM +void PairDeepMDBase::make_ttm_fparam(vector &fparam) { + assert(do_ttm); + // get ttm_fix + const FixTTMDP *ttm_fix = NULL; + for (int ii = 0; ii < modify->nfix; ii++) { + if (string(modify->fix[ii]->id) == ttm_fix_id) { + ttm_fix = dynamic_cast(modify->fix[ii]); + } + } + if (!ttm_fix) { + error->all(FLERR, "fix ttm id is not found: " + ttm_fix_id); + } + + fparam.resize(dim_fparam); + + vector nnodes = ttm_fix->get_nodes(); + int nxnodes = nnodes[0]; + int nynodes = nnodes[1]; + int nznodes = nnodes[2]; + double ***const T_electron = ttm_fix->get_T_electron(); + + int numb_effective_nodes = 0; + double total_Te = 0; + + // loop over grids to get average electron temperature + for (int ixnode = 0; ixnode < nxnodes; ixnode++) { + for (int iynode = 0; iynode < nynodes; iynode++) { + for (int iznode = 0; iznode < nznodes; iznode++) { + if (T_electron[ixnode][iynode][iznode] != 0) { + numb_effective_nodes += 1; + total_Te += T_electron[ixnode][iynode][iznode]; + } + } + } + } + + fparam[0] = total_Te / numb_effective_nodes; +} +#endif + +#ifdef USE_TTM +void PairDeepMDBase::make_ttm_aparam(vector &daparam) { + assert(do_ttm); + // get ttm_fix + const FixTTMDP *ttm_fix = NULL; + for (int ii = 0; ii < modify->nfix; ii++) { + if (string(modify->fix[ii]->id) == ttm_fix_id) { + ttm_fix = dynamic_cast(modify->fix[ii]); + } + } + if (!ttm_fix) { + error->all(FLERR, "fix ttm id is not found: " + ttm_fix_id); + } + // modify + double **x = atom->x; + int *mask = atom->mask; + int nlocal = atom->nlocal; + vector nnodes = ttm_fix->get_nodes(); + int nxnodes = nnodes[0]; + int nynodes = nnodes[1]; + int nznodes = nnodes[2]; + double ***const T_electron = ttm_fix->get_T_electron(); + double dx = domain->xprd / nxnodes; + double dy = domain->yprd / nynodes; + double dz = domain->zprd / nynodes; + // resize daparam + daparam.resize(nlocal); + // loop over atoms to assign aparam + for (int ii = 0; ii < nlocal; ii++) { + if (mask[ii] & ttm_fix->groupbit) { + double xscale = (x[ii][0] - domain->boxlo[0]) / domain->xprd; + double yscale = (x[ii][1] - domain->boxlo[1]) / domain->yprd; + double zscale = (x[ii][2] - domain->boxlo[2]) / domain->zprd; + int ixnode = static_cast(xscale * nxnodes); + int iynode = static_cast(yscale * nynodes); + int iznode = static_cast(zscale * nznodes); + // https://stackoverflow.com/a/1907585/9567349 + ixnode = ((ixnode % nxnodes) + nxnodes) % nxnodes; + iynode = ((iynode % nynodes) + nynodes) % nynodes; + iznode = ((iznode % nznodes) + nznodes) % nznodes; + daparam[ii] = T_electron[ixnode][iynode][iznode]; + } + } +} +#endif + +void PairDeepMDBase::cum_sum(std::map &sum, std::map &vec) { + sum[0] = 0; + for (int ii = 1; ii < vec.size(); ++ii) { + sum[ii] = sum[ii - 1] + vec[ii - 1]; + } +} + +PairDeepMDBase::PairDeepMDBase(LAMMPS *lmp, const char* cite_user_package) + : Pair(lmp) + +{ + if (lmp->citeme) { + lmp->citeme->add(cite_user_package); + } + if (strcmp(update->unit_style, "lj") == 0) { + error->all(FLERR, + "Pair deepmd does not support unit style lj. Please use other " + "unit styles like metal or real unit instead. You may set it by " + "\"units metal\" or \"units real\""); + } + ener_unit_cvt_factor = force->boltz / 8.617343e-5; + dist_unit_cvt_factor = force->angstrom; + force_unit_cvt_factor = ener_unit_cvt_factor / dist_unit_cvt_factor; + + restartinfo = 1; +#if LAMMPS_VERSION_NUMBER >= 20201130 + centroidstressflag = + CENTROID_AVAIL; // set centroidstressflag = CENTROID_AVAIL to allow the + // use of the centroid/stress/atom. Added by Davide Tisi +#else + centroidstressflag = 2; // set centroidstressflag = 2 to allow the use of the + // centroid/stress/atom. Added by Davide Tisi +#endif + pppmflag = 1; + respa_enable = 0; + writedata = 0; + + cutoff = 0.; + numb_types = 0; + numb_types_spin = 0; + numb_models = 0; + out_freq = 0; + out_each = 0; + out_rel = 0; + out_rel_v = 0; + stdf_comm_buff_size = 0; + eps = 0.; + eps_v = 0.; + scale = NULL; + do_ttm = false; + do_compute_fparam = false; + do_compute_aparam = false; + single_model = false; + multi_models_mod_devi = false; + multi_models_no_mod_devi = false; + is_restart = false; + // set comm size needed by this Pair + comm_reverse = 1; + + print_summary(" "); +} + +void PairDeepMDBase::print_summary(const string pre) const { + if (comm->me == 0) { + // capture cout to a string, then call LAMMPS's utils::logmesg + // https://stackoverflow.com/a/4043813/9567349 + std::stringstream buffer; + std::streambuf *sbuf = std::cout.rdbuf(); + std::cout.rdbuf(buffer.rdbuf()); + + cout << "Summary of lammps deepmd module ..." << endl; + cout << pre << ">>> Info of deepmd-kit:" << endl; + deep_pot.print_summary(pre); + cout << pre << ">>> Info of lammps module:" << endl; + cout << pre << "use deepmd-kit at: " << STR_DEEPMD_ROOT << endl; + cout << pre << "source: " << STR_GIT_SUMM << endl; + cout << pre << "source branch: " << STR_GIT_BRANCH << endl; + cout << pre << "source commit: " << STR_GIT_HASH << endl; + cout << pre << "source commit at: " << STR_GIT_DATE << endl; + cout << pre << "build float prec: " << STR_FLOAT_PREC << endl; + cout << pre << "build with tf inc: " << STR_TensorFlow_INCLUDE_DIRS + << endl; + cout << pre << "build with tf lib: " << STR_TensorFlow_LIBRARY << endl; + + std::cout.rdbuf(sbuf); + utils::logmesg(lmp, buffer.str()); + } +} + +PairDeepMDBase::~PairDeepMDBase() { + if (allocated) { + memory->destroy(setflag); + memory->destroy(cutsq); + memory->destroy(scale); + } +} + +void PairDeepMDBase::allocate() { + allocated = 1; + int n = atom->ntypes; + + memory->create(setflag, n + 1, n + 1, "pair:setflag"); + memory->create(cutsq, n + 1, n + 1, "pair:cutsq"); + memory->create(scale, n + 1, n + 1, "pair:scale"); + + for (int i = 1; i <= n; i++) { + for (int j = i; j <= n; j++) { + setflag[i][j] = 0; + scale[i][j] = 0; + } + } + for (int i = 1; i <= numb_types; ++i) { + if (i > n) { + continue; + } + for (int j = i; j <= numb_types; ++j) { + if (j > n) { + continue; + } + setflag[i][j] = 1; + scale[i][j] = 1.0; + } + } +} + +static bool is_key(const string &input) { + vector keys; + keys.push_back("out_freq"); + keys.push_back("out_file"); + keys.push_back("fparam"); + keys.push_back("aparam"); + keys.push_back("fparam_from_compute"); + keys.push_back("aparam_from_compute"); + keys.push_back("ttm"); + keys.push_back("atomic"); + keys.push_back("relative"); + keys.push_back("relative_v"); + keys.push_back("virtual_len"); + keys.push_back("spin_norm"); + + for (int ii = 0; ii < keys.size(); ++ii) { + if (input == keys[ii]) { + return true; + } + } + return false; +} + +void PairDeepMDBase::settings(int narg, char **arg) { + if (narg <= 0) { + error->all(FLERR, "Illegal pair_style command"); + } + + vector models; + int iarg = 0; + while (iarg < narg) { + if (is_key(arg[iarg])) { + break; + } + iarg++; + } + for (int ii = 0; ii < iarg; ++ii) { + models.push_back(arg[ii]); + } + numb_models = models.size(); + if (numb_models == 1) { + try { + deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + cutoff = deep_pot.cutoff() * dist_unit_cvt_factor; + numb_types = deep_pot.numb_types(); + numb_types_spin = deep_pot.numb_types_spin(); + dim_fparam = deep_pot.dim_fparam(); + dim_aparam = deep_pot.dim_aparam(); + } else { + try { + deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); + deep_pot_model_devi.init(models, get_node_rank(), + get_file_content(models)); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + cutoff = deep_pot_model_devi.cutoff() * dist_unit_cvt_factor; + numb_types = deep_pot_model_devi.numb_types(); + numb_types_spin = deep_pot_model_devi.numb_types_spin(); + dim_fparam = deep_pot_model_devi.dim_fparam(); + dim_aparam = deep_pot_model_devi.dim_aparam(); + assert(cutoff == deep_pot.cutoff() * dist_unit_cvt_factor); + assert(numb_types == deep_pot.numb_types()); + assert(numb_types_spin == deep_pot.numb_types_spin()); + assert(dim_fparam == deep_pot.dim_fparam()); + assert(dim_aparam == deep_pot.dim_aparam()); + } + + out_freq = 100; + out_file = "model_devi.out"; + out_each = 0; + out_rel = 0; + eps = 0.; + fparam.clear(); + aparam.clear(); + while (iarg < narg) { + if (!is_key(arg[iarg])) { + error->all(FLERR, + "Illegal pair_style command\nwrong number of parameters\n"); + } + if (string(arg[iarg]) == string("out_freq")) { + if (iarg + 1 >= narg) { + error->all(FLERR, "Illegal out_freq, not provided"); + } + out_freq = atoi(arg[iarg + 1]); + iarg += 2; + } else if (string(arg[iarg]) == string("out_file")) { + if (iarg + 1 >= narg) { + error->all(FLERR, "Illegal out_file, not provided"); + } + out_file = string(arg[iarg + 1]); + iarg += 2; + } else if (string(arg[iarg]) == string("fparam")) { + for (int ii = 0; ii < dim_fparam; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + char tmp[1024]; + sprintf(tmp, "Illegal fparam, the dimension should be %d", + dim_fparam); + error->all(FLERR, tmp); + } + fparam.push_back(atof(arg[iarg + 1 + ii])); + } + iarg += 1 + dim_fparam; + } else if (string(arg[iarg]) == string("aparam")) { + for (int ii = 0; ii < dim_aparam; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + char tmp[1024]; + sprintf(tmp, "Illegal aparam, the dimension should be %d", + dim_aparam); + error->all(FLERR, tmp); + } + aparam.push_back(atof(arg[iarg + 1 + ii])); + } + iarg += 1 + dim_aparam; + } else if (string(arg[iarg]) == string("ttm")) { +#ifdef USE_TTM + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, "invalid ttm key: should be ttm ttm_fix_id(str)"); + } + } + do_ttm = true; + ttm_fix_id = arg[iarg + 1]; + iarg += 1 + 1; +#else + error->all(FLERR, + "The deepmd-kit was compiled without support for TTM, please " + "rebuild it with LAMMPS version >=20210831"); +#endif + } + + /////////////////////////////////////////////// + // pair_style deepmd cp.pb fparam_from_compute TEMP + // compute TEMP all temp + ////////////////////////////////////////////// + else if (string(arg[iarg]) == string("fparam_from_compute")) { + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, + "invalid fparam_from_compute key: should be " + "fparam_from_compute compute_fparam_id(str)"); + } + } + do_compute_fparam = true; + compute_fparam_id = arg[iarg + 1]; + iarg += 1 + 1; + } else if (string(arg[iarg]) == string("aparam_from_compute")) { + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, + "invalid aparam_from_compute key: should be " + "aparam_from_compute compute_aparam_id(str)"); + } + } + do_compute_aparam = true; + compute_aparam_id = arg[iarg + 1]; + iarg += 1 + 1; + } else if (string(arg[iarg]) == string("atomic")) { + out_each = 1; + iarg += 1; + } else if (string(arg[iarg]) == string("relative")) { + out_rel = 1; + eps = atof(arg[iarg + 1]) / ener_unit_cvt_factor; + iarg += 2; + } else if (string(arg[iarg]) == string("relative_v")) { + out_rel_v = 1; + eps_v = atof(arg[iarg + 1]) / ener_unit_cvt_factor; + iarg += 2; + } else if (string(arg[iarg]) == string("virtual_len")) { + virtual_len.resize(numb_types_spin); + for (int ii = 0; ii < numb_types_spin; ++ii) { + virtual_len[ii] = atof(arg[iarg + ii + 1]); + } + iarg += numb_types_spin + 1; + } else if (string(arg[iarg]) == string("spin_norm")) { + spin_norm.resize(numb_types_spin); + for (int ii = 0; ii < numb_types_spin; ++ii) { + spin_norm[ii] = atof(arg[iarg + ii + 1]); + } + iarg += numb_types_spin + 1; + } + } + + if (out_freq < 0) { + error->all(FLERR, "Illegal out_freq, should be >= 0"); + } + if ((int)do_ttm + (int)do_compute_aparam + (int)(aparam.size() > 0) > 1) { + error->all(FLERR, + "aparam, aparam_from_compute, and ttm should NOT be set " + "simultaneously"); + } + if (do_compute_fparam && fparam.size() > 0) { + error->all( + FLERR, + "fparam and fparam_from_compute should NOT be set simultaneously"); + } + + if (comm->me == 0) { + if (numb_models > 1 && out_freq > 0) { + if (!is_restart) { + fp.open(out_file); + fp << scientific; + if (!atom->sp_flag) { + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" + << setw(18 + 1) << "avg_devi_f"; + if (out_each) { + // at this time, we don't know how many atoms + fp << setw(18 + 1) << "atm_devi_f(N)"; + } + fp << endl; + } else { + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_fr" << setw(18 + 1) << "min_devi_fr" + << setw(18 + 1) << "avg_devi_fr" << setw(18 + 1) << "max_devi_fm" + << setw(18 + 1) << "min_devi_fm" << setw(18 + 1) << "avg_devi_fm" + << endl; + } + } else { + fp.open(out_file, std::ofstream::out | std::ofstream::app); + fp << scientific; + } + } + string pre = " "; + cout << pre << ">>> Info of model(s):" << endl + << pre << "using " << setw(3) << numb_models << " model(s): "; + if (narg == 1) { + cout << arg[0] << " "; + } else { + for (int ii = 0; ii < models.size(); ++ii) { + cout << models[ii] << " "; + } + } + cout << endl + << pre << "rcut in model: " << cutoff << endl + << pre << "ntypes in model: " << numb_types << endl; + if (fparam.size() > 0) { + cout << pre << "using fparam(s): "; + for (int ii = 0; ii < dim_fparam; ++ii) { + cout << fparam[ii] << " "; + } + cout << endl; + } + if (do_compute_fparam) { + cout << pre << "using compute id (fparam): "; + cout << compute_fparam_id << " " << endl; + } + if (do_compute_aparam) { + cout << pre << "using compute id (aparam): "; + cout << compute_aparam_id << " " << endl; + } + if (aparam.size() > 0) { + cout << pre << "using aparam(s): "; + for (int ii = 0; ii < aparam.size(); ++ii) { + cout << aparam[ii] << " "; + } + cout << endl; + } + if (do_ttm) { + cout << pre << "using ttm fix: "; + cout << ttm_fix_id << " "; + if (dim_fparam > 0) { + cout << "(fparam)" << endl; + } else if (dim_aparam > 0) { + cout << "(aparam)" << endl; + } + } + } + + // comm_reverse = numb_models * 3; + if (atom->sp_flag) { + comm_reverse = numb_models * 3 * 2; + } else { + comm_reverse = numb_models * 3; + } + all_force.resize(numb_models); +} + +void PairDeepMDBase::read_restart(FILE *) { is_restart = true; } + +void PairDeepMDBase::write_restart(FILE *) { + // pass +} + +/* ---------------------------------------------------------------------- + set coeffs for one or more type pairs +------------------------------------------------------------------------- */ + +void PairDeepMDBase::coeff(int narg, char **arg) { + if (!allocated) { + allocate(); + } + + int n = atom->ntypes; + int ilo, ihi, jlo, jhi; + ilo = 0; + jlo = 0; + ihi = n; + jhi = n; + if (narg >= 2) { + utils::bounds(FLERR, arg[0], 1, atom->ntypes, ilo, ihi, error); + utils::bounds(FLERR, arg[1], 1, atom->ntypes, jlo, jhi, error); + if (ilo != 1 || jlo != 1 || ihi != n || jhi != n) { + error->all(FLERR, + "deepmd requires that the scale should be set to all atom " + "types, i.e. pair_coeff * *."); + } + } + if (narg <= 2) { + type_idx_map.resize(n); + for (int ii = 0; ii < n; ++ii) { + type_idx_map[ii] = ii; + } + } else { + int iarg = 2; + + // type_map is a list of strings with undetermined length + // note: although we have numb_types from the model, we do not require + // the number of types in the system matches that in the model + std::vector type_map; + std::string type_map_str; + deep_pot.get_type_map(type_map_str); + // convert the string to a vector of strings + std::istringstream iss(type_map_str); + std::string type_name; + while (iss >> type_name) { + type_map.push_back(type_name); + } + + type_idx_map.clear(); + type_names.clear(); + while (iarg < narg) { + std::string type_name = arg[iarg]; + type_names.push_back(type_name); + bool found_element = false; + for (int ii = 0; ii < type_map.size(); ++ii) { + if (type_map[ii] == type_name) { + type_idx_map.push_back(ii); + found_element = true; + break; + } + } + if (!found_element && "NULL" == type_name) { + type_idx_map.push_back(type_map.size()); // ghost type + found_element = true; + } + if (!found_element) { + error->all(FLERR, "pair_coeff: element " + type_name + + " not found in the model"); + } + iarg += 1; + } + numb_types = type_idx_map.size(); + if (numb_types < n) { + type_idx_map.resize(n); + for (int ii = numb_types; ii < n; ++ii) { + type_idx_map[ii] = -1; + } + } + } + for (int i = ilo; i <= ihi; i++) { + for (int j = MAX(jlo, i); j <= jhi; j++) { + setflag[i][j] = 1; + scale[i][j] = 1.0; + if (i > numb_types || j > numb_types) { + char warning_msg[1024]; + sprintf(warning_msg, + "Interaction between types %d and %d is set with deepmd, but " + "will be ignored.\n Deepmd model has only %d types, it only " + "computes the mulitbody interaction of types: 1-%d.", + i, j, numb_types, numb_types); + error->warning(FLERR, warning_msg); + } + } + } +} + +void PairDeepMDBase::init_style() { +#if LAMMPS_VERSION_NUMBER >= 20220324 + neighbor->add_request(this, NeighConst::REQ_FULL); +#else + int irequest = neighbor->request(this, instance_me); + neighbor->requests[irequest]->half = 0; + neighbor->requests[irequest]->full = 1; + // neighbor->requests[irequest]->newton = 2; +#endif + if (out_each == 1) { + int ntotal = atom->natoms; + int nprocs = comm->nprocs; + if (ntotal > stdf_comm_buff_size) { + stdf_comm_buff_size = ntotal; + } + memory->create(counts, nprocs, "deepmd:counts"); + memory->create(displacements, nprocs, "deepmd:displacements"); + memory->create(stdfsend, ntotal, "deepmd:stdfsendall"); + memory->create(stdfrecv, ntotal, "deepmd:stdfrecvall"); + memory->create(tagsend, ntotal, "deepmd:tagsendall"); + memory->create(tagrecv, ntotal, "deepmd:tagrecvall"); + } +} + +double PairDeepMDBase::init_one(int i, int j) { + if (i > numb_types || j > numb_types) { + char warning_msg[1024]; + sprintf(warning_msg, + "Interaction between types %d and %d is set with deepmd, but will " + "be ignored.\n Deepmd model has only %d types, it only computes " + "the mulitbody interaction of types: 1-%d.", + i, j, numb_types, numb_types); + error->warning(FLERR, warning_msg); + } + + if (setflag[i][j] == 0) { + scale[i][j] = 1.0; + } + scale[j][i] = scale[i][j]; + + return cutoff; +} + +void *PairDeepMDBase::extract(const char *str, int &dim) { + if (strcmp(str, "cut_coul") == 0) { + dim = 0; + return (void *)&cutoff; + } + if (strcmp(str, "scale") == 0) { + dim = 2; + return (void *)scale; + } + return NULL; +} + +void ana_st(double &max, + double &min, + double &sum, + const vector &vec, + const int &nloc) { + if (nloc == 0) { + return; + } + max = vec[0]; + min = vec[0]; + sum = vec[0]; + for (unsigned ii = 1; ii < nloc; ++ii) { + if (vec[ii] > max) { + max = vec[ii]; + } + if (vec[ii] < min) { + min = vec[ii]; + } + sum += vec[ii]; + } +} + +void make_uniform_aparam(vector &daparam, + const vector &aparam, + const int &nlocal) { + unsigned dim_aparam = aparam.size(); + daparam.resize(static_cast(dim_aparam) * nlocal); + for (int ii = 0; ii < nlocal; ++ii) { + for (int jj = 0; jj < dim_aparam; ++jj) { + daparam[ii * dim_aparam + jj] = aparam[jj]; + } + } +} \ No newline at end of file diff --git a/source/lmp/pair_base.h b/source/lmp/pair_base.h new file mode 100644 index 0000000000..06c7a071d6 --- /dev/null +++ b/source/lmp/pair_base.h @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#ifndef LAMMPS_VERSION_NUMBER +#error Please define LAMMPS_VERSION_NUMBER to yyyymmdd +#endif + +#ifndef LMP_PAIR_NNP_BASE_H +#define LMP_PAIR_NNP_BASE_H + +#include "pair.h" +#ifdef DP_USE_CXX_API +#ifdef LMPPLUGIN +#include "DeepPot.h" +#else +#include "deepmd/DeepPot.h" +#endif +namespace deepmd_compat = deepmd; +#else +#ifdef LMPPLUGIN +#include "deepmd.hpp" +#else +#include "deepmd/deepmd.hpp" +#endif +namespace deepmd_compat = deepmd::hpp; +#endif +#include +#include +#include +#define FLOAT_PREC double + +namespace LAMMPS_NS { +class PairDeepMDBase : public Pair { + public: + PairDeepMDBase(class LAMMPS *, const char *); + ~PairDeepMDBase() override; + void *extract(const char *, int &) override; + void settings(int, char **) override; + void coeff(int, char **) override; + void init_style() override; + void write_restart(FILE *) override; + void read_restart(FILE *) override; + double init_one(int i, int j) override; + void print_summary(const std::string pre) const; + int get_node_rank(); + void cum_sum(std::map &, std::map &); + + std::string get_file_content(const std::string &model); + std::vector get_file_content( + const std::vector &models); + std::vector type_names; + double ener_unit_cvt_factor, dist_unit_cvt_factor, force_unit_cvt_factor; + + protected: + deepmd_compat::DeepPot deep_pot; + deepmd_compat::DeepPotModelDevi deep_pot_model_devi; + virtual void allocate(); + double **scale; + unsigned numb_models; + double cutoff; + int numb_types; + int numb_types_spin; + std::vector > all_force; + std::vector > all_force_mag; + std::ofstream fp; + int out_freq; + std::string out_file; + int dim_fparam; + int dim_aparam; + int out_each; + int out_rel; + int out_rel_v; + int stdf_comm_buff_size; + bool single_model; + bool multi_models_mod_devi; + bool multi_models_no_mod_devi; + bool is_restart; + std::vector virtual_len; + std::vector spin_norm; + // for spin systems, search new index of atoms by their old index + std::map new_idx_map; + std::map old_idx_map; + std::vector fparam; + std::vector aparam; + double eps; + double eps_v; + + void make_fparam_from_compute(std::vector &fparam); + bool do_compute_fparam; + std::string compute_fparam_id; + void make_aparam_from_compute(std::vector &aparam); + bool do_compute_aparam; + std::string compute_aparam_id; + + void make_ttm_fparam(std::vector &fparam); + + void make_ttm_aparam(std::vector &dparam); + bool do_ttm; + std::string ttm_fix_id; + int *counts, *displacements; + tagint *tagsend, *tagrecv; + double *stdfsend, *stdfrecv; + std::vector type_idx_map; +}; + +} // namespace LAMMPS_NS + + +void make_uniform_aparam(std::vector &daparam, + const std::vector &aparam, + const int &nlocal); +void ana_st(double &max, + double &min, + double &sum, + const std::vector &vec, + const int &nloc); + +#endif diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 25f4441b1f..74514f9759 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -84,375 +84,14 @@ static const char cite_user_deepmd_package[] = " doi = {10.1063/5.0155600},\n" "}\n\n"; -static int stringCmp(const void *a, const void *b) { - char *m = (char *)a; - char *n = (char *)b; - int i, sum = 0; - - for (i = 0; i < MPI_MAX_PROCESSOR_NAME; i++) { - if (m[i] == n[i]) { - continue; - } else { - sum = m[i] - n[i]; - break; - } - } - return sum; -} - -int PairDeepMD::get_node_rank() { - char host_name[MPI_MAX_PROCESSOR_NAME]; - memset(host_name, '\0', sizeof(char) * MPI_MAX_PROCESSOR_NAME); - char(*host_names)[MPI_MAX_PROCESSOR_NAME]; - int n, namelen, color, rank, nprocs, myrank; - size_t bytes; - MPI_Comm nodeComm; - - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - MPI_Comm_size(MPI_COMM_WORLD, &nprocs); - MPI_Get_processor_name(host_name, &namelen); - - bytes = nprocs * sizeof(char[MPI_MAX_PROCESSOR_NAME]); - host_names = (char(*)[MPI_MAX_PROCESSOR_NAME])malloc(bytes); - for (int ii = 0; ii < nprocs; ii++) { - memset(host_names[ii], '\0', sizeof(char) * MPI_MAX_PROCESSOR_NAME); - } - - strcpy(host_names[rank], host_name); - - for (n = 0; n < nprocs; n++) { - MPI_Bcast(&(host_names[n]), MPI_MAX_PROCESSOR_NAME, MPI_CHAR, n, - MPI_COMM_WORLD); - } - qsort(host_names, nprocs, sizeof(char[MPI_MAX_PROCESSOR_NAME]), stringCmp); - - color = 0; - for (n = 0; n < nprocs - 1; n++) { - if (strcmp(host_name, host_names[n]) == 0) { - break; - } - if (strcmp(host_names[n], host_names[n + 1])) { - color++; - } - } - - MPI_Comm_split(MPI_COMM_WORLD, color, 0, &nodeComm); - MPI_Comm_rank(nodeComm, &myrank); - - MPI_Barrier(MPI_COMM_WORLD); - int looprank = myrank; - // printf (" Assigning device %d to process on node %s rank %d, - // OK\n",looprank, host_name, rank ); - free(host_names); - return looprank; -} - -std::string PairDeepMD::get_file_content(const std::string &model) { - int myrank = 0, root = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &myrank); - int nchar = 0; - std::string file_content; - if (myrank == root) { - deepmd_compat::read_file_to_string(model, file_content); - nchar = file_content.size(); - } - MPI_Bcast(&nchar, 1, MPI_INT, root, MPI_COMM_WORLD); - char *buff = (char *)malloc(sizeof(char) * nchar); - if (myrank == root) { - memcpy(buff, file_content.c_str(), sizeof(char) * nchar); - } - MPI_Bcast(buff, nchar, MPI_CHAR, root, MPI_COMM_WORLD); - file_content.resize(nchar); - for (unsigned ii = 0; ii < nchar; ++ii) { - file_content[ii] = buff[ii]; - } - free(buff); - return file_content; -} - -std::vector PairDeepMD::get_file_content( - const std::vector &models) { - std::vector file_contents(models.size()); - for (unsigned ii = 0; ii < models.size(); ++ii) { - file_contents[ii] = get_file_content(models[ii]); - } - return file_contents; -} - -static void ana_st(double &max, - double &min, - double &sum, - const vector &vec, - const int &nloc) { - if (nloc == 0) { - return; - } - max = vec[0]; - min = vec[0]; - sum = vec[0]; - for (unsigned ii = 1; ii < nloc; ++ii) { - if (vec[ii] > max) { - max = vec[ii]; - } - if (vec[ii] < min) { - min = vec[ii]; - } - sum += vec[ii]; - } -} - -static void make_uniform_aparam(vector &daparam, - const vector &aparam, - const int &nlocal) { - unsigned dim_aparam = aparam.size(); - daparam.resize(static_cast(dim_aparam) * nlocal); - for (int ii = 0; ii < nlocal; ++ii) { - for (int jj = 0; jj < dim_aparam; ++jj) { - daparam[ii * dim_aparam + jj] = aparam[jj]; - } - } -} - -void PairDeepMD::make_fparam_from_compute(vector &fparam) { - assert(do_compute_fparam); - - int icompute = modify->find_compute(compute_fparam_id); - Compute *compute = modify->compute[icompute]; - - if (!compute) { - error->all(FLERR, "compute id is not found: " + compute_fparam_id); - } - fparam.resize(dim_fparam); - - if (dim_fparam == 1) { - if (!(compute->invoked_flag & Compute::INVOKED_SCALAR)) { - compute->compute_scalar(); - compute->invoked_flag |= Compute::INVOKED_SCALAR; - } - fparam[0] = compute->scalar; - } else if (dim_fparam > 1) { - if (!(compute->invoked_flag & Compute::INVOKED_VECTOR)) { - compute->compute_vector(); - compute->invoked_flag |= Compute::INVOKED_VECTOR; - } - double *cvector = compute->vector; - for (int jj = 0; jj < dim_fparam; ++jj) { - fparam[jj] = cvector[jj]; - } - } -} - -void PairDeepMD::make_aparam_from_compute(vector &aparam) { - assert(do_compute_aparam); - - int icompute = modify->find_compute(compute_aparam_id); - Compute *compute = modify->compute[icompute]; - - if (!compute) { - error->all(FLERR, "compute id is not found: " + compute_aparam_id); - } - int nlocal = atom->nlocal; - aparam.resize(static_cast(dim_aparam) * nlocal); - - if (!(compute->invoked_flag & Compute::INVOKED_PERATOM)) { - compute->compute_peratom(); - compute->invoked_flag |= Compute::INVOKED_PERATOM; - } - if (dim_aparam == 1) { - double *cvector = compute->vector_atom; - aparam.assign(cvector, cvector + nlocal); - } else if (dim_aparam > 1) { - double **carray = compute->array_atom; - for (int ii = 0; ii < nlocal; ++ii) { - for (int jj = 0; jj < dim_aparam; ++jj) { - aparam[ii * dim_aparam + jj] = carray[ii][jj]; - } - } - } -} - -#ifdef USE_TTM -void PairDeepMD::make_ttm_fparam(vector &fparam) { - assert(do_ttm); - // get ttm_fix - const FixTTMDP *ttm_fix = NULL; - for (int ii = 0; ii < modify->nfix; ii++) { - if (string(modify->fix[ii]->id) == ttm_fix_id) { - ttm_fix = dynamic_cast(modify->fix[ii]); - } - } - if (!ttm_fix) { - error->all(FLERR, "fix ttm id is not found: " + ttm_fix_id); - } - - fparam.resize(dim_fparam); - - vector nnodes = ttm_fix->get_nodes(); - int nxnodes = nnodes[0]; - int nynodes = nnodes[1]; - int nznodes = nnodes[2]; - double ***const T_electron = ttm_fix->get_T_electron(); - - int numb_effective_nodes = 0; - double total_Te = 0; - - // loop over grids to get average electron temperature - for (int ixnode = 0; ixnode < nxnodes; ixnode++) { - for (int iynode = 0; iynode < nynodes; iynode++) { - for (int iznode = 0; iznode < nznodes; iznode++) { - if (T_electron[ixnode][iynode][iznode] != 0) { - numb_effective_nodes += 1; - total_Te += T_electron[ixnode][iynode][iznode]; - } - } - } - } - - fparam[0] = total_Te / numb_effective_nodes; -} -#endif - -#ifdef USE_TTM -void PairDeepMD::make_ttm_aparam(vector &daparam) { - assert(do_ttm); - // get ttm_fix - const FixTTMDP *ttm_fix = NULL; - for (int ii = 0; ii < modify->nfix; ii++) { - if (string(modify->fix[ii]->id) == ttm_fix_id) { - ttm_fix = dynamic_cast(modify->fix[ii]); - } - } - if (!ttm_fix) { - error->all(FLERR, "fix ttm id is not found: " + ttm_fix_id); - } - // modify - double **x = atom->x; - int *mask = atom->mask; - int nlocal = atom->nlocal; - vector nnodes = ttm_fix->get_nodes(); - int nxnodes = nnodes[0]; - int nynodes = nnodes[1]; - int nznodes = nnodes[2]; - double ***const T_electron = ttm_fix->get_T_electron(); - double dx = domain->xprd / nxnodes; - double dy = domain->yprd / nynodes; - double dz = domain->zprd / nynodes; - // resize daparam - daparam.resize(nlocal); - // loop over atoms to assign aparam - for (int ii = 0; ii < nlocal; ii++) { - if (mask[ii] & ttm_fix->groupbit) { - double xscale = (x[ii][0] - domain->boxlo[0]) / domain->xprd; - double yscale = (x[ii][1] - domain->boxlo[1]) / domain->yprd; - double zscale = (x[ii][2] - domain->boxlo[2]) / domain->zprd; - int ixnode = static_cast(xscale * nxnodes); - int iynode = static_cast(yscale * nynodes); - int iznode = static_cast(zscale * nznodes); - // https://stackoverflow.com/a/1907585/9567349 - ixnode = ((ixnode % nxnodes) + nxnodes) % nxnodes; - iynode = ((iynode % nynodes) + nynodes) % nynodes; - iznode = ((iznode % nznodes) + nznodes) % nznodes; - daparam[ii] = T_electron[ixnode][iynode][iznode]; - } - } -} -#endif - -void PairDeepMD::cum_sum(std::map &sum, std::map &vec) { - sum[0] = 0; - for (int ii = 1; ii < vec.size(); ++ii) { - sum[ii] = sum[ii - 1] + vec[ii - 1]; - } -} - PairDeepMD::PairDeepMD(LAMMPS *lmp) - : Pair(lmp) - + : PairDeepMDBase(lmp, cite_user_deepmd_package) { - if (lmp->citeme) { - lmp->citeme->add(cite_user_deepmd_package); - } - if (strcmp(update->unit_style, "lj") == 0) { - error->all(FLERR, - "Pair deepmd does not support unit style lj. Please use other " - "unit styles like metal or real unit instead. You may set it by " - "\"units metal\" or \"units real\""); - } - ener_unit_cvt_factor = force->boltz / 8.617343e-5; - dist_unit_cvt_factor = force->angstrom; - force_unit_cvt_factor = ener_unit_cvt_factor / dist_unit_cvt_factor; - - restartinfo = 1; -#if LAMMPS_VERSION_NUMBER >= 20201130 - centroidstressflag = - CENTROID_AVAIL; // set centroidstressflag = CENTROID_AVAIL to allow the - // use of the centroid/stress/atom. Added by Davide Tisi -#else - centroidstressflag = 2; // set centroidstressflag = 2 to allow the use of the - // centroid/stress/atom. Added by Davide Tisi -#endif - pppmflag = 1; - respa_enable = 0; - writedata = 0; - - cutoff = 0.; - numb_types = 0; - numb_types_spin = 0; - numb_models = 0; - out_freq = 0; - out_each = 0; - out_rel = 0; - out_rel_v = 0; - stdf_comm_buff_size = 0; - eps = 0.; - eps_v = 0.; - scale = NULL; - do_ttm = false; - do_compute_fparam = false; - do_compute_aparam = false; - single_model = false; - multi_models_mod_devi = false; - multi_models_no_mod_devi = false; - is_restart = false; - // set comm size needed by this Pair - comm_reverse = 1; - - print_summary(" "); -} - -void PairDeepMD::print_summary(const string pre) const { - if (comm->me == 0) { - // capture cout to a string, then call LAMMPS's utils::logmesg - // https://stackoverflow.com/a/4043813/9567349 - std::stringstream buffer; - std::streambuf *sbuf = std::cout.rdbuf(); - std::cout.rdbuf(buffer.rdbuf()); - - cout << "Summary of lammps deepmd module ..." << endl; - cout << pre << ">>> Info of deepmd-kit:" << endl; - deep_pot.print_summary(pre); - cout << pre << ">>> Info of lammps module:" << endl; - cout << pre << "use deepmd-kit at: " << STR_DEEPMD_ROOT << endl; - cout << pre << "source: " << STR_GIT_SUMM << endl; - cout << pre << "source branch: " << STR_GIT_BRANCH << endl; - cout << pre << "source commit: " << STR_GIT_HASH << endl; - cout << pre << "source commit at: " << STR_GIT_DATE << endl; - cout << pre << "build float prec: " << STR_FLOAT_PREC << endl; - cout << pre << "build with tf inc: " << STR_TensorFlow_INCLUDE_DIRS - << endl; - cout << pre << "build with tf lib: " << STR_TensorFlow_LIBRARY << endl; - - std::cout.rdbuf(sbuf); - utils::logmesg(lmp, buffer.str()); - } + // Constructor body can be empty } PairDeepMD::~PairDeepMD() { - if (allocated) { - memory->destroy(setflag); - memory->destroy(cutsq); - memory->destroy(scale); - } + // Ensure base class destructor is called } void PairDeepMD::compute(int eflag, int vflag) { @@ -485,14 +124,8 @@ void PairDeepMD::compute(int eflag, int vflag) { vector dfm(nall * 3, 0.); double **sp = atom->sp; double **fm = atom->fm; - // spin initialize if (atom->sp_flag) { - // get spin - for (int ii = 0; ii < nall; ++ii) { - for (int dd = 0; dd < 3; ++dd) { - dspin[ii * 3 + dd] = sp[ii][dd] * sp[ii][3]; // get real spin vector - } - } + std::cout << "Pair style 'deepmd' does not support spin atoms, please use pair style 'deepspin' instead." << std::endl; } vector dtype(nall); @@ -581,45 +214,23 @@ void PairDeepMD::compute(int eflag, int vflag) { if (single_model || multi_models_no_mod_devi) { // cvflag_atom is the right flag for the cvatom matrix if (!(eflag_atom || cvflag_atom)) { - if (!atom->sp_flag) { try { deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox, nghost, lmp_list, ago, fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } - } else { - try { - const vector &dcoord_const = dcoord; - const vector &dspin_const = dspin; - deep_pot.compute(dener, dforce, dforce_mag, dvirial, dcoord_const, - dspin_const, dtype, dbox, nghost, lmp_list, ago, - fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - } } // do atomic energy and virial else { vector deatom(nall * 1, 0); vector dvatom(nall * 9, 0); - if (!atom->sp_flag) { - try { - deep_pot.compute(dener, dforce, dvirial, deatom, dvatom, dcoord, - dtype, dbox, nghost, lmp_list, ago, fparam, - daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - } else { - try { - deep_pot.compute(dener, dforce, dforce_mag, dvirial, deatom, dvatom, - dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, - fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } + try { + deep_pot.compute(dener, dforce, dvirial, deatom, dvatom, dcoord, + dtype, dbox, nghost, lmp_list, ago, fparam, + daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); } if (eflag_atom) { for (int ii = 0; ii < nlocal; ++ii) { @@ -665,43 +276,22 @@ void PairDeepMD::compute(int eflag, int vflag) { vector all_energy; vector> all_atom_energy; vector> all_atom_virial; - if (!atom->sp_flag) { - if (!(eflag_atom || cvflag_atom)) { - try { - deep_pot_model_devi.compute(all_energy, all_force, all_virial, - dcoord, dtype, dbox, nghost, lmp_list, - ago, fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - } else { - try { - deep_pot_model_devi.compute(all_energy, all_force, all_virial, - all_atom_energy, all_atom_virial, - dcoord, dtype, dbox, nghost, lmp_list, - ago, fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } + if (!(eflag_atom || cvflag_atom)) { + try { + deep_pot_model_devi.compute(all_energy, all_force, all_virial, + dcoord, dtype, dbox, nghost, lmp_list, + ago, fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); } } else { - if (!(eflag_atom || cvflag_atom)) { - try { - deep_pot_model_devi.compute(all_energy, all_force, all_force_mag, - all_virial, dcoord, dspin, dtype, dbox, - nghost, lmp_list, ago, fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - } else { - try { - deep_pot_model_devi.compute( - all_energy, all_force, all_force_mag, all_virial, - all_atom_energy, all_atom_virial, dcoord, dspin, dtype, dbox, - nghost, lmp_list, ago, fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } + try { + deep_pot_model_devi.compute(all_energy, all_force, all_virial, + all_atom_energy, all_atom_virial, + dcoord, dtype, dbox, nghost, lmp_list, + ago, fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); } } // deep_pot_model_devi.compute_avg (dener, all_energy); @@ -778,20 +368,6 @@ void PairDeepMD::compute(int eflag, int vflag) { MPI_Reduce(&max, &all_f_max, 1, MPI_DOUBLE, MPI_MAX, 0, world); MPI_Reduce(&avg, &all_f_avg, 1, MPI_DOUBLE, MPI_SUM, 0, world); all_f_avg /= double(atom->natoms); - if (atom->sp_flag) { - deep_pot_model_devi.compute_avg(tmp_avg_fm, all_force_mag); - deep_pot_model_devi.compute_std_f(std_fm, tmp_avg_fm, all_force_mag); - if (out_rel == 1) { - deep_pot_model_devi.compute_relative_std_f(std_fm, tmp_avg_fm, eps); - } - min = numeric_limits::max(), max = 0, avg = 0; - ana_st(max, min, avg, std_fm, nlocal); - MPI_Reduce(&min, &all_fm_min, 1, MPI_DOUBLE, MPI_MIN, 0, world); - MPI_Reduce(&max, &all_fm_max, 1, MPI_DOUBLE, MPI_MAX, 0, world); - MPI_Reduce(&avg, &all_fm_avg, 1, MPI_DOUBLE, MPI_SUM, 0, world); - // need modified for only spin atoms - all_fm_avg /= double(atom->natoms); - } // std v std::vector send_v(9 * numb_models); std::vector recv_v(9 * numb_models); @@ -838,22 +414,10 @@ void PairDeepMD::compute(int eflag, int vflag) { all_f_max *= force_unit_cvt_factor; all_f_min *= force_unit_cvt_factor; all_f_avg *= force_unit_cvt_factor; - if (!atom->sp_flag) { - fp << setw(12) << update->ntimestep << " " << setw(18) << all_v_max - << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg - << " " << setw(18) << all_f_max << " " << setw(18) << all_f_min - << " " << setw(18) << all_f_avg; - } else { - all_fm_max *= force_unit_cvt_factor; - all_fm_min *= force_unit_cvt_factor; - all_fm_avg *= force_unit_cvt_factor; - fp << setw(12) << update->ntimestep << " " << setw(18) << all_v_max - << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg - << " " << setw(18) << all_f_max << " " << setw(18) << all_f_min - << " " << setw(18) << all_f_avg << " " << setw(18) << all_fm_max - << " " << setw(18) << all_fm_min << " " << setw(18) - << all_fm_avg; - } + fp << setw(12) << update->ntimestep << " " << setw(18) << all_v_max + << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg + << " " << setw(18) << all_f_max << " " << setw(18) << all_f_min + << " " << setw(18) << all_f_avg; } if (out_each == 1) { // need support for spin atomic force. @@ -904,21 +468,10 @@ void PairDeepMD::compute(int eflag, int vflag) { } } else { if (numb_models == 1) { - if (!atom->sp_flag) { - try { - deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - } else { - try { - const vector &dcoord_const = dcoord; - const vector &dspin_const = dspin; - deep_pot.compute(dener, dforce, dforce_mag, dvirial, dcoord_const, - dspin_const, dtype, dbox); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } + try { + deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); } } else { error->all(FLERR, "Serial version does not support model devi"); @@ -926,29 +479,12 @@ void PairDeepMD::compute(int eflag, int vflag) { } // get force - if (!atom->sp_flag) { - for (int ii = 0; ii < nall; ++ii) { - for (int dd = 0; dd < 3; ++dd) { - f[ii][dd] += scale[1][1] * dforce[3 * ii + dd] * force_unit_cvt_factor; - } - } - } else { - // unit_factor = hbar / spin_norm; - const double hbar = 6.5821191e-04; - for (int ii = 0; ii < nall; ++ii) { - for (int dd = 0; dd < 3; ++dd) { - f[ii][dd] += scale[1][1] * dforce[3 * ii + dd] * force_unit_cvt_factor; - fm[ii][dd] += scale[1][1] * dforce_mag[3 * ii + dd] / - (hbar / sp[ii][3]) * force_unit_cvt_factor; - } + for (int ii = 0; ii < nall; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + f[ii][dd] += scale[1][1] * dforce[3 * ii + dd] * force_unit_cvt_factor; } } - if (atom->sp_flag) { - std::map().swap(new_idx_map); - std::map().swap(old_idx_map); - // malloc_trim(0); - } // accumulate energy and virial if (eflag) { @@ -964,459 +500,6 @@ void PairDeepMD::compute(int eflag, int vflag) { } } -void PairDeepMD::allocate() { - allocated = 1; - int n = atom->ntypes; - - memory->create(setflag, n + 1, n + 1, "pair:setflag"); - memory->create(cutsq, n + 1, n + 1, "pair:cutsq"); - memory->create(scale, n + 1, n + 1, "pair:scale"); - - for (int i = 1; i <= n; i++) { - for (int j = i; j <= n; j++) { - setflag[i][j] = 0; - scale[i][j] = 0; - } - } - for (int i = 1; i <= numb_types; ++i) { - if (i > n) { - continue; - } - for (int j = i; j <= numb_types; ++j) { - if (j > n) { - continue; - } - setflag[i][j] = 1; - scale[i][j] = 1.0; - } - } -} - -static bool is_key(const string &input) { - vector keys; - keys.push_back("out_freq"); - keys.push_back("out_file"); - keys.push_back("fparam"); - keys.push_back("aparam"); - keys.push_back("fparam_from_compute"); - keys.push_back("aparam_from_compute"); - keys.push_back("ttm"); - keys.push_back("atomic"); - keys.push_back("relative"); - keys.push_back("relative_v"); - keys.push_back("virtual_len"); - keys.push_back("spin_norm"); - - for (int ii = 0; ii < keys.size(); ++ii) { - if (input == keys[ii]) { - return true; - } - } - return false; -} - -void PairDeepMD::settings(int narg, char **arg) { - if (narg <= 0) { - error->all(FLERR, "Illegal pair_style command"); - } - - vector models; - int iarg = 0; - while (iarg < narg) { - if (is_key(arg[iarg])) { - break; - } - iarg++; - } - for (int ii = 0; ii < iarg; ++ii) { - models.push_back(arg[ii]); - } - numb_models = models.size(); - if (numb_models == 1) { - try { - deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - cutoff = deep_pot.cutoff() * dist_unit_cvt_factor; - numb_types = deep_pot.numb_types(); - numb_types_spin = deep_pot.numb_types_spin(); - dim_fparam = deep_pot.dim_fparam(); - dim_aparam = deep_pot.dim_aparam(); - } else { - try { - deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); - deep_pot_model_devi.init(models, get_node_rank(), - get_file_content(models)); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - cutoff = deep_pot_model_devi.cutoff() * dist_unit_cvt_factor; - numb_types = deep_pot_model_devi.numb_types(); - numb_types_spin = deep_pot_model_devi.numb_types_spin(); - dim_fparam = deep_pot_model_devi.dim_fparam(); - dim_aparam = deep_pot_model_devi.dim_aparam(); - assert(cutoff == deep_pot.cutoff() * dist_unit_cvt_factor); - assert(numb_types == deep_pot.numb_types()); - assert(numb_types_spin == deep_pot.numb_types_spin()); - assert(dim_fparam == deep_pot.dim_fparam()); - assert(dim_aparam == deep_pot.dim_aparam()); - } - - out_freq = 100; - out_file = "model_devi.out"; - out_each = 0; - out_rel = 0; - eps = 0.; - fparam.clear(); - aparam.clear(); - while (iarg < narg) { - if (!is_key(arg[iarg])) { - error->all(FLERR, - "Illegal pair_style command\nwrong number of parameters\n"); - } - if (string(arg[iarg]) == string("out_freq")) { - if (iarg + 1 >= narg) { - error->all(FLERR, "Illegal out_freq, not provided"); - } - out_freq = atoi(arg[iarg + 1]); - iarg += 2; - } else if (string(arg[iarg]) == string("out_file")) { - if (iarg + 1 >= narg) { - error->all(FLERR, "Illegal out_file, not provided"); - } - out_file = string(arg[iarg + 1]); - iarg += 2; - } else if (string(arg[iarg]) == string("fparam")) { - for (int ii = 0; ii < dim_fparam; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - char tmp[1024]; - sprintf(tmp, "Illegal fparam, the dimension should be %d", - dim_fparam); - error->all(FLERR, tmp); - } - fparam.push_back(atof(arg[iarg + 1 + ii])); - } - iarg += 1 + dim_fparam; - } else if (string(arg[iarg]) == string("aparam")) { - for (int ii = 0; ii < dim_aparam; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - char tmp[1024]; - sprintf(tmp, "Illegal aparam, the dimension should be %d", - dim_aparam); - error->all(FLERR, tmp); - } - aparam.push_back(atof(arg[iarg + 1 + ii])); - } - iarg += 1 + dim_aparam; - } else if (string(arg[iarg]) == string("ttm")) { -#ifdef USE_TTM - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, "invalid ttm key: should be ttm ttm_fix_id(str)"); - } - } - do_ttm = true; - ttm_fix_id = arg[iarg + 1]; - iarg += 1 + 1; -#else - error->all(FLERR, - "The deepmd-kit was compiled without support for TTM, please " - "rebuild it with LAMMPS version >=20210831"); -#endif - } - - /////////////////////////////////////////////// - // pair_style deepmd cp.pb fparam_from_compute TEMP - // compute TEMP all temp - ////////////////////////////////////////////// - else if (string(arg[iarg]) == string("fparam_from_compute")) { - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, - "invalid fparam_from_compute key: should be " - "fparam_from_compute compute_fparam_id(str)"); - } - } - do_compute_fparam = true; - compute_fparam_id = arg[iarg + 1]; - iarg += 1 + 1; - } else if (string(arg[iarg]) == string("aparam_from_compute")) { - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, - "invalid aparam_from_compute key: should be " - "aparam_from_compute compute_aparam_id(str)"); - } - } - do_compute_aparam = true; - compute_aparam_id = arg[iarg + 1]; - iarg += 1 + 1; - } else if (string(arg[iarg]) == string("atomic")) { - out_each = 1; - iarg += 1; - } else if (string(arg[iarg]) == string("relative")) { - out_rel = 1; - eps = atof(arg[iarg + 1]) / ener_unit_cvt_factor; - iarg += 2; - } else if (string(arg[iarg]) == string("relative_v")) { - out_rel_v = 1; - eps_v = atof(arg[iarg + 1]) / ener_unit_cvt_factor; - iarg += 2; - } else if (string(arg[iarg]) == string("virtual_len")) { - virtual_len.resize(numb_types_spin); - for (int ii = 0; ii < numb_types_spin; ++ii) { - virtual_len[ii] = atof(arg[iarg + ii + 1]); - } - iarg += numb_types_spin + 1; - } else if (string(arg[iarg]) == string("spin_norm")) { - spin_norm.resize(numb_types_spin); - for (int ii = 0; ii < numb_types_spin; ++ii) { - spin_norm[ii] = atof(arg[iarg + ii + 1]); - } - iarg += numb_types_spin + 1; - } - } - - if (out_freq < 0) { - error->all(FLERR, "Illegal out_freq, should be >= 0"); - } - if ((int)do_ttm + (int)do_compute_aparam + (int)(aparam.size() > 0) > 1) { - error->all(FLERR, - "aparam, aparam_from_compute, and ttm should NOT be set " - "simultaneously"); - } - if (do_compute_fparam && fparam.size() > 0) { - error->all( - FLERR, - "fparam and fparam_from_compute should NOT be set simultaneously"); - } - - if (comm->me == 0) { - if (numb_models > 1 && out_freq > 0) { - if (!is_restart) { - fp.open(out_file); - fp << scientific; - if (!atom->sp_flag) { - fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" - << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" - << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" - << setw(18 + 1) << "avg_devi_f"; - if (out_each) { - // at this time, we don't know how many atoms - fp << setw(18 + 1) << "atm_devi_f(N)"; - } - fp << endl; - } else { - fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" - << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" - << setw(18 + 1) << "max_devi_fr" << setw(18 + 1) << "min_devi_fr" - << setw(18 + 1) << "avg_devi_fr" << setw(18 + 1) << "max_devi_fm" - << setw(18 + 1) << "min_devi_fm" << setw(18 + 1) << "avg_devi_fm" - << endl; - } - } else { - fp.open(out_file, std::ofstream::out | std::ofstream::app); - fp << scientific; - } - } - string pre = " "; - cout << pre << ">>> Info of model(s):" << endl - << pre << "using " << setw(3) << numb_models << " model(s): "; - if (narg == 1) { - cout << arg[0] << " "; - } else { - for (int ii = 0; ii < models.size(); ++ii) { - cout << models[ii] << " "; - } - } - cout << endl - << pre << "rcut in model: " << cutoff << endl - << pre << "ntypes in model: " << numb_types << endl; - if (fparam.size() > 0) { - cout << pre << "using fparam(s): "; - for (int ii = 0; ii < dim_fparam; ++ii) { - cout << fparam[ii] << " "; - } - cout << endl; - } - if (do_compute_fparam) { - cout << pre << "using compute id (fparam): "; - cout << compute_fparam_id << " " << endl; - } - if (do_compute_aparam) { - cout << pre << "using compute id (aparam): "; - cout << compute_aparam_id << " " << endl; - } - if (aparam.size() > 0) { - cout << pre << "using aparam(s): "; - for (int ii = 0; ii < aparam.size(); ++ii) { - cout << aparam[ii] << " "; - } - cout << endl; - } - if (do_ttm) { - cout << pre << "using ttm fix: "; - cout << ttm_fix_id << " "; - if (dim_fparam > 0) { - cout << "(fparam)" << endl; - } else if (dim_aparam > 0) { - cout << "(aparam)" << endl; - } - } - } - - // comm_reverse = numb_models * 3; - if (atom->sp_flag) { - comm_reverse = numb_models * 3 * 2; - } else { - comm_reverse = numb_models * 3; - } - all_force.resize(numb_models); -} - -void PairDeepMD::read_restart(FILE *) { is_restart = true; } - -void PairDeepMD::write_restart(FILE *) { - // pass -} - -/* ---------------------------------------------------------------------- - set coeffs for one or more type pairs -------------------------------------------------------------------------- */ - -void PairDeepMD::coeff(int narg, char **arg) { - if (!allocated) { - allocate(); - } - - int n = atom->ntypes; - int ilo, ihi, jlo, jhi; - ilo = 0; - jlo = 0; - ihi = n; - jhi = n; - if (narg >= 2) { - utils::bounds(FLERR, arg[0], 1, atom->ntypes, ilo, ihi, error); - utils::bounds(FLERR, arg[1], 1, atom->ntypes, jlo, jhi, error); - if (ilo != 1 || jlo != 1 || ihi != n || jhi != n) { - error->all(FLERR, - "deepmd requires that the scale should be set to all atom " - "types, i.e. pair_coeff * *."); - } - } - if (narg <= 2) { - type_idx_map.resize(n); - for (int ii = 0; ii < n; ++ii) { - type_idx_map[ii] = ii; - } - } else { - int iarg = 2; - - // type_map is a list of strings with undetermined length - // note: although we have numb_types from the model, we do not require - // the number of types in the system matches that in the model - std::vector type_map; - std::string type_map_str; - deep_pot.get_type_map(type_map_str); - // convert the string to a vector of strings - std::istringstream iss(type_map_str); - std::string type_name; - while (iss >> type_name) { - type_map.push_back(type_name); - } - - type_idx_map.clear(); - type_names.clear(); - while (iarg < narg) { - std::string type_name = arg[iarg]; - type_names.push_back(type_name); - bool found_element = false; - for (int ii = 0; ii < type_map.size(); ++ii) { - if (type_map[ii] == type_name) { - type_idx_map.push_back(ii); - found_element = true; - break; - } - } - if (!found_element && "NULL" == type_name) { - type_idx_map.push_back(type_map.size()); // ghost type - found_element = true; - } - if (!found_element) { - error->all(FLERR, "pair_coeff: element " + type_name + - " not found in the model"); - } - iarg += 1; - } - numb_types = type_idx_map.size(); - if (numb_types < n) { - type_idx_map.resize(n); - for (int ii = numb_types; ii < n; ++ii) { - type_idx_map[ii] = -1; - } - } - } - for (int i = ilo; i <= ihi; i++) { - for (int j = MAX(jlo, i); j <= jhi; j++) { - setflag[i][j] = 1; - scale[i][j] = 1.0; - if (i > numb_types || j > numb_types) { - char warning_msg[1024]; - sprintf(warning_msg, - "Interaction between types %d and %d is set with deepmd, but " - "will be ignored.\n Deepmd model has only %d types, it only " - "computes the mulitbody interaction of types: 1-%d.", - i, j, numb_types, numb_types); - error->warning(FLERR, warning_msg); - } - } - } -} - -void PairDeepMD::init_style() { -#if LAMMPS_VERSION_NUMBER >= 20220324 - neighbor->add_request(this, NeighConst::REQ_FULL); -#else - int irequest = neighbor->request(this, instance_me); - neighbor->requests[irequest]->half = 0; - neighbor->requests[irequest]->full = 1; - // neighbor->requests[irequest]->newton = 2; -#endif - if (out_each == 1) { - int ntotal = atom->natoms; - int nprocs = comm->nprocs; - if (ntotal > stdf_comm_buff_size) { - stdf_comm_buff_size = ntotal; - } - memory->create(counts, nprocs, "deepmd:counts"); - memory->create(displacements, nprocs, "deepmd:displacements"); - memory->create(stdfsend, ntotal, "deepmd:stdfsendall"); - memory->create(stdfrecv, ntotal, "deepmd:stdfrecvall"); - memory->create(tagsend, ntotal, "deepmd:tagsendall"); - memory->create(tagrecv, ntotal, "deepmd:tagrecvall"); - } -} - -double PairDeepMD::init_one(int i, int j) { - if (i > numb_types || j > numb_types) { - char warning_msg[1024]; - sprintf(warning_msg, - "Interaction between types %d and %d is set with deepmd, but will " - "be ignored.\n Deepmd model has only %d types, it only computes " - "the mulitbody interaction of types: 1-%d.", - i, j, numb_types, numb_types); - error->warning(FLERR, warning_msg); - } - - if (setflag[i][j] == 0) { - scale[i][j] = 1.0; - } - scale[j][i] = scale[i][j]; - - return cutoff; -} - /* ---------------------------------------------------------------------- */ int PairDeepMD::pack_reverse_comm(int n, int first, double *buf) { @@ -1425,16 +508,7 @@ int PairDeepMD::pack_reverse_comm(int n, int first, double *buf) { m = 0; last = first + n; if (atom->sp_flag) { - for (i = first; i < last; i++) { - for (int dd = 0; dd < numb_models; ++dd) { - buf[m++] = all_force[dd][3 * i + 0]; - buf[m++] = all_force[dd][3 * i + 1]; - buf[m++] = all_force[dd][3 * i + 2]; - buf[m++] = all_force_mag[dd][3 * i + 0]; - buf[m++] = all_force_mag[dd][3 * i + 1]; - buf[m++] = all_force_mag[dd][3 * i + 2]; - } - } + std::cout << "Pair style 'deepmd' does not support spin atoms, please use pair style 'deepspin' instead." << std::endl; } else { for (i = first; i < last; i++) { for (int dd = 0; dd < numb_models; ++dd) { @@ -1454,17 +528,7 @@ void PairDeepMD::unpack_reverse_comm(int n, int *list, double *buf) { m = 0; if (atom->sp_flag) { - for (i = 0; i < n; i++) { - j = list[i]; - for (int dd = 0; dd < numb_models; ++dd) { - all_force[dd][3 * j + 0] += buf[m++]; - all_force[dd][3 * j + 1] += buf[m++]; - all_force[dd][3 * j + 2] += buf[m++]; - all_force_mag[dd][3 * j + 0] += buf[m++]; - all_force_mag[dd][3 * j + 1] += buf[m++]; - all_force_mag[dd][3 * j + 2] += buf[m++]; - } - } + std::cout << "Pair style 'deepmd' does not support spin atoms, please use pair style 'deepspin' instead." << std::endl; } else { for (i = 0; i < n; i++) { j = list[i]; @@ -1475,16 +539,4 @@ void PairDeepMD::unpack_reverse_comm(int n, int *list, double *buf) { } } } -} - -void *PairDeepMD::extract(const char *str, int &dim) { - if (strcmp(str, "cut_coul") == 0) { - dim = 0; - return (void *)&cutoff; - } - if (strcmp(str, "scale") == 0) { - dim = 2; - return (void *)scale; - } - return NULL; -} +} \ No newline at end of file diff --git a/source/lmp/pair_deepmd.h b/source/lmp/pair_deepmd.h index 54830260a2..cf97322814 100644 --- a/source/lmp/pair_deepmd.h +++ b/source/lmp/pair_deepmd.h @@ -12,22 +12,7 @@ PairStyle(deepmd, PairDeepMD) #ifndef LMP_PAIR_NNP_H #define LMP_PAIR_NNP_H -#include "pair.h" -#ifdef DP_USE_CXX_API -#ifdef LMPPLUGIN -#include "DeepPot.h" -#else -#include "deepmd/DeepPot.h" -#endif -namespace deepmd_compat = deepmd; -#else -#ifdef LMPPLUGIN -#include "deepmd.hpp" -#else -#include "deepmd/deepmd.hpp" -#endif -namespace deepmd_compat = deepmd::hpp; -#endif +#include "pair_base.h" #include #include #include @@ -39,83 +24,15 @@ namespace LAMMPS_NS { class CommBrickDeepMD : public CommBrick { friend class PairDeepMD; }; -class PairDeepMD : public Pair { +class PairDeepMD : public PairDeepMDBase { public: PairDeepMD(class LAMMPS *); ~PairDeepMD() override; void compute(int, int) override; - void *extract(const char *, int &) override; - void settings(int, char **) override; - void coeff(int, char **) override; - void init_style() override; - void write_restart(FILE *) override; - void read_restart(FILE *) override; - double init_one(int i, int j) override; int pack_reverse_comm(int, int, double *) override; void unpack_reverse_comm(int, int *, double *) override; - void print_summary(const std::string pre) const; - int get_node_rank(); - void cum_sum(std::map &, std::map &); - - std::string get_file_content(const std::string &model); - std::vector get_file_content( - const std::vector &models); - std::vector type_names; - double ener_unit_cvt_factor, dist_unit_cvt_factor, force_unit_cvt_factor; - - protected: - virtual void allocate(); - double **scale; private: - deepmd_compat::DeepPot deep_pot; - deepmd_compat::DeepPotModelDevi deep_pot_model_devi; - unsigned numb_models; - double cutoff; - int numb_types; - int numb_types_spin; - std::vector > all_force; - std::vector > all_force_mag; - std::ofstream fp; - int out_freq; - std::string out_file; - int dim_fparam; - int dim_aparam; - int out_each; - int out_rel; - int out_rel_v; - int stdf_comm_buff_size; - bool single_model; - bool multi_models_mod_devi; - bool multi_models_no_mod_devi; - bool is_restart; - std::vector virtual_len; - std::vector spin_norm; - // for spin systems, search new index of atoms by their old index - std::map new_idx_map; - std::map old_idx_map; - std::vector fparam; - std::vector aparam; - double eps; - double eps_v; - - void make_fparam_from_compute(std::vector &fparam); - bool do_compute_fparam; - std::string compute_fparam_id; - void make_aparam_from_compute(std::vector &aparam); - bool do_compute_aparam; - std::string compute_aparam_id; - - void make_ttm_fparam(std::vector &fparam); - - void make_ttm_aparam(std::vector &dparam); - bool do_ttm; - std::string ttm_fix_id; - int *counts, *displacements; - tagint *tagsend, *tagrecv; - double *stdfsend, *stdfrecv; - std::vector type_idx_map; - CommBrickDeepMD *commdata_; }; diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp new file mode 100644 index 0000000000..427bfc012e --- /dev/null +++ b/source/lmp/pair_deepspin.cpp @@ -0,0 +1,572 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include + +#include +#include +#include +#include +#include +#include + +#include "atom.h" +#include "citeme.h" +#include "comm.h" +#include "compute.h" +#include "domain.h" +#include "error.h" +#include "fix.h" +#include "force.h" +#include "memory.h" +#include "modify.h" +#include "neigh_list.h" +#include "neigh_request.h" +#include "neighbor.h" +#include "output.h" +#include "update.h" +#if LAMMPS_VERSION_NUMBER >= 20210831 +// in lammps #2902, fix_ttm members turns from private to protected +#define USE_TTM 1 +#include "fix_ttm_dp.h" +#endif + +#include "deepmd_version.h" +#include "pair_deepspin.h" + +using namespace LAMMPS_NS; +using namespace std; + +static const char cite_user_deepmd_package[] = + "USER-DEEPMD package:\n\n" + "@article{Wang_ComputPhysCommun_2018_v228_p178,\n" + " author = {Wang, Han and Zhang, Linfeng and Han, Jiequn and E, Weinan},\n" + " doi = {10.1016/j.cpc.2018.03.016},\n" + " url = {https://doi.org/10.1016/j.cpc.2018.03.016},\n" + " year = 2018,\n" + " month = {jul},\n" + " publisher = {Elsevier {BV}},\n" + " volume = 228,\n" + " journal = {Comput. Phys. Commun.},\n" + " title = {{DeePMD-kit: A deep learning package for many-body potential " + "energy representation and molecular dynamics}},\n" + " pages = {178--184}\n" + "}\n" + "@misc{Zeng_JChemPhys_2023_v159_p054801,\n" + " title = {{DeePMD-kit v2: A software package for deep potential " + "models}},\n" + " author = {Jinzhe Zeng and Duo Zhang and Denghui Lu and Pinghui Mo and " + "Zeyu Li\n" + " and Yixiao Chen and Mari{\\'a}n Rynik and Li'ang Huang and Ziyao " + "Li and \n" + " Shaochen Shi and Yingze Wang and Haotian Ye and Ping Tuo and " + "Jiabin\n" + " Yang and Ye Ding and Yifan Li and Davide Tisi and Qiyu Zeng and " + "Han \n" + " Bao and Yu Xia and Jiameng Huang and Koki Muraoka and Yibo Wang " + "and \n" + " Junhan Chang and Fengbo Yuan and Sigbj{\\o}rn L{\\o}land Bore " + "and " + "Chun\n" + " Cai and Yinnian Lin and Bo Wang and Jiayan Xu and Jia-Xin Zhu " + "and \n" + " Chenxing Luo and Yuzhi Zhang and Rhys E A Goodall and Wenshuo " + "Liang\n" + " and Anurag Kumar Singh and Sikai Yao and Jingchao Zhang and " + "Renata\n" + " Wentzcovitch and Jiequn Han and Jie Liu and Weile Jia and Darrin " + "M\n" + " York and Weinan E and Roberto Car and Linfeng Zhang and Han " + "Wang},\n" + " journal = {J. Chem. Phys.},\n" + " volume = 159,\n" + " issue = 5, \n" + " year = 2023,\n" + " pages = 054801,\n" + " doi = {10.1063/5.0155600},\n" + "}\n\n"; + +PairDeepSpin::PairDeepSpin(LAMMPS *lmp) + : PairDeepMDBase(lmp, cite_user_deepmd_package) +{ + // Constructor body can be empty +} + + +PairDeepSpin::~PairDeepSpin() { + // Ensure base class destructor is called +} + +void PairDeepSpin::compute(int eflag, int vflag) { + if (numb_models == 0) { + return; + } + // See + // https://docs.lammps.org/Developer_updating.html#use-ev-init-to-initialize-variables-derived-from-eflag-and-vflag + ev_init(eflag, vflag); + if (vflag_atom) { + error->all(FLERR, + "6-element atomic virial is not supported. Use compute " + "centroid/stress/atom command for 9-element atomic virial."); + } + bool do_ghost = true; + // dpa2 communication + commdata_ = (CommBrickDeepSpin *)comm; + double **x = atom->x; + double **f = atom->f; + int *type = atom->type; + int nlocal = atom->nlocal; + int nghost = 0; + if (do_ghost) { + nghost = atom->nghost; + } + int nall = nlocal + nghost; + int newton_pair = force->newton_pair; + + vector dspin(nall * 3, 0.); + vector dfm(nall * 3, 0.); + double **sp = atom->sp; + double **fm = atom->fm; + // spin initialize + if (atom->sp_flag) { + // get spin + for (int ii = 0; ii < nall; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + dspin[ii * 3 + dd] = sp[ii][dd] * sp[ii][3]; // get real spin vector + } + } + } else { + std::cout << "Pair style 'deepspin' only supports spin atoms, please use pair style 'deepmd' instead." << std::endl; + } + + vector dtype(nall); + for (int ii = 0; ii < nall; ++ii) { + dtype[ii] = type_idx_map[type[ii] - 1]; + } + + double dener(0); + vector dforce(nall * 3); + vector dforce_mag(nall * 3); + vector dvirial(9, 0); + vector dcoord(nall * 3, 0.); + vector dbox(9, 0); + vector daparam; + + // get box + dbox[0] = domain->h[0] / dist_unit_cvt_factor; // xx + dbox[4] = domain->h[1] / dist_unit_cvt_factor; // yy + dbox[8] = domain->h[2] / dist_unit_cvt_factor; // zz + dbox[7] = domain->h[3] / dist_unit_cvt_factor; // zy + dbox[6] = domain->h[4] / dist_unit_cvt_factor; // zx + dbox[3] = domain->h[5] / dist_unit_cvt_factor; // yx + + // get coord + for (int ii = 0; ii < nall; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + dcoord[ii * 3 + dd] = + (x[ii][dd] - domain->boxlo[dd]) / dist_unit_cvt_factor; + } + } + + if (do_compute_aparam) { + make_aparam_from_compute(daparam); + } else if (aparam.size() > 0) { + // uniform aparam + make_uniform_aparam(daparam, aparam, nlocal); + } else if (do_ttm) { +#ifdef USE_TTM + if (dim_aparam > 0) { + make_ttm_aparam(daparam); + } else if (dim_fparam > 0) { + make_ttm_fparam(fparam); + } +#endif + } + + if (do_compute_fparam) { + make_fparam_from_compute(fparam); + } + + // int ago = numb_models > 1 ? 0 : neighbor->ago; + int ago = neighbor->ago; + if (numb_models > 1) { + if (multi_models_no_mod_devi && + (out_freq > 0 && update->ntimestep % out_freq == 0)) { + ago = 0; + } else if (multi_models_mod_devi && + (out_freq == 0 || update->ntimestep % out_freq != 0)) { + ago = 0; + } + } + // compute + single_model = (numb_models == 1); + multi_models_no_mod_devi = + (numb_models > 1 && (out_freq == 0 || update->ntimestep % out_freq != 0)); + multi_models_mod_devi = + (numb_models > 1 && (out_freq > 0 && update->ntimestep % out_freq == 0)); + if (do_ghost) { + deepmd_compat::InputNlist lmp_list( + list->inum, list->ilist, list->numneigh, list->firstneigh, + commdata_->nswap, commdata_->sendnum, commdata_->recvnum, + commdata_->firstrecv, commdata_->sendlist, commdata_->sendproc, + commdata_->recvproc, &world); + deepmd_compat::InputNlist extend_lmp_list; + if (single_model || multi_models_no_mod_devi) { + // cvflag_atom is the right flag for the cvatom matrix + if (!(eflag_atom || cvflag_atom)) { + try { + deep_pot.compute_spin(dener, dforce, dforce_mag, dvirial, dcoord, + dspin, dtype, dbox, nghost, lmp_list, ago, + fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + } + // do atomic energy and virial + else { + vector deatom(nall * 1, 0); + vector dvatom(nall * 9, 0); + try { + deep_pot.compute_spin(dener, dforce, dforce_mag, dvirial, deatom, dvatom, + dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, + fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + if (eflag_atom) { + for (int ii = 0; ii < nlocal; ++ii) { + eatom[ii] += scale[1][1] * deatom[ii] * ener_unit_cvt_factor; + } + } + // Added by Davide Tisi 2020 + // interface the atomic virial computed by DeepMD + // with the one used in centroid atoms + if (cvflag_atom) { + for (int ii = 0; ii < nall; ++ii) { + // vatom[ii][0] += 1.0 * dvatom[9*ii+0]; + // vatom[ii][1] += 1.0 * dvatom[9*ii+4]; + // vatom[ii][2] += 1.0 * dvatom[9*ii+8]; + // vatom[ii][3] += 1.0 * dvatom[9*ii+3]; + // vatom[ii][4] += 1.0 * dvatom[9*ii+6]; + // vatom[ii][5] += 1.0 * dvatom[9*ii+7]; + cvatom[ii][0] += + scale[1][1] * dvatom[9 * ii + 0] * ener_unit_cvt_factor; // xx + cvatom[ii][1] += + scale[1][1] * dvatom[9 * ii + 4] * ener_unit_cvt_factor; // yy + cvatom[ii][2] += + scale[1][1] * dvatom[9 * ii + 8] * ener_unit_cvt_factor; // zz + cvatom[ii][3] += + scale[1][1] * dvatom[9 * ii + 3] * ener_unit_cvt_factor; // xy + cvatom[ii][4] += + scale[1][1] * dvatom[9 * ii + 6] * ener_unit_cvt_factor; // xz + cvatom[ii][5] += + scale[1][1] * dvatom[9 * ii + 7] * ener_unit_cvt_factor; // yz + cvatom[ii][6] += + scale[1][1] * dvatom[9 * ii + 1] * ener_unit_cvt_factor; // yx + cvatom[ii][7] += + scale[1][1] * dvatom[9 * ii + 2] * ener_unit_cvt_factor; // zx + cvatom[ii][8] += + scale[1][1] * dvatom[9 * ii + 5] * ener_unit_cvt_factor; // zy + } + } + } + } else if (multi_models_mod_devi) { + vector deatom(nall * 1, 0); + vector dvatom(nall * 9, 0); + vector> all_virial; + vector all_energy; + vector> all_atom_energy; + vector> all_atom_virial; + if (!(eflag_atom || cvflag_atom)) { + try { + deep_pot_model_devi.compute_spin(all_energy, all_force, all_force_mag, + all_virial, dcoord, dspin, dtype, dbox, + nghost, lmp_list, ago, fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + } else { + try { + deep_pot_model_devi.compute_spin( + all_energy, all_force, all_force_mag, all_virial, + all_atom_energy, all_atom_virial, dcoord, dspin, dtype, dbox, + nghost, lmp_list, ago, fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + } + // deep_pot_model_devi.compute_avg (dener, all_energy); + // deep_pot_model_devi.compute_avg (dforce, all_force); + // deep_pot_model_devi.compute_avg (dvirial, all_virial); + // deep_pot_model_devi.compute_avg (deatom, all_atom_energy); + // deep_pot_model_devi.compute_avg (dvatom, all_atom_virial); + dener = all_energy[0]; + dforce = all_force[0]; + dforce_mag = all_force_mag[0]; + dvirial = all_virial[0]; + if (eflag_atom) { + deatom = all_atom_energy[0]; + for (int ii = 0; ii < nlocal; ++ii) { + eatom[ii] += scale[1][1] * deatom[ii] * ener_unit_cvt_factor; + } + } + // Added by Davide Tisi 2020 + // interface the atomic virial computed by DeepMD + // with the one used in centroid atoms + if (cvflag_atom) { + dvatom = all_atom_virial[0]; + for (int ii = 0; ii < nall; ++ii) { + // vatom[ii][0] += 1.0 * dvatom[9*ii+0]; + // vatom[ii][1] += 1.0 * dvatom[9*ii+4]; + // vatom[ii][2] += 1.0 * dvatom[9*ii+8]; + // vatom[ii][3] += 1.0 * dvatom[9*ii+3]; + // vatom[ii][4] += 1.0 * dvatom[9*ii+6]; + // vatom[ii][5] += 1.0 * dvatom[9*ii+7]; + cvatom[ii][0] += + scale[1][1] * dvatom[9 * ii + 0] * ener_unit_cvt_factor; // xx + cvatom[ii][1] += + scale[1][1] * dvatom[9 * ii + 4] * ener_unit_cvt_factor; // yy + cvatom[ii][2] += + scale[1][1] * dvatom[9 * ii + 8] * ener_unit_cvt_factor; // zz + cvatom[ii][3] += + scale[1][1] * dvatom[9 * ii + 3] * ener_unit_cvt_factor; // xy + cvatom[ii][4] += + scale[1][1] * dvatom[9 * ii + 6] * ener_unit_cvt_factor; // xz + cvatom[ii][5] += + scale[1][1] * dvatom[9 * ii + 7] * ener_unit_cvt_factor; // yz + cvatom[ii][6] += + scale[1][1] * dvatom[9 * ii + 1] * ener_unit_cvt_factor; // yx + cvatom[ii][7] += + scale[1][1] * dvatom[9 * ii + 2] * ener_unit_cvt_factor; // zx + cvatom[ii][8] += + scale[1][1] * dvatom[9 * ii + 5] * ener_unit_cvt_factor; // zy + } + } + if (out_freq > 0 && update->ntimestep % out_freq == 0) { + int rank = comm->me; + // std force + if (newton_pair) { +#if LAMMPS_VERSION_NUMBER >= 20220324 + comm->reverse_comm(this); +#else + comm->reverse_comm_pair(this); +#endif + } + vector std_f; + vector tmp_avg_f; + vector std_fm; + vector tmp_avg_fm; + deep_pot_model_devi.compute_avg(tmp_avg_f, all_force); + deep_pot_model_devi.compute_std_f(std_f, tmp_avg_f, all_force); + if (out_rel == 1) { + deep_pot_model_devi.compute_relative_std_f(std_f, tmp_avg_f, eps); + } + double min = numeric_limits::max(), max = 0, avg = 0; + ana_st(max, min, avg, std_f, nlocal); + double all_f_min = 0, all_f_max = 0, all_f_avg = 0; + double all_fm_min = 0, all_fm_max = 0, all_fm_avg = 0; + MPI_Reduce(&min, &all_f_min, 1, MPI_DOUBLE, MPI_MIN, 0, world); + MPI_Reduce(&max, &all_f_max, 1, MPI_DOUBLE, MPI_MAX, 0, world); + MPI_Reduce(&avg, &all_f_avg, 1, MPI_DOUBLE, MPI_SUM, 0, world); + all_f_avg /= double(atom->natoms); + deep_pot_model_devi.compute_avg(tmp_avg_fm, all_force_mag); + deep_pot_model_devi.compute_std_f(std_fm, tmp_avg_fm, all_force_mag); + if (out_rel == 1) { + deep_pot_model_devi.compute_relative_std_f(std_fm, tmp_avg_fm, eps); + } + min = numeric_limits::max(), max = 0, avg = 0; + ana_st(max, min, avg, std_fm, nlocal); + MPI_Reduce(&min, &all_fm_min, 1, MPI_DOUBLE, MPI_MIN, 0, world); + MPI_Reduce(&max, &all_fm_max, 1, MPI_DOUBLE, MPI_MAX, 0, world); + MPI_Reduce(&avg, &all_fm_avg, 1, MPI_DOUBLE, MPI_SUM, 0, world); + // need modified for only spin atoms + all_fm_avg /= double(atom->natoms); + // std v + std::vector send_v(9 * numb_models); + std::vector recv_v(9 * numb_models); + for (int kk = 0; kk < numb_models; ++kk) { + for (int ii = 0; ii < 9; ++ii) { + send_v[kk * 9 + ii] = all_virial[kk][ii] / double(atom->natoms); + } + } + MPI_Reduce(&send_v[0], &recv_v[0], 9 * numb_models, MPI_DOUBLE, MPI_SUM, + 0, world); + std::vector> all_virial_1(numb_models); + std::vector avg_virial, std_virial; + for (int kk = 0; kk < numb_models; ++kk) { + all_virial_1[kk].resize(9); + for (int ii = 0; ii < 9; ++ii) { + all_virial_1[kk][ii] = recv_v[kk * 9 + ii]; + } + } + double all_v_min = numeric_limits::max(), all_v_max = 0, + all_v_avg = 0; + if (rank == 0) { + deep_pot_model_devi.compute_avg(avg_virial, all_virial_1); + deep_pot_model_devi.compute_std(std_virial, avg_virial, all_virial_1, + 1); + if (out_rel_v == 1) { + deep_pot_model_devi.compute_relative_std(std_virial, avg_virial, + eps_v, 1); + } + for (int ii = 0; ii < 9; ++ii) { + if (std_virial[ii] > all_v_max) { + all_v_max = std_virial[ii]; + } + if (std_virial[ii] < all_v_min) { + all_v_min = std_virial[ii]; + } + all_v_avg += std_virial[ii] * std_virial[ii]; + } + all_v_avg = sqrt(all_v_avg / 9); + } + if (rank == 0) { + all_v_max *= ener_unit_cvt_factor; + all_v_min *= ener_unit_cvt_factor; + all_v_avg *= ener_unit_cvt_factor; + all_f_max *= force_unit_cvt_factor; + all_f_min *= force_unit_cvt_factor; + all_f_avg *= force_unit_cvt_factor; + all_fm_max *= force_unit_cvt_factor; + all_fm_min *= force_unit_cvt_factor; + all_fm_avg *= force_unit_cvt_factor; + fp << setw(12) << update->ntimestep << " " << setw(18) << all_v_max + << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg + << " " << setw(18) << all_f_max << " " << setw(18) << all_f_min + << " " << setw(18) << all_f_avg << " " << setw(18) << all_fm_max + << " " << setw(18) << all_fm_min << " " << setw(18) + << all_fm_avg; + } + if (out_each == 1) { + // need support for spin atomic force. + vector std_f_all(atom->natoms); + // Gather std_f and tags + tagint *tag = atom->tag; + int nprocs = comm->nprocs; + // Grow arrays if necessary + if (atom->natoms > stdf_comm_buff_size) { + stdf_comm_buff_size = atom->natoms; + memory->destroy(stdfsend); + memory->destroy(stdfrecv); + memory->destroy(tagsend); + memory->destroy(tagrecv); + memory->create(stdfsend, stdf_comm_buff_size, "deepmd:stdfsendall"); + memory->create(stdfrecv, stdf_comm_buff_size, "deepmd:stdfrecvall"); + memory->create(tagsend, stdf_comm_buff_size, "deepmd:tagsendall"); + memory->create(tagrecv, stdf_comm_buff_size, "deepmd:tagrecvall"); + } + for (int ii = 0; ii < nlocal; ii++) { + tagsend[ii] = tag[ii]; + stdfsend[ii] = std_f[ii]; + } + MPI_Gather(&nlocal, 1, MPI_INT, counts, 1, MPI_INT, 0, world); + displacements[0] = 0; + for (int ii = 0; ii < nprocs - 1; ii++) { + displacements[ii + 1] = displacements[ii] + counts[ii]; + } + MPI_Gatherv(tagsend, nlocal, MPI_LMP_TAGINT, tagrecv, counts, + displacements, MPI_LMP_TAGINT, 0, world); + MPI_Gatherv(stdfsend, nlocal, MPI_DOUBLE, stdfrecv, counts, + displacements, MPI_DOUBLE, 0, world); + if (rank == 0) { + for (int dd = 0; dd < atom->natoms; ++dd) { + std_f_all[tagrecv[dd] - 1] = stdfrecv[dd] * force_unit_cvt_factor; + } + for (int dd = 0; dd < atom->natoms; ++dd) { + fp << " " << setw(18) << std_f_all[dd]; + } + } + } + if (rank == 0) { + fp << endl; + } + } + } else { + error->all(FLERR, "unknown computational branch"); + } + } else { + if (numb_models == 1) { + try { + deep_pot.compute_spin(dener, dforce, dforce_mag, dvirial, dcoord, + dspin, dtype, dbox); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + } else { + error->all(FLERR, "Serial version does not support model devi"); + } + } + + // get force + // unit_factor = hbar / spin_norm; + const double hbar = 6.5821191e-04; + for (int ii = 0; ii < nall; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + f[ii][dd] += scale[1][1] * dforce[3 * ii + dd] * force_unit_cvt_factor; + fm[ii][dd] += scale[1][1] * dforce_mag[3 * ii + dd] / + (hbar / sp[ii][3]) * force_unit_cvt_factor; + } + } + + std::map().swap(new_idx_map); + std::map().swap(old_idx_map); + // malloc_trim(0); + + // accumulate energy and virial + if (eflag) { + eng_vdwl += scale[1][1] * dener * ener_unit_cvt_factor; + } + if (vflag) { + virial[0] += 1.0 * dvirial[0] * scale[1][1] * ener_unit_cvt_factor; + virial[1] += 1.0 * dvirial[4] * scale[1][1] * ener_unit_cvt_factor; + virial[2] += 1.0 * dvirial[8] * scale[1][1] * ener_unit_cvt_factor; + virial[3] += 1.0 * dvirial[3] * scale[1][1] * ener_unit_cvt_factor; + virial[4] += 1.0 * dvirial[6] * scale[1][1] * ener_unit_cvt_factor; + virial[5] += 1.0 * dvirial[7] * scale[1][1] * ener_unit_cvt_factor; + } +} + +/* ---------------------------------------------------------------------- */ + +int PairDeepSpin::pack_reverse_comm(int n, int first, double *buf) { + int i, m, last; + + m = 0; + last = first + n; + if (!atom->sp_flag) { + std::cout << "Pair style 'deepspin' only supports spin atoms, please use pair style 'deepmd' instead." << std::endl; + } else { + for (i = first; i < last; i++) { + for (int dd = 0; dd < numb_models; ++dd) { + buf[m++] = all_force[dd][3 * i + 0]; + buf[m++] = all_force[dd][3 * i + 1]; + buf[m++] = all_force[dd][3 * i + 2]; + buf[m++] = all_force_mag[dd][3 * i + 0]; + buf[m++] = all_force_mag[dd][3 * i + 1]; + buf[m++] = all_force_mag[dd][3 * i + 2]; + } + } + } + return m; +} + +/* ---------------------------------------------------------------------- */ + +void PairDeepSpin::unpack_reverse_comm(int n, int *list, double *buf) { + int i, j, m; + + m = 0; + if (!atom->sp_flag) { + std::cout << "Pair style 'deepspin' only supports spin atoms, please use pair style 'deepmd' instead." << std::endl; + } else { + for (i = 0; i < n; i++) { + j = list[i]; + for (int dd = 0; dd < numb_models; ++dd) { + all_force[dd][3 * j + 0] += buf[m++]; + all_force[dd][3 * j + 1] += buf[m++]; + all_force[dd][3 * j + 2] += buf[m++]; + all_force_mag[dd][3 * j + 0] += buf[m++]; + all_force_mag[dd][3 * j + 1] += buf[m++]; + all_force_mag[dd][3 * j + 2] += buf[m++]; + } + } + } +} \ No newline at end of file diff --git a/source/lmp/pair_deepspin.h b/source/lmp/pair_deepspin.h new file mode 100644 index 0000000000..c7a29e46e5 --- /dev/null +++ b/source/lmp/pair_deepspin.h @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#ifndef LAMMPS_VERSION_NUMBER +#error Please define LAMMPS_VERSION_NUMBER to yyyymmdd +#endif + +#ifdef PAIR_CLASS + +PairStyle(deepspin, PairDeepSpin) + +#else + +#ifndef LMP_PAIR_NNP_SPIN_H +#define LMP_PAIR_NNP_SPIN_H + +#include "pair_base.h" +#include +#include +#include + +#include "comm_brick.h" +#define FLOAT_PREC double + +namespace LAMMPS_NS { +class CommBrickDeepSpin : public CommBrick { + friend class PairDeepSpin; +}; +class PairDeepSpin : public PairDeepMDBase { + public: + PairDeepSpin(class LAMMPS *); + ~PairDeepSpin() override; + void compute(int, int) override; + int pack_reverse_comm(int, int, double *) override; + void unpack_reverse_comm(int, int *, double *) override; + + private: + CommBrickDeepSpin *commdata_; +}; + +} // namespace LAMMPS_NS + +#endif +#endif From 5a9a0a0b999b351fd51583efb02da9305fe91509 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 31 Oct 2024 12:24:22 +0000 Subject: [PATCH 099/193] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- source/api_cc/include/DeepPotTF.h | 4 +- source/api_cc/src/DeepPotTF.cc | 55 ++++++++++++--------- source/api_cc/tests/test_deeppot_tf_spin.cc | 23 +++++---- source/lmp/tests/test_lammps_spin.py | 38 +++++++------- source/lmp/tests/test_lammps_spin_pt.py | 38 +++++++------- source/lmp/tests/write_lmp_data.py | 16 ++++-- 6 files changed, 91 insertions(+), 83 deletions(-) diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index 4fbbe2f5c3..cd2c376da7 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -399,8 +399,8 @@ class DeepPotTF : public DeepPotBase { template void extend_nlist(std::vector& extend_dcoord, - std::vector& extend_atype, - const std::vector& dcoord_, + std::vector& extend_atype, + const std::vector& dcoord_, const std::vector& dspin_, const std::vector& datype_); diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index 789bd6c35f..f8ad1a5b68 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -714,26 +714,27 @@ void DeepPotTF::compute(ENERGYVTYPE& dener, std::vector dforce_tmp; if (dtype == tensorflow::DT_DOUBLE) { - int ret = session_input_tensors(input_tensors, extend_dcoord, ntypes, - extend_atype, dbox, cell_size, fparam, - aparam, atommap, "", aparam_nall); + int ret = session_input_tensors( + input_tensors, extend_dcoord, ntypes, extend_atype, dbox, cell_size, + fparam, aparam, atommap, "", aparam_nall); if (atomic) { - run_model(dener, dforce_tmp, dvirial, datom_energy_, datom_virial_, - session, input_tensors, atommap, nframes); + run_model(dener, dforce_tmp, dvirial, datom_energy_, + datom_virial_, session, input_tensors, atommap, + nframes); } else { run_model(dener, dforce_tmp, dvirial, session, input_tensors, atommap, nframes); } } else { - int ret = session_input_tensors(input_tensors, extend_dcoord, ntypes, - extend_atype, dbox, cell_size, fparam, - aparam, atommap, "", aparam_nall); + int ret = session_input_tensors( + input_tensors, extend_dcoord, ntypes, extend_atype, dbox, cell_size, + fparam, aparam, atommap, "", aparam_nall); if (atomic) { run_model(dener, dforce_tmp, dvirial, datom_energy_, datom_virial_, session, input_tensors, atommap, nframes); } else { - run_model(dener, dforce_tmp, dvirial, session, input_tensors, atommap, - nframes); + run_model(dener, dforce_tmp, dvirial, session, input_tensors, + atommap, nframes); } } // backward force and mag. @@ -1650,10 +1651,10 @@ template void DeepPotTF::extend( template void DeepPotTF::extend_nlist(std::vector& extend_dcoord, - std::vector& extend_atype, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_) { + std::vector& extend_atype, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_) { if (dtype == tensorflow::DT_DOUBLE) { get_vector(virtual_len, "spin_attr/virtual_len"); get_vector(spin_norm, "spin_attr/spin_norm"); @@ -1667,19 +1668,26 @@ void DeepPotTF::extend_nlist(std::vector& extend_dcoord, int nloc = datype_.size(); int nloc_spin = 0; for (int ii = 0; ii < nloc; ii++) { - if (datype_[ii] < ntypes_spin) nloc_spin += 1; + if (datype_[ii] < ntypes_spin) { + nloc_spin += 1; + } } int extend_nall = nloc + nloc_spin; extend_dcoord.resize(static_cast(extend_nall) * 3); extend_atype.resize(extend_nall); for (int ii = 0; ii < nloc; ii++) { extend_atype[ii] = datype_[ii]; - if (datype_[ii] < ntypes_spin) + if (datype_[ii] < ntypes_spin) { extend_atype[ii + nloc] = datype_[ii] + ntypes - ntypes_spin; + } for (int jj = 0; jj < 3; jj++) { extend_dcoord[ii * 3 + jj] = dcoord_[ii * 3 + jj]; - if (datype_[ii] < ntypes_spin) - extend_dcoord[(ii + nloc) * 3 + jj] = dcoord_[ii * 3 + jj] + dspin_[ii * 3 + jj] / spin_norm[datype_[ii]] * virtual_len[datype_[ii]]; + if (datype_[ii] < ntypes_spin) { + extend_dcoord[(ii + nloc) * 3 + jj] = + dcoord_[ii * 3 + jj] + dspin_[ii * 3 + jj] / + spin_norm[datype_[ii]] * + virtual_len[datype_[ii]]; + } } } } @@ -1691,10 +1699,9 @@ template void DeepPotTF::extend_nlist( const std::vector& dspin_, const std::vector& datype_); -template void DeepPotTF::extend_nlist( - std::vector& extend_dcoord, - std::vector& extend_atype, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_); +template void DeepPotTF::extend_nlist(std::vector& extend_dcoord, + std::vector& extend_atype, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_); #endif diff --git a/source/api_cc/tests/test_deeppot_tf_spin.cc b/source/api_cc/tests/test_deeppot_tf_spin.cc index 246fa0c51a..d15a7ed246 100644 --- a/source/api_cc/tests/test_deeppot_tf_spin.cc +++ b/source/api_cc/tests/test_deeppot_tf_spin.cc @@ -19,22 +19,21 @@ class TestInferDeepPotSpin : public ::testing::Test { std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, - 0., 0., 0., 0., 0., 0.}; + 0., 0., 0., 0., 0., 0.}; std::vector atype = {0, 0, 1, 1}; std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; - std::vector expected_e = { - -7.314365618560289 , -7.313531316181837 , - -2.8980532245013997, -2.897373810282277}; + std::vector expected_e = {-7.314365618560289, -7.313531316181837, + -2.8980532245013997, -2.897373810282277}; std::vector expected_f = { - 0.0275132293555514, -0.0112057401883111, -0.0212278132621243, - -0.0229926640905535, 0.0114378553363334, 0.019670014885563 , - 0.0086502856137601, 0.0088926283192558, -0.0127014507822769, - -0.013170850878758 , -0.009124743467278 , 0.0142592491588383}; + 0.0275132293555514, -0.0112057401883111, -0.0212278132621243, + -0.0229926640905535, 0.0114378553363334, 0.019670014885563, + 0.0086502856137601, 0.0088926283192558, -0.0127014507822769, + -0.013170850878758, -0.009124743467278, 0.0142592491588383}; std::vector expected_fm = { - 0.0066245455049449, -0.0023055088004378, 0.0294608578045521, - -0.0041979452385972, 0.0025775020220167, 0.0316295420619988, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + 0.0066245455049449, -0.0023055088004378, 0.0294608578045521, + -0.0041979452385972, 0.0025775020220167, 0.0316295420619988, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; int natoms; double expected_tot_e; diff --git a/source/lmp/tests/test_lammps_spin.py b/source/lmp/tests/test_lammps_spin.py index 11bf2bc93b..e1877628a5 100644 --- a/source/lmp/tests/test_lammps_spin.py +++ b/source/lmp/tests/test_lammps_spin.py @@ -9,7 +9,6 @@ Path, ) -import constants import numpy as np import pytest from lammps import ( @@ -19,7 +18,9 @@ write_lmp_data_spin, ) -pbtxt_file = Path(__file__).parent.parent.parent / "tests" / "infer" / "deepspin_nlist.pbtxt" +pbtxt_file = ( + Path(__file__).parent.parent.parent / "tests" / "infer" / "deepspin_nlist.pbtxt" +) pbtxt_file2 = ( Path(__file__).parent.parent.parent / "tests" / "infer" / "deepspin_nlist-2.pbtxt" ) @@ -32,46 +33,41 @@ md_file = Path(__file__).parent / "md.out" expected_ae = np.array( - [ - -7.314365618560289 , - -7.313531316181837 , - -2.8980532245013997, - -2.897373810282277 - ] + [-7.314365618560289, -7.313531316181837, -2.8980532245013997, -2.897373810282277] ) expected_e = np.sum(expected_ae) expected_f = np.array( [ [0.0275132293555514, -0.0112057401883111, -0.0212278132621243], - [-0.0229926640905535, 0.0114378553363334, 0.019670014885563], - [0.0086502856137601, 0.0088926283192558, -0.0127014507822769], - [-0.013170850878758 , -0.009124743467278 , 0.0142592491588383] + [-0.0229926640905535, 0.0114378553363334, 0.019670014885563], + [0.0086502856137601, 0.0088926283192558, -0.0127014507822769], + [-0.013170850878758, -0.009124743467278, 0.0142592491588383], ] ) expected_fm = np.array( [ - [0.0066245455049449, -0.0023055088004378, 0.0294608578045521], - [-0.0041979452385972, 0.0025775020220167, 0.0316295420619988], + [0.0066245455049449, -0.0023055088004378, 0.0294608578045521], + [-0.0041979452385972, 0.0025775020220167, 0.0316295420619988], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], - [0.0000000000000000, 0.00000000000000000, 0.00000000000000000] ] ) expected_f2 = np.array( [ - [-0.0009939342103254, 0.0009450997605637, -0.0002710189976979], + [-0.0009939342103254, 0.0009450997605637, -0.0002710189976979], [0.0040364645780618, -0.0008326705633617, -0.000208982833015], - [0.0007716358981262, 0.0018705501216939, -0.002687696295354], - [-0.0038141662658625, -0.0019829793188958, 0.0031676981260669] + [0.0007716358981262, 0.0018705501216939, -0.002687696295354], + [-0.0038141662658625, -0.0019829793188958, 0.0031676981260669], ] ) expected_fm2 = np.array( [ - [0.0021649674715341, -0.0008507073771461, 0.0270620372234819], - [-0.0026523551738949, 0.0013308033074224, 0.0294569107929189], + [0.0021649674715341, -0.0008507073771461, 0.0270620372234819], + [-0.0026523551738949, 0.0013308033074224, 0.0294569107929189], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], - [0.0000000000000000, 0.00000000000000000, 0.00000000000000000] ] ) @@ -250,4 +246,4 @@ def test_pair_deepmd_mpi(balance_args: list): assert md[6] == pytest.approx(np.mean(expected_md_f)) assert md[7] == pytest.approx(np.max(expected_md_fm)) assert md[8] == pytest.approx(np.min(expected_md_fm)) - assert md[9] == pytest.approx(np.mean(expected_md_fm)) \ No newline at end of file + assert md[9] == pytest.approx(np.mean(expected_md_fm)) diff --git a/source/lmp/tests/test_lammps_spin_pt.py b/source/lmp/tests/test_lammps_spin_pt.py index e0a596d2ae..93ec2e58a8 100644 --- a/source/lmp/tests/test_lammps_spin_pt.py +++ b/source/lmp/tests/test_lammps_spin_pt.py @@ -9,7 +9,6 @@ Path, ) -import constants import numpy as np import pytest from lammps import ( @@ -22,7 +21,9 @@ pbtxt_file2 = ( Path(__file__).parent.parent.parent / "tests" / "infer" / "deepspin_nlist-2.pbtxt" ) -pb_file = Path(__file__).parent.parent.parent / "tests" / "infer" / "deeppot_dpa_spin.pth" +pb_file = ( + Path(__file__).parent.parent.parent / "tests" / "infer" / "deeppot_dpa_spin.pth" +) pb_file2 = Path(__file__).parent / "graph2.pb" system_file = Path(__file__).parent.parent.parent / "tests" data_file = Path(__file__).parent / "data.lmp" @@ -31,46 +32,41 @@ md_file = Path(__file__).parent / "md.out" expected_ae = np.array( - [ - -5.449480235829702, - -5.477427268428831, - -5.123857693399778, - -5.177090216511519 - ] + [-5.449480235829702, -5.477427268428831, -5.123857693399778, -5.177090216511519] ) expected_e = np.sum(expected_ae) expected_f = np.array( [ [0.0009801138704236, -0.0463347604851765, -0.0971306357815108], - [-0.1470821855808306, 0.0437825717490265, 0.1068452488480858], - [0.0227539242796509, -0.0733473535079378, 0.1021096625763913], - [0.123348147430756 , 0.0758995422440877, -0.1118242756429664] + [-0.1470821855808306, 0.0437825717490265, 0.1068452488480858], + [0.0227539242796509, -0.0733473535079378, 0.1021096625763913], + [0.123348147430756, 0.0758995422440877, -0.1118242756429664], ] ) expected_fm = np.array( [ - [0.0072488655758703, -0.0111496506342658, 0.018024837587741], - [-0.0469100751121456, 0.0170834549641258, 0.0338904617477562], + [0.0072488655758703, -0.0111496506342658, 0.018024837587741], + [-0.0469100751121456, 0.0170834549641258, 0.0338904617477562], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], - [0.0000000000000000, 0.00000000000000000, 0.00000000000000000] ] ) expected_f2 = np.array( [ - [-0.0009939342103254, 0.0009450997605637, -0.0002710189976979], + [-0.0009939342103254, 0.0009450997605637, -0.0002710189976979], [0.0040364645780618, -0.0008326705633617, -0.000208982833015], - [0.0007716358981262, 0.0018705501216939, -0.002687696295354], - [-0.0038141662658625, -0.0019829793188958, 0.0031676981260669] + [0.0007716358981262, 0.0018705501216939, -0.002687696295354], + [-0.0038141662658625, -0.0019829793188958, 0.0031676981260669], ] ) expected_fm2 = np.array( [ - [0.0021649674715341, -0.0008507073771461, 0.0270620372234819], - [-0.0026523551738949, 0.0013308033074224, 0.0294569107929189], + [0.0021649674715341, -0.0008507073771461, 0.0270620372234819], + [-0.0026523551738949, 0.0013308033074224, 0.0294569107929189], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], - [0.0000000000000000, 0.00000000000000000, 0.00000000000000000] ] ) @@ -246,4 +242,4 @@ def test_pair_deepmd_mpi(balance_args: list): assert md[6] == pytest.approx(np.mean(expected_md_f)) assert md[7] == pytest.approx(np.max(expected_md_fm)) assert md[8] == pytest.approx(np.min(expected_md_fm)) - assert md[9] == pytest.approx(np.mean(expected_md_fm)) \ No newline at end of file + assert md[9] == pytest.approx(np.mean(expected_md_fm)) diff --git a/source/lmp/tests/write_lmp_data.py b/source/lmp/tests/write_lmp_data.py index 10c73c4076..2b64ccfbea 100644 --- a/source/lmp/tests/write_lmp_data.py +++ b/source/lmp/tests/write_lmp_data.py @@ -76,7 +76,7 @@ def write_lmp_data_spin(box, coord, spin, type_list, file_name): ntype = np.unique(type_list).shape[0] sp_norm = np.linalg.norm(spin, axis=1, keepdims=True) sp_norm = np.where(sp_norm == 0, 1, sp_norm) - sp_unit = spin/sp_norm + sp_unit = spin / sp_norm with open(file_name, "w") as f: f.write(comment_lmp_data + "\n") f.write("%d atoms\n" % (natom)) @@ -88,6 +88,16 @@ def write_lmp_data_spin(box, coord, spin, type_list, file_name): for i in range(natom): f.write( "%d %d %.10e %.10e %.10e %.10e %.10e %.10e %.10e\n" - % (i + 1, type_list[i], coord[i][0], coord[i][1], coord[i][2], sp_unit[i][0], sp_unit[i][1], sp_unit[i][2], sp_norm[i][0]) + % ( + i + 1, + type_list[i], + coord[i][0], + coord[i][1], + coord[i][2], + sp_unit[i][0], + sp_unit[i][1], + sp_unit[i][2], + sp_norm[i][0], + ) ) - f.write("\n") \ No newline at end of file + f.write("\n") From 605fb9b43dd176f40096e8cdf2619e810ef790c6 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 31 Oct 2024 20:36:14 +0800 Subject: [PATCH 100/193] Update pair_deepmd.cpp --- source/lmp/pair_deepmd.cpp | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 74514f9759..b50fed5094 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -199,18 +199,7 @@ void PairDeepMD::compute(int eflag, int vflag) { commdata_->nswap, commdata_->sendnum, commdata_->recvnum, commdata_->firstrecv, commdata_->sendlist, commdata_->sendproc, commdata_->recvproc, &world); - lmp_list.set_mask(NEIGHMASK); deepmd_compat::InputNlist extend_lmp_list; - if (atom->sp_flag) { - extend(extend_inum, extend_ilist, extend_numneigh, extend_neigh, - extend_firstneigh, extend_dcoord, extend_dtype, extend_nghost, - new_idx_map, old_idx_map, lmp_list, dcoord, dtype, nghost, dspin, - numb_types, numb_types_spin, virtual_len); - extend_lmp_list = - deepmd_compat::InputNlist(extend_inum, &extend_ilist[0], - &extend_numneigh[0], &extend_firstneigh[0]); - extend_lmp_list.set_mask(NEIGHMASK); - } if (single_model || multi_models_no_mod_devi) { // cvflag_atom is the right flag for the cvatom matrix if (!(eflag_atom || cvflag_atom)) { From 2cc6d8aef3b2c9c6e75a130d273c5595cd2080cb Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 31 Oct 2024 12:37:27 +0000 Subject: [PATCH 101/193] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- source/api_c/src/c_api.cc | 16 +- source/api_cc/include/DeepPot.h | 266 +++++---- source/api_cc/src/DeepPot.cc | 562 +++++++++--------- .../api_cc/tests/test_deeppot_dpa1_pt_spin.cc | 4 +- source/lmp/pair_base.cpp | 16 +- source/lmp/pair_base.h | 13 +- source/lmp/pair_deepmd.cpp | 53 +- source/lmp/pair_deepmd.h | 2 +- source/lmp/pair_deepspin.cpp | 59 +- source/lmp/pair_deepspin.h | 4 +- 10 files changed, 505 insertions(+), 490 deletions(-) diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index 9dae45eb92..992fb8404a 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -289,8 +289,8 @@ inline void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, std::vector e; std::vector f, fm, v, ae, av; - DP_REQUIRES_OK(dp, dp->dp.compute_spin(e, f, fm, v, ae, av, coord_, spin_, atype_, - cell_, fparam_, aparam_)); + DP_REQUIRES_OK(dp, dp->dp.compute_spin(e, f, fm, v, ae, av, coord_, spin_, + atype_, cell_, fparam_, aparam_)); // copy from C++ vectors to C arrays, if not NULL pointer if (energy) { std::copy(e.begin(), e.end(), energy); @@ -487,7 +487,7 @@ inline void DP_DeepPotComputeNList_variant_sp(DP_DeepPot* dp, std::vector f, fm, v, ae, av; DP_REQUIRES_OK( dp, dp->dp.compute_spin(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, - nghost, nlist->nl, ago, fparam_, aparam_)); + nghost, nlist->nl, ago, fparam_, aparam_)); // copy from C++ vectors to C arrays, if not NULL pointer if (energy) { std::copy(e.begin(), e.end(), energy); @@ -893,13 +893,13 @@ void DP_DeepPotModelDeviComputeNList_variant_sp(DP_DeepPotModelDevi* dp, std::vector e; std::vector> f, fm, v, ae, av; if (atomic_energy || atomic_virial) { - DP_REQUIRES_OK( - dp, dp->dp.compute_spin(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, - nghost, nlist->nl, ago, fparam_, aparam_)); + DP_REQUIRES_OK(dp, dp->dp.compute_spin(e, f, fm, v, ae, av, coord_, spin_, + atype_, cell_, nghost, nlist->nl, + ago, fparam_, aparam_)); } else { DP_REQUIRES_OK( - dp, dp->dp.compute_spin(e, f, fm, v, coord_, spin_, atype_, cell_, nghost, - nlist->nl, ago, fparam_, aparam_)); + dp, dp->dp.compute_spin(e, f, fm, v, coord_, spin_, atype_, cell_, + nghost, nlist->nl, ago, fparam_, aparam_)); } // 2D vector to 2D array, flatten first if (energy) { diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index 34a5f530d9..d5f3f7d0da 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -459,27 +459,29 @@ class DeepPot { * @{ **/ template - void compute_spin(ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute_spin( + ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); template - void compute_spin(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute_spin( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** @} */ /** @@ -558,33 +560,35 @@ class DeepPot { * @{ **/ template - void compute_spin(ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute_spin( + ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); template - void compute_spin(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute_spin( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** @} */ /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial @@ -660,31 +664,33 @@ class DeepPot { * @{ **/ template - void compute_spin(ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute_spin( + ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); template - void compute_spin(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute_spin( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** @} */ /** @@ -773,37 +779,39 @@ class DeepPot { * @{ **/ template - void compute_spin(ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute_spin( + ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); template - void compute_spin(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute_spin( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** @} */ /** * @brief Evaluate the energy, force, and virial with the mixed type @@ -1105,19 +1113,20 @@ class DeepPotModelDevi { *same aparam. **/ template - void compute_spin(std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_force_mag, - std::vector >& all_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute_spin( + std::vector& all_ener, + std::vector >& all_force, + std::vector >& all_force_mag, + std::vector >& all_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using these DP models. @@ -1189,21 +1198,22 @@ class DeepPotModelDevi { *same aparam. **/ template - void compute_spin(std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_force_mag, - std::vector >& all_virial, - std::vector >& all_atom_energy, - std::vector >& all_atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute_spin( + std::vector& all_ener, + std::vector >& all_force, + std::vector >& all_force_mag, + std::vector >& all_virial, + std::vector >& all_atom_energy, + std::vector >& all_atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** * @brief Get the cutoff radius. * @return The cutoff radius. diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc index 4afdf6442e..7bad4108ed 100644 --- a/source/api_cc/src/DeepPot.cc +++ b/source/api_cc/src/DeepPot.cc @@ -137,15 +137,15 @@ template void DeepPot::compute(std::vector& dener, // support spin template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { std::vector dener_; std::vector datom_energy_, datom_virial_; dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, @@ -156,15 +156,15 @@ void DeepPot::compute_spin(ENERGYTYPE& dener, template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { std::vector datom_energy_, datom_virial_; dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, @@ -172,48 +172,48 @@ void DeepPot::compute_spin(std::vector& dener, } template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); template void DeepPot::compute(ENERGYTYPE& dener, @@ -302,18 +302,18 @@ template void DeepPot::compute(std::vector& dener, // support spin template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { std::vector dener_; std::vector datom_energy_, datom_virial_; dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, @@ -324,18 +324,18 @@ void DeepPot::compute_spin(ENERGYTYPE& dener, template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { std::vector datom_energy_, datom_virial_; dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, @@ -344,60 +344,60 @@ void DeepPot::compute_spin(std::vector& dener, // nlist, no atomic : nframe * precision template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); template void DeepPot::compute(ENERGYTYPE& dener, @@ -477,17 +477,17 @@ template void DeepPot::compute(std::vector& dener, // support spin template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { std::vector dener_; dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, @@ -496,72 +496,72 @@ void DeepPot::compute_spin(ENERGYTYPE& dener, } template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, true); } template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); template void DeepPot::compute(ENERGYTYPE& dener, @@ -660,20 +660,20 @@ template void DeepPot::compute(std::vector& dener, template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { std::vector dener_; dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, @@ -682,88 +682,88 @@ void DeepPot::compute_spin(ENERGYTYPE& dener, } template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, ago, fparam_, aparam__, true); } // nlist, atomic : nframe * precision template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); // mixed type template @@ -1147,8 +1147,8 @@ void DeepPotModelDevi::compute_spin( all_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { dps[ii].compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], - all_virial[ii], dcoord_, dspin_, datype_, dbox, nghost, - lmp_list, ago, fparam, aparam_); + all_virial[ii], dcoord_, dspin_, datype_, dbox, nghost, + lmp_list, ago, fparam, aparam_); } } @@ -1272,9 +1272,9 @@ void DeepPotModelDevi::compute_spin( all_atom_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { dps[ii].compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], - all_virial[ii], all_atom_energy[ii], all_atom_virial[ii], - dcoord_, dspin_, datype_, dbox, nghost, lmp_list, ago, - fparam, aparam_); + all_virial[ii], all_atom_energy[ii], + all_atom_virial[ii], dcoord_, dspin_, datype_, dbox, + nghost, lmp_list, ago, fparam, aparam_); } } diff --git a/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc b/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc index c2cb01f6a8..4a40dffde2 100644 --- a/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc +++ b/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc @@ -157,8 +157,8 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { deepmd::DeepPot& dp = this->dp; double ener; std::vector force, force_mag, virial, atom_ener, atom_vir; - dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, - atype, box); + dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, + spin, atype, box); EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); diff --git a/source/lmp/pair_base.cpp b/source/lmp/pair_base.cpp index e98a4f09f5..cd3f49eb73 100644 --- a/source/lmp/pair_base.cpp +++ b/source/lmp/pair_base.cpp @@ -282,7 +282,7 @@ void PairDeepMDBase::cum_sum(std::map &sum, std::map &vec) { } } -PairDeepMDBase::PairDeepMDBase(LAMMPS *lmp, const char* cite_user_package) +PairDeepMDBase::PairDeepMDBase(LAMMPS *lmp, const char *cite_user_package) : Pair(lmp) { @@ -838,10 +838,10 @@ void *PairDeepMDBase::extract(const char *str, int &dim) { } void ana_st(double &max, - double &min, - double &sum, - const vector &vec, - const int &nloc) { + double &min, + double &sum, + const vector &vec, + const int &nloc) { if (nloc == 0) { return; } @@ -860,8 +860,8 @@ void ana_st(double &max, } void make_uniform_aparam(vector &daparam, - const vector &aparam, - const int &nlocal) { + const vector &aparam, + const int &nlocal) { unsigned dim_aparam = aparam.size(); daparam.resize(static_cast(dim_aparam) * nlocal); for (int ii = 0; ii < nlocal; ++ii) { @@ -869,4 +869,4 @@ void make_uniform_aparam(vector &daparam, daparam[ii * dim_aparam + jj] = aparam[jj]; } } -} \ No newline at end of file +} diff --git a/source/lmp/pair_base.h b/source/lmp/pair_base.h index 06c7a071d6..68fc4c1bde 100644 --- a/source/lmp/pair_base.h +++ b/source/lmp/pair_base.h @@ -103,14 +103,13 @@ class PairDeepMDBase : public Pair { } // namespace LAMMPS_NS - void make_uniform_aparam(std::vector &daparam, - const std::vector &aparam, - const int &nlocal); + const std::vector &aparam, + const int &nlocal); void ana_st(double &max, - double &min, - double &sum, - const std::vector &vec, - const int &nloc); + double &min, + double &sum, + const std::vector &vec, + const int &nloc); #endif diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index b50fed5094..d05e0df626 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -85,13 +85,12 @@ static const char cite_user_deepmd_package[] = "}\n\n"; PairDeepMD::PairDeepMD(LAMMPS *lmp) - : PairDeepMDBase(lmp, cite_user_deepmd_package) -{ + : PairDeepMDBase(lmp, cite_user_deepmd_package) { // Constructor body can be empty } PairDeepMD::~PairDeepMD() { - // Ensure base class destructor is called + // Ensure base class destructor is called } void PairDeepMD::compute(int eflag, int vflag) { @@ -125,7 +124,9 @@ void PairDeepMD::compute(int eflag, int vflag) { double **sp = atom->sp; double **fm = atom->fm; if (atom->sp_flag) { - std::cout << "Pair style 'deepmd' does not support spin atoms, please use pair style 'deepspin' instead." << std::endl; + std::cout << "Pair style 'deepmd' does not support spin atoms, please use " + "pair style 'deepspin' instead." + << std::endl; } vector dtype(nall); @@ -203,12 +204,12 @@ void PairDeepMD::compute(int eflag, int vflag) { if (single_model || multi_models_no_mod_devi) { // cvflag_atom is the right flag for the cvatom matrix if (!(eflag_atom || cvflag_atom)) { - try { - deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox, - nghost, lmp_list, ago, fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } + try { + deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox, nghost, + lmp_list, ago, fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } } // do atomic energy and virial else { @@ -216,8 +217,7 @@ void PairDeepMD::compute(int eflag, int vflag) { vector dvatom(nall * 9, 0); try { deep_pot.compute(dener, dforce, dvirial, deatom, dvatom, dcoord, - dtype, dbox, nghost, lmp_list, ago, fparam, - daparam); + dtype, dbox, nghost, lmp_list, ago, fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -267,18 +267,18 @@ void PairDeepMD::compute(int eflag, int vflag) { vector> all_atom_virial; if (!(eflag_atom || cvflag_atom)) { try { - deep_pot_model_devi.compute(all_energy, all_force, all_virial, - dcoord, dtype, dbox, nghost, lmp_list, - ago, fparam, daparam); + deep_pot_model_devi.compute(all_energy, all_force, all_virial, dcoord, + dtype, dbox, nghost, lmp_list, ago, + fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } } else { try { deep_pot_model_devi.compute(all_energy, all_force, all_virial, - all_atom_energy, all_atom_virial, - dcoord, dtype, dbox, nghost, lmp_list, - ago, fparam, daparam); + all_atom_energy, all_atom_virial, dcoord, + dtype, dbox, nghost, lmp_list, ago, + fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -404,9 +404,9 @@ void PairDeepMD::compute(int eflag, int vflag) { all_f_min *= force_unit_cvt_factor; all_f_avg *= force_unit_cvt_factor; fp << setw(12) << update->ntimestep << " " << setw(18) << all_v_max - << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg - << " " << setw(18) << all_f_max << " " << setw(18) << all_f_min - << " " << setw(18) << all_f_avg; + << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg + << " " << setw(18) << all_f_max << " " << setw(18) << all_f_min + << " " << setw(18) << all_f_avg; } if (out_each == 1) { // need support for spin atomic force. @@ -474,7 +474,6 @@ void PairDeepMD::compute(int eflag, int vflag) { } } - // accumulate energy and virial if (eflag) { eng_vdwl += scale[1][1] * dener * ener_unit_cvt_factor; @@ -497,7 +496,9 @@ int PairDeepMD::pack_reverse_comm(int n, int first, double *buf) { m = 0; last = first + n; if (atom->sp_flag) { - std::cout << "Pair style 'deepmd' does not support spin atoms, please use pair style 'deepspin' instead." << std::endl; + std::cout << "Pair style 'deepmd' does not support spin atoms, please use " + "pair style 'deepspin' instead." + << std::endl; } else { for (i = first; i < last; i++) { for (int dd = 0; dd < numb_models; ++dd) { @@ -517,7 +518,9 @@ void PairDeepMD::unpack_reverse_comm(int n, int *list, double *buf) { m = 0; if (atom->sp_flag) { - std::cout << "Pair style 'deepmd' does not support spin atoms, please use pair style 'deepspin' instead." << std::endl; + std::cout << "Pair style 'deepmd' does not support spin atoms, please use " + "pair style 'deepspin' instead." + << std::endl; } else { for (i = 0; i < n; i++) { j = list[i]; @@ -528,4 +531,4 @@ void PairDeepMD::unpack_reverse_comm(int n, int *list, double *buf) { } } } -} \ No newline at end of file +} diff --git a/source/lmp/pair_deepmd.h b/source/lmp/pair_deepmd.h index cf97322814..5a9024e3d7 100644 --- a/source/lmp/pair_deepmd.h +++ b/source/lmp/pair_deepmd.h @@ -12,12 +12,12 @@ PairStyle(deepmd, PairDeepMD) #ifndef LMP_PAIR_NNP_H #define LMP_PAIR_NNP_H -#include "pair_base.h" #include #include #include #include "comm_brick.h" +#include "pair_base.h" #define FLOAT_PREC double namespace LAMMPS_NS { diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp index 427bfc012e..226b0e029f 100644 --- a/source/lmp/pair_deepspin.cpp +++ b/source/lmp/pair_deepspin.cpp @@ -85,14 +85,12 @@ static const char cite_user_deepmd_package[] = "}\n\n"; PairDeepSpin::PairDeepSpin(LAMMPS *lmp) - : PairDeepMDBase(lmp, cite_user_deepmd_package) -{ + : PairDeepMDBase(lmp, cite_user_deepmd_package) { // Constructor body can be empty } - PairDeepSpin::~PairDeepSpin() { - // Ensure base class destructor is called + // Ensure base class destructor is called } void PairDeepSpin::compute(int eflag, int vflag) { @@ -134,7 +132,9 @@ void PairDeepSpin::compute(int eflag, int vflag) { } } } else { - std::cout << "Pair style 'deepspin' only supports spin atoms, please use pair style 'deepmd' instead." << std::endl; + std::cout << "Pair style 'deepspin' only supports spin atoms, please use " + "pair style 'deepmd' instead." + << std::endl; } vector dtype(nall); @@ -214,8 +214,8 @@ void PairDeepSpin::compute(int eflag, int vflag) { if (!(eflag_atom || cvflag_atom)) { try { deep_pot.compute_spin(dener, dforce, dforce_mag, dvirial, dcoord, - dspin, dtype, dbox, nghost, lmp_list, ago, - fparam, daparam); + dspin, dtype, dbox, nghost, lmp_list, ago, + fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -225,9 +225,9 @@ void PairDeepSpin::compute(int eflag, int vflag) { vector deatom(nall * 1, 0); vector dvatom(nall * 9, 0); try { - deep_pot.compute_spin(dener, dforce, dforce_mag, dvirial, deatom, dvatom, - dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, - fparam, daparam); + deep_pot.compute_spin(dener, dforce, dforce_mag, dvirial, deatom, + dvatom, dcoord, dspin, dtype, dbox, nghost, + lmp_list, ago, fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -277,18 +277,18 @@ void PairDeepSpin::compute(int eflag, int vflag) { vector> all_atom_virial; if (!(eflag_atom || cvflag_atom)) { try { - deep_pot_model_devi.compute_spin(all_energy, all_force, all_force_mag, - all_virial, dcoord, dspin, dtype, dbox, - nghost, lmp_list, ago, fparam, daparam); + deep_pot_model_devi.compute_spin( + all_energy, all_force, all_force_mag, all_virial, dcoord, dspin, + dtype, dbox, nghost, lmp_list, ago, fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } } else { try { deep_pot_model_devi.compute_spin( - all_energy, all_force, all_force_mag, all_virial, - all_atom_energy, all_atom_virial, dcoord, dspin, dtype, dbox, - nghost, lmp_list, ago, fparam, daparam); + all_energy, all_force, all_force_mag, all_virial, all_atom_energy, + all_atom_virial, dcoord, dspin, dtype, dbox, nghost, lmp_list, + ago, fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -429,11 +429,10 @@ void PairDeepSpin::compute(int eflag, int vflag) { all_fm_min *= force_unit_cvt_factor; all_fm_avg *= force_unit_cvt_factor; fp << setw(12) << update->ntimestep << " " << setw(18) << all_v_max - << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg - << " " << setw(18) << all_f_max << " " << setw(18) << all_f_min - << " " << setw(18) << all_f_avg << " " << setw(18) << all_fm_max - << " " << setw(18) << all_fm_min << " " << setw(18) - << all_fm_avg; + << " " << setw(18) << all_v_min << " " << setw(18) << all_v_avg + << " " << setw(18) << all_f_max << " " << setw(18) << all_f_min + << " " << setw(18) << all_f_avg << " " << setw(18) << all_fm_max + << " " << setw(18) << all_fm_min << " " << setw(18) << all_fm_avg; } if (out_each == 1) { // need support for spin atomic force. @@ -485,8 +484,8 @@ void PairDeepSpin::compute(int eflag, int vflag) { } else { if (numb_models == 1) { try { - deep_pot.compute_spin(dener, dforce, dforce_mag, dvirial, dcoord, - dspin, dtype, dbox); + deep_pot.compute_spin(dener, dforce, dforce_mag, dvirial, dcoord, dspin, + dtype, dbox); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -501,8 +500,8 @@ void PairDeepSpin::compute(int eflag, int vflag) { for (int ii = 0; ii < nall; ++ii) { for (int dd = 0; dd < 3; ++dd) { f[ii][dd] += scale[1][1] * dforce[3 * ii + dd] * force_unit_cvt_factor; - fm[ii][dd] += scale[1][1] * dforce_mag[3 * ii + dd] / - (hbar / sp[ii][3]) * force_unit_cvt_factor; + fm[ii][dd] += scale[1][1] * dforce_mag[3 * ii + dd] / (hbar / sp[ii][3]) * + force_unit_cvt_factor; } } @@ -532,7 +531,9 @@ int PairDeepSpin::pack_reverse_comm(int n, int first, double *buf) { m = 0; last = first + n; if (!atom->sp_flag) { - std::cout << "Pair style 'deepspin' only supports spin atoms, please use pair style 'deepmd' instead." << std::endl; + std::cout << "Pair style 'deepspin' only supports spin atoms, please use " + "pair style 'deepmd' instead." + << std::endl; } else { for (i = first; i < last; i++) { for (int dd = 0; dd < numb_models; ++dd) { @@ -555,7 +556,9 @@ void PairDeepSpin::unpack_reverse_comm(int n, int *list, double *buf) { m = 0; if (!atom->sp_flag) { - std::cout << "Pair style 'deepspin' only supports spin atoms, please use pair style 'deepmd' instead." << std::endl; + std::cout << "Pair style 'deepspin' only supports spin atoms, please use " + "pair style 'deepmd' instead." + << std::endl; } else { for (i = 0; i < n; i++) { j = list[i]; @@ -569,4 +572,4 @@ void PairDeepSpin::unpack_reverse_comm(int n, int *list, double *buf) { } } } -} \ No newline at end of file +} diff --git a/source/lmp/pair_deepspin.h b/source/lmp/pair_deepspin.h index c7a29e46e5..125caa1b9a 100644 --- a/source/lmp/pair_deepspin.h +++ b/source/lmp/pair_deepspin.h @@ -12,12 +12,12 @@ PairStyle(deepspin, PairDeepSpin) #ifndef LMP_PAIR_NNP_SPIN_H #define LMP_PAIR_NNP_SPIN_H -#include "pair_base.h" #include #include #include #include "comm_brick.h" +#include "pair_base.h" #define FLOAT_PREC double namespace LAMMPS_NS { @@ -31,7 +31,7 @@ class PairDeepSpin : public PairDeepMDBase { void compute(int, int) override; int pack_reverse_comm(int, int, double *) override; void unpack_reverse_comm(int, int *, double *) override; - + private: CommBrickDeepSpin *commdata_; }; From a0b79966867593e6a5977384b490039f007071e6 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 31 Oct 2024 20:45:43 +0800 Subject: [PATCH 102/193] Update plugin --- source/lmp/plugin/deepmdplugin.cpp | 11 +++++++++++ source/lmp/tests/test_lammps_spin.py | 6 +++--- source/lmp/tests/test_lammps_spin_pt.py | 6 +++--- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/source/lmp/plugin/deepmdplugin.cpp b/source/lmp/plugin/deepmdplugin.cpp index b7479ad3b5..007d02855b 100644 --- a/source/lmp/plugin/deepmdplugin.cpp +++ b/source/lmp/plugin/deepmdplugin.cpp @@ -7,6 +7,7 @@ #include "fix_dplr.h" #include "lammpsplugin.h" #include "pair_deepmd.h" +#include "pair_deepspin.h" #include "version.h" #if LAMMPS_VERSION_NUMBER >= 20220328 #include "pppm_dplr.h" @@ -15,6 +16,7 @@ using namespace LAMMPS_NS; static Pair *pairdeepmd(LAMMPS *lmp) { return new PairDeepMD(lmp); } +static Pair *pairdeepspin(LAMMPS *lmp) { return new PairDeepSpin(lmp); } static Compute *computedeepmdtensoratom(LAMMPS *lmp, int narg, char **arg) { return new ComputeDeeptensorAtom(lmp, narg, arg); @@ -41,6 +43,15 @@ extern "C" void lammpsplugin_init(void *lmp, void *handle, void *regfunc) { plugin.handle = handle; (*register_plugin)(&plugin, lmp); + plugin.version = LAMMPS_VERSION; + plugin.style = "pair"; + plugin.name = "deepspin"; + plugin.info = "deepspin pair style " STR_GIT_SUMM; + plugin.author = "Duo Zhang"; + plugin.creator.v1 = (lammpsplugin_factory1 *)&pairdeepspin; + plugin.handle = handle; + (*register_plugin)(&plugin, lmp); + plugin.style = "compute"; plugin.name = "deeptensor/atom"; plugin.info = "compute deeptensor/atom " STR_GIT_SUMM; diff --git a/source/lmp/tests/test_lammps_spin.py b/source/lmp/tests/test_lammps_spin.py index e1877628a5..31f5b41c98 100644 --- a/source/lmp/tests/test_lammps_spin.py +++ b/source/lmp/tests/test_lammps_spin.py @@ -139,7 +139,7 @@ def lammps(): def test_pair_deepmd(lammps): - lammps.pair_style(f"deepmd {pb_file.resolve()}") + lammps.pair_style(f"deepspin {pb_file.resolve()}") lammps.pair_coeff("* *") lammps.run(0) assert lammps.eval("pe") == pytest.approx(expected_e) @@ -152,7 +152,7 @@ def test_pair_deepmd(lammps): def test_pair_deepmd_model_devi(lammps): lammps.pair_style( - f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1" + f"deepspin {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1" ) lammps.pair_coeff("* *") lammps.run(0) @@ -176,7 +176,7 @@ def test_pair_deepmd_model_devi(lammps): def test_pair_deepmd_model_devi_atomic_relative(lammps): relative = 1.0 lammps.pair_style( - f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative {relative}" + f"deepspin {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative {relative}" ) lammps.pair_coeff("* *") lammps.run(0) diff --git a/source/lmp/tests/test_lammps_spin_pt.py b/source/lmp/tests/test_lammps_spin_pt.py index 93ec2e58a8..e215358d59 100644 --- a/source/lmp/tests/test_lammps_spin_pt.py +++ b/source/lmp/tests/test_lammps_spin_pt.py @@ -135,7 +135,7 @@ def lammps(): def test_pair_deepmd(lammps): - lammps.pair_style(f"deepmd {pb_file.resolve()}") + lammps.pair_style(f"deepspin {pb_file.resolve()}") lammps.pair_coeff("* *") lammps.run(0) assert lammps.eval("pe") == pytest.approx(expected_e) @@ -148,7 +148,7 @@ def test_pair_deepmd(lammps): def test_pair_deepmd_model_devi(lammps): lammps.pair_style( - f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1" + f"deepspin {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1" ) lammps.pair_coeff("* *") lammps.run(0) @@ -172,7 +172,7 @@ def test_pair_deepmd_model_devi(lammps): def test_pair_deepmd_model_devi_atomic_relative(lammps): relative = 1.0 lammps.pair_style( - f"deepmd {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative {relative}" + f"deepspin {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative {relative}" ) lammps.pair_coeff("* *") lammps.run(0) From 0cee8872dd3f99d0c3b438bfba09c9cca4917e4a Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 31 Oct 2024 09:05:33 -0400 Subject: [PATCH 103/193] fix(lmp): update print_summary (#4271) 1. Remove out-of-date float prec; 2. Include PyTorch libraries and include directories. ## Summary by CodeRabbit - **New Features** - Updated output messages for build information to enhance clarity, transitioning from TensorFlow-specific references to backend-oriented configurations. - **Bug Fixes** - Improved handling of backend include directories and library paths for better compatibility. - **Documentation** - Enhanced clarity in build information outputs related to backend configurations. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- source/lmp/deepmd_version.h.in | 8 ++++---- source/lmp/pair_deepmd.cpp | 6 ++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/source/lmp/deepmd_version.h.in b/source/lmp/deepmd_version.h.in index 4b99bc7c33..0b74491778 100644 --- a/source/lmp/deepmd_version.h.in +++ b/source/lmp/deepmd_version.h.in @@ -3,8 +3,8 @@ #define GIT_BRANCH @GIT_BRANCH@ #define GIT_DATE @GIT_DATE@ #define DEEPMD_ROOT @CMAKE_INSTALL_PREFIX@ -#define TensorFlow_INCLUDE_DIRS @TensorFlow_INCLUDE_DIRS@ -#define TensorFlow_LIBRARY @TensorFlow_LIBRARY@ +#define BACKEND_INCLUDE_DIRS @BACKEND_INCLUDE_DIRS@ +#define BACKEND_LIBRARY_PATH @BACKEND_LIBRARY_PATH@ #define DPMD_CVT_STR(...) #__VA_ARGS__ #define DPMD_CVT_ASSTR(X) DPMD_CVT_STR(X) #define STR_GIT_SUMM DPMD_CVT_ASSTR(GIT_SUMM) @@ -13,5 +13,5 @@ #define STR_GIT_DATE DPMD_CVT_ASSTR(GIT_DATE) #define STR_FLOAT_PREC DPMD_CVT_ASSTR(FLOAT_PREC) #define STR_DEEPMD_ROOT DPMD_CVT_ASSTR(DEEPMD_ROOT) -#define STR_TensorFlow_INCLUDE_DIRS DPMD_CVT_ASSTR(TensorFlow_INCLUDE_DIRS) -#define STR_TensorFlow_LIBRARY DPMD_CVT_ASSTR(TensorFlow_LIBRARY) +#define STR_BACKEND_INCLUDE_DIRS DPMD_CVT_ASSTR(BACKEND_INCLUDE_DIRS) +#define STR_BACKEND_LIBRARY_PATH DPMD_CVT_ASSTR(BACKEND_LIBRARY_PATH) diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 09d97fe460..d741814aa5 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -437,10 +437,8 @@ void PairDeepMD::print_summary(const string pre) const { cout << pre << "source branch: " << STR_GIT_BRANCH << endl; cout << pre << "source commit: " << STR_GIT_HASH << endl; cout << pre << "source commit at: " << STR_GIT_DATE << endl; - cout << pre << "build float prec: " << STR_FLOAT_PREC << endl; - cout << pre << "build with tf inc: " << STR_TensorFlow_INCLUDE_DIRS - << endl; - cout << pre << "build with tf lib: " << STR_TensorFlow_LIBRARY << endl; + cout << pre << "build with inc: " << STR_BACKEND_INCLUDE_DIRS << endl; + cout << pre << "build with lib: " << STR_BACKEND_LIBRARY_PATH << endl; std::cout.rdbuf(sbuf); utils::logmesg(lmp, buffer.str()); From 737f7c8bb77a1a32f76f1f2d72d7099a95db2fc7 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 31 Oct 2024 09:11:43 -0400 Subject: [PATCH 104/193] feat(jax/array-api): hybrid descriptor (#4275) ## Summary by CodeRabbit - **New Features** - Introduced support for the JAX backend in the hybrid descriptor framework. - Added a new `DescrptHybrid` class with specialized attribute handling. - Enhanced testing framework to support additional backends, including JAX and strict array API. - **Bug Fixes** - Improved attribute handling in multiple descriptor classes to ensure proper deserialization and registration. - **Documentation** - Updated documentation to reflect the addition of JAX as a supported backend for hybrid descriptors. --------- Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/descriptor/hybrid.py | 13 ++++--- deepmd/jax/descriptor/__init__.py | 4 +++ deepmd/jax/descriptor/hybrid.py | 26 ++++++++++++++ doc/model/train-hybrid.md | 4 +-- .../array_api_strict/descriptor/__init__.py | 19 ++++++++++ .../descriptor/base_descriptor.py | 11 ++++++ .../tests/array_api_strict/descriptor/dpa1.py | 5 +++ .../array_api_strict/descriptor/hybrid.py | 24 +++++++++++++ .../array_api_strict/descriptor/se_e2_a.py | 5 +++ .../array_api_strict/descriptor/se_e2_r.py | 5 +++ .../consistent/descriptor/test_hybrid.py | 35 +++++++++++++++++++ 11 files changed, 144 insertions(+), 7 deletions(-) create mode 100644 deepmd/jax/descriptor/hybrid.py create mode 100644 source/tests/array_api_strict/descriptor/base_descriptor.py create mode 100644 source/tests/array_api_strict/descriptor/hybrid.py diff --git a/deepmd/dpmodel/descriptor/hybrid.py b/deepmd/dpmodel/descriptor/hybrid.py index 4eb14f29cf..0d89902e4a 100644 --- a/deepmd/dpmodel/descriptor/hybrid.py +++ b/deepmd/dpmodel/descriptor/hybrid.py @@ -6,6 +6,7 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel.common import ( @@ -66,7 +67,7 @@ def __init__( ), f"number of atom types in {ii}th descriptor {self.descrpt_list[0].__class__.__name__} does not match others" # if hybrid sel is larger than sub sel, the nlist needs to be cut for each type hybrid_sel = self.get_sel() - self.nlist_cut_idx: list[np.ndarray] = [] + nlist_cut_idx: list[np.ndarray] = [] if self.mixed_types() and not all( descrpt.mixed_types() for descrpt in self.descrpt_list ): @@ -92,7 +93,8 @@ def __init__( cut_idx = np.concatenate( [range(ss, ee) for ss, ee in zip(start_idx, end_idx)] ) - self.nlist_cut_idx.append(cut_idx) + nlist_cut_idx.append(cut_idx) + self.nlist_cut_idx = nlist_cut_idx def get_rcut(self) -> float: """Returns the cut-off radius.""" @@ -242,6 +244,7 @@ def call( sw The smooth switch function. """ + xp = array_api_compat.array_namespace(coord_ext, atype_ext, nlist) out_descriptor = [] out_gr = [] out_g2 = None @@ -258,7 +261,7 @@ def call( for descrpt, nci in zip(self.descrpt_list, self.nlist_cut_idx): # cut the nlist to the correct length if self.mixed_types() == descrpt.mixed_types(): - nl = nlist[:, :, nci] + nl = xp.take(nlist, nci, axis=2) else: # mixed_types is True, but descrpt.mixed_types is False assert nl_distinguish_types is not None @@ -268,8 +271,8 @@ def call( if gr is not None: out_gr.append(gr) - out_descriptor = np.concatenate(out_descriptor, axis=-1) - out_gr = np.concatenate(out_gr, axis=-2) if out_gr else None + out_descriptor = xp.concat(out_descriptor, axis=-1) + out_gr = xp.concat(out_gr, axis=-2) if out_gr else None return out_descriptor, out_gr, out_g2, out_h2, out_sw @classmethod diff --git a/deepmd/jax/descriptor/__init__.py b/deepmd/jax/descriptor/__init__.py index 3ed096f9c1..cabee5a189 100644 --- a/deepmd/jax/descriptor/__init__.py +++ b/deepmd/jax/descriptor/__init__.py @@ -2,6 +2,9 @@ from deepmd.jax.descriptor.dpa1 import ( DescrptDPA1, ) +from deepmd.jax.descriptor.hybrid import ( + DescrptHybrid, +) from deepmd.jax.descriptor.se_e2_a import ( DescrptSeA, ) @@ -13,4 +16,5 @@ "DescrptSeA", "DescrptSeR", "DescrptDPA1", + "DescrptHybrid", ] diff --git a/deepmd/jax/descriptor/hybrid.py b/deepmd/jax/descriptor/hybrid.py new file mode 100644 index 0000000000..20fc5f838b --- /dev/null +++ b/deepmd/jax/descriptor/hybrid.py @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.hybrid import DescrptHybrid as DescrptHybridDP +from deepmd.jax.common import ( + ArrayAPIVariable, + flax_module, + to_jax_array, +) +from deepmd.jax.descriptor.base_descriptor import ( + BaseDescriptor, +) + + +@BaseDescriptor.register("hybrid") +@flax_module +class DescrptHybrid(DescrptHybridDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"nlist_cut_idx"}: + value = [ArrayAPIVariable(to_jax_array(vv)) for vv in value] + elif name in {"descrpt_list"}: + value = [BaseDescriptor.deserialize(vv.serialize()) for vv in value] + + return super().__setattr__(name, value) diff --git a/doc/model/train-hybrid.md b/doc/model/train-hybrid.md index 1219d208a7..da3b40487b 100644 --- a/doc/model/train-hybrid.md +++ b/doc/model/train-hybrid.md @@ -1,7 +1,7 @@ -# Descriptor `"hybrid"` {{ tensorflow_icon }} {{ pytorch_icon }} {{ dpmodel_icon }} +# Descriptor `"hybrid"` {{ tensorflow_icon }} {{ pytorch_icon }} {{ jax_icon }} {{ dpmodel_icon }} :::{note} -**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, DP {{ dpmodel_icon }} +**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: This descriptor hybridizes multiple descriptors to form a new descriptor. For example, we have a list of descriptors denoted by $\mathcal D_1$, $\mathcal D_2$, ..., $\mathcal D_N$, the hybrid descriptor this the concatenation of the list, i.e. $\mathcal D = (\mathcal D_1, \mathcal D_2, \cdots, \mathcal D_N)$. diff --git a/source/tests/array_api_strict/descriptor/__init__.py b/source/tests/array_api_strict/descriptor/__init__.py index 6ceb116d85..5667fed858 100644 --- a/source/tests/array_api_strict/descriptor/__init__.py +++ b/source/tests/array_api_strict/descriptor/__init__.py @@ -1 +1,20 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from .dpa1 import ( + DescrptDPA1, +) +from .hybrid import ( + DescrptHybrid, +) +from .se_e2_a import ( + DescrptSeA, +) +from .se_e2_r import ( + DescrptSeR, +) + +__all__ = [ + "DescrptSeA", + "DescrptSeR", + "DescrptDPA1", + "DescrptHybrid", +] diff --git a/source/tests/array_api_strict/descriptor/base_descriptor.py b/source/tests/array_api_strict/descriptor/base_descriptor.py new file mode 100644 index 0000000000..2a31895f55 --- /dev/null +++ b/source/tests/array_api_strict/descriptor/base_descriptor.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.make_base_descriptor import ( + make_base_descriptor, +) + +# no type annotations standard in array api +BaseDescriptor = make_base_descriptor(Any) diff --git a/source/tests/array_api_strict/descriptor/dpa1.py b/source/tests/array_api_strict/descriptor/dpa1.py index ebd688e303..d14444f269 100644 --- a/source/tests/array_api_strict/descriptor/dpa1.py +++ b/source/tests/array_api_strict/descriptor/dpa1.py @@ -27,6 +27,9 @@ from ..utils.type_embed import ( TypeEmbedNet, ) +from .base_descriptor import ( + BaseDescriptor, +) class GatedAttentionLayer(GatedAttentionLayerDP): @@ -72,6 +75,8 @@ def __setattr__(self, name: str, value: Any) -> None: return super().__setattr__(name, value) +@BaseDescriptor.register("dpa1") +@BaseDescriptor.register("se_atten") class DescrptDPA1(DescrptDPA1DP): def __setattr__(self, name: str, value: Any) -> None: if name == "se_atten": diff --git a/source/tests/array_api_strict/descriptor/hybrid.py b/source/tests/array_api_strict/descriptor/hybrid.py new file mode 100644 index 0000000000..aaaa24ed6b --- /dev/null +++ b/source/tests/array_api_strict/descriptor/hybrid.py @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.hybrid import DescrptHybrid as DescrptHybridDP + +from ..common import ( + to_array_api_strict_array, +) +from .base_descriptor import ( + BaseDescriptor, +) + + +@BaseDescriptor.register("hybrid") +class DescrptHybrid(DescrptHybridDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"nlist_cut_idx"}: + value = [to_array_api_strict_array(vv) for vv in value] + elif name in {"descrpt_list"}: + value = [BaseDescriptor.deserialize(vv.serialize()) for vv in value] + + return super().__setattr__(name, value) diff --git a/source/tests/array_api_strict/descriptor/se_e2_a.py b/source/tests/array_api_strict/descriptor/se_e2_a.py index 654b9f8925..17da2aafbf 100644 --- a/source/tests/array_api_strict/descriptor/se_e2_a.py +++ b/source/tests/array_api_strict/descriptor/se_e2_a.py @@ -14,8 +14,13 @@ from ..utils.network import ( NetworkCollection, ) +from .base_descriptor import ( + BaseDescriptor, +) +@BaseDescriptor.register("se_e2_a") +@BaseDescriptor.register("se_a") class DescrptSeA(DescrptSeADP): def __setattr__(self, name: str, value: Any) -> None: if name in {"dstd", "davg"}: diff --git a/source/tests/array_api_strict/descriptor/se_e2_r.py b/source/tests/array_api_strict/descriptor/se_e2_r.py index 839e536cea..b499f4c4c9 100644 --- a/source/tests/array_api_strict/descriptor/se_e2_r.py +++ b/source/tests/array_api_strict/descriptor/se_e2_r.py @@ -14,8 +14,13 @@ from ..utils.network import ( NetworkCollection, ) +from .base_descriptor import ( + BaseDescriptor, +) +@BaseDescriptor.register("se_e2_r") +@BaseDescriptor.register("se_r") class DescrptSeR(DescrptSeRDP): def __setattr__(self, name: str, value: Any) -> None: if name in {"dstd", "davg"}: diff --git a/source/tests/consistent/descriptor/test_hybrid.py b/source/tests/consistent/descriptor/test_hybrid.py index cd52eea5be..c43652b498 100644 --- a/source/tests/consistent/descriptor/test_hybrid.py +++ b/source/tests/consistent/descriptor/test_hybrid.py @@ -12,6 +12,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -28,6 +30,16 @@ from deepmd.tf.descriptor.hybrid import DescrptHybrid as DescrptHybridTF else: DescrptHybridTF = None +if INSTALLED_JAX: + from deepmd.jax.descriptor.hybrid import DescrptHybrid as DescrptHybridJAX +else: + DescrptHybridJAX = None +if INSTALLED_ARRAY_API_STRICT: + from ...array_api_strict.descriptor.hybrid import ( + DescrptHybrid as DescrptHybridStrict, + ) +else: + DescrptHybridStrict = None from deepmd.utils.argcheck import ( descrpt_hybrid_args, ) @@ -68,8 +80,13 @@ def data(self) -> dict: tf_class = DescrptHybridTF dp_class = DescrptHybridDP pt_class = DescrptHybridPT + jax_class = DescrptHybridJAX + array_api_strict_class = DescrptHybridStrict args = descrpt_hybrid_args() + skip_jax = not INSTALLED_JAX + skip_array_api_strict = not INSTALLED_ARRAY_API_STRICT + def setUp(self): CommonTest.setUp(self) @@ -132,5 +149,23 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + return self.eval_array_api_strict_descriptor( + array_api_strict_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + def eval_jax(self, jax_obj: Any) -> Any: + return self.eval_jax_descriptor( + jax_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) From 8e27d2f65a4b4064fc9b8b34c603324e1eee1872 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 31 Oct 2024 09:13:15 -0400 Subject: [PATCH 105/193] feat(jax/array-api): dipole/polarizability fitting (#4278) ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced `DipoleFittingNet` and `PolarFittingNet` classes for enhanced fitting functionality. - Expanded support for JAX as a backend for fitting tensors, alongside existing TensorFlow and PyTorch support. - **Bug Fixes** - Improved error handling and parameter validation in the `DipoleFitting` and `PolarFitting` classes. - **Documentation** - Updated documentation to reflect JAX as a supported backend for fitting tensors. - **Tests** - Enhanced testing framework to support evaluations with JAX and Array API Strict, including new test methods and properties. Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/fitting/dipole_fitting.py | 10 ++- .../dpmodel/fitting/polarizability_fitting.py | 71 +++++++++++-------- deepmd/jax/fitting/__init__.py | 4 ++ deepmd/jax/fitting/fitting.py | 27 +++++++ doc/model/train-fitting-tensor.md | 4 +- .../tests/array_api_strict/fitting/fitting.py | 21 ++++++ .../tests/consistent/fitting/test_dipole.py | 41 +++++++++++ source/tests/consistent/fitting/test_polar.py | 41 +++++++++++ 8 files changed, 184 insertions(+), 35 deletions(-) diff --git a/deepmd/dpmodel/fitting/dipole_fitting.py b/deepmd/dpmodel/fitting/dipole_fitting.py index 01bd60c777..cecba865d0 100644 --- a/deepmd/dpmodel/fitting/dipole_fitting.py +++ b/deepmd/dpmodel/fitting/dipole_fitting.py @@ -6,6 +6,7 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel import ( @@ -207,6 +208,7 @@ def call( The atomic parameter. shape: nf x nloc x nap. nap being `numb_aparam` """ + xp = array_api_compat.array_namespace(descriptor, atype) nframes, nloc, _ = descriptor.shape assert gr is not None, "Must provide the rotation matrix for dipole fitting." # (nframes, nloc, m1) @@ -214,9 +216,11 @@ def call( self.var_name ] # (nframes * nloc, 1, m1) - out = out.reshape(-1, 1, self.embedding_width) + out = xp.reshape(out, (-1, 1, self.embedding_width)) # (nframes * nloc, m1, 3) - gr = gr.reshape(nframes * nloc, -1, 3) + gr = xp.reshape(gr, (nframes * nloc, -1, 3)) # (nframes, nloc, 3) - out = np.einsum("bim,bmj->bij", out, gr).squeeze(-2).reshape(nframes, nloc, 3) + # out = np.einsum("bim,bmj->bij", out, gr).squeeze(-2).reshape(nframes, nloc, 3) + out = out @ gr + out = xp.reshape(out, (nframes, nloc, 3)) return {self.var_name: out} diff --git a/deepmd/dpmodel/fitting/polarizability_fitting.py b/deepmd/dpmodel/fitting/polarizability_fitting.py index 2d96eec580..b972b45971 100644 --- a/deepmd/dpmodel/fitting/polarizability_fitting.py +++ b/deepmd/dpmodel/fitting/polarizability_fitting.py @@ -6,6 +6,7 @@ Union, ) +import array_api_compat import numpy as np from deepmd.common import ( @@ -14,6 +15,9 @@ from deepmd.dpmodel import ( DEFAULT_PRECISION, ) +from deepmd.dpmodel.common import ( + to_numpy_array, +) from deepmd.dpmodel.fitting.base_fitting import ( BaseFitting, ) @@ -124,23 +128,18 @@ def __init__( self.embedding_width = embedding_width self.fit_diag = fit_diag - self.scale = scale - if self.scale is None: - self.scale = [1.0 for _ in range(ntypes)] + if scale is None: + scale = [1.0 for _ in range(ntypes)] else: - if isinstance(self.scale, list): - assert ( - len(self.scale) == ntypes - ), "Scale should be a list of length ntypes." - elif isinstance(self.scale, float): - self.scale = [self.scale for _ in range(ntypes)] + if isinstance(scale, list): + assert len(scale) == ntypes, "Scale should be a list of length ntypes." + elif isinstance(scale, float): + scale = [scale for _ in range(ntypes)] else: raise ValueError( "Scale must be a list of float of length ntypes or a float." ) - self.scale = np.array(self.scale, dtype=GLOBAL_NP_FLOAT_PRECISION).reshape( - ntypes, 1 - ) + self.scale = np.array(scale, dtype=GLOBAL_NP_FLOAT_PRECISION).reshape(ntypes, 1) self.shift_diag = shift_diag self.constant_matrix = np.zeros(ntypes, dtype=GLOBAL_NP_FLOAT_PRECISION) super().__init__( @@ -192,8 +191,8 @@ def serialize(self) -> dict: data["embedding_width"] = self.embedding_width data["fit_diag"] = self.fit_diag data["shift_diag"] = self.shift_diag - data["@variables"]["scale"] = self.scale - data["@variables"]["constant_matrix"] = self.constant_matrix + data["@variables"]["scale"] = to_numpy_array(self.scale) + data["@variables"]["constant_matrix"] = to_numpy_array(self.constant_matrix) return data @classmethod @@ -276,6 +275,7 @@ def call( The atomic parameter. shape: nf x nloc x nap. nap being `numb_aparam` """ + xp = array_api_compat.array_namespace(descriptor, atype) nframes, nloc, _ = descriptor.shape assert ( gr is not None @@ -284,28 +284,39 @@ def call( out = self._call_common(descriptor, atype, gr, g2, h2, fparam, aparam)[ self.var_name ] - out = out * self.scale[atype] + # out = out * self.scale[atype, ...] + scale_atype = xp.reshape( + xp.take(self.scale, xp.reshape(atype, [-1]), axis=0), (*atype.shape, 1) + ) + out = out * scale_atype # (nframes * nloc, m1, 3) - gr = gr.reshape(nframes * nloc, -1, 3) + gr = xp.reshape(gr, (nframes * nloc, -1, 3)) if self.fit_diag: - out = out.reshape(-1, self.embedding_width) - out = np.einsum("ij,ijk->ijk", out, gr) + out = xp.reshape(out, (-1, self.embedding_width)) + # out = np.einsum("ij,ijk->ijk", out, gr) + out = out[:, :, None] * gr else: - out = out.reshape(-1, self.embedding_width, self.embedding_width) - out = (out + np.transpose(out, axes=(0, 2, 1))) / 2 - out = np.einsum("bim,bmj->bij", out, gr) # (nframes * nloc, m1, 3) - out = np.einsum( - "bim,bmj->bij", np.transpose(gr, axes=(0, 2, 1)), out - ) # (nframes * nloc, 3, 3) - out = out.reshape(nframes, nloc, 3, 3) + out = xp.reshape(out, (-1, self.embedding_width, self.embedding_width)) + out = (out + xp.matrix_transpose(out)) / 2 + # out = np.einsum("bim,bmj->bij", out, gr) # (nframes * nloc, m1, 3) + out = out @ gr + # out = np.einsum( + # "bim,bmj->bij", np.transpose(gr, axes=(0, 2, 1)), out + # ) # (nframes * nloc, 3, 3) + out = xp.matrix_transpose(gr) @ out + out = xp.reshape(out, (nframes, nloc, 3, 3)) if self.shift_diag: - bias = self.constant_matrix[atype] + # bias = self.constant_matrix[atype] + bias = xp.reshape( + xp.take(self.constant_matrix, xp.reshape(atype, [-1]), axis=0), + (nframes, nloc), + ) # (nframes, nloc, 1) - bias = np.expand_dims(bias, axis=-1) * self.scale[atype] - eye = np.eye(3, dtype=descriptor.dtype) - eye = np.tile(eye, (nframes, nloc, 1, 1)) + bias = bias[..., None] * scale_atype + eye = xp.eye(3, dtype=descriptor.dtype) + eye = xp.tile(eye, (nframes, nloc, 1, 1)) # (nframes, nloc, 3, 3) - bias = np.expand_dims(bias, axis=-1) * eye + bias = bias[..., None] * eye out = out + bias return {"polarizability": out} diff --git a/deepmd/jax/fitting/__init__.py b/deepmd/jax/fitting/__init__.py index e72314dcab..226a6d5b43 100644 --- a/deepmd/jax/fitting/__init__.py +++ b/deepmd/jax/fitting/__init__.py @@ -1,10 +1,14 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from deepmd.jax.fitting.fitting import ( + DipoleFittingNet, DOSFittingNet, EnergyFittingNet, + PolarFittingNet, ) __all__ = [ "EnergyFittingNet", "DOSFittingNet", + "DipoleFittingNet", + "PolarFittingNet", ] diff --git a/deepmd/jax/fitting/fitting.py b/deepmd/jax/fitting/fitting.py index cef1f667b3..2a6186ac46 100644 --- a/deepmd/jax/fitting/fitting.py +++ b/deepmd/jax/fitting/fitting.py @@ -3,8 +3,12 @@ Any, ) +from deepmd.dpmodel.fitting.dipole_fitting import DipoleFitting as DipoleFittingNetDP from deepmd.dpmodel.fitting.dos_fitting import DOSFittingNet as DOSFittingNetDP from deepmd.dpmodel.fitting.ener_fitting import EnergyFittingNet as EnergyFittingNetDP +from deepmd.dpmodel.fitting.polarizability_fitting import ( + PolarFitting as PolarFittingNetDP, +) from deepmd.jax.common import ( ArrayAPIVariable, flax_module, @@ -53,3 +57,26 @@ class DOSFittingNet(DOSFittingNetDP): def __setattr__(self, name: str, value: Any) -> None: value = setattr_for_general_fitting(name, value) return super().__setattr__(name, value) + + +@BaseFitting.register("dipole") +@flax_module +class DipoleFittingNet(DipoleFittingNetDP): + def __setattr__(self, name: str, value: Any) -> None: + value = setattr_for_general_fitting(name, value) + return super().__setattr__(name, value) + + +@BaseFitting.register("polar") +@flax_module +class PolarFittingNet(PolarFittingNetDP): + def __setattr__(self, name: str, value: Any) -> None: + value = setattr_for_general_fitting(name, value) + if name in { + "scale", + "constant_matrix", + }: + value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) + return super().__setattr__(name, value) diff --git a/doc/model/train-fitting-tensor.md b/doc/model/train-fitting-tensor.md index c6b54c69ef..d4d546eccf 100644 --- a/doc/model/train-fitting-tensor.md +++ b/doc/model/train-fitting-tensor.md @@ -1,7 +1,7 @@ -# Fit `tensor` like `Dipole` and `Polarizability` {{ tensorflow_icon }} {{ pytorch_icon }} {{ dpmodel_icon }} +# Fit `tensor` like `Dipole` and `Polarizability` {{ tensorflow_icon }} {{ pytorch_icon }} {{ jax_icon }} {{ dpmodel_icon }} :::{note} -**Supported backends**: TensorFlow {{ tensorflow_icon }} {{ pytorch_icon }} {{ dpmodel_icon }} +**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: Unlike `energy`, which is a scalar, one may want to fit some high dimensional physical quantity, like `dipole` (vector) and `polarizability` (matrix, shorted as `polar`). Deep Potential has provided different APIs to do this. In this example, we will show you how to train a model to fit a water system. A complete training input script of the examples can be found in diff --git a/source/tests/array_api_strict/fitting/fitting.py b/source/tests/array_api_strict/fitting/fitting.py index 8b65320203..5a2bd9c58f 100644 --- a/source/tests/array_api_strict/fitting/fitting.py +++ b/source/tests/array_api_strict/fitting/fitting.py @@ -3,8 +3,12 @@ Any, ) +from deepmd.dpmodel.fitting.dipole_fitting import DipoleFitting as DipoleFittingNetDP from deepmd.dpmodel.fitting.dos_fitting import DOSFittingNet as DOSFittingNetDP from deepmd.dpmodel.fitting.ener_fitting import EnergyFittingNet as EnergyFittingNetDP +from deepmd.dpmodel.fitting.polarizability_fitting import ( + PolarFitting as PolarFittingNetDP, +) from ..common import ( to_array_api_strict_array, @@ -43,3 +47,20 @@ class DOSFittingNet(DOSFittingNetDP): def __setattr__(self, name: str, value: Any) -> None: value = setattr_for_general_fitting(name, value) return super().__setattr__(name, value) + + +class DipoleFittingNet(DipoleFittingNetDP): + def __setattr__(self, name: str, value: Any) -> None: + value = setattr_for_general_fitting(name, value) + return super().__setattr__(name, value) + + +class PolarFittingNet(PolarFittingNetDP): + def __setattr__(self, name: str, value: Any) -> None: + value = setattr_for_general_fitting(name, value) + if name in { + "scale", + "constant_matrix", + }: + value = to_array_api_strict_array(value) + return super().__setattr__(name, value) diff --git a/source/tests/consistent/fitting/test_dipole.py b/source/tests/consistent/fitting/test_dipole.py index 5d7be1b0e5..55d6c44c34 100644 --- a/source/tests/consistent/fitting/test_dipole.py +++ b/source/tests/consistent/fitting/test_dipole.py @@ -12,6 +12,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -32,6 +34,21 @@ from deepmd.tf.fit.dipole import DipoleFittingSeA as DipoleFittingTF else: DipoleFittingTF = object +if INSTALLED_JAX: + from deepmd.jax.env import ( + jnp, + ) + from deepmd.jax.fitting.fitting import DipoleFittingNet as DipoleFittingJAX +else: + DipoleFittingJAX = object +if INSTALLED_ARRAY_API_STRICT: + import array_api_strict + + from ...array_api_strict.fitting.fitting import ( + DipoleFittingNet as DipoleFittingArrayAPIStrict, + ) +else: + DipoleFittingArrayAPIStrict = object from deepmd.utils.argcheck import ( fitting_dipole, ) @@ -69,7 +86,11 @@ def skip_pt(self) -> bool: tf_class = DipoleFittingTF dp_class = DipoleFittingDP pt_class = DipoleFittingPT + jax_class = DipoleFittingJAX + array_api_strict_class = DipoleFittingArrayAPIStrict args = fitting_dipole() + skip_jax = not INSTALLED_JAX + skip_array_api_strict = not INSTALLED_ARRAY_API_STRICT def setUp(self): CommonTest.setUp(self) @@ -143,6 +164,26 @@ def eval_dp(self, dp_obj: Any) -> Any: None, )["dipole"] + def eval_jax(self, jax_obj: Any) -> Any: + return np.asarray( + jax_obj( + jnp.asarray(self.inputs), + jnp.asarray(self.atype.reshape(1, -1)), + jnp.asarray(self.gr), + None, + )["dipole"] + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + return np.asarray( + array_api_strict_obj( + array_api_strict.asarray(self.inputs), + array_api_strict.asarray(self.atype.reshape(1, -1)), + array_api_strict.asarray(self.gr), + None, + )["dipole"] + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: if backend == self.RefBackend.TF: # shape is not same diff --git a/source/tests/consistent/fitting/test_polar.py b/source/tests/consistent/fitting/test_polar.py index 6a3465ba24..895974baf9 100644 --- a/source/tests/consistent/fitting/test_polar.py +++ b/source/tests/consistent/fitting/test_polar.py @@ -12,6 +12,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -32,6 +34,21 @@ from deepmd.tf.fit.polar import PolarFittingSeA as PolarFittingTF else: PolarFittingTF = object +if INSTALLED_JAX: + from deepmd.jax.env import ( + jnp, + ) + from deepmd.jax.fitting.fitting import PolarFittingNet as PolarFittingJAX +else: + PolarFittingJAX = object +if INSTALLED_ARRAY_API_STRICT: + import array_api_strict + + from ...array_api_strict.fitting.fitting import ( + PolarFittingNet as PolarFittingArrayAPIStrict, + ) +else: + PolarFittingArrayAPIStrict = object from deepmd.utils.argcheck import ( fitting_polar, ) @@ -69,7 +86,11 @@ def skip_pt(self) -> bool: tf_class = PolarFittingTF dp_class = PolarFittingDP pt_class = PolarFittingPT + jax_class = PolarFittingJAX + array_api_strict_class = PolarFittingArrayAPIStrict args = fitting_polar() + skip_jax = not INSTALLED_JAX + skip_array_api_strict = not INSTALLED_ARRAY_API_STRICT def setUp(self): CommonTest.setUp(self) @@ -143,6 +164,26 @@ def eval_dp(self, dp_obj: Any) -> Any: None, )["polarizability"] + def eval_jax(self, jax_obj: Any) -> Any: + return np.asarray( + jax_obj( + jnp.asarray(self.inputs), + jnp.asarray(self.atype.reshape(1, -1)), + jnp.asarray(self.gr), + None, + )["polarizability"] + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + return np.asarray( + array_api_strict_obj( + array_api_strict.asarray(self.inputs), + array_api_strict.asarray(self.atype.reshape(1, -1)), + array_api_strict.asarray(self.gr), + None, + )["polarizability"] + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: if backend == self.RefBackend.TF: # shape is not same From cdad3122d3a872fd811dc19a5007a68f6a34d0bc Mon Sep 17 00:00:00 2001 From: Chenqqian Zhang <100290172+Chengqian-Zhang@users.noreply.github.com> Date: Thu, 31 Oct 2024 21:15:27 +0800 Subject: [PATCH 106/193] fix(dptest): Wrong dptest results except for energy head (#4280) Solve issue #4249 In `/deepmd/entrypoints/test.py` line 127 `tmap = dp.get_type_map() if isinstance(dp, DeepPot) else None`. If we use `DeepProperty` or `DeepPolar` or `DeepDOS`..... `tmap` is None. So `type_map` is not modified again according to `type_map` in `input.json`. So `atype` is wrong in the model forward process. The model prediction value is wrong. According to @njzjz , It seems that in the `r2` branch, `get_type_map()` is only available in `DeepPot`. After we refactor `DeepEval` in v3, this should not be a problem anymore. I also change the order of `type_map` in UT to ensure that when `type_map` of `input.json` doesn't match the `type_map ` of data, the result of the dptest is still correct. ## Summary by CodeRabbit - **New Features** - Introduced warning logs for unsupported `DeepGlobalPolar` model usage, recommending the `DeepPolar` model instead. - **Bug Fixes** - Simplified logic for obtaining the type map, ensuring consistent retrieval from the updated source. - Adjusted model configuration in tests to influence type interpretation. - **Documentation** - Improved clarity of comments and logging statements for better understanding. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/entrypoints/test.py | 2 +- source/tests/pt/test_dp_test.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index d9ccf392f5..fd0393c914 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -124,7 +124,7 @@ def test( log.info(f"# testing system : {system}") # create data class - tmap = dp.get_type_map() if isinstance(dp, DeepPot) else None + tmap = dp.get_type_map() data = DeepmdData( system, set_prefix="set", diff --git a/source/tests/pt/test_dp_test.py b/source/tests/pt/test_dp_test.py index c18c3286f6..0427f2b14a 100644 --- a/source/tests/pt/test_dp_test.py +++ b/source/tests/pt/test_dp_test.py @@ -152,6 +152,9 @@ def setUp(self): self.config["training"]["training_data"]["systems"] = data_file self.config["training"]["validation_data"]["systems"] = data_file self.config["model"] = deepcopy(model_property) + self.config["model"]["type_map"] = [ + self.config["model"]["type_map"][i] for i in [1, 0, 3, 2] + ] self.input_json = "test_dp_test_property.json" with open(self.input_json, "w") as fp: json.dump(self.config, fp, indent=4) From 0d13911bbcc36bab42d18d62c7a62bd3fa4a8004 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yifan=20Li=E6=9D=8E=E4=B8=80=E5=B8=86?= Date: Thu, 31 Oct 2024 09:15:49 -0400 Subject: [PATCH 107/193] Print the reminder for the illegal memory error in the AutoBatchSize under tf (#4283) #3822 added a reminder for the illegal memory error. However, this reminder is only needed for tf. This PR moves the illegal memory reminder from base class AutoBatchSize to the inherited class under tf. ## Summary by CodeRabbit - **New Features** - Enhanced `AutoBatchSize` class to initialize batch size from an environment variable, improving user guidance on memory management with TensorFlow. - **Bug Fixes** - Removed redundant logging during initialization to streamline the process when GPU resources are available. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/tf/utils/batch_size.py | 16 ++++++++++++++++ deepmd/utils/batch_size.py | 5 ----- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/deepmd/tf/utils/batch_size.py b/deepmd/tf/utils/batch_size.py index 33f1ec0da0..438bf36703 100644 --- a/deepmd/tf/utils/batch_size.py +++ b/deepmd/tf/utils/batch_size.py @@ -1,4 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import os + from packaging.version import ( Version, ) @@ -11,9 +13,23 @@ OutOfMemoryError, ) from deepmd.utils.batch_size import AutoBatchSize as AutoBatchSizeBase +from deepmd.utils.batch_size import ( + log, +) class AutoBatchSize(AutoBatchSizeBase): + def __init__(self, initial_batch_size: int = 1024, factor: float = 2.0) -> None: + super().__init__(initial_batch_size, factor) + DP_INFER_BATCH_SIZE = int(os.environ.get("DP_INFER_BATCH_SIZE", 0)) + if not DP_INFER_BATCH_SIZE > 0: + if self.is_gpu_available(): + log.info( + "If you encounter the error 'an illegal memory access was encountered', this may be due to a TensorFlow issue. " + "To avoid this, set the environment variable DP_INFER_BATCH_SIZE to a smaller value than the last adjusted batch size. " + "The environment variable DP_INFER_BATCH_SIZE controls the inference batch size (nframes * natoms). " + ) + def is_gpu_available(self) -> bool: """Check if GPU is available. diff --git a/deepmd/utils/batch_size.py b/deepmd/utils/batch_size.py index 259fe93bdb..5ab06e55e2 100644 --- a/deepmd/utils/batch_size.py +++ b/deepmd/utils/batch_size.py @@ -61,11 +61,6 @@ def __init__(self, initial_batch_size: int = 1024, factor: float = 2.0) -> None: self.maximum_working_batch_size = initial_batch_size if self.is_gpu_available(): self.minimal_not_working_batch_size = 2**31 - log.info( - "If you encounter the error 'an illegal memory access was encountered', this may be due to a TensorFlow issue. " - "To avoid this, set the environment variable DP_INFER_BATCH_SIZE to a smaller value than the last adjusted batch size. " - "The environment variable DP_INFER_BATCH_SIZE controls the inference batch size (nframes * natoms). " - ) else: self.minimal_not_working_batch_size = ( self.maximum_working_batch_size + 1 From 9c767adfba8a56c92501591199db60af11d53f94 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 31 Oct 2024 09:19:12 -0400 Subject: [PATCH 108/193] feat(dpmodel/jax): add entry point for dpmodel and jax backend (#4284) ## Summary by CodeRabbit - **New Features** - Introduced entry point loading functionality for enhanced module initialization in both `dpmodel` and `jax` components of the DeepMD framework. These changes improve the framework's functionality and streamline backend configuration. Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/__init__.py | 7 +++++++ deepmd/jax/__init__.py | 6 ++++++ 2 files changed, 13 insertions(+) diff --git a/deepmd/dpmodel/__init__.py b/deepmd/dpmodel/__init__.py index 6f83f849a3..111c2d6ced 100644 --- a/deepmd/dpmodel/__init__.py +++ b/deepmd/dpmodel/__init__.py @@ -1,4 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.utils.entry_point import ( + load_entry_point, +) + from .common import ( DEFAULT_PRECISION, PRECISION_DICT, @@ -32,3 +36,6 @@ "get_deriv_name", "get_hessian_name", ] + + +load_entry_point("deepmd.dpmodel") diff --git a/deepmd/jax/__init__.py b/deepmd/jax/__init__.py index 2ff078e797..bb5c0a5206 100644 --- a/deepmd/jax/__init__.py +++ b/deepmd/jax/__init__.py @@ -1,2 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later """JAX backend.""" + +from deepmd.utils.entry_point import ( + load_entry_point, +) + +load_entry_point("deepmd.jax") From ff04d8bdafa0985c83a9c2418b91466db63f0bc5 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 31 Oct 2024 14:56:28 -0400 Subject: [PATCH 109/193] fix(dpmodel/jax): fix fparam and aparam support in DeepEval (#4285) ## Summary by CodeRabbit - **New Features** - Enhanced error messages for improved clarity when input dimensions are incorrect. - Added support for optional fitting and atomic parameters in model evaluations. - **Bug Fixes** - Removed restrictions on providing fitting and atomic parameters, allowing for more flexible evaluations. - **Tests** - Introduced a new test class to validate the handling of fitting and atomic parameters in model evaluations. --------- Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/fitting/general_fitting.py | 8 ++-- deepmd/dpmodel/infer/deep_eval.py | 21 +++++++-- deepmd/jax/infer/deep_eval.py | 16 +++++-- deepmd/jax/utils/serialization.py | 8 ++-- source/tests/consistent/io/test_io.py | 56 +++++++++++++++++++++++ 5 files changed, 93 insertions(+), 16 deletions(-) diff --git a/deepmd/dpmodel/fitting/general_fitting.py b/deepmd/dpmodel/fitting/general_fitting.py index e55f57c774..a027e1e59d 100644 --- a/deepmd/dpmodel/fitting/general_fitting.py +++ b/deepmd/dpmodel/fitting/general_fitting.py @@ -388,8 +388,8 @@ def _call_common( assert fparam is not None, "fparam should not be None" if fparam.shape[-1] != self.numb_fparam: raise ValueError( - "get an input fparam of dim {fparam.shape[-1]}, ", - "which is not consistent with {self.numb_fparam}.", + f"get an input fparam of dim {fparam.shape[-1]}, " + f"which is not consistent with {self.numb_fparam}." ) fparam = (fparam - self.fparam_avg) * self.fparam_inv_std fparam = xp.tile( @@ -409,8 +409,8 @@ def _call_common( assert aparam is not None, "aparam should not be None" if aparam.shape[-1] != self.numb_aparam: raise ValueError( - "get an input aparam of dim {aparam.shape[-1]}, ", - "which is not consistent with {self.numb_aparam}.", + f"get an input aparam of dim {aparam.shape[-1]}, " + f"which is not consistent with {self.numb_aparam}." ) aparam = xp.reshape(aparam, [nf, nloc, self.numb_aparam]) aparam = (aparam - self.aparam_avg) * self.aparam_inv_std diff --git a/deepmd/dpmodel/infer/deep_eval.py b/deepmd/dpmodel/infer/deep_eval.py index c1f3e4630b..5463743ada 100644 --- a/deepmd/dpmodel/infer/deep_eval.py +++ b/deepmd/dpmodel/infer/deep_eval.py @@ -204,8 +204,6 @@ def eval( The output of the evaluation. The keys are the names of the output variables, and the values are the corresponding output arrays. """ - if fparam is not None or aparam is not None: - raise NotImplementedError # convert all of the input to numpy array atom_types = np.array(atom_types, dtype=np.int32) coords = np.array(coords) @@ -216,7 +214,7 @@ def eval( ) request_defs = self._get_request_defs(atomic) out = self._eval_func(self._eval_model, numb_test, natoms)( - coords, cells, atom_types, request_defs + coords, cells, atom_types, fparam, aparam, request_defs ) return dict( zip( @@ -306,6 +304,8 @@ def _eval_model( coords: np.ndarray, cells: Optional[np.ndarray], atom_types: np.ndarray, + fparam: Optional[np.ndarray], + aparam: Optional[np.ndarray], request_defs: list[OutputVariableDef], ): model = self.dp @@ -323,12 +323,25 @@ def _eval_model( box_input = cells.reshape([-1, 3, 3]) else: box_input = None + if fparam is not None: + fparam_input = fparam.reshape(nframes, self.get_dim_fparam()) + else: + fparam_input = None + if aparam is not None: + aparam_input = aparam.reshape(nframes, natoms, self.get_dim_aparam()) + else: + aparam_input = None do_atomic_virial = any( x.category == OutputVariableCategory.DERV_C_REDU for x in request_defs ) batch_output = model( - coord_input, type_input, box=box_input, do_atomic_virial=do_atomic_virial + coord_input, + type_input, + box=box_input, + fparam=fparam_input, + aparam=aparam_input, + do_atomic_virial=do_atomic_virial, ) if isinstance(batch_output, tuple): batch_output = batch_output[0] diff --git a/deepmd/jax/infer/deep_eval.py b/deepmd/jax/infer/deep_eval.py index 76f044a327..c1967fb0da 100644 --- a/deepmd/jax/infer/deep_eval.py +++ b/deepmd/jax/infer/deep_eval.py @@ -214,8 +214,6 @@ def eval( The output of the evaluation. The keys are the names of the output variables, and the values are the corresponding output arrays. """ - if fparam is not None or aparam is not None: - raise NotImplementedError # convert all of the input to numpy array atom_types = np.array(atom_types, dtype=np.int32) coords = np.array(coords) @@ -226,7 +224,7 @@ def eval( ) request_defs = self._get_request_defs(atomic) out = self._eval_func(self._eval_model, numb_test, natoms)( - coords, cells, atom_types, request_defs + coords, cells, atom_types, fparam, aparam, request_defs ) return dict( zip( @@ -316,6 +314,8 @@ def _eval_model( coords: np.ndarray, cells: Optional[np.ndarray], atom_types: np.ndarray, + fparam: Optional[np.ndarray], + aparam: Optional[np.ndarray], request_defs: list[OutputVariableDef], ): model = self.dp @@ -333,6 +333,14 @@ def _eval_model( box_input = cells.reshape([-1, 3, 3]) else: box_input = None + if fparam is not None: + fparam_input = fparam.reshape(nframes, self.get_dim_fparam()) + else: + fparam_input = None + if aparam is not None: + aparam_input = aparam.reshape(nframes, natoms, self.get_dim_aparam()) + else: + aparam_input = None do_atomic_virial = any( x.category == OutputVariableCategory.DERV_C_REDU for x in request_defs @@ -341,6 +349,8 @@ def _eval_model( to_jax_array(coord_input), to_jax_array(type_input), box=to_jax_array(box_input), + fparam=to_jax_array(fparam_input), + aparam=to_jax_array(aparam_input), do_atomic_virial=do_atomic_virial, ) if isinstance(batch_output, tuple): diff --git a/deepmd/jax/utils/serialization.py b/deepmd/jax/utils/serialization.py index fcfcc8a610..a7d57523e2 100644 --- a/deepmd/jax/utils/serialization.py +++ b/deepmd/jax/utils/serialization.py @@ -51,18 +51,16 @@ def deserialize_to_file(model_file: str, data: dict) -> None: model_def_script = data["model_def_script"] call_lower = model.call_lower - nf, nloc, nghost, nfp, nap = jax_export.symbolic_shape( - "nf, nloc, nghost, nfp, nap" - ) + nf, nloc, nghost = jax_export.symbolic_shape("nf, nloc, nghost") exported = jax_export.export(jax.jit(call_lower))( jax.ShapeDtypeStruct((nf, nloc + nghost, 3), jnp.float64), # extended_coord jax.ShapeDtypeStruct((nf, nloc + nghost), jnp.int32), # extended_atype jax.ShapeDtypeStruct((nf, nloc, model.get_nnei()), jnp.int64), # nlist jax.ShapeDtypeStruct((nf, nloc + nghost), jnp.int64), # mapping - jax.ShapeDtypeStruct((nf, nfp), jnp.float64) + jax.ShapeDtypeStruct((nf, model.get_dim_fparam()), jnp.float64) if model.get_dim_fparam() else None, # fparam - jax.ShapeDtypeStruct((nf, nap), jnp.float64) + jax.ShapeDtypeStruct((nf, nloc, model.get_dim_aparam()), jnp.float64) if model.get_dim_aparam() else None, # aparam False, # do_atomic_virial diff --git a/source/tests/consistent/io/test_io.py b/source/tests/consistent/io/test_io.py index dc0f280d56..af26c41694 100644 --- a/source/tests/consistent/io/test_io.py +++ b/source/tests/consistent/io/test_io.py @@ -136,6 +136,8 @@ def test_deep_eval(self): [13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0], dtype=GLOBAL_NP_FLOAT_PRECISION, ).reshape(1, 9) + natoms = self.atype.shape[1] + nframes = self.atype.shape[0] prefix = "test_consistent_io_" + self.__class__.__name__.lower() rets = [] for backend_name in ("tensorflow", "pytorch", "dpmodel", "jax"): @@ -145,10 +147,20 @@ def test_deep_eval(self): reference_data = copy.deepcopy(self.data) self.save_data_to_model(prefix + backend.suffixes[0], reference_data) deep_eval = DeepEval(prefix + backend.suffixes[0]) + if deep_eval.get_dim_fparam() > 0: + fparam = np.ones((nframes, deep_eval.get_dim_fparam())) + else: + fparam = None + if deep_eval.get_dim_aparam() > 0: + aparam = np.ones((nframes, natoms, deep_eval.get_dim_aparam())) + else: + aparam = None ret = deep_eval.eval( self.coords, self.box, self.atype, + fparam=fparam, + aparam=aparam, ) rets.append(ret) for ret in rets[1:]: @@ -199,3 +211,47 @@ def setUp(self): def tearDown(self): IOTest.tearDown(self) + + +class TestDeepPotFparamAparam(unittest.TestCase, IOTest): + def setUp(self): + model_def_script = { + "type_map": ["O", "H"], + "descriptor": { + "type": "se_e2_a", + "sel": [20, 20], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 3, + 6, + ], + "resnet_dt": False, + "axis_neuron": 2, + "precision": "float64", + "type_one_side": True, + "seed": 1, + }, + "fitting_net": { + "type": "ener", + "neuron": [ + 5, + 5, + ], + "resnet_dt": True, + "precision": "float64", + "atom_ener": [], + "seed": 1, + "numb_fparam": 2, + "numb_aparam": 2, + }, + } + model = get_model(copy.deepcopy(model_def_script)) + self.data = { + "model": model.serialize(), + "backend": "test", + "model_def_script": model_def_script, + } + + def tearDown(self): + IOTest.tearDown(self) From 704db2ff84188424dac86d274e473935854b8524 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 31 Oct 2024 20:20:50 -0400 Subject: [PATCH 110/193] fix(lmp): add pair_deepmd_index arg to fix dplr for multiple deepmd pairs (#4274) Fix #4273. ## Summary by CodeRabbit - **New Features** - Introduced a new optional keyword `pair_deepmd_index` in the `fix dplr` command for enhanced control in simulations. - Updated documentation with clearer instructions and examples for the DPLR model, including training process and simulation setup. - **Bug Fixes** - Improved error handling related to the new `pair_deepmd_index` parameter to ensure proper usage. - **Documentation** - Enhanced descriptions and usability of the DPLR model documentation. Signed-off-by: Jinzhe Zeng --- doc/model/dplr.md | 6 +++++- source/lmp/fix_dplr.cpp | 9 ++++++++- source/lmp/fix_dplr.h | 3 +++ 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/doc/model/dplr.md b/doc/model/dplr.md index 91c2251346..cf071d4029 100644 --- a/doc/model/dplr.md +++ b/doc/model/dplr.md @@ -198,7 +198,7 @@ fix ID group-ID style_name keyword value ... - three or more keyword/value pairs may be appended ``` -keyword = *model* or *type_associate* or *bond_type* or *efield* +keyword = *model* or *type_associate* or *bond_type* or *efield* or *pair_deepmd_index* *model* value = name name = name of DPLR model file (e.g. frozen_model.pb) (not DW model) *type_associate* values = NR1 NW1 NR2 NW2 ... @@ -208,6 +208,8 @@ keyword = *model* or *type_associate* or *bond_type* or *efield* NBi = bond type of i-th (real atom, Wannier centroid) pair *efield* (optional) values = Ex Ey Ez Ex/Ey/Ez = electric field along x/y/z direction + *pair_deepmd_index* (optional) values = idx + idx = The index of pair_style deepmd, starting from 1, if more than one is used ``` **Examples** @@ -223,6 +225,8 @@ fix_modify 0 virial yes ``` The fix command `dplr` calculates the position of WCs by the DW model and back-propagates the long-range interaction on virtual atoms to real toms. +The fix command must be used after [pair_style `deepmd`](../third-party/lammps-command.md#pair_style-deepmd). +If there are more than 1 pair_style `deepmd`, `pair_deepmd_index` (starting from 1) must be set to assign the index of the pair_style `deepmd`. The atom names specified in [pair_style `deepmd`](../third-party/lammps-command.md#pair_style-deepmd) will be used to determine elements. If it is not set, the training parameter {ref}`type_map ` will be mapped to LAMMPS atom types. diff --git a/source/lmp/fix_dplr.cpp b/source/lmp/fix_dplr.cpp index 8a6be7d840..34fd2515ed 100644 --- a/source/lmp/fix_dplr.cpp +++ b/source/lmp/fix_dplr.cpp @@ -62,6 +62,7 @@ FixDPLR::FixDPLR(LAMMPS *lmp, int narg, char **arg) size_vector = 3; qe2f = force->qe2f; xstyle = ystyle = zstyle = NONE; + pair_deepmd_index = 0; if (strcmp(update->unit_style, "lj") == 0) { error->all(FLERR, @@ -125,6 +126,12 @@ FixDPLR::FixDPLR(LAMMPS *lmp, int narg, char **arg) } sort(bond_type.begin(), bond_type.end()); iarg = iend; + } else if (string(arg[iarg]) == string("pair_deepmd_index")) { + if (iarg + 1 >= narg) { + error->all(FLERR, "Illegal pair_deepmd_index, not provided"); + } + pair_deepmd_index = atoi(arg[iarg + 1]); + iarg += 2; } else { break; } @@ -141,7 +148,7 @@ FixDPLR::FixDPLR(LAMMPS *lmp, int narg, char **arg) error->one(FLERR, e.what()); } - pair_deepmd = (PairDeepMD *)force->pair_match("deepmd", 1); + pair_deepmd = (PairDeepMD *)force->pair_match("deepmd", 1, pair_deepmd_index); if (!pair_deepmd) { error->all(FLERR, "pair_style deepmd should be set before this fix\n"); } diff --git a/source/lmp/fix_dplr.h b/source/lmp/fix_dplr.h index a6822fe4fe..c43296e611 100644 --- a/source/lmp/fix_dplr.h +++ b/source/lmp/fix_dplr.h @@ -80,6 +80,9 @@ class FixDPLR : public Fix { void update_efield_variables(); enum { NONE, CONSTANT, EQUAL }; std::vector type_idx_map; + /* The index of deepmd pair index, which starts from 1. By default 0, which + * works only when there is one deepmd pair. */ + int pair_deepmd_index; }; } // namespace LAMMPS_NS From 3dc6fff5ee3a8e201bc15905dce833dd89676dd1 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 1 Nov 2024 09:30:15 +0800 Subject: [PATCH 111/193] fix spin --- source/lmp/pair_deepspin.cpp | 1 - source/lmp/tests/run_mpi_pair_deepmd_spin.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp index 226b0e029f..01ef220586 100644 --- a/source/lmp/pair_deepspin.cpp +++ b/source/lmp/pair_deepspin.cpp @@ -208,7 +208,6 @@ void PairDeepSpin::compute(int eflag, int vflag) { commdata_->nswap, commdata_->sendnum, commdata_->recvnum, commdata_->firstrecv, commdata_->sendlist, commdata_->sendproc, commdata_->recvproc, &world); - deepmd_compat::InputNlist extend_lmp_list; if (single_model || multi_models_no_mod_devi) { // cvflag_atom is the right flag for the cvatom matrix if (!(eflag_atom || cvflag_atom)) { diff --git a/source/lmp/tests/run_mpi_pair_deepmd_spin.py b/source/lmp/tests/run_mpi_pair_deepmd_spin.py index 47e807e088..d960c45108 100644 --- a/source/lmp/tests/run_mpi_pair_deepmd_spin.py +++ b/source/lmp/tests/run_mpi_pair_deepmd_spin.py @@ -54,7 +54,7 @@ relative = 1.0 lammps.pair_style( - f"deepmd {pb_file} {pb_file2} out_file {md_file} out_freq 1 atomic relative {relative}" + f"deepspin {pb_file} {pb_file2} out_file {md_file} out_freq 1 atomic relative {relative}" ) lammps.pair_coeff("* *") lammps.run(0) From a4688194e9c42ff285df877fe5fcbff384f5a470 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 31 Oct 2024 22:09:42 -0400 Subject: [PATCH 112/193] feat(jax/array-api): property fitting (#4287) ## Summary by CodeRabbit - **New Features** - Introduced the `PropertyFittingNet` class for enhanced property-specific fitting operations. - Enhanced testing framework to support additional computational backends (JAX and Array API Strict). - **Bug Fixes** - Improved handling of attribute assignments in property fitting. - **Tests** - Added new methods and properties to the testing suite for evaluating property fitting with JAX and Array API Strict. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/jax/fitting/fitting.py | 11 ++++ .../tests/array_api_strict/fitting/fitting.py | 9 +++ .../tests/consistent/fitting/test_property.py | 62 +++++++++++++++++++ 3 files changed, 82 insertions(+) diff --git a/deepmd/jax/fitting/fitting.py b/deepmd/jax/fitting/fitting.py index 2a6186ac46..d62681490c 100644 --- a/deepmd/jax/fitting/fitting.py +++ b/deepmd/jax/fitting/fitting.py @@ -9,6 +9,9 @@ from deepmd.dpmodel.fitting.polarizability_fitting import ( PolarFitting as PolarFittingNetDP, ) +from deepmd.dpmodel.fitting.property_fitting import ( + PropertyFittingNet as PropertyFittingNetDP, +) from deepmd.jax.common import ( ArrayAPIVariable, flax_module, @@ -51,6 +54,14 @@ def __setattr__(self, name: str, value: Any) -> None: return super().__setattr__(name, value) +@BaseFitting.register("property") +@flax_module +class PropertyFittingNet(PropertyFittingNetDP): + def __setattr__(self, name: str, value: Any) -> None: + value = setattr_for_general_fitting(name, value) + return super().__setattr__(name, value) + + @BaseFitting.register("dos") @flax_module class DOSFittingNet(DOSFittingNetDP): diff --git a/source/tests/array_api_strict/fitting/fitting.py b/source/tests/array_api_strict/fitting/fitting.py index 5a2bd9c58f..323a49cfe8 100644 --- a/source/tests/array_api_strict/fitting/fitting.py +++ b/source/tests/array_api_strict/fitting/fitting.py @@ -9,6 +9,9 @@ from deepmd.dpmodel.fitting.polarizability_fitting import ( PolarFitting as PolarFittingNetDP, ) +from deepmd.dpmodel.fitting.property_fitting import ( + PropertyFittingNet as PropertyFittingNetDP, +) from ..common import ( to_array_api_strict_array, @@ -43,6 +46,12 @@ def __setattr__(self, name: str, value: Any) -> None: return super().__setattr__(name, value) +class PropertyFittingNet(PropertyFittingNetDP): + def __setattr__(self, name: str, value: Any) -> None: + value = setattr_for_general_fitting(name, value) + return super().__setattr__(name, value) + + class DOSFittingNet(DOSFittingNetDP): def __setattr__(self, name: str, value: Any) -> None: value = setattr_for_general_fitting(name, value) diff --git a/source/tests/consistent/fitting/test_property.py b/source/tests/consistent/fitting/test_property.py index beb21d9c04..4e0fe04f9f 100644 --- a/source/tests/consistent/fitting/test_property.py +++ b/source/tests/consistent/fitting/test_property.py @@ -17,6 +17,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, CommonTest, parameterized, @@ -32,6 +34,22 @@ from deepmd.pt.utils.env import DEVICE as PT_DEVICE else: PropertyFittingPT = object +if INSTALLED_JAX: + from deepmd.jax.env import ( + jnp, + ) + from deepmd.jax.fitting.fitting import PropertyFittingNet as PropertyFittingJAX +else: + PropertyFittingJAX = object +if INSTALLED_ARRAY_API_STRICT: + import array_api_strict + + from ...array_api_strict.fitting.fitting import ( + PropertyFittingNet as PropertyFittingStrict, + ) +else: + PropertyFittingStrict = object + PropertyFittingTF = object @@ -84,9 +102,14 @@ def skip_pt(self) -> bool: def skip_tf(self) -> bool: return True + skip_jax = not INSTALLED_JAX + skip_array_api_strict = not INSTALLED_ARRAY_API_STRICT + tf_class = PropertyFittingTF dp_class = PropertyFittingDP pt_class = PropertyFittingPT + jax_class = PropertyFittingJAX + array_api_strict_class = PropertyFittingStrict args = fitting_property() def setUp(self): @@ -183,6 +206,45 @@ def eval_dp(self, dp_obj: Any) -> Any: aparam=self.aparam if numb_aparam else None, )["property"] + def eval_jax(self, jax_obj: Any) -> Any: + ( + resnet_dt, + precision, + mixed_types, + numb_fparam, + numb_aparam, + task_dim, + intensive, + ) = self.param + return np.asarray( + jax_obj( + jnp.asarray(self.inputs), + jnp.asarray(self.atype.reshape(1, -1)), + fparam=jnp.asarray(self.fparam) if numb_fparam else None, + aparam=jnp.asarray(self.aparam) if numb_aparam else None, + )["property"] + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + array_api_strict.set_array_api_strict_flags(api_version="2023.12") + ( + resnet_dt, + precision, + mixed_types, + numb_fparam, + numb_aparam, + task_dim, + intensive, + ) = self.param + return np.asarray( + array_api_strict_obj( + array_api_strict.asarray(self.inputs), + array_api_strict.asarray(self.atype.reshape(1, -1)), + fparam=array_api_strict.asarray(self.fparam) if numb_fparam else None, + aparam=array_api_strict.asarray(self.aparam) if numb_aparam else None, + )["property"] + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: if backend == self.RefBackend.TF: # shape is not same From 5c321470794245f8d235841dee689107b2c7b593 Mon Sep 17 00:00:00 2001 From: Anyang Peng <137014849+anyangml@users.noreply.github.com> Date: Fri, 1 Nov 2024 12:26:13 +0800 Subject: [PATCH 113/193] Feat: Add consistency test for ZBL between dp and pt (#4292) ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced `DPZBLModel`, enhancing energy modeling capabilities. - Added `get_zbl_model` function for creating `DPZBLModel` from input data. - New `DPZBLLinearEnergyAtomicModel` class allows for complex interactions between atomic models. - **Bug Fixes** - Corrected typographical errors in multiple test classes to improve code clarity and consistency in method names. - Updated model type attributes for `DPZBLModel` and `LinearEnergyModel` to reflect accurate classifications. - **Tests** - Added comprehensive unit tests for energy models to ensure functionality across various backends. - Enhanced existing test classes with corrected method names for improved accuracy. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../atomic_model/linear_atomic_model.py | 2 + deepmd/dpmodel/model/dp_zbl_model.py | 66 ++++++ deepmd/dpmodel/model/model.py | 53 +++++ deepmd/pt/model/model/dp_linear_model.py | 2 +- deepmd/pt/model/model/dp_zbl_model.py | 2 +- source/tests/consistent/common.py | 4 +- .../tests/consistent/fitting/test_dipole.py | 2 +- source/tests/consistent/fitting/test_dos.py | 2 +- source/tests/consistent/fitting/test_ener.py | 2 +- source/tests/consistent/fitting/test_polar.py | 2 +- .../tests/consistent/fitting/test_property.py | 2 +- source/tests/consistent/model/test_ener.py | 2 +- .../tests/consistent/model/test_zbl_ener.py | 224 ++++++++++++++++++ .../tests/consistent/test_type_embedding.py | 2 +- 14 files changed, 356 insertions(+), 11 deletions(-) create mode 100644 deepmd/dpmodel/model/dp_zbl_model.py create mode 100644 source/tests/consistent/model/test_zbl_ener.py diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py index 5d86472674..224fdd145c 100644 --- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py @@ -34,6 +34,7 @@ ) +@BaseAtomicModel.register("linear") class LinearEnergyAtomicModel(BaseAtomicModel): """Linear model make linear combinations of several existing models. @@ -324,6 +325,7 @@ def is_aparam_nall(self) -> bool: return False +@BaseAtomicModel.register("zbl") class DPZBLLinearEnergyAtomicModel(LinearEnergyAtomicModel): """Model linearly combine a list of AtomicModels. diff --git a/deepmd/dpmodel/model/dp_zbl_model.py b/deepmd/dpmodel/model/dp_zbl_model.py new file mode 100644 index 0000000000..ba19785235 --- /dev/null +++ b/deepmd/dpmodel/model/dp_zbl_model.py @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +from deepmd.dpmodel.atomic_model.linear_atomic_model import ( + DPZBLLinearEnergyAtomicModel, +) +from deepmd.dpmodel.model.base_model import ( + BaseModel, +) +from deepmd.dpmodel.model.dp_model import ( + DPModelCommon, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) + +from .make_model import ( + make_model, +) + +DPZBLModel_ = make_model(DPZBLLinearEnergyAtomicModel) + + +@BaseModel.register("zbl") +class DPZBLModel(DPZBLModel_): + model_type = "zbl" + + def __init__( + self, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. + + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statistics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + local_jdata_cpy = local_jdata.copy() + local_jdata_cpy["dpmodel"], min_nbor_dist = DPModelCommon.update_sel( + train_data, type_map, local_jdata["dpmodel"] + ) + return local_jdata_cpy, min_nbor_dist diff --git a/deepmd/dpmodel/model/model.py b/deepmd/dpmodel/model/model.py index cccd0732cd..c29240214c 100644 --- a/deepmd/dpmodel/model/model.py +++ b/deepmd/dpmodel/model/model.py @@ -1,4 +1,13 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.dpmodel.atomic_model.dp_atomic_model import ( + DPAtomicModel, +) +from deepmd.dpmodel.atomic_model.pairtab_atomic_model import ( + PairTabAtomicModel, +) +from deepmd.dpmodel.descriptor.base_descriptor import ( + BaseDescriptor, +) from deepmd.dpmodel.descriptor.se_e2_a import ( DescrptSeA, ) @@ -8,6 +17,9 @@ from deepmd.dpmodel.model.base_model import ( BaseModel, ) +from deepmd.dpmodel.model.dp_zbl_model import ( + DPZBLModel, +) from deepmd.dpmodel.model.ener_model import ( EnergyModel, ) @@ -55,6 +67,45 @@ def get_standard_model(data: dict) -> EnergyModel: ) +def get_zbl_model(data: dict) -> DPZBLModel: + data["descriptor"]["ntypes"] = len(data["type_map"]) + descriptor = BaseDescriptor(**data["descriptor"]) + fitting_type = data["fitting_net"].pop("type") + if fitting_type == "ener": + fitting = EnergyFittingNet( + ntypes=descriptor.get_ntypes(), + dim_descrpt=descriptor.get_dim_out(), + mixed_types=descriptor.mixed_types(), + **data["fitting_net"], + ) + else: + raise ValueError(f"Unknown fitting type {fitting_type}") + + dp_model = DPAtomicModel(descriptor, fitting, type_map=data["type_map"]) + # pairtab + filepath = data["use_srtab"] + pt_model = PairTabAtomicModel( + filepath, + data["descriptor"]["rcut"], + data["descriptor"]["sel"], + type_map=data["type_map"], + ) + + rmin = data["sw_rmin"] + rmax = data["sw_rmax"] + atom_exclude_types = data.get("atom_exclude_types", []) + pair_exclude_types = data.get("pair_exclude_types", []) + return DPZBLModel( + dp_model, + pt_model, + rmin, + rmax, + type_map=data["type_map"], + atom_exclude_types=atom_exclude_types, + pair_exclude_types=pair_exclude_types, + ) + + def get_spin_model(data: dict) -> SpinModel: """Get a spin model from a dictionary. @@ -100,6 +151,8 @@ def get_model(data: dict): if model_type == "standard": if "spin" in data: return get_spin_model(data) + elif "use_srtab" in data: + return get_zbl_model(data) else: return get_standard_model(data) else: diff --git a/deepmd/pt/model/model/dp_linear_model.py b/deepmd/pt/model/model/dp_linear_model.py index d19070fc5b..4028d77228 100644 --- a/deepmd/pt/model/model/dp_linear_model.py +++ b/deepmd/pt/model/model/dp_linear_model.py @@ -30,7 +30,7 @@ @BaseModel.register("linear_ener") class LinearEnergyModel(DPLinearModel_): - model_type = "ener" + model_type = "linear_ener" def __init__( self, diff --git a/deepmd/pt/model/model/dp_zbl_model.py b/deepmd/pt/model/model/dp_zbl_model.py index e1ef00f5fe..0f05e3e56d 100644 --- a/deepmd/pt/model/model/dp_zbl_model.py +++ b/deepmd/pt/model/model/dp_zbl_model.py @@ -30,7 +30,7 @@ @BaseModel.register("zbl") class DPZBLModel(DPZBLModel_): - model_type = "ener" + model_type = "zbl" def __init__( self, diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index bcad7c4502..734486becb 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -75,7 +75,7 @@ class CommonTest(ABC): data: ClassVar[dict] """Arguments data.""" - addtional_data: ClassVar[dict] = {} + additional_data: ClassVar[dict] = {} """Additional data that will not be checked.""" tf_class: ClassVar[Optional[type]] """TensorFlow model class.""" @@ -128,7 +128,7 @@ def init_backend_cls(self, cls) -> Any: def pass_data_to_cls(self, cls, data) -> Any: """Pass data to the class.""" - return cls(**data, **self.addtional_data) + return cls(**data, **self.additional_data) @abstractmethod def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: diff --git a/source/tests/consistent/fitting/test_dipole.py b/source/tests/consistent/fitting/test_dipole.py index 55d6c44c34..60ee7322c1 100644 --- a/source/tests/consistent/fitting/test_dipole.py +++ b/source/tests/consistent/fitting/test_dipole.py @@ -104,7 +104,7 @@ def setUp(self): self.atype.sort() @property - def addtional_data(self) -> dict: + def additional_data(self) -> dict: ( resnet_dt, precision, diff --git a/source/tests/consistent/fitting/test_dos.py b/source/tests/consistent/fitting/test_dos.py index 774e3f655e..d3de3ef151 100644 --- a/source/tests/consistent/fitting/test_dos.py +++ b/source/tests/consistent/fitting/test_dos.py @@ -124,7 +124,7 @@ def setUp(self): ).reshape(-1, 1) @property - def addtional_data(self) -> dict: + def additional_data(self) -> dict: ( resnet_dt, precision, diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index e32410a0ec..f4e78ce966 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -134,7 +134,7 @@ def setUp(self): ).reshape(-1, 1) @property - def addtional_data(self) -> dict: + def additional_data(self) -> dict: ( resnet_dt, precision, diff --git a/source/tests/consistent/fitting/test_polar.py b/source/tests/consistent/fitting/test_polar.py index 895974baf9..bd9d013b8d 100644 --- a/source/tests/consistent/fitting/test_polar.py +++ b/source/tests/consistent/fitting/test_polar.py @@ -104,7 +104,7 @@ def setUp(self): self.atype.sort() @property - def addtional_data(self) -> dict: + def additional_data(self) -> dict: ( resnet_dt, precision, diff --git a/source/tests/consistent/fitting/test_property.py b/source/tests/consistent/fitting/test_property.py index 4e0fe04f9f..a096d4dd68 100644 --- a/source/tests/consistent/fitting/test_property.py +++ b/source/tests/consistent/fitting/test_property.py @@ -127,7 +127,7 @@ def setUp(self): ).reshape(-1, 1) @property - def addtional_data(self) -> dict: + def additional_data(self) -> dict: ( resnet_dt, precision, diff --git a/source/tests/consistent/model/test_ener.py b/source/tests/consistent/model/test_ener.py index 2a358ba7e0..98330ba849 100644 --- a/source/tests/consistent/model/test_ener.py +++ b/source/tests/consistent/model/test_ener.py @@ -130,7 +130,7 @@ def pass_data_to_cls(self, cls, data) -> Any: return get_model_pt(data) elif cls is EnergyModelJAX: return get_model_jax(data) - return cls(**data, **self.addtional_data) + return cls(**data, **self.additional_data) def setUp(self): CommonTest.setUp(self) diff --git a/source/tests/consistent/model/test_zbl_ener.py b/source/tests/consistent/model/test_zbl_ener.py new file mode 100644 index 0000000000..f37bee0c90 --- /dev/null +++ b/source/tests/consistent/model/test_zbl_ener.py @@ -0,0 +1,224 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest +from typing import ( + Any, +) + +import numpy as np + +from deepmd.dpmodel.model.dp_zbl_model import DPZBLModel as DPZBLModelDP +from deepmd.dpmodel.model.model import get_model as get_model_dp +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) + +from ..common import ( + INSTALLED_PT, + SKIP_FLAG, + CommonTest, + parameterized, +) +from .common import ( + ModelTest, +) + +if INSTALLED_PT: + from deepmd.pt.model.model import get_model as get_model_pt + from deepmd.pt.model.model.dp_zbl_model import DPZBLModel as DPZBLModelPT +else: + DPZBLModelPT = None +import os + +from deepmd.utils.argcheck import ( + model_args, +) + +TESTS_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + + +@parameterized( + ( + [], + [[0, 1]], + ), + ( + [], + [1], + ), +) +class TestEner(CommonTest, ModelTest, unittest.TestCase): + @property + def data(self) -> dict: + pair_exclude_types, atom_exclude_types = self.param + return { + "type_map": ["O", "H", "B"], + "use_srtab": f"{TESTS_DIR}/pt/water/data/zbl_tab_potential/H2O_tab_potential.txt", + "smin_alpha": 0.1, + "sw_rmin": 0.2, + "sw_rmax": 4.0, + "pair_exclude_types": pair_exclude_types, + "atom_exclude_types": atom_exclude_types, + "descriptor": { + "type": "se_atten", + "sel": 40, + "rcut_smth": 0.5, + "rcut": 4.0, + "neuron": [3, 6], + "axis_neuron": 2, + "attn": 8, + "attn_layer": 2, + "attn_dotr": True, + "attn_mask": False, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": False, + "temperature": 1.0, + "set_davg_zero": True, + "type_one_side": True, + "seed": 1, + }, + "fitting_net": { + "neuron": [5, 5], + "resnet_dt": True, + "seed": 1, + }, + } + + dp_class = DPZBLModelDP + pt_class = DPZBLModelPT + args = model_args() + + def get_reference_backend(self): + """Get the reference backend. + + We need a reference backend that can reproduce forces. + """ + if not self.skip_pt: + return self.RefBackend.PT + if not self.skip_tf: + return self.RefBackend.TF + if not self.skip_jax: + return self.RefBackend.JAX + if not self.skip_dp: + return self.RefBackend.DP + raise ValueError("No available reference") + + @property + def skip_tf(self): + return True + + @property + def skip_jax(self): + return True + + def pass_data_to_cls(self, cls, data) -> Any: + """Pass data to the class.""" + data = data.copy() + if cls is DPZBLModelDP: + return get_model_dp(data) + elif cls is DPZBLModelPT: + return get_model_pt(data) + return cls(**data, **self.additional_data) + + def setUp(self): + CommonTest.setUp(self) + + self.ntypes = 2 + self.coords = np.array( + [ + 12.83, + 2.56, + 2.18, + 12.09, + 2.87, + 2.74, + 00.25, + 3.32, + 1.68, + 3.36, + 3.00, + 1.81, + 3.51, + 2.51, + 2.60, + 4.27, + 3.22, + 1.56, + ], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ).reshape(1, -1, 3) + self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32).reshape(1, -1) + self.box = np.array( + [13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ).reshape(1, 9) + self.natoms = np.array([6, 6, 2, 4], dtype=np.int32) + + # TF requires the atype to be sort + idx_map = np.argsort(self.atype.ravel()) + self.atype = self.atype[:, idx_map] + self.coords = self.coords[:, idx_map] + + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: + return self.build_tf_model( + obj, + self.natoms, + self.coords, + self.atype, + self.box, + suffix, + ) + + def eval_dp(self, dp_obj: Any) -> Any: + return self.eval_dp_model( + dp_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + def eval_pt(self, pt_obj: Any) -> Any: + return self.eval_pt_model( + pt_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + def eval_jax(self, jax_obj: Any) -> Any: + return self.eval_jax_model( + jax_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: + # shape not matched. ravel... + if backend is self.RefBackend.DP: + return ( + ret["energy_redu"].ravel(), + ret["energy"].ravel(), + SKIP_FLAG, + SKIP_FLAG, + ) + elif backend is self.RefBackend.PT: + return ( + ret["energy"].ravel(), + ret["atom_energy"].ravel(), + ret["force"].ravel(), + ret["virial"].ravel(), + ) + elif backend is self.RefBackend.TF: + return (ret[0].ravel(), ret[1].ravel(), ret[2].ravel(), ret[3].ravel()) + elif backend is self.RefBackend.JAX: + return ( + ret["energy_redu"].ravel(), + ret["energy"].ravel(), + ret["energy_derv_r"].ravel(), + ret["energy_derv_c_redu"].ravel(), + ) + raise ValueError(f"Unknown backend: {backend}") diff --git a/source/tests/consistent/test_type_embedding.py b/source/tests/consistent/test_type_embedding.py index a4b516ef16..0dd17c841e 100644 --- a/source/tests/consistent/test_type_embedding.py +++ b/source/tests/consistent/test_type_embedding.py @@ -82,7 +82,7 @@ def data(self) -> dict: skip_array_api_strict = not INSTALLED_ARRAY_API_STRICT @property - def addtional_data(self) -> dict: + def additional_data(self) -> dict: ( resnet_dt, precision, From eb2832bcc1d3af664a039055f44fff7f54d885ae Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 1 Nov 2024 04:58:59 -0400 Subject: [PATCH 114/193] feat(jax/array-api): se_e3 (#4286) ## Summary by CodeRabbit - **New Features** - Introduced a new descriptor class `DescrptSeT` for enhanced compatibility with array APIs. - Added support for JAX as a backend option for the `"se_e3"` descriptor. - **Bug Fixes** - Improved array handling in the `clear` method of the `NN` class to ensure compatibility across different array implementations. - **Documentation** - Updated the module exports to include the new `DescrptSeT` class. - Expanded documentation to reflect JAX as a supported backend for the `"se_e3"` descriptor. - **Tests** - Enhanced the test suite to support additional computational backends and added new evaluation methods. --------- Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/descriptor/se_t.py | 45 +++++++++++-------- deepmd/dpmodel/utils/network.py | 7 +-- deepmd/jax/descriptor/__init__.py | 4 ++ deepmd/jax/descriptor/se_t.py | 42 +++++++++++++++++ doc/model/train-se-e3.md | 4 +- .../tests/array_api_strict/descriptor/se_t.py | 32 +++++++++++++ .../tests/consistent/descriptor/test_se_t.py | 33 ++++++++++++++ 7 files changed, 144 insertions(+), 23 deletions(-) create mode 100644 deepmd/jax/descriptor/se_t.py create mode 100644 source/tests/array_api_strict/descriptor/se_t.py diff --git a/deepmd/dpmodel/descriptor/se_t.py b/deepmd/dpmodel/descriptor/se_t.py index 4dc4c965fb..5bc5970a87 100644 --- a/deepmd/dpmodel/descriptor/se_t.py +++ b/deepmd/dpmodel/descriptor/se_t.py @@ -6,6 +6,7 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel import ( @@ -13,6 +14,10 @@ PRECISION_DICT, NativeOP, ) +from deepmd.dpmodel.common import ( + get_xp_precision, + to_numpy_array, +) from deepmd.dpmodel.utils import ( EmbeddingNet, EnvMat, @@ -25,9 +30,6 @@ from deepmd.dpmodel.utils.update_sel import ( UpdateSel, ) -from deepmd.env import ( - GLOBAL_NP_FLOAT_PRECISION, -) from deepmd.utils.data_system import ( DeepmdDataSystem, ) @@ -122,17 +124,18 @@ def __init__( # order matters, placed after the assignment of self.ntypes self.reinit_exclude(exclude_types) self.trainable = trainable + self.sel_cumsum = [0, *np.cumsum(self.sel).tolist()] in_dim = 1 # not considiering type embedding - self.embeddings = NetworkCollection( + embeddings = NetworkCollection( ntypes=self.ntypes, ndim=2, network_type="embedding_network", ) for ii, embedding_idx in enumerate( - itertools.product(range(self.ntypes), repeat=self.embeddings.ndim) + itertools.product(range(self.ntypes), repeat=embeddings.ndim) ): - self.embeddings[embedding_idx] = EmbeddingNet( + embeddings[embedding_idx] = EmbeddingNet( in_dim, self.neuron, self.activation_function, @@ -140,8 +143,9 @@ def __init__( self.precision, seed=child_seed(self.seed, ii), ) + self.embeddings = embeddings self.env_mat = EnvMat(self.rcut, self.rcut_smth, protection=self.env_protection) - self.nnei = np.sum(self.sel) + self.nnei = sum(self.sel) self.davg = np.zeros( [self.ntypes, self.nnei, 4], dtype=PRECISION_DICT[self.precision] ) @@ -299,20 +303,22 @@ def call( The smooth switch function. """ del mapping + xp = array_api_compat.array_namespace(coord_ext, atype_ext, nlist) # nf x nloc x nnei x 4 rr, diff, ww = self.env_mat.call( coord_ext, atype_ext, nlist, self.davg, self.dstd ) nf, nloc, nnei, _ = rr.shape - sec = np.append([0], np.cumsum(self.sel)) + sec = self.sel_cumsum ng = self.neuron[-1] - result = np.zeros([nf * nloc, ng], dtype=PRECISION_DICT[self.precision]) + result = xp.zeros([nf * nloc, ng], dtype=get_xp_precision(xp, self.precision)) exclude_mask = self.emask.build_type_exclude_mask(nlist, atype_ext) # merge nf and nloc axis, so for type_one_side == False, # we don't require atype is the same in all frames - exclude_mask = exclude_mask.reshape(nf * nloc, nnei) - rr = rr.reshape(nf * nloc, nnei, 4) + exclude_mask = xp.reshape(exclude_mask, (nf * nloc, nnei)) + rr = xp.reshape(rr, (nf * nloc, nnei, 4)) + rr = xp.astype(rr, get_xp_precision(xp, self.precision)) for embedding_idx in itertools.product( range(self.ntypes), repeat=self.embeddings.ndim @@ -325,23 +331,26 @@ def call( # nfnl x nt_i x 3 rr_i = rr[:, sec[ti] : sec[ti + 1], 1:] mm_i = exclude_mask[:, sec[ti] : sec[ti + 1]] - rr_i = rr_i * mm_i[:, :, None] + rr_i = rr_i * xp.astype(mm_i[:, :, None], rr_i.dtype) # nfnl x nt_j x 3 rr_j = rr[:, sec[tj] : sec[tj + 1], 1:] mm_j = exclude_mask[:, sec[tj] : sec[tj + 1]] - rr_j = rr_j * mm_j[:, :, None] + rr_j = rr_j * xp.astype(mm_j[:, :, None], rr_j.dtype) # nfnl x nt_i x nt_j - env_ij = np.einsum("ijm,ikm->ijk", rr_i, rr_j) + # env_ij = np.einsum("ijm,ikm->ijk", rr_i, rr_j) + env_ij = xp.sum(rr_i[:, :, None, :] * rr_j[:, None, :, :], axis=-1) # nfnl x nt_i x nt_j x 1 env_ij_reshape = env_ij[:, :, :, None] # nfnl x nt_i x nt_j x ng gg = self.embeddings[embedding_idx].call(env_ij_reshape) # nfnl x nt_i x nt_j x ng - res_ij = np.einsum("ijk,ijkm->im", env_ij, gg) + # res_ij = np.einsum("ijk,ijkm->im", env_ij, gg) + res_ij = xp.sum(env_ij[:, :, :, None] * gg, axis=(1, 2)) res_ij = res_ij * (1.0 / float(nei_type_i) / float(nei_type_j)) result += res_ij # nf x nloc x ng - result = result.reshape(nf, nloc, ng).astype(GLOBAL_NP_FLOAT_PRECISION) + result = xp.reshape(result, (nf, nloc, ng)) + result = xp.astype(result, get_xp_precision(xp, "global")) return result, None, None, None, ww def serialize(self) -> dict: @@ -369,8 +378,8 @@ def serialize(self) -> dict: "exclude_types": self.exclude_types, "env_protection": self.env_protection, "@variables": { - "davg": self.davg, - "dstd": self.dstd, + "davg": to_numpy_array(self.davg), + "dstd": to_numpy_array(self.dstd), }, "type_map": self.type_map, "trainable": self.trainable, diff --git a/deepmd/dpmodel/utils/network.py b/deepmd/dpmodel/utils/network.py index 5140a88c97..a81ddb69a6 100644 --- a/deepmd/dpmodel/utils/network.py +++ b/deepmd/dpmodel/utils/network.py @@ -572,11 +572,12 @@ def call(self, x): def clear(self): """Clear the network parameters to zero.""" for layer in self.layers: - layer.w.fill(0.0) + xp = array_api_compat.array_namespace(layer.w) + layer.w = xp.zeros_like(layer.w) if layer.b is not None: - layer.b.fill(0.0) + layer.b = xp.zeros_like(layer.b) if layer.idt is not None: - layer.idt.fill(0.0) + layer.idt = xp.zeros_like(layer.idt) return NN diff --git a/deepmd/jax/descriptor/__init__.py b/deepmd/jax/descriptor/__init__.py index cabee5a189..4e55bc7659 100644 --- a/deepmd/jax/descriptor/__init__.py +++ b/deepmd/jax/descriptor/__init__.py @@ -11,10 +11,14 @@ from deepmd.jax.descriptor.se_e2_r import ( DescrptSeR, ) +from deepmd.jax.descriptor.se_t import ( + DescrptSeT, +) __all__ = [ "DescrptSeA", "DescrptSeR", + "DescrptSeT", "DescrptDPA1", "DescrptHybrid", ] diff --git a/deepmd/jax/descriptor/se_t.py b/deepmd/jax/descriptor/se_t.py new file mode 100644 index 0000000000..029f4231fe --- /dev/null +++ b/deepmd/jax/descriptor/se_t.py @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.se_t import DescrptSeT as DescrptSeTDP +from deepmd.jax.common import ( + ArrayAPIVariable, + flax_module, + to_jax_array, +) +from deepmd.jax.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.jax.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.jax.utils.network import ( + NetworkCollection, +) + + +@BaseDescriptor.register("se_e3") +@BaseDescriptor.register("se_at") +@BaseDescriptor.register("se_a_3be") +@flax_module +class DescrptSeT(DescrptSeTDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"dstd", "davg"}: + value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) + elif name in {"embeddings"}: + if value is not None: + value = NetworkCollection.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) diff --git a/doc/model/train-se-e3.md b/doc/model/train-se-e3.md index 3d82c42c9e..714d75259a 100644 --- a/doc/model/train-se-e3.md +++ b/doc/model/train-se-e3.md @@ -1,7 +1,7 @@ -# Descriptor `"se_e3"` {{ tensorflow_icon }} {{ pytorch_icon }} {{ dpmodel_icon }} +# Descriptor `"se_e3"` {{ tensorflow_icon }} {{ pytorch_icon }} {{ jax_icon }} {{ dpmodel_icon }} :::{note} -**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, DP {{ dpmodel_icon }} +**Supported backends**: TensorFlow {{ tensorflow_icon }}, PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: The notation of `se_e3` is short for three-body embedding DeepPot-SE, which incorporates embedded bond-angle information. diff --git a/source/tests/array_api_strict/descriptor/se_t.py b/source/tests/array_api_strict/descriptor/se_t.py new file mode 100644 index 0000000000..13e650aa17 --- /dev/null +++ b/source/tests/array_api_strict/descriptor/se_t.py @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.se_t import DescrptSeT as DescrptSeTDP + +from ..common import ( + to_array_api_strict_array, +) +from ..utils.exclude_mask import ( + PairExcludeMask, +) +from ..utils.network import ( + NetworkCollection, +) + + +class DescrptSeT(DescrptSeTDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"dstd", "davg"}: + value = to_array_api_strict_array(value) + elif name in {"embeddings"}: + if value is not None: + value = NetworkCollection.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) diff --git a/source/tests/consistent/descriptor/test_se_t.py b/source/tests/consistent/descriptor/test_se_t.py index 833b76f6e1..1e6110705a 100644 --- a/source/tests/consistent/descriptor/test_se_t.py +++ b/source/tests/consistent/descriptor/test_se_t.py @@ -12,6 +12,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -29,6 +31,14 @@ from deepmd.tf.descriptor.se_t import DescrptSeT as DescrptSeTTF else: DescrptSeTTF = None +if INSTALLED_JAX: + from deepmd.jax.descriptor.se_t import DescrptSeT as DescrptSeTJAX +else: + DescrptSeTJAX = None +if INSTALLED_ARRAY_API_STRICT: + from ...array_api_strict.descriptor.se_t import DescrptSeT as DescrptSeTStrict +else: + DescrptSeTStrict = None from deepmd.utils.argcheck import ( descrpt_se_t_args, ) @@ -91,9 +101,14 @@ def skip_tf(self) -> bool: ) = self.param return env_protection != 0.0 or excluded_types + skip_array_api_strict = not INSTALLED_ARRAY_API_STRICT + skip_jax = not INSTALLED_JAX + tf_class = DescrptSeTTF dp_class = DescrptSeTDP pt_class = DescrptSeTPT + jax_class = DescrptSeTJAX + array_api_strict_class = DescrptSeTStrict args = descrpt_se_t_args() def setUp(self): @@ -168,6 +183,24 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) + def eval_jax(self, jax_obj: Any) -> Any: + return self.eval_jax_descriptor( + jax_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + return self.eval_array_api_strict_descriptor( + array_api_strict_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) From 8355947ef052fd3806a361e25bbcff18f3e2f627 Mon Sep 17 00:00:00 2001 From: Yan Wang <116817801+cherryWangY@users.noreply.github.com> Date: Fri, 1 Nov 2024 19:41:59 +0800 Subject: [PATCH 115/193] Add 4 pt descriptor compression (#4227) se_a, se_atten(DPA1), se_t, se_r ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced a model compression feature across multiple descriptor classes, enhancing performance and efficiency. - Added `enable_compression` methods to various classes, allowing users to enable and configure compression settings. - **Bug Fixes** - Improved error handling for unsupported compression scenarios and parameter validation. - **Tests** - Added comprehensive unit tests for new compression functionalities across multiple descriptor classes to ensure accuracy and reliability. - **Documentation** - Enhanced documentation for new methods and classes to clarify usage and parameters related to compression. --------- Signed-off-by: Jinzhe Zeng Signed-off-by: Yan Wang <116817801+cherryWangY@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinzhe Zeng --- .../descriptor/make_base_descriptor.py | 25 + deepmd/pt/model/descriptor/dpa1.py | 87 +++ deepmd/pt/model/descriptor/se_a.py | 134 +++- deepmd/pt/model/descriptor/se_atten.py | 163 ++++- deepmd/pt/model/descriptor/se_r.py | 116 +++- deepmd/pt/model/descriptor/se_t.py | 131 +++- deepmd/pt/utils/tabulate.py | 607 ++++++++++++++++++ deepmd/tf/utils/tabulate.py | 369 +---------- deepmd/utils/tabulate.py | 458 +++++++++++++ source/op/pt/tabulate_multi_device.cc | 8 +- .../model/test_compressed_descriptor_se_a.py | 132 ++++ .../test_compressed_descriptor_se_atten.py | 142 ++++ .../model/test_compressed_descriptor_se_r.py | 129 ++++ .../model/test_compressed_descriptor_se_t.py | 129 ++++ source/tests/pt/test_tabulate.py | 135 ++++ 15 files changed, 2377 insertions(+), 388 deletions(-) create mode 100644 deepmd/pt/utils/tabulate.py create mode 100644 deepmd/utils/tabulate.py create mode 100644 source/tests/pt/model/test_compressed_descriptor_se_a.py create mode 100644 source/tests/pt/model/test_compressed_descriptor_se_atten.py create mode 100644 source/tests/pt/model/test_compressed_descriptor_se_r.py create mode 100644 source/tests/pt/model/test_compressed_descriptor_se_t.py create mode 100644 source/tests/pt/test_tabulate.py diff --git a/deepmd/dpmodel/descriptor/make_base_descriptor.py b/deepmd/dpmodel/descriptor/make_base_descriptor.py index b9c1e93387..9f2891d8c0 100644 --- a/deepmd/dpmodel/descriptor/make_base_descriptor.py +++ b/deepmd/dpmodel/descriptor/make_base_descriptor.py @@ -147,6 +147,31 @@ def compute_input_stats( """Update mean and stddev for descriptor elements.""" raise NotImplementedError + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + raise NotImplementedError("This descriptor doesn't support compression!") + @abstractmethod def fwd( self, diff --git a/deepmd/pt/model/descriptor/dpa1.py b/deepmd/pt/model/descriptor/dpa1.py index d3156f7c84..76115b2810 100644 --- a/deepmd/pt/model/descriptor/dpa1.py +++ b/deepmd/pt/model/descriptor/dpa1.py @@ -24,9 +24,15 @@ from deepmd.pt.utils.env import ( RESERVED_PRECISON_DICT, ) +from deepmd.pt.utils.tabulate import ( + DPTabulate, +) from deepmd.pt.utils.update_sel import ( UpdateSel, ) +from deepmd.pt.utils.utils import ( + ActivationFn, +) from deepmd.utils.data_system import ( DeepmdDataSystem, ) @@ -261,6 +267,8 @@ def __init__( if ln_eps is None: ln_eps = 1e-5 + self.tebd_input_mode = tebd_input_mode + del type, spin, attn_mask self.se_atten = DescrptBlockSeAtten( rcut, @@ -293,6 +301,7 @@ def __init__( self.use_econf_tebd = use_econf_tebd self.use_tebd_bias = use_tebd_bias self.type_map = type_map + self.compress = False self.type_embedding = TypeEmbedNet( ntypes, tebd_dim, @@ -551,6 +560,84 @@ def t_cvt(xx): ) return obj + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + # do some checks before the mocel compression process + if self.compress: + raise ValueError("Compression is already enabled.") + assert ( + not self.se_atten.resnet_dt + ), "Model compression error: descriptor resnet_dt must be false!" + for tt in self.se_atten.exclude_types: + if (tt[0] not in range(self.se_atten.ntypes)) or ( + tt[1] not in range(self.se_atten.ntypes) + ): + raise RuntimeError( + "exclude types" + + str(tt) + + " must within the number of atomic types " + + str(self.se_atten.ntypes) + + "!" + ) + if ( + self.se_atten.ntypes * self.se_atten.ntypes + - len(self.se_atten.exclude_types) + == 0 + ): + raise RuntimeError( + "Empty embedding-nets are not supported in model compression!" + ) + + if self.se_atten.attn_layer != 0: + raise RuntimeError("Cannot compress model when attention layer is not 0.") + + if self.tebd_input_mode != "strip": + raise RuntimeError("Cannot compress model when tebd_input_mode == 'concat'") + + data = self.serialize() + self.table = DPTabulate( + self, + data["neuron"], + data["type_one_side"], + data["exclude_types"], + ActivationFn(data["activation_function"]), + ) + self.table_config = [ + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ] + self.lower, self.upper = self.table.build( + min_nbor_dist, table_extrapolate, table_stride_1, table_stride_2 + ) + + self.se_atten.enable_compression( + self.table.data, self.table_config, self.lower, self.upper + ) + self.compress = True + def forward( self, extended_coord: torch.Tensor, diff --git a/deepmd/pt/model/descriptor/se_a.py b/deepmd/pt/model/descriptor/se_a.py index 56cb1f5bc6..630b96ce9b 100644 --- a/deepmd/pt/model/descriptor/se_a.py +++ b/deepmd/pt/model/descriptor/se_a.py @@ -58,11 +58,34 @@ from deepmd.pt.utils.exclude_mask import ( PairExcludeMask, ) +from deepmd.pt.utils.tabulate import ( + DPTabulate, +) +from deepmd.pt.utils.utils import ( + ActivationFn, +) from .base_descriptor import ( BaseDescriptor, ) +if not hasattr(torch.ops.deepmd, "tabulate_fusion_se_a"): + + def tabulate_fusion_se_a( + argument0, + argument1, + argument2, + argument3, + argument4, + ) -> list[torch.Tensor]: + raise NotImplementedError( + "tabulate_fusion_se_a is not available since customized PyTorch OP library is not built when freezing the model. " + "See documentation for model compression for details." + ) + + # Note: this hack cannot actually save a model that can be runned using LAMMPS. + torch.ops.deepmd.tabulate_fusion_se_a = tabulate_fusion_se_a + @BaseDescriptor.register("se_e2_a") @BaseDescriptor.register("se_a") @@ -93,6 +116,7 @@ def __init__( raise NotImplementedError("old implementation of spin is not supported.") super().__init__() self.type_map = type_map + self.compress = False self.sea = DescrptBlockSeA( rcut, rcut_smth, @@ -225,6 +249,53 @@ def reinit_exclude( """Update the type exclusions.""" self.sea.reinit_exclude(exclude_types) + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + if self.compress: + raise ValueError("Compression is already enabled.") + data = self.serialize() + self.table = DPTabulate( + self, + data["neuron"], + data["type_one_side"], + data["exclude_types"], + ActivationFn(data["activation_function"]), + ) + self.table_config = [ + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ] + self.lower, self.upper = self.table.build( + min_nbor_dist, table_extrapolate, table_stride_1, table_stride_2 + ) + self.sea.enable_compression( + self.table.data, self.table_config, self.lower, self.upper + ) + self.compress = True + def forward( self, coord_ext: torch.Tensor, @@ -366,6 +437,10 @@ def update_sel( class DescrptBlockSeA(DescriptorBlock): ndescrpt: Final[int] __constants__: ClassVar[list] = ["ndescrpt"] + lower: dict[str, int] + upper: dict[str, int] + table_data: dict[str, torch.Tensor] + table_config: list[Union[int, float]] def __init__( self, @@ -425,6 +500,13 @@ def __init__( self.register_buffer("mean", mean) self.register_buffer("stddev", stddev) + # add for compression + self.compress = False + self.lower = {} + self.upper = {} + self.table_data = {} + self.table_config = [] + ndim = 1 if self.type_one_side else 2 filter_layers = NetworkCollection( ndim=ndim, ntypes=len(sel), network_type="embedding_network" @@ -443,6 +525,7 @@ def __init__( self.filter_layers = filter_layers self.stats = None # set trainable + self.trainable = trainable for param in self.parameters(): param.requires_grad = trainable @@ -470,6 +553,10 @@ def get_dim_out(self) -> int: """Returns the output dimension.""" return self.dim_out + def get_dim_rot_mat_1(self) -> int: + """Returns the first dimension of the rotation matrix. The rotation is of shape dim_1 x 3.""" + return self.filter_neuron[-1] + def get_dim_emb(self) -> int: """Returns the output dimension.""" return self.neuron[-1] @@ -578,6 +665,19 @@ def reinit_exclude( self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + def enable_compression( + self, + table_data, + table_config, + lower, + upper, + ) -> None: + self.compress = True + self.table_data = table_data + self.table_config = table_config + self.lower = lower + self.upper = upper + def forward( self, nlist: torch.Tensor, @@ -627,6 +727,7 @@ def forward( for embedding_idx, ll in enumerate(self.filter_layers.networks): if self.type_one_side: ii = embedding_idx + ti = -1 # torch.jit is not happy with slice(None) # ti_mask = torch.ones(nfnl, dtype=torch.bool, device=dmatrix.device) # applying a mask seems to cause performance degradation @@ -648,10 +749,35 @@ def forward( rr = dmatrix[:, self.sec[ii] : self.sec[ii + 1], :] rr = rr * mm[:, :, None] ss = rr[:, :, :1] - # nfnl x nt x ng - gg = ll.forward(ss) - # nfnl x 4 x ng - gr = torch.matmul(rr.permute(0, 2, 1), gg) + + if self.compress: + if self.type_one_side: + net = "filter_-1_net_" + str(ii) + else: + net = "filter_" + str(ti) + "_net_" + str(ii) + info = [ + self.lower[net], + self.upper[net], + self.upper[net] * self.table_config[0], + self.table_config[1], + self.table_config[2], + self.table_config[3], + ] + ss = ss.reshape(-1, 1) # xyz_scatter_tensor in tf + tensor_data = self.table_data[net].to(ss.device).to(dtype=self.prec) + gr = torch.ops.deepmd.tabulate_fusion_se_a( + tensor_data.contiguous(), + torch.tensor(info, dtype=self.prec, device="cpu").contiguous(), + ss.contiguous(), + rr.contiguous(), + self.filter_neuron[-1], + )[0] + else: + # nfnl x nt x ng + gg = ll.forward(ss) + # nfnl x 4 x ng + gr = torch.matmul(rr.permute(0, 2, 1), gg) + if ti_mask is not None: xyz_scatter[ti_mask] += gr else: diff --git a/deepmd/pt/model/descriptor/se_atten.py b/deepmd/pt/model/descriptor/se_atten.py index aab72f7e98..8c56ccf827 100644 --- a/deepmd/pt/model/descriptor/se_atten.py +++ b/deepmd/pt/model/descriptor/se_atten.py @@ -49,9 +49,33 @@ check_version_compatibility, ) +if not hasattr(torch.ops.deepmd, "tabulate_fusion_se_atten"): + + def tabulate_fusion_se_atten( + argument0, + argument1, + argument2, + argument3, + argument4, + argument5, + argument6, + ) -> list[torch.Tensor]: + raise NotImplementedError( + "tabulate_fusion_se_atten is not available since customized PyTorch OP library is not built when freezing the model. " + "See documentation for model compression for details." + ) + + # Note: this hack cannot actually save a model that can be runned using LAMMPS. + torch.ops.deepmd.tabulate_fusion_se_atten = tabulate_fusion_se_atten + @DescriptorBlock.register("se_atten") class DescrptBlockSeAtten(DescriptorBlock): + lower: dict[str, int] + upper: dict[str, int] + table_data: dict[str, torch.Tensor] + table_config: list[Union[int, float]] + def __init__( self, rcut: float, @@ -178,6 +202,14 @@ def __init__( ln_eps = 1e-5 self.ln_eps = ln_eps + # add for compression + self.compress = False + self.is_sorted = False + self.lower = {} + self.upper = {} + self.table_data = {} + self.table_config = [] + if isinstance(sel, int): sel = [sel] @@ -189,6 +221,7 @@ def __init__( self.ndescrpt = self.nnei * 4 # order matters, placed after the assignment of self.ntypes self.reinit_exclude(exclude_types) + self.dpa1_attention = NeighborGatedAttention( self.attn_layer, self.nnei, @@ -277,6 +310,10 @@ def get_dim_out(self) -> int: """Returns the output dimension.""" return self.dim_out + def get_dim_rot_mat_1(self) -> int: + """Returns the first dimension of the rotation matrix. The rotation is of shape dim_1 x 3.""" + return self.filter_neuron[-1] + def get_dim_emb(self) -> int: """Returns the output dimension of embedding.""" return self.filter_neuron[-1] @@ -384,8 +421,22 @@ def reinit_exclude( exclude_types: list[tuple[int, int]] = [], ): self.exclude_types = exclude_types + self.is_sorted = len(self.exclude_types) == 0 self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + def enable_compression( + self, + table_data, + table_config, + lower, + upper, + ) -> None: + self.compress = True + self.table_data = table_data + self.table_config = table_config + self.lower = lower + self.upper = upper + def forward( self, nlist: torch.Tensor, @@ -450,20 +501,21 @@ def forward( sw = torch.squeeze(sw, -1) # nf x nloc x nt -> nf x nloc x nnei x nt atype_tebd = extended_atype_embd[:, :nloc, :] - atype_tebd_nnei = atype_tebd.unsqueeze(2).expand(-1, -1, self.nnei, -1) + atype_tebd_nnei = atype_tebd.unsqueeze(2).expand(-1, -1, self.nnei, -1) # i # nf x nall x nt nt = extended_atype_embd.shape[-1] atype_tebd_ext = extended_atype_embd # nb x (nloc x nnei) x nt index = nlist.reshape(nb, nloc * nnei).unsqueeze(-1).expand(-1, -1, nt) # nb x (nloc x nnei) x nt - atype_tebd_nlist = torch.gather(atype_tebd_ext, dim=1, index=index) + atype_tebd_nlist = torch.gather(atype_tebd_ext, dim=1, index=index) # j # nb x nloc x nnei x nt atype_tebd_nlist = atype_tebd_nlist.view(nb, nloc, nnei, nt) # beyond the cutoff sw should be 0.0 sw = sw.masked_fill(~nlist_mask, 0.0) # (nb x nloc) x nnei exclude_mask = exclude_mask.view(nb * nloc, nnei) + # nfnl x nnei x 4 dmatrix = dmatrix.view(-1, self.nnei, 4) nfnl = dmatrix.shape[0] @@ -482,33 +534,91 @@ def forward( ss = torch.concat([ss, nlist_tebd], dim=2) # nfnl x nnei x ng gg = self.filter_layers.networks[0](ss) + input_r = torch.nn.functional.normalize( + rr.reshape(-1, self.nnei, 4)[:, :, 1:4], dim=-1 + ) + gg = self.dpa1_attention( + gg, nlist_mask, input_r=input_r, sw=sw + ) # shape is [nframes*nloc, self.neei, out_size] + # nfnl x 4 x ng + xyz_scatter = torch.matmul(rr.permute(0, 2, 1), gg) elif self.tebd_input_mode in ["strip"]: - # nfnl x nnei x ng - gg_s = self.filter_layers.networks[0](ss) - assert self.filter_layers_strip is not None - if not self.type_one_side: - # nfnl x nnei x (tebd_dim * 2) - tt = torch.concat([nlist_tebd, atype_tebd], dim=2) + if self.compress: + net = "filter_net" + info = [ + self.lower[net], + self.upper[net], + self.upper[net] * self.table_config[0], + self.table_config[1], + self.table_config[2], + self.table_config[3], + ] + ss = ss.reshape(-1, 1) + # nfnl x nnei x ng + # gg_s = self.filter_layers.networks[0](ss) + assert self.filter_layers_strip is not None + if not self.type_one_side: + # nfnl x nnei x (tebd_dim * 2) + tt = torch.concat([nlist_tebd, atype_tebd], dim=2) # dynamic, index + else: + # nfnl x nnei x tebd_dim + tt = nlist_tebd + # nfnl x nnei x ng + gg_t = self.filter_layers_strip.networks[0](tt) + if self.smooth: + gg_t = gg_t * sw.reshape(-1, self.nnei, 1) + # nfnl x nnei x ng + # gg = gg_s * gg_t + gg_s + tensor_data = self.table_data[net].to(gg_t.device).to(dtype=self.prec) + info_tensor = torch.tensor(info, dtype=self.prec, device="cpu") + gg_t = gg_t.reshape(-1, gg_t.size(-1)) + # Convert all tensors to the required precision at once + ss, rr, gg_t = (t.to(self.prec) for t in (ss, rr, gg_t)) + xyz_scatter = torch.ops.deepmd.tabulate_fusion_se_atten( + tensor_data.contiguous(), + info_tensor.contiguous(), + ss.contiguous(), + rr.contiguous(), + gg_t.contiguous(), + self.filter_neuron[-1], + self.is_sorted, + )[0] + # to make torchscript happy + gg = torch.empty( + nframes, + nloc, + self.nnei, + self.filter_neuron[-1], + dtype=gg_t.dtype, + device=gg_t.device, + ) else: - # nfnl x nnei x tebd_dim - tt = nlist_tebd - # nfnl x nnei x ng - gg_t = self.filter_layers_strip.networks[0](tt) - if self.smooth: - gg_t = gg_t * sw.reshape(-1, self.nnei, 1) - # nfnl x nnei x ng - gg = gg_s * gg_t + gg_s + # nfnl x nnei x ng + gg_s = self.filter_layers.networks[0](ss) + assert self.filter_layers_strip is not None + if not self.type_one_side: + # nfnl x nnei x (tebd_dim * 2) + tt = torch.concat([nlist_tebd, atype_tebd], dim=2) # dynamic, index + else: + # nfnl x nnei x tebd_dim + tt = nlist_tebd + # nfnl x nnei x ng + gg_t = self.filter_layers_strip.networks[0](tt) + if self.smooth: + gg_t = gg_t * sw.reshape(-1, self.nnei, 1) + # nfnl x nnei x ng + gg = gg_s * gg_t + gg_s + input_r = torch.nn.functional.normalize( + rr.reshape(-1, self.nnei, 4)[:, :, 1:4], dim=-1 + ) + gg = self.dpa1_attention( + gg, nlist_mask, input_r=input_r, sw=sw + ) # shape is [nframes*nloc, self.neei, out_size] + # nfnl x 4 x ng + xyz_scatter = torch.matmul(rr.permute(0, 2, 1), gg) else: raise NotImplementedError - input_r = torch.nn.functional.normalize( - rr.reshape(-1, self.nnei, 4)[:, :, 1:4], dim=-1 - ) - gg = self.dpa1_attention( - gg, nlist_mask, input_r=input_r, sw=sw - ) # shape is [nframes*nloc, self.neei, out_size] - # nfnl x 4 x ng - xyz_scatter = torch.matmul(rr.permute(0, 2, 1), gg) xyz_scatter = xyz_scatter / self.nnei xyz_scatter_1 = xyz_scatter.permute(0, 2, 1) rot_mat = xyz_scatter_1[:, :, 1:4] @@ -516,9 +626,12 @@ def forward( result = torch.matmul( xyz_scatter_1, xyz_scatter_2 ) # shape is [nframes*nloc, self.filter_neuron[-1], self.axis_neuron] + return ( result.view(nframes, nloc, self.filter_neuron[-1] * self.axis_neuron), - gg.view(nframes, nloc, self.nnei, self.filter_neuron[-1]), + gg.view(nframes, nloc, self.nnei, self.filter_neuron[-1]) + if not self.compress + else None, dmatrix.view(nframes, nloc, self.nnei, 4)[..., 1:], rot_mat.view(nframes, nloc, self.filter_neuron[-1], 3), sw, diff --git a/deepmd/pt/model/descriptor/se_r.py b/deepmd/pt/model/descriptor/se_r.py index 0aa50c613f..4a74b7671f 100644 --- a/deepmd/pt/model/descriptor/se_r.py +++ b/deepmd/pt/model/descriptor/se_r.py @@ -32,9 +32,15 @@ from deepmd.pt.utils.exclude_mask import ( PairExcludeMask, ) +from deepmd.pt.utils.tabulate import ( + DPTabulate, +) from deepmd.pt.utils.update_sel import ( UpdateSel, ) +from deepmd.pt.utils.utils import ( + ActivationFn, +) from deepmd.utils.data_system import ( DeepmdDataSystem, ) @@ -52,10 +58,31 @@ BaseDescriptor, ) +if not hasattr(torch.ops.deepmd, "tabulate_fusion_se_r"): + + def tabulate_fusion_se_r( + argument0, + argument1, + argument2, + argument3, + ) -> list[torch.Tensor]: + raise NotImplementedError( + "tabulate_fusion_se_r is not available since customized PyTorch OP library is not built when freezing the model. " + "See documentation for model compression for details." + ) + + # Note: this hack cannot actually save a model that can be runned using LAMMPS. + torch.ops.deepmd.tabulate_fusion_se_r = tabulate_fusion_se_r + @BaseDescriptor.register("se_e2_r") @BaseDescriptor.register("se_r") class DescrptSeR(BaseDescriptor, torch.nn.Module): + lower: dict[str, int] + upper: dict[str, int] + table_data: dict[str, torch.Tensor] + table_config: list[Union[int, float]] + def __init__( self, rcut, @@ -90,6 +117,12 @@ def __init__( # order matters, placed after the assignment of self.ntypes self.reinit_exclude(exclude_types) self.env_protection = env_protection + # add for compression + self.compress = False + self.lower = {} + self.upper = {} + self.table_data = {} + self.table_config = [] self.sel = sel self.sec = torch.tensor( @@ -123,6 +156,7 @@ def __init__( self.filter_layers = filter_layers self.stats = None # set trainable + self.trainable = trainable for param in self.parameters(): param.requires_grad = trainable @@ -313,6 +347,51 @@ def reinit_exclude( self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + if self.compress: + raise ValueError("Compression is already enabled.") + data = self.serialize() + self.table = DPTabulate( + self, + data["neuron"], + data["type_one_side"], + data["exclude_types"], + ActivationFn(data["activation_function"]), + ) + self.table_config = [ + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ] + self.lower, self.upper = self.table.build( + min_nbor_dist, table_extrapolate, table_stride_1, table_stride_2 + ) + self.table_data = self.table.data + self.compress = True + def forward( self, coord_ext: torch.Tensor, @@ -353,7 +432,7 @@ def forward( The smooth switch function. """ - del mapping + del mapping, comm_dict nf = nlist.shape[0] nloc = nlist.shape[1] atype = atype_ext[:, :nloc] @@ -380,19 +459,44 @@ def forward( # nfnl x nnei exclude_mask = self.emask(nlist, atype_ext).view(nfnl, self.nnei) + xyz_scatter_total = [] for ii, ll in enumerate(self.filter_layers.networks): # nfnl x nt mm = exclude_mask[:, self.sec[ii] : self.sec[ii + 1]] # nfnl x nt x 1 ss = dmatrix[:, self.sec[ii] : self.sec[ii + 1], :] ss = ss * mm[:, :, None] - # nfnl x nt x ng - gg = ll.forward(ss) - gg = torch.mean(gg, dim=1).unsqueeze(1) - xyz_scatter += gg * (self.sel[ii] / self.nnei) + if self.compress: + ss = ss.squeeze(-1) + net = "filter_-1_net_" + str(ii) + info = [ + self.lower[net], + self.upper[net], + self.upper[net] * self.table_config[0], + self.table_config[1], + self.table_config[2], + self.table_config[3], + ] + tensor_data = self.table_data[net].to(ss.device).to(dtype=self.prec) + xyz_scatter = torch.ops.deepmd.tabulate_fusion_se_r( + tensor_data.contiguous(), + torch.tensor(info, dtype=self.prec, device="cpu").contiguous(), + ss, + self.filter_neuron[-1], + )[0] + xyz_scatter_total.append(xyz_scatter) + else: + # nfnl x nt x ng + gg = ll.forward(ss) + gg = torch.mean(gg, dim=1).unsqueeze(1) + xyz_scatter += gg * (self.sel[ii] / self.nnei) res_rescale = 1.0 / 5.0 - result = xyz_scatter * res_rescale + if self.compress: + xyz_scatter = torch.cat(xyz_scatter_total, dim=1) + result = torch.mean(xyz_scatter, dim=1) * res_rescale + else: + result = xyz_scatter * res_rescale result = result.view(nf, nloc, self.filter_neuron[-1]) return ( result.to(dtype=env.GLOBAL_PT_FLOAT_PRECISION), diff --git a/deepmd/pt/model/descriptor/se_t.py b/deepmd/pt/model/descriptor/se_t.py index 7b83bcbd69..5a634d7549 100644 --- a/deepmd/pt/model/descriptor/se_t.py +++ b/deepmd/pt/model/descriptor/se_t.py @@ -58,11 +58,34 @@ from deepmd.pt.utils.exclude_mask import ( PairExcludeMask, ) +from deepmd.pt.utils.tabulate import ( + DPTabulate, +) +from deepmd.pt.utils.utils import ( + ActivationFn, +) from .base_descriptor import ( BaseDescriptor, ) +if not hasattr(torch.ops.deepmd, "tabulate_fusion_se_t"): + + def tabulate_fusion_se_t( + argument0, + argument1, + argument2, + argument3, + argument4, + ) -> list[torch.Tensor]: + raise NotImplementedError( + "tabulate_fusion_se_t is not available since customized PyTorch OP library is not built when freezing the model. " + "See documentation for model compression for details." + ) + + # Note: this hack cannot actually save a model that can be runned using LAMMPS. + torch.ops.deepmd.tabulate_fusion_se_t = tabulate_fusion_se_t + @BaseDescriptor.register("se_e3") @BaseDescriptor.register("se_at") @@ -129,6 +152,7 @@ def __init__( raise NotImplementedError("old implementation of spin is not supported.") super().__init__() self.type_map = type_map + self.compress = False self.seat = DescrptBlockSeT( rcut, rcut_smth, @@ -252,6 +276,54 @@ def compute_input_stats( """ return self.seat.compute_input_stats(merged, path) + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + if self.compress: + raise ValueError("Compression is already enabled.") + data = self.serialize() + self.table = DPTabulate( + self, + data["neuron"], + exclude_types=data["exclude_types"], + activation_fn=ActivationFn(data["activation_function"]), + ) + stride_1_scaled = table_stride_1 * 10 + stride_2_scaled = table_stride_2 * 10 + self.table_config = [ + table_extrapolate, + stride_1_scaled, + stride_2_scaled, + check_frequency, + ] + self.lower, self.upper = self.table.build( + min_nbor_dist, table_extrapolate, stride_1_scaled, stride_2_scaled + ) + self.seat.enable_compression( + self.table.data, self.table_config, self.lower, self.upper + ) + self.compress = True + def reinit_exclude( self, exclude_types: list[tuple[int, int]] = [], @@ -396,6 +468,10 @@ def update_sel( class DescrptBlockSeT(DescriptorBlock): ndescrpt: Final[int] __constants__: ClassVar[list] = ["ndescrpt"] + lower: dict[str, int] + upper: dict[str, int] + table_data: dict[str, torch.Tensor] + table_config: list[Union[int, float]] def __init__( self, @@ -467,6 +543,12 @@ def __init__( self.split_sel = self.sel self.nnei = sum(sel) self.ndescrpt = self.nnei * 4 + # add for compression + self.compress = False + self.lower = {} + self.upper = {} + self.table_data = {} + self.table_config = [] wanted_shape = (self.ntypes, self.nnei, 4) mean = torch.zeros(wanted_shape, dtype=self.prec, device=env.DEVICE) @@ -628,6 +710,19 @@ def reinit_exclude( self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + def enable_compression( + self, + table_data, + table_config, + lower, + upper, + ) -> None: + self.compress = True + self.table_data = table_data + self.table_config = table_config + self.lower = lower + self.upper = upper + def forward( self, nlist: torch.Tensor, @@ -711,12 +806,36 @@ def forward( rr_j = rr_j * mm_j[:, :, None] # nfnl x nt_i x nt_j env_ij = torch.einsum("ijm,ikm->ijk", rr_i, rr_j) - # nfnl x nt_i x nt_j x 1 - env_ij_reshape = env_ij.unsqueeze(-1) - # nfnl x nt_i x nt_j x ng - gg = ll.forward(env_ij_reshape) - # nfnl x nt_i x nt_j x ng - res_ij = torch.einsum("ijk,ijkm->im", env_ij, gg) + if self.compress: + ebd_env_ij = env_ij.view(-1, 1) + net = "filter_" + str(ti) + "_net_" + str(tj) + info = [ + self.lower[net], + self.upper[net], + self.upper[net] * self.table_config[0], + self.table_config[1], + self.table_config[2], + self.table_config[3], + ] + tensor_data = ( + self.table_data[net].to(env_ij.device).to(dtype=self.prec) + ) + ebd_env_ij = ebd_env_ij.to(dtype=self.prec) + env_ij = env_ij.to(dtype=self.prec) + res_ij = torch.ops.deepmd.tabulate_fusion_se_t( + tensor_data.contiguous(), + torch.tensor(info, dtype=self.prec, device="cpu").contiguous(), + ebd_env_ij.contiguous(), + env_ij.contiguous(), + self.filter_neuron[-1], + )[0] + else: + # nfnl x nt_i x nt_j x 1 + env_ij_reshape = env_ij.unsqueeze(-1) + # nfnl x nt_i x nt_j x ng + gg = ll.forward(env_ij_reshape) + # nfnl x nt_i x nt_j x ng + res_ij = torch.einsum("ijk,ijkm->im", env_ij, gg) res_ij = res_ij * (1.0 / float(nei_type_i) / float(nei_type_j)) result += res_ij # xyz_scatter /= (self.nnei * self.nnei) diff --git a/deepmd/pt/utils/tabulate.py b/deepmd/pt/utils/tabulate.py new file mode 100644 index 0000000000..7394ac082d --- /dev/null +++ b/deepmd/pt/utils/tabulate.py @@ -0,0 +1,607 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +from functools import ( + cached_property, +) + +import numpy as np +import torch + +import deepmd +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.utils import ( + ActivationFn, +) +from deepmd.utils.tabulate import ( + BaseTabulate, +) + +log = logging.getLogger(__name__) + +SQRT_2_PI = np.sqrt(2 / np.pi) +GGELU = 0.044715 + + +class DPTabulate(BaseTabulate): + r"""Class for tabulation. + + Compress a model, which including tabulating the embedding-net. + The table is composed of fifth-order polynomial coefficients and is assembled from two sub-tables. The first table takes the stride(parameter) as it's uniform stride, while the second table takes 10 * stride as it's uniform stride + The range of the first table is automatically detected by deepmd-kit, while the second table ranges from the first table's upper boundary(upper) to the extrapolate(parameter) * upper. + + Parameters + ---------- + descrpt + Descriptor of the original model + neuron + Number of neurons in each hidden layers of the embedding net :math:`\\mathcal{N}` + type_one_side + Try to build N_types tables. Otherwise, building N_types^2 tables + exclude_types : List[List[int]] + The excluded pairs of types which have no interaction with each other. + For example, `[[0, 1]]` means no interaction between type 0 and type 1. + activation_function + The activation function in the embedding net. Supported options are {"tanh","gelu"} in common.ActivationFn. + """ + + def __init__( + self, + descrpt, + neuron: list[int], + type_one_side: bool = False, + exclude_types: list[list[int]] = [], + activation_fn: ActivationFn = ActivationFn("tanh"), + ) -> None: + super().__init__( + descrpt, + neuron, + type_one_side, + exclude_types, + True, + ) + self.descrpt_type = self._get_descrpt_type() + + supported_descrpt_type = ( + "Atten", + "A", + "T", + "R", + ) + + if self.descrpt_type in supported_descrpt_type: + self.sel_a = self.descrpt.get_sel() + self.rcut = self.descrpt.get_rcut() + self.rcut_smth = self.descrpt.get_rcut_smth() + else: + raise RuntimeError("Unsupported descriptor") + + # functype + activation_map = { + "tanh": 1, + "gelu": 2, + "gelu_tf": 2, + "relu": 3, + "relu6": 4, + "softplus": 5, + "sigmoid": 6, + } + + activation = activation_fn.activation + if activation in activation_map: + self.functype = activation_map[activation] + else: + raise RuntimeError("Unknown activation function type!") + + self.activation_fn = activation_fn + self.davg = self.descrpt.serialize()["@variables"]["davg"] + self.dstd = self.descrpt.serialize()["@variables"]["dstd"] + self.ntypes = self.descrpt.get_ntypes() + + self.embedding_net_nodes = self.descrpt.serialize()["embeddings"]["networks"] + + self.layer_size = self._get_layer_size() + self.table_size = self._get_table_size() + + self.bias = self._get_bias() + self.matrix = self._get_matrix() + + self.data_type = self._get_data_type() + self.last_layer_size = self._get_last_layer_size() + + def _make_data(self, xx, idx): + """Generate tabulation data for the given input. + + Parameters + ---------- + xx : np.ndarray + Input values to tabulate + idx : int + Index for accessing the correct network parameters + + Returns + ------- + tuple[np.ndarray, np.ndarray, np.ndarray] + Values, first derivatives, and second derivatives + """ + xx = torch.from_numpy(xx).view(-1, 1).to(env.DEVICE) + for layer in range(self.layer_size): + if layer == 0: + xbar = torch.matmul( + xx, + torch.from_numpy(self.matrix["layer_" + str(layer + 1)][idx]).to( + env.DEVICE + ), + ) + torch.from_numpy(self.bias["layer_" + str(layer + 1)][idx]).to( + env.DEVICE + ) + if self.neuron[0] == 1: + yy = ( + self._layer_0( + xx, + self.matrix["layer_" + str(layer + 1)][idx], + self.bias["layer_" + str(layer + 1)][idx], + ) + + xx + ) + dy = unaggregated_dy_dx_s( + yy - xx, + self.matrix["layer_" + str(layer + 1)][idx], + xbar, + self.functype, + ) + torch.ones((1, 1), dtype=yy.dtype) # pylint: disable=no-explicit-device + dy2 = unaggregated_dy2_dx_s( + yy - xx, + dy, + self.matrix["layer_" + str(layer + 1)][idx], + xbar, + self.functype, + ) + elif self.neuron[0] == 2: + tt, yy = self._layer_1( + xx, + self.matrix["layer_" + str(layer + 1)][idx], + self.bias["layer_" + str(layer + 1)][idx], + ) + dy = unaggregated_dy_dx_s( + yy - tt, + self.matrix["layer_" + str(layer + 1)][idx], + xbar, + self.functype, + ) + torch.ones((1, 2), dtype=yy.dtype) # pylint: disable=no-explicit-device + dy2 = unaggregated_dy2_dx_s( + yy - tt, + dy, + self.matrix["layer_" + str(layer + 1)][idx], + xbar, + self.functype, + ) + else: + yy = self._layer_0( + xx, + self.matrix["layer_" + str(layer + 1)][idx], + self.bias["layer_" + str(layer + 1)][idx], + ) + dy = unaggregated_dy_dx_s( + yy, + self.matrix["layer_" + str(layer + 1)][idx], + xbar, + self.functype, + ) + dy2 = unaggregated_dy2_dx_s( + yy, + dy, + self.matrix["layer_" + str(layer + 1)][idx], + xbar, + self.functype, + ) + else: + ybar = torch.matmul( + yy, + torch.from_numpy(self.matrix["layer_" + str(layer + 1)][idx]).to( + env.DEVICE + ), + ) + torch.from_numpy(self.bias["layer_" + str(layer + 1)][idx]).to( + env.DEVICE + ) + if self.neuron[layer] == self.neuron[layer - 1]: + zz = ( + self._layer_0( + yy, + self.matrix["layer_" + str(layer + 1)][idx], + self.bias["layer_" + str(layer + 1)][idx], + ) + + yy + ) + dz = unaggregated_dy_dx( + zz - yy, + self.matrix["layer_" + str(layer + 1)][idx], + dy, + ybar, + self.functype, + ) + dy2 = unaggregated_dy2_dx( + zz - yy, + self.matrix["layer_" + str(layer + 1)][idx], + dy, + dy2, + ybar, + self.functype, + ) + elif self.neuron[layer] == 2 * self.neuron[layer - 1]: + tt, zz = self._layer_1( + yy, + self.matrix["layer_" + str(layer + 1)][idx], + self.bias["layer_" + str(layer + 1)][idx], + ) + dz = unaggregated_dy_dx( + zz - tt, + self.matrix["layer_" + str(layer + 1)][idx], + dy, + ybar, + self.functype, + ) + dy2 = unaggregated_dy2_dx( + zz - tt, + self.matrix["layer_" + str(layer + 1)][idx], + dy, + dy2, + ybar, + self.functype, + ) + else: + zz = self._layer_0( + yy, + self.matrix["layer_" + str(layer + 1)][idx], + self.bias["layer_" + str(layer + 1)][idx], + ) + dz = unaggregated_dy_dx( + zz, + self.matrix["layer_" + str(layer + 1)][idx], + dy, + ybar, + self.functype, + ) + dy2 = unaggregated_dy2_dx( + zz, + self.matrix["layer_" + str(layer + 1)][idx], + dy, + dy2, + ybar, + self.functype, + ) + dy = dz + yy = zz + + vv = zz.detach().cpu().numpy().astype(self.data_type) + dd = dy.detach().cpu().numpy().astype(self.data_type) + d2 = dy2.detach().cpu().numpy().astype(self.data_type) + return vv, dd, d2 + + def _layer_0(self, x, w, b): + w = torch.from_numpy(w).to(env.DEVICE) + b = torch.from_numpy(b).to(env.DEVICE) + return self.activation_fn(torch.matmul(x, w) + b) + + def _layer_1(self, x, w, b): + w = torch.from_numpy(w).to(env.DEVICE) + b = torch.from_numpy(b).to(env.DEVICE) + t = torch.cat([x, x], dim=1) + return t, self.activation_fn(torch.matmul(x, w) + b) + t + + def _get_descrpt_type(self): + if isinstance(self.descrpt, deepmd.pt.model.descriptor.DescrptDPA1): + return "Atten" + elif isinstance(self.descrpt, deepmd.pt.model.descriptor.DescrptSeA): + return "A" + elif isinstance(self.descrpt, deepmd.pt.model.descriptor.DescrptSeR): + return "R" + elif isinstance(self.descrpt, deepmd.pt.model.descriptor.DescrptSeT): + return "T" + raise RuntimeError(f"Unsupported descriptor {self.descrpt}") + + def _get_layer_size(self): + # get the number of layers in EmbeddingNet + layer_size = 0 + basic_size = 0 + if self.type_one_side: + basic_size = len(self.embedding_net_nodes) * len(self.neuron) + else: + basic_size = ( + len(self.embedding_net_nodes) + * len(self.embedding_net_nodes[0]) + * len(self.neuron) + ) + if self.descrpt_type == "Atten": + layer_size = len(self.embedding_net_nodes[0]["layers"]) + elif self.descrpt_type == "A": + layer_size = len(self.embedding_net_nodes[0]["layers"]) + if self.type_one_side: + layer_size = basic_size // (self.ntypes - self._n_all_excluded) + elif self.descrpt_type == "T": + layer_size = len(self.embedding_net_nodes[0]["layers"]) + # layer_size = basic_size // int(comb(self.ntypes + 1, 2)) + elif self.descrpt_type == "R": + layer_size = basic_size // ( + self.ntypes * self.ntypes - len(self.exclude_types) + ) + if self.type_one_side: + layer_size = basic_size // (self.ntypes - self._n_all_excluded) + else: + raise RuntimeError("Unsupported descriptor") + return layer_size + + def _get_network_variable(self, var_name: str) -> dict: + """Get network variables (weights or biases) for all layers. + + Parameters + ---------- + var_name : str + Name of the variable to get ('w' for weights, 'b' for biases) + + Returns + ------- + dict + Dictionary mapping layer names to their variables + """ + result = {} + for layer in range(1, self.layer_size + 1): + result["layer_" + str(layer)] = [] + if self.descrpt_type == "Atten": + node = self.embedding_net_nodes[0]["layers"][layer - 1]["@variables"][ + var_name + ] + result["layer_" + str(layer)].append(node) + elif self.descrpt_type == "A": + if self.type_one_side: + for ii in range(0, self.ntypes): + if not self._all_excluded(ii): + node = self.embedding_net_nodes[ii]["layers"][layer - 1][ + "@variables" + ][var_name] + result["layer_" + str(layer)].append(node) + else: + result["layer_" + str(layer)].append(np.array([])) + else: + for ii in range(0, self.ntypes * self.ntypes): + if ( + ii // self.ntypes, + ii % self.ntypes, + ) not in self.exclude_types: + node = self.embedding_net_nodes[ + (ii % self.ntypes) * self.ntypes + ii // self.ntypes + ]["layers"][layer - 1]["@variables"][var_name] + result["layer_" + str(layer)].append(node) + else: + result["layer_" + str(layer)].append(np.array([])) + elif self.descrpt_type == "T": + for ii in range(self.ntypes): + for jj in range(ii, self.ntypes): + node = self.embedding_net_nodes[jj * self.ntypes + ii][ + "layers" + ][layer - 1]["@variables"][var_name] + result["layer_" + str(layer)].append(node) + elif self.descrpt_type == "R": + if self.type_one_side: + for ii in range(0, self.ntypes): + if not self._all_excluded(ii): + node = self.embedding_net_nodes[ii]["layers"][layer - 1][ + "@variables" + ][var_name] + result["layer_" + str(layer)].append(node) + else: + result["layer_" + str(layer)].append(np.array([])) + else: + for ii in range(0, self.ntypes * self.ntypes): + if ( + ii // self.ntypes, + ii % self.ntypes, + ) not in self.exclude_types: + node = self.embedding_net_nodes[ + (ii % self.ntypes) * self.ntypes + ii // self.ntypes + ]["layers"][layer - 1]["@variables"][var_name] + result["layer_" + str(layer)].append(node) + else: + result["layer_" + str(layer)].append(np.array([])) + else: + raise RuntimeError("Unsupported descriptor") + return result + + def _get_bias(self): + return self._get_network_variable("b") + + def _get_matrix(self): + return self._get_network_variable("w") + + def _convert_numpy_to_tensor(self): + """Convert self.data from np.ndarray to torch.Tensor.""" + for ii in self.data: + self.data[ii] = torch.tensor(self.data[ii], device=env.DEVICE) # pylint: disable=no-explicit-dtype + + @cached_property + def _n_all_excluded(self) -> int: + """Then number of types excluding all types.""" + return sum(int(self._all_excluded(ii)) for ii in range(0, self.ntypes)) + + +# customized op +def grad(xbar, y, functype): # functype=tanh, gelu, .. + if functype == 1: + return 1 - y * y + elif functype == 2: + var = np.tanh(SQRT_2_PI * (xbar + GGELU * xbar**3)) + return ( + 0.5 * SQRT_2_PI * xbar * (1 - var**2) * (3 * GGELU * xbar**2 + 1) + + 0.5 * var + + 0.5 + ) + elif functype == 3: + return 0.0 if xbar <= 0 else 1.0 + elif functype == 4: + return 0.0 if xbar <= 0 or xbar >= 6 else 1.0 + elif functype == 5: + return 1.0 - 1.0 / (1.0 + np.exp(xbar)) + elif functype == 6: + return y * (1 - y) + + raise ValueError(f"Unsupported function type: {functype}") + + +def grad_grad(xbar, y, functype): + if functype == 1: + return -2 * y * (1 - y * y) + elif functype == 2: + var1 = np.tanh(SQRT_2_PI * (xbar + GGELU * xbar**3)) + var2 = SQRT_2_PI * (1 - var1**2) * (3 * GGELU * xbar**2 + 1) + return ( + 3 * GGELU * SQRT_2_PI * xbar**2 * (1 - var1**2) + - SQRT_2_PI * xbar * var2 * (3 * GGELU * xbar**2 + 1) * var1 + + var2 + ) + elif functype in [3, 4]: + return 0 + elif functype == 5: + return np.exp(xbar) / ((1 + np.exp(xbar)) * (1 + np.exp(xbar))) + elif functype == 6: + return y * (1 - y) * (1 - 2 * y) + else: + return -1 + + +def unaggregated_dy_dx_s( + y: torch.Tensor, w_np: np.ndarray, xbar: torch.Tensor, functype: int +): + w = torch.from_numpy(w_np).to(env.DEVICE) + if y.dim() != 2: + raise ValueError("Dim of input y should be 2") + if w.dim() != 2: + raise ValueError("Dim of input w should be 2") + if xbar.dim() != 2: + raise ValueError("Dim of input xbar should be 2") + + length, width = y.shape + dy_dx = torch.zeros_like(y) + w = torch.flatten(w) + + for ii in range(length): + for jj in range(width): + dy_dx[ii, jj] = grad(xbar[ii, jj], y[ii, jj], functype) * w[jj] + + return dy_dx + + +def unaggregated_dy2_dx_s( + y: torch.Tensor, + dy: torch.Tensor, + w_np: np.ndarray, + xbar: torch.Tensor, + functype: int, +): + w = torch.from_numpy(w_np).to(env.DEVICE) + if y.dim() != 2: + raise ValueError("Dim of input y should be 2") + if dy.dim() != 2: + raise ValueError("Dim of input dy should be 2") + if w.dim() != 2: + raise ValueError("Dim of input w should be 2") + if xbar.dim() != 2: + raise ValueError("Dim of input xbar should be 2") + + length, width = y.shape + dy2_dx = torch.zeros_like(y) + w = torch.flatten(w) + + for ii in range(length): + for jj in range(width): + dy2_dx[ii, jj] = ( + grad_grad(xbar[ii, jj], y[ii, jj], functype) * w[jj] * w[jj] + ) + + return dy2_dx + + +def unaggregated_dy_dx( + z: torch.Tensor, + w_np: np.ndarray, + dy_dx: torch.Tensor, + ybar: torch.Tensor, + functype: int, +): + w = torch.from_numpy(w_np).to(env.DEVICE) + if z.dim() != 2: + raise ValueError("z tensor must have 2 dimensions") + if w.dim() != 2: + raise ValueError("w tensor must have 2 dimensions") + if dy_dx.dim() != 2: + raise ValueError("dy_dx tensor must have 2 dimensions") + if ybar.dim() != 2: + raise ValueError("ybar tensor must have 2 dimensions") + + length, width = z.shape + size = w.shape[0] + dy_dx = torch.flatten(dy_dx) + + dz_dx = torch.zeros_like(z) + + for kk in range(length): + for ii in range(width): + dz_drou = grad(ybar[kk, ii], z[kk, ii], functype) + accumulator = 0.0 + for jj in range(size): + accumulator += w[jj, ii] * dy_dx[kk * size + jj] + dz_drou *= accumulator + if width == 2 * size or width == size: + dz_drou += dy_dx[kk * size + ii % size] + dz_dx[kk, ii] = dz_drou + + return dz_dx + + +def unaggregated_dy2_dx( + z: torch.Tensor, + w_np: np.ndarray, + dy_dx: torch.Tensor, + dy2_dx: torch.Tensor, + ybar: torch.Tensor, + functype: int, +): + w = torch.from_numpy(w_np).to(env.DEVICE) + if z.dim() != 2: + raise ValueError("z tensor must have 2 dimensions") + if w.dim() != 2: + raise ValueError("w tensor must have 2 dimensions") + if dy_dx.dim() != 2: + raise ValueError("dy_dx tensor must have 2 dimensions") + if dy2_dx.dim() != 2: + raise ValueError("dy2_dx tensor must have 2 dimensions") + if ybar.dim() != 2: + raise ValueError("ybar tensor must have 2 dimensions") + + length, width = z.shape + size = w.shape[0] + dy_dx = torch.flatten(dy_dx) + dy2_dx = torch.flatten(dy2_dx) + + dz2_dx = torch.zeros_like(z) + + for kk in range(length): + for ii in range(width): + dz_drou = grad(ybar[kk, ii], z[kk, ii], functype) + accumulator1 = 0.0 + for jj in range(size): + accumulator1 += w[jj, ii] * dy2_dx[kk * size + jj] + dz_drou *= accumulator1 + accumulator2 = 0.0 + for jj in range(size): + accumulator2 += w[jj, ii] * dy_dx[kk * size + jj] + dz_drou += ( + grad_grad(ybar[kk, ii], z[kk, ii], functype) + * accumulator2 + * accumulator2 + ) + if width == 2 * size or width == size: + dz_drou += dy2_dx[kk * size + ii % size] + dz2_dx[kk, ii] = dz_drou + + return dz2_dx diff --git a/deepmd/tf/utils/tabulate.py b/deepmd/tf/utils/tabulate.py index 588ebdd55e..30171b12db 100644 --- a/deepmd/tf/utils/tabulate.py +++ b/deepmd/tf/utils/tabulate.py @@ -2,7 +2,6 @@ import logging from functools import ( cached_property, - lru_cache, ) from typing import ( Callable, @@ -28,11 +27,14 @@ get_embedding_net_nodes_from_graph_def, get_tensor_by_name_from_graph, ) +from deepmd.utils.tabulate import ( + BaseTabulate, +) log = logging.getLogger(__name__) -class DPTabulate: +class DPTabulate(BaseTabulate): r"""Class for tabulation. Compress a model, which including tabulating the embedding-net. @@ -71,13 +73,18 @@ def __init__( activation_fn: Callable[[tf.Tensor], tf.Tensor] = tf.nn.tanh, suffix: str = "", ) -> None: + super().__init__( + descrpt, + neuron, + type_one_side, + exclude_types, + False, + ) + + self.descrpt_type = self._get_descrpt_type() """Constructor.""" - self.descrpt = descrpt - self.neuron = neuron self.graph = graph self.graph_def = graph_def - self.type_one_side = type_one_side - self.exclude_types = exclude_types self.suffix = suffix # functype @@ -156,271 +163,25 @@ def __init__( self.upper = {} self.lower = {} - def build( - self, min_nbor_dist: float, extrapolate: float, stride0: float, stride1: float - ) -> tuple[dict[str, int], dict[str, int]]: - r"""Build the tables for model compression. - - Parameters - ---------- - min_nbor_dist - The nearest distance between neighbor atoms - extrapolate - The scale of model extrapolation - stride0 - The uniform stride of the first table - stride1 - The uniform stride of the second table - - Returns - ------- - lower : dict[str, int] - The lower boundary of environment matrix by net - upper : dict[str, int] - The upper boundary of environment matrix by net - """ - # tabulate range [lower, upper] with stride0 'stride0' - lower, upper = self._get_env_mat_range(min_nbor_dist) - if isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeAtten) or isinstance( - self.descrpt, deepmd.tf.descriptor.DescrptSeAEbdV2 - ): - uu = np.max(upper) - ll = np.min(lower) - xx = np.arange(ll, uu, stride0, dtype=self.data_type) - xx = np.append( - xx, - np.arange(uu, extrapolate * uu, stride1, dtype=self.data_type), - ) - xx = np.append(xx, np.array([extrapolate * uu], dtype=self.data_type)) - nspline = ((uu - ll) / stride0 + (extrapolate * uu - uu) / stride1).astype( - int - ) - self._build_lower( - "filter_net", xx, 0, uu, ll, stride0, stride1, extrapolate, nspline - ) - elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeA): - for ii in range(self.table_size): - if (self.type_one_side and not self._all_excluded(ii)) or ( - not self.type_one_side - and (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types - ): - if self.type_one_side: - net = "filter_-1_net_" + str(ii) - # upper and lower should consider all types which are not excluded and sel>0 - idx = [ - (type_i, ii) not in self.exclude_types - and self.sel_a[type_i] > 0 - for type_i in range(self.ntypes) - ] - uu = np.max(upper[idx]) - ll = np.min(lower[idx]) - else: - ielement = ii // self.ntypes - net = ( - "filter_" + str(ielement) + "_net_" + str(ii % self.ntypes) - ) - uu = upper[ielement] - ll = lower[ielement] - xx = np.arange(ll, uu, stride0, dtype=self.data_type) - xx = np.append( - xx, - np.arange(uu, extrapolate * uu, stride1, dtype=self.data_type), - ) - xx = np.append( - xx, np.array([extrapolate * uu], dtype=self.data_type) - ) - nspline = ( - (uu - ll) / stride0 + (extrapolate * uu - uu) / stride1 - ).astype(int) - self._build_lower( - net, xx, ii, uu, ll, stride0, stride1, extrapolate, nspline - ) - elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeT): - xx_all = [] - for ii in range(self.ntypes): - xx = np.arange( - extrapolate * lower[ii], lower[ii], stride1, dtype=self.data_type - ) - xx = np.append( - xx, np.arange(lower[ii], upper[ii], stride0, dtype=self.data_type) - ) - xx = np.append( - xx, - np.arange( - upper[ii], - extrapolate * upper[ii], - stride1, - dtype=self.data_type, - ), - ) - xx = np.append( - xx, np.array([extrapolate * upper[ii]], dtype=self.data_type) - ) - xx_all.append(xx) - nspline = ( - (upper - lower) / stride0 - + 2 * ((extrapolate * upper - upper) / stride1) - ).astype(int) - idx = 0 - for ii in range(self.ntypes): - for jj in range(ii, self.ntypes): - net = "filter_" + str(ii) + "_net_" + str(jj) - self._build_lower( - net, - xx_all[ii], - idx, - upper[ii], - lower[ii], - stride0, - stride1, - extrapolate, - nspline[ii], - ) - idx += 1 - elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeR): - for ii in range(self.table_size): - if (self.type_one_side and not self._all_excluded(ii)) or ( - not self.type_one_side - and (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types - ): - if self.type_one_side: - net = "filter_-1_net_" + str(ii) - # upper and lower should consider all types which are not excluded and sel>0 - idx = [ - (type_i, ii) not in self.exclude_types - and self.sel_a[type_i] > 0 - for type_i in range(self.ntypes) - ] - uu = np.max(upper[idx]) - ll = np.min(lower[idx]) - else: - ielement = ii // self.ntypes - net = ( - "filter_" + str(ielement) + "_net_" + str(ii % self.ntypes) - ) - uu = upper[ielement] - ll = lower[ielement] - xx = np.arange(ll, uu, stride0, dtype=self.data_type) - xx = np.append( - xx, - np.arange(uu, extrapolate * uu, stride1, dtype=self.data_type), - ) - xx = np.append( - xx, np.array([extrapolate * uu], dtype=self.data_type) - ) - nspline = ( - (uu - ll) / stride0 + (extrapolate * uu - uu) / stride1 - ).astype(int) - self._build_lower( - net, xx, ii, uu, ll, stride0, stride1, extrapolate, nspline - ) - else: - raise RuntimeError("Unsupported descriptor") - self._convert_numpy_to_tensor() - - return self.lower, self.upper - - def _build_lower( - self, net, xx, idx, upper, lower, stride0, stride1, extrapolate, nspline - ): - vv, dd, d2 = self._make_data(xx, idx) - self.data[net] = np.zeros( - [nspline, 6 * self.last_layer_size], dtype=self.data_type - ) - - # tt.shape: [nspline, self.last_layer_size] - if isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeA): - tt = np.full((nspline, self.last_layer_size), stride1) # pylint: disable=no-explicit-dtype - tt[: int((upper - lower) / stride0), :] = stride0 - elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeT): - tt = np.full((nspline, self.last_layer_size), stride1) # pylint: disable=no-explicit-dtype - tt[ - int((lower - extrapolate * lower) / stride1) + 1 : ( - int((lower - extrapolate * lower) / stride1) - + int((upper - lower) / stride0) - ), - :, - ] = stride0 - elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeR): - tt = np.full((nspline, self.last_layer_size), stride1) # pylint: disable=no-explicit-dtype - tt[: int((upper - lower) / stride0), :] = stride0 - else: - raise RuntimeError("Unsupported descriptor") - - # hh.shape: [nspline, self.last_layer_size] - hh = ( - vv[1 : nspline + 1, : self.last_layer_size] - - vv[:nspline, : self.last_layer_size] - ) - - self.data[net][:, : 6 * self.last_layer_size : 6] = vv[ - :nspline, : self.last_layer_size - ] - self.data[net][:, 1 : 6 * self.last_layer_size : 6] = dd[ - :nspline, : self.last_layer_size - ] - self.data[net][:, 2 : 6 * self.last_layer_size : 6] = ( - 0.5 * d2[:nspline, : self.last_layer_size] - ) - self.data[net][:, 3 : 6 * self.last_layer_size : 6] = ( - 1 / (2 * tt * tt * tt) - ) * ( - 20 * hh - - ( - 8 * dd[1 : nspline + 1, : self.last_layer_size] - + 12 * dd[:nspline, : self.last_layer_size] - ) - * tt - - ( - 3 * d2[:nspline, : self.last_layer_size] - - d2[1 : nspline + 1, : self.last_layer_size] - ) - * tt - * tt - ) - self.data[net][:, 4 : 6 * self.last_layer_size : 6] = ( - 1 / (2 * tt * tt * tt * tt) - ) * ( - -30 * hh - + ( - 14 * dd[1 : nspline + 1, : self.last_layer_size] - + 16 * dd[:nspline, : self.last_layer_size] - ) - * tt - + ( - 3 * d2[:nspline, : self.last_layer_size] - - 2 * d2[1 : nspline + 1, : self.last_layer_size] - ) - * tt - * tt - ) - self.data[net][:, 5 : 6 * self.last_layer_size : 6] = ( - 1 / (2 * tt * tt * tt * tt * tt) - ) * ( - 12 * hh - - 6 - * ( - dd[1 : nspline + 1, : self.last_layer_size] - + dd[:nspline, : self.last_layer_size] - ) - * tt - + ( - d2[1 : nspline + 1, : self.last_layer_size] - - d2[:nspline, : self.last_layer_size] - ) - * tt - * tt - ) - - self.upper[net] = upper - self.lower[net] = lower - def _load_sub_graph(self): sub_graph_def = tf.GraphDef() with tf.Graph().as_default() as sub_graph: tf.import_graph_def(sub_graph_def, name="") return sub_graph, sub_graph_def + def _get_descrpt_type(self): + if isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeAtten): + return "Atten" + elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeAEbdV2): + return "AEbdV2" + elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeA): + return "A" + elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeT): + return "T" + elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeR): + return "R" + raise RuntimeError(f"Unsupported descriptor {self.descrpt}") + def _get_bias(self): bias = {} for layer in range(1, self.layer_size + 1): @@ -711,36 +472,6 @@ def _layer_1(self, x, w, b): t = tf.concat([x, x], axis=1) return t, self.activation_fn(tf.matmul(x, w) + b) + t - # Change the embedding net range to sw / min_nbor_dist - def _get_env_mat_range(self, min_nbor_dist): - sw = self._spline5_switch(min_nbor_dist, self.rcut_smth, self.rcut) - if isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeA): - lower = -self.davg[:, 0] / self.dstd[:, 0] - upper = ((1 / min_nbor_dist) * sw - self.davg[:, 0]) / self.dstd[:, 0] - elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeT): - var = np.square(sw / (min_nbor_dist * self.dstd[:, 1:4])) - lower = np.min(-var, axis=1) - upper = np.max(var, axis=1) - elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeR): - lower = -self.davg[:, 0] / self.dstd[:, 0] - upper = ((1 / min_nbor_dist) * sw - self.davg[:, 0]) / self.dstd[:, 0] - else: - raise RuntimeError("Unsupported descriptor") - log.info("training data with lower boundary: " + str(lower)) - log.info("training data with upper boundary: " + str(upper)) - # returns element-wise lower and upper - return np.floor(lower), np.ceil(upper) - - def _spline5_switch(self, xx, rmin, rmax): - if xx < rmin: - vv = 1 - elif xx < rmax: - uu = (xx - rmin) / (rmax - rmin) - vv = uu * uu * uu * (-6 * uu * uu + 15 * uu - 10) + 1 - else: - vv = 0 - return vv - def _get_layer_size(self): layer_size = 0 if isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeAtten) or isinstance( @@ -776,54 +507,6 @@ def _n_all_excluded(self) -> int: """Then number of types excluding all types.""" return sum(int(self._all_excluded(ii)) for ii in range(0, self.ntypes)) - @lru_cache - def _all_excluded(self, ii: int) -> bool: - """Check if type ii excluds all types. - - Parameters - ---------- - ii : int - type index - - Returns - ------- - bool - if type ii excluds all types - """ - return all((ii, type_i) in self.exclude_types for type_i in range(self.ntypes)) - - def _get_table_size(self): - table_size = 0 - if isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeAtten) or isinstance( - self.descrpt, deepmd.tf.descriptor.DescrptSeAEbdV2 - ): - table_size = 1 - elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeA): - table_size = self.ntypes * self.ntypes - if self.type_one_side: - table_size = self.ntypes - elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeT): - table_size = int(comb(self.ntypes + 1, 2)) - elif isinstance(self.descrpt, deepmd.tf.descriptor.DescrptSeR): - table_size = self.ntypes * self.ntypes - if self.type_one_side: - table_size = self.ntypes - else: - raise RuntimeError("Unsupported descriptor") - return table_size - - def _get_data_type(self): - for item in self.matrix["layer_" + str(self.layer_size)]: - if len(item) != 0: - return type(item[0][0]) - return None - - def _get_last_layer_size(self): - for item in self.matrix["layer_" + str(self.layer_size)]: - if len(item) != 0: - return item.shape[1] - return 0 - def _convert_numpy_to_tensor(self): """Convert self.data from np.ndarray to tf.Tensor.""" for ii in self.data: diff --git a/deepmd/utils/tabulate.py b/deepmd/utils/tabulate.py new file mode 100644 index 0000000000..545b265b88 --- /dev/null +++ b/deepmd/utils/tabulate.py @@ -0,0 +1,458 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +from abc import ( + ABC, + abstractmethod, +) +from functools import ( + lru_cache, +) + +import numpy as np +from scipy.special import ( + comb, +) + +log = logging.getLogger(__name__) + + +class BaseTabulate(ABC): + """A base class for pt and tf tabulation.""" + + def __init__( + self, + descrpt, + neuron, + type_one_side, + exclude_types, + is_pt, + ) -> None: + """Constructor.""" + super().__init__() + + """Shared attributes.""" + self.descrpt = descrpt + self.neuron = neuron + self.type_one_side = type_one_side + self.exclude_types = exclude_types + self.is_pt = is_pt + + """Need to be initialized in the subclass.""" + self.descrpt_type = "Base" + + self.sel_a = [] + self.rcut = 0.0 + self.rcut_smth = 0.0 + + self.davg = np.array([]) + self.dstd = np.array([]) + self.ntypes = 0 + + self.layer_size = 0 + self.table_size = 0 + + self.bias = {} + self.matrix = {} + + self.data_type = None + self.last_layer_size = 0 + + """Save the tabulation result.""" + self.data = {} + + self.upper = {} + self.lower = {} + + def build( + self, min_nbor_dist: float, extrapolate: float, stride0: float, stride1: float + ) -> tuple[dict[str, int], dict[str, int]]: + r"""Build the tables for model compression. + + Parameters + ---------- + min_nbor_dist + The nearest distance between neighbor atoms + extrapolate + The scale of model extrapolation + stride0 + The uniform stride of the first table + stride1 + The uniform stride of the second table + + Returns + ------- + lower : dict[str, int] + The lower boundary of environment matrix by net + upper : dict[str, int] + The upper boundary of environment matrix by net + """ + # tabulate range [lower, upper] with stride0 'stride0' + lower, upper = self._get_env_mat_range(min_nbor_dist) + if self.descrpt_type in ("Atten", "AEbdV2"): + uu = np.max(upper) + ll = np.min(lower) + xx = np.arange(ll, uu, stride0, dtype=self.data_type) + xx = np.append( + xx, + np.arange(uu, extrapolate * uu, stride1, dtype=self.data_type), + ) + xx = np.append(xx, np.array([extrapolate * uu], dtype=self.data_type)) + nspline = ((uu - ll) / stride0 + (extrapolate * uu - uu) / stride1).astype( + int + ) + self._build_lower( + "filter_net", xx, 0, uu, ll, stride0, stride1, extrapolate, nspline + ) + elif self.descrpt_type == "A": + for ii in range(self.table_size): + if (self.type_one_side and not self._all_excluded(ii)) or ( + not self.type_one_side + and (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types + ): + if self.type_one_side: + net = "filter_-1_net_" + str(ii) + # upper and lower should consider all types which are not excluded and sel>0 + idx = [ + (type_i, ii) not in self.exclude_types + and self.sel_a[type_i] > 0 + for type_i in range(self.ntypes) + ] + uu = np.max(upper[idx]) + ll = np.min(lower[idx]) + else: + ielement = ii // self.ntypes + net = ( + "filter_" + str(ielement) + "_net_" + str(ii % self.ntypes) + ) + if self.is_pt: + uu = np.max(upper[ielement]) + ll = np.min(lower[ielement]) + else: + uu = upper[ielement] + ll = lower[ielement] + xx = np.arange(ll, uu, stride0, dtype=self.data_type) + xx = np.append( + xx, + np.arange(uu, extrapolate * uu, stride1, dtype=self.data_type), + ) + xx = np.append( + xx, np.array([extrapolate * uu], dtype=self.data_type) + ) + nspline = ( + (uu - ll) / stride0 + (extrapolate * uu - uu) / stride1 + ).astype(int) + self._build_lower( + net, xx, ii, uu, ll, stride0, stride1, extrapolate, nspline + ) + elif self.descrpt_type == "T": + xx_all = [] + for ii in range(self.ntypes): + """Pt and tf is different here. Pt version is a two-dimensional array.""" + if self.is_pt: + uu = np.max(upper[ii]) + ll = np.min(lower[ii]) + else: + ll = lower[ii] + uu = upper[ii] + xx = np.arange(extrapolate * ll, ll, stride1, dtype=self.data_type) + xx = np.append(xx, np.arange(ll, uu, stride0, dtype=self.data_type)) + xx = np.append( + xx, + np.arange( + uu, + extrapolate * uu, + stride1, + dtype=self.data_type, + ), + ) + xx = np.append(xx, np.array([extrapolate * uu], dtype=self.data_type)) + xx_all.append(xx) + nspline = ( + (upper - lower) / stride0 + + 2 * ((extrapolate * upper - upper) / stride1) + ).astype(int) + idx = 0 + for ii in range(self.ntypes): + if self.is_pt: + uu = np.max(upper[ii]) + ll = np.min(lower[ii]) + else: + ll = lower[ii] + uu = upper[ii] + for jj in range(ii, self.ntypes): + net = "filter_" + str(ii) + "_net_" + str(jj) + self._build_lower( + net, + xx_all[ii], + idx, + uu, + ll, + stride0, + stride1, + extrapolate, + nspline[ii][0] if self.is_pt else nspline[ii], + ) + idx += 1 + elif self.descrpt_type == "R": + for ii in range(self.table_size): + if (self.type_one_side and not self._all_excluded(ii)) or ( + not self.type_one_side + and (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types + ): + if self.type_one_side: + net = "filter_-1_net_" + str(ii) + # upper and lower should consider all types which are not excluded and sel>0 + idx = [ + (type_i, ii) not in self.exclude_types + and self.sel_a[type_i] > 0 + for type_i in range(self.ntypes) + ] + uu = np.max(upper[idx]) + ll = np.min(lower[idx]) + else: + ielement = ii // self.ntypes + net = ( + "filter_" + str(ielement) + "_net_" + str(ii % self.ntypes) + ) + uu = upper[ielement] + ll = lower[ielement] + xx = np.arange(ll, uu, stride0, dtype=self.data_type) + xx = np.append( + xx, + np.arange(uu, extrapolate * uu, stride1, dtype=self.data_type), + ) + xx = np.append( + xx, np.array([extrapolate * uu], dtype=self.data_type) + ) + nspline = ( + (uu - ll) / stride0 + (extrapolate * uu - uu) / stride1 + ).astype(int) + self._build_lower( + net, xx, ii, uu, ll, stride0, stride1, extrapolate, nspline + ) + else: + raise RuntimeError("Unsupported descriptor") + + self._convert_numpy_to_tensor() + if self.is_pt: + self._convert_numpy_float_to_int() + return self.lower, self.upper + + def _build_lower( + self, net, xx, idx, upper, lower, stride0, stride1, extrapolate, nspline + ): + vv, dd, d2 = self._make_data(xx, idx) + self.data[net] = np.zeros( + [nspline, 6 * self.last_layer_size], dtype=self.data_type + ) + + # tt.shape: [nspline, self.last_layer_size] + if self.descrpt_type in ("Atten", "A", "AEbdV2"): + tt = np.full((nspline, self.last_layer_size), stride1) # pylint: disable=no-explicit-dtype + tt[: int((upper - lower) / stride0), :] = stride0 + elif self.descrpt_type == "T": + tt = np.full((nspline, self.last_layer_size), stride1) # pylint: disable=no-explicit-dtype + tt[ + int((lower - extrapolate * lower) / stride1) + 1 : ( + int((lower - extrapolate * lower) / stride1) + + int((upper - lower) / stride0) + ), + :, + ] = stride0 + elif self.descrpt_type == "R": + tt = np.full((nspline, self.last_layer_size), stride1) # pylint: disable=no-explicit-dtype + tt[: int((upper - lower) / stride0), :] = stride0 + else: + raise RuntimeError("Unsupported descriptor") + + # hh.shape: [nspline, self.last_layer_size] + hh = ( + vv[1 : nspline + 1, : self.last_layer_size] + - vv[:nspline, : self.last_layer_size] + ) + + self.data[net][:, : 6 * self.last_layer_size : 6] = vv[ + :nspline, : self.last_layer_size + ] + self.data[net][:, 1 : 6 * self.last_layer_size : 6] = dd[ + :nspline, : self.last_layer_size + ] + self.data[net][:, 2 : 6 * self.last_layer_size : 6] = ( + 0.5 * d2[:nspline, : self.last_layer_size] + ) + self.data[net][:, 3 : 6 * self.last_layer_size : 6] = ( + 1 / (2 * tt * tt * tt) + ) * ( + 20 * hh + - ( + 8 * dd[1 : nspline + 1, : self.last_layer_size] + + 12 * dd[:nspline, : self.last_layer_size] + ) + * tt + - ( + 3 * d2[:nspline, : self.last_layer_size] + - d2[1 : nspline + 1, : self.last_layer_size] + ) + * tt + * tt + ) + self.data[net][:, 4 : 6 * self.last_layer_size : 6] = ( + 1 / (2 * tt * tt * tt * tt) + ) * ( + -30 * hh + + ( + 14 * dd[1 : nspline + 1, : self.last_layer_size] + + 16 * dd[:nspline, : self.last_layer_size] + ) + * tt + + ( + 3 * d2[:nspline, : self.last_layer_size] + - 2 * d2[1 : nspline + 1, : self.last_layer_size] + ) + * tt + * tt + ) + self.data[net][:, 5 : 6 * self.last_layer_size : 6] = ( + 1 / (2 * tt * tt * tt * tt * tt) + ) * ( + 12 * hh + - 6 + * ( + dd[1 : nspline + 1, : self.last_layer_size] + + dd[:nspline, : self.last_layer_size] + ) + * tt + + ( + d2[1 : nspline + 1, : self.last_layer_size] + - d2[:nspline, : self.last_layer_size] + ) + * tt + * tt + ) + + self.upper[net] = upper + self.lower[net] = lower + + @abstractmethod + def _make_data(self, xx, idx) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + """Generate tabulation data for the given input. + + Parameters + ---------- + xx : np.ndarray + Input values to tabulate + idx : int + Index for accessing the correct network parameters + + Returns + ------- + tuple[np.ndarray, np.ndarray, np.ndarray] + Values, first derivatives, and second derivatives + """ + pass + + @lru_cache + def _all_excluded(self, ii: int) -> bool: + """Check if type ii excluds all types. + + Parameters + ---------- + ii : int + type index + + Returns + ------- + bool + if type ii excluds all types + """ + return all((ii, type_i) in self.exclude_types for type_i in range(self.ntypes)) + + @abstractmethod + def _get_descrpt_type(self): + """Get the descrpt type.""" + pass + + @abstractmethod + def _get_layer_size(self): + """Get the number of embedding layer.""" + pass + + def _get_table_size(self): + table_size = 0 + if self.descrpt_type in ("Atten", "AEbdV2"): + table_size = 1 + elif self.descrpt_type == "A": + table_size = self.ntypes * self.ntypes + if self.type_one_side: + table_size = self.ntypes + elif self.descrpt_type == "T": + table_size = int(comb(self.ntypes + 1, 2)) + elif self.descrpt_type == "R": + table_size = self.ntypes * self.ntypes + if self.type_one_side: + table_size = self.ntypes + else: + raise RuntimeError("Unsupported descriptor") + return table_size + + def _get_data_type(self): + for item in self.matrix["layer_" + str(self.layer_size)]: + if len(item) != 0: + return type(item[0][0]) + return None + + def _get_last_layer_size(self): + for item in self.matrix["layer_" + str(self.layer_size)]: + if len(item) != 0: + return item.shape[1] + return 0 + + @abstractmethod + def _get_bias(self): + """Get bias of embedding net.""" + pass + + @abstractmethod + def _get_matrix(self): + """Get weight matrx of embedding net.""" + pass + + @abstractmethod + def _convert_numpy_to_tensor(self): + """Convert self.data from np.ndarray to torch.Tensor.""" + pass + + def _convert_numpy_float_to_int(self): + """Convert self.lower and self.upper from np.float32 or np.float64 to int.""" + self.lower = {k: int(v) for k, v in self.lower.items()} + self.upper = {k: int(v) for k, v in self.upper.items()} + + def _get_env_mat_range(self, min_nbor_dist): + """Change the embedding net range to sw / min_nbor_dist.""" + sw = self._spline5_switch(min_nbor_dist, self.rcut_smth, self.rcut) + if self.descrpt_type in ("Atten", "A", "AEbdV2"): + lower = -self.davg[:, 0] / self.dstd[:, 0] + upper = ((1 / min_nbor_dist) * sw - self.davg[:, 0]) / self.dstd[:, 0] + elif self.descrpt_type == "T": + var = np.square(sw / (min_nbor_dist * self.dstd[:, 1:4])) + lower = np.min(-var, axis=1) + upper = np.max(var, axis=1) + elif self.descrpt_type == "R": + lower = -self.davg[:, 0] / self.dstd[:, 0] + upper = ((1 / min_nbor_dist) * sw - self.davg[:, 0]) / self.dstd[:, 0] + else: + raise RuntimeError("Unsupported descriptor") + log.info("training data with lower boundary: " + str(lower)) + log.info("training data with upper boundary: " + str(upper)) + # returns element-wise lower and upper + return np.floor(lower), np.ceil(upper) + + def _spline5_switch(self, xx, rmin, rmax): + if xx < rmin: + vv = 1 + elif xx < rmax: + uu = (xx - rmin) / (rmax - rmin) + vv = uu * uu * uu * (-6 * uu * uu + 15 * uu - 10) + 1 + else: + vv = 0 + return vv diff --git a/source/op/pt/tabulate_multi_device.cc b/source/op/pt/tabulate_multi_device.cc index bdc6f63f94..5c710f5c37 100644 --- a/source/op/pt/tabulate_multi_device.cc +++ b/source/op/pt/tabulate_multi_device.cc @@ -905,7 +905,7 @@ class TabulateFusionSeROp std::vector tabulate_fusion_se_a( const torch::Tensor& table_tensor, - const torch::Tensor& table_info_tensor, + const torch::Tensor& table_info_tensor, // only cpu const torch::Tensor& em_x_tensor, const torch::Tensor& em_tensor, int64_t last_layer_size) { @@ -915,7 +915,7 @@ std::vector tabulate_fusion_se_a( std::vector tabulate_fusion_se_atten( const torch::Tensor& table_tensor, - const torch::Tensor& table_info_tensor, + const torch::Tensor& table_info_tensor, // only cpu const torch::Tensor& em_x_tensor, const torch::Tensor& em_tensor, const torch::Tensor& two_embed_tensor, @@ -928,7 +928,7 @@ std::vector tabulate_fusion_se_atten( std::vector tabulate_fusion_se_t( const torch::Tensor& table_tensor, - const torch::Tensor& table_info_tensor, + const torch::Tensor& table_info_tensor, // only cpu const torch::Tensor& em_x_tensor, const torch::Tensor& em_tensor, int64_t last_layer_size) { @@ -938,7 +938,7 @@ std::vector tabulate_fusion_se_t( std::vector tabulate_fusion_se_r( const torch::Tensor& table_tensor, - const torch::Tensor& table_info_tensor, + const torch::Tensor& table_info_tensor, // only cpu const torch::Tensor& em_tensor, int64_t last_layer_size) { return TabulateFusionSeROp::apply(table_tensor, table_info_tensor, em_tensor, diff --git a/source/tests/pt/model/test_compressed_descriptor_se_a.py b/source/tests/pt/model/test_compressed_descriptor_se_a.py new file mode 100644 index 0000000000..14d82a452c --- /dev/null +++ b/source/tests/pt/model/test_compressed_descriptor_se_a.py @@ -0,0 +1,132 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest +from typing import ( + Any, +) + +import numpy as np +import torch + +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.pt.model.descriptor.se_a import ( + DescrptSeA, +) +from deepmd.pt.utils.env import DEVICE as PT_DEVICE +from deepmd.pt.utils.nlist import build_neighbor_list as build_neighbor_list_pt +from deepmd.pt.utils.nlist import ( + extend_coord_with_ghosts as extend_coord_with_ghosts_pt, +) + +from ...consistent.common import ( + parameterized, +) + + +def eval_pt_descriptor( + pt_obj: Any, natoms, coords, atype, box, mixed_types: bool = False +) -> Any: + ext_coords, ext_atype, mapping = extend_coord_with_ghosts_pt( + torch.from_numpy(coords).to(PT_DEVICE).reshape(1, -1, 3), + torch.from_numpy(atype).to(PT_DEVICE).reshape(1, -1), + torch.from_numpy(box).to(PT_DEVICE).reshape(1, 3, 3), + pt_obj.get_rcut(), + ) + nlist = build_neighbor_list_pt( + ext_coords, + ext_atype, + natoms[0], + pt_obj.get_rcut(), + pt_obj.get_sel(), + distinguish_types=(not mixed_types), + ) + result, _, _, _, _ = pt_obj(ext_coords, ext_atype, nlist, mapping=mapping) + return result + + +@parameterized(("float32", "float64"), (True, False)) +class TestDescriptorSeA(unittest.TestCase): + def setUp(self): + (self.dtype, self.type_one_side) = self.param + if self.dtype == "float32": + self.atol = 1e-5 + elif self.dtype == "float64": + self.atol = 1e-10 + self.seed = 21 + self.sel = [9, 10] + self.rcut_smth = 5.80 + self.rcut = 6.00 + self.neuron = [6, 12, 24] + self.axis_neuron = 3 + self.ntypes = 2 + self.coords = np.array( + [ + 12.83, + 2.56, + 2.18, + 12.09, + 2.87, + 2.74, + 00.25, + 3.32, + 1.68, + 3.36, + 3.00, + 1.81, + 3.51, + 2.51, + 2.60, + 4.27, + 3.22, + 1.56, + ], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32) + self.box = np.array( + [13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.natoms = np.array([6, 6, 2, 4], dtype=np.int32) + + self.se_a = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + self.neuron, + self.axis_neuron, + type_one_side=self.type_one_side, + seed=21, + precision=self.dtype, + ) + + def test_compressed_forward(self): + result_pt = eval_pt_descriptor( + self.se_a, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + self.se_a.enable_compression(0.5) + result_pt_compressed = eval_pt_descriptor( + self.se_a, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + self.assertEqual(result_pt.shape, result_pt_compressed.shape) + torch.testing.assert_close( + result_pt, + result_pt_compressed, + atol=self.atol, + rtol=self.atol, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/model/test_compressed_descriptor_se_atten.py b/source/tests/pt/model/test_compressed_descriptor_se_atten.py new file mode 100644 index 0000000000..a439255396 --- /dev/null +++ b/source/tests/pt/model/test_compressed_descriptor_se_atten.py @@ -0,0 +1,142 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest +from typing import ( + Any, +) + +import numpy as np +import torch + +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.pt.model.descriptor.dpa1 import ( + DescrptDPA1, +) +from deepmd.pt.utils.env import DEVICE as PT_DEVICE +from deepmd.pt.utils.nlist import build_neighbor_list as build_neighbor_list_pt +from deepmd.pt.utils.nlist import ( + extend_coord_with_ghosts as extend_coord_with_ghosts_pt, +) + +from ...consistent.common import ( + parameterized, +) + + +def eval_pt_descriptor( + pt_obj: Any, natoms, coords, atype, box, mixed_types: bool = False +) -> Any: + ext_coords, ext_atype, mapping = extend_coord_with_ghosts_pt( + torch.from_numpy(coords).to(PT_DEVICE).reshape(1, -1, 3), + torch.from_numpy(atype).to(PT_DEVICE).reshape(1, -1), + torch.from_numpy(box).to(PT_DEVICE).reshape(1, 3, 3), + pt_obj.get_rcut(), + ) + nlist = build_neighbor_list_pt( + ext_coords, + ext_atype, + natoms[0], + pt_obj.get_rcut(), + pt_obj.get_sel(), + distinguish_types=(not mixed_types), + ) + result, _, _, _, _ = pt_obj(ext_coords, ext_atype, nlist, mapping=mapping) + return result + + +@parameterized(("float32", "float64"), (True, False)) +class TestDescriptorSeAtten(unittest.TestCase): + def setUp(self): + (self.dtype, self.type_one_side) = self.param + if self.dtype == "float32": + self.atol = 1e-5 + elif self.dtype == "float64": + self.atol = 1e-10 + self.seed = 21 + self.sel = [10] + self.rcut_smth = 5.80 + self.rcut = 6.00 + self.neuron = [6, 12, 24] + self.axis_neuron = 3 + self.ntypes = 2 + self.coords = np.array( + [ + 12.83, + 2.56, + 2.18, + 12.09, + 2.87, + 2.74, + 00.25, + 3.32, + 1.68, + 3.36, + 3.00, + 1.81, + 3.51, + 2.51, + 2.60, + 4.27, + 3.22, + 1.56, + ], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32) + self.box = np.array( + [13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.natoms = np.array([6, 6, 2, 4], dtype=np.int32) + + self.se_atten = DescrptDPA1( + self.rcut, + self.rcut_smth, + self.sel, + self.ntypes, + self.neuron, + self.axis_neuron, + 4, + attn=8, + attn_layer=0, + seed=21, + precision=self.dtype, + type_one_side=self.type_one_side, + tebd_input_mode="strip", + ) + + def test_compressed_forward(self): + result_pt = eval_pt_descriptor( + self.se_atten, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + if self.dtype == "float32": + result_pt = result_pt.to(torch.float32) + elif self.dtype == "float64": + result_pt = result_pt.to(torch.float64) + + self.se_atten.enable_compression(0.5) + result_pt_compressed = eval_pt_descriptor( + self.se_atten, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + self.assertEqual(result_pt.shape, result_pt_compressed.shape) + torch.testing.assert_close( + result_pt, + result_pt_compressed, + atol=self.atol, + rtol=self.atol, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/model/test_compressed_descriptor_se_r.py b/source/tests/pt/model/test_compressed_descriptor_se_r.py new file mode 100644 index 0000000000..156cb9a06d --- /dev/null +++ b/source/tests/pt/model/test_compressed_descriptor_se_r.py @@ -0,0 +1,129 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest +from typing import ( + Any, +) + +import numpy as np +import torch + +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.pt.model.descriptor.se_r import ( + DescrptSeR, +) +from deepmd.pt.utils.env import DEVICE as PT_DEVICE +from deepmd.pt.utils.nlist import build_neighbor_list as build_neighbor_list_pt +from deepmd.pt.utils.nlist import ( + extend_coord_with_ghosts as extend_coord_with_ghosts_pt, +) + +from ...consistent.common import ( + parameterized, +) + + +def eval_pt_descriptor( + pt_obj: Any, natoms, coords, atype, box, mixed_types: bool = False +) -> Any: + ext_coords, ext_atype, mapping = extend_coord_with_ghosts_pt( + torch.from_numpy(coords).to(PT_DEVICE).reshape(1, -1, 3), + torch.from_numpy(atype).to(PT_DEVICE).reshape(1, -1), + torch.from_numpy(box).to(PT_DEVICE).reshape(1, 3, 3), + pt_obj.get_rcut(), + ) + nlist = build_neighbor_list_pt( + ext_coords, + ext_atype, + natoms[0], + pt_obj.get_rcut(), + pt_obj.get_sel(), + distinguish_types=(not mixed_types), + ) + result, _, _, _, _ = pt_obj(ext_coords, ext_atype, nlist, mapping=mapping) + return result + + +@parameterized(("float32", "float64")) +class TestDescriptorSeR(unittest.TestCase): + def setUp(self): + (self.dtype,) = self.param + if self.dtype == "float32": + self.atol = 1e-5 + elif self.dtype == "float64": + self.atol = 1e-10 + self.seed = 21 + self.sel = [9, 10] + self.rcut_smth = 5.80 + self.rcut = 6.00 + self.neuron = [6, 12, 24] + self.ntypes = 2 + self.coords = np.array( + [ + 12.83, + 2.56, + 2.18, + 12.09, + 2.87, + 2.74, + 00.25, + 3.32, + 1.68, + 3.36, + 3.00, + 1.81, + 3.51, + 2.51, + 2.60, + 4.27, + 3.22, + 1.56, + ], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32) + self.box = np.array( + [13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.natoms = np.array([6, 6, 2, 4], dtype=np.int32) + + self.se_r = DescrptSeR( + self.rcut, + self.rcut_smth, + self.sel, + self.neuron, + seed=21, + precision=self.dtype, + ) + + def test_compressed_forward(self): + result_pt = eval_pt_descriptor( + self.se_r, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + self.se_r.enable_compression(0.5) + result_pt_compressed = eval_pt_descriptor( + self.se_r, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + self.assertEqual(result_pt.shape, result_pt_compressed.shape) + torch.testing.assert_close( + result_pt, + result_pt_compressed, + atol=self.atol, + rtol=self.atol, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/model/test_compressed_descriptor_se_t.py b/source/tests/pt/model/test_compressed_descriptor_se_t.py new file mode 100644 index 0000000000..aa3054bc0d --- /dev/null +++ b/source/tests/pt/model/test_compressed_descriptor_se_t.py @@ -0,0 +1,129 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest +from typing import ( + Any, +) + +import numpy as np +import torch + +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.pt.model.descriptor.se_t import ( + DescrptSeT, +) +from deepmd.pt.utils.env import DEVICE as PT_DEVICE +from deepmd.pt.utils.nlist import build_neighbor_list as build_neighbor_list_pt +from deepmd.pt.utils.nlist import ( + extend_coord_with_ghosts as extend_coord_with_ghosts_pt, +) + +from ...consistent.common import ( + parameterized, +) + + +def eval_pt_descriptor( + pt_obj: Any, natoms, coords, atype, box, mixed_types: bool = False +) -> Any: + ext_coords, ext_atype, mapping = extend_coord_with_ghosts_pt( + torch.from_numpy(coords).to(PT_DEVICE).reshape(1, -1, 3), + torch.from_numpy(atype).to(PT_DEVICE).reshape(1, -1), + torch.from_numpy(box).to(PT_DEVICE).reshape(1, 3, 3), + pt_obj.get_rcut(), + ) + nlist = build_neighbor_list_pt( + ext_coords, + ext_atype, + natoms[0], + pt_obj.get_rcut(), + pt_obj.get_sel(), + distinguish_types=(not mixed_types), + ) + result, _, _, _, _ = pt_obj(ext_coords, ext_atype, nlist, mapping=mapping) + return result + + +@parameterized(("float32", "float64")) +class TestDescriptorSeT(unittest.TestCase): + def setUp(self): + (self.dtype,) = self.param + if self.dtype == "float32": + self.atol = 1e-5 + elif self.dtype == "float64": + self.atol = 1e-10 + self.seed = 21 + self.sel = [9, 10] + self.rcut_smth = 5.80 + self.rcut = 6.00 + self.neuron = [6, 12, 24] + self.ntypes = 2 + self.coords = np.array( + [ + 12.83, + 2.56, + 2.18, + 12.09, + 2.87, + 2.74, + 00.25, + 3.32, + 1.68, + 3.36, + 3.00, + 1.81, + 3.51, + 2.51, + 2.60, + 4.27, + 3.22, + 1.56, + ], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32) + self.box = np.array( + [13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.natoms = np.array([6, 6, 2, 4], dtype=np.int32) + + self.se_t = DescrptSeT( + self.rcut, + self.rcut_smth, + self.sel, + self.neuron, + seed=21, + precision=self.dtype, + ) + + def test_compressed_forward(self): + result_pt = eval_pt_descriptor( + self.se_t, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + self.se_t.enable_compression(0.5) + result_pt_compressed = eval_pt_descriptor( + self.se_t, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + self.assertEqual(result_pt.shape, result_pt_compressed.shape) + torch.testing.assert_close( + result_pt, + result_pt_compressed, + atol=self.atol, + rtol=self.atol, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_tabulate.py b/source/tests/pt/test_tabulate.py new file mode 100644 index 0000000000..c03773827d --- /dev/null +++ b/source/tests/pt/test_tabulate.py @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import torch + +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.tabulate import ( + unaggregated_dy2_dx, + unaggregated_dy2_dx_s, + unaggregated_dy_dx, + unaggregated_dy_dx_s, +) +from deepmd.tf.env import ( + op_module, + tf, +) + + +def setUpModule(): + tf.compat.v1.enable_eager_execution() + + +def tearDownModule(): + tf.compat.v1.disable_eager_execution() + + +class TestDPTabulate(unittest.TestCase): + def setUp(self): + self.w = np.array( + [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 1.0, 1.1, 1.2]], + dtype=np.float64, + ) + + self.x = np.array( + [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9], [1.0, 1.1, 1.2]], + dtype=np.float64, # 4 x 3 + ) + + self.b = np.array([[0.1], [0.2], [0.3], [0.4]], dtype=np.float64) # 4 x 1 + + self.xbar = np.matmul(self.x, self.w) + self.b # 4 x 4 + + self.y = np.tanh(self.xbar) + + def test_ops(self): + dy_tf = op_module.unaggregated_dy_dx_s( + tf.constant(self.y, dtype="double"), + tf.constant(self.w, dtype="double"), + tf.constant(self.xbar, dtype="double"), + tf.constant(1), + ) + + dy_pt = unaggregated_dy_dx_s( + torch.from_numpy(self.y), + self.w, + torch.from_numpy(self.xbar), + 1, + ) + + dy_tf_numpy = dy_tf.numpy() + dy_pt_numpy = dy_pt.detach().numpy() + + np.testing.assert_almost_equal(dy_tf_numpy, dy_pt_numpy, decimal=10) + + dy2_tf = op_module.unaggregated_dy2_dx_s( + tf.constant(self.y, dtype="double"), + dy_tf, + tf.constant(self.w, dtype="double"), + tf.constant(self.xbar, dtype="double"), + tf.constant(1), + ) + + dy2_pt = unaggregated_dy2_dx_s( + torch.from_numpy(self.y), + dy_pt, + self.w, + torch.from_numpy(self.xbar), + 1, + ) + + dy2_tf_numpy = dy2_tf.numpy() + dy2_pt_numpy = dy2_pt.detach().numpy() + + np.testing.assert_almost_equal(dy2_tf_numpy, dy2_pt_numpy, decimal=10) + + dz_tf = op_module.unaggregated_dy_dx( + tf.constant(self.y, dtype="double"), + tf.constant(self.w, dtype="double"), + dy_tf, + tf.constant(self.xbar, dtype="double"), + tf.constant(1), + ) + + dz_pt = unaggregated_dy_dx( + torch.from_numpy(self.y).to(env.DEVICE), + self.w, + dy_pt, + torch.from_numpy(self.xbar).to(env.DEVICE), + 1, + ) + + dz_tf_numpy = dz_tf.numpy() + dz_pt_numpy = dz_pt.detach().cpu().numpy() + + np.testing.assert_almost_equal(dz_tf_numpy, dz_pt_numpy, decimal=10) + + dy2_tf = op_module.unaggregated_dy2_dx( + tf.constant(self.y, dtype="double"), + tf.constant(self.w, dtype="double"), + dy_tf, + dy2_tf, + tf.constant(self.xbar, dtype="double"), + tf.constant(1), + ) + + dy2_pt = unaggregated_dy2_dx( + torch.from_numpy(self.y).to(env.DEVICE), + self.w, + dy_pt, + dy2_pt, + torch.from_numpy(self.xbar).to(env.DEVICE), + 1, + ) + + dy2_tf_numpy = dy2_tf.numpy() + dy2_pt_numpy = dy2_pt.detach().cpu().numpy() + + np.testing.assert_almost_equal(dy2_tf_numpy, dy2_pt_numpy, decimal=10) + + +if __name__ == "__main__": + unittest.main() From c3bf841dd38145aa1000b951385408e3632e59b8 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 1 Nov 2024 19:53:50 +0800 Subject: [PATCH 116/193] Update pair_base.cpp --- source/lmp/pair_base.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/source/lmp/pair_base.cpp b/source/lmp/pair_base.cpp index cd3f49eb73..9f83e5b040 100644 --- a/source/lmp/pair_base.cpp +++ b/source/lmp/pair_base.cpp @@ -354,10 +354,8 @@ void PairDeepMDBase::print_summary(const string pre) const { cout << pre << "source branch: " << STR_GIT_BRANCH << endl; cout << pre << "source commit: " << STR_GIT_HASH << endl; cout << pre << "source commit at: " << STR_GIT_DATE << endl; - cout << pre << "build float prec: " << STR_FLOAT_PREC << endl; - cout << pre << "build with tf inc: " << STR_TensorFlow_INCLUDE_DIRS - << endl; - cout << pre << "build with tf lib: " << STR_TensorFlow_LIBRARY << endl; + cout << pre << "build with inc: " << STR_BACKEND_INCLUDE_DIRS << endl; + cout << pre << "build with lib: " << STR_BACKEND_LIBRARY_PATH << endl; std::cout.rdbuf(sbuf); utils::logmesg(lmp, buffer.str()); From 5451acd582203f58a71af39598d73bd34a82ed76 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 1 Nov 2024 20:24:34 +0800 Subject: [PATCH 117/193] Update test_deeppot_tf_spin.cc --- source/api_cc/tests/test_deeppot_tf_spin.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/api_cc/tests/test_deeppot_tf_spin.cc b/source/api_cc/tests/test_deeppot_tf_spin.cc index d15a7ed246..95f35a49cd 100644 --- a/source/api_cc/tests/test_deeppot_tf_spin.cc +++ b/source/api_cc/tests/test_deeppot_tf_spin.cc @@ -74,7 +74,7 @@ TYPED_TEST(TestInferDeepPotSpin, cpu_build_nlist) { deepmd::DeepPot& dp = this->dp; double ener; std::vector force, force_mag, virial; - dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); + dp.compute_spin(ener, force, force_mag, virial, coord, spin, atype, box); EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); @@ -98,7 +98,7 @@ TYPED_TEST(TestInferDeepPotSpin, cpu_build_nlist_atomic) { deepmd::DeepPot& dp = this->dp; double ener; std::vector force, force_mag, virial, atom_ener, atom_vir; - dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, atype, box); EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); From 319493a3b064e7338fae15e87a0101ef9e4ca839 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 12:26:04 +0000 Subject: [PATCH 118/193] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- source/api_cc/tests/test_deeppot_tf_spin.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/api_cc/tests/test_deeppot_tf_spin.cc b/source/api_cc/tests/test_deeppot_tf_spin.cc index 95f35a49cd..23b79b64d7 100644 --- a/source/api_cc/tests/test_deeppot_tf_spin.cc +++ b/source/api_cc/tests/test_deeppot_tf_spin.cc @@ -98,8 +98,8 @@ TYPED_TEST(TestInferDeepPotSpin, cpu_build_nlist_atomic) { deepmd::DeepPot& dp = this->dp; double ener; std::vector force, force_mag, virial, atom_ener, atom_vir; - dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, - atype, box); + dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, + spin, atype, box); EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); // EXPECT_EQ(atom_ener.size(), natoms); From b1e4a03927641707be1075a9561b66ef6fb56a99 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 1 Nov 2024 21:26:06 +0800 Subject: [PATCH 119/193] Update build_lammps.sh --- source/install/build_lammps.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/install/build_lammps.sh b/source/install/build_lammps.sh index add1194151..d101714739 100755 --- a/source/install/build_lammps.sh +++ b/source/install/build_lammps.sh @@ -23,7 +23,7 @@ fi cd ${BUILD_TMP_DIR}/lammps-${LAMMPS_VERSION} mkdir -p ${BUILD_TMP_DIR}/lammps-${LAMMPS_VERSION}/build cd ${BUILD_TMP_DIR}/lammps-${LAMMPS_VERSION}/build -cmake -C ../cmake/presets/all_off.cmake -D PKG_PLUGIN=ON -D PKG_MOLECULE=ON -DLAMMPS_EXCEPTIONS=yes -D BUILD_SHARED_LIBS=yes -D LAMMPS_INSTALL_RPATH=ON -D CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -D CMAKE_INSTALL_LIBDIR=lib -D CMAKE_INSTALL_FULL_LIBDIR=${INSTALL_PREFIX}/lib ../cmake +cmake -C ../cmake/presets/all_off.cmake -D PKG_PLUGIN=ON -D PKG_SPIN=ON -D PKG_MOLECULE=ON -DLAMMPS_EXCEPTIONS=yes -D BUILD_SHARED_LIBS=yes -D LAMMPS_INSTALL_RPATH=ON -D CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -D CMAKE_INSTALL_LIBDIR=lib -D CMAKE_INSTALL_FULL_LIBDIR=${INSTALL_PREFIX}/lib ../cmake make -j${NPROC} make install From 6a75c6b3826280dc2f3761d4dd851df4d495c7cc Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 1 Nov 2024 21:44:00 -0400 Subject: [PATCH 120/193] feat(jax/array-api): se_t_tebd (#4288) ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced support for JAX as a backend for the "se_e3_tebd" descriptor, enhancing flexibility in computational options. - Added serialization and deserialization methods to the descriptor classes for better state management. - **Bug Fixes** - Improved handling of attributes in the descriptor classes to ensure correct data types and transformations. - **Tests** - Enhanced the test suite to support multiple backends, including JAX and Array API Strict, improving the robustness of testing. Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/descriptor/se_t_tebd.py | 150 +++++++++++++----- deepmd/jax/descriptor/se_t_tebd.py | 56 +++++++ doc/model/train-se-e3-tebd.md | 4 +- .../array_api_strict/descriptor/se_t_tebd.py | 47 ++++++ .../consistent/descriptor/test_se_t_tebd.py | 37 +++++ 5 files changed, 253 insertions(+), 41 deletions(-) create mode 100644 deepmd/jax/descriptor/se_t_tebd.py create mode 100644 source/tests/array_api_strict/descriptor/se_t_tebd.py diff --git a/deepmd/dpmodel/descriptor/se_t_tebd.py b/deepmd/dpmodel/descriptor/se_t_tebd.py index ca89c23968..298f823690 100644 --- a/deepmd/dpmodel/descriptor/se_t_tebd.py +++ b/deepmd/dpmodel/descriptor/se_t_tebd.py @@ -5,12 +5,20 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel import ( PRECISION_DICT, NativeOP, ) +from deepmd.dpmodel.array_api import ( + xp_take_along_axis, +) +from deepmd.dpmodel.common import ( + get_xp_precision, + to_numpy_array, +) from deepmd.dpmodel.utils import ( EmbeddingNet, EnvMat, @@ -26,9 +34,6 @@ from deepmd.dpmodel.utils.update_sel import ( UpdateSel, ) -from deepmd.env import ( - GLOBAL_NP_FLOAT_PRECISION, -) from deepmd.utils.data_system import ( DeepmdDataSystem, ) @@ -318,11 +323,15 @@ def call( sw The smooth switch function. """ + xp = array_api_compat.array_namespace(nlist, coord_ext, atype_ext) del mapping nf, nloc, nnei = nlist.shape - nall = coord_ext.reshape(nf, -1).shape[1] // 3 + nall = xp.reshape(coord_ext, (nf, -1)).shape[1] // 3 # nf x nall x tebd_dim - atype_embd_ext = self.type_embedding.call()[atype_ext] + atype_embd_ext = xp.reshape( + xp.take(self.type_embedding.call(), xp.reshape(atype_ext, [-1]), axis=0), + (nf, nall, self.tebd_dim), + ) # nfnl x tebd_dim atype_embd = atype_embd_ext[:, :nloc, :] grrg, g2, h2, rot_mat, sw = self.se_ttebd( @@ -334,8 +343,8 @@ def call( ) # nf x nloc x (ng + tebd_dim) if self.concat_output_tebd: - grrg = np.concatenate( - [grrg, atype_embd.reshape(nf, nloc, self.tebd_dim)], axis=-1 + grrg = xp.concat( + [grrg, xp.reshape(atype_embd, (nf, nloc, self.tebd_dim))], axis=-1 ) return grrg, rot_mat, None, None, sw @@ -368,8 +377,8 @@ def serialize(self) -> dict: "env_protection": obj.env_protection, "smooth": self.smooth, "@variables": { - "davg": obj["davg"], - "dstd": obj["dstd"], + "davg": to_numpy_array(obj["davg"]), + "dstd": to_numpy_array(obj["dstd"]), }, "trainable": self.trainable, } @@ -491,12 +500,12 @@ def __init__( else: self.embd_input_dim = 1 - self.embeddings = NetworkCollection( + embeddings = NetworkCollection( ndim=0, ntypes=self.ntypes, network_type="embedding_network", ) - self.embeddings[0] = EmbeddingNet( + embeddings[0] = EmbeddingNet( self.embd_input_dim, self.neuron, self.activation_function, @@ -504,13 +513,14 @@ def __init__( self.precision, seed=child_seed(seed, 0), ) + self.embeddings = embeddings if self.tebd_input_mode in ["strip"]: - self.embeddings_strip = NetworkCollection( + embeddings_strip = NetworkCollection( ndim=0, ntypes=self.ntypes, network_type="embedding_network", ) - self.embeddings_strip[0] = EmbeddingNet( + embeddings_strip[0] = EmbeddingNet( self.tebd_dim_input, self.neuron, self.activation_function, @@ -518,6 +528,7 @@ def __init__( self.precision, seed=child_seed(seed, 1), ) + self.embeddings_strip = embeddings_strip else: self.embeddings_strip = None @@ -652,6 +663,7 @@ def call( atype_embd_ext: Optional[np.ndarray] = None, mapping: Optional[np.ndarray] = None, ): + xp = array_api_compat.array_namespace(nlist, coord_ext, atype_ext) # nf x nloc x nnei x 4 dmatrix, diff, sw = self.env_mat.call( coord_ext, atype_ext, nlist, self.mean, self.stddev @@ -659,47 +671,49 @@ def call( nf, nloc, nnei, _ = dmatrix.shape exclude_mask = self.emask.build_type_exclude_mask(nlist, atype_ext) # nfnl x nnei - exclude_mask = exclude_mask.reshape(nf * nloc, nnei) + exclude_mask = xp.reshape(exclude_mask, (nf * nloc, nnei)) # nfnl x nnei - nlist = nlist.reshape(nf * nloc, nnei) - nlist = np.where(exclude_mask, nlist, -1) + nlist = xp.reshape(nlist, (nf * nloc, nnei)) + nlist = xp.where(exclude_mask, nlist, xp.full_like(nlist, -1)) # nfnl x nnei nlist_mask = nlist != -1 # nfnl x nnei x 1 - sw = np.where(nlist_mask[:, :, None], sw.reshape(nf * nloc, nnei, 1), 0.0) + sw = xp.where( + nlist_mask[:, :, None], + xp.reshape(sw, (nf * nloc, nnei, 1)), + xp.zeros((nf * nloc, nnei, 1), dtype=sw.dtype), + ) # nfnl x nnei x 4 - dmatrix = dmatrix.reshape(nf * nloc, nnei, 4) + dmatrix = xp.reshape(dmatrix, (nf * nloc, nnei, 4)) # nfnl x nnei x 4 rr = dmatrix - rr = rr * exclude_mask[:, :, None] + rr = rr * xp.astype(exclude_mask[:, :, None], rr.dtype) # nfnl x nt_i x 3 rr_i = rr[:, :, 1:] # nfnl x nt_j x 3 rr_j = rr[:, :, 1:] # nfnl x nt_i x nt_j - env_ij = np.einsum("ijm,ikm->ijk", rr_i, rr_j) + # env_ij = np.einsum("ijm,ikm->ijk", rr_i, rr_j) + env_ij = xp.sum(rr_i[:, :, None, :] * rr_j[:, None, :, :], axis=-1) # nfnl x nt_i x nt_j x 1 - ss = np.expand_dims(env_ij, axis=-1) + ss = env_ij[..., None] - nlist_masked = np.where(nlist_mask, nlist, 0) - index = np.tile(nlist_masked.reshape(nf, -1, 1), (1, 1, self.tebd_dim)) + nlist_masked = xp.where(nlist_mask, nlist, xp.zeros_like(nlist)) + index = xp.tile(xp.reshape(nlist_masked, (nf, -1, 1)), (1, 1, self.tebd_dim)) # nfnl x nnei x tebd_dim - atype_embd_nlist = np.take_along_axis(atype_embd_ext, index, axis=1).reshape( - nf * nloc, nnei, self.tebd_dim + atype_embd_nlist = xp_take_along_axis(atype_embd_ext, index, axis=1) + atype_embd_nlist = xp.reshape( + atype_embd_nlist, (nf * nloc, nnei, self.tebd_dim) ) # nfnl x nt_i x nt_j x tebd_dim - nlist_tebd_i = np.tile( - np.expand_dims(atype_embd_nlist, axis=2), [1, 1, self.nnei, 1] - ) - nlist_tebd_j = np.tile( - np.expand_dims(atype_embd_nlist, axis=1), [1, self.nnei, 1, 1] - ) + nlist_tebd_i = xp.tile(atype_embd_nlist[:, :, None, :], (1, 1, self.nnei, 1)) + nlist_tebd_j = xp.tile(atype_embd_nlist[:, None, :, :], (1, self.nnei, 1, 1)) ng = self.neuron[-1] if self.tebd_input_mode in ["concat"]: # nfnl x nt_i x nt_j x (1 + tebd_dim * 2) - ss = np.concatenate([ss, nlist_tebd_i, nlist_tebd_j], axis=-1) + ss = xp.concat([ss, nlist_tebd_i, nlist_tebd_j], axis=-1) # nfnl x nt_i x nt_j x ng gg = self.cal_g(ss, 0) elif self.tebd_input_mode in ["strip"]: @@ -707,14 +721,14 @@ def call( gg_s = self.cal_g(ss, 0) assert self.embeddings_strip is not None # nfnl x nt_i x nt_j x (tebd_dim * 2) - tt = np.concatenate([nlist_tebd_i, nlist_tebd_j], axis=-1) + tt = xp.concat([nlist_tebd_i, nlist_tebd_j], axis=-1) # nfnl x nt_i x nt_j x ng gg_t = self.cal_g_strip(tt, 0) if self.smooth: gg_t = ( gg_t - * sw.reshape(nf * nloc, self.nnei, 1, 1) - * sw.reshape(nf * nloc, 1, self.nnei, 1) + * xp.reshape(sw, (nf * nloc, self.nnei, 1, 1)) + * xp.reshape(sw, (nf * nloc, 1, self.nnei, 1)) ) # nfnl x nt_i x nt_j x ng gg = gg_s * gg_t + gg_s @@ -722,12 +736,12 @@ def call( raise NotImplementedError # nfnl x ng - res_ij = np.einsum("ijk,ijkm->im", env_ij, gg) + # res_ij = np.einsum("ijk,ijkm->im", env_ij, gg) + res_ij = xp.sum(env_ij[:, :, :, None] * gg[:, :, :, :], axis=(1, 2)) res_ij = res_ij * (1.0 / float(self.nnei) / float(self.nnei)) # nf x nl x ng - result = res_ij.reshape(nf, nloc, self.filter_neuron[-1]).astype( - GLOBAL_NP_FLOAT_PRECISION - ) + result = xp.reshape(res_ij, (nf, nloc, self.filter_neuron[-1])) + result = xp.astype(result, get_xp_precision(xp, "global")) return ( result, None, @@ -743,3 +757,61 @@ def has_message_passing(self) -> bool: def need_sorted_nlist_for_lower(self) -> bool: """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" return False + + def serialize(self) -> dict: + """Serialize the descriptor to dict.""" + obj = self + data = { + "@class": "Descriptor", + "type": "se_e3_tebd", + "@version": 1, + "rcut": obj.rcut, + "rcut_smth": obj.rcut_smth, + "sel": obj.sel, + "ntypes": obj.ntypes, + "neuron": obj.neuron, + "tebd_dim": obj.tebd_dim, + "tebd_input_mode": obj.tebd_input_mode, + "set_davg_zero": obj.set_davg_zero, + "activation_function": obj.activation_function, + "resnet_dt": obj.resnet_dt, + # make deterministic + "precision": np.dtype(PRECISION_DICT[obj.precision]).name, + "embeddings": obj.embeddings.serialize(), + "env_mat": obj.env_mat.serialize(), + "exclude_types": obj.exclude_types, + "env_protection": obj.env_protection, + "smooth": obj.smooth, + "@variables": { + "davg": to_numpy_array(obj["davg"]), + "dstd": to_numpy_array(obj["dstd"]), + }, + } + if obj.tebd_input_mode in ["strip"]: + data.update({"embeddings_strip": obj.embeddings_strip.serialize()}) + return data + + @classmethod + def deserialize(cls, data: dict) -> "DescrptSeTTebd": + """Deserialize from dict.""" + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + data.pop("type") + variables = data.pop("@variables") + embeddings = data.pop("embeddings") + env_mat = data.pop("env_mat") + tebd_input_mode = data["tebd_input_mode"] + if tebd_input_mode in ["strip"]: + embeddings_strip = data.pop("embeddings_strip") + else: + embeddings_strip = None + se_ttebd = cls(**data) + + se_ttebd["davg"] = variables["davg"] + se_ttebd["dstd"] = variables["dstd"] + se_ttebd.embeddings = NetworkCollection.deserialize(embeddings) + if tebd_input_mode in ["strip"]: + se_ttebd.embeddings_strip = NetworkCollection.deserialize(embeddings_strip) + + return se_ttebd diff --git a/deepmd/jax/descriptor/se_t_tebd.py b/deepmd/jax/descriptor/se_t_tebd.py new file mode 100644 index 0000000000..84e3d3f084 --- /dev/null +++ b/deepmd/jax/descriptor/se_t_tebd.py @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.se_t_tebd import ( + DescrptBlockSeTTebd as DescrptBlockSeTTebdDP, +) +from deepmd.dpmodel.descriptor.se_t_tebd import DescrptSeTTebd as DescrptSeTTebdDP +from deepmd.jax.common import ( + ArrayAPIVariable, + flax_module, + to_jax_array, +) +from deepmd.jax.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.jax.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.jax.utils.network import ( + NetworkCollection, +) +from deepmd.jax.utils.type_embed import ( + TypeEmbedNet, +) + + +@flax_module +class DescrptBlockSeTTebd(DescrptBlockSeTTebdDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mean", "stddev"}: + value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) + elif name in {"embeddings", "embeddings_strip"}: + if value is not None: + value = NetworkCollection.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) + + +@BaseDescriptor.register("se_e3_tebd") +@flax_module +class DescrptSeTTebd(DescrptSeTTebdDP): + def __setattr__(self, name: str, value: Any) -> None: + if name == "se_ttebd": + value = DescrptBlockSeTTebd.deserialize(value.serialize()) + elif name == "type_embedding": + value = TypeEmbedNet.deserialize(value.serialize()) + return super().__setattr__(name, value) diff --git a/doc/model/train-se-e3-tebd.md b/doc/model/train-se-e3-tebd.md index 5935a8920a..49d0d80f42 100644 --- a/doc/model/train-se-e3-tebd.md +++ b/doc/model/train-se-e3-tebd.md @@ -1,7 +1,7 @@ -# Descriptor `"se_e3_tebd"` {{ pytorch_icon }} {{ dpmodel_icon }} +# Descriptor `"se_e3_tebd"` {{ pytorch_icon }} {{ jax_icon }} {{ dpmodel_icon }} :::{note} -**Supported backends**: PyTorch {{ pytorch_icon }}, DP {{ dpmodel_icon }} +**Supported backends**: PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: The notation of `se_e3_tebd` is short for the three-body embedding descriptor with type embeddings, where the notation `se` denotes the Deep Potential Smooth Edition (DeepPot-SE). diff --git a/source/tests/array_api_strict/descriptor/se_t_tebd.py b/source/tests/array_api_strict/descriptor/se_t_tebd.py new file mode 100644 index 0000000000..12fc04e69e --- /dev/null +++ b/source/tests/array_api_strict/descriptor/se_t_tebd.py @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.se_t_tebd import ( + DescrptBlockSeTTebd as DescrptBlockSeTTebdDP, +) +from deepmd.dpmodel.descriptor.se_t_tebd import DescrptSeTTebd as DescrptSeTTebdDP + +from ..common import ( + to_array_api_strict_array, +) +from ..utils.exclude_mask import ( + PairExcludeMask, +) +from ..utils.network import ( + NetworkCollection, +) +from ..utils.type_embed import ( + TypeEmbedNet, +) + + +class DescrptBlockSeTTebd(DescrptBlockSeTTebdDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mean", "stddev"}: + value = to_array_api_strict_array(value) + elif name in {"embeddings", "embeddings_strip"}: + if value is not None: + value = NetworkCollection.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) + + +class DescrptSeTTebd(DescrptSeTTebdDP): + def __setattr__(self, name: str, value: Any) -> None: + if name == "se_ttebd": + value = DescrptBlockSeTTebd.deserialize(value.serialize()) + elif name == "type_embedding": + value = TypeEmbedNet.deserialize(value.serialize()) + return super().__setattr__(name, value) diff --git a/source/tests/consistent/descriptor/test_se_t_tebd.py b/source/tests/consistent/descriptor/test_se_t_tebd.py index 3299a04c78..4712c28e53 100644 --- a/source/tests/consistent/descriptor/test_se_t_tebd.py +++ b/source/tests/consistent/descriptor/test_se_t_tebd.py @@ -15,6 +15,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, CommonTest, parameterized, @@ -28,6 +30,16 @@ else: DescrptSeTTebdPT = None DescrptSeTTebdTF = None +if INSTALLED_JAX: + from deepmd.jax.descriptor.se_t_tebd import DescrptSeTTebd as DescrptSeTTebdJAX +else: + DescrptSeTTebdJAX = None +if INSTALLED_ARRAY_API_STRICT: + from ...array_api_strict.descriptor.se_t_tebd import ( + DescrptSeTTebd as DescrptSeTTebdStrict, + ) +else: + DescrptSeTTebdStrict = None from deepmd.utils.argcheck import ( descrpt_se_e3_tebd_args, ) @@ -134,9 +146,14 @@ def skip_tf(self) -> bool: ) = self.param return True + skip_jax = not INSTALLED_JAX + skip_array_api_strict = not INSTALLED_ARRAY_API_STRICT + tf_class = DescrptSeTTebdTF dp_class = DescrptSeTTebdDP pt_class = DescrptSeTTebdPT + jax_class = DescrptSeTTebdJAX + array_api_strict_class = DescrptSeTTebdStrict args = descrpt_se_e3_tebd_args().append(Argument("ntypes", int, optional=False)) def setUp(self): @@ -216,6 +233,26 @@ def eval_pt(self, pt_obj: Any) -> Any: mixed_types=True, ) + def eval_jax(self, jax_obj: Any) -> Any: + return self.eval_jax_descriptor( + jax_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + return self.eval_array_api_strict_descriptor( + array_api_strict_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) From e1c868ef01849b6812fd3efeb3697c4f5de138a0 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 1 Nov 2024 21:44:52 -0400 Subject: [PATCH 121/193] feat(jax/array-api): se_atten_v2 (#4289) ## Summary by CodeRabbit - **New Features** - Introduced a new descriptor class `DescrptSeAttenV2`, enhancing the framework's capabilities. - Improved test suite flexibility to support multiple backends, including JAX and strict array API. - **Bug Fixes** - Updated serialization logic for `davg` and `dstd` to ensure consistent output format. - **Tests** - Enhanced `TestSeAttenV2` class with new properties and methods for better backend evaluation. Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/descriptor/se_atten_v2.py | 7 +- deepmd/jax/descriptor/se_atten_v2.py | 13 +++ .../descriptor/se_atten_v2.py | 10 ++ .../consistent/descriptor/test_se_atten_v2.py | 95 +++++++++++++++++++ 4 files changed, 123 insertions(+), 2 deletions(-) create mode 100644 deepmd/jax/descriptor/se_atten_v2.py create mode 100644 source/tests/array_api_strict/descriptor/se_atten_v2.py diff --git a/deepmd/dpmodel/descriptor/se_atten_v2.py b/deepmd/dpmodel/descriptor/se_atten_v2.py index e0ac222524..897863ec0f 100644 --- a/deepmd/dpmodel/descriptor/se_atten_v2.py +++ b/deepmd/dpmodel/descriptor/se_atten_v2.py @@ -11,6 +11,9 @@ DEFAULT_PRECISION, PRECISION_DICT, ) +from deepmd.dpmodel.common import ( + to_numpy_array, +) from deepmd.dpmodel.utils import ( NetworkCollection, ) @@ -146,8 +149,8 @@ def serialize(self) -> dict: "exclude_types": obj.exclude_types, "env_protection": obj.env_protection, "@variables": { - "davg": obj["davg"], - "dstd": obj["dstd"], + "davg": to_numpy_array(obj["davg"]), + "dstd": to_numpy_array(obj["dstd"]), }, ## to be updated when the options are supported. "trainable": self.trainable, diff --git a/deepmd/jax/descriptor/se_atten_v2.py b/deepmd/jax/descriptor/se_atten_v2.py new file mode 100644 index 0000000000..a7ef4035cd --- /dev/null +++ b/deepmd/jax/descriptor/se_atten_v2.py @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.dpmodel.descriptor.se_atten_v2 import DescrptSeAttenV2 as DescrptSeAttenV2DP +from deepmd.jax.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.jax.descriptor.dpa1 import ( + DescrptDPA1, +) + + +@BaseDescriptor.register("se_atten_v2") +class DescrptSeAttenV2(DescrptDPA1, DescrptSeAttenV2DP): + pass diff --git a/source/tests/array_api_strict/descriptor/se_atten_v2.py b/source/tests/array_api_strict/descriptor/se_atten_v2.py new file mode 100644 index 0000000000..a2e06ac0e2 --- /dev/null +++ b/source/tests/array_api_strict/descriptor/se_atten_v2.py @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.dpmodel.descriptor.se_atten_v2 import DescrptSeAttenV2 as DescrptSeAttenV2DP + +from .dpa1 import ( + DescrptDPA1, +) + + +class DescrptSeAttenV2(DescrptDPA1, DescrptSeAttenV2DP): + pass diff --git a/source/tests/consistent/descriptor/test_se_atten_v2.py b/source/tests/consistent/descriptor/test_se_atten_v2.py index a3fe4e98b4..f4a8119ca3 100644 --- a/source/tests/consistent/descriptor/test_se_atten_v2.py +++ b/source/tests/consistent/descriptor/test_se_atten_v2.py @@ -16,6 +16,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, CommonTest, parameterized, @@ -30,6 +32,18 @@ ) else: DescrptSeAttenV2PT = None +if INSTALLED_JAX: + from deepmd.jax.descriptor.se_atten_v2 import ( + DescrptSeAttenV2 as DescrptSeAttenV2JAX, + ) +else: + DescrptSeAttenV2JAX = None +if INSTALLED_ARRAY_API_STRICT: + from ...array_api_strict.descriptor.se_atten_v2 import ( + DescrptSeAttenV2 as DescrptSeAttenV2Strict, + ) +else: + DescrptSeAttenV2Strict = None DescrptSeAttenV2TF = None from deepmd.utils.argcheck import ( descrpt_se_atten_args, @@ -175,9 +189,70 @@ def skip_dp(self) -> bool: def skip_tf(self) -> bool: return True + @property + def skip_jax(self) -> bool: + ( + tebd_dim, + resnet_dt, + type_one_side, + attn, + attn_layer, + attn_dotr, + excluded_types, + env_protection, + set_davg_zero, + scaling_factor, + normalize, + temperature, + ln_eps, + concat_output_tebd, + precision, + use_econf_tebd, + use_tebd_bias, + ) = self.param + return not INSTALLED_JAX or self.is_meaningless_zero_attention_layer_tests( + attn_layer, + attn_dotr, + normalize, + temperature, + ) + + @property + def skip_array_api_strict(self) -> bool: + ( + tebd_dim, + resnet_dt, + type_one_side, + attn, + attn_layer, + attn_dotr, + excluded_types, + env_protection, + set_davg_zero, + scaling_factor, + normalize, + temperature, + ln_eps, + concat_output_tebd, + precision, + use_econf_tebd, + use_tebd_bias, + ) = self.param + return ( + not INSTALLED_ARRAY_API_STRICT + or self.is_meaningless_zero_attention_layer_tests( + attn_layer, + attn_dotr, + normalize, + temperature, + ) + ) + tf_class = DescrptSeAttenV2TF dp_class = DescrptSeAttenV2DP pt_class = DescrptSeAttenV2PT + jax_class = DescrptSeAttenV2JAX + array_api_strict_class = DescrptSeAttenV2Strict args = descrpt_se_atten_args().append(Argument("ntypes", int, optional=False)) def setUp(self): @@ -244,6 +319,26 @@ def eval_pt(self, pt_obj: Any) -> Any: mixed_types=True, ) + def eval_jax(self, jax_obj: Any) -> Any: + return self.eval_jax_descriptor( + jax_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + return self.eval_array_api_strict_descriptor( + array_api_strict_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) From fb41a4fdb4e74a94cfe9a43b8b8d1d97a247402a Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 1 Nov 2024 21:49:08 -0400 Subject: [PATCH 122/193] feat(jax): atomic virial (#4290) For the frozen model, store two exported functions: one enables do_atomic_virial and the other doesn't. This PR is in conflict with #4285 (in `serialization.py`), and the conflict must be resolved after one is merged. ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced a new parameter for enhanced atomic virial data handling in model evaluations. - Added support for atomic virial calculations in multiple model evaluation methods. - Updated export functionality to dynamically include atomic virial data based on user input. - **Bug Fixes** - Improved output structures across various backends to accommodate new atomic virial data. - **Tests** - Enhanced test cases to verify the new atomic virial functionalities and ensure compatibility with existing evaluations. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/jax/infer/deep_eval.py | 3 ++ deepmd/jax/model/base_model.py | 60 +++++++++++++++++++--- deepmd/jax/model/hlo.py | 11 +++- deepmd/jax/utils/serialization.py | 49 +++++++++++++----- source/tests/consistent/io/test_io.py | 9 ++++ source/tests/consistent/model/common.py | 10 +++- source/tests/consistent/model/test_ener.py | 11 +++- 7 files changed, 131 insertions(+), 22 deletions(-) diff --git a/deepmd/jax/infer/deep_eval.py b/deepmd/jax/infer/deep_eval.py index c1967fb0da..b60076c68c 100644 --- a/deepmd/jax/infer/deep_eval.py +++ b/deepmd/jax/infer/deep_eval.py @@ -93,6 +93,9 @@ def __init__( model_data = load_dp_model(model_file) self.dp = HLO( stablehlo=model_data["@variables"]["stablehlo"].tobytes(), + stablehlo_atomic_virial=model_data["@variables"][ + "stablehlo_atomic_virial" + ].tobytes(), model_def_script=model_data["model_def_script"], **model_data["constants"], ) diff --git a/deepmd/jax/model/base_model.py b/deepmd/jax/model/base_model.py index 8631c85d16..1e880700a2 100644 --- a/deepmd/jax/model/base_model.py +++ b/deepmd/jax/model/base_model.py @@ -91,17 +91,65 @@ def eval_output( assert vdef.r_differentiable # avr: [nf, *def, nall, 3, 3] avr = jnp.einsum("f...ai,faj->f...aij", ff, extended_coord) + # the correction sums to zero, which does not contribute to global virial + if do_atomic_virial: + + def eval_ce( + cc_ext, + extended_atype, + nlist, + mapping, + fparam, + aparam, + *, + _kk=kk, + _atom_axis=atom_axis - 1, + ): + # atomic_ret[_kk]: [nf, nloc, *def] + atomic_ret = self.atomic_model.forward_common_atomic( + cc_ext[None, ...], + extended_atype[None, ...], + nlist[None, ...], + mapping=mapping[None, ...] if mapping is not None else None, + fparam=fparam[None, ...] if fparam is not None else None, + aparam=aparam[None, ...] if aparam is not None else None, + ) + nloc = nlist.shape[0] + cc_loc = jax.lax.stop_gradient(cc_ext)[:nloc, ...] + cc_loc = jnp.reshape(cc_loc, [nloc, *[1] * def_ndim, 3]) + # [*def, 3] + return jnp.sum( + atomic_ret[_kk][0, ..., None] * cc_loc, axis=_atom_axis + ) + + # extended_virial_corr: [nf, *def, 3, nall, 3] + extended_virial_corr = jax.vmap(jax.jacrev(eval_ce, argnums=0))( + extended_coord, + extended_atype, + nlist, + mapping, + fparam, + aparam, + ) + # move the first 3 to the last + # [nf, *def, nall, 3, 3] + extended_virial_corr = jnp.transpose( + extended_virial_corr, + [ + 0, + *range(1, def_ndim + 1), + def_ndim + 2, + def_ndim + 3, + def_ndim + 1, + ], + ) + avr += extended_virial_corr + # to [...,3,3] -> [...,9] # avr: [nf, *def, nall, 9] avr = jnp.reshape(avr, [*ff.shape[:-1], 9]) # extended_virial: [nf, nall, *def, 9] extended_virial = jnp.transpose( avr, [0, def_ndim + 1, *range(1, def_ndim + 1), def_ndim + 2] ) - - # the correction sums to zero, which does not contribute to global virial - # cannot jit - # if do_atomic_virial: - # raise NotImplementedError("Atomic virial is not implemented yet.") - # to [...,3,3] -> [...,9] model_predict[kk_derv_c] = extended_virial return model_predict diff --git a/deepmd/jax/model/hlo.py b/deepmd/jax/model/hlo.py index 010e3d7a5e..2946f8bec7 100644 --- a/deepmd/jax/model/hlo.py +++ b/deepmd/jax/model/hlo.py @@ -45,6 +45,7 @@ class HLO(BaseModel): def __init__( self, stablehlo, + stablehlo_atomic_virial, model_def_script, type_map, rcut, @@ -58,6 +59,9 @@ def __init__( sel, ) -> None: self._call_lower = jax_export.deserialize(stablehlo).call + self._call_lower_atomic_virial = jax_export.deserialize( + stablehlo_atomic_virial + ).call self.stablehlo = stablehlo self.type_map = type_map self.rcut = rcut @@ -170,14 +174,17 @@ def call_lower( aparam: Optional[jnp.ndarray] = None, do_atomic_virial: bool = False, ): - return self._call_lower( + if do_atomic_virial: + call_lower = self._call_lower_atomic_virial + else: + call_lower = self._call_lower + return call_lower( extended_coord, extended_atype, nlist, mapping, fparam, aparam, - do_atomic_virial, ) def get_type_map(self) -> list[str]: diff --git a/deepmd/jax/utils/serialization.py b/deepmd/jax/utils/serialization.py index a7d57523e2..ec2de3060e 100644 --- a/deepmd/jax/utils/serialization.py +++ b/deepmd/jax/utils/serialization.py @@ -52,23 +52,48 @@ def deserialize_to_file(model_file: str, data: dict) -> None: call_lower = model.call_lower nf, nloc, nghost = jax_export.symbolic_shape("nf, nloc, nghost") - exported = jax_export.export(jax.jit(call_lower))( - jax.ShapeDtypeStruct((nf, nloc + nghost, 3), jnp.float64), # extended_coord - jax.ShapeDtypeStruct((nf, nloc + nghost), jnp.int32), # extended_atype - jax.ShapeDtypeStruct((nf, nloc, model.get_nnei()), jnp.int64), # nlist - jax.ShapeDtypeStruct((nf, nloc + nghost), jnp.int64), # mapping - jax.ShapeDtypeStruct((nf, model.get_dim_fparam()), jnp.float64) - if model.get_dim_fparam() - else None, # fparam - jax.ShapeDtypeStruct((nf, nloc, model.get_dim_aparam()), jnp.float64) - if model.get_dim_aparam() - else None, # aparam - False, # do_atomic_virial + + def exported_whether_do_atomic_virial(do_atomic_virial): + def call_lower_with_fixed_do_atomic_virial( + coord, atype, nlist, nlist_start, fparam, aparam + ): + return call_lower( + coord, + atype, + nlist, + nlist_start, + fparam, + aparam, + do_atomic_virial=do_atomic_virial, + ) + + return jax_export.export(jax.jit(call_lower_with_fixed_do_atomic_virial))( + jax.ShapeDtypeStruct( + (nf, nloc + nghost, 3), jnp.float64 + ), # extended_coord + jax.ShapeDtypeStruct((nf, nloc + nghost), jnp.int32), # extended_atype + jax.ShapeDtypeStruct((nf, nloc, model.get_nnei()), jnp.int64), # nlist + jax.ShapeDtypeStruct((nf, nloc + nghost), jnp.int64), # mapping + jax.ShapeDtypeStruct((nf, model.get_dim_fparam()), jnp.float64) + if model.get_dim_fparam() + else None, # fparam + jax.ShapeDtypeStruct((nf, nloc, model.get_dim_aparam()), jnp.float64) + if model.get_dim_aparam() + else None, # aparam + ) + + exported = exported_whether_do_atomic_virial(do_atomic_virial=False) + exported_atomic_virial = exported_whether_do_atomic_virial( + do_atomic_virial=True ) serialized: bytearray = exported.serialize() + serialized_atomic_virial = exported_atomic_virial.serialize() data = data.copy() data.setdefault("@variables", {}) data["@variables"]["stablehlo"] = np.void(serialized) + data["@variables"]["stablehlo_atomic_virial"] = np.void( + serialized_atomic_virial + ) data["constants"] = { "type_map": model.get_type_map(), "rcut": model.get_rcut(), diff --git a/source/tests/consistent/io/test_io.py b/source/tests/consistent/io/test_io.py index af26c41694..91cd391322 100644 --- a/source/tests/consistent/io/test_io.py +++ b/source/tests/consistent/io/test_io.py @@ -163,6 +163,15 @@ def test_deep_eval(self): aparam=aparam, ) rets.append(ret) + ret = deep_eval.eval( + self.coords, + self.box, + self.atype, + fparam=fparam, + aparam=aparam, + do_atomic_virial=True, + ) + rets.append(ret) for ret in rets[1:]: for vv1, vv2 in zip(rets[0], ret): if np.isnan(vv2).all(): diff --git a/source/tests/consistent/model/common.py b/source/tests/consistent/model/common.py index 11940d9bdf..4eeb19b1f0 100644 --- a/source/tests/consistent/model/common.py +++ b/source/tests/consistent/model/common.py @@ -51,7 +51,13 @@ def build_tf_model(self, obj, natoms, coords, atype, box, suffix): {}, suffix=suffix, ) - return [ret["energy"], ret["atom_ener"], ret["force"], ret["virial"]], { + return [ + ret["energy"], + ret["atom_ener"], + ret["force"], + ret["virial"], + ret["atom_virial"], + ], { t_coord: coords, t_type: atype, t_natoms: natoms, @@ -69,6 +75,7 @@ def eval_pt_model(self, pt_obj: Any, natoms, coords, atype, box) -> Any: numpy_to_torch(coords), numpy_to_torch(atype), box=numpy_to_torch(box), + do_atomic_virial=True, ).items() } @@ -83,5 +90,6 @@ def assert_jax_array(arr): numpy_to_jax(coords), numpy_to_jax(atype), box=numpy_to_jax(box), + do_atomic_virial=True, ).items() } diff --git a/source/tests/consistent/model/test_ener.py b/source/tests/consistent/model/test_ener.py index 98330ba849..ec73c57fa8 100644 --- a/source/tests/consistent/model/test_ener.py +++ b/source/tests/consistent/model/test_ener.py @@ -216,6 +216,7 @@ def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: ret["energy"].ravel(), SKIP_FLAG, SKIP_FLAG, + SKIP_FLAG, ) elif backend is self.RefBackend.PT: return ( @@ -223,14 +224,22 @@ def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: ret["atom_energy"].ravel(), ret["force"].ravel(), ret["virial"].ravel(), + ret["atom_virial"].ravel(), ) elif backend is self.RefBackend.TF: - return (ret[0].ravel(), ret[1].ravel(), ret[2].ravel(), ret[3].ravel()) + return ( + ret[0].ravel(), + ret[1].ravel(), + ret[2].ravel(), + ret[3].ravel(), + ret[4].ravel(), + ) elif backend is self.RefBackend.JAX: return ( ret["energy_redu"].ravel(), ret["energy"].ravel(), ret["energy_derv_r"].ravel(), ret["energy_derv_c_redu"].ravel(), + ret["energy_derv_c"].ravel(), ) raise ValueError(f"Unknown backend: {backend}") From f129cffc78c9b707dd325f3a20599b413f20d0f2 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 1 Nov 2024 21:49:34 -0400 Subject: [PATCH 123/193] ci: install GPU JAX in GPU CI (#4293) ## Summary by CodeRabbit - **Chores** - Updated the workflow configuration for testing CUDA to improve efficiency and concurrency. - Added a new package for enhanced environment setup in CUDA testing. - Introduced an environment variable to optimize GPU memory allocation during tests. Signed-off-by: Jinzhe Zeng --- .github/workflows/test_cuda.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test_cuda.yml b/.github/workflows/test_cuda.yml index 6bf4c8552f..996a1bcff0 100644 --- a/.github/workflows/test_cuda.yml +++ b/.github/workflows/test_cuda.yml @@ -47,7 +47,7 @@ jobs: && sudo apt-get -y install cuda-12-3 libcudnn8=8.9.5.*-1+cuda12.3 if: false # skip as we use nvidia image - run: python -m pip install -U uv - - run: source/install/uv_with_retry.sh pip install --system "tensorflow~=2.18.0rc2" "torch~=2.5.0" + - run: source/install/uv_with_retry.sh pip install --system "tensorflow~=2.18.0rc2" "torch~=2.5.0" "jax[cuda12]" - run: | export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') export TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') @@ -61,6 +61,8 @@ jobs: env: NUM_WORKERS: 0 CUDA_VISIBLE_DEVICES: 0 + # See https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html + XLA_PYTHON_CLIENT_PREALLOCATE: false - name: Download libtorch run: | wget https://download.pytorch.org/libtorch/cu124/libtorch-cxx11-abi-shared-with-deps-2.5.0%2Bcu124.zip -O libtorch.zip From 6bc730f821eaa8c51c73519afb315bdbb6af7f69 Mon Sep 17 00:00:00 2001 From: Yan Wang <116817801+cherryWangY@users.noreply.github.com> Date: Sat, 2 Nov 2024 10:05:16 +0800 Subject: [PATCH 124/193] Enable Hybrid Descriptor to be compressed (#4297) ## Summary by CodeRabbit - **New Features** - Introduced a new method `enable_compression` in the `DescrptHybrid` class, allowing users to configure compression settings related to neighbor distance and table parameters. - **Documentation** - Enhanced documentation for the `call` method, providing clearer descriptions of parameters and return values. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/dpmodel/descriptor/hybrid.py | 32 ++++++++++++++++++++++++++++ deepmd/pt/model/descriptor/hybrid.py | 32 ++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/deepmd/dpmodel/descriptor/hybrid.py b/deepmd/dpmodel/descriptor/hybrid.py index 0d89902e4a..106fcaf11e 100644 --- a/deepmd/dpmodel/descriptor/hybrid.py +++ b/deepmd/dpmodel/descriptor/hybrid.py @@ -210,6 +210,38 @@ def get_stat_mean_and_stddev( stddev_list.append(stddev_item) return mean_list, stddev_list + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + for descrpt in self.descrpt_list: + descrpt.enable_compression( + min_nbor_dist, + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + def call( self, coord_ext, diff --git a/deepmd/pt/model/descriptor/hybrid.py b/deepmd/pt/model/descriptor/hybrid.py index ba64f53ef7..00984df238 100644 --- a/deepmd/pt/model/descriptor/hybrid.py +++ b/deepmd/pt/model/descriptor/hybrid.py @@ -224,6 +224,38 @@ def get_stat_mean_and_stddev( stddev_list.append(stddev_item) return mean_list, stddev_list + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + for descrpt in self.descrpt_list: + descrpt.enable_compression( + min_nbor_dist, + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + def forward( self, coord_ext: torch.Tensor, From 25bb8212610bf281bf74732824f382d74f694478 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 2 Nov 2024 03:27:48 -0400 Subject: [PATCH 125/193] feat(jax/array-api): DPA-2 (#4294) ## Summary by CodeRabbit - **New Features** - Introduced new classes for enhanced descriptor functionality, including `DescrptDPA2`, `DescrptBlockRepformers`, and `DescrptBlockSeTTebd`. - Added serialization and deserialization methods for better state management of descriptor objects. - **Improvements** - Enhanced compatibility with various array backends through the integration of `array_api_compat`. - Refactored existing methods to utilize new array API functions for improved performance. - Updated documentation to reflect JAX as a supported backend alongside PyTorch. - **Bug Fixes** - Updated handling of attributes in several classes to ensure correct deserialization and type safety. - **Tests** - Enhanced testing capabilities for JAX and Array API Strict backend integration, including conditional imports and new evaluation methods. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> --- deepmd/dpmodel/descriptor/dpa2.py | 37 ++- deepmd/dpmodel/descriptor/repformers.py | 311 ++++++++++++------ deepmd/dpmodel/utils/nlist.py | 23 +- deepmd/jax/descriptor/dpa2.py | 61 ++++ deepmd/jax/descriptor/repformers.py | 107 ++++++ doc/model/dpa2.md | 4 +- .../tests/array_api_strict/descriptor/dpa2.py | 57 ++++ .../array_api_strict/descriptor/repformers.py | 98 ++++++ .../tests/consistent/descriptor/test_dpa2.py | 36 ++ 9 files changed, 616 insertions(+), 118 deletions(-) create mode 100644 deepmd/jax/descriptor/dpa2.py create mode 100644 deepmd/jax/descriptor/repformers.py create mode 100644 source/tests/array_api_strict/descriptor/dpa2.py create mode 100644 source/tests/array_api_strict/descriptor/repformers.py diff --git a/deepmd/dpmodel/descriptor/dpa2.py b/deepmd/dpmodel/descriptor/dpa2.py index 1dbb14961e..200747c0ef 100644 --- a/deepmd/dpmodel/descriptor/dpa2.py +++ b/deepmd/dpmodel/descriptor/dpa2.py @@ -4,11 +4,18 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel import ( NativeOP, ) +from deepmd.dpmodel.array_api import ( + xp_take_along_axis, +) +from deepmd.dpmodel.common import ( + to_numpy_array, +) from deepmd.dpmodel.utils import ( EnvMat, NetworkCollection, @@ -787,9 +794,10 @@ def call( The smooth switch function. shape: nf x nloc x nnei """ + xp = array_api_compat.array_namespace(coord_ext, atype_ext, nlist) use_three_body = self.use_three_body nframes, nloc, nnei = nlist.shape - nall = coord_ext.reshape(nframes, -1).shape[1] // 3 + nall = xp.reshape(coord_ext, (nframes, -1)).shape[1] // 3 # nlists nlist_dict = build_multiple_neighbor_list( coord_ext, @@ -798,7 +806,10 @@ def call( self.nsel_list, ) # repinit - g1_ext = self.type_embedding.call()[atype_ext] + g1_ext = xp.reshape( + xp.take(self.type_embedding.call(), xp.reshape(atype_ext, [-1]), axis=0), + (nframes, nall, self.tebd_dim), + ) g1_inp = g1_ext[:, :nloc, :] g1, _, _, _, _ = self.repinit( nlist_dict[ @@ -823,7 +834,7 @@ def call( g1_ext, mapping, ) - g1 = np.concatenate([g1, g1_three_body], axis=-1) + g1 = xp.concat([g1, g1_three_body], axis=-1) # linear to change shape g1 = self.g1_shape_tranform(g1) if self.add_tebd_to_repinit_out: @@ -831,8 +842,10 @@ def call( g1 = g1 + self.tebd_transform(g1_inp) # mapping g1 assert mapping is not None - mapping_ext = np.tile(mapping.reshape(nframes, nall, 1), (1, 1, g1.shape[-1])) - g1_ext = np.take_along_axis(g1, mapping_ext, axis=1) + mapping_ext = xp.tile( + xp.reshape(mapping, (nframes, nall, 1)), (1, 1, g1.shape[-1]) + ) + g1_ext = xp_take_along_axis(g1, mapping_ext, axis=1) # repformer g1, g2, h2, rot_mat, sw = self.repformers( nlist_dict[ @@ -846,7 +859,7 @@ def call( mapping, ) if self.concat_output_tebd: - g1 = np.concatenate([g1, g1_inp], axis=-1) + g1 = xp.concat([g1, g1_inp], axis=-1) return g1, rot_mat, g2, h2, sw def serialize(self) -> dict: @@ -883,8 +896,8 @@ def serialize(self) -> dict: "embeddings": repinit.embeddings.serialize(), "env_mat": EnvMat(repinit.rcut, repinit.rcut_smth).serialize(), "@variables": { - "davg": repinit["davg"], - "dstd": repinit["dstd"], + "davg": to_numpy_array(repinit["davg"]), + "dstd": to_numpy_array(repinit["dstd"]), }, } if repinit.tebd_input_mode in ["strip"]: @@ -896,8 +909,8 @@ def serialize(self) -> dict: "repformer_layers": [layer.serialize() for layer in repformers.layers], "env_mat": EnvMat(repformers.rcut, repformers.rcut_smth).serialize(), "@variables": { - "davg": repformers["davg"], - "dstd": repformers["dstd"], + "davg": to_numpy_array(repformers["davg"]), + "dstd": to_numpy_array(repformers["dstd"]), }, } data.update( @@ -913,8 +926,8 @@ def serialize(self) -> dict: repinit_three_body.rcut, repinit_three_body.rcut_smth ).serialize(), "@variables": { - "davg": repinit_three_body["davg"], - "dstd": repinit_three_body["dstd"], + "davg": to_numpy_array(repinit_three_body["davg"]), + "dstd": to_numpy_array(repinit_three_body["dstd"]), }, } if repinit_three_body.tebd_input_mode in ["strip"]: diff --git a/deepmd/dpmodel/descriptor/repformers.py b/deepmd/dpmodel/descriptor/repformers.py index ef79ecdd28..5422ff345e 100644 --- a/deepmd/dpmodel/descriptor/repformers.py +++ b/deepmd/dpmodel/descriptor/repformers.py @@ -5,12 +5,19 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel import ( PRECISION_DICT, NativeOP, ) +from deepmd.dpmodel.array_api import ( + xp_take_along_axis, +) +from deepmd.dpmodel.common import ( + to_numpy_array, +) from deepmd.dpmodel.utils import ( EnvMat, PairExcludeMask, @@ -38,6 +45,28 @@ ) +def xp_transpose_01423(x): + xp = array_api_compat.array_namespace(x) + x_shape2 = x.shape[2] + x_shape3 = x.shape[3] + x_shape4 = x.shape[4] + x = xp.reshape(x, (x.shape[0], x.shape[1], x_shape2 * x_shape3, x_shape4)) + x = xp.matrix_transpose(x) + x = xp.reshape(x, (x.shape[0], x.shape[1], x_shape4, x_shape2, x_shape3)) + return x + + +def xp_transpose_01342(x): + xp = array_api_compat.array_namespace(x) + x_shape2 = x.shape[2] + x_shape3 = x.shape[3] + x_shape4 = x.shape[4] + x = xp.reshape(x, (x.shape[0], x.shape[1], x_shape2, x_shape3 * x_shape4)) + x = xp.matrix_transpose(x) + x = xp.reshape(x, (x.shape[0], x.shape[1], x_shape3, x_shape4, x_shape2)) + return x + + @DescriptorBlock.register("se_repformer") @DescriptorBlock.register("se_uni") class DescrptBlockRepformers(NativeOP, DescriptorBlock): @@ -360,8 +389,9 @@ def call( atype_embd_ext: Optional[np.ndarray] = None, mapping: Optional[np.ndarray] = None, ): + xp = array_api_compat.array_namespace(nlist, coord_ext, atype_ext) exclude_mask = self.emask.build_type_exclude_mask(nlist, atype_ext) - nlist = np.where(exclude_mask, nlist, -1) + nlist = xp.where(exclude_mask, nlist, xp.full_like(nlist, -1)) # nf x nloc x nnei x 4 dmatrix, diff, sw = self.env_mat.call( coord_ext, atype_ext, nlist, self.mean, self.stddev @@ -370,8 +400,8 @@ def call( # nf x nloc x nnei nlist_mask = nlist != -1 # nf x nloc x nnei - sw = sw.reshape(nf, nloc, nnei) - sw = np.where(nlist_mask, sw, 0.0) + sw = xp.reshape(sw, (nf, nloc, nnei)) + sw = xp.where(nlist_mask, sw, xp.zeros_like(sw)) # nf x nloc x tebd_dim atype_embd = atype_embd_ext[:, :nloc, :] assert list(atype_embd.shape) == [nf, nloc, self.g1_dim] @@ -379,22 +409,22 @@ def call( g1 = self.act(atype_embd) # nf x nloc x nnei x 1, nf x nloc x nnei x 3 if not self.direct_dist: - g2, h2 = np.split(dmatrix, [1], axis=-1) + g2, h2 = xp.split(dmatrix, [1], axis=-1) else: - g2, h2 = np.linalg.norm(diff, axis=-1, keepdims=True), diff + g2, h2 = xp.linalg.vector_norm(diff, axis=-1, keepdims=True), diff g2 = g2 / self.rcut h2 = h2 / self.rcut # nf x nloc x nnei x ng2 g2 = self.act(self.g2_embd(g2)) # set all padding positions to index of 0 # if a neighbor is real or not is indicated by nlist_mask - nlist[nlist == -1] = 0 + nlist = xp.where(nlist == -1, xp.zeros_like(nlist), nlist) # nf x nall x ng1 - mapping = np.tile(mapping.reshape(nf, -1, 1), (1, 1, self.g1_dim)) + mapping = xp.tile(xp.reshape(mapping, (nf, -1, 1)), (1, 1, self.g1_dim)) for idx, ll in enumerate(self.layers): # g1: nf x nloc x ng1 # g1_ext: nf x nall x ng1 - g1_ext = np.take_along_axis(g1, mapping, axis=1) + g1_ext = xp_take_along_axis(g1, mapping, axis=1) g1, g2, h2 = ll.call( g1_ext, g2, @@ -415,8 +445,9 @@ def call( use_sqrt_nnei=self.use_sqrt_nnei, ) # (nf x nloc) x ng2 x 3 - rot_mat = np.transpose(h2g2, (0, 1, 3, 2)) - return g1, g2, h2, rot_mat.reshape(nf, nloc, self.dim_emb, 3), sw + # rot_mat = xp.transpose(h2g2, (0, 1, 3, 2)) + rot_mat = xp.matrix_transpose(h2g2) + return g1, g2, h2, xp.reshape(rot_mat, (nf, nloc, self.dim_emb, 3)), sw def has_message_passing(self) -> bool: """Returns whether the descriptor block has message passing.""" @@ -426,6 +457,72 @@ def need_sorted_nlist_for_lower(self) -> bool: """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" return False + @classmethod + def deserialize(cls, data): + """Deserialize the descriptor block.""" + data = data.copy() + g2_embd = NativeLayer.deserialize(data.pop("g2_embd")) + layers = [RepformerLayer.deserialize(dd) for dd in data.pop("repformer_layers")] + env_mat = EnvMat.deserialize(data.pop("env_mat")) + variables = data.pop("@variables") + davg = variables["davg"] + dstd = variables["dstd"] + obj = cls(**data) + obj.g2_embd = g2_embd + obj.layers = layers + obj.env_mat = env_mat + obj.mean = davg + obj.stddev = dstd + return obj + + def serialize(self): + """Serialize the descriptor block.""" + return { + "rcut": self.rcut, + "rcut_smth": self.rcut_smth, + "sel": self.sel, + "ntypes": self.ntypes, + "nlayers": self.nlayers, + "g1_dim": self.g1_dim, + "g2_dim": self.g2_dim, + "axis_neuron": self.axis_neuron, + "direct_dist": self.direct_dist, + "update_g1_has_conv": self.update_g1_has_conv, + "update_g1_has_drrd": self.update_g1_has_drrd, + "update_g1_has_grrg": self.update_g1_has_grrg, + "update_g1_has_attn": self.update_g1_has_attn, + "update_g2_has_g1g1": self.update_g2_has_g1g1, + "update_g2_has_attn": self.update_g2_has_attn, + "update_h2": self.update_h2, + "attn1_hidden": self.attn1_hidden, + "attn1_nhead": self.attn1_nhead, + "attn2_hidden": self.attn2_hidden, + "attn2_nhead": self.attn2_nhead, + "attn2_has_gate": self.attn2_has_gate, + "activation_function": self.activation_function, + "update_style": self.update_style, + "update_residual": self.update_residual, + "update_residual_init": self.update_residual_init, + "set_davg_zero": self.set_davg_zero, + "smooth": self.smooth, + "exclude_types": self.exclude_types, + "env_protection": self.env_protection, + "precision": self.precision, + "trainable_ln": self.trainable_ln, + "use_sqrt_nnei": self.use_sqrt_nnei, + "g1_out_conv": self.g1_out_conv, + "g1_out_mlp": self.g1_out_mlp, + "ln_eps": self.ln_eps, + # variables + "g2_embd": self.g2_embd.serialize(), + "repformer_layers": [layer.serialize() for layer in self.layers], + "env_mat": self.env_mat.serialize(), + "@variables": { + "davg": to_numpy_array(self["davg"]), + "dstd": to_numpy_array(self["dstd"]), + }, + } + # translated by GPT and modified def get_residual( @@ -487,16 +584,17 @@ def _make_nei_g1( gg1: np.ndarray Neighbor-wise atomic invariant rep, with shape [nf, nloc, nnei, ng1]. """ + xp = array_api_compat.array_namespace(g1_ext, nlist) # nlist: nf x nloc x nnei nf, nloc, nnei = nlist.shape # g1_ext: nf x nall x ng1 ng1 = g1_ext.shape[-1] # index: nf x (nloc x nnei) x ng1 - index = np.tile(nlist.reshape(nf, nloc * nnei, 1), (1, 1, ng1)) + index = xp.tile(xp.reshape(nlist, (nf, nloc * nnei, 1)), (1, 1, ng1)) # gg1 : nf x (nloc x nnei) x ng1 - gg1 = np.take_along_axis(g1_ext, index, axis=1) + gg1 = xp_take_along_axis(g1_ext, index, axis=1) # gg1 : nf x nloc x nnei x ng1 - gg1 = gg1.reshape(nf, nloc, nnei, ng1) + gg1 = xp.reshape(gg1, (nf, nloc, nnei, ng1)) return gg1 @@ -514,7 +612,8 @@ def _apply_nlist_mask( nlist_mask Neighbor list mask, where zero means no neighbor, with shape [nf, nloc, nnei]. """ - masked_gg = np.where(nlist_mask[:, :, :, None], gg, 0.0) + xp = array_api_compat.array_namespace(gg, nlist_mask) + masked_gg = xp.where(nlist_mask[:, :, :, None], gg, xp.zeros_like(gg)) return masked_gg @@ -570,6 +669,7 @@ def _cal_hg( hg The transposed rotation matrix, with shape [nf, nloc, 3, ng]. """ + xp = array_api_compat.array_namespace(g, h, nlist_mask, sw) # g: nf x nloc x nnei x ng # h: nf x nloc x nnei x 3 # msk: nf x nloc x nnei @@ -580,21 +680,23 @@ def _cal_hg( if not smooth: # nf x nloc if not use_sqrt_nnei: - invnnei = 1.0 / (epsilon + np.sum(nlist_mask, axis=-1)) + invnnei = 1.0 / (epsilon + xp.sum(xp.astype(nlist_mask, g.dtype), axis=-1)) else: - invnnei = 1.0 / (epsilon + np.sqrt(np.sum(nlist_mask, axis=-1))) + invnnei = 1.0 / ( + epsilon + xp.sqrt(xp.sum(xp.astype(nlist_mask, g.dtype), axis=-1)) + ) # nf x nloc x 1 x 1 - invnnei = invnnei[:, :, np.newaxis, np.newaxis] + invnnei = invnnei[:, :, xp.newaxis, xp.newaxis] else: g = _apply_switch(g, sw) if not use_sqrt_nnei: - invnnei = (1.0 / float(nnei)) * np.ones((nf, nloc, 1, 1), dtype=g.dtype) + invnnei = (1.0 / float(nnei)) * xp.ones((nf, nloc, 1, 1), dtype=g.dtype) else: - invnnei = (1.0 / (float(nnei) ** 0.5)) * np.ones( + invnnei = (1.0 / (float(nnei) ** 0.5)) * xp.ones( (nf, nloc, 1, 1), dtype=g.dtype ) # nf x nloc x 3 x ng - hg = np.matmul(np.transpose(h, axes=(0, 1, 3, 2)), g) * invnnei + hg = xp.matmul(xp.matrix_transpose(h), g) * invnnei return hg @@ -614,14 +716,15 @@ def _cal_grrg(hg: np.ndarray, axis_neuron: int) -> np.ndarray: grrg Atomic invariant rep, with shape [nf, nloc, (axis_neuron * ng)]. """ + xp = array_api_compat.array_namespace(hg) # nf x nloc x 3 x ng nf, nloc, _, ng = hg.shape # nf x nloc x 3 x axis - hgm = np.split(hg, [axis_neuron], axis=-1)[0] + hgm = hg[..., :axis_neuron] # nf x nloc x axis_neuron x ng - grrg = np.matmul(np.transpose(hgm, axes=(0, 1, 3, 2)), hg) / (3.0**1) + grrg = xp.matmul(xp.matrix_transpose(hgm), hg) / (3.0**1) # nf x nloc x (axis_neuron * ng) - grrg = grrg.reshape(nf, nloc, axis_neuron * ng) + grrg = xp.reshape(grrg, (nf, nloc, axis_neuron * ng)) return grrg @@ -718,6 +821,7 @@ def call( nlist_mask: np.ndarray, # nf x nloc x nnei sw: np.ndarray, # nf x nloc x nnei ) -> np.ndarray: + xp = array_api_compat.array_namespace(g2, h2, nlist_mask, sw) ( nf, nloc, @@ -726,43 +830,47 @@ def call( ) = g2.shape nd, nh = self.hidden_dim, self.head_num # nf x nloc x nnei x nd x (nh x 2) - g2qk = self.mapqk(g2).reshape(nf, nloc, nnei, nd, nh * 2) + g2qk = self.mapqk(g2) + g2qk = xp.reshape(g2qk, (nf, nloc, nnei, nd, nh * 2)) # nf x nloc x (nh x 2) x nnei x nd - g2qk = np.transpose(g2qk, (0, 1, 4, 2, 3)) + # g2qk = xp.transpose(g2qk, (0, 1, 4, 2, 3)) + g2qk = xp_transpose_01423(g2qk) # nf x nloc x nh x nnei x nd - g2q, g2k = np.split(g2qk, [nh], axis=2) + # g2q, g2k = xp.split(g2qk, [nh], axis=2) + g2q = g2qk[:, :, :nh, :, :] + g2k = g2qk[:, :, nh:, :, :] # g2q = np.linalg.norm(g2q, axis=-1) # g2k = np.linalg.norm(g2k, axis=-1) # nf x nloc x nh x nnei x nnei - attnw = np.matmul(g2q, np.transpose(g2k, axes=(0, 1, 2, 4, 3))) / nd**0.5 + attnw = xp.matmul(g2q, xp.matrix_transpose(g2k)) / nd**0.5 if self.has_gate: - gate = np.matmul(h2, np.transpose(h2, axes=(0, 1, 3, 2))).reshape( - nf, nloc, 1, nnei, nnei - ) + gate = xp.matmul(h2, xp.matrix_transpose(h2)) + gate = xp.reshape(gate, (nf, nloc, 1, nnei, nnei)) attnw = attnw * gate # mask the attenmap, nf x nloc x 1 x 1 x nnei - attnw_mask = ~np.expand_dims(np.expand_dims(nlist_mask, axis=2), axis=2) + attnw_mask = ~xp.expand_dims(xp.expand_dims(nlist_mask, axis=2), axis=2) # mask the attenmap, nf x nloc x 1 x nnei x 1 - attnw_mask_c = ~np.expand_dims(np.expand_dims(nlist_mask, axis=2), axis=-1) + attnw_mask_c = ~xp.expand_dims(xp.expand_dims(nlist_mask, axis=2), axis=-1) if self.smooth: attnw = (attnw + self.attnw_shift) * sw[:, :, None, :, None] * sw[ :, :, None, None, : ] - self.attnw_shift else: - attnw = np.where(attnw_mask, -np.inf, attnw) + attnw = xp.where(attnw_mask, xp.full_like(attnw, -xp.inf), attnw) attnw = np_softmax(attnw, axis=-1) - attnw = np.where(attnw_mask, 0.0, attnw) + attnw = xp.where(attnw_mask, xp.zeros_like(attnw), attnw) # nf x nloc x nh x nnei x nnei - attnw = np.where(attnw_mask_c, 0.0, attnw) + attnw = xp.where(attnw_mask_c, xp.zeros_like(attnw), attnw) if self.smooth: attnw = attnw * sw[:, :, None, :, None] * sw[:, :, None, None, :] # nf x nloc x nnei x nnei - h2h2t = np.matmul(h2, np.transpose(h2, axes=(0, 1, 3, 2))) / 3.0**0.5 + h2h2t = xp.matmul(h2, xp.matrix_transpose(h2)) / 3.0**0.5 # nf x nloc x nh x nnei x nnei ret = attnw * h2h2t[:, :, None, :, :] # ret = np.exp(g2qk - np.max(g2qk, axis=-1, keepdims=True)) # nf x nloc x nnei x nnei x nh - ret = np.transpose(ret, (0, 1, 3, 4, 2)) + # ret = xp.transpose(ret, (0, 1, 3, 4, 2)) + ret = xp_transpose_01342(ret) return ret def serialize(self) -> dict: @@ -835,19 +943,22 @@ def call( AA: np.ndarray, # nf x nloc x nnei x nnei x nh g2: np.ndarray, # nf x nloc x nnei x ng2 ) -> np.ndarray: + xp = array_api_compat.array_namespace(AA, g2) nf, nloc, nnei, ng2 = g2.shape nh = self.head_num # nf x nloc x nnei x ng2 x nh - g2v = self.mapv(g2).reshape(nf, nloc, nnei, ng2, nh) + g2v = self.mapv(g2) + g2v = xp.reshape(g2v, (nf, nloc, nnei, ng2, nh)) # nf x nloc x nh x nnei x ng2 - g2v = np.transpose(g2v, (0, 1, 4, 2, 3)) + g2v = xp_transpose_01423(g2v) # g2v = np.linalg.norm(g2v, axis=-1) # nf x nloc x nh x nnei x nnei - AA = np.transpose(AA, (0, 1, 4, 2, 3)) + AA = xp_transpose_01423(AA) # nf x nloc x nh x nnei x ng2 - ret = np.matmul(AA, g2v) + ret = xp.matmul(AA, g2v) # nf x nloc x nnei x ng2 x nh - ret = np.transpose(ret, (0, 1, 3, 4, 2)).reshape(nf, nloc, nnei, (ng2 * nh)) + ret = xp_transpose_01342(ret) + ret = xp.reshape(ret, (nf, nloc, nnei, (ng2 * nh))) # nf x nloc x nnei x ng2 return self.head_map(ret) @@ -910,19 +1021,21 @@ def call( AA: np.ndarray, # nf x nloc x nnei x nnei x nh h2: np.ndarray, # nf x nloc x nnei x 3 ) -> np.ndarray: + xp = array_api_compat.array_namespace(AA, h2) nf, nloc, nnei, _ = h2.shape nh = self.head_num # nf x nloc x nh x nnei x nnei - AA = np.transpose(AA, (0, 1, 4, 2, 3)) - h2m = np.expand_dims(h2, axis=2) + AA = xp_transpose_01423(AA) + h2m = xp.expand_dims(h2, axis=2) # nf x nloc x nh x nnei x 3 - h2m = np.tile(h2m, (1, 1, nh, 1, 1)) + h2m = xp.tile(h2m, (1, 1, nh, 1, 1)) # nf x nloc x nh x nnei x 3 - ret = np.matmul(AA, h2m) + ret = xp.matmul(AA, h2m) # nf x nloc x nnei x 3 x nh - ret = np.transpose(ret, (0, 1, 3, 4, 2)).reshape(nf, nloc, nnei, 3, nh) + ret = xp_transpose_01342(ret) + ret = xp.reshape(ret, (nf, nloc, nnei, 3, nh)) # nf x nloc x nnei x 3 - return np.squeeze(self.head_map(ret), axis=-1) + return xp.squeeze(self.head_map(ret), axis=-1) def serialize(self) -> dict: """Serialize the networks to a dict. @@ -1005,49 +1118,49 @@ def call( nlist_mask: np.ndarray, # nf x nloc x nnei sw: np.ndarray, # nf x nloc x nnei ) -> np.ndarray: + xp = array_api_compat.array_namespace(g1, gg1, nlist_mask, sw) nf, nloc, nnei = nlist_mask.shape ni, nd, nh = self.input_dim, self.hidden_dim, self.head_num assert ni == g1.shape[-1] assert ni == gg1.shape[-1] # nf x nloc x nd x nh - g1q = self.mapq(g1).reshape(nf, nloc, nd, nh) + g1q = self.mapq(g1) + g1q = xp.reshape(g1q, (nf, nloc, nd, nh)) # nf x nloc x nh x nd - g1q = np.transpose(g1q, (0, 1, 3, 2)) + g1q = xp.matrix_transpose(g1q) # nf x nloc x nnei x (nd+ni) x nh - gg1kv = self.mapkv(gg1).reshape(nf, nloc, nnei, nd + ni, nh) - gg1kv = np.transpose(gg1kv, (0, 1, 4, 2, 3)) + gg1kv = self.mapkv(gg1) + gg1kv = xp.reshape(gg1kv, (nf, nloc, nnei, nd + ni, nh)) + gg1kv = xp_transpose_01423(gg1kv) # nf x nloc x nh x nnei x nd, nf x nloc x nh x nnei x ng1 - gg1k, gg1v = np.split(gg1kv, [nd], axis=-1) + # gg1k, gg1v = xp.split(gg1kv, [nd], axis=-1) + gg1k = gg1kv[:, :, :, :, :nd] + gg1v = gg1kv[:, :, :, :, nd:] # nf x nloc x nh x 1 x nnei attnw = ( - np.matmul( - np.expand_dims(g1q, axis=-2), np.transpose(gg1k, axes=(0, 1, 2, 4, 3)) - ) - / nd**0.5 + xp.matmul(xp.expand_dims(g1q, axis=-2), xp.matrix_transpose(gg1k)) / nd**0.5 ) # nf x nloc x nh x nnei - attnw = np.squeeze(attnw, axis=-2) + attnw = xp.squeeze(attnw, axis=-2) # mask the attenmap, nf x nloc x 1 x nnei - attnw_mask = ~np.expand_dims(nlist_mask, axis=-2) + attnw_mask = ~xp.expand_dims(nlist_mask, axis=-2) # nf x nloc x nh x nnei if self.smooth: - attnw = (attnw + self.attnw_shift) * np.expand_dims( + attnw = (attnw + self.attnw_shift) * xp.expand_dims( sw, axis=-2 ) - self.attnw_shift else: - attnw = np.where(attnw_mask, -np.inf, attnw) + attnw = xp.where(attnw_mask, xp.full_like(attnw, -xp.inf), attnw) attnw = np_softmax(attnw, axis=-1) - attnw = np.where(attnw_mask, 0.0, attnw) + attnw = xp.where(attnw_mask, xp.zeros_like(attnw), attnw) if self.smooth: - attnw = attnw * np.expand_dims(sw, axis=-2) + attnw = attnw * xp.expand_dims(sw, axis=-2) # nf x nloc x nh x ng1 - ret = ( - np.matmul(np.expand_dims(attnw, axis=-2), gg1v) - .squeeze(-2) - .reshape(nf, nloc, nh * ni) - ) + ret = xp.matmul(xp.expand_dims(attnw, axis=-2), gg1v) + ret = xp.squeeze(ret, axis=-2) + ret = xp.reshape(ret, (nf, nloc, nh * ni)) # nf x nloc x ng1 ret = self.head_map(ret) return ret @@ -1178,12 +1291,12 @@ def __init__( ], "'update_residual_init' only support 'norm' or 'const'!" self.update_residual = update_residual self.update_residual_init = update_residual_init - self.g1_residual = [] - self.g2_residual = [] - self.h2_residual = [] + g1_residual = [] + g2_residual = [] + h2_residual = [] if self.update_style == "res_residual": - self.g1_residual.append( + g1_residual.append( get_residual( g1_dim, self.update_residual, @@ -1217,7 +1330,7 @@ def __init__( seed=child_seed(seed, 2), ) if self.update_style == "res_residual": - self.g2_residual.append( + g2_residual.append( get_residual( g2_dim, self.update_residual, @@ -1234,7 +1347,7 @@ def __init__( seed=child_seed(seed, 15), ) if self.update_style == "res_residual": - self.g1_residual.append( + g1_residual.append( get_residual( g1_dim, self.update_residual, @@ -1263,7 +1376,7 @@ def __init__( seed=child_seed(seed, 4), ) if self.update_style == "res_residual": - self.g1_residual.append( + g1_residual.append( get_residual( g1_dim, self.update_residual, @@ -1281,7 +1394,7 @@ def __init__( seed=child_seed(seed, 5), ) if self.update_style == "res_residual": - self.g2_residual.append( + g2_residual.append( get_residual( g2_dim, self.update_residual, @@ -1312,7 +1425,7 @@ def __init__( seed=child_seed(seed, 9), ) if self.update_style == "res_residual": - self.g2_residual.append( + g2_residual.append( get_residual( g2_dim, self.update_residual, @@ -1327,7 +1440,7 @@ def __init__( g2_dim, attn2_nhead, precision=precision, seed=child_seed(seed, 11) ) if self.update_style == "res_residual": - self.h2_residual.append( + h2_residual.append( get_residual( 1, self.update_residual, @@ -1346,7 +1459,7 @@ def __init__( seed=child_seed(seed, 13), ) if self.update_style == "res_residual": - self.g1_residual.append( + g1_residual.append( get_residual( g1_dim, self.update_residual, @@ -1356,6 +1469,10 @@ def __init__( ) ) + self.g1_residual = g1_residual + self.g2_residual = g2_residual + self.h2_residual = h2_residual + def cal_1_dim(self, g1d: int, g2d: int, ax: int) -> int: ret = g1d if not self.g1_out_mlp else 0 if self.update_g1_has_grrg: @@ -1408,35 +1525,40 @@ def _update_g1_conv( The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, and remains 0 beyond rcut, with shape nf x nloc x nnei. """ + xp = array_api_compat.array_namespace(gg1, g2, nlist_mask, sw) assert self.proj_g1g2 is not None nf, nloc, nnei, _ = g2.shape ng1 = gg1.shape[-1] ng2 = g2.shape[-1] if not self.g1_out_conv: # gg1 : nf x nloc x nnei x ng2 - gg1 = self.proj_g1g2(gg1).reshape(nf, nloc, nnei, ng2) + gg1 = self.proj_g1g2(gg1) + gg1 = xp.reshape(gg1, (nf, nloc, nnei, ng2)) else: # gg1 : nf x nloc x nnei x ng1 - gg1 = gg1.reshape(nf, nloc, nnei, ng1) + gg1 = xp.reshape(gg1, (nf, nloc, nnei, ng1)) # nf x nloc x nnei x ng2/ng1 gg1 = _apply_nlist_mask(gg1, nlist_mask) if not self.smooth: # normalized by number of neighbors, not smooth # nf x nloc - invnnei = 1.0 / (self.epsilon + np.sum(nlist_mask, axis=-1)) + invnnei = 1.0 / ( + self.epsilon + xp.sum(xp.astype(nlist_mask, gg1.dtype), axis=-1) + ) # nf x nloc x 1 - invnnei = invnnei[:, :, np.newaxis] + invnnei = invnnei[:, :, xp.newaxis] else: gg1 = _apply_switch(gg1, sw) - invnnei = (1.0 / float(nnei)) * np.ones((nf, nloc, 1), dtype=gg1.dtype) + invnnei = (1.0 / float(nnei)) * xp.ones((nf, nloc, 1), dtype=gg1.dtype) if not self.g1_out_conv: # nf x nloc x ng2 - g1_11 = np.sum(g2 * gg1, axis=2) * invnnei + g1_11 = xp.sum(g2 * gg1, axis=2) * invnnei else: # nf x nloc x ng1 - g2 = self.proj_g1g2(g2).reshape(nf, nloc, nnei, ng1) + g2 = self.proj_g1g2(g2) + g2 = xp.reshape(g2, (nf, nloc, nnei, ng1)) # nb x nloc x ng1 - g1_11 = np.sum(g2 * gg1, axis=2) * invnnei + g1_11 = xp.sum(g2 * gg1, axis=2) * invnnei return g1_11 def _update_g2_g1g1( @@ -1461,7 +1583,8 @@ def _update_g2_g1g1( The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, and remains 0 beyond rcut, with shape nf x nloc x nnei. """ - ret = np.expand_dims(g1, axis=-2) * gg1 + xp = array_api_compat.array_namespace(g1, gg1, nlist_mask, sw) + ret = xp.expand_dims(g1, axis=-2) * gg1 # nf x nloc x nnei x ng1 ret = _apply_nlist_mask(ret, nlist_mask) if self.smooth: @@ -1493,6 +1616,7 @@ def call( g2: nf x nloc x nnei x ng2 updated pair-atom channel, invariant h2: nf x nloc x nnei x 3 updated pair-atom channel, equivariant """ + xp = array_api_compat.array_namespace(g1_ext, g2, h2, nlist, nlist_mask, sw) cal_gg1 = ( self.update_g1_has_drrd or self.update_g1_has_conv @@ -1502,7 +1626,8 @@ def call( nf, nloc, nnei, _ = g2.shape nall = g1_ext.shape[1] - g1, _ = np.split(g1_ext, [nloc], axis=1) + # g1, _ = xp.split(g1_ext, [nloc], axis=1) + g1 = g1_ext[:, :nloc, :] assert (nf, nloc) == g1.shape[:2] assert (nf, nloc, nnei) == h2.shape[:3] @@ -1592,7 +1717,7 @@ def call( # nf x nloc x [ng1+ng2+(axisxng2)+(axisxng1)] # conv grrg drrd - g1_1 = self.act(self.linear1(np.concatenate(g1_mlp, axis=-1))) + g1_1 = self.act(self.linear1(xp.concat(g1_mlp, axis=-1))) g1_update.append(g1_1) if self.update_g1_has_attn: @@ -1752,9 +1877,9 @@ def serialize(self) -> dict: if self.update_style == "res_residual": data.update( { - "g1_residual": self.g1_residual, - "g2_residual": self.g2_residual, - "h2_residual": self.h2_residual, + "g1_residual": [to_numpy_array(aa) for aa in self.g1_residual], + "g2_residual": [to_numpy_array(aa) for aa in self.g2_residual], + "h2_residual": [to_numpy_array(aa) for aa in self.h2_residual], } ) return data diff --git a/deepmd/dpmodel/utils/nlist.py b/deepmd/dpmodel/utils/nlist.py index b827032588..7b3b25df36 100644 --- a/deepmd/dpmodel/utils/nlist.py +++ b/deepmd/dpmodel/utils/nlist.py @@ -215,30 +215,31 @@ def build_multiple_neighbor_list( value being the corresponding nlist. """ + xp = array_api_compat.array_namespace(coord, nlist) assert len(rcuts) == len(nsels) if len(rcuts) == 0: return {} nb, nloc, nsel = nlist.shape if nsel < nsels[-1]: - pad = -1 * np.ones((nb, nloc, nsels[-1] - nsel), dtype=nlist.dtype) - nlist = np.concatenate([nlist, pad], axis=-1) + pad = -1 * xp.ones((nb, nloc, nsels[-1] - nsel), dtype=nlist.dtype) + nlist = xp.concat([nlist, pad], axis=-1) nsel = nsels[-1] - coord1 = coord.reshape(nb, -1, 3) + coord1 = xp.reshape(coord, (nb, -1, 3)) nall = coord1.shape[1] coord0 = coord1[:, :nloc, :] nlist_mask = nlist == -1 - tnlist_0 = nlist.copy() - tnlist_0[nlist_mask] = 0 - index = np.tile(tnlist_0.reshape(nb, nloc * nsel, 1), [1, 1, 3]) - coord2 = np.take_along_axis(coord1, index, axis=1).reshape(nb, nloc, nsel, 3) + tnlist_0 = xp.where(nlist_mask, xp.zeros_like(nlist), nlist) + index = xp.tile(xp.reshape(tnlist_0, (nb, nloc * nsel, 1)), (1, 1, 3)) + coord2 = xp_take_along_axis(coord1, index, axis=1) + coord2 = xp.reshape(coord2, (nb, nloc, nsel, 3)) diff = coord2 - coord0[:, :, None, :] - rr = np.linalg.norm(diff, axis=-1) - rr = np.where(nlist_mask, float("inf"), rr) + rr = xp.linalg.vector_norm(diff, axis=-1) + rr = xp.where(nlist_mask, xp.full_like(rr, float("inf")), rr) nlist0 = nlist ret = {} for rc, ns in zip(rcuts[::-1], nsels[::-1]): - tnlist_1 = np.copy(nlist0[:, :, :ns]) - tnlist_1[rr[:, :, :ns] > rc] = -1 + tnlist_1 = nlist0[:, :, :ns] + tnlist_1 = xp.where(rr[:, :, :ns] > rc, xp.full_like(tnlist_1, -1), tnlist_1) ret[get_multiple_nlist_key(rc, ns)] = tnlist_1 return ret diff --git a/deepmd/jax/descriptor/dpa2.py b/deepmd/jax/descriptor/dpa2.py new file mode 100644 index 0000000000..8eea324b41 --- /dev/null +++ b/deepmd/jax/descriptor/dpa2.py @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.dpa2 import DescrptDPA2 as DescrptDPA2DP +from deepmd.dpmodel.utils.network import Identity as IdentityDP +from deepmd.dpmodel.utils.network import NativeLayer as NativeLayerDP +from deepmd.jax.common import ( + ArrayAPIVariable, + flax_module, + to_jax_array, +) +from deepmd.jax.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.jax.descriptor.dpa1 import ( + DescrptBlockSeAtten, +) +from deepmd.jax.descriptor.repformers import ( + DescrptBlockRepformers, +) +from deepmd.jax.descriptor.se_t_tebd import ( + DescrptBlockSeTTebd, +) +from deepmd.jax.utils.network import ( + NativeLayer, +) +from deepmd.jax.utils.type_embed import ( + TypeEmbedNet, +) + + +@BaseDescriptor.register("dpa2") +@flax_module +class DescrptDPA2(DescrptDPA2DP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mean", "stddev"}: + value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) + elif name in {"repinit"}: + value = DescrptBlockSeAtten.deserialize(value.serialize()) + elif name in {"repinit_three_body"}: + if value is not None: + value = DescrptBlockSeTTebd.deserialize(value.serialize()) + elif name in {"repformers"}: + value = DescrptBlockRepformers.deserialize(value.serialize()) + elif name in {"type_embedding"}: + value = TypeEmbedNet.deserialize(value.serialize()) + elif name in {"g1_shape_tranform", "tebd_transform"}: + if value is None: + pass + elif isinstance(value, NativeLayerDP): + value = NativeLayer.deserialize(value.serialize()) + elif isinstance(value, IdentityDP): + # IdentityDP doesn't contain any value - it's good to go + pass + else: + raise ValueError(f"Unknown layer type: {type(value)}") + return super().__setattr__(name, value) diff --git a/deepmd/jax/descriptor/repformers.py b/deepmd/jax/descriptor/repformers.py new file mode 100644 index 0000000000..77ca4a9a6b --- /dev/null +++ b/deepmd/jax/descriptor/repformers.py @@ -0,0 +1,107 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.repformers import ( + Atten2EquiVarApply as Atten2EquiVarApplyDP, +) +from deepmd.dpmodel.descriptor.repformers import Atten2Map as Atten2MapDP +from deepmd.dpmodel.descriptor.repformers import ( + Atten2MultiHeadApply as Atten2MultiHeadApplyDP, +) +from deepmd.dpmodel.descriptor.repformers import ( + DescrptBlockRepformers as DescrptBlockRepformersDP, +) +from deepmd.dpmodel.descriptor.repformers import LocalAtten as LocalAttenDP +from deepmd.dpmodel.descriptor.repformers import RepformerLayer as RepformerLayerDP +from deepmd.jax.common import ( + ArrayAPIVariable, + flax_module, + to_jax_array, +) +from deepmd.jax.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.jax.utils.network import ( + LayerNorm, + NativeLayer, +) + + +@flax_module +class DescrptBlockRepformers(DescrptBlockRepformersDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mean", "stddev"}: + value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) + elif name in {"layers"}: + value = [RepformerLayer.deserialize(layer.serialize()) for layer in value] + elif name == "g2_embd": + value = NativeLayer.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) + + +@flax_module +class Atten2Map(Atten2MapDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mapqk"}: + value = NativeLayer.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +@flax_module +class Atten2MultiHeadApply(Atten2MultiHeadApplyDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mapv", "head_map"}: + value = NativeLayer.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +@flax_module +class Atten2EquiVarApply(Atten2EquiVarApplyDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"head_map"}: + value = NativeLayer.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +@flax_module +class LocalAtten(LocalAttenDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mapq", "mapkv", "head_map"}: + value = NativeLayer.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +@flax_module +class RepformerLayer(RepformerLayerDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"linear1", "linear2", "g1_self_mlp", "proj_g1g2", "proj_g1g1g2"}: + if value is not None: + value = NativeLayer.deserialize(value.serialize()) + elif name in {"g1_residual", "g2_residual", "h2_residual"}: + value = [ArrayAPIVariable(to_jax_array(vv)) for vv in value] + elif name in {"attn2g_map"}: + if value is not None: + value = Atten2Map.deserialize(value.serialize()) + elif name in {"attn2_mh_apply"}: + if value is not None: + value = Atten2MultiHeadApply.deserialize(value.serialize()) + elif name in {"attn2_lm"}: + if value is not None: + value = LayerNorm.deserialize(value.serialize()) + elif name in {"attn2_ev_apply"}: + if value is not None: + value = Atten2EquiVarApply.deserialize(value.serialize()) + elif name in {"loc_attn"}: + if value is not None: + value = LocalAtten.deserialize(value.serialize()) + return super().__setattr__(name, value) diff --git a/doc/model/dpa2.md b/doc/model/dpa2.md index 24ce5222e9..27ffc1b14d 100644 --- a/doc/model/dpa2.md +++ b/doc/model/dpa2.md @@ -1,7 +1,7 @@ -# Descriptor DPA-2 {{ pytorch_icon }} {{ dpmodel_icon }} +# Descriptor DPA-2 {{ pytorch_icon }} {{ jax_icon }} {{ dpmodel_icon }} :::{note} -**Supported backends**: PyTorch {{ pytorch_icon }}, DP {{ dpmodel_icon }} +**Supported backends**: PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: The DPA-2 model implementation. See https://arxiv.org/abs/2312.15492 for more details. diff --git a/source/tests/array_api_strict/descriptor/dpa2.py b/source/tests/array_api_strict/descriptor/dpa2.py new file mode 100644 index 0000000000..a510c6b461 --- /dev/null +++ b/source/tests/array_api_strict/descriptor/dpa2.py @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.dpa2 import DescrptDPA2 as DescrptDPA2DP +from deepmd.dpmodel.utils.network import Identity as IdentityDP +from deepmd.dpmodel.utils.network import NativeLayer as NativeLayerDP + +from ..common import ( + to_array_api_strict_array, +) +from ..utils.network import ( + NativeLayer, +) +from ..utils.type_embed import ( + TypeEmbedNet, +) +from .base_descriptor import ( + BaseDescriptor, +) +from .dpa1 import ( + DescrptBlockSeAtten, +) +from .repformers import ( + DescrptBlockRepformers, +) +from .se_t_tebd import ( + DescrptBlockSeTTebd, +) + + +@BaseDescriptor.register("dpa2") +class DescrptDPA2(DescrptDPA2DP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mean", "stddev"}: + value = to_array_api_strict_array(value) + elif name in {"repinit"}: + value = DescrptBlockSeAtten.deserialize(value.serialize()) + elif name in {"repinit_three_body"}: + if value is not None: + value = DescrptBlockSeTTebd.deserialize(value.serialize()) + elif name in {"repformers"}: + value = DescrptBlockRepformers.deserialize(value.serialize()) + elif name in {"type_embedding"}: + value = TypeEmbedNet.deserialize(value.serialize()) + elif name in {"g1_shape_tranform", "tebd_transform"}: + if value is None: + pass + elif isinstance(value, NativeLayerDP): + value = NativeLayer.deserialize(value.serialize()) + elif isinstance(value, IdentityDP): + # IdentityDP doesn't contain any value - it's good to go + pass + else: + raise ValueError(f"Unknown layer type: {type(value)}") + return super().__setattr__(name, value) diff --git a/source/tests/array_api_strict/descriptor/repformers.py b/source/tests/array_api_strict/descriptor/repformers.py new file mode 100644 index 0000000000..ff65ff849f --- /dev/null +++ b/source/tests/array_api_strict/descriptor/repformers.py @@ -0,0 +1,98 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, +) + +from deepmd.dpmodel.descriptor.repformers import ( + Atten2EquiVarApply as Atten2EquiVarApplyDP, +) +from deepmd.dpmodel.descriptor.repformers import Atten2Map as Atten2MapDP +from deepmd.dpmodel.descriptor.repformers import ( + Atten2MultiHeadApply as Atten2MultiHeadApplyDP, +) +from deepmd.dpmodel.descriptor.repformers import ( + DescrptBlockRepformers as DescrptBlockRepformersDP, +) +from deepmd.dpmodel.descriptor.repformers import LocalAtten as LocalAttenDP +from deepmd.dpmodel.descriptor.repformers import RepformerLayer as RepformerLayerDP + +from ..common import ( + to_array_api_strict_array, +) +from ..utils.exclude_mask import ( + PairExcludeMask, +) +from ..utils.network import ( + LayerNorm, + NativeLayer, +) + + +class DescrptBlockRepformers(DescrptBlockRepformersDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mean", "stddev"}: + value = to_array_api_strict_array(value) + elif name in {"layers"}: + value = [RepformerLayer.deserialize(layer.serialize()) for layer in value] + elif name == "g2_embd": + value = NativeLayer.deserialize(value.serialize()) + elif name == "env_mat": + # env_mat doesn't store any value + pass + elif name == "emask": + value = PairExcludeMask(value.ntypes, value.exclude_types) + + return super().__setattr__(name, value) + + +class Atten2Map(Atten2MapDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mapqk"}: + value = NativeLayer.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +class Atten2MultiHeadApply(Atten2MultiHeadApplyDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mapv", "head_map"}: + value = NativeLayer.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +class Atten2EquiVarApply(Atten2EquiVarApplyDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"head_map"}: + value = NativeLayer.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +class LocalAtten(LocalAttenDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"mapq", "mapkv", "head_map"}: + value = NativeLayer.deserialize(value.serialize()) + return super().__setattr__(name, value) + + +class RepformerLayer(RepformerLayerDP): + def __setattr__(self, name: str, value: Any) -> None: + if name in {"linear1", "linear2", "g1_self_mlp", "proj_g1g2", "proj_g1g1g2"}: + if value is not None: + value = NativeLayer.deserialize(value.serialize()) + elif name in {"g1_residual", "g2_residual", "h2_residual"}: + value = [to_array_api_strict_array(vv) for vv in value] + elif name in {"attn2g_map"}: + if value is not None: + value = Atten2Map.deserialize(value.serialize()) + elif name in {"attn2_mh_apply"}: + if value is not None: + value = Atten2MultiHeadApply.deserialize(value.serialize()) + elif name in {"attn2_lm"}: + if value is not None: + value = LayerNorm.deserialize(value.serialize()) + elif name in {"attn2_ev_apply"}: + if value is not None: + value = Atten2EquiVarApply.deserialize(value.serialize()) + elif name in {"loc_attn"}: + if value is not None: + value = LocalAtten.deserialize(value.serialize()) + return super().__setattr__(name, value) diff --git a/source/tests/consistent/descriptor/test_dpa2.py b/source/tests/consistent/descriptor/test_dpa2.py index 53f9ce4200..17c55db368 100644 --- a/source/tests/consistent/descriptor/test_dpa2.py +++ b/source/tests/consistent/descriptor/test_dpa2.py @@ -15,6 +15,8 @@ ) from ..common import ( + INSTALLED_ARRAY_API_STRICT, + INSTALLED_JAX, INSTALLED_PT, CommonTest, parameterized, @@ -28,6 +30,15 @@ else: DescrptDPA2PT = None +if INSTALLED_JAX: + from deepmd.jax.descriptor.dpa2 import DescrptDPA2 as DescrptDPA2JAX +else: + DescrptDPA2JAX = None +if INSTALLED_ARRAY_API_STRICT: + from ...array_api_strict.descriptor.dpa2 import DescrptDPA2 as DescrptDPA2Strict +else: + DescrptDPA2Strict = None + # not implemented DescrptDPA2TF = None @@ -269,9 +280,14 @@ def skip_tf(self) -> bool: ) = self.param return True + skip_jax = not INSTALLED_JAX + skip_array_api_strict = not INSTALLED_ARRAY_API_STRICT + tf_class = DescrptDPA2TF dp_class = DescrptDPA2DP pt_class = DescrptDPA2PT + jax_class = DescrptDPA2JAX + array_api_strict_class = DescrptDPA2Strict args = descrpt_dpa2_args().append(Argument("ntypes", int, optional=False)) def setUp(self): @@ -367,6 +383,26 @@ def eval_pt(self, pt_obj: Any) -> Any: mixed_types=True, ) + def eval_jax(self, jax_obj: Any) -> Any: + return self.eval_jax_descriptor( + jax_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: + return self.eval_array_api_strict_descriptor( + array_api_strict_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: return (ret[0],) From bfbe2edfba582864029008bef2e3908470cff142 Mon Sep 17 00:00:00 2001 From: Yan Wang <116817801+cherryWangY@users.noreply.github.com> Date: Sat, 2 Nov 2024 17:47:27 +0800 Subject: [PATCH 126/193] Add compression API to BaseModel and AtomicModel (#4298) ## Summary by CodeRabbit - **New Features** - Introduced `enable_compression` method across multiple classes to allow configuration of compression settings for descriptors. - Enhanced robustness of output definitions and serialization processes in the `DPAtomicModel` class. - Added `enable_compression` method to `LinearEnergyAtomicModel` for improved model compression capabilities. - **Bug Fixes** - Improved error handling in the `fitting_output_def` method to ensure fallback functionality when the fitting network is unavailable. These updates enhance the functionality and reliability of the model management features. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../dpmodel/atomic_model/dp_atomic_model.py | 31 ++++++++++++++++++ .../atomic_model/linear_atomic_model.py | 32 +++++++++++++++++++ .../atomic_model/make_base_atomic_model.py | 25 +++++++++++++++ deepmd/dpmodel/model/base_model.py | 22 +++++++++++++ deepmd/dpmodel/model/make_model.py | 28 ++++++++++++++++ .../pt/model/atomic_model/dp_atomic_model.py | 31 ++++++++++++++++++ .../model/atomic_model/linear_atomic_model.py | 32 +++++++++++++++++++ deepmd/pt/model/model/make_model.py | 28 ++++++++++++++++ 8 files changed, 229 insertions(+) diff --git a/deepmd/dpmodel/atomic_model/dp_atomic_model.py b/deepmd/dpmodel/atomic_model/dp_atomic_model.py index a621ece27e..3db0bb0c68 100644 --- a/deepmd/dpmodel/atomic_model/dp_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/dp_atomic_model.py @@ -86,6 +86,37 @@ def need_sorted_nlist_for_lower(self) -> bool: """Returns whether the atomic model needs sorted nlist when using `forward_lower`.""" return self.descriptor.need_sorted_nlist_for_lower() + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Call descriptor enable_compression(). + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + self.descriptor.enable_compression( + min_nbor_dist, + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + def forward_atomic( self, extended_coord: np.ndarray, diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py index 224fdd145c..485f82cb72 100644 --- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py @@ -149,6 +149,38 @@ def _sort_rcuts_sels(self) -> tuple[list[float], list[int]]: ) return [p[0] for p in zipped], [p[1] for p in zipped] + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Compress model. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + for model in self.models: + model.enable_compression( + min_nbor_dist, + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + def forward_atomic( self, extended_coord, diff --git a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py index 99a92c23a4..a4c38518a3 100644 --- a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py @@ -148,6 +148,31 @@ def change_type_map( ) -> None: pass + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Call descriptor enable_compression(). + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + raise NotImplementedError("This atomi model doesn't support compression!") + def make_atom_mask( self, atype: t_tensor, diff --git a/deepmd/dpmodel/model/base_model.py b/deepmd/dpmodel/model/base_model.py index 777697b4b7..15c0bfc083 100644 --- a/deepmd/dpmodel/model/base_model.py +++ b/deepmd/dpmodel/model/base_model.py @@ -191,6 +191,28 @@ def update_sel( cls = cls.get_class_by_type(model_type) return cls.update_sel(train_data, type_map, local_jdata) + def enable_compression( + self, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Enable model compression by tabulation. + + Parameters + ---------- + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + raise NotImplementedError("This atomic model doesn't support compression!") + @classmethod def get_model(cls, model_params: dict) -> "BaseBaseModel": """Get the model by the parameters. diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py index b6379573e1..95d97262df 100644 --- a/deepmd/dpmodel/model/make_model.py +++ b/deepmd/dpmodel/model/make_model.py @@ -186,6 +186,34 @@ def model_output_type(self) -> list[str]: ] return vars + def enable_compression( + self, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Call atomic_model enable_compression(). + + Parameters + ---------- + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + self.atomic_model.enable_compression( + self.get_min_nbor_dist(), + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + def call( self, coord, diff --git a/deepmd/pt/model/atomic_model/dp_atomic_model.py b/deepmd/pt/model/atomic_model/dp_atomic_model.py index 48c8d0d859..4f53fb69f9 100644 --- a/deepmd/pt/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pt/model/atomic_model/dp_atomic_model.py @@ -160,6 +160,37 @@ def deserialize(cls, data) -> "DPAtomicModel": obj = super().deserialize(data) return obj + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Call descriptor enable_compression(). + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + self.descriptor.enable_compression( + min_nbor_dist, + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + def forward_atomic( self, extended_coord, diff --git a/deepmd/pt/model/atomic_model/linear_atomic_model.py b/deepmd/pt/model/atomic_model/linear_atomic_model.py index 570fcdcc43..33c2eb6c59 100644 --- a/deepmd/pt/model/atomic_model/linear_atomic_model.py +++ b/deepmd/pt/model/atomic_model/linear_atomic_model.py @@ -184,6 +184,38 @@ def _sort_rcuts_sels(self) -> tuple[list[float], list[int]]: sorted_sels: list[int] = outer_sorted[:, 1].to(torch.int64).tolist() return sorted_rcuts, sorted_sels + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Compress model. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + for model in self.models: + model.enable_compression( + min_nbor_dist, + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + def forward_atomic( self, extended_coord: torch.Tensor, diff --git a/deepmd/pt/model/model/make_model.py b/deepmd/pt/model/model/make_model.py index a9d5e26060..709fce8150 100644 --- a/deepmd/pt/model/model/make_model.py +++ b/deepmd/pt/model/model/make_model.py @@ -98,6 +98,34 @@ def model_output_type(self) -> list[str]: vars.append(kk) return vars + def enable_compression( + self, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Call atomic_model enable_compression(). + + Parameters + ---------- + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + self.atomic_model.enable_compression( + self.get_min_nbor_dist(), + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + # cannot use the name forward. torch script does not work def forward_common( self, From 7aaf284311a5be35b9605652365c98f487c5dbd0 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 3 Nov 2024 21:14:08 -0500 Subject: [PATCH 127/193] feat(jax): zbl (#4301) ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced new classes: `DPZBLLinearEnergyAtomicModel` and `PairTabAtomicModel`, enhancing atomic model functionalities. - Added `get_zbl_model` function for constructing `DPZBLModel` from input data. - Improved error handling in vector normalization with `safe_for_vector_norm` and `safe_for_sqrt`. - **Bug Fixes** - Enhanced distance calculations in `format_nlist` to prevent NaN errors. - **Documentation** - Updated comments and docstrings for clarity on recent changes. - **Tests** - Enhanced test support for JAX backend in `test_zbl_ener.py`. --------- Signed-off-by: Jinzhe Zeng --- .../atomic_model/linear_atomic_model.py | 47 ++++++----- .../atomic_model/pairtab_atomic_model.py | 77 +++++++++++-------- deepmd/dpmodel/descriptor/dpa1.py | 5 +- deepmd/dpmodel/utils/safe_gradient.py | 32 ++++++++ deepmd/jax/atomic_model/dp_atomic_model.py | 23 ++++++ .../jax/atomic_model/linear_atomic_model.py | 61 +++++++++++++++ .../jax/atomic_model/pairtab_atomic_model.py | 50 ++++++++++++ deepmd/jax/model/__init__.py | 8 +- deepmd/jax/model/dp_zbl_model.py | 50 ++++++++++++ deepmd/jax/model/model.py | 53 +++++++++++++ .../tests/consistent/model/test_zbl_ener.py | 11 ++- 11 files changed, 363 insertions(+), 54 deletions(-) create mode 100644 deepmd/dpmodel/utils/safe_gradient.py create mode 100644 deepmd/jax/atomic_model/linear_atomic_model.py create mode 100644 deepmd/jax/atomic_model/pairtab_atomic_model.py create mode 100644 deepmd/jax/model/dp_zbl_model.py diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py index 485f82cb72..2c7e029d53 100644 --- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py @@ -5,6 +5,7 @@ Union, ) +import array_api_compat import numpy as np from deepmd.dpmodel.utils.nlist import ( @@ -69,7 +70,7 @@ def __init__( self.models = models sub_model_type_maps = [md.get_type_map() for md in models] err_msg = [] - self.mapping_list = [] + mapping_list = [] common_type_map = set(type_map) self.type_map = type_map for tpmp in sub_model_type_maps: @@ -77,7 +78,8 @@ def __init__( err_msg.append( f"type_map {tpmp} is not a subset of type_map {type_map}" ) - self.mapping_list.append(self.remap_atype(tpmp, self.type_map)) + mapping_list.append(self.remap_atype(tpmp, self.type_map)) + self.mapping_list = mapping_list assert len(err_msg) == 0, "\n".join(err_msg) self.mixed_types_list = [model.mixed_types() for model in self.models] @@ -212,8 +214,9 @@ def forward_atomic( result_dict the result dict, defined by the fitting net output def. """ + xp = array_api_compat.array_namespace(extended_coord, extended_atype, nlist) nframes, nloc, nnei = nlist.shape - extended_coord = extended_coord.reshape(nframes, -1, 3) + extended_coord = xp.reshape(extended_coord, (nframes, -1, 3)) sorted_rcuts, sorted_sels = self._sort_rcuts_sels() nlists = build_multiple_neighbor_list( extended_coord, @@ -244,10 +247,10 @@ def forward_atomic( aparam, )["energy"] ) - self.weights = self._compute_weight(extended_coord, extended_atype, nlists_) + weights = self._compute_weight(extended_coord, extended_atype, nlists_) fit_ret = { - "energy": np.sum(np.stack(ener_list) * np.stack(self.weights), axis=0), + "energy": xp.sum(xp.stack(ener_list) * xp.stack(weights), axis=0), } # (nframes, nloc, 1) return fit_ret @@ -320,11 +323,12 @@ def _compute_weight( nlists_: list[np.ndarray], ) -> list[np.ndarray]: """This should be a list of user defined weights that matches the number of models to be combined.""" + xp = array_api_compat.array_namespace(extended_coord, extended_atype, nlists_) nmodels = len(self.models) nframes, nloc, _ = nlists_[0].shape # the dtype of weights is the interface data type. return [ - np.ones((nframes, nloc, 1), dtype=GLOBAL_NP_FLOAT_PRECISION) / nmodels + xp.ones((nframes, nloc, 1), dtype=GLOBAL_NP_FLOAT_PRECISION) / nmodels for _ in range(nmodels) ] @@ -442,6 +446,7 @@ def _compute_weight( self.sw_rmax > self.sw_rmin ), "The upper boundary `sw_rmax` must be greater than the lower boundary `sw_rmin`." + xp = array_api_compat.array_namespace(extended_coord, extended_atype) dp_nlist = nlists_[0] zbl_nlist = nlists_[1] @@ -450,40 +455,40 @@ def _compute_weight( # use the larger rr based on nlist nlist_larger = zbl_nlist if zbl_nnei >= dp_nnei else dp_nlist - masked_nlist = np.clip(nlist_larger, 0, None) + masked_nlist = xp.clip(nlist_larger, 0, None) pairwise_rr = PairTabAtomicModel._get_pairwise_dist( extended_coord, masked_nlist ) - numerator = np.sum( - np.where( + numerator = xp.sum( + xp.where( nlist_larger != -1, - pairwise_rr * np.exp(-pairwise_rr / self.smin_alpha), - np.zeros_like(nlist_larger), + pairwise_rr * xp.exp(-pairwise_rr / self.smin_alpha), + xp.zeros_like(nlist_larger), ), axis=-1, ) # masked nnei will be zero, no need to handle - denominator = np.sum( - np.where( + denominator = xp.sum( + xp.where( nlist_larger != -1, - np.exp(-pairwise_rr / self.smin_alpha), - np.zeros_like(nlist_larger), + xp.exp(-pairwise_rr / self.smin_alpha), + xp.zeros_like(nlist_larger), ), axis=-1, ) # handle masked nnei. with np.errstate(divide="ignore", invalid="ignore"): sigma = numerator / denominator u = (sigma - self.sw_rmin) / (self.sw_rmax - self.sw_rmin) - coef = np.zeros_like(u) + coef = xp.zeros_like(u) left_mask = sigma < self.sw_rmin mid_mask = (self.sw_rmin <= sigma) & (sigma < self.sw_rmax) right_mask = sigma >= self.sw_rmax - coef[left_mask] = 1 + coef = xp.where(left_mask, xp.ones_like(coef), coef) with np.errstate(invalid="ignore"): smooth = -6 * u**5 + 15 * u**4 - 10 * u**3 + 1 - coef[mid_mask] = smooth[mid_mask] - coef[right_mask] = 0 + coef = xp.where(mid_mask, smooth, coef) + coef = xp.where(right_mask, xp.zeros_like(coef), coef) # to handle masked atoms - coef = np.where(sigma != 0, coef, np.zeros_like(coef)) + coef = xp.where(sigma != 0, coef, xp.zeros_like(coef)) self.zbl_weight = coef - return [1 - np.expand_dims(coef, -1), np.expand_dims(coef, -1)] + return [1 - xp.expand_dims(coef, -1), xp.expand_dims(coef, -1)] diff --git a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py index 2899f106bc..c927089daf 100644 --- a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py @@ -5,12 +5,19 @@ Union, ) +import array_api_compat import numpy as np +from deepmd.dpmodel.array_api import ( + xp_take_along_axis, +) from deepmd.dpmodel.output_def import ( FittingOutputDef, OutputVariableDef, ) +from deepmd.dpmodel.utils.safe_gradient import ( + safe_for_sqrt, +) from deepmd.utils.pair_tab import ( PairTab, ) @@ -74,9 +81,10 @@ def __init__( self.atom_ener = atom_ener if self.tab_file is not None: - self.tab_info, self.tab_data = self.tab.get() - nspline, ntypes_tab = self.tab_info[-2:].astype(int) - self.tab_data = self.tab_data.reshape(ntypes_tab, ntypes_tab, nspline, 4) + tab_info, tab_data = self.tab.get() + nspline, ntypes_tab = tab_info[-2:].astype(int) + self.tab_info = tab_info + self.tab_data = tab_data.reshape(ntypes_tab, ntypes_tab, nspline, 4) if self.ntypes != ntypes_tab: raise ValueError( "The `type_map` provided does not match the number of columns in the table." @@ -189,8 +197,9 @@ def forward_atomic( fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, ) -> dict[str, np.ndarray]: + xp = array_api_compat.array_namespace(extended_coord, extended_atype, nlist) nframes, nloc, nnei = nlist.shape - extended_coord = extended_coord.reshape(nframes, -1, 3) + extended_coord = xp.reshape(extended_coord, (nframes, -1, 3)) # this will mask all -1 in the nlist mask = nlist >= 0 @@ -200,23 +209,21 @@ def forward_atomic( pairwise_rr = self._get_pairwise_dist( extended_coord, masked_nlist ) # (nframes, nloc, nnei) - self.tab_data = self.tab_data.reshape( - self.tab.ntypes, self.tab.ntypes, self.tab.nspline, 4 - ) # (nframes, nloc, nnei), index type is int64. j_type = extended_atype[ - np.arange(extended_atype.shape[0], dtype=np.int64)[:, None, None], + xp.arange(extended_atype.shape[0], dtype=xp.int64)[:, None, None], masked_nlist, ] raw_atomic_energy = self._pair_tabulated_inter( nlist, atype, j_type, pairwise_rr ) - atomic_energy = 0.5 * np.sum( - np.where(nlist != -1, raw_atomic_energy, np.zeros_like(raw_atomic_energy)), + atomic_energy = 0.5 * xp.sum( + xp.where(nlist != -1, raw_atomic_energy, xp.zeros_like(raw_atomic_energy)), axis=-1, - ).reshape(nframes, nloc, 1) + ) + atomic_energy = xp.reshape(atomic_energy, (nframes, nloc, 1)) return {"energy": atomic_energy} @@ -255,36 +262,42 @@ def _pair_tabulated_inter( This function is used to calculate the pairwise energy between two atoms. It uses a table containing cubic spline coefficients calculated in PairTab. """ + xp = array_api_compat.array_namespace(nlist, i_type, j_type, rr) nframes, nloc, nnei = nlist.shape rmin = self.tab_info[0] hh = self.tab_info[1] hi = 1.0 / hh - nspline = int(self.tab_info[2] + 0.1) + # jax jit does not support convert to a Python int, so we need to convert to xp.int64. + nspline = (self.tab_info[2] + 0.1).astype(xp.int64) uu = (rr - rmin) * hi # this is broadcasted to (nframes,nloc,nnei) # if nnei of atom 0 has -1 in the nlist, uu would be 0. # this is to handle the nlist where the mask is set to 0, so that we don't raise exception for those atoms. - uu = np.where(nlist != -1, uu, nspline + 1) + uu = xp.where(nlist != -1, uu, nspline + 1) - if np.any(uu < 0): - raise Exception("coord go beyond table lower boundary") + # unsupported by jax + # if xp.any(uu < 0): + # raise Exception("coord go beyond table lower boundary") - idx = uu.astype(int) + idx = xp.astype(uu, xp.int64) uu -= idx table_coef = self._extract_spline_coefficient( i_type, j_type, idx, self.tab_data, nspline ) - table_coef = table_coef.reshape(nframes, nloc, nnei, 4) + table_coef = xp.reshape(table_coef, (nframes, nloc, nnei, 4)) ener = self._calculate_ener(table_coef, uu) # here we need to overwrite energy to zero at rcut and beyond. mask_beyond_rcut = rr >= self.rcut # also overwrite values beyond extrapolation to zero extrapolation_mask = rr >= self.tab.rmin + nspline * self.tab.hh - ener[mask_beyond_rcut] = 0 - ener[extrapolation_mask] = 0 + ener = xp.where( + xp.logical_or(mask_beyond_rcut, extrapolation_mask), + xp.zeros_like(ener), + ener, + ) return ener @@ -304,12 +317,13 @@ def _get_pairwise_dist(coords: np.ndarray, nlist: np.ndarray) -> np.ndarray: np.ndarray The pairwise distance between the atoms (nframes, nloc, nnei). """ + xp = array_api_compat.array_namespace(coords, nlist) # index type is int64 - batch_indices = np.arange(nlist.shape[0], dtype=np.int64)[:, None, None] + batch_indices = xp.arange(nlist.shape[0], dtype=xp.int64)[:, None, None] neighbor_atoms = coords[batch_indices, nlist] loc_atoms = coords[:, : nlist.shape[1], :] pairwise_dr = loc_atoms[:, :, None, :] - neighbor_atoms - pairwise_rr = np.sqrt(np.sum(np.power(pairwise_dr, 2), axis=-1)) + pairwise_rr = safe_for_sqrt(xp.sum(xp.power(pairwise_dr, 2), axis=-1)) return pairwise_rr @@ -319,7 +333,7 @@ def _extract_spline_coefficient( j_type: np.ndarray, idx: np.ndarray, tab_data: np.ndarray, - nspline: int, + nspline: np.int64, ) -> np.ndarray: """Extract the spline coefficient from the table. @@ -341,9 +355,10 @@ def _extract_spline_coefficient( np.ndarray The spline coefficient. (nframes, nloc, nnei, 4), shape may be squeezed. """ + xp = array_api_compat.array_namespace(i_type, j_type, idx, tab_data) # (nframes, nloc, nnei) - expanded_i_type = np.broadcast_to( - i_type[:, :, np.newaxis], + expanded_i_type = xp.broadcast_to( + i_type[:, :, xp.newaxis], (i_type.shape[0], i_type.shape[1], j_type.shape[-1]), ) @@ -351,18 +366,20 @@ def _extract_spline_coefficient( expanded_tab_data = tab_data[expanded_i_type, j_type] # (nframes, nloc, nnei, 1, 4) - expanded_idx = np.broadcast_to( - idx[..., np.newaxis, np.newaxis], (*idx.shape, 1, 4) + expanded_idx = xp.broadcast_to( + idx[..., xp.newaxis, xp.newaxis], (*idx.shape, 1, 4) ) - clipped_indices = np.clip(expanded_idx, 0, nspline - 1).astype(int) + clipped_indices = xp.clip(expanded_idx, 0, nspline - 1).astype(int) # (nframes, nloc, nnei, 4) - final_coef = np.squeeze( - np.take_along_axis(expanded_tab_data, clipped_indices, 3) + final_coef = xp.squeeze( + xp_take_along_axis(expanded_tab_data, clipped_indices, 3) ) # when the spline idx is beyond the table, all spline coefficients are set to `0`, and the resulting ener corresponding to the idx is also `0`. - final_coef[expanded_idx.squeeze() > nspline] = 0 + final_coef = xp.where( + expanded_idx.squeeze() > nspline, xp.zeros_like(final_coef), final_coef + ) return final_coef @staticmethod diff --git a/deepmd/dpmodel/descriptor/dpa1.py b/deepmd/dpmodel/descriptor/dpa1.py index 2f2b12e03c..b033811507 100644 --- a/deepmd/dpmodel/descriptor/dpa1.py +++ b/deepmd/dpmodel/descriptor/dpa1.py @@ -27,6 +27,9 @@ LayerNorm, NativeLayer, ) +from deepmd.dpmodel.utils.safe_gradient import ( + safe_for_vector_norm, +) from deepmd.dpmodel.utils.seed import ( child_seed, ) @@ -943,7 +946,7 @@ def call( else: raise NotImplementedError - normed = xp.linalg.vector_norm( + normed = safe_for_vector_norm( xp.reshape(rr, (-1, nnei, 4))[:, :, 1:4], axis=-1, keepdims=True ) input_r = xp.reshape(rr, (-1, nnei, 4))[:, :, 1:4] / xp.maximum( diff --git a/deepmd/dpmodel/utils/safe_gradient.py b/deepmd/dpmodel/utils/safe_gradient.py new file mode 100644 index 0000000000..2baf530c08 --- /dev/null +++ b/deepmd/dpmodel/utils/safe_gradient.py @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +"""Safe versions of some functions that have problematic gradients. + +Check https://jax.readthedocs.io/en/latest/faq.html#gradients-contain-nan-where-using-where +for more information. +""" + +import array_api_compat + + +def safe_for_sqrt(x): + """Safe version of sqrt that has a gradient of 0 at x = 0.""" + xp = array_api_compat.array_namespace(x) + mask = x > 0.0 + return xp.where(mask, xp.sqrt(xp.where(mask, x, xp.ones_like(x))), xp.zeros_like(x)) + + +def safe_for_vector_norm(x, /, *, axis=None, keepdims=False, ord=2): + """Safe version of sqrt that has a gradient of 0 at x = 0.""" + xp = array_api_compat.array_namespace(x) + mask = xp.sum(xp.square(x), axis=axis, keepdims=True) > 0 + if keepdims: + mask_squeezed = mask + else: + mask_squeezed = xp.squeeze(mask, axis=axis) + return xp.where( + mask_squeezed, + xp.linalg.vector_norm( + xp.where(mask, x, xp.ones_like(x)), axis=axis, keepdims=keepdims, ord=ord + ), + xp.zeros_like(mask_squeezed, dtype=x.dtype), + ) diff --git a/deepmd/jax/atomic_model/dp_atomic_model.py b/deepmd/jax/atomic_model/dp_atomic_model.py index 077209e29a..5898fd3ff8 100644 --- a/deepmd/jax/atomic_model/dp_atomic_model.py +++ b/deepmd/jax/atomic_model/dp_atomic_model.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Any, + Optional, ) from deepmd.dpmodel.atomic_model.dp_atomic_model import DPAtomicModel as DPAtomicModelDP @@ -13,6 +14,10 @@ from deepmd.jax.descriptor.base_descriptor import ( BaseDescriptor, ) +from deepmd.jax.env import ( + jax, + jnp, +) from deepmd.jax.fitting.base_fitting import ( BaseFitting, ) @@ -28,3 +33,21 @@ class DPAtomicModel(DPAtomicModelDP): def __setattr__(self, name: str, value: Any) -> None: value = base_atomic_model_set_attr(name, value) return super().__setattr__(name, value) + + def forward_common_atomic( + self, + extended_coord: jnp.ndarray, + extended_atype: jnp.ndarray, + nlist: jnp.ndarray, + mapping: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + ) -> dict[str, jnp.ndarray]: + return super().forward_common_atomic( + extended_coord, + extended_atype, + jax.lax.stop_gradient(nlist), + mapping=mapping, + fparam=fparam, + aparam=aparam, + ) diff --git a/deepmd/jax/atomic_model/linear_atomic_model.py b/deepmd/jax/atomic_model/linear_atomic_model.py new file mode 100644 index 0000000000..6ce82fa07c --- /dev/null +++ b/deepmd/jax/atomic_model/linear_atomic_model.py @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, + Optional, +) + +from deepmd.dpmodel.atomic_model.linear_atomic_model import ( + DPZBLLinearEnergyAtomicModel as DPZBLLinearEnergyAtomicModelDP, +) +from deepmd.jax.atomic_model.base_atomic_model import ( + base_atomic_model_set_attr, +) +from deepmd.jax.atomic_model.dp_atomic_model import ( + DPAtomicModel, +) +from deepmd.jax.atomic_model.pairtab_atomic_model import ( + PairTabAtomicModel, +) +from deepmd.jax.common import ( + ArrayAPIVariable, + flax_module, + to_jax_array, +) +from deepmd.jax.env import ( + jax, + jnp, +) + + +@flax_module +class DPZBLLinearEnergyAtomicModel(DPZBLLinearEnergyAtomicModelDP): + def __setattr__(self, name: str, value: Any) -> None: + value = base_atomic_model_set_attr(name, value) + if name == "mapping_list": + value = [ArrayAPIVariable(to_jax_array(vv)) for vv in value] + elif name == "zbl_weight": + value = ArrayAPIVariable(to_jax_array(value)) + elif name == "models": + value = [ + DPAtomicModel.deserialize(value[0].serialize()), + PairTabAtomicModel.deserialize(value[1].serialize()), + ] + return super().__setattr__(name, value) + + def forward_common_atomic( + self, + extended_coord: jnp.ndarray, + extended_atype: jnp.ndarray, + nlist: jnp.ndarray, + mapping: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + ) -> dict[str, jnp.ndarray]: + return super().forward_common_atomic( + extended_coord, + extended_atype, + jax.lax.stop_gradient(nlist), + mapping=mapping, + fparam=fparam, + aparam=aparam, + ) diff --git a/deepmd/jax/atomic_model/pairtab_atomic_model.py b/deepmd/jax/atomic_model/pairtab_atomic_model.py new file mode 100644 index 0000000000..023f4e886a --- /dev/null +++ b/deepmd/jax/atomic_model/pairtab_atomic_model.py @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, + Optional, +) + +from deepmd.dpmodel.atomic_model.pairtab_atomic_model import ( + PairTabAtomicModel as PairTabAtomicModelDP, +) +from deepmd.jax.atomic_model.base_atomic_model import ( + base_atomic_model_set_attr, +) +from deepmd.jax.common import ( + ArrayAPIVariable, + flax_module, + to_jax_array, +) +from deepmd.jax.env import ( + jax, + jnp, +) + + +@flax_module +class PairTabAtomicModel(PairTabAtomicModelDP): + def __setattr__(self, name: str, value: Any) -> None: + value = base_atomic_model_set_attr(name, value) + if name in {"tab_info", "tab_data"}: + value = to_jax_array(value) + if value is not None: + value = ArrayAPIVariable(value) + return super().__setattr__(name, value) + + def forward_common_atomic( + self, + extended_coord: jnp.ndarray, + extended_atype: jnp.ndarray, + nlist: jnp.ndarray, + mapping: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + ) -> dict[str, jnp.ndarray]: + return super().forward_common_atomic( + extended_coord, + extended_atype, + jax.lax.stop_gradient(nlist), + mapping=mapping, + fparam=fparam, + aparam=aparam, + ) diff --git a/deepmd/jax/model/__init__.py b/deepmd/jax/model/__init__.py index 05a60c4ffe..bba5bc766a 100644 --- a/deepmd/jax/model/__init__.py +++ b/deepmd/jax/model/__init__.py @@ -1,6 +1,12 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from .dp_zbl_model import ( + DPZBLLinearEnergyAtomicModel, +) from .ener_model import ( EnergyModel, ) -__all__ = ["EnergyModel"] +__all__ = [ + "EnergyModel", + "DPZBLLinearEnergyAtomicModel", +] diff --git a/deepmd/jax/model/dp_zbl_model.py b/deepmd/jax/model/dp_zbl_model.py new file mode 100644 index 0000000000..028fa8593b --- /dev/null +++ b/deepmd/jax/model/dp_zbl_model.py @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, + Optional, +) + +from deepmd.dpmodel.model.dp_zbl_model import DPZBLModel as DPZBLModelDP +from deepmd.jax.atomic_model.linear_atomic_model import ( + DPZBLLinearEnergyAtomicModel, +) +from deepmd.jax.common import ( + flax_module, +) +from deepmd.jax.env import ( + jnp, +) +from deepmd.jax.model.base_model import ( + BaseModel, + forward_common_atomic, +) + + +@BaseModel.register("zbl") +@flax_module +class DPZBLModel(DPZBLModelDP): + def __setattr__(self, name: str, value: Any) -> None: + if name == "atomic_model": + value = DPZBLLinearEnergyAtomicModel.deserialize(value.serialize()) + return super().__setattr__(name, value) + + def forward_common_atomic( + self, + extended_coord: jnp.ndarray, + extended_atype: jnp.ndarray, + nlist: jnp.ndarray, + mapping: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + do_atomic_virial: bool = False, + ): + return forward_common_atomic( + self, + extended_coord, + extended_atype, + nlist, + mapping=mapping, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) diff --git a/deepmd/jax/model/model.py b/deepmd/jax/model/model.py index 7fa3efda6e..e636eba4c6 100644 --- a/deepmd/jax/model/model.py +++ b/deepmd/jax/model/model.py @@ -3,15 +3,27 @@ deepcopy, ) +from deepmd.jax.atomic_model.dp_atomic_model import ( + DPAtomicModel, +) +from deepmd.jax.atomic_model.pairtab_atomic_model import ( + PairTabAtomicModel, +) from deepmd.jax.descriptor.base_descriptor import ( BaseDescriptor, ) from deepmd.jax.fitting.base_fitting import ( BaseFitting, ) +from deepmd.jax.fitting.fitting import ( + EnergyFittingNet, +) from deepmd.jax.model.base_model import ( BaseModel, ) +from deepmd.jax.model.dp_zbl_model import ( + DPZBLModel, +) def get_standard_model(data: dict): @@ -45,6 +57,45 @@ def get_standard_model(data: dict): ) +def get_zbl_model(data: dict) -> DPZBLModel: + data["descriptor"]["ntypes"] = len(data["type_map"]) + descriptor_type = data["descriptor"].pop("type") + descriptor = BaseDescriptor.get_class_by_type(descriptor_type)(**data["descriptor"]) + fitting_type = data["fitting_net"].pop("type") + if fitting_type == "ener": + fitting = EnergyFittingNet( + ntypes=descriptor.get_ntypes(), + dim_descrpt=descriptor.get_dim_out(), + mixed_types=descriptor.mixed_types(), + **data["fitting_net"], + ) + else: + raise ValueError(f"Unknown fitting type {fitting_type}") + + dp_model = DPAtomicModel(descriptor, fitting, type_map=data["type_map"]) + # pairtab + filepath = data["use_srtab"] + pt_model = PairTabAtomicModel( + filepath, + data["descriptor"]["rcut"], + data["descriptor"]["sel"], + type_map=data["type_map"], + ) + rmin = data["sw_rmin"] + rmax = data["sw_rmax"] + atom_exclude_types = data.get("atom_exclude_types", []) + pair_exclude_types = data.get("pair_exclude_types", []) + return DPZBLModel( + dp_model, + pt_model, + rmin, + rmax, + type_map=data["type_map"], + atom_exclude_types=atom_exclude_types, + pair_exclude_types=pair_exclude_types, + ) + + def get_model(data: dict): """Get a model from a dictionary. @@ -57,6 +108,8 @@ def get_model(data: dict): if model_type == "standard": if "spin" in data: raise NotImplementedError("Spin model is not implemented yet.") + elif "use_srtab" in data: + return get_zbl_model(data) else: return get_standard_model(data) else: diff --git a/source/tests/consistent/model/test_zbl_ener.py b/source/tests/consistent/model/test_zbl_ener.py index f37bee0c90..a63543ab74 100644 --- a/source/tests/consistent/model/test_zbl_ener.py +++ b/source/tests/consistent/model/test_zbl_ener.py @@ -13,6 +13,7 @@ ) from ..common import ( + INSTALLED_JAX, INSTALLED_PT, SKIP_FLAG, CommonTest, @@ -27,6 +28,11 @@ from deepmd.pt.model.model.dp_zbl_model import DPZBLModel as DPZBLModelPT else: DPZBLModelPT = None +if INSTALLED_JAX: + from deepmd.jax.model.dp_zbl_model import DPZBLModel as DPZBLModelJAX + from deepmd.jax.model.model import get_model as get_model_jax +else: + DPZBLModelJAX = None import os from deepmd.utils.argcheck import ( @@ -86,6 +92,7 @@ def data(self) -> dict: dp_class = DPZBLModelDP pt_class = DPZBLModelPT + jax_class = DPZBLModelJAX args = model_args() def get_reference_backend(self): @@ -109,7 +116,7 @@ def skip_tf(self): @property def skip_jax(self): - return True + return not INSTALLED_JAX def pass_data_to_cls(self, cls, data) -> Any: """Pass data to the class.""" @@ -118,6 +125,8 @@ def pass_data_to_cls(self, cls, data) -> Any: return get_model_dp(data) elif cls is DPZBLModelPT: return get_model_pt(data) + elif cls is DPZBLModelJAX: + return get_model_jax(data) return cls(**data, **self.additional_data) def setUp(self): From de6abef681db76cd0fde9feb47dffecb03439496 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Mon, 4 Nov 2024 23:24:22 +0800 Subject: [PATCH 128/193] reformat C/C++ interface --- source/api_c/include/c_api.h | 543 ++++--- source/api_c/include/c_api_internal.h | 46 +- source/api_c/include/deepmd.hpp | 1253 ++++++++++------ source/api_c/src/c_api.cc | 746 +++++----- source/api_c/tests/test_deeppot_a.cc | 8 +- source/api_cc/include/DeepBaseModel.h | 283 ++++ source/api_cc/include/DeepPot.h | 254 +--- source/api_cc/include/DeepSpin.h | 552 ++++++++ source/api_cc/include/DeepSpinPT.h | 273 ++++ source/api_cc/include/DeepSpinTF.h | 339 +++++ source/api_cc/src/DeepBaseModel.cc | 246 ++++ source/api_cc/src/DeepPot.cc | 282 +--- source/api_cc/src/DeepSpin.cc | 627 ++++++++ source/api_cc/src/DeepSpinPT.cc | 574 ++++++++ source/api_cc/src/DeepSpinTF.cc | 1261 +++++++++++++++++ .../api_cc/tests/test_deeppot_dpa1_pt_spin.cc | 251 +++- source/api_cc/tests/test_deeppot_tf_spin.cc | 8 +- source/lmp/pair_base.cpp | 297 +--- source/lmp/pair_base.h | 15 +- source/lmp/pair_deepmd.cpp | 288 +++- source/lmp/pair_deepmd.h | 21 + source/lmp/pair_deepspin.cpp | 338 ++++- source/lmp/pair_deepspin.h | 22 + 23 files changed, 6615 insertions(+), 1912 deletions(-) create mode 100644 source/api_cc/include/DeepBaseModel.h create mode 100644 source/api_cc/include/DeepSpin.h create mode 100644 source/api_cc/include/DeepSpinPT.h create mode 100644 source/api_cc/include/DeepSpinTF.h create mode 100644 source/api_cc/src/DeepBaseModel.cc create mode 100644 source/api_cc/src/DeepSpin.cc create mode 100644 source/api_cc/src/DeepSpinPT.cc create mode 100644 source/api_cc/src/DeepSpinTF.cc diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index 6338da1625..b72a3fcc7c 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -94,6 +94,20 @@ extern void DP_DeleteNlist(DP_Nlist* nl); */ const char* DP_NlistCheckOK(DP_Nlist* dp); +/** + * @brief The deep potential base model. + **/ +typedef struct DP_DeepBaseModel DP_DeepBaseModel; + +extern void DP_DeleteDeepBaseModel(DP_DeepBaseModel* dp); + +/** + * @brief The deep potential base model deviation. + **/ +typedef struct DP_DeepBaseModelDevi DP_DeepBaseModelDevi; + +extern void DP_DeleteDeepBaseModelDevi(DP_DeepBaseModelDevi* dp); + /** * @brief The deep potential. **/ @@ -140,6 +154,40 @@ extern DP_DeepPot* DP_NewDeepPotWithParam2(const char* c_model, */ extern void DP_DeleteDeepPot(DP_DeepPot* dp); +/** + * @brief The deep potential spin. + **/ +typedef struct DP_DeepSpin DP_DeepSpin; + +/** + * @brief DP constructor with initialization. + * @param[in] c_model The name of the frozen model file. + * @returns A pointer to the deep potential. + **/ +extern DP_DeepSpin* DP_NewDeepSpin(const char* c_model); + +/** + * @brief DP constructor with initialization. + * @version 2 + * @param c_model The name of the frozen model file. + * @param gpu_rank The rank of the GPU. + * @param c_file_content The content of the model file. + * @param size_file_content The size of the model file. + * @return DP_DeepSpin* A pointer to the deep potential. + */ +extern DP_DeepSpin* DP_NewDeepSpinWithParam2(const char* c_model, + const int gpu_rank, + const char* c_file_content, + const int size_file_content); + +/** + * @brief Delete a Deep Potential. + * + * @param dp Deep Potential to delete. + */ +extern void DP_DeleteDeepSpin(DP_DeepSpin* dp); + +// deprecated interface version1 /** * @brief Evaluate the energy, force and virial by using a DP. (double version) * @attention The number of frames is assumed to be 1. @@ -171,44 +219,6 @@ extern void DP_DeepPotCompute(DP_DeepPot* dp, double* atomic_energy, double* atomic_virial); -/** - * @brief Evaluate the energy, force, magnetic force and virial by using a DP - *with spin input. (double version) - * @attention The number of frames is assumed to be 1. - * @param[in] dp The DP to use. - * @param[in] natoms The number of atoms. - * @param[in] coord The coordinates of atoms. The array should be of size natoms - *x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be - *of size natoms x 3. - * @param[in] atype The atom types. The array should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size 9. Pass - *NULL if pbc is not used. - * @param[out] energy Output energy. - * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag Output magnetic force. The array should be of size - *natoms x 3. - * @param[out] virial Output virial. The array should be of size 9. - * @param[out] atomic_energy Output atomic energy. The array should be of size - *natoms. - * @param[out] atomic_virial Output atomic virial. The array should be of size - *natoms x 9. - * @warning The output arrays should be allocated before calling this function. - *Pass NULL if not required. - **/ -extern void DP_DeepPotComputeSP(DP_DeepPot* dp, - const int natom, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial); - /** * @brief Evaluate the energy, force and virial by using a DP. (float version) * @attention The number of frames is assumed to be 1. @@ -240,44 +250,6 @@ extern void DP_DeepPotComputef(DP_DeepPot* dp, float* atomic_energy, float* atomic_virial); -/** - * @brief Evaluate the energy, force, magnetic force and virial by using a DP - *with spin input. (float version) - * @attention The number of frames is assumed to be 1. - * @param[in] dp The DP to use. - * @param[in] natoms The number of atoms. - * @param[in] coord The coordinates of atoms. The array should be of size natoms - *x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be - *of size natoms x 3. - * @param[in] atype The atom types. The array should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size 9. Pass - *NULL if pbc is not used. - * @param[out] energy Output energy. - * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag Output magnetic force. The array should be of size - *natoms x 3. - * @param[out] virial Output virial. The array should be of size 9. - * @param[out] atomic_energy Output atomic energy. The array should be of size - *natoms. - * @param[out] atomic_virial Output atomic virial. The array should be of size - *natoms x 9. - * @warning The output arrays should be allocated before calling this function. - *Pass NULL if not required. - **/ -extern void DP_DeepPotComputefSP(DP_DeepPot* dp, - const int natom, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial); - /** * @brief Evaluate the energy, force and virial by using a DP with the neighbor *list. (double version) @@ -316,22 +288,6 @@ extern void DP_DeepPotComputeNList(DP_DeepPot* dp, double* atomic_energy, double* atomic_virial); -extern void DP_DeepPotComputeNListSP(DP_DeepPot* dp, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial); - /** * @brief Evaluate the energy, force and virial by using a DP with the neighbor *list. (float version) @@ -370,22 +326,6 @@ extern void DP_DeepPotComputeNListf(DP_DeepPot* dp, float* atomic_energy, float* atomic_virial); -extern void DP_DeepPotComputeNListfSP(DP_DeepPot* dp, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial); - /** * @brief Evaluate the energy, force and virial by using a DP. (double version) * @version 2 @@ -454,21 +394,21 @@ extern void DP_DeepPotCompute2(DP_DeepPot* dp, * @warning The output arrays should be allocated before calling this function. *Pass NULL if not required. **/ -extern void DP_DeepPotCompute2SP(DP_DeepPot* dp, - const int nframes, - const int natom, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial); +extern void DP_DeepSpinCompute2(DP_DeepSpin* dp, + const int nframes, + const int natom, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP. (float version) @@ -538,21 +478,21 @@ extern void DP_DeepPotComputef2(DP_DeepPot* dp, * @warning The output arrays should be allocated before calling this function. *Pass NULL if not required. **/ -extern void DP_DeepPotComputef2SP(DP_DeepPot* dp, - const int nframes, - const int natom, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const float* fparam, - const float* aparam, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial); +extern void DP_DeepSpinComputef2(DP_DeepSpin* dp, + const int nframes, + const int natom, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP with the neighbor @@ -600,24 +540,24 @@ extern void DP_DeepPotComputeNList2(DP_DeepPot* dp, double* atomic_energy, double* atomic_virial); -extern void DP_DeepPotComputeNList2SP(DP_DeepPot* dp, - const int nframes, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial); +extern void DP_DeepSpinComputeNList2(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP with the neighbor @@ -665,24 +605,24 @@ extern void DP_DeepPotComputeNListf2(DP_DeepPot* dp, float* atomic_energy, float* atomic_virial); -extern void DP_DeepPotComputeNListf2SP(DP_DeepPot* dp, - const int nframes, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const float* fparam, - const float* aparam, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial); +extern void DP_DeepSpinComputeNListf2(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP with the mixed @@ -802,6 +742,47 @@ extern DP_DeepPotModelDevi* DP_NewDeepPotModelDeviWithParam( */ extern void DP_DeleteDeepPotModelDevi(DP_DeepPotModelDevi* dp); +/** + * @brief The deep potential spin model deviation. + **/ +typedef struct DP_DeepSpinModelDevi DP_DeepSpinModelDevi; + +/** + * @brief DP spin model deviation constructor with initialization. + * @param[in] c_models The array of the name of the frozen model file. + * @param[in] nmodels The number of models. + **/ +extern DP_DeepSpinModelDevi* DP_NewDeepSpinModelDevi(const char** c_models, + int n_models); + +/** + * @brief DP spin model deviation constructor with initialization. + * + * @param[in] c_models The array of the name of the frozen model file. + * @param[in] nmodels The number of models. + * @param[in] gpu_rank The rank of the GPU. + * @param[in] c_file_contents The contents of the model file. + * @param[in] n_file_contents The number of the contents of the model file. + * @param[in] size_file_contents The sizes of the contents of the model file. + * @return DP_DeepSpinModelDevi* A pointer to the deep potential model + * deviation. + */ +extern DP_DeepSpinModelDevi* DP_NewDeepSpinModelDeviWithParam( + const char** c_model, + const int n_models, + const int gpu_rank, + const char** c_file_contents, + const int n_file_contents, + const int* size_file_contents); + +/** + * @brief Delete a Deep Potential Spin Model Deviation. + * + * @param dp Deep Potential Spin Model to delete. + */ +extern void DP_DeleteDeepSpinModelDevi(DP_DeepSpinModelDevi* dp); + +// deprecated interface version1 /** * @brief Evaluate the energy, force and virial by using a DP model deviation *with neighbor list. (double version) @@ -943,6 +924,7 @@ void DP_DeepPotModelDeviComputef2(DP_DeepPotModelDevi* dp, float* atomic_energy, float* atomic_virial); +// deprecated interface version1 /** * @brief Evaluate the energy, force and virial by using a DP model deviation *with neighbor list. (double version) @@ -980,22 +962,6 @@ extern void DP_DeepPotModelDeviComputeNList(DP_DeepPotModelDevi* dp, double* atomic_energy, double* atomic_virial); -extern void DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi* dp, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial); - /** * @brief Evaluate the energy, force and virial by using a DP model deviation *with neighbor list. (float version) @@ -1033,22 +999,6 @@ extern void DP_DeepPotModelDeviComputeNListf(DP_DeepPotModelDevi* dp, float* atomic_energy, float* atomic_virial); -extern void DP_DeepPotModelDeviComputeNListfSP(DP_DeepPotModelDevi* dp, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial); - /** * @brief Evaluate the energy, force and virial by using a DP model deviation *with neighbor list. (double version) @@ -1095,24 +1045,24 @@ void DP_DeepPotModelDeviComputeNList2(DP_DeepPotModelDevi* dp, double* atomic_energy, double* atomic_virial); -void DP_DeepPotModelDeviComputeNList2SP(DP_DeepPotModelDevi* dp, - const int nframes, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial); +void DP_DeepSpinModelDeviComputeNList2(DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP model deviation @@ -1160,141 +1110,142 @@ void DP_DeepPotModelDeviComputeNListf2(DP_DeepPotModelDevi* dp, float* atomic_energy, float* atomic_virial); -void DP_DeepPotModelDeviComputeNListf2SP(DP_DeepPotModelDevi* dp, - const int nframes, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const float* fparam, - const float* aparam, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial); - -/** - * @brief Get the type map of a DP model deviation. - * @param[in] dp The DP model deviation to use. - * @return The cutoff radius. - */ -double DP_DeepPotModelDeviGetCutoff(DP_DeepPotModelDevi* dp); - -/** - * @brief Get the number of types of a DP model deviation. - * @param[in] dp The DP model deviation to use. - * @return The number of types of the DP model deviation. - */ -int DP_DeepPotModelDeviGetNumbTypes(DP_DeepPotModelDevi* dp); - -/** - * @brief Get the number of types with spin of a DP model deviation. - * @param[in] dp The DP model deviation to use. - * @return The number of types with spin of the DP model deviation. - */ -int DP_DeepPotModelDeviGetNumbTypesSpin(DP_DeepPotModelDevi* dp); - -/** - * @brief Check if there is any exceptions throw. - * - * @param dp The DP model deviation to use. - * @return const char* error message. - */ -const char* DP_DeepPotModelDeviCheckOK(DP_DeepPotModelDevi* dp); +void DP_DeepSpinModelDeviComputeNListf2(DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); +// Deep Base Model methods /** * @brief Get the type map of a DP. - * @param[in] dp The DP to use. + * @param[in] dpbase The DP to use. * @return The cutoff radius. */ -double DP_DeepPotGetCutoff(DP_DeepPot* dp); +double DP_DeepBaseModelGetCutoff(DP_DeepBaseModel* dpbase); /** * @brief Get the number of types of a DP. - * @param[in] dp The DP to use. + * @param[in] dpbase The DP to use. * @return The number of types of the DP. */ -int DP_DeepPotGetNumbTypes(DP_DeepPot* dp); +int DP_DeepBaseModelGetNumbTypes(DP_DeepBaseModel* dpbase); /** * @brief Get the number of types with spin of a DP. - * @param[in] dp The DP to use. + * @param[in] dpbase The DP to use. * @return The number of types with spin of the DP. */ -int DP_DeepPotGetNumbTypesSpin(DP_DeepPot* dp); +int DP_DeepBaseModelGetNumbTypesSpin(DP_DeepBaseModel* dpbase); /** * @brief Get the dimension of frame parameters of a DP. - * @param[in] dp The DP to use. + * @param[in] dpbase The DP to use. * @return The dimension of frame parameters of the DP. */ -int DP_DeepPotGetDimFParam(DP_DeepPot* dp); +int DP_DeepBaseModelGetDimFParam(DP_DeepBaseModel* dpbase); /** * @brief Get the dimension of atomic parameters of a DP. - * @param[in] dp The DP to use. + * @param[in] dpbase The DP to use. * @return The dimension of atomic parameters of the DP. */ -int DP_DeepPotGetDimAParam(DP_DeepPot* dp); +int DP_DeepBaseModelGetDimAParam(DP_DeepBaseModel* dpbase); /** * @brief Check whether the atomic dimension of atomic parameters is nall * instead of nloc. * - * @param[in] dp The DP to use. + * @param[in] dpbase The DP to use. * @return true the atomic dimension of atomic parameters is nall * @return false the atomic dimension of atomic parameters is nloc */ -bool DP_DeepPotIsAParamNAll(DP_DeepPot* dp); +bool DP_DeepBaseModelIsAParamNAll(DP_DeepBaseModel* dpbase); /** * @brief Get the type map of a DP. - * @param[in] dp The DP to use. + * @param[in] dpbase The DP to use. * @return The type map of the DP. */ -const char* DP_DeepPotGetTypeMap(DP_DeepPot* dp); +const char* DP_DeepBaseModelGetTypeMap(DP_DeepBaseModel* dpbase); + +/** + * @brief Check if there is any exceptions throw. + * + * @param dpbase The DP to use. + * @return const char* error message. + */ +const char* DP_DeepBaseModelCheckOK(DP_DeepBaseModel* dpbase); /** * @brief Get the dimension of frame parameters of a DP Model Deviation. - * @param[in] dp The DP Model Deviation to use. + * @param[in] dpbase The DP Model Deviation to use. * @return The dimension of frame parameters of the DP Model Deviation. */ -int DP_DeepPotModelDeviGetDimFParam(DP_DeepPotModelDevi* dp); +int DP_DeepBaseModelDeviGetDimFParam(DP_DeepBaseModelDevi* dpbase); /** * @brief Get the dimension of atomic parameters of a DP Model Deviation. - * @param[in] dp The DP Model Deviation to use. + * @param[in] dpbase The DP Model Deviation to use. * @return The dimension of atomic parameters of the DP Model Deviation. */ -int DP_DeepPotModelDeviGetDimAParam(DP_DeepPotModelDevi* dp); +int DP_DeepBaseModelDeviGetDimAParam(DP_DeepBaseModelDevi* dpbase); /** * @brief Check whether the atomic dimension of atomic parameters is nall * instead of nloc. * - * @param[in] dp The DP Model Deviation to use. + * @param[in] dpbase The DP Model Deviation to use. * @return true the atomic dimension of atomic parameters is nall * @return false the atomic dimension of atomic parameters is nloc */ -bool DP_DeepPotModelDeviIsAParamNAll(DP_DeepPotModelDevi* dp); +bool DP_DeepBaseModelDeviIsAParamNAll(DP_DeepBaseModelDevi* dpbase); /** - * @brief The deep tensor. - **/ -typedef struct DP_DeepTensor DP_DeepTensor; + * @brief Get the type map of a DP model deviation. + * @param[in] dpbase The DP model deviation to use. + * @return The cutoff radius. + */ +double DP_DeepBaseModelDeviGetCutoff(DP_DeepBaseModelDevi* dpbase); + +/** + * @brief Get the number of types of a DP model deviation. + * @param[in] dpbase The DP model deviation to use. + * @return The number of types of the DP model deviation. + */ +int DP_DeepBaseModelDeviGetNumbTypes(DP_DeepBaseModelDevi* dpbase); + +/** + * @brief Get the number of types with spin of a DP model deviation. + * @param[in] dpbase The DP model deviation to use. + * @return The number of types with spin of the DP model deviation. + */ +int DP_DeepBaseModelDeviGetNumbTypesSpin(DP_DeepBaseModelDevi* dpbase); /** * @brief Check if there is any exceptions throw. * - * @param dp The DP to use. + * @param dpbase The DP model deviation to use. * @return const char* error message. */ -const char* DP_DeepPotCheckOK(DP_DeepPot* dp); +const char* DP_DeepBaseModelDeviCheckOK(DP_DeepBaseModelDevi* dpbase); + +/** + * @brief The deep tensor. + **/ +typedef struct DP_DeepTensor DP_DeepTensor; /** * @brief Deep Tensor constructor with initialization. diff --git a/source/api_c/include/c_api_internal.h b/source/api_c/include/c_api_internal.h index 85e1d2f421..1310c46487 100644 --- a/source/api_c/include/c_api_internal.h +++ b/source/api_c/include/c_api_internal.h @@ -2,7 +2,9 @@ #include #include "DataModifier.h" +#include "DeepBaseModel.h" #include "DeepPot.h" +#include "DeepSpin.h" #include "DeepTensor.h" #include "neighbor_list.h" @@ -33,28 +35,56 @@ struct DP_Nlist { std::string exception; }; -struct DP_DeepPot { - DP_DeepPot(); - DP_DeepPot(deepmd::DeepPot& dp); +struct DP_DeepBaseModel { + DP_DeepBaseModel(); + DP_DeepBaseModel(deepmd::DeepBaseModel& dpbase); - deepmd::DeepPot dp; + deepmd::DeepBaseModel dpbase; std::string exception; int dfparam; int daparam; bool aparam_nall; }; -struct DP_DeepPotModelDevi { - DP_DeepPotModelDevi(); - DP_DeepPotModelDevi(deepmd::DeepPotModelDevi& dp); +struct DP_DeepBaseModelDevi { + DP_DeepBaseModelDevi(); + DP_DeepBaseModelDevi(deepmd::DeepBaseModelDevi& dpbase); - deepmd::DeepPotModelDevi dp; + deepmd::DeepBaseModelDevi dpbase; std::string exception; int dfparam; int daparam; bool aparam_nall; }; +struct DP_DeepPot : DP_DeepBaseModel { + DP_DeepPot(); + DP_DeepPot(deepmd::DeepPot& dp); + + deepmd::DeepPot dp; +}; + +struct DP_DeepPotModelDevi : DP_DeepBaseModelDevi { + DP_DeepPotModelDevi(); + DP_DeepPotModelDevi(deepmd::DeepPotModelDevi& dp); + + deepmd::DeepPotModelDevi dp; +}; + +struct DP_DeepSpin : DP_DeepBaseModel { + DP_DeepSpin(); + DP_DeepSpin(deepmd::DeepSpin& dp); + + deepmd::DeepSpin dp; +}; + +struct DP_DeepSpinModelDevi : DP_DeepBaseModelDevi { + DP_DeepSpinModelDevi(); + DP_DeepSpinModelDevi(deepmd::DeepSpinModelDevi& dp); + + deepmd::DeepSpinModelDevi dp; +}; + struct DP_DeepTensor { DP_DeepTensor(); DP_DeepTensor(deepmd::DeepTensor& dt); diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index f664d622fe..98c46eb04a 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -97,7 +97,7 @@ inline void _DP_DeepPotCompute(DP_DeepPot *dp, // support spin template -inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, +inline void _DP_DeepPotComputeSP(DP_DeepSpin *dp, const int nframes, const int natom, const FPTYPE *coord, @@ -114,7 +114,7 @@ inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, FPTYPE *atomic_virial); template <> -inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, +inline void _DP_DeepPotComputeSP(DP_DeepSpin *dp, const int nframes, const int natom, const double *coord, @@ -129,13 +129,13 @@ inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, double *virial, double *atomic_energy, double *atomic_virial) { - DP_DeepPotCompute2SP(dp, nframes, natom, coord, spin, atype, cell, fparam, - aparam, energy, force, force_mag, virial, atomic_energy, - atomic_virial); + DP_DeepSpinCompute2(dp, nframes, natom, coord, spin, atype, cell, fparam, + aparam, energy, force, force_mag, virial, atomic_energy, + atomic_virial); } template <> -inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, +inline void _DP_DeepPotComputeSP(DP_DeepSpin *dp, const int nframes, const int natom, const float *coord, @@ -150,9 +150,9 @@ inline void _DP_DeepPotComputeSP(DP_DeepPot *dp, float *virial, float *atomic_energy, float *atomic_virial) { - DP_DeepPotComputef2SP(dp, nframes, natom, coord, spin, atype, cell, fparam, - aparam, energy, force, force_mag, virial, atomic_energy, - atomic_virial); + DP_DeepSpinComputef2(dp, nframes, natom, coord, spin, atype, cell, fparam, + aparam, energy, force, force_mag, virial, atomic_energy, + atomic_virial); } template @@ -219,7 +219,7 @@ inline void _DP_DeepPotComputeNList(DP_DeepPot *dp, // support spin template -inline void _DP_DeepPotComputeNListSP(DP_DeepPot *dp, +inline void _DP_DeepPotComputeNListSP(DP_DeepSpin *dp, const int nframes, const int natom, const FPTYPE *coord, @@ -239,7 +239,7 @@ inline void _DP_DeepPotComputeNListSP(DP_DeepPot *dp, FPTYPE *atomic_virial); template <> -inline void _DP_DeepPotComputeNListSP(DP_DeepPot *dp, +inline void _DP_DeepPotComputeNListSP(DP_DeepSpin *dp, const int nframes, const int natom, const double *coord, @@ -257,13 +257,13 @@ inline void _DP_DeepPotComputeNListSP(DP_DeepPot *dp, double *virial, double *atomic_energy, double *atomic_virial) { - DP_DeepPotComputeNList2SP(dp, nframes, natom, coord, spin, atype, cell, - nghost, nlist, ago, fparam, aparam, energy, force, - force_mag, virial, atomic_energy, atomic_virial); + DP_DeepSpinComputeNList2(dp, nframes, natom, coord, spin, atype, cell, nghost, + nlist, ago, fparam, aparam, energy, force, force_mag, + virial, atomic_energy, atomic_virial); } template <> -inline void _DP_DeepPotComputeNListSP(DP_DeepPot *dp, +inline void _DP_DeepPotComputeNListSP(DP_DeepSpin *dp, const int nframes, const int natom, const float *coord, @@ -281,9 +281,9 @@ inline void _DP_DeepPotComputeNListSP(DP_DeepPot *dp, float *virial, float *atomic_energy, float *atomic_virial) { - DP_DeepPotComputeNListf2SP(dp, nframes, natom, coord, spin, atype, cell, - nghost, nlist, ago, fparam, aparam, energy, force, - force_mag, virial, atomic_energy, atomic_virial); + DP_DeepSpinComputeNListf2(dp, nframes, natom, coord, spin, atype, cell, + nghost, nlist, ago, fparam, aparam, energy, force, + force_mag, virial, atomic_energy, atomic_virial); } template @@ -449,7 +449,7 @@ inline void _DP_DeepPotModelDeviComputeNList(DP_DeepPotModelDevi *dp, } template -inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi *dp, +inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepSpinModelDevi *dp, const int natom, const FPTYPE *coord, const FPTYPE *spin, @@ -467,7 +467,7 @@ inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi *dp, FPTYPE *atomic_energy, FPTYPE *atomic_virial); template <> -inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi *dp, +inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepSpinModelDevi *dp, const int natom, const double *coord, const double *spin, @@ -484,12 +484,12 @@ inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi *dp, double *virial, double *atomic_energy, double *atomic_virial) { - DP_DeepPotModelDeviComputeNList2SP( + DP_DeepSpinModelDeviComputeNList2( dp, 1, natom, coord, spin, atype, cell, nghost, nlist, ago, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); } template <> -inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi *dp, +inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepSpinModelDevi *dp, const int natom, const float *coord, const float *spin, @@ -506,7 +506,7 @@ inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi *dp, float *virial, float *atomic_energy, float *atomic_virial) { - DP_DeepPotModelDeviComputeNListf2SP( + DP_DeepSpinModelDeviComputeNListf2( dp, 1, natom, coord, spin, atype, cell, nghost, nlist, ago, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); } @@ -836,16 +836,128 @@ void inline convert_nlist(InputNlist &to_nlist, to_nlist.nl = DP_NewNlist(to_nlist.inum, to_nlist.ilist, to_nlist.numneigh, to_nlist.firstneigh); } +/** + * @brief Deep Potential Base Model. + **/ +class DeepBaseModel { + public: + /** + * @brief DP Base Model constructor without initialization. + **/ + DeepBaseModel() : dpbase(nullptr) {}; + ~DeepBaseModel() { DP_DeleteDeepBaseModel(dpbase); }; + + /** + * @brief Get the cutoff radius. + * @return The cutoff radius. + **/ + double cutoff() const { + assert(dpbase); + return DP_DeepBaseModelGetCutoff(dpbase); + }; + /** + * @brief Get the number of types. + * @return The number of types. + **/ + int numb_types() const { + assert(dpbase); + return DP_DeepBaseModelGetNumbTypes(dpbase); + }; + /** + * @brief Get the number of types with spin. + * @return The number of types with spin. + **/ + int numb_types_spin() const { + assert(dpbase); + return DP_DeepBaseModelGetNumbTypesSpin(dpbase); + }; + /** + * @brief Get the type map (element name of the atom types) of this model. + * @param[out] type_map The type map of this model. + **/ + void get_type_map(std::string &type_map) { + const char *type_map_c = DP_DeepBaseModelGetTypeMap(dpbase); + type_map.assign(type_map_c); + DP_DeleteChar(type_map_c); + }; + /** + * @brief Print the summary of DeePMD-kit, including the version and the build + * information. + * @param[in] pre The prefix to each line. + */ + void print_summary(const std::string &pre) const { + DP_PrintSummary(pre.c_str()); + } + /** + * @brief Get the dimension of the frame parameter. + * @return The dimension of the frame parameter. + **/ + int dim_fparam() const { + assert(dpbase); + return dfparam; + } + /** + * @brief Get the dimension of the atomic parameter. + * @return The dimension of the atomic parameter. + **/ + int dim_aparam() const { + assert(dpbase); + return daparam; + } + + protected: + DP_DeepBaseModel *dpbase; + int dfparam; + int daparam; + bool aparam_nall; + template + void validate_fparam_aparam(const int &nframes, + const int &nloc, + const std::vector &fparam, + const std::vector &aparam) const { + if (fparam.size() != dfparam && + fparam.size() != static_cast(nframes) * dfparam) { + throw deepmd::hpp::deepmd_exception( + "the dim of frame parameter provided is not consistent with what the " + "model uses"); + } + + if (aparam.size() != static_cast(daparam) * nloc && + aparam.size() != static_cast(nframes) * daparam * nloc) { + throw deepmd::hpp::deepmd_exception( + "the dim of atom parameter provided is not consistent with what the " + "model uses"); + } + } + template + void tile_fparam_aparam(std::vector &out_param, + const int &nframes, + const int &dparam, + const std::vector ¶m) const { + if (param.size() == dparam) { + out_param.resize(static_cast(nframes) * dparam); + for (int ii = 0; ii < nframes; ++ii) { + std::copy(param.begin(), param.end(), + out_param.begin() + static_cast(ii) * dparam); + } + } else if (param.size() == static_cast(nframes) * dparam) { + out_param = param; + } + } +}; + /** * @brief Deep Potential. **/ -class DeepPot { +class DeepPot : public DeepBaseModel { public: /** * @brief DP constructor without initialization. **/ DeepPot() : dp(nullptr) {}; - ~DeepPot() { DP_DeleteDeepPot(dp); }; + ~DeepPot() { + // the base destructor will be called + }; /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. @@ -883,10 +995,11 @@ class DeepPot { } dp = DP_NewDeepPotWithParam2(model.c_str(), gpu_rank, file_content.c_str(), file_content.size()); - DP_CHECK_OK(DP_DeepPotCheckOK, dp); - dfparam = DP_DeepPotGetDimFParam(dp); - daparam = DP_DeepPotGetDimAParam(dp); - aparam_nall = DP_DeepPotIsAParamNAll(dp); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + dfparam = DP_DeepBaseModelGetDimFParam((DP_DeepBaseModel *)dp); + daparam = DP_DeepBaseModelGetDimAParam((DP_DeepBaseModel *)dp); + aparam_nall = DP_DeepBaseModelIsAParamNAll((DP_DeepBaseModel *)dp); + dpbase = (DP_DeepBaseModel *)dp; }; /** @@ -943,20 +1056,18 @@ class DeepPot { _DP_DeepPotCompute(dp, nframes, natoms, coord_, atype_, box_, fparam__, aparam__, ener_, force_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepPotCheckOK, dp); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; - // support spin /** - * @brief Evaluate the energy, force, magnetic force and virial by using this - *DP with spin input. + * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial + *by using this DP. * @param[out] ener The system energy. * @param[out] force The force on each atom. - * @param[out] force_mag The magnetic force on each atom. * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9 (PBC) or empty (no PBC). @@ -971,13 +1082,13 @@ class DeepPot { * @warning Natoms should not be zero when computing multiple frames. **/ template - void compute_spin( + void compute( ENERGYVTYPE &ener, std::vector &force, - std::vector &force_mag, std::vector &virial, + std::vector &atom_energy, + std::vector &atom_virial, const std::vector &coord, - const std::vector &spin, const std::vector &atype, const std::vector &box, const std::vector &fparam = std::vector(), @@ -989,16 +1100,18 @@ class DeepPot { assert(box.size() == nframes * 9); } const VALUETYPE *coord_ = &coord[0]; - const VALUETYPE *spin_ = &spin[0]; const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; const int *atype_ = &atype[0]; + double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); force.resize(static_cast(nframes) * natoms * 3); - force_mag.resize(static_cast(nframes) * natoms * 3); virial.resize(static_cast(nframes) * 9); + atom_energy.resize(static_cast(nframes) * natoms); + atom_virial.resize(static_cast(nframes) * natoms * 9); VALUETYPE *force_ = &force[0]; - VALUETYPE *force_mag_ = &force_mag[0]; VALUETYPE *virial_ = &virial[0]; + VALUETYPE *atomic_ener_ = &atom_energy[0]; + VALUETYPE *atomic_virial_ = &atom_virial[0]; std::vector fparam_, aparam_; validate_fparam_aparam(nframes, natoms, fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); @@ -1006,24 +1119,53 @@ class DeepPot { const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeSP(dp, nframes, natoms, coord_, spin_, atype_, - box_, fparam__, aparam__, ener_, force_, - force_mag_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepPotCheckOK, dp); + _DP_DeepPotCompute(dp, nframes, natoms, coord_, atype_, box_, + fparam__, aparam__, ener_, force_, virial_, + atomic_ener_, atomic_virial_); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; + /** - * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial - *by using this DP. + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. * @param[out] ener The system energy. * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. * @param[out] virial The virial. * @param[out] atom_energy The atomic energy. * @param[out] atom_virial The atomic virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9 (PBC) or empty (no PBC). + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @warning Natoms should not be zero when computing multiple frames. + **/ + + /** + * @brief Evaluate the energy, force and virial by using this DP with the + *neighbor list. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] virial The virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9 (PBC) or empty (no PBC). + * @param[in] nghost The number of ghost atoms. + * @param[in] nlist The neighbor list. + * @param[in] ago Update the internal neighbour list if ago is 0. * @param[in] fparam The frame parameter. The array can be of size : * nframes x dim_fparam. * dim_fparam. Then all frames are assumed to be provided with the same @@ -1039,11 +1181,12 @@ class DeepPot { ENERGYVTYPE &ener, std::vector &force, std::vector &virial, - std::vector &atom_energy, - std::vector &atom_virial, const std::vector &coord, const std::vector &atype, const std::vector &box, + const int nghost, + const InputNlist &lmp_list, + const int &ago, const std::vector &fparam = std::vector(), const std::vector &aparam = std::vector()) { unsigned int natoms = atype.size(); @@ -1055,45 +1198,42 @@ class DeepPot { const VALUETYPE *coord_ = &coord[0]; const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; const int *atype_ = &atype[0]; - double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); force.resize(static_cast(nframes) * natoms * 3); virial.resize(static_cast(nframes) * 9); - atom_energy.resize(static_cast(nframes) * natoms); - atom_virial.resize(static_cast(nframes) * natoms * 9); VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; - VALUETYPE *atomic_ener_ = &atom_energy[0]; - VALUETYPE *atomic_virial_ = &atom_virial[0]; std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, natoms, fparam, aparam); + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, natoms * daparam, aparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotCompute(dp, nframes, natoms, coord_, atype_, box_, - fparam__, aparam__, ener_, force_, virial_, - atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepPotCheckOK, dp); + _DP_DeepPotComputeNList( + dp, nframes, natoms, coord_, atype_, box_, nghost, lmp_list.nl, ago, + fparam__, aparam__, ener_, force_, virial_, nullptr, nullptr); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; - /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, - *and atomic virial by using this DP with spin input. + * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial + *by using this DP with the neighbor list. * @param[out] ener The system energy. * @param[out] force The force on each atom. - * @param[out] force_mag The magnetic force on each atom. * @param[out] virial The virial. * @param[out] atom_energy The atomic energy. * @param[out] atom_virial The atomic virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9 (PBC) or empty (no PBC). + * @param[in] nghost The number of ghost atoms. + * @param[in] nlist The neighbor list. + * @param[in] ago Update the internal neighbour list if ago is 0. * @param[in] fparam The frame parameter. The array can be of size : * nframes x dim_fparam. * dim_fparam. Then all frames are assumed to be provided with the same @@ -1105,17 +1245,18 @@ class DeepPot { * @warning Natoms should not be zero when computing multiple frames. **/ template - void compute_spin( + void compute( ENERGYVTYPE &ener, std::vector &force, - std::vector &force_mag, std::vector &virial, std::vector &atom_energy, std::vector &atom_virial, const std::vector &coord, - const std::vector &spin, const std::vector &atype, const std::vector &box, + const int nghost, + const InputNlist &lmp_list, + const int &ago, const std::vector &fparam = std::vector(), const std::vector &aparam = std::vector()) { unsigned int natoms = atype.size(); @@ -1125,48 +1266,46 @@ class DeepPot { assert(box.size() == nframes * 9); } const VALUETYPE *coord_ = &coord[0]; - const VALUETYPE *spin_ = &spin[0]; const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; const int *atype_ = &atype[0]; double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); force.resize(static_cast(nframes) * natoms * 3); - force_mag.resize(static_cast(nframes) * natoms * 3); virial.resize(static_cast(nframes) * 9); atom_energy.resize(static_cast(nframes) * natoms); atom_virial.resize(static_cast(nframes) * natoms * 9); VALUETYPE *force_ = &force[0]; - VALUETYPE *force_mag_ = &force_mag[0]; VALUETYPE *virial_ = &virial[0]; VALUETYPE *atomic_ener_ = &atom_energy[0]; VALUETYPE *atomic_virial_ = &atom_virial[0]; std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, natoms, fparam, aparam); + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, natoms * daparam, aparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeSP( - dp, nframes, natoms, coord_, spin_, atype_, box_, fparam__, aparam__, - ener_, force_, force_mag_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepPotCheckOK, dp); + _DP_DeepPotComputeNList(dp, nframes, natoms, coord_, atype_, + box_, nghost, lmp_list.nl, ago, fparam__, + aparam__, ener_, force_, virial_, + atomic_ener_, atomic_virial_); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; - /** * @brief Evaluate the energy, force and virial by using this DP with the - *neighbor list. + *mixed type. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] virial The virial. + * @param[in] nframes The number of frames. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9 (PBC) or empty (no PBC). - * @param[in] nghost The number of ghost atoms. - * @param[in] nlist The neighbor list. - * @param[in] ago Update the internal neighbour list if ago is 0. * @param[in] fparam The frame parameter. The array can be of size : * nframes x dim_fparam. * dim_fparam. Then all frames are assumed to be provided with the same @@ -1175,23 +1314,19 @@ class DeepPot { * nframes x natoms x dim_aparam. * natoms x dim_aparam. Then all frames are assumed to be provided with the *same aparam. - * @warning Natoms should not be zero when computing multiple frames. **/ template - void compute( + void compute_mixed_type( ENERGYVTYPE &ener, std::vector &force, std::vector &virial, + const int &nframes, const std::vector &coord, const std::vector &atype, const std::vector &box, - const int nghost, - const InputNlist &lmp_list, - const int &ago, const std::vector &fparam = std::vector(), const std::vector &aparam = std::vector()) { - unsigned int natoms = atype.size(); - unsigned int nframes = natoms > 0 ? coord.size() / natoms / 3 : 1; + unsigned int natoms = atype.size() / nframes; assert(nframes * natoms * 3 == coord.size()); if (!box.empty()) { assert(box.size() == nframes * 9); @@ -1205,84 +1340,31 @@ class DeepPot { VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), - fparam, aparam); + validate_fparam_aparam(nframes, natoms, fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, - (aparam_nall ? natoms : (natoms - nghost)) * daparam, - aparam); + tile_fparam_aparam(aparam_, nframes, natoms * daparam, aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeNList( - dp, nframes, natoms, coord_, atype_, box_, nghost, lmp_list.nl, ago, - fparam__, aparam__, ener_, force_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepPotCheckOK, dp); - }; - // support spin - template - void compute_spin( - ENERGYVTYPE &ener, - std::vector &force, - std::vector &force_mag, - std::vector &virial, - const std::vector &coord, - const std::vector &spin, - const std::vector &atype, - const std::vector &box, - const int nghost, - const InputNlist &lmp_list, - const int &ago, - const std::vector &fparam = std::vector(), - const std::vector &aparam = std::vector()) { - unsigned int natoms = atype.size(); - unsigned int nframes = natoms > 0 ? coord.size() / natoms / 3 : 1; - assert(nframes * natoms * 3 == coord.size()); - if (!box.empty()) { - assert(box.size() == nframes * 9); - } - const VALUETYPE *coord_ = &coord[0]; - const VALUETYPE *spin_ = &spin[0]; - const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; - const int *atype_ = &atype[0]; - double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); - force.resize(static_cast(nframes) * natoms * 3); - force_mag.resize(static_cast(nframes) * natoms * 3); - virial.resize(static_cast(nframes) * 9); - VALUETYPE *force_ = &force[0]; - VALUETYPE *force_mag_ = &force_mag[0]; - VALUETYPE *virial_ = &virial[0]; - std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), - fparam, aparam); - tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, - (aparam_nall ? natoms : (natoms - nghost)) * daparam, - aparam); - const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; - const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeNListSP(dp, nframes, natoms, coord_, spin_, - atype_, box_, nghost, lmp_list.nl, ago, - fparam__, aparam__, ener_, force_, - force_mag_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepPotCheckOK, dp); + _DP_DeepPotComputeMixedType(dp, nframes, natoms, coord_, atype_, + box_, fparam__, aparam__, ener_, + force_, virial_, nullptr, nullptr); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial - *by using this DP with the neighbor list. + *by using this DP with the mixed type. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] virial The virial. * @param[out] atom_energy The atomic energy. * @param[out] atom_virial The atomic virial. + * @param[in] nframes The number of frames. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9 (PBC) or empty (no PBC). - * @param[in] nghost The number of ghost atoms. - * @param[in] nlist The neighbor list. - * @param[in] ago Update the internal neighbour list if ago is 0. * @param[in] fparam The frame parameter. The array can be of size : * nframes x dim_fparam. * dim_fparam. Then all frames are assumed to be provided with the same @@ -1291,25 +1373,21 @@ class DeepPot { * nframes x natoms x dim_aparam. * natoms x dim_aparam. Then all frames are assumed to be provided with the *same aparam. - * @warning Natoms should not be zero when computing multiple frames. **/ template - void compute( + void compute_mixed_type( ENERGYVTYPE &ener, std::vector &force, std::vector &virial, std::vector &atom_energy, std::vector &atom_virial, + const int &nframes, const std::vector &coord, const std::vector &atype, const std::vector &box, - const int nghost, - const InputNlist &lmp_list, - const int &ago, const std::vector &fparam = std::vector(), const std::vector &aparam = std::vector()) { - unsigned int natoms = atype.size(); - unsigned int nframes = natoms > 0 ? coord.size() / natoms / 3 : 1; + unsigned int natoms = atype.size() / nframes; assert(nframes * natoms * 3 == coord.size()); if (!box.empty()) { assert(box.size() == nframes * 9); @@ -1328,37 +1406,110 @@ class DeepPot { VALUETYPE *atomic_ener_ = &atom_energy[0]; VALUETYPE *atomic_virial_ = &atom_virial[0]; std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), - fparam, aparam); + validate_fparam_aparam(nframes, natoms, fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, - (aparam_nall ? natoms : (natoms - nghost)) * daparam, - aparam); + tile_fparam_aparam(aparam_, nframes, natoms * daparam, aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeNList(dp, nframes, natoms, coord_, atype_, - box_, nghost, lmp_list.nl, ago, fparam__, - aparam__, ener_, force_, virial_, - atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepPotCheckOK, dp); + _DP_DeepPotComputeMixedType( + dp, nframes, natoms, coord_, atype_, box_, fparam__, aparam__, ener_, + force_, virial_, atomic_ener_, atomic_virial_); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + }; + + private: + DP_DeepPot *dp; +}; + +class DeepSpin : public DeepBaseModel { + public: + /** + * @brief DP constructor without initialization. + **/ + DeepSpin() : dp(nullptr) {}; + ~DeepSpin() { + // the base destructor will be called + }; + /** + * @brief DP constructor with initialization. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. + * @param[in] file_content The content of the frozen model file. + **/ + DeepSpin(const std::string &model, + const int &gpu_rank = 0, + const std::string &file_content = "") + : dp(nullptr) { + try { + init(model, gpu_rank, file_content); + } catch (...) { + // Clean up and rethrow, as the destructor will not be called + if (dp) { + DP_DeleteDeepSpin(dp); + } + throw; + } + }; + /** + * @brief Initialize the DP. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. + * @param[in] file_content The content of the frozen model file. + **/ + void init(const std::string &model, + const int &gpu_rank = 0, + const std::string &file_content = "") { + if (dp) { + std::cerr << "WARNING: deepmd-kit should not be initialized twice, do " + "nothing at the second call of initializer" + << std::endl; + return; + } + dp = DP_NewDeepSpinWithParam2(model.c_str(), gpu_rank, file_content.c_str(), + file_content.size()); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + dfparam = DP_DeepBaseModelGetDimFParam((DP_DeepBaseModel *)dp); + daparam = DP_DeepBaseModelGetDimAParam((DP_DeepBaseModel *)dp); + aparam_nall = DP_DeepBaseModelIsAParamNAll((DP_DeepBaseModel *)dp); + dpbase = (DP_DeepBaseModel *)dp; }; + // support spin + /** + * @brief Evaluate the energy, force, magnetic force and virial by using this + *DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9 (PBC) or empty (no PBC). + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @warning Natoms should not be zero when computing multiple frames. + **/ template void compute_spin( ENERGYVTYPE &ener, std::vector &force, std::vector &force_mag, std::vector &virial, - std::vector &atom_energy, - std::vector &atom_virial, const std::vector &coord, const std::vector &spin, const std::vector &atype, const std::vector &box, - const int nghost, - const InputNlist &lmp_list, - const int &ago, const std::vector &fparam = std::vector(), const std::vector &aparam = std::vector()) { unsigned int natoms = atype.size(); @@ -1375,37 +1526,35 @@ class DeepPot { force.resize(static_cast(nframes) * natoms * 3); force_mag.resize(static_cast(nframes) * natoms * 3); virial.resize(static_cast(nframes) * 9); - atom_energy.resize(static_cast(nframes) * natoms); - atom_virial.resize(static_cast(nframes) * natoms * 9); VALUETYPE *force_ = &force[0]; VALUETYPE *force_mag_ = &force_mag[0]; VALUETYPE *virial_ = &virial[0]; - VALUETYPE *atomic_ener_ = &atom_energy[0]; - VALUETYPE *atomic_virial_ = &atom_virial[0]; std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), - fparam, aparam); + validate_fparam_aparam(nframes, natoms, fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, - (aparam_nall ? natoms : (natoms - nghost)) * daparam, - aparam); + tile_fparam_aparam(aparam_, nframes, natoms * daparam, aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeNListSP( - dp, nframes, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, - ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, - atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepPotCheckOK, dp); + + _DP_DeepPotComputeSP(dp, nframes, natoms, coord_, spin_, atype_, + box_, fparam__, aparam__, ener_, force_, + force_mag_, virial_, nullptr, nullptr); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; + /** - * @brief Evaluate the energy, force and virial by using this DP with the - *mixed type. + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. * @param[out] ener The system energy. * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. * @param[out] virial The virial. - * @param[in] nframes The number of frames. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. * @param[in] coord The coordinates of atoms. The array should be of size *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. * @param[in] box The cell of the region. The array should be of size nframes *x 9 (PBC) or empty (no PBC). @@ -1417,31 +1566,44 @@ class DeepPot { * nframes x natoms x dim_aparam. * natoms x dim_aparam. Then all frames are assumed to be provided with the *same aparam. + * @warning Natoms should not be zero when computing multiple frames. **/ template - void compute_mixed_type( + void compute_spin( ENERGYVTYPE &ener, std::vector &force, + std::vector &force_mag, std::vector &virial, - const int &nframes, + std::vector &atom_energy, + std::vector &atom_virial, const std::vector &coord, + const std::vector &spin, const std::vector &atype, const std::vector &box, const std::vector &fparam = std::vector(), const std::vector &aparam = std::vector()) { - unsigned int natoms = atype.size() / nframes; + unsigned int natoms = atype.size(); + unsigned int nframes = natoms > 0 ? coord.size() / natoms / 3 : 1; assert(nframes * natoms * 3 == coord.size()); if (!box.empty()) { assert(box.size() == nframes * 9); } const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; const int *atype_ = &atype[0]; + double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); force.resize(static_cast(nframes) * natoms * 3); + force_mag.resize(static_cast(nframes) * natoms * 3); virial.resize(static_cast(nframes) * 9); + atom_energy.resize(static_cast(nframes) * natoms); + atom_virial.resize(static_cast(nframes) * natoms * 9); VALUETYPE *force_ = &force[0]; + VALUETYPE *force_mag_ = &force_mag[0]; VALUETYPE *virial_ = &virial[0]; + VALUETYPE *atomic_ener_ = &atom_energy[0]; + VALUETYPE *atomic_virial_ = &atom_virial[0]; std::vector fparam_, aparam_; validate_fparam_aparam(nframes, natoms, fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); @@ -1449,124 +1611,161 @@ class DeepPot { const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeMixedType(dp, nframes, natoms, coord_, atype_, - box_, fparam__, aparam__, ener_, - force_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepPotCheckOK, dp); + _DP_DeepPotComputeSP( + dp, nframes, natoms, coord_, spin_, atype_, box_, fparam__, aparam__, + ener_, force_, force_mag_, virial_, atomic_ener_, atomic_virial_); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; - /** - * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial - *by using this DP with the mixed type. - * @param[out] ener The system energy. - * @param[out] force The force on each atom. - * @param[out] virial The virial. - * @param[out] atom_energy The atomic energy. - * @param[out] atom_virial The atomic virial. - * @param[in] nframes The number of frames. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9 (PBC) or empty (no PBC). - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. - **/ + + // support spin template - void compute_mixed_type( + void compute_spin( ENERGYVTYPE &ener, std::vector &force, + std::vector &force_mag, std::vector &virial, - std::vector &atom_energy, - std::vector &atom_virial, - const int &nframes, const std::vector &coord, + const std::vector &spin, const std::vector &atype, const std::vector &box, + const int nghost, + const InputNlist &lmp_list, + const int &ago, const std::vector &fparam = std::vector(), const std::vector &aparam = std::vector()) { - unsigned int natoms = atype.size() / nframes; + unsigned int natoms = atype.size(); + unsigned int nframes = natoms > 0 ? coord.size() / natoms / 3 : 1; assert(nframes * natoms * 3 == coord.size()); if (!box.empty()) { assert(box.size() == nframes * 9); } const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; const int *atype_ = &atype[0]; - double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); force.resize(static_cast(nframes) * natoms * 3); + force_mag.resize(static_cast(nframes) * natoms * 3); virial.resize(static_cast(nframes) * 9); - atom_energy.resize(static_cast(nframes) * natoms); - atom_virial.resize(static_cast(nframes) * natoms * 9); VALUETYPE *force_ = &force[0]; + VALUETYPE *force_mag_ = &force_mag[0]; VALUETYPE *virial_ = &virial[0]; - VALUETYPE *atomic_ener_ = &atom_energy[0]; - VALUETYPE *atomic_virial_ = &atom_virial[0]; std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, natoms, fparam, aparam); + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, natoms * daparam, aparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - - _DP_DeepPotComputeMixedType( - dp, nframes, natoms, coord_, atype_, box_, fparam__, aparam__, ener_, - force_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepPotCheckOK, dp); + _DP_DeepPotComputeNListSP(dp, nframes, natoms, coord_, spin_, + atype_, box_, nghost, lmp_list.nl, ago, + fparam__, aparam__, ener_, force_, + force_mag_, virial_, nullptr, nullptr); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + }; + + // support spin + template + void compute_spin( + ENERGYVTYPE &ener, + std::vector &force, + std::vector &force_mag, + std::vector &virial, + std::vector &atom_energy, + std::vector &atom_virial, + const std::vector &coord, + const std::vector &spin, + const std::vector &atype, + const std::vector &box, + const int nghost, + const InputNlist &lmp_list, + const int &ago, + const std::vector &fparam = std::vector(), + const std::vector &aparam = std::vector()) { + unsigned int natoms = atype.size(); + unsigned int nframes = natoms > 0 ? coord.size() / natoms / 3 : 1; + assert(nframes * natoms * 3 == coord.size()); + if (!box.empty()) { + assert(box.size() == nframes * 9); + } + const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; + const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; + const int *atype_ = &atype[0]; + double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); + force.resize(static_cast(nframes) * natoms * 3); + force_mag.resize(static_cast(nframes) * natoms * 3); + virial.resize(static_cast(nframes) * 9); + atom_energy.resize(static_cast(nframes) * natoms); + atom_virial.resize(static_cast(nframes) * natoms * 9); + VALUETYPE *force_ = &force[0]; + VALUETYPE *force_mag_ = &force_mag[0]; + VALUETYPE *virial_ = &virial[0]; + VALUETYPE *atomic_ener_ = &atom_energy[0]; + VALUETYPE *atomic_virial_ = &atom_virial[0]; + std::vector fparam_, aparam_; + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); + tile_fparam_aparam(fparam_, nframes, dfparam, fparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); + const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; + const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; + _DP_DeepPotComputeNListSP( + dp, nframes, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, + ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, + atomic_ener_, atomic_virial_); + DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; + + private: + DP_DeepSpin *dp; +}; + +/** + * @brief Deep Potential base model deviation. + **/ +class DeepBaseModelDevi { + public: + /** + * @brief DP model deviation constructor without initialization. + **/ + DeepBaseModelDevi() : dpbase(nullptr) {}; + ~DeepBaseModelDevi() { DP_DeleteDeepBaseModelDevi(dpbase); }; + /** * @brief Get the cutoff radius. * @return The cutoff radius. **/ double cutoff() const { - assert(dp); - return DP_DeepPotGetCutoff(dp); + assert(dpbase); + return DP_DeepBaseModelDeviGetCutoff(dpbase); }; /** * @brief Get the number of types. * @return The number of types. **/ int numb_types() const { - assert(dp); - return DP_DeepPotGetNumbTypes(dp); + assert(dpbase); + return DP_DeepBaseModelDeviGetNumbTypes(dpbase); }; /** * @brief Get the number of types with spin. * @return The number of types with spin. **/ int numb_types_spin() const { - assert(dp); - return DP_DeepPotGetNumbTypesSpin(dp); - }; - /** - * @brief Get the type map (element name of the atom types) of this model. - * @param[out] type_map The type map of this model. - **/ - void get_type_map(std::string &type_map) { - const char *type_map_c = DP_DeepPotGetTypeMap(dp); - type_map.assign(type_map_c); - DP_DeleteChar(type_map_c); + assert(dpbase); + return DP_DeepBaseModelDeviGetNumbTypesSpin(dpbase); }; - /** - * @brief Print the summary of DeePMD-kit, including the version and the build - * information. - * @param[in] pre The prefix to each line. - */ - void print_summary(const std::string &pre) const { - DP_PrintSummary(pre.c_str()); - } /** * @brief Get the dimension of the frame parameter. * @return The dimension of the frame parameter. **/ int dim_fparam() const { - assert(dp); + assert(dpbase); return dfparam; } /** @@ -1574,12 +1773,128 @@ class DeepPot { * @return The dimension of the atomic parameter. **/ int dim_aparam() const { - assert(dp); + assert(dpbase); return daparam; } + /** + * @brief Compute the average of vectors. + * @param[out] avg The average of vectors. + * @param[in] xx The vectors of all models. + **/ + template + void compute_avg(std::vector &avg, + const std::vector> &xx) { + assert(xx.size() == numb_models); + if (numb_models == 0) { + return; + } - private: - DP_DeepPot *dp; + avg.resize(xx[0].size()); + fill(avg.begin(), avg.end(), VALUETYPE(0.)); + + for (unsigned ii = 0; ii < numb_models; ++ii) { + for (unsigned jj = 0; jj < avg.size(); ++jj) { + avg[jj] += xx[ii][jj]; + } + } + + for (unsigned jj = 0; jj < avg.size(); ++jj) { + avg[jj] /= VALUETYPE(numb_models); + } + }; + /** + * @brief Compute the standard deviation of vectors. + * @param[out] std The standard deviation of vectors. + * @param[in] avg The average of vectors. + * @param[in] xx The vectors of all models. + * @param[in] stride The stride to compute the deviation. + **/ + template + void compute_std(std::vector &std, + const std::vector &avg, + const std::vector> &xx, + const int &stride) { + assert(xx.size() == numb_models); + if (numb_models == 0) { + return; + } + + unsigned ndof = avg.size(); + unsigned nloc = ndof / stride; + assert(nloc * stride == ndof); + + std.resize(nloc); + fill(std.begin(), std.end(), VALUETYPE(0.)); + + for (unsigned ii = 0; ii < numb_models; ++ii) { + for (unsigned jj = 0; jj < nloc; ++jj) { + const VALUETYPE *tmp_f = &(xx[ii][static_cast(jj) * stride]); + const VALUETYPE *tmp_avg = &(avg[static_cast(jj) * stride]); + for (unsigned dd = 0; dd < stride; ++dd) { + VALUETYPE vdiff = tmp_f[dd] - tmp_avg[dd]; + std[jj] += vdiff * vdiff; + } + } + } + + for (unsigned jj = 0; jj < nloc; ++jj) { + std[jj] = sqrt(std[jj] / VALUETYPE(numb_models)); + } + }; + /** + * @brief Compute the relative standard deviation of vectors. + * @param[out] std The standard deviation of vectors. + * @param[in] avg The average of vectors. + * @param[in] eps The level parameter for computing the deviation. + * @param[in] stride The stride to compute the deviation. + **/ + template + void compute_relative_std(std::vector &std, + const std::vector &avg, + const VALUETYPE eps, + const int &stride) { + unsigned ndof = avg.size(); + unsigned nloc = std.size(); + assert(nloc * stride == ndof); + + for (unsigned ii = 0; ii < nloc; ++ii) { + const VALUETYPE *tmp_avg = &(avg[static_cast(ii) * stride]); + VALUETYPE f_norm = 0.0; + for (unsigned dd = 0; dd < stride; ++dd) { + f_norm += tmp_avg[dd] * tmp_avg[dd]; + } + f_norm = sqrt(f_norm); + std[ii] /= f_norm + eps; + } + }; + /** + * @brief Compute the standard deviation of forces. + * @param[out] std The standard deviation of forces. + * @param[in] avg The average of forces. + * @param[in] xx The vectors of all forces. + **/ + template + void compute_std_f(std::vector &std, + const std::vector &avg, + const std::vector> &xx) { + compute_std(std, avg, xx, 3); + }; + /** + * @brief Compute the relative standard deviation of forces. + * @param[out] std The relative standard deviation of forces. + * @param[in] avg The relative average of forces. + * @param[in] eps The level parameter for computing the deviation. + **/ + template + void compute_relative_std_f(std::vector &std, + const std::vector &avg, + const VALUETYPE eps) { + compute_relative_std(std, avg, eps, 3); + }; + + protected: + DP_DeepBaseModelDevi *dpbase; + int numb_models; int dfparam; int daparam; bool aparam_nall; @@ -1622,13 +1937,15 @@ class DeepPot { /** * @brief Deep Potential model deviation. **/ -class DeepPotModelDevi { +class DeepPotModelDevi : public DeepBaseModelDevi { public: /** * @brief DP model deviation constructor without initialization. **/ DeepPotModelDevi() : dp(nullptr) {}; - ~DeepPotModelDevi() { DP_DeleteDeepPotModelDevi(dp); }; + ~DeepPotModelDevi() { + // the base destructor will be called + }; /** * @brief DP model deviation constructor with initialization. * @param[in] models The names of the frozen model file. @@ -1678,11 +1995,12 @@ class DeepPotModelDevi { dp = DP_NewDeepPotModelDeviWithParam( cstrings.data(), cstrings.size(), gpu_rank, c_file_contents.data(), c_file_contents.size(), size_file_contents.data()); - DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); + DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); numb_models = models.size(); - dfparam = DP_DeepPotModelDeviGetDimFParam(dp); - daparam = DP_DeepPotModelDeviGetDimAParam(dp); - aparam_nall = DP_DeepPotModelDeviIsAParamNAll(dp); + dfparam = DP_DeepBaseModelDeviGetDimFParam((DP_DeepBaseModelDevi *)dp); + daparam = DP_DeepBaseModelDeviGetDimAParam((DP_DeepBaseModelDevi *)dp); + aparam_nall = DP_DeepBaseModelDeviIsAParamNAll((DP_DeepBaseModelDevi *)dp); + dpbase = (DP_DeepBaseModelDevi *)dp; }; /** @@ -1744,7 +2062,7 @@ class DeepPotModelDevi { _DP_DeepPotModelDeviCompute(dp, natoms, coord_, atype_, box_, fparam__, aparam__, ener_, force_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); + DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); // reshape ener.resize(numb_models); @@ -1829,7 +2147,7 @@ class DeepPotModelDevi { _DP_DeepPotModelDeviCompute( dp, natoms, coord_, atype_, box_, fparam__, aparam__, ener_, force_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); + DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); // reshape ener.resize(numb_models); @@ -1926,7 +2244,7 @@ class DeepPotModelDevi { _DP_DeepPotModelDeviComputeNList( dp, natoms, coord_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); + DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); // reshape ener.resize(numb_models); @@ -1994,7 +2312,7 @@ class DeepPotModelDevi { dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); + DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); // reshape ener.resize(numb_models); force.resize(numb_models); @@ -2092,7 +2410,7 @@ class DeepPotModelDevi { _DP_DeepPotModelDeviComputeNList( dp, natoms, coord_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); + DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); // reshape ener.resize(numb_models); @@ -2177,7 +2495,7 @@ class DeepPotModelDevi { dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); + DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); // reshape ener.resize(numb_models); force.resize(numb_models); @@ -2209,202 +2527,241 @@ class DeepPotModelDevi { } } }; + + private: + DP_DeepPotModelDevi *dp; +}; + +class DeepSpinModelDevi : public DeepBaseModelDevi { + public: /** - * @brief Get the cutoff radius. - * @return The cutoff radius. - **/ - double cutoff() const { - assert(dp); - return DP_DeepPotModelDeviGetCutoff(dp); - }; - /** - * @brief Get the number of types. - * @return The number of types. + * @brief DP model deviation constructor without initialization. **/ - int numb_types() const { - assert(dp); - return DP_DeepPotModelDeviGetNumbTypes(dp); + DeepSpinModelDevi() : dp(nullptr) {}; + ~DeepSpinModelDevi() { + // the base destructor will be called }; /** - * @brief Get the number of types with spin. - * @return The number of types with spin. + * @brief DP model deviation constructor with initialization. + * @param[in] models The names of the frozen model file. **/ - int numb_types_spin() const { - assert(dp); - return DP_DeepPotModelDeviGetNumbTypesSpin(dp); + DeepSpinModelDevi(const std::vector &models) : dp(nullptr) { + try { + init(models); + } catch (...) { + // Clean up and rethrow, as the destructor will not be called + if (dp) { + DP_DeleteDeepSpinModelDevi(dp); + } + throw; + } }; /** - * @brief Get the dimension of the frame parameter. - * @return The dimension of the frame parameter. - **/ - int dim_fparam() const { - assert(dp); - return dfparam; - } - /** - * @brief Get the dimension of the atomic parameter. - * @return The dimension of the atomic parameter. + * @brief Initialize the DP model deviation. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. + * @param[in] file_content The content of the frozen model file. **/ - int dim_aparam() const { - assert(dp); - return daparam; - } - /** - * @brief Compute the average of vectors. - * @param[out] avg The average of vectors. - * @param[in] xx The vectors of all models. - **/ - template - void compute_avg(std::vector &avg, - const std::vector> &xx) { - assert(xx.size() == numb_models); - if (numb_models == 0) { + void init(const std::vector &models, + const int &gpu_rank = 0, + const std::vector &file_content = + std::vector()) { + if (dp) { + std::cerr << "WARNING: deepmd-kit should not be initialized twice, do " + "nothing at the second call of initializer" + << std::endl; return; } - - avg.resize(xx[0].size()); - fill(avg.begin(), avg.end(), VALUETYPE(0.)); - - for (unsigned ii = 0; ii < numb_models; ++ii) { - for (unsigned jj = 0; jj < avg.size(); ++jj) { - avg[jj] += xx[ii][jj]; - } + std::vector cstrings; + cstrings.reserve(models.size()); + for (std::string const &str : models) { + cstrings.push_back(str.data()); } - for (unsigned jj = 0; jj < avg.size(); ++jj) { - avg[jj] /= VALUETYPE(numb_models); + std::vector c_file_contents; + std::vector size_file_contents; + c_file_contents.reserve(file_content.size()); + size_file_contents.reserve(file_content.size()); + for (std::string const &str : file_content) { + c_file_contents.push_back(str.data()); + size_file_contents.push_back(str.size()); } + + dp = DP_NewDeepSpinModelDeviWithParam( + cstrings.data(), cstrings.size(), gpu_rank, c_file_contents.data(), + c_file_contents.size(), size_file_contents.data()); + DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); + numb_models = models.size(); + dfparam = DP_DeepBaseModelDeviGetDimFParam((DP_DeepBaseModelDevi *)dp); + daparam = DP_DeepBaseModelDeviGetDimAParam((DP_DeepBaseModelDevi *)dp); + aparam_nall = DP_DeepBaseModelDeviIsAParamNAll((DP_DeepBaseModelDevi *)dp); + dpbase = (DP_DeepBaseModelDevi *)dp; }; - /** - * @brief Compute the standard deviation of vectors. - * @param[out] std The standard deviation of vectors. - * @param[in] avg The average of vectors. - * @param[in] xx The vectors of all models. - * @param[in] stride The stride to compute the deviation. - **/ + // support spin template - void compute_std(std::vector &std, - const std::vector &avg, - const std::vector> &xx, - const int &stride) { - assert(xx.size() == numb_models); - if (numb_models == 0) { - return; + void compute_spin( + std::vector &ener, + std::vector> &force, + std::vector> &force_mag, + std::vector> &virial, + const std::vector &coord, + const std::vector &spin, + const std::vector &atype, + const std::vector &box, + const int nghost, + const InputNlist &lmp_list, + const int &ago, + const std::vector &fparam = std::vector(), + const std::vector &aparam = std::vector()) { + unsigned int natoms = atype.size(); + unsigned int nframes = 1; + assert(natoms * 3 == coord.size()); + if (!box.empty()) { + assert(box.size() == 9); } - - unsigned ndof = avg.size(); - unsigned nloc = ndof / stride; - assert(nloc * stride == ndof); - - std.resize(nloc); - fill(std.begin(), std.end(), VALUETYPE(0.)); - - for (unsigned ii = 0; ii < numb_models; ++ii) { - for (unsigned jj = 0; jj < nloc; ++jj) { - const VALUETYPE *tmp_f = &(xx[ii][static_cast(jj) * stride]); - const VALUETYPE *tmp_avg = &(avg[static_cast(jj) * stride]); - for (unsigned dd = 0; dd < stride; ++dd) { - VALUETYPE vdiff = tmp_f[dd] - tmp_avg[dd]; - std[jj] += vdiff * vdiff; - } + const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; + const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; + const int *atype_ = &atype[0]; + // memory will be continous for std::vector but not std::vector + std::vector energy_flat(numb_models); + std::vector force_flat(static_cast(numb_models) * + natoms * 3); + std::vector force_mag_flat(static_cast(numb_models) * + natoms * 3); + std::vector virial_flat(numb_models * 9); + double *ener_ = &energy_flat[0]; + VALUETYPE *force_ = &force_flat[0]; + VALUETYPE *force_mag_ = &force_mag_flat[0]; + VALUETYPE *virial_ = &virial_flat[0]; + std::vector fparam_, aparam_; + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); + tile_fparam_aparam(fparam_, nframes, dfparam, fparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); + const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; + const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; + _DP_DeepPotModelDeviComputeNListSP( + dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, + fparam__, aparam__, ener_, force_, force_mag_, virial_, nullptr, + nullptr); + DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); + // reshape + ener.resize(numb_models); + force.resize(numb_models); + force_mag.resize(numb_models); + virial.resize(numb_models); + for (int i = 0; i < numb_models; i++) { + ener[i] = energy_flat[i]; + force[i].resize(static_cast(natoms) * 3); + force_mag[i].resize(static_cast(natoms) * 3); + virial[i].resize(9); + for (int j = 0; j < natoms * 3; j++) { + force[i][j] = force_flat[i * natoms * 3 + j]; } - } - - for (unsigned jj = 0; jj < nloc; ++jj) { - std[jj] = sqrt(std[jj] / VALUETYPE(numb_models)); - } - }; - /** - * @brief Compute the relative standard deviation of vectors. - * @param[out] std The standard deviation of vectors. - * @param[in] avg The average of vectors. - * @param[in] eps The level parameter for computing the deviation. - * @param[in] stride The stride to compute the deviation. - **/ - template - void compute_relative_std(std::vector &std, - const std::vector &avg, - const VALUETYPE eps, - const int &stride) { - unsigned ndof = avg.size(); - unsigned nloc = std.size(); - assert(nloc * stride == ndof); - - for (unsigned ii = 0; ii < nloc; ++ii) { - const VALUETYPE *tmp_avg = &(avg[static_cast(ii) * stride]); - VALUETYPE f_norm = 0.0; - for (unsigned dd = 0; dd < stride; ++dd) { - f_norm += tmp_avg[dd] * tmp_avg[dd]; + for (int j = 0; j < natoms * 3; j++) { + force_mag[i][j] = force_mag_flat[i * natoms * 3 + j]; + } + for (int j = 0; j < 9; j++) { + virial[i][j] = virial_flat[i * 9 + j]; } - f_norm = sqrt(f_norm); - std[ii] /= f_norm + eps; } }; - /** - * @brief Compute the standard deviation of forces. - * @param[out] std The standard deviation of forces. - * @param[in] avg The average of forces. - * @param[in] xx The vectors of all forces. - **/ - template - void compute_std_f(std::vector &std, - const std::vector &avg, - const std::vector> &xx) { - compute_std(std, avg, xx, 3); - }; - /** - * @brief Compute the relative standard deviation of forces. - * @param[out] std The relative standard deviation of forces. - * @param[in] avg The relative average of forces. - * @param[in] eps The level parameter for computing the deviation. - **/ - template - void compute_relative_std_f(std::vector &std, - const std::vector &avg, - const VALUETYPE eps) { - compute_relative_std(std, avg, eps, 3); - }; - private: - DP_DeepPotModelDevi *dp; - int numb_models; - int dfparam; - int daparam; - bool aparam_nall; + // support spin template - void validate_fparam_aparam(const int &nframes, - const int &nloc, - const std::vector &fparam, - const std::vector &aparam) const { - if (fparam.size() != dfparam && - fparam.size() != static_cast(nframes) * dfparam) { - throw deepmd::hpp::deepmd_exception( - "the dim of frame parameter provided is not consistent with what the " - "model uses"); - } - - if (aparam.size() != static_cast(daparam) * nloc && - aparam.size() != static_cast(nframes) * daparam * nloc) { - throw deepmd::hpp::deepmd_exception( - "the dim of atom parameter provided is not consistent with what the " - "model uses"); + void compute_spin( + std::vector &ener, + std::vector> &force, + std::vector> &force_mag, + std::vector> &virial, + std::vector> &atom_energy, + std::vector> &atom_virial, + const std::vector &coord, + const std::vector &spin, + const std::vector &atype, + const std::vector &box, + const int nghost, + const InputNlist &lmp_list, + const int &ago, + const std::vector &fparam = std::vector(), + const std::vector &aparam = std::vector()) { + unsigned int natoms = atype.size(); + unsigned int nframes = 1; + assert(natoms * 3 == coord.size()); + if (!box.empty()) { + assert(box.size() == 9); } - } - template - void tile_fparam_aparam(std::vector &out_param, - const int &nframes, - const int &dparam, - const std::vector ¶m) const { - if (param.size() == dparam) { - out_param.resize(static_cast(nframes) * dparam); - for (int ii = 0; ii < nframes; ++ii) { - std::copy(param.begin(), param.end(), - out_param.begin() + static_cast(ii) * dparam); + const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; + const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; + const int *atype_ = &atype[0]; + std::vector energy_flat(numb_models); + std::vector force_flat(static_cast(numb_models) * + natoms * 3); + std::vector force_mag_flat(static_cast(numb_models) * + natoms * 3); + std::vector virial_flat(numb_models * 9); + std::vector atom_energy_flat(static_cast(numb_models) * + natoms); + std::vector atom_virial_flat(static_cast(numb_models) * + natoms * 9); + double *ener_ = &energy_flat[0]; + VALUETYPE *force_ = &force_flat[0]; + VALUETYPE *force_mag_ = &force_mag_flat[0]; + VALUETYPE *virial_ = &virial_flat[0]; + VALUETYPE *atomic_ener_ = &atom_energy_flat[0]; + VALUETYPE *atomic_virial_ = &atom_virial_flat[0]; + std::vector fparam_, aparam_; + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); + tile_fparam_aparam(fparam_, nframes, dfparam, fparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); + const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; + const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; + _DP_DeepPotModelDeviComputeNListSP( + dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, + fparam__, aparam__, ener_, force_, force_mag_, virial_, atomic_ener_, + atomic_virial_); + DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); + // reshape + ener.resize(numb_models); + force.resize(numb_models); + force_mag.resize(numb_models); + virial.resize(numb_models); + atom_energy.resize(numb_models); + atom_virial.resize(numb_models); + for (int i = 0; i < numb_models; i++) { + ener[i] = energy_flat[i]; + force[i].resize(static_cast(natoms) * 3); + force_mag[i].resize(static_cast(natoms) * 3); + virial[i].resize(9); + atom_energy[i].resize(natoms); + atom_virial[i].resize(static_cast(natoms) * 9); + for (int j = 0; j < natoms * 3; j++) { + force[i][j] = force_flat[i * natoms * 3 + j]; + } + for (int j = 0; j < natoms * 3; j++) { + force_mag[i][j] = force_mag_flat[i * natoms * 3 + j]; + } + for (int j = 0; j < 9; j++) { + virial[i][j] = virial_flat[i * 9 + j]; + } + for (int j = 0; j < natoms; j++) { + atom_energy[i][j] = atom_energy_flat[i * natoms + j]; + } + for (int j = 0; j < natoms * 9; j++) { + atom_virial[i][j] = atom_virial_flat[i * natoms * 9 + j]; } - } else if (param.size() == static_cast(nframes) * dparam) { - out_param = param; } - } + }; + + private: + DP_DeepSpinModelDevi *dp; }; /** diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index 992fb8404a..fe8873d18b 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -45,13 +45,28 @@ DP_Nlist* DP_NewNlist_comm(int inum_, void DP_NlistSetMask(DP_Nlist* nl, int mask) { nl->nl.set_mask(mask); } void DP_DeleteNlist(DP_Nlist* nl) { delete nl; } -DP_DeepPot::DP_DeepPot() {} -DP_DeepPot::DP_DeepPot(deepmd::DeepPot& dp) : dp(dp) { - dfparam = dp.dim_fparam(); - daparam = dp.dim_aparam(); - aparam_nall = dp.is_aparam_nall(); -} +// DP Base Model +DP_DeepBaseModel::DP_DeepBaseModel() {} +DP_DeepBaseModel::DP_DeepBaseModel(deepmd::DeepBaseModel& dpbase) + : dpbase(dpbase) { + dfparam = dpbase.dim_fparam(); + daparam = dpbase.dim_aparam(); + aparam_nall = dpbase.is_aparam_nall(); +} +void DP_DeleteDeepBaseModel(DP_DeepBaseModel* dpbase) { delete dpbase; } + +// DP Base Model Devi +DP_DeepBaseModelDevi::DP_DeepBaseModelDevi() {} +DP_DeepBaseModelDevi::DP_DeepBaseModelDevi(deepmd::DeepBaseModelDevi& dpbase) + : dpbase(dpbase) { + dfparam = dpbase.dim_fparam(); + daparam = dpbase.dim_aparam(); + aparam_nall = dpbase.is_aparam_nall(); +} +void DP_DeleteDeepBaseModelDevi(DP_DeepBaseModelDevi* dp) { delete dp; } +DP_DeepPot::DP_DeepPot() {} +DP_DeepPot::DP_DeepPot(deepmd::DeepPot& dp) : DP_DeepBaseModel(dp), dp(dp) {} DP_DeepPot* DP_NewDeepPot(const char* c_model) { std::string model(c_model); DP_NEW_OK(DP_DeepPot, deepmd::DeepPot dp(model); @@ -80,16 +95,11 @@ DP_DeepPot* DP_NewDeepPotWithParam2(const char* c_model, DP_NEW_OK(DP_DeepPot, deepmd::DeepPot dp(model, gpu_rank, file_content); DP_DeepPot* new_dp = new DP_DeepPot(dp); return new_dp;) } - void DP_DeleteDeepPot(DP_DeepPot* dp) { delete dp; } DP_DeepPotModelDevi::DP_DeepPotModelDevi() {} DP_DeepPotModelDevi::DP_DeepPotModelDevi(deepmd::DeepPotModelDevi& dp) - : dp(dp) { - dfparam = dp.dim_fparam(); - daparam = dp.dim_aparam(); - aparam_nall = dp.is_aparam_nall(); -} + : DP_DeepBaseModelDevi(dp), dp(dp) {} DP_DeepPotModelDevi* DP_NewDeepPotModelDevi(const char** c_models, int n_models) { @@ -121,6 +131,59 @@ DP_DeepPotModelDevi* DP_NewDeepPotModelDeviWithParam( void DP_DeleteDeepPotModelDevi(DP_DeepPotModelDevi* dp) { delete dp; } +DP_DeepSpin::DP_DeepSpin() {} +DP_DeepSpin::DP_DeepSpin(deepmd::DeepSpin& dp) : DP_DeepBaseModel(dp), dp(dp) {} +DP_DeepSpin* DP_NewDeepSpin(const char* c_model) { + std::string model(c_model); + DP_NEW_OK(DP_DeepSpin, deepmd::DeepSpin dp(model); + DP_DeepSpin* new_dp = new DP_DeepSpin(dp); return new_dp;) +} +DP_DeepSpin* DP_NewDeepSpinWithParam2(const char* c_model, + const int gpu_rank, + const char* c_file_content, + const int size_file_content) { + std::string model(c_model); + std::string file_content(c_file_content, c_file_content + size_file_content); + DP_NEW_OK(DP_DeepSpin, deepmd::DeepSpin dp(model, gpu_rank, file_content); + DP_DeepSpin* new_dp = new DP_DeepSpin(dp); return new_dp;) +} + +void DP_DeleteDeepSpin(DP_DeepSpin* dp) { delete dp; } + +DP_DeepSpinModelDevi::DP_DeepSpinModelDevi() {} +DP_DeepSpinModelDevi::DP_DeepSpinModelDevi(deepmd::DeepSpinModelDevi& dp) + : DP_DeepBaseModelDevi(dp), dp(dp) {} + +DP_DeepSpinModelDevi* DP_NewDeepSpinModelDevi(const char** c_models, + int n_models) { + std::vector model(c_models, c_models + n_models); + DP_NEW_OK(DP_DeepSpinModelDevi, deepmd::DeepSpinModelDevi dp(model); + DP_DeepSpinModelDevi* new_dp = new DP_DeepSpinModelDevi(dp); + return new_dp;) +} + +DP_DeepSpinModelDevi* DP_NewDeepSpinModelDeviWithParam( + const char** c_models, + const int n_models, + const int gpu_rank, + const char** c_file_contents, + const int n_file_contents, + const int* size_file_contents) { + std::vector model(c_models, c_models + n_models); + std::vector file_content; + file_content.reserve(n_file_contents); + for (int ii = 0; ii < n_file_contents; ++ii) { + file_content.push_back(std::string( + c_file_contents[ii], c_file_contents[ii] + size_file_contents[ii])); + } + DP_NEW_OK(DP_DeepSpinModelDevi, + deepmd::DeepSpinModelDevi dp(model, gpu_rank, file_content); + DP_DeepSpinModelDevi* new_dp = new DP_DeepSpinModelDevi(dp); + return new_dp;) +} + +void DP_DeleteDeepSpinModelDevi(DP_DeepSpinModelDevi* dp) { delete dp; } + DP_DeepTensor::DP_DeepTensor() {} DP_DeepTensor::DP_DeepTensor(deepmd::DeepTensor& dt) : dt(dt) {} @@ -254,21 +317,21 @@ template void DP_DeepPotCompute_variant(DP_DeepPot* dp, float* atomic_virial); // support spin template -inline void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, - const int nframes, - const int natoms, - const VALUETYPE* coord, - const VALUETYPE* spin, - const int* atype, - const VALUETYPE* cell, - const VALUETYPE* fparam, - const VALUETYPE* aparam, - double* energy, - VALUETYPE* force, - VALUETYPE* force_mag, - VALUETYPE* virial, - VALUETYPE* atomic_energy, - VALUETYPE* atomic_virial) { +inline void DP_DeepSpinCompute_variant(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const VALUETYPE* coord, + const VALUETYPE* spin, + const int* atype, + const VALUETYPE* cell, + const VALUETYPE* fparam, + const VALUETYPE* aparam, + double* energy, + VALUETYPE* force, + VALUETYPE* force_mag, + VALUETYPE* virial, + VALUETYPE* atomic_energy, + VALUETYPE* atomic_virial) { // init C++ vectors from C arrays std::vector coord_(coord, coord + nframes * natoms * 3); std::vector spin_(spin, spin + nframes * natoms * 3); @@ -312,37 +375,37 @@ inline void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, } } -template void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, - const int nframes, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial); - -template void DP_DeepPotCompute_variant_sp(DP_DeepPot* dp, - const int nframes, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const float* fparam, - const float* aparam, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial); +template void DP_DeepSpinCompute_variant(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); + +template void DP_DeepSpinCompute_variant(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); template inline void DP_DeepPotComputeNList_variant(DP_DeepPot* dp, @@ -445,24 +508,24 @@ template void DP_DeepPotComputeNList_variant(DP_DeepPot* dp, // support spin template -inline void DP_DeepPotComputeNList_variant_sp(DP_DeepPot* dp, - const int nframes, - const int natoms, - const VALUETYPE* coord, - const VALUETYPE* spin, - const int* atype, - const VALUETYPE* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const VALUETYPE* fparam, - const VALUETYPE* aparam, - double* energy, - VALUETYPE* force, - VALUETYPE* force_mag, - VALUETYPE* virial, - VALUETYPE* atomic_energy, - VALUETYPE* atomic_virial) { +inline void DP_DeepSpinComputeNList_variant(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const VALUETYPE* coord, + const VALUETYPE* spin, + const int* atype, + const VALUETYPE* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const VALUETYPE* fparam, + const VALUETYPE* aparam, + double* energy, + VALUETYPE* force, + VALUETYPE* force_mag, + VALUETYPE* virial, + VALUETYPE* atomic_energy, + VALUETYPE* atomic_virial) { // init C++ vectors from C arrays std::vector coord_(coord, coord + nframes * natoms * 3); std::vector spin_(spin, spin + nframes * natoms * 3); @@ -508,42 +571,42 @@ inline void DP_DeepPotComputeNList_variant_sp(DP_DeepPot* dp, std::copy(av.begin(), av.end(), atomic_virial); } } -template void DP_DeepPotComputeNList_variant_sp(DP_DeepPot* dp, - const int nframes, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial); -template void DP_DeepPotComputeNList_variant_sp(DP_DeepPot* dp, - const int nframes, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const float* fparam, - const float* aparam, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial); +template void DP_DeepSpinComputeNList_variant(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); +template void DP_DeepSpinComputeNList_variant(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); template inline void DP_DeepPotComputeMixedType_variant(DP_DeepPot* dp, @@ -849,24 +912,24 @@ template void DP_DeepPotModelDeviComputeNList_variant( // support spin multi model. template -void DP_DeepPotModelDeviComputeNList_variant_sp(DP_DeepPotModelDevi* dp, - const int nframes, - const int natoms, - const VALUETYPE* coord, - const VALUETYPE* spin, - const int* atype, - const VALUETYPE* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const VALUETYPE* fparam, - const VALUETYPE* aparam, - double* energy, - VALUETYPE* force, - VALUETYPE* force_mag, - VALUETYPE* virial, - VALUETYPE* atomic_energy, - VALUETYPE* atomic_virial) { +void DP_DeepSpinModelDeviComputeNList_variant(DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const VALUETYPE* coord, + const VALUETYPE* spin, + const int* atype, + const VALUETYPE* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const VALUETYPE* fparam, + const VALUETYPE* aparam, + double* energy, + VALUETYPE* force, + VALUETYPE* force_mag, + VALUETYPE* virial, + VALUETYPE* atomic_energy, + VALUETYPE* atomic_virial) { if (nframes > 1) { throw std::runtime_error("nframes > 1 not supported yet"); } @@ -931,8 +994,8 @@ void DP_DeepPotModelDeviComputeNList_variant_sp(DP_DeepPotModelDevi* dp, std::copy(av_flat.begin(), av_flat.end(), atomic_virial); } } -template void DP_DeepPotModelDeviComputeNList_variant_sp( - DP_DeepPotModelDevi* dp, +template void DP_DeepSpinModelDeviComputeNList_variant( + DP_DeepSpinModelDevi* dp, const int nframes, const int natoms, const double* coord, @@ -950,8 +1013,8 @@ template void DP_DeepPotModelDeviComputeNList_variant_sp( double* virial, double* atomic_energy, double* atomic_virial); -template void DP_DeepPotModelDeviComputeNList_variant_sp( - DP_DeepPotModelDevi* dp, +template void DP_DeepSpinModelDeviComputeNList_variant( + DP_DeepSpinModelDevi* dp, const int nframes, const int natoms, const float* coord, @@ -1321,22 +1384,6 @@ void DP_DeepPotCompute(DP_DeepPot* dp, NULL, energy, force, virial, atomic_energy, atomic_virial); } -void DP_DeepPotComputeSP(DP_DeepPot* dp, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial) { - DP_DeepPotCompute_variant_sp(dp, 1, natoms, coord, spin, atype, cell, - NULL, NULL, energy, force, force_mag, - virial, atomic_energy, atomic_virial); -} void DP_DeepPotComputef(DP_DeepPot* dp, const int natoms, @@ -1353,23 +1400,6 @@ void DP_DeepPotComputef(DP_DeepPot* dp, atomic_virial); } -void DP_DeepPotComputefSP(DP_DeepPot* dp, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial) { - DP_DeepPotCompute_variant_sp(dp, 1, natoms, coord, spin, atype, cell, - NULL, NULL, energy, force, force_mag, - virial, atomic_energy, atomic_virial); -} - void DP_DeepPotComputeNList(DP_DeepPot* dp, const int natoms, const double* coord, @@ -1388,26 +1418,6 @@ void DP_DeepPotComputeNList(DP_DeepPot* dp, force, virial, atomic_energy, atomic_virial); } -void DP_DeepPotComputeNListSP(DP_DeepPot* dp, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial) { - DP_DeepPotComputeNList_variant_sp( - dp, 1, natoms, coord, spin, atype, cell, nghost, nlist, ago, NULL, NULL, - energy, force, force_mag, virial, atomic_energy, atomic_virial); -} - void DP_DeepPotComputeNListf(DP_DeepPot* dp, const int natoms, const float* coord, @@ -1426,26 +1436,6 @@ void DP_DeepPotComputeNListf(DP_DeepPot* dp, force, virial, atomic_energy, atomic_virial); } -void DP_DeepPotComputeNListfSP(DP_DeepPot* dp, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial) { - DP_DeepPotComputeNList_variant_sp( - dp, 1, natoms, coord, spin, atype, cell, nghost, nlist, ago, NULL, NULL, - energy, force, force_mag, virial, atomic_energy, atomic_virial); -} - // multiple frames void DP_DeepPotCompute2(DP_DeepPot* dp, const int nframes, @@ -1464,22 +1454,22 @@ void DP_DeepPotCompute2(DP_DeepPot* dp, fparam, aparam, energy, force, virial, atomic_energy, atomic_virial); } -void DP_DeepPotCompute2SP(DP_DeepPot* dp, - const int nframes, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial) { - DP_DeepPotCompute_variant_sp( +void DP_DeepSpinCompute2(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepSpinCompute_variant( dp, nframes, natoms, coord, spin, atype, cell, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); } @@ -1502,22 +1492,22 @@ void DP_DeepPotComputef2(DP_DeepPot* dp, atomic_energy, atomic_virial); } -void DP_DeepPotComputef2SP(DP_DeepPot* dp, - const int nframes, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const float* fparam, - const float* aparam, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial) { - DP_DeepPotCompute_variant_sp( +void DP_DeepSpinComputef2(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepSpinCompute_variant( dp, nframes, natoms, coord, spin, atype, cell, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); } @@ -1543,25 +1533,25 @@ void DP_DeepPotComputeNList2(DP_DeepPot* dp, aparam, energy, force, virial, atomic_energy, atomic_virial); } -void DP_DeepPotComputeNList2SP(DP_DeepPot* dp, - const int nframes, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial) { - DP_DeepPotComputeNList_variant_sp( +void DP_DeepSpinComputeNList2(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepSpinComputeNList_variant( dp, nframes, natoms, coord, spin, atype, cell, nghost, nlist, ago, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); } @@ -1587,25 +1577,25 @@ void DP_DeepPotComputeNListf2(DP_DeepPot* dp, aparam, energy, force, virial, atomic_energy, atomic_virial); } -void DP_DeepPotComputeNListf2SP(DP_DeepPot* dp, - const int nframes, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const float* fparam, - const float* aparam, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial) { - DP_DeepPotComputeNList_variant_sp( +void DP_DeepSpinComputeNListf2(DP_DeepSpin* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepSpinComputeNList_variant( dp, nframes, natoms, coord, spin, atype, cell, nghost, nlist, ago, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); } @@ -1648,28 +1638,39 @@ void DP_DeepPotComputeMixedTypef(DP_DeepPot* dp, virial, atomic_energy, atomic_virial); } -const char* DP_DeepPotGetTypeMap(DP_DeepPot* dp) { +// base model methods +const char* DP_DeepBaseModelGetTypeMap(DP_DeepBaseModel* dpbase) { std::string type_map; - dp->dp.get_type_map(type_map); + dpbase->dpbase.get_type_map(type_map); return string_to_char(type_map); } -double DP_DeepPotGetCutoff(DP_DeepPot* dp) { return dp->dp.cutoff(); } +double DP_DeepBaseModelGetCutoff(DP_DeepBaseModel* dpbase) { + return dpbase->dpbase.cutoff(); +} -int DP_DeepPotGetNumbTypes(DP_DeepPot* dp) { return dp->dp.numb_types(); } +int DP_DeepBaseModelGetNumbTypes(DP_DeepBaseModel* dpbase) { + return dpbase->dpbase.numb_types(); +} -int DP_DeepPotGetNumbTypesSpin(DP_DeepPot* dp) { - return dp->dp.numb_types_spin(); +int DP_DeepBaseModelGetNumbTypesSpin(DP_DeepBaseModel* dpbase) { + return dpbase->dpbase.numb_types_spin(); } -int DP_DeepPotGetDimFParam(DP_DeepPot* dp) { return dp->dfparam; } +int DP_DeepBaseModelGetDimFParam(DP_DeepBaseModel* dpbase) { + return dpbase->dfparam; +} -int DP_DeepPotGetDimAParam(DP_DeepPot* dp) { return dp->daparam; } +int DP_DeepBaseModelGetDimAParam(DP_DeepBaseModel* dpbase) { + return dpbase->daparam; +} -bool DP_DeepPotIsAParamNAll(DP_DeepPot* dp) { return dp->aparam_nall; } +bool DP_DeepBaseModelIsAParamNAll(DP_DeepBaseModel* dpbase) { + return dpbase->aparam_nall; +} -const char* DP_DeepPotCheckOK(DP_DeepPot* dp) { - return string_to_char(dp->exception); +const char* DP_DeepBaseModelCheckOK(DP_DeepBaseModel* dpbase) { + return string_to_char(dpbase->exception); } void DP_DeepPotModelDeviCompute(DP_DeepPotModelDevi* dp, @@ -1756,22 +1757,22 @@ void DP_DeepPotModelDeviComputeNList(DP_DeepPotModelDevi* dp, force, virial, atomic_energy, atomic_virial); } -void DP_DeepPotModelDeviComputeNListSP(DP_DeepPotModelDevi* dp, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial) { - DP_DeepPotModelDeviComputeNList_variant_sp( +void DP_DeepSpinModelDeviComputeNListSP(DP_DeepSpinModelDevi* dp, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepSpinModelDeviComputeNList_variant( dp, 1, natoms, coord, spin, atype, cell, nghost, nlist, ago, NULL, NULL, energy, force, force_mag, virial, atomic_energy, atomic_virial); } @@ -1794,22 +1795,22 @@ void DP_DeepPotModelDeviComputeNListf(DP_DeepPotModelDevi* dp, force, virial, atomic_energy, atomic_virial); } -void DP_DeepPotModelDeviComputeNListfSP(DP_DeepPotModelDevi* dp, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial) { - DP_DeepPotModelDeviComputeNList_variant_sp( +void DP_DeepSpinModelDeviComputeNListfSP(DP_DeepSpinModelDevi* dp, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepSpinModelDeviComputeNList_variant( dp, 1, natoms, coord, spin, atype, cell, nghost, nlist, ago, NULL, NULL, energy, force, force_mag, virial, atomic_energy, atomic_virial); } @@ -1835,25 +1836,25 @@ void DP_DeepPotModelDeviComputeNList2(DP_DeepPotModelDevi* dp, aparam, energy, force, virial, atomic_energy, atomic_virial); } -void DP_DeepPotModelDeviComputeNList2SP(DP_DeepPotModelDevi* dp, - const int nframes, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const double* fparam, - const double* aparam, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial) { - DP_DeepPotModelDeviComputeNList_variant_sp( +void DP_DeepSpinModelDeviComputeNList2(DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepSpinModelDeviComputeNList_variant( dp, nframes, natoms, coord, spin, atype, cell, nghost, nlist, ago, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); } @@ -1879,55 +1880,56 @@ void DP_DeepPotModelDeviComputeNListf2(DP_DeepPotModelDevi* dp, aparam, energy, force, virial, atomic_energy, atomic_virial); } -void DP_DeepPotModelDeviComputeNListf2SP(DP_DeepPotModelDevi* dp, - const int nframes, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - const float* fparam, - const float* aparam, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial) { - DP_DeepPotModelDeviComputeNList_variant_sp( +void DP_DeepSpinModelDeviComputeNListf2(DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const int nghost, + const DP_Nlist* nlist, + const int ago, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepSpinModelDeviComputeNList_variant( dp, nframes, natoms, coord, spin, atype, cell, nghost, nlist, ago, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); } -double DP_DeepPotModelDeviGetCutoff(DP_DeepPotModelDevi* dp) { - return dp->dp.cutoff(); +// base model +double DP_DeepBaseModelDeviGetCutoff(DP_DeepBaseModelDevi* dpbase) { + return dpbase->dpbase.cutoff(); } -int DP_DeepPotModelDeviGetNumbTypes(DP_DeepPotModelDevi* dp) { - return dp->dp.numb_types(); +int DP_DeepBaseModelDeviGetNumbTypes(DP_DeepBaseModelDevi* dpbase) { + return dpbase->dpbase.numb_types(); } -int DP_DeepPotModelDeviGetNumbTypesSpin(DP_DeepPotModelDevi* dp) { - return dp->dp.numb_types_spin(); +int DP_DeepBaseModelDeviGetNumbTypesSpin(DP_DeepBaseModelDevi* dpbase) { + return dpbase->dpbase.numb_types_spin(); } -int DP_DeepPotModelDeviGetDimFParam(DP_DeepPotModelDevi* dp) { - return dp->dfparam; +int DP_DeepBaseModelDeviGetDimFParam(DP_DeepBaseModelDevi* dpbase) { + return dpbase->dfparam; } -int DP_DeepPotModelDeviGetDimAParam(DP_DeepPotModelDevi* dp) { - return dp->daparam; +int DP_DeepBaseModelDeviGetDimAParam(DP_DeepBaseModelDevi* dpbase) { + return dpbase->daparam; } -bool DP_DeepPotModelDeviIsAParamNAll(DP_DeepPotModelDevi* dp) { - return dp->aparam_nall; +bool DP_DeepBaseModelDeviIsAParamNAll(DP_DeepBaseModelDevi* dpbase) { + return dpbase->aparam_nall; } -const char* DP_DeepPotModelDeviCheckOK(DP_DeepPotModelDevi* dp) { - return string_to_char(dp->exception); +const char* DP_DeepBaseModelDeviCheckOK(DP_DeepBaseModelDevi* dpbase) { + return string_to_char(dpbase->exception); } void DP_DeepTensorComputeTensor(DP_DeepTensor* dt, diff --git a/source/api_c/tests/test_deeppot_a.cc b/source/api_c/tests/test_deeppot_a.cc index b4a9a81f92..e3a1f6aa66 100644 --- a/source/api_c/tests/test_deeppot_a.cc +++ b/source/api_c/tests/test_deeppot_a.cc @@ -168,22 +168,22 @@ TEST_F(TestInferDeepPotA, float_infer) { } TEST_F(TestInferDeepPotA, cutoff) { - double cutoff = DP_DeepPotGetCutoff(dp); + double cutoff = DP_DeepBaseModelGetCutoff((DP_DeepBaseModel*)dp); EXPECT_EQ(cutoff, 6.0); } TEST_F(TestInferDeepPotA, numb_types) { - int numb_types = DP_DeepPotGetNumbTypes(dp); + int numb_types = DP_DeepBaseModelGetNumbTypes((DP_DeepBaseModel*)dp); EXPECT_EQ(numb_types, 2); } TEST_F(TestInferDeepPotA, numb_types_spin) { - int numb_types_spin = DP_DeepPotGetNumbTypesSpin(dp); + int numb_types_spin = DP_DeepBaseModelGetNumbTypesSpin((DP_DeepBaseModel*)dp); EXPECT_EQ(numb_types_spin, 0); } TEST_F(TestInferDeepPotA, type_map) { - const char* type_map = DP_DeepPotGetTypeMap(dp); + const char* type_map = DP_DeepBaseModelGetTypeMap((DP_DeepBaseModel*)dp); char expected_type_map[] = "O H"; EXPECT_EQ(strcmp(type_map, expected_type_map), 0); DP_DeleteChar(type_map); diff --git a/source/api_cc/include/DeepBaseModel.h b/source/api_cc/include/DeepBaseModel.h new file mode 100644 index 0000000000..72c54f65e4 --- /dev/null +++ b/source/api_cc/include/DeepBaseModel.h @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#pragma once + +#include + +#include "common.h" +#include "neighbor_list.h" + +namespace deepmd { +/** + * @brief Deep Potential Base Model. + **/ +class DeepBaseModelBase { + public: + /** + * @brief DP constructor without initialization. + **/ + DeepBaseModelBase() {}; + virtual ~DeepBaseModelBase() {}; + /** + * @brief DP constructor with initialization. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_content The content of the model file. If it is not empty, + *DP will read from the string instead of the file. + **/ + DeepBaseModelBase(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); + /** + * @brief Initialize the DP. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_content The content of the model file. If it is not empty, + *DP will read from the string instead of the file. + **/ + virtual void init(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = "") = 0; + + /** + * @brief Get the cutoff radius. + * @return The cutoff radius. + **/ + virtual double cutoff() const = 0; + /** + * @brief Get the number of types. + * @return The number of types. + **/ + virtual int numb_types() const = 0; + /** + * @brief Get the number of types with spin. + * @return The number of types with spin. + **/ + virtual int numb_types_spin() const = 0; + /** + * @brief Get the dimension of the frame parameter. + * @return The dimension of the frame parameter. + **/ + virtual int dim_fparam() const = 0; + /** + * @brief Get the dimension of the atomic parameter. + * @return The dimension of the atomic parameter. + **/ + virtual int dim_aparam() const = 0; + /** + * @brief Get the type map (element name of the atom types) of this model. + * @param[out] type_map The type map of this model. + **/ + virtual void get_type_map(std::string& type_map) = 0; + + /** + * @brief Get whether the atom dimension of aparam is nall instead of fparam. + * @param[out] aparam_nall whether the atom dimension of aparam is nall + *instead of fparam. + **/ + virtual bool is_aparam_nall() const = 0; +}; + +/** + * @brief Deep Potential Base Model to automatically switch backends. + **/ +class DeepBaseModel { + public: + /** + * @brief DP constructor without initialization. + **/ + DeepBaseModel(); + ~DeepBaseModel(); + /** + * @brief DP constructor with initialization. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_content The content of the model file. If it is not empty, + *DP will read from the string instead of the file. + **/ + DeepBaseModel(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); + + /** + * @brief Print the DP summary to the screen. + * @param[in] pre The prefix to each line. + **/ + void print_summary(const std::string& pre) const; + + /** + * @brief Get the cutoff radius. + * @return The cutoff radius. + **/ + double cutoff() const; + /** + * @brief Get the number of types. + * @return The number of types. + **/ + int numb_types() const; + /** + * @brief Get the number of types with spin. + * @return The number of types with spin. + **/ + int numb_types_spin() const; + /** + * @brief Get the dimension of the frame parameter. + * @return The dimension of the frame parameter. + **/ + int dim_fparam() const; + /** + * @brief Get the dimension of the atomic parameter. + * @return The dimension of the atomic parameter. + **/ + int dim_aparam() const; + /** + * @brief Get the type map (element name of the atom types) of this model. + * @param[out] type_map The type map of this model. + **/ + void get_type_map(std::string& type_map); + + /** + * @brief Get whether the atom dimension of aparam is nall instead of fparam. + * @param[out] aparam_nall whether the atom dimension of aparam is nall + *instead of fparam. + **/ + bool is_aparam_nall() const; + + protected: + bool inited; + std::shared_ptr dpbase; +}; + +class DeepBaseModelDevi { + public: + /** + * @brief DP model deviation constructor without initialization. + **/ + DeepBaseModelDevi(); + ~DeepBaseModelDevi(); + + /** + * @brief Get the cutoff radius. + * @return The cutoff radius. + **/ + double cutoff() const { + assert(inited); + return dpbases[0]->cutoff(); + }; + /** + * @brief Get the number of types. + * @return The number of types. + **/ + int numb_types() const { + assert(inited); + return dpbases[0]->numb_types(); + }; + /** + * @brief Get the number of types with spin. + * @return The number of types with spin. + **/ + int numb_types_spin() const { + assert(inited); + return dpbases[0]->numb_types_spin(); + }; + /** + * @brief Get the dimension of the frame parameter. + * @return The dimension of the frame parameter. + **/ + int dim_fparam() const { + assert(inited); + return dpbases[0]->dim_fparam(); + }; + /** + * @brief Get the dimension of the atomic parameter. + * @return The dimension of the atomic parameter. + **/ + int dim_aparam() const { + assert(inited); + return dpbases[0]->dim_aparam(); + }; + /** + * @brief Compute the average energy. + * @param[out] dener The average energy. + * @param[in] all_energy The energies of all models. + **/ + template + void compute_avg(VALUETYPE& dener, const std::vector& all_energy); + /** + * @brief Compute the average of vectors. + * @param[out] avg The average of vectors. + * @param[in] xx The vectors of all models. + **/ + template + void compute_avg(std::vector& avg, + const std::vector>& xx); + /** + * @brief Compute the standard deviation of vectors. + * @param[out] std The standard deviation of vectors. + * @param[in] avg The average of vectors. + * @param[in] xx The vectors of all models. + * @param[in] stride The stride to compute the deviation. + **/ + template + void compute_std(std::vector& std, + const std::vector& avg, + const std::vector>& xx, + const int& stride); + /** + * @brief Compute the relative standard deviation of vectors. + * @param[out] std The standard deviation of vectors. + * @param[in] avg The average of vectors. + * @param[in] eps The level parameter for computing the deviation. + * @param[in] stride The stride to compute the deviation. + **/ + template + void compute_relative_std(std::vector& std, + const std::vector& avg, + const VALUETYPE eps, + const int& stride); + /** + * @brief Compute the standard deviation of atomic energies. + * @param[out] std The standard deviation of atomic energies. + * @param[in] avg The average of atomic energies. + * @param[in] xx The vectors of all atomic energies. + **/ + template + void compute_std_e(std::vector& std, + const std::vector& avg, + const std::vector>& xx); + /** + * @brief Compute the standard deviation of forces. + * @param[out] std The standard deviation of forces. + * @param[in] avg The average of forces. + * @param[in] xx The vectors of all forces. + **/ + template + void compute_std_f(std::vector& std, + const std::vector& avg, + const std::vector>& xx); + /** + * @brief Compute the relative standard deviation of forces. + * @param[out] std The relative standard deviation of forces. + * @param[in] avg The relative average of forces. + * @param[in] eps The level parameter for computing the deviation. + **/ + template + void compute_relative_std_f(std::vector& std, + const std::vector& avg, + const VALUETYPE eps); + /** + * @brief Get whether the atom dimension of aparam is nall instead of fparam. + * @param[out] aparam_nall whether the atom dimension of aparam is nall + *instead of fparam. + **/ + bool is_aparam_nall() const { + assert(inited); + return dpbases[0]->is_aparam_nall(); + }; + + protected: + unsigned numb_models; + std::vector> + dpbases; // change to shared_ptr to make it inheritable + bool inited; +}; +} // namespace deepmd diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index d5f3f7d0da..86f07d33c4 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -3,6 +3,7 @@ #include +#include "DeepBaseModel.h" #include "common.h" #include "neighbor_list.h" @@ -10,7 +11,7 @@ namespace deepmd { /** * @brief Deep Potential. **/ -class DeepPotBase { +class DeepPotBase : public DeepBaseModelBase { public: /** * @brief DP constructor without initialization. @@ -320,49 +321,12 @@ class DeepPotBase { const std::vector& aparam, const bool atomic) = 0; /** @} */ - /** - * @brief Get the cutoff radius. - * @return The cutoff radius. - **/ - virtual double cutoff() const = 0; - /** - * @brief Get the number of types. - * @return The number of types. - **/ - virtual int numb_types() const = 0; - /** - * @brief Get the number of types with spin. - * @return The number of types with spin. - **/ - virtual int numb_types_spin() const = 0; - /** - * @brief Get the dimension of the frame parameter. - * @return The dimension of the frame parameter. - **/ - virtual int dim_fparam() const = 0; - /** - * @brief Get the dimension of the atomic parameter. - * @return The dimension of the atomic parameter. - **/ - virtual int dim_aparam() const = 0; - /** - * @brief Get the type map (element name of the atom types) of this model. - * @param[out] type_map The type map of this model. - **/ - virtual void get_type_map(std::string& type_map) = 0; - - /** - * @brief Get whether the atom dimension of aparam is nall instead of fparam. - * @param[out] aparam_nall whether the atom dimension of aparam is nall - *instead of fparam. - **/ - virtual bool is_aparam_nall() const = 0; }; /** * @brief Deep Potential to automatically switch backends. **/ -class DeepPot { +class DeepPot : public DeepBaseModel { public: /** * @brief DP constructor without initialization. @@ -390,11 +354,6 @@ class DeepPot { const int& gpu_rank = 0, const std::string& file_content = ""); - /** - * @brief Print the DP summary to the screen. - * @param[in] pre The prefix to each line. - **/ - void print_summary(const std::string& pre) const; /** * @brief Evaluate the energy, force and virial by using this DP. * @param[out] ener The system energy. @@ -911,50 +870,11 @@ class DeepPot { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); /** @} */ - /** - * @brief Get the cutoff radius. - * @return The cutoff radius. - **/ - double cutoff() const; - /** - * @brief Get the number of types. - * @return The number of types. - **/ - int numb_types() const; - /** - * @brief Get the number of types with spin. - * @return The number of types with spin. - **/ - int numb_types_spin() const; - /** - * @brief Get the dimension of the frame parameter. - * @return The dimension of the frame parameter. - **/ - int dim_fparam() const; - /** - * @brief Get the dimension of the atomic parameter. - * @return The dimension of the atomic parameter. - **/ - int dim_aparam() const; - /** - * @brief Get the type map (element name of the atom types) of this model. - * @param[out] type_map The type map of this model. - **/ - void get_type_map(std::string& type_map); - - /** - * @brief Get whether the atom dimension of aparam is nall instead of fparam. - * @param[out] aparam_nall whether the atom dimension of aparam is nall - *instead of fparam. - **/ - bool is_aparam_nall() const; - - private: - bool inited; + protected: std::shared_ptr dp; }; -class DeepPotModelDevi { +class DeepPotModelDevi : public DeepBaseModelDevi { public: /** * @brief DP model deviation constructor without initialization. @@ -1006,8 +926,8 @@ class DeepPotModelDevi { **/ template void compute(std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_virial, + std::vector>& all_force, + std::vector>& all_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, @@ -1039,10 +959,10 @@ class DeepPotModelDevi { **/ template void compute(std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_virial, - std::vector >& all_atom_energy, - std::vector >& all_atom_virial, + std::vector>& all_force, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, @@ -1074,8 +994,8 @@ class DeepPotModelDevi { **/ template void compute(std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_virial, + std::vector>& all_force, + std::vector>& all_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, @@ -1115,9 +1035,9 @@ class DeepPotModelDevi { template void compute_spin( std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_force_mag, - std::vector >& all_virial, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, const std::vector& coord, const std::vector& spin, const std::vector& atype, @@ -1155,10 +1075,10 @@ class DeepPotModelDevi { **/ template void compute(std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_virial, - std::vector >& all_atom_energy, - std::vector >& all_atom_virial, + std::vector>& all_force, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, @@ -1200,11 +1120,11 @@ class DeepPotModelDevi { template void compute_spin( std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_force_mag, - std::vector >& all_virial, - std::vector >& all_atom_energy, - std::vector >& all_atom_virial, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, const std::vector& coord, const std::vector& spin, const std::vector& atype, @@ -1214,128 +1134,8 @@ class DeepPotModelDevi { const int& ago, const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); - /** - * @brief Get the cutoff radius. - * @return The cutoff radius. - **/ - double cutoff() const { - assert(inited); - return dps[0].cutoff(); - }; - /** - * @brief Get the number of types. - * @return The number of types. - **/ - int numb_types() const { - assert(inited); - return dps[0].numb_types(); - }; - /** - * @brief Get the number of types with spin. - * @return The number of types with spin. - **/ - int numb_types_spin() const { - assert(inited); - return dps[0].numb_types_spin(); - }; - /** - * @brief Get the dimension of the frame parameter. - * @return The dimension of the frame parameter. - **/ - int dim_fparam() const { - assert(inited); - return dps[0].dim_fparam(); - }; - /** - * @brief Get the dimension of the atomic parameter. - * @return The dimension of the atomic parameter. - **/ - int dim_aparam() const { - assert(inited); - return dps[0].dim_aparam(); - }; - /** - * @brief Compute the average energy. - * @param[out] dener The average energy. - * @param[in] all_energy The energies of all models. - **/ - template - void compute_avg(VALUETYPE& dener, const std::vector& all_energy); - /** - * @brief Compute the average of vectors. - * @param[out] avg The average of vectors. - * @param[in] xx The vectors of all models. - **/ - template - void compute_avg(std::vector& avg, - const std::vector >& xx); - /** - * @brief Compute the standard deviation of vectors. - * @param[out] std The standard deviation of vectors. - * @param[in] avg The average of vectors. - * @param[in] xx The vectors of all models. - * @param[in] stride The stride to compute the deviation. - **/ - template - void compute_std(std::vector& std, - const std::vector& avg, - const std::vector >& xx, - const int& stride); - /** - * @brief Compute the relative standard deviation of vectors. - * @param[out] std The standard deviation of vectors. - * @param[in] avg The average of vectors. - * @param[in] eps The level parameter for computing the deviation. - * @param[in] stride The stride to compute the deviation. - **/ - template - void compute_relative_std(std::vector& std, - const std::vector& avg, - const VALUETYPE eps, - const int& stride); - /** - * @brief Compute the standard deviation of atomic energies. - * @param[out] std The standard deviation of atomic energies. - * @param[in] avg The average of atomic energies. - * @param[in] xx The vectors of all atomic energies. - **/ - template - void compute_std_e(std::vector& std, - const std::vector& avg, - const std::vector >& xx); - /** - * @brief Compute the standard deviation of forces. - * @param[out] std The standard deviation of forces. - * @param[in] avg The average of forces. - * @param[in] xx The vectors of all forces. - **/ - template - void compute_std_f(std::vector& std, - const std::vector& avg, - const std::vector >& xx); - /** - * @brief Compute the relative standard deviation of forces. - * @param[out] std The relative standard deviation of forces. - * @param[in] avg The relative average of forces. - * @param[in] eps The level parameter for computing the deviation. - **/ - template - void compute_relative_std_f(std::vector& std, - const std::vector& avg, - const VALUETYPE eps); - /** - * @brief Get whether the atom dimension of aparam is nall instead of fparam. - * @param[out] aparam_nall whether the atom dimension of aparam is nall - *instead of fparam. - **/ - bool is_aparam_nall() const { - assert(inited); - return dps[0].is_aparam_nall(); - }; - private: - unsigned numb_models; - std::vector dps; - bool inited; + protected: + std::vector> dps; }; } // namespace deepmd diff --git a/source/api_cc/include/DeepSpin.h b/source/api_cc/include/DeepSpin.h new file mode 100644 index 0000000000..babf1efaae --- /dev/null +++ b/source/api_cc/include/DeepSpin.h @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#pragma once + +#include + +#include "DeepBaseModel.h" +#include "common.h" +#include "neighbor_list.h" + +namespace deepmd { +/** + * @brief Deep Potential. + **/ +class DeepSpinBase : public DeepBaseModelBase { + public: + /** + * @brief DP constructor without initialization. + **/ + DeepSpinBase() {}; + virtual ~DeepSpinBase() {}; + /** + * @brief DP constructor with initialization. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_content The content of the model file. If it is not empty, + *DP will read from the string instead of the file. + **/ + DeepSpinBase(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); + /** + * @brief Initialize the DP. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_content The content of the model file. If it is not empty, + *DP will read from the string instead of the file. + **/ + virtual void init(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = "") = 0; + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. + * @note The double precision interface is used by i-PI, GROMACS, ABACUS, and + *CP2k. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @param[in] atomic Request atomic energy and virial if atomic is true. + * @{ + **/ + virtual void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) = 0; + virtual void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) = 0; + /** @} */ + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. + * @note The double precision interface is used by LAMMPS and AMBER. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] lmp_list The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @param[in] atomic Request atomic energy and virial if atomic is true. + * @{ + **/ + virtual void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) = 0; + virtual void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) = 0; + /** @} */ +}; + +/** + * @brief Deep Potential to automatically switch backends. + **/ +class DeepSpin : public DeepBaseModel { + public: + /** + * @brief DP constructor without initialization. + **/ + DeepSpin(); + ~DeepSpin(); + /** + * @brief DP constructor with initialization. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_content The content of the model file. If it is not empty, + *DP will read from the string instead of the file. + **/ + DeepSpin(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); + /** + * @brief Initialize the DP. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_content The content of the model file. If it is not empty, + *DP will read from the string instead of the file. + **/ + void init(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); + + /** + * @brief Evaluate the energy, force, magnetic force and virial by using this + *DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @{ + **/ + template + void compute_spin( + ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + template + void compute_spin( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + /** @} */ + + /** + * @brief Evaluate the energy, force, magnetic force and virial by using this + *DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] inlist The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @{ + **/ + template + void compute_spin( + ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + template + void compute_spin( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + /** @} */ + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @{ + **/ + template + void compute_spin( + ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + template + void compute_spin( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + /** @} */ + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] lmp_list The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @{ + **/ + template + void compute_spin( + ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + template + void compute_spin( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + /** @} */ + protected: + std::shared_ptr dp; +}; + +class DeepSpinModelDevi : public DeepBaseModelDevi { + public: + /** + * @brief DP model deviation constructor without initialization. + **/ + DeepSpinModelDevi(); + ~DeepSpinModelDevi(); + /** + * @brief DP model deviation constructor with initialization. + * @param[in] models The names of the frozen model files. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_contents The contents of the model files. If it is not + *empty, DP will read from the strings instead of the files. + **/ + DeepSpinModelDevi(const std::vector& models, + const int& gpu_rank = 0, + const std::vector& file_contents = + std::vector()); + /** + * @brief Initialize the DP model deviation contrcutor. + * @param[in] models The names of the frozen model files. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_contents The contents of the model files. If it is not + *empty, DP will read from the strings instead of the files. + **/ + void init(const std::vector& models, + const int& gpu_rank = 0, + const std::vector& file_contents = + std::vector()); + + /** + * @brief Evaluate the energy, force, magnetic force and virial by using these + *DP models with spin input. + * @param[out] all_ener The system energies of all models. + * @param[out] all_force The forces on each atom of all models. + * @param[out] all_force_mag The magnetic forces on each atom of all models. + * @param[out] all_virial The virials of all models. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] lmp_list The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. dim_aparam. Then all frames and atoms are provided with the + *same aparam. + **/ + template + void compute_spin( + std::vector& all_ener, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using these DP models with spin input. + * @param[out] all_ener The system energies of all models. + * @param[out] all_force The forces on each atom of all models. + * @param[out] all_force_mag The magnetic forces on each atom of all models. + * @param[out] all_virial The virials of all models. + * @param[out] all_atom_energy The atomic energies of all models. + * @param[out] all_atom_virial The atomic virials of all models. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] lmp_list The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. dim_aparam. Then all frames and atoms are provided with the + *same aparam. + **/ + template + void compute_spin( + std::vector& all_ener, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + + protected: + std::vector> dps; +}; +} // namespace deepmd diff --git a/source/api_cc/include/DeepSpinPT.h b/source/api_cc/include/DeepSpinPT.h new file mode 100644 index 0000000000..778c69758b --- /dev/null +++ b/source/api_cc/include/DeepSpinPT.h @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#pragma once + +#include +#include + +#include "DeepSpin.h" + +namespace deepmd { +/** + * @brief PyTorch implementation for Deep Potential. + **/ +class DeepSpinPT : public DeepSpinBase { + public: + /** + * @brief DP constructor without initialization. + **/ + DeepSpinPT(); + ~DeepSpinPT(); + /** + * @brief DP constructor with initialization. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_content The content of the model file. If it is not empty, + *DP will read from the string instead of the file. + **/ + DeepSpinPT(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); + /** + * @brief Initialize the DP. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_content The content of the model file. If it is not empty, + *DP will read from the string instead of the file. + **/ + void init(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); + + private: + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @param[in] atomic Whether to compute the atomic energy and virial. + **/ + template + void compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + *and atomic virial by using this DP with spin input. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] lmp_list The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @param[in] atomic Whether to compute the atomic energy and virial. + **/ + template + void compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + + public: + /** + * @brief Get the cutoff radius. + * @return The cutoff radius. + **/ + double cutoff() const { + assert(inited); + return rcut; + }; + /** + * @brief Get the number of types. + * @return The number of types. + **/ + int numb_types() const { + assert(inited); + return ntypes; + }; + /** + * @brief Get the number of types with spin. + * @return The number of types with spin. + **/ + int numb_types_spin() const { + assert(inited); + return ntypes_spin; + }; + /** + * @brief Get the dimension of the frame parameter. + * @return The dimension of the frame parameter. + **/ + int dim_fparam() const { + assert(inited); + return dfparam; + }; + /** + * @brief Get the dimension of the atomic parameter. + * @return The dimension of the atomic parameter. + **/ + int dim_aparam() const { + assert(inited); + return daparam; + }; + /** + * @brief Get the type map (element name of the atom types) of this model. + * @param[out] type_map The type map of this model. + **/ + void get_type_map(std::string& type_map); + + /** + * @brief Get whether the atom dimension of aparam is nall instead of fparam. + * @param[out] aparam_nall whether the atom dimension of aparam is nall + *instead of fparam. + **/ + bool is_aparam_nall() const { + assert(inited); + return aparam_nall; + }; + + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + + private: + int num_intra_nthreads, num_inter_nthreads; + bool inited; + int ntypes; + int ntypes_spin; + int dfparam; + int daparam; + bool aparam_nall; + // copy neighbor list info from host + torch::jit::script::Module module; + double rcut; + NeighborListData nlist_data; + int max_num_neighbors; + int gpu_id; + int do_message_passing; // 1:dpa2 model 0:others + bool gpu_enabled; + at::Tensor firstneigh_tensor; + c10::optional mapping_tensor; + torch::Dict comm_dict; + /** + * @brief Translate PyTorch exceptions to the DeePMD-kit exception. + * @param[in] f The function to run. + * @example translate_error([&](){...}); + */ + void translate_error(std::function f); +}; + +} // namespace deepmd diff --git a/source/api_cc/include/DeepSpinTF.h b/source/api_cc/include/DeepSpinTF.h new file mode 100644 index 0000000000..bcad6ef7df --- /dev/null +++ b/source/api_cc/include/DeepSpinTF.h @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#pragma once + +#include "DeepSpin.h" +#include "common.h" +#include "commonTF.h" +#include "neighbor_list.h" + +namespace deepmd { +/** + * @brief TensorFlow implementation for Deep Potential. + **/ +class DeepSpinTF : public DeepSpinBase { + public: + /** + * @brief DP constructor without initialization. + **/ + DeepSpinTF(); + ~DeepSpinTF(); + /** + * @brief DP constructor with initialization. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_content The content of the model file. If it is not empty, + *DP will read from the string instead of the file. + **/ + DeepSpinTF(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); + /** + * @brief Initialize the DP. + * @param[in] model The name of the frozen model file. + * @param[in] gpu_rank The GPU rank. Default is 0. + * @param[in] file_content The content of the model file. If it is not empty, + *DP will read from the string instead of the file. + **/ + void init(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); + + private: + /** + * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial + *by using this DP. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @param[in] atomic Whether to compute atomic energy and virial. + **/ + template + void compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + /** + * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial + *by using this DP. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] nghost The number of ghost atoms. + * @param[in] lmp_list The input neighbour list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @param[in] atomic Whether to compute atomic energy and virial. + **/ + template + void compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + + public: + /** + * @brief Get the cutoff radius. + * @return The cutoff radius. + **/ + double cutoff() const { + assert(inited); + return rcut; + }; + /** + * @brief Get the number of types. + * @return The number of types. + **/ + int numb_types() const { + assert(inited); + return ntypes; + }; + /** + * @brief Get the number of types with spin. + * @return The number of types with spin. + **/ + int numb_types_spin() const { + assert(inited); + return ntypes_spin; + }; + /** + * @brief Get the dimension of the frame parameter. + * @return The dimension of the frame parameter. + **/ + int dim_fparam() const { + assert(inited); + return dfparam; + }; + /** + * @brief Get the dimension of the atomic parameter. + * @return The dimension of the atomic parameter. + **/ + int dim_aparam() const { + assert(inited); + return daparam; + }; + /** + * @brief Get the type map (element name of the atom types) of this model. + * @param[out] type_map The type map of this model. + **/ + void get_type_map(std::string& type_map); + + /** + * @brief Get whether the atom dimension of aparam is nall instead of fparam. + * @param[out] aparam_nall whether the atom dimension of aparam is nall + *instead of fparam. + **/ + bool is_aparam_nall() const { + assert(inited); + return aparam_nall; + }; + + // forward to template class + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + void computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + + template + void extend(int& extend_inum, + std::vector& extend_ilist, + std::vector& extend_numneigh, + std::vector>& extend_neigh, + std::vector& extend_firstneigh, + std::vector& extend_dcoord, + std::vector& extend_atype, + int& extend_nghost, + std::map& new_idx_map, + std::map& old_idx_map, + const InputNlist& lmp_list, + const std::vector& dcoord, + const std::vector& atype, + const int nghost, + const std::vector& spin, + const int numb_types, + const int numb_types_spin, + const std::vector& virtual_len, + const std::vector& spin_norm); + + template + void extend_nlist(std::vector& extend_dcoord, + std::vector& extend_atype, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_); + + void cum_sum(std::map&, std::map&); + + private: + tensorflow::Session* session; + int num_intra_nthreads, num_inter_nthreads; + tensorflow::GraphDef* graph_def; + bool inited; + template + VT get_scalar(const std::string& name) const; + template + void get_vector(std::vector& vec, const std::string& name) const; + + double rcut; + int dtype; + double cell_size; + std::string model_type; + std::string model_version; + int ntypes; + int ntypes_spin; + std::vector virtual_len; + std::vector spin_norm; + int extend_inum; + std::vector extend_ilist; + std::vector extend_numneigh; + std::vector> extend_neigh; + std::vector extend_firstneigh; + // std::vector extend_dcoord; + std::vector extend_dtype; + int extend_nghost; + // for spin systems, search new index of atoms by their old index + std::map new_idx_map; + std::map old_idx_map; + int dfparam; + int daparam; + bool aparam_nall; + /** + * @brief Validate the size of frame and atomic parameters. + * @param[in] nframes The number of frames. + * @param[in] nloc The number of local atoms. + * @param[in] fparam The frame parameter. + * @param[in] aparam The atomic parameter. + * @tparam VALUETYPE The type of the parameters, double or float. + */ + template + void validate_fparam_aparam(const int& nframes, + const int& nloc, + const std::vector& fparam, + const std::vector& aparam) const; + /** + * @brief Tile the frame or atomic parameters if there is only + * a single frame of frame or atomic parameters. + * @param[out] out_param The tiled frame or atomic parameters. + * @param[in] nframes The number of frames. + * @param[in] dparam The dimension of the frame or atomic parameters in a + * frame. + * @param[in] param The frame or atomic parameters. + * @tparam VALUETYPE The type of the parameters, double or float. + */ + template + void tile_fparam_aparam(std::vector& out_param, + const int& nframes, + const int& dparam, + const std::vector& param) const; + // copy neighbor list info from host + bool init_nbor; + std::vector sec_a; + NeighborListData nlist_data; + InputNlist nlist; + AtomMap atommap; +}; + +} // namespace deepmd diff --git a/source/api_cc/src/DeepBaseModel.cc b/source/api_cc/src/DeepBaseModel.cc new file mode 100644 index 0000000000..a0514e4907 --- /dev/null +++ b/source/api_cc/src/DeepBaseModel.cc @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include "DeepBaseModel.h" + +#include +#include + +#include "AtomMap.h" +#include "common.h" +#include "device.h" + +using namespace deepmd; + +DeepBaseModel::DeepBaseModel() : inited(false) {} + +DeepBaseModel::~DeepBaseModel() {} + +void DeepBaseModel::print_summary(const std::string& pre) const { + deepmd::print_summary(pre); +} + +double DeepBaseModel::cutoff() const { return dpbase->cutoff(); } + +int DeepBaseModel::numb_types() const { return dpbase->numb_types(); } + +int DeepBaseModel::numb_types_spin() const { return dpbase->numb_types_spin(); } + +int DeepBaseModel::dim_fparam() const { return dpbase->dim_fparam(); } + +int DeepBaseModel::dim_aparam() const { return dpbase->dim_aparam(); } + +void DeepBaseModel::get_type_map(std::string& type_map) { + dpbase->get_type_map(type_map); +} + +bool DeepBaseModel::is_aparam_nall() const { return dpbase->is_aparam_nall(); } + +DeepBaseModelDevi::DeepBaseModelDevi() : inited(false), numb_models(0) {} + +// DeepBaseModelDevi::DeepBaseModelDevi( +// const std::vector& models, +// const int& gpu_rank, +// const std::vector& file_contents) +// : inited(false), numb_models(0) { +// init(models, gpu_rank, file_contents); +// } + +DeepBaseModelDevi::~DeepBaseModelDevi() {} + +// void DeepBaseModelDevi::init(const std::vector& models, +// const int& gpu_rank, +// const std::vector& file_contents) { +// if (inited) { +// std::cerr << "WARNING: deepmd-kit should not be initialized twice, do " +// "nothing at the second call of initializer" +// << std::endl; +// return; +// } +// numb_models = models.size(); +// if (numb_models == 0) { +// throw deepmd::deepmd_exception("no model is specified"); +// } +// dps.resize(numb_models); +// for (unsigned int ii = 0; ii < numb_models; ++ii) { +// dps[ii].init(models[ii], gpu_rank, +// file_contents.size() > ii ? file_contents[ii] : ""); +// } +// inited = true; +// } + +template +void DeepBaseModelDevi::compute_avg(VALUETYPE& dener, + const std::vector& all_energy) { + assert(all_energy.size() == numb_models); + if (numb_models == 0) { + return; + } + + dener = 0; + for (unsigned ii = 0; ii < numb_models; ++ii) { + dener += all_energy[ii]; + } + dener /= (VALUETYPE)(numb_models); +} + +template void DeepBaseModelDevi::compute_avg( + double& dener, const std::vector& all_energy); + +template void DeepBaseModelDevi::compute_avg( + float& dener, const std::vector& all_energy); + +template +void DeepBaseModelDevi::compute_avg( + std::vector& avg, + const std::vector>& xx) { + assert(xx.size() == numb_models); + if (numb_models == 0) { + return; + } + + avg.resize(xx[0].size()); + fill(avg.begin(), avg.end(), VALUETYPE(0.)); + + for (unsigned ii = 0; ii < numb_models; ++ii) { + for (unsigned jj = 0; jj < avg.size(); ++jj) { + avg[jj] += xx[ii][jj]; + } + } + + for (unsigned jj = 0; jj < avg.size(); ++jj) { + avg[jj] /= VALUETYPE(numb_models); + } +} + +template void DeepBaseModelDevi::compute_avg( + std::vector& avg, const std::vector>& xx); + +template void DeepBaseModelDevi::compute_avg( + std::vector& avg, const std::vector>& xx); + +template +void DeepBaseModelDevi::compute_std( + std::vector& std, + const std::vector& avg, + const std::vector>& xx, + const int& stride) { + assert(xx.size() == numb_models); + if (numb_models == 0) { + return; + } + + unsigned ndof = avg.size(); + unsigned nloc = ndof / stride; + assert(nloc * stride == ndof); + + std.resize(nloc); + fill(std.begin(), std.end(), VALUETYPE(0.)); + + for (unsigned ii = 0; ii < numb_models; ++ii) { + for (unsigned jj = 0; jj < nloc; ++jj) { + const VALUETYPE* tmp_f = &(xx[ii][static_cast(jj) * stride]); + const VALUETYPE* tmp_avg = &(avg[static_cast(jj) * stride]); + for (unsigned dd = 0; dd < stride; ++dd) { + VALUETYPE vdiff = tmp_f[dd] - tmp_avg[dd]; + std[jj] += vdiff * vdiff; + } + } + } + + for (unsigned jj = 0; jj < nloc; ++jj) { + std[jj] = sqrt(std[jj] / VALUETYPE(numb_models)); + } +} + +template void DeepBaseModelDevi::compute_std( + std::vector& std, + const std::vector& avg, + const std::vector>& xx, + const int& stride); + +template void DeepBaseModelDevi::compute_std( + std::vector& std, + const std::vector& avg, + const std::vector>& xx, + const int& stride); + +template +void DeepBaseModelDevi::compute_std_e( + std::vector& std, + const std::vector& avg, + const std::vector>& xx) { + compute_std(std, avg, xx, 1); +} + +template void DeepBaseModelDevi::compute_std_e( + std::vector& std, + const std::vector& avg, + const std::vector>& xx); + +template void DeepBaseModelDevi::compute_std_e( + std::vector& std, + const std::vector& avg, + const std::vector>& xx); + +template +void DeepBaseModelDevi::compute_std_f( + std::vector& std, + const std::vector& avg, + const std::vector>& xx) { + compute_std(std, avg, xx, 3); +} + +template void DeepBaseModelDevi::compute_std_f( + std::vector& std, + const std::vector& avg, + const std::vector>& xx); + +template void DeepBaseModelDevi::compute_std_f( + std::vector& std, + const std::vector& avg, + const std::vector>& xx); + +template +void DeepBaseModelDevi::compute_relative_std(std::vector& std, + const std::vector& avg, + const VALUETYPE eps, + const int& stride) { + unsigned ndof = avg.size(); + unsigned nloc = std.size(); + assert(nloc * stride == ndof); + + for (unsigned ii = 0; ii < nloc; ++ii) { + const VALUETYPE* tmp_avg = &(avg[static_cast(ii) * stride]); + VALUETYPE f_norm = 0.0; + for (unsigned dd = 0; dd < stride; ++dd) { + f_norm += tmp_avg[dd] * tmp_avg[dd]; + } + f_norm = sqrt(f_norm); + std[ii] /= f_norm + eps; + } +} + +template void DeepBaseModelDevi::compute_relative_std( + std::vector& std, + const std::vector& avg, + const double eps, + const int& stride); + +template void DeepBaseModelDevi::compute_relative_std( + std::vector& std, + const std::vector& avg, + const float eps, + const int& stride); + +template +void DeepBaseModelDevi::compute_relative_std_f( + std::vector& std, + const std::vector& avg, + const VALUETYPE eps) { + compute_relative_std(std, avg, eps, 3); +} + +template void DeepBaseModelDevi::compute_relative_std_f( + std::vector& std, const std::vector& avg, const double eps); + +template void DeepBaseModelDevi::compute_relative_std_f( + std::vector& std, const std::vector& avg, const float eps); diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc index 7bad4108ed..3f0c374ca8 100644 --- a/source/api_cc/src/DeepPot.cc +++ b/source/api_cc/src/DeepPot.cc @@ -16,12 +16,12 @@ using namespace deepmd; -DeepPot::DeepPot() : inited(false) {} +DeepPot::DeepPot() { inited = false; } DeepPot::DeepPot(const std::string& model, const int& gpu_rank, - const std::string& file_content) - : inited(false) { + const std::string& file_content) { + inited = false; init(model, gpu_rank, file_content); } @@ -62,12 +62,11 @@ void DeepPot::init(const std::string& model, throw deepmd::deepmd_exception("Unknown file type"); } inited = true; + dpbase = (std::shared_ptr) + dp; // make sure the base funtions work } -void DeepPot::print_summary(const std::string& pre) const { - deepmd::print_summary(pre); -} - +// no nlist, no atomic : nframe template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -133,8 +132,10 @@ template void DeepPot::compute(std::vector& dener, const std::vector& dbox, const std::vector& fparam, const std::vector& aparam); +// above: no nlist, no atomic : nframe * precision // support spin +// no nlist, no atomic : nframe template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, @@ -171,6 +172,7 @@ void DeepPot::compute_spin(std::vector& dener, false); } +// no nlist, no atomic : nframe * precision template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, @@ -215,6 +217,7 @@ template void DeepPot::compute_spin(std::vector& dener, const std::vector& fparam, const std::vector& aparam); +// nlist, no atomic : nframe template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -250,7 +253,7 @@ void DeepPot::compute(std::vector& dener, dp->computew(dener, dforce_, dvirial, datom_energy_, datom_virial_, dcoord_, datype_, dbox, nghost, lmp_list, ago, fparam_, aparam__, false); } - +// nlist, no atomic : nframe * precision template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dvirial, @@ -300,6 +303,7 @@ template void DeepPot::compute(std::vector& dener, const std::vector& aparam_); // support spin +// nlist, no atomic : nframe template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, @@ -399,6 +403,7 @@ template void DeepPot::compute_spin(std::vector& dener, const std::vector& fparam, const std::vector& aparam_); +// no nlist, atomic : nframe template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -473,8 +478,10 @@ template void DeepPot::compute(std::vector& dener, const std::vector& dbox, const std::vector& fparam, const std::vector& aparam); +// above: no nlist, atomic : nframe * precision // support spin +// no nlist, atomic : nframe template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, @@ -511,6 +518,7 @@ void DeepPot::compute_spin(std::vector& dener, datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, true); } +// no nlist, atomic : nframe * precision template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dforce_mag_, @@ -563,6 +571,7 @@ template void DeepPot::compute_spin(std::vector& dener, const std::vector& fparam, const std::vector& aparam); +// nlist, atomic : nframe template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -600,6 +609,7 @@ void DeepPot::compute(std::vector& dener, datype_, dbox, nghost, lmp_list, ago, fparam_, aparam__, true); } +// nlist, atomic : nframe * precision template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dvirial, @@ -657,7 +667,7 @@ template void DeepPot::compute(std::vector& dener, const std::vector& aparam_); // support spin - +// nlist, atomic : nframe template void DeepPot::compute_spin(ENERGYTYPE& dener, std::vector& dforce_, @@ -930,29 +940,17 @@ template void DeepPot::compute_mixed_type( const std::vector& fparam, const std::vector& aparam); -double DeepPot::cutoff() const { return dp->cutoff(); } - -int DeepPot::numb_types() const { return dp->numb_types(); } - -int DeepPot::numb_types_spin() const { return dp->numb_types_spin(); } - -int DeepPot::dim_fparam() const { return dp->dim_fparam(); } - -int DeepPot::dim_aparam() const { return dp->dim_aparam(); } - -void DeepPot::get_type_map(std::string& type_map) { - dp->get_type_map(type_map); +DeepPotModelDevi::DeepPotModelDevi() { + inited = false; + numb_models = 0; } -bool DeepPot::is_aparam_nall() const { return dp->is_aparam_nall(); } - -DeepPotModelDevi::DeepPotModelDevi() : inited(false), numb_models(0) {} - DeepPotModelDevi::DeepPotModelDevi( const std::vector& models, const int& gpu_rank, - const std::vector& file_contents) - : inited(false), numb_models(0) { + const std::vector& file_contents) { + inited = false; + numb_models = 0; init(models, gpu_rank, file_contents); } @@ -972,13 +970,17 @@ void DeepPotModelDevi::init(const std::vector& models, throw deepmd::deepmd_exception("no model is specified"); } dps.resize(numb_models); + dpbases.resize(numb_models); for (unsigned int ii = 0; ii < numb_models; ++ii) { - dps[ii].init(models[ii], gpu_rank, - file_contents.size() > ii ? file_contents[ii] : ""); + dps[ii] = std::make_shared(); + dps[ii]->init(models[ii], gpu_rank, + file_contents.size() > ii ? file_contents[ii] : ""); + dpbases[ii] = dps[ii]; } inited = true; } +// no nlist, no atomic template void DeepPotModelDevi::compute(std::vector& all_energy, std::vector>& all_force, @@ -996,11 +998,12 @@ void DeepPotModelDevi::compute(std::vector& all_energy, all_force.resize(numb_models); all_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii].compute(all_energy[ii], all_force[ii], all_virial[ii], dcoord_, - datype_, dbox, fparam, aparam_); + dps[ii]->compute(all_energy[ii], all_force[ii], all_virial[ii], dcoord_, + datype_, dbox, fparam, aparam_); } } +// no nlist, no atomic: precision template void DeepPotModelDevi::compute( std::vector& all_energy, std::vector>& all_force, @@ -1021,6 +1024,7 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); +// no nlist, atomic template void DeepPotModelDevi::compute( std::vector& all_energy, @@ -1042,12 +1046,12 @@ void DeepPotModelDevi::compute( all_atom_energy.resize(numb_models); all_atom_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii].compute(all_energy[ii], all_force[ii], all_virial[ii], - all_atom_energy[ii], all_atom_virial[ii], dcoord_, datype_, - dbox, fparam, aparam_); + dps[ii]->compute(all_energy[ii], all_force[ii], all_virial[ii], + all_atom_energy[ii], all_atom_virial[ii], dcoord_, datype_, + dbox, fparam, aparam_); } } - +// no nlist, atomic: precision template void DeepPotModelDevi::compute( std::vector& all_energy, std::vector>& all_force, @@ -1072,6 +1076,7 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); +// nlist, no atomic template void DeepPotModelDevi::compute(std::vector& all_energy, std::vector>& all_force, @@ -1091,11 +1096,11 @@ void DeepPotModelDevi::compute(std::vector& all_energy, all_force.resize(numb_models); all_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii].compute(all_energy[ii], all_force[ii], all_virial[ii], dcoord_, - datype_, dbox, nghost, lmp_list, ago, fparam, aparam_); + dps[ii]->compute(all_energy[ii], all_force[ii], all_virial[ii], dcoord_, + datype_, dbox, nghost, lmp_list, ago, fparam, aparam_); } } - +// nlist, no atomic: precision template void DeepPotModelDevi::compute( std::vector& all_energy, std::vector>& all_force, @@ -1123,6 +1128,7 @@ template void DeepPotModelDevi::compute( const std::vector& aparam); // support spin +// nlist, no atomic template void DeepPotModelDevi::compute_spin( std::vector& all_energy, @@ -1146,9 +1152,9 @@ void DeepPotModelDevi::compute_spin( all_force_mag.resize(numb_models); all_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii].compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], - all_virial[ii], dcoord_, dspin_, datype_, dbox, nghost, - lmp_list, ago, fparam, aparam_); + dps[ii]->compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], + all_virial[ii], dcoord_, dspin_, datype_, dbox, + nghost, lmp_list, ago, fparam, aparam_); } } @@ -1183,6 +1189,7 @@ template void DeepPotModelDevi::compute_spin( const std::vector& fparam, const std::vector& aparam); +// nlist, atomic template void DeepPotModelDevi::compute( std::vector& all_energy, @@ -1207,12 +1214,13 @@ void DeepPotModelDevi::compute( all_atom_energy.resize(numb_models); all_atom_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii].compute(all_energy[ii], all_force[ii], all_virial[ii], - all_atom_energy[ii], all_atom_virial[ii], dcoord_, datype_, - dbox, nghost, lmp_list, ago, fparam, aparam_); + dps[ii]->compute(all_energy[ii], all_force[ii], all_virial[ii], + all_atom_energy[ii], all_atom_virial[ii], dcoord_, datype_, + dbox, nghost, lmp_list, ago, fparam, aparam_); } } +// nlist, atomic : precision template void DeepPotModelDevi::compute( std::vector& all_energy, std::vector>& all_force, @@ -1244,6 +1252,7 @@ template void DeepPotModelDevi::compute( const std::vector& aparam); // support spin +// nlist, atomic template void DeepPotModelDevi::compute_spin( std::vector& all_energy, @@ -1271,10 +1280,10 @@ void DeepPotModelDevi::compute_spin( all_atom_energy.resize(numb_models); all_atom_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii].compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], - all_virial[ii], all_atom_energy[ii], - all_atom_virial[ii], dcoord_, dspin_, datype_, dbox, - nghost, lmp_list, ago, fparam, aparam_); + dps[ii]->compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], + all_virial[ii], all_atom_energy[ii], + all_atom_virial[ii], dcoord_, dspin_, datype_, dbox, + nghost, lmp_list, ago, fparam, aparam_); } } @@ -1312,180 +1321,3 @@ template void DeepPotModelDevi::compute_spin( const int& ago, const std::vector& fparam, const std::vector& aparam); - -template -void DeepPotModelDevi::compute_avg(VALUETYPE& dener, - const std::vector& all_energy) { - assert(all_energy.size() == numb_models); - if (numb_models == 0) { - return; - } - - dener = 0; - for (unsigned ii = 0; ii < numb_models; ++ii) { - dener += all_energy[ii]; - } - dener /= (VALUETYPE)(numb_models); -} - -template void DeepPotModelDevi::compute_avg( - double& dener, const std::vector& all_energy); - -template void DeepPotModelDevi::compute_avg( - float& dener, const std::vector& all_energy); - -template -void DeepPotModelDevi::compute_avg( - std::vector& avg, - const std::vector>& xx) { - assert(xx.size() == numb_models); - if (numb_models == 0) { - return; - } - - avg.resize(xx[0].size()); - fill(avg.begin(), avg.end(), VALUETYPE(0.)); - - for (unsigned ii = 0; ii < numb_models; ++ii) { - for (unsigned jj = 0; jj < avg.size(); ++jj) { - avg[jj] += xx[ii][jj]; - } - } - - for (unsigned jj = 0; jj < avg.size(); ++jj) { - avg[jj] /= VALUETYPE(numb_models); - } -} - -template void DeepPotModelDevi::compute_avg( - std::vector& avg, const std::vector>& xx); - -template void DeepPotModelDevi::compute_avg( - std::vector& avg, const std::vector>& xx); - -template -void DeepPotModelDevi::compute_std( - std::vector& std, - const std::vector& avg, - const std::vector>& xx, - const int& stride) { - assert(xx.size() == numb_models); - if (numb_models == 0) { - return; - } - - unsigned ndof = avg.size(); - unsigned nloc = ndof / stride; - assert(nloc * stride == ndof); - - std.resize(nloc); - fill(std.begin(), std.end(), VALUETYPE(0.)); - - for (unsigned ii = 0; ii < numb_models; ++ii) { - for (unsigned jj = 0; jj < nloc; ++jj) { - const VALUETYPE* tmp_f = &(xx[ii][static_cast(jj) * stride]); - const VALUETYPE* tmp_avg = &(avg[static_cast(jj) * stride]); - for (unsigned dd = 0; dd < stride; ++dd) { - VALUETYPE vdiff = tmp_f[dd] - tmp_avg[dd]; - std[jj] += vdiff * vdiff; - } - } - } - - for (unsigned jj = 0; jj < nloc; ++jj) { - std[jj] = sqrt(std[jj] / VALUETYPE(numb_models)); - } -} - -template void DeepPotModelDevi::compute_std( - std::vector& std, - const std::vector& avg, - const std::vector>& xx, - const int& stride); - -template void DeepPotModelDevi::compute_std( - std::vector& std, - const std::vector& avg, - const std::vector>& xx, - const int& stride); - -template -void DeepPotModelDevi::compute_std_e( - std::vector& std, - const std::vector& avg, - const std::vector>& xx) { - compute_std(std, avg, xx, 1); -} - -template void DeepPotModelDevi::compute_std_e( - std::vector& std, - const std::vector& avg, - const std::vector>& xx); - -template void DeepPotModelDevi::compute_std_e( - std::vector& std, - const std::vector& avg, - const std::vector>& xx); - -template -void DeepPotModelDevi::compute_std_f( - std::vector& std, - const std::vector& avg, - const std::vector>& xx) { - compute_std(std, avg, xx, 3); -} - -template void DeepPotModelDevi::compute_std_f( - std::vector& std, - const std::vector& avg, - const std::vector>& xx); - -template void DeepPotModelDevi::compute_std_f( - std::vector& std, - const std::vector& avg, - const std::vector>& xx); - -template -void DeepPotModelDevi::compute_relative_std(std::vector& std, - const std::vector& avg, - const VALUETYPE eps, - const int& stride) { - unsigned ndof = avg.size(); - unsigned nloc = std.size(); - assert(nloc * stride == ndof); - - for (unsigned ii = 0; ii < nloc; ++ii) { - const VALUETYPE* tmp_avg = &(avg[static_cast(ii) * stride]); - VALUETYPE f_norm = 0.0; - for (unsigned dd = 0; dd < stride; ++dd) { - f_norm += tmp_avg[dd] * tmp_avg[dd]; - } - f_norm = sqrt(f_norm); - std[ii] /= f_norm + eps; - } -} - -template void DeepPotModelDevi::compute_relative_std( - std::vector& std, - const std::vector& avg, - const double eps, - const int& stride); - -template void DeepPotModelDevi::compute_relative_std( - std::vector& std, - const std::vector& avg, - const float eps, - const int& stride); - -template -void DeepPotModelDevi::compute_relative_std_f(std::vector& std, - const std::vector& avg, - const VALUETYPE eps) { - compute_relative_std(std, avg, eps, 3); -} - -template void DeepPotModelDevi::compute_relative_std_f( - std::vector& std, const std::vector& avg, const double eps); - -template void DeepPotModelDevi::compute_relative_std_f( - std::vector& std, const std::vector& avg, const float eps); diff --git a/source/api_cc/src/DeepSpin.cc b/source/api_cc/src/DeepSpin.cc new file mode 100644 index 0000000000..b79e166efe --- /dev/null +++ b/source/api_cc/src/DeepSpin.cc @@ -0,0 +1,627 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include "DeepSpin.h" + +#include +#include + +#include "AtomMap.h" +#include "common.h" +#ifdef BUILD_TENSORFLOW +#include "DeepSpinTF.h" +#endif +#ifdef BUILD_PYTORCH +#include "DeepSpinPT.h" +#endif +#include "device.h" + +using namespace deepmd; + +DeepSpin::DeepSpin() { inited = false; } + +DeepSpin::DeepSpin(const std::string& model, + const int& gpu_rank, + const std::string& file_content) { + inited = false; + init(model, gpu_rank, file_content); +} + +DeepSpin::~DeepSpin() {} + +void DeepSpin::init(const std::string& model, + const int& gpu_rank, + const std::string& file_content) { + if (inited) { + std::cerr << "WARNING: deepmd-kit should not be initialized twice, do " + "nothing at the second call of initializer" + << std::endl; + return; + } + DPBackend backend; + if (model.length() >= 4 && model.substr(model.length() - 4) == ".pth") { + backend = deepmd::DPBackend::PyTorch; + } else if (model.length() >= 3 && model.substr(model.length() - 3) == ".pb") { + backend = deepmd::DPBackend::TensorFlow; + } else { + throw deepmd::deepmd_exception("Unsupported model file format"); + } + if (deepmd::DPBackend::TensorFlow == backend) { +#ifdef BUILD_TENSORFLOW + dp = std::make_shared(model, gpu_rank, file_content); +#else + throw deepmd::deepmd_exception("TensorFlow backend is not built"); +#endif + } else if (deepmd::DPBackend::PyTorch == backend) { +#ifdef BUILD_PYTORCH + dp = std::make_shared(model, gpu_rank, file_content); +#else + throw deepmd::deepmd_exception("PyTorch backend is not built"); +#endif + } else if (deepmd::DPBackend::Paddle == backend) { + throw deepmd::deepmd_exception("PaddlePaddle backend is not supported yet"); + } else { + throw deepmd::deepmd_exception("Unknown file type"); + } + inited = true; + dpbase = dp; +} + +// support spin +// no nlist, no atomic : nframe +template +void DeepSpin::compute_spin(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { + std::vector dener_; + std::vector datom_energy_, datom_virial_; + dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, + false); + dener = dener_[0]; +} + +template +void DeepSpin::compute_spin(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { + std::vector datom_energy_, datom_virial_; + dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, + false); +} + +// no nlist, no atomic : nframe * precision +template void DeepSpin::compute_spin(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute_spin(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute_spin(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute_spin(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +// support spin +// nlist, no atomic : nframe +template +void DeepSpin::compute_spin(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { + std::vector dener_; + std::vector datom_energy_, datom_virial_; + dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, + ago, fparam_, aparam__, false); + dener = dener_[0]; +} + +template +void DeepSpin::compute_spin(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { + std::vector datom_energy_, datom_virial_; + dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, + ago, fparam_, aparam__, false); +} + +// nlist, no atomic : nframe * precision +template void DeepSpin::compute_spin( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute_spin(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute_spin( + std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute_spin(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +// support spin +// no nlist, atomic : nframe +template +void DeepSpin::compute_spin(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { + std::vector dener_; + dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, + true); + dener = dener_[0]; +} +template +void DeepSpin::compute_spin(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { + dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, + true); +} +// no nlist, atomic : nframe * precision +template void DeepSpin::compute_spin(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute_spin(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute_spin(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute_spin(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +// support spin +// nlist, atomic : nframe +template +void DeepSpin::compute_spin(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { + std::vector dener_; + dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, + ago, fparam_, aparam__, true); + dener = dener_[0]; +} +template +void DeepSpin::compute_spin(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { + dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, + datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, + ago, fparam_, aparam__, true); +} +// nlist, atomic : nframe * precision +template void DeepSpin::compute_spin( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute_spin(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute_spin( + std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute_spin(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +DeepSpinModelDevi::DeepSpinModelDevi() { + inited = false; + numb_models = 0; +} + +DeepSpinModelDevi::DeepSpinModelDevi( + const std::vector& models, + const int& gpu_rank, + const std::vector& file_contents) { + inited = false; + numb_models = 0; + init(models, gpu_rank, file_contents); +} + +DeepSpinModelDevi::~DeepSpinModelDevi() {} + +void DeepSpinModelDevi::init(const std::vector& models, + const int& gpu_rank, + const std::vector& file_contents) { + if (inited) { + std::cerr << "WARNING: deepmd-kit should not be initialized twice, do " + "nothing at the second call of initializer" + << std::endl; + return; + } + numb_models = models.size(); + if (numb_models == 0) { + throw deepmd::deepmd_exception("no model is specified"); + } + dps.resize(numb_models); + dpbases.resize(numb_models); + for (unsigned int ii = 0; ii < numb_models; ++ii) { + dps[ii] = std::make_shared(); + dps[ii]->init(models[ii], gpu_rank, + file_contents.size() > ii ? file_contents[ii] : ""); + dpbases[ii] = dps[ii]; + } + inited = true; +} + +// support spin +// nlist, no atomic +template +void DeepSpinModelDevi::compute_spin( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_) { + if (numb_models == 0) { + return; + } + all_energy.resize(numb_models); + all_force.resize(numb_models); + all_force_mag.resize(numb_models); + all_virial.resize(numb_models); + for (unsigned ii = 0; ii < numb_models; ++ii) { + dps[ii]->compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], + all_virial[ii], dcoord_, dspin_, datype_, dbox, + nghost, lmp_list, ago, fparam, aparam_); + } +} + +// nlist, no atomic: precision +template void DeepSpinModelDevi::compute_spin( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpinModelDevi::compute_spin( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam); + +// support spin +// nlist, atomic +template +void DeepSpinModelDevi::compute_spin( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_) { + if (numb_models == 0) { + return; + } + all_energy.resize(numb_models); + all_force.resize(numb_models); + all_force_mag.resize(numb_models); + all_virial.resize(numb_models); + all_atom_energy.resize(numb_models); + all_atom_virial.resize(numb_models); + for (unsigned ii = 0; ii < numb_models; ++ii) { + dps[ii]->compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], + all_virial[ii], all_atom_energy[ii], + all_atom_virial[ii], dcoord_, dspin_, datype_, dbox, + nghost, lmp_list, ago, fparam, aparam_); + } +} + +// nlist, atomic : precision +template void DeepSpinModelDevi::compute_spin( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpinModelDevi::compute_spin( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam); diff --git a/source/api_cc/src/DeepSpinPT.cc b/source/api_cc/src/DeepSpinPT.cc new file mode 100644 index 0000000000..08e9a3023e --- /dev/null +++ b/source/api_cc/src/DeepSpinPT.cc @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#ifdef BUILD_PYTORCH +#include "DeepSpinPT.h" + +#include + +#include + +#include "common.h" +#include "device.h" +#include "errors.h" + +using namespace deepmd; + +void DeepSpinPT::translate_error(std::function f) { + try { + f(); + // it seems that libtorch may throw different types of exceptions which are + // inherbited from different base classes + // https://github.com/pytorch/pytorch/blob/13316a8d4642454012d34da0d742f1ba93fc0667/torch/csrc/jit/runtime/interpreter.cpp#L924-L939 + } catch (const c10::Error& e) { + throw deepmd::deepmd_exception("DeePMD-kit PyTorch backend error: " + + std::string(e.what())); + } catch (const torch::jit::JITException& e) { + throw deepmd::deepmd_exception("DeePMD-kit PyTorch backend JIT error: " + + std::string(e.what())); + } catch (const std::runtime_error& e) { + throw deepmd::deepmd_exception("DeePMD-kit PyTorch backend error: " + + std::string(e.what())); + } +} + +torch::Tensor createNlistTensor2(const std::vector>& data) { + std::vector row_tensors; + + for (const auto& row : data) { + torch::Tensor row_tensor = torch::tensor(row, torch::kInt32).unsqueeze(0); + row_tensors.push_back(row_tensor); + } + + torch::Tensor tensor; + if (row_tensors.size() > 0) { + tensor = torch::cat(row_tensors, 0).unsqueeze(0); + } else { + tensor = torch::empty({1, 0, 0}, torch::kInt32); + } + return tensor; +} +DeepSpinPT::DeepSpinPT() : inited(false) {} +DeepSpinPT::DeepSpinPT(const std::string& model, + const int& gpu_rank, + const std::string& file_content) + : inited(false) { + try { + translate_error([&] { init(model, gpu_rank, file_content); }); + } catch (...) { + // Clean up and rethrow, as the destructor will not be called + throw; + } +} +void DeepSpinPT::init(const std::string& model, + const int& gpu_rank, + const std::string& file_content) { + if (inited) { + std::cerr << "WARNING: deepmd-kit should not be initialized twice, do " + "nothing at the second call of initializer" + << std::endl; + return; + } + deepmd::load_op_library(); + int gpu_num = torch::cuda::device_count(); + if (gpu_num > 0) { + gpu_id = gpu_rank % gpu_num; + } else { + gpu_id = 0; + } + torch::Device device(torch::kCUDA, gpu_id); + gpu_enabled = torch::cuda::is_available(); + if (!gpu_enabled) { + device = torch::Device(torch::kCPU); + std::cout << "load model from: " << model << " to cpu " << std::endl; + } else { +#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM + DPErrcheck(DPSetDevice(gpu_id)); +#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM + std::cout << "load model from: " << model << " to gpu " << gpu_id + << std::endl; + } + std::unordered_map metadata = {{"type", ""}}; + module = torch::jit::load(model, device, metadata); + do_message_passing = module.run_method("has_message_passing").toBool(); + torch::jit::FusionStrategy strategy; + strategy = {{torch::jit::FusionBehavior::DYNAMIC, 10}}; + torch::jit::setFusionStrategy(strategy); + + get_env_nthreads(num_intra_nthreads, + num_inter_nthreads); // need to be fixed as + // DP_INTRA_OP_PARALLELISM_THREADS + if (num_inter_nthreads) { + try { + at::set_num_interop_threads(num_inter_nthreads); + } catch (...) { + } + } + if (num_intra_nthreads) { + try { + at::set_num_threads(num_intra_nthreads); + } catch (...) { + } + } + + auto rcut_ = module.run_method("get_rcut").toDouble(); + rcut = static_cast(rcut_); + ntypes = module.run_method("get_ntypes").toInt(); + ntypes_spin = 0; + dfparam = module.run_method("get_dim_fparam").toInt(); + daparam = module.run_method("get_dim_aparam").toInt(); + aparam_nall = module.run_method("is_aparam_nall").toBool(); + inited = true; +} +DeepSpinPT::~DeepSpinPT() {} + +template +void DeepSpinPT::compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + torch::Device device(torch::kCUDA, gpu_id); + if (!gpu_enabled) { + device = torch::Device(torch::kCPU); + } + int natoms = atype.size(); + auto options = torch::TensorOptions().dtype(torch::kFloat64); + torch::ScalarType floatType = torch::kFloat64; + if (std::is_same_v) { + options = torch::TensorOptions().dtype(torch::kFloat32); + floatType = torch::kFloat32; + } + auto int32_option = + torch::TensorOptions().device(torch::kCPU).dtype(torch::kInt32); + auto int_option = + torch::TensorOptions().device(torch::kCPU).dtype(torch::kInt64); + // select real atoms + std::vector dcoord, dforce, dforce_mag, aparam_, datom_energy, + datom_virial; + std::vector datype, fwd_map, bkw_map; + int nghost_real, nall_real, nloc_real; + int nall = natoms; + select_real_atoms_coord(dcoord, datype, aparam_, nghost_real, fwd_map, + bkw_map, nall_real, nloc_real, coord, atype, aparam, + nghost, ntypes, 1, daparam, nall, aparam_nall); + int nloc = nall_real - nghost_real; + int nframes = 1; + std::vector coord_wrapped = dcoord; + at::Tensor coord_wrapped_Tensor = + torch::from_blob(coord_wrapped.data(), {1, nall_real, 3}, options) + .to(device); + std::vector spin_wrapped = spin; + at::Tensor spin_wrapped_Tensor = + torch::from_blob(spin_wrapped.data(), {1, nall_real, 3}, options) + .to(device); + std::vector atype_64(datype.begin(), datype.end()); + at::Tensor atype_Tensor = + torch::from_blob(atype_64.data(), {1, nall_real}, int_option).to(device); + c10::optional mapping_tensor; + if (ago == 0) { + nlist_data.copy_from_nlist(lmp_list); + nlist_data.shuffle_exclude_empty(fwd_map); + nlist_data.padding(); + if (do_message_passing == 1 && nghost > 0) { + int nswap = lmp_list.nswap; + torch::Tensor sendproc_tensor = + torch::from_blob(lmp_list.sendproc, {nswap}, int32_option); + torch::Tensor recvproc_tensor = + torch::from_blob(lmp_list.recvproc, {nswap}, int32_option); + torch::Tensor firstrecv_tensor = + torch::from_blob(lmp_list.firstrecv, {nswap}, int32_option); + torch::Tensor recvnum_tensor = + torch::from_blob(lmp_list.recvnum, {nswap}, int32_option); + torch::Tensor sendnum_tensor = + torch::from_blob(lmp_list.sendnum, {nswap}, int32_option); + torch::Tensor communicator_tensor = torch::from_blob( + const_cast(lmp_list.world), {1}, torch::kInt64); + // torch::Tensor communicator_tensor = + // torch::tensor(lmp_list.world, int32_option); + torch::Tensor nswap_tensor = torch::tensor(nswap, int32_option); + int total_send = + std::accumulate(lmp_list.sendnum, lmp_list.sendnum + nswap, 0); + torch::Tensor sendlist_tensor = + torch::from_blob(lmp_list.sendlist, {total_send}, int32_option); + torch::Tensor has_spin = torch::tensor({1}, int32_option); + comm_dict.insert("send_list", sendlist_tensor); + comm_dict.insert("send_proc", sendproc_tensor); + comm_dict.insert("recv_proc", recvproc_tensor); + comm_dict.insert("send_num", sendnum_tensor); + comm_dict.insert("recv_num", recvnum_tensor); + comm_dict.insert("communicator", communicator_tensor); + comm_dict.insert("has_spin", has_spin); + } + if (do_message_passing == 1 && nghost == 0) { + // for the situation that no ghost atoms (e.g. serial nopbc) + // set the mapping arange(nloc) is enough + auto option = torch::TensorOptions().device(device).dtype(torch::kInt64); + mapping_tensor = at::arange(nloc_real, option).unsqueeze(0); + } + } + at::Tensor firstneigh = createNlistTensor2(nlist_data.jlist); + firstneigh_tensor = firstneigh.to(torch::kInt64).to(device); + bool do_atom_virial_tensor = atomic; + c10::optional fparam_tensor; + if (!fparam.empty()) { + fparam_tensor = + torch::from_blob(const_cast(fparam.data()), + {1, static_cast(fparam.size())}, options) + .to(device); + } + c10::optional aparam_tensor; + if (!aparam_.empty()) { + aparam_tensor = + torch::from_blob( + const_cast(aparam_.data()), + {1, lmp_list.inum, + static_cast(aparam_.size()) / lmp_list.inum}, + options) + .to(device); + } + c10::Dict outputs = + (do_message_passing == 1 && nghost > 0) + ? module + .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, + spin_wrapped_Tensor, firstneigh_tensor, + mapping_tensor, fparam_tensor, aparam_tensor, + do_atom_virial_tensor, comm_dict) + .toGenericDict() + : module + .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, + spin_wrapped_Tensor, firstneigh_tensor, + mapping_tensor, fparam_tensor, aparam_tensor, + do_atom_virial_tensor) + .toGenericDict(); + c10::IValue energy_ = outputs.at("energy"); + c10::IValue force_ = outputs.at("extended_force"); + c10::IValue force_mag_ = outputs.at("extended_force_mag"); + // spin model not suported yet + // c10::IValue virial_ = outputs.at("virial"); + torch::Tensor flat_energy_ = energy_.toTensor().view({-1}); + torch::Tensor cpu_energy_ = flat_energy_.to(torch::kCPU); + ener.assign(cpu_energy_.data_ptr(), + cpu_energy_.data_ptr() + cpu_energy_.numel()); + torch::Tensor flat_force_ = force_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_force_ = flat_force_.to(torch::kCPU); + dforce.assign(cpu_force_.data_ptr(), + cpu_force_.data_ptr() + cpu_force_.numel()); + torch::Tensor flat_force_mag_ = + force_mag_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_force_mag_ = flat_force_mag_.to(torch::kCPU); + dforce_mag.assign( + cpu_force_mag_.data_ptr(), + cpu_force_mag_.data_ptr() + cpu_force_mag_.numel()); + // spin model not suported yet + // torch::Tensor flat_virial_ = virial_.toTensor().view({-1}).to(floatType); + // torch::Tensor cpu_virial_ = flat_virial_.to(torch::kCPU); + // virial.assign(cpu_virial_.data_ptr(), + // cpu_virial_.data_ptr() + cpu_virial_.numel()); + + // bkw map + force.resize(static_cast(nframes) * fwd_map.size() * 3); + force_mag.resize(static_cast(nframes) * fwd_map.size() * 3); + select_map(force, dforce, bkw_map, 3, nframes, fwd_map.size(), + nall_real); + select_map(force_mag, dforce_mag, bkw_map, 3, nframes, + fwd_map.size(), nall_real); + if (atomic) { + // spin model not suported yet + // c10::IValue atom_virial_ = outputs.at("extended_virial"); + c10::IValue atom_energy_ = outputs.at("atom_energy"); + torch::Tensor flat_atom_energy_ = + atom_energy_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_atom_energy_ = flat_atom_energy_.to(torch::kCPU); + datom_energy.resize(nall_real, + 0.0); // resize to nall to be consistenet with TF. + datom_energy.assign( + cpu_atom_energy_.data_ptr(), + cpu_atom_energy_.data_ptr() + cpu_atom_energy_.numel()); + // spin model not suported yet + // torch::Tensor flat_atom_virial_ = + // atom_virial_.toTensor().view({-1}).to(floatType); + // torch::Tensor cpu_atom_virial_ = flat_atom_virial_.to(torch::kCPU); + // datom_virial.assign( + // cpu_atom_virial_.data_ptr(), + // cpu_atom_virial_.data_ptr() + cpu_atom_virial_.numel()); + atom_energy.resize(static_cast(nframes) * fwd_map.size()); + // atom_virial.resize(static_cast(nframes) * fwd_map.size() * 9); + select_map(atom_energy, datom_energy, bkw_map, 1, nframes, + fwd_map.size(), nall_real); + // select_map(atom_virial, datom_virial, bkw_map, 9, nframes, + // fwd_map.size(), nall_real); + } +} +template void DeepSpinPT::compute>( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); +template void DeepSpinPT::compute>( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + +template +void DeepSpinPT::compute(ENERGYVTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + torch::Device device(torch::kCUDA, gpu_id); + if (!gpu_enabled) { + device = torch::Device(torch::kCPU); + } + std::vector coord_wrapped = coord; + std::vector spin_wrapped = spin; + int natoms = atype.size(); + auto options = torch::TensorOptions().dtype(torch::kFloat64); + torch::ScalarType floatType = torch::kFloat64; + if (std::is_same_v) { + options = torch::TensorOptions().dtype(torch::kFloat32); + floatType = torch::kFloat32; + } + auto int_options = torch::TensorOptions().dtype(torch::kInt64); + int nframes = 1; + std::vector inputs; + at::Tensor coord_wrapped_Tensor = + torch::from_blob(coord_wrapped.data(), {1, natoms, 3}, options) + .to(device); + inputs.push_back(coord_wrapped_Tensor); + std::vector atype_64(atype.begin(), atype.end()); + at::Tensor atype_Tensor = + torch::from_blob(atype_64.data(), {1, natoms}, int_options).to(device); + inputs.push_back(atype_Tensor); + at::Tensor spin_wrapped_Tensor = + torch::from_blob(spin_wrapped.data(), {1, natoms, 3}, options).to(device); + inputs.push_back(spin_wrapped_Tensor); + c10::optional box_Tensor; + if (!box.empty()) { + box_Tensor = + torch::from_blob(const_cast(box.data()), {1, 9}, options) + .to(device); + } + inputs.push_back(box_Tensor); + c10::optional fparam_tensor; + if (!fparam.empty()) { + fparam_tensor = + torch::from_blob(const_cast(fparam.data()), + {1, static_cast(fparam.size())}, options) + .to(device); + } + inputs.push_back(fparam_tensor); + c10::optional aparam_tensor; + if (!aparam.empty()) { + aparam_tensor = + torch::from_blob( + const_cast(aparam.data()), + {1, natoms, static_cast(aparam.size()) / natoms}, + options) + .to(device); + } + inputs.push_back(aparam_tensor); + bool do_atom_virial_tensor = atomic; + inputs.push_back(do_atom_virial_tensor); + c10::Dict outputs = + module.forward(inputs).toGenericDict(); + c10::IValue energy_ = outputs.at("energy"); + c10::IValue force_ = outputs.at("force"); + c10::IValue force_mag_ = outputs.at("force_mag"); + // spin model not suported yet + // c10::IValue virial_ = outputs.at("virial"); + torch::Tensor flat_energy_ = energy_.toTensor().view({-1}); + torch::Tensor cpu_energy_ = flat_energy_.to(torch::kCPU); + ener.assign(cpu_energy_.data_ptr(), + cpu_energy_.data_ptr() + cpu_energy_.numel()); + torch::Tensor flat_force_ = force_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_force_ = flat_force_.to(torch::kCPU); + force.assign(cpu_force_.data_ptr(), + cpu_force_.data_ptr() + cpu_force_.numel()); + torch::Tensor flat_force_mag_ = + force_mag_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_force_mag_ = flat_force_mag_.to(torch::kCPU); + force_mag.assign( + cpu_force_mag_.data_ptr(), + cpu_force_mag_.data_ptr() + cpu_force_mag_.numel()); + // spin model not suported yet + // torch::Tensor flat_virial_ = virial_.toTensor().view({-1}).to(floatType); + // torch::Tensor cpu_virial_ = flat_virial_.to(torch::kCPU); + // virial.assign(cpu_virial_.data_ptr(), + // cpu_virial_.data_ptr() + cpu_virial_.numel()); + if (atomic) { + // c10::IValue atom_virial_ = outputs.at("atom_virial"); + c10::IValue atom_energy_ = outputs.at("atom_energy"); + torch::Tensor flat_atom_energy_ = + atom_energy_.toTensor().view({-1}).to(floatType); + torch::Tensor cpu_atom_energy_ = flat_atom_energy_.to(torch::kCPU); + atom_energy.assign( + cpu_atom_energy_.data_ptr(), + cpu_atom_energy_.data_ptr() + cpu_atom_energy_.numel()); + // torch::Tensor flat_atom_virial_ = + // atom_virial_.toTensor().view({-1}).to(floatType); + // torch::Tensor cpu_atom_virial_ = flat_atom_virial_.to(torch::kCPU); + // atom_virial.assign( + // cpu_atom_virial_.data_ptr(), + // cpu_atom_virial_.data_ptr() + cpu_atom_virial_.numel()); + } +} + +template void DeepSpinPT::compute>( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); +template void DeepSpinPT::compute>( + std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); +void DeepSpinPT::get_type_map(std::string& type_map) { + auto ret = module.run_method("get_type_map").toList(); + for (const torch::IValue& element : ret) { + type_map += torch::str(element); // Convert each element to a string + type_map += " "; // Add a space between elements + } +} + +// forward to template method +void DeepSpinPT::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + translate_error([&] { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, + spin, atype, box, fparam, aparam, atomic); + }); +} +void DeepSpinPT::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + translate_error([&] { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, + spin, atype, box, fparam, aparam, atomic); + }); +} +void DeepSpinPT::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + translate_error([&] { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, + spin, atype, box, nghost, inlist, ago, fparam, aparam, atomic); + }); +} +void DeepSpinPT::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + translate_error([&] { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, + spin, atype, box, nghost, inlist, ago, fparam, aparam, atomic); + }); +} +#endif diff --git a/source/api_cc/src/DeepSpinTF.cc b/source/api_cc/src/DeepSpinTF.cc new file mode 100644 index 0000000000..ea110ebbf7 --- /dev/null +++ b/source/api_cc/src/DeepSpinTF.cc @@ -0,0 +1,1261 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#ifdef BUILD_TENSORFLOW +#include "DeepSpinTF.h" + +#include +#include + +#include "AtomMap.h" +#include "common.h" +#include "device.h" + +using namespace tensorflow; +using namespace deepmd; + +// start multiple frames + +template +static void run_model( + std::vector& dener, + std::vector& dforce_, + std::vector& dvirial, + Session* session, + const std::vector>& input_tensors, + const AtomMap& atommap, + const int nframes, + const int nghost = 0) { + unsigned nloc = atommap.get_type().size(); + unsigned nall = nloc + nghost; + dener.resize(nframes); + if (nloc == 0) { + // no backward map needed + // dforce of size nall * 3 + dforce_.resize(static_cast(nframes) * nall * 3); + fill(dforce_.begin(), dforce_.end(), (VALUETYPE)0.0); + // dvirial of size 9 + dvirial.resize(static_cast(nframes) * 9); + fill(dvirial.begin(), dvirial.end(), (VALUETYPE)0.0); + return; + } + + std::vector output_tensors; + check_status(session->Run( + input_tensors, {"o_energy", "o_force", "o_atom_energy", "o_atom_virial"}, + {}, &output_tensors)); + + Tensor output_e = output_tensors[0]; + Tensor output_f = output_tensors[1]; + Tensor output_av = output_tensors[3]; + + auto oe = output_e.flat(); + auto of = output_f.flat(); + auto oav = output_av.flat(); + + std::vector dforce(static_cast(nframes) * 3 * nall); + dvirial.resize(static_cast(nframes) * 9); + for (int ii = 0; ii < nframes; ++ii) { + dener[ii] = oe(ii); + } + for (size_t ii = 0; ii < static_cast(nframes) * nall * 3; ++ii) { + dforce[ii] = of(ii); + } + // set dvirial to zero, prevent input vector is not zero (#1123) + std::fill(dvirial.begin(), dvirial.end(), (VALUETYPE)0.); + for (int kk = 0; kk < nframes; ++kk) { + for (int ii = 0; ii < nall; ++ii) { + dvirial[kk * 9 + 0] += (VALUETYPE)1.0 * oav(kk * nall * 9 + 9 * ii + 0); + dvirial[kk * 9 + 1] += (VALUETYPE)1.0 * oav(kk * nall * 9 + 9 * ii + 1); + dvirial[kk * 9 + 2] += (VALUETYPE)1.0 * oav(kk * nall * 9 + 9 * ii + 2); + dvirial[kk * 9 + 3] += (VALUETYPE)1.0 * oav(kk * nall * 9 + 9 * ii + 3); + dvirial[kk * 9 + 4] += (VALUETYPE)1.0 * oav(kk * nall * 9 + 9 * ii + 4); + dvirial[kk * 9 + 5] += (VALUETYPE)1.0 * oav(kk * nall * 9 + 9 * ii + 5); + dvirial[kk * 9 + 6] += (VALUETYPE)1.0 * oav(kk * nall * 9 + 9 * ii + 6); + dvirial[kk * 9 + 7] += (VALUETYPE)1.0 * oav(kk * nall * 9 + 9 * ii + 7); + dvirial[kk * 9 + 8] += (VALUETYPE)1.0 * oav(kk * nall * 9 + 9 * ii + 8); + } + } + dforce_ = dforce; + atommap.backward(dforce_.begin(), dforce.begin(), 3, nframes, + nall); +} + +template void run_model( + std::vector& dener, + std::vector& dforce_, + std::vector& dvirial, + Session* session, + const std::vector>& input_tensors, + const AtomMap& atommap, + const int nframes, + const int nghost); + +template void run_model( + std::vector& dener, + std::vector& dforce_, + std::vector& dvirial, + Session* session, + const std::vector>& input_tensors, + const AtomMap& atommap, + const int nframes, + const int nghost); + +template void run_model( + std::vector& dener, + std::vector& dforce_, + std::vector& dvirial, + Session* session, + const std::vector>& input_tensors, + const AtomMap& atommap, + const int nframes, + const int nghost); + +template void run_model( + std::vector& dener, + std::vector& dforce_, + std::vector& dvirial, + Session* session, + const std::vector>& input_tensors, + const AtomMap& atommap, + const int nframes, + const int nghost); + +template +static void run_model( + std::vector& dener, + std::vector& dforce_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + Session* session, + const std::vector>& input_tensors, + const deepmd::AtomMap& atommap, + const int& nframes, + const int& nghost = 0) { + unsigned nloc = atommap.get_type().size(); + unsigned nall = nloc + nghost; + dener.resize(nframes); + if (nloc == 0) { + // no backward map needed + // dforce of size nall * 3 + dforce_.resize(static_cast(nframes) * nall * 3); + fill(dforce_.begin(), dforce_.end(), (VALUETYPE)0.0); + // dvirial of size 9 + dvirial.resize(static_cast(nframes) * 9); + fill(dvirial.begin(), dvirial.end(), (VALUETYPE)0.0); + // datom_energy_ of size nall + datom_energy_.resize(static_cast(nframes) * nall); + fill(datom_energy_.begin(), datom_energy_.end(), (VALUETYPE)0.0); + // datom_virial_ of size nall * 9 + datom_virial_.resize(static_cast(nframes) * nall * 9); + fill(datom_virial_.begin(), datom_virial_.end(), (VALUETYPE)0.0); + return; + } + std::vector output_tensors; + + check_status(session->Run( + input_tensors, {"o_energy", "o_force", "o_atom_energy", "o_atom_virial"}, + {}, &output_tensors)); + + Tensor output_e = output_tensors[0]; + Tensor output_f = output_tensors[1]; + Tensor output_ae = output_tensors[2]; + Tensor output_av = output_tensors[3]; + + auto oe = output_e.flat(); + auto of = output_f.flat(); + auto oae = output_ae.flat(); + auto oav = output_av.flat(); + + std::vector dforce(static_cast(nframes) * 3 * nall); + std::vector datom_energy(static_cast(nframes) * nall, 0); + std::vector datom_virial(static_cast(nframes) * 9 * nall); + dvirial.resize(static_cast(nframes) * 9); + for (int ii = 0; ii < nframes; ++ii) { + dener[ii] = oe(ii); + } + for (size_t ii = 0; ii < static_cast(nframes) * nall * 3; ++ii) { + dforce[ii] = of(ii); + } + for (int ii = 0; ii < nframes; ++ii) { + for (int jj = 0; jj < nloc; ++jj) { + datom_energy[ii * nall + jj] = oae(ii * nloc + jj); + } + } + for (size_t ii = 0; ii < static_cast(nframes) * nall * 9; ++ii) { + datom_virial[ii] = oav(ii); + } + // set dvirial to zero, prevent input vector is not zero (#1123) + std::fill(dvirial.begin(), dvirial.end(), (VALUETYPE)0.); + for (int kk = 0; kk < nframes; ++kk) { + for (int ii = 0; ii < nall; ++ii) { + dvirial[kk * 9 + 0] += + (VALUETYPE)1.0 * datom_virial[kk * nall * 9 + 9 * ii + 0]; + dvirial[kk * 9 + 1] += + (VALUETYPE)1.0 * datom_virial[kk * nall * 9 + 9 * ii + 1]; + dvirial[kk * 9 + 2] += + (VALUETYPE)1.0 * datom_virial[kk * nall * 9 + 9 * ii + 2]; + dvirial[kk * 9 + 3] += + (VALUETYPE)1.0 * datom_virial[kk * nall * 9 + 9 * ii + 3]; + dvirial[kk * 9 + 4] += + (VALUETYPE)1.0 * datom_virial[kk * nall * 9 + 9 * ii + 4]; + dvirial[kk * 9 + 5] += + (VALUETYPE)1.0 * datom_virial[kk * nall * 9 + 9 * ii + 5]; + dvirial[kk * 9 + 6] += + (VALUETYPE)1.0 * datom_virial[kk * nall * 9 + 9 * ii + 6]; + dvirial[kk * 9 + 7] += + (VALUETYPE)1.0 * datom_virial[kk * nall * 9 + 9 * ii + 7]; + dvirial[kk * 9 + 8] += + (VALUETYPE)1.0 * datom_virial[kk * nall * 9 + 9 * ii + 8]; + } + } + dforce_ = dforce; + datom_energy_ = datom_energy; + datom_virial_ = datom_virial; + atommap.backward(dforce_.begin(), dforce.begin(), 3, nframes, + nall); + atommap.backward(datom_energy_.begin(), datom_energy.begin(), 1, + nframes, nall); + atommap.backward(datom_virial_.begin(), datom_virial.begin(), 9, + nframes, nall); +} + +template void run_model( + std::vector& dener, + std::vector& dforce_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + Session* session, + const std::vector>& input_tensors, + const deepmd::AtomMap& atommap, + const int& nframes, + const int& nghost); + +template void run_model( + std::vector& dener, + std::vector& dforce_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + Session* session, + const std::vector>& input_tensors, + const deepmd::AtomMap& atommap, + const int& nframes, + const int& nghost); + +template void run_model( + std::vector& dener, + std::vector& dforce_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + Session* session, + const std::vector>& input_tensors, + const deepmd::AtomMap& atommap, + const int& nframes, + const int& nghost); + +template void run_model( + std::vector& dener, + std::vector& dforce_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + Session* session, + const std::vector>& input_tensors, + const deepmd::AtomMap& atommap, + const int& nframes, + const int& nghost); + +// end multiple frames + +// start single frame + +template +static void run_model( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dvirial, + Session* session, + const std::vector>& input_tensors, + const AtomMap& atommap, + const int nframes = 1, + const int nghost = 0) { + assert(nframes == 1); + std::vector dener_(1); + // call multi-frame version + run_model(dener_, dforce_, dvirial, session, + input_tensors, atommap, nframes, nghost); + dener = dener_[0]; +} + +template void run_model( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dvirial, + Session* session, + const std::vector>& input_tensors, + const AtomMap& atommap, + const int nframes, + const int nghost); + +template void run_model( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dvirial, + Session* session, + const std::vector>& input_tensors, + const AtomMap& atommap, + const int nframes, + const int nghost); + +template void run_model( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dvirial, + Session* session, + const std::vector>& input_tensors, + const AtomMap& atommap, + const int nframes, + const int nghost); + +template void run_model( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dvirial, + Session* session, + const std::vector>& input_tensors, + const AtomMap& atommap, + const int nframes, + const int nghost); + +template +static void run_model( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + Session* session, + const std::vector>& input_tensors, + const deepmd::AtomMap& atommap, + const int& nframes = 1, + const int& nghost = 0) { + assert(nframes == 1); + std::vector dener_(1); + // call multi-frame version + run_model(dener_, dforce_, dvirial, datom_energy_, + datom_virial_, session, input_tensors, + atommap, nframes, nghost); + dener = dener_[0]; +} + +template void run_model( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + Session* session, + const std::vector>& input_tensors, + const deepmd::AtomMap& atommap, + const int& nframes, + const int& nghost); + +template void run_model( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + Session* session, + const std::vector>& input_tensors, + const deepmd::AtomMap& atommap, + const int& nframes, + const int& nghost); + +template void run_model( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + Session* session, + const std::vector>& input_tensors, + const deepmd::AtomMap& atommap, + const int& nframes, + const int& nghost); + +template void run_model( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + Session* session, + const std::vector>& input_tensors, + const deepmd::AtomMap& atommap, + const int& nframes, + const int& nghost); + +// end single frame + +DeepSpinTF::DeepSpinTF() + : inited(false), init_nbor(false), graph_def(new GraphDef()) {} + +DeepSpinTF::DeepSpinTF(const std::string& model, + const int& gpu_rank, + const std::string& file_content) + : inited(false), init_nbor(false), graph_def(new GraphDef()) { + try { + init(model, gpu_rank, file_content); + } catch (...) { + // Clean up and rethrow, as the destructor will not be called + delete graph_def; + throw; + } +} + +DeepSpinTF::~DeepSpinTF() { delete graph_def; } + +void DeepSpinTF::init(const std::string& model, + const int& gpu_rank, + const std::string& file_content) { + if (inited) { + std::cerr << "WARNING: deepmd-kit should not be initialized twice, do " + "nothing at the second call of initializer" + << std::endl; + return; + } + SessionOptions options; + get_env_nthreads(num_intra_nthreads, num_inter_nthreads); + options.config.set_inter_op_parallelism_threads(num_inter_nthreads); + options.config.set_intra_op_parallelism_threads(num_intra_nthreads); + deepmd::load_op_library(); + + if (file_content.size() == 0) { + check_status(ReadBinaryProto(Env::Default(), model, graph_def)); + } else { + (*graph_def).ParseFromString(file_content); + } + int gpu_num = -1; +#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM + DPGetDeviceCount(gpu_num); // check current device environment + if (gpu_num > 0) { + options.config.set_allow_soft_placement(true); + options.config.mutable_gpu_options()->set_per_process_gpu_memory_fraction( + 0.9); + options.config.mutable_gpu_options()->set_allow_growth(true); + DPErrcheck(DPSetDevice(gpu_rank % gpu_num)); + std::string str = "/gpu:0"; + // See + // https://github.com/tensorflow/tensorflow/blame/8fac27b486939f40bc8e362b94a16a4a8bb51869/tensorflow/core/protobuf/config.proto#L80 + options.config.mutable_gpu_options()->set_visible_device_list( + std::to_string(gpu_rank % gpu_num)); + graph::SetDefaultDevice(str, graph_def); + } +#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM + check_status(NewSession(options, &session)); + check_status(session->Create(*graph_def)); + try { + model_version = get_scalar("model_attr/model_version"); + } catch (deepmd::tf_exception& e) { + // no model version defined in old models + model_version = "0.0"; + } + if (!model_compatable(model_version)) { + throw deepmd::deepmd_exception( + "incompatible model: version " + model_version + + " in graph, but version " + global_model_version + + " supported " + "See https://deepmd.rtfd.io/compatibility/ for details."); + } + dtype = session_get_dtype(session, "descrpt_attr/rcut"); + if (dtype == tensorflow::DT_DOUBLE) { + rcut = get_scalar("descrpt_attr/rcut"); + } else { + rcut = get_scalar("descrpt_attr/rcut"); + } + cell_size = rcut; + ntypes = get_scalar("descrpt_attr/ntypes"); + try { + ntypes_spin = get_scalar("spin_attr/ntypes_spin"); + } catch (const deepmd::deepmd_exception&) { + ntypes_spin = 0; + } + dfparam = get_scalar("fitting_attr/dfparam"); + daparam = get_scalar("fitting_attr/daparam"); + if (dfparam < 0) { + dfparam = 0; + } + if (daparam < 0) { + daparam = 0; + } + if (daparam > 0) { + try { + aparam_nall = get_scalar("fitting_attr/aparam_nall"); + } catch (const deepmd::deepmd_exception&) { + aparam_nall = false; + } + } else { + aparam_nall = false; + } + model_type = get_scalar("model_attr/model_type"); + inited = true; + + init_nbor = false; +} + +template +VT DeepSpinTF::get_scalar(const std::string& name) const { + return session_get_scalar(session, name); +} + +template +void DeepSpinTF::get_vector(std::vector& vec, + const std::string& name) const { + session_get_vector(vec, session, name); +} + +template +void DeepSpinTF::validate_fparam_aparam( + const int& nframes, + const int& nloc, + const std::vector& fparam, + const std::vector& aparam) const { + if (fparam.size() != dfparam && + fparam.size() != static_cast(nframes) * dfparam) { + throw deepmd::deepmd_exception( + "the dim of frame parameter provided is not consistent with what the " + "model uses"); + } + + if (aparam.size() != static_cast(daparam) * nloc && + aparam.size() != static_cast(nframes) * daparam * nloc) { + throw deepmd::deepmd_exception( + "the dim of atom parameter provided is not consistent with what the " + "model uses"); + } +} + +template void DeepSpinTF::validate_fparam_aparam( + const int& nframes, + const int& nloc, + const std::vector& fparam, + const std::vector& aparam) const; + +template void DeepSpinTF::validate_fparam_aparam( + const int& nframes, + const int& nloc, + const std::vector& fparam, + const std::vector& aparam) const; + +template +void DeepSpinTF::tile_fparam_aparam(std::vector& out_param, + const int& nframes, + const int& dparam, + const std::vector& param) const { + if (param.size() == dparam) { + out_param.resize(static_cast(nframes) * dparam); + for (int ii = 0; ii < nframes; ++ii) { + std::copy(param.begin(), param.end(), + out_param.begin() + static_cast(ii) * dparam); + } + } else if (param.size() == static_cast(nframes) * dparam) { + out_param = param; + } +} + +template void DeepSpinTF::tile_fparam_aparam( + std::vector& out_param, + const int& nframes, + const int& dparam, + const std::vector& param) const; + +template void DeepSpinTF::tile_fparam_aparam( + std::vector& out_param, + const int& nframes, + const int& dparam, + const std::vector& param) const; + +// ENERGYVTYPE: std::vector or ENERGYTYPE + +// support spin +template +void DeepSpinTF::compute(ENERGYVTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_, + const bool atomic) { + // if datype.size is 0, not clear nframes; but 1 is just ok + int nframes = datype_.size() > 0 ? (dcoord_.size() / 3 / datype_.size()) : 1; + int nloc = datype_.size(); + std::vector fparam; + std::vector aparam; + validate_fparam_aparam(nframes, nloc, fparam_, aparam_); + tile_fparam_aparam(fparam, nframes, dfparam, fparam_); + tile_fparam_aparam(aparam, nframes, nloc * daparam, aparam_); + + std::vector extend_dcoord; + std::vector extend_atype; + extend_nlist(extend_dcoord, extend_atype, dcoord_, dspin_, datype_); + + atommap = deepmd::AtomMap(extend_atype.begin(), extend_atype.end()); + + std::vector> input_tensors; + std::vector dforce_tmp; + + if (dtype == tensorflow::DT_DOUBLE) { + int ret = session_input_tensors( + input_tensors, extend_dcoord, ntypes, extend_atype, dbox, cell_size, + fparam, aparam, atommap, "", aparam_nall); + if (atomic) { + run_model(dener, dforce_tmp, dvirial, datom_energy_, + datom_virial_, session, input_tensors, atommap, + nframes); + } else { + run_model(dener, dforce_tmp, dvirial, session, input_tensors, + atommap, nframes); + } + } else { + int ret = session_input_tensors( + input_tensors, extend_dcoord, ntypes, extend_atype, dbox, cell_size, + fparam, aparam, atommap, "", aparam_nall); + if (atomic) { + run_model(dener, dforce_tmp, dvirial, datom_energy_, datom_virial_, + session, input_tensors, atommap, nframes); + } else { + run_model(dener, dforce_tmp, dvirial, session, input_tensors, + atommap, nframes); + } + } + // backward force and mag. + dforce_.resize(static_cast(nframes) * nloc * 3); + dforce_mag_.resize(static_cast(nframes) * nloc * 3); + for (int ii = 0; ii < nloc; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + dforce_[3 * ii + dd] = dforce_tmp[3 * ii + dd]; + if (datype_[ii] < ntypes_spin) { + dforce_mag_[3 * ii + dd] = dforce_tmp[3 * (ii + nloc) + dd]; + } else { + dforce_mag_[3 * ii + dd] = 0.0; + } + } + } +} + +template void DeepSpinTF::compute( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + +template void DeepSpinTF::compute( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + +template void DeepSpinTF::compute>( + std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + +template void DeepSpinTF::compute>( + std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic); + +// support spin +template +void DeepSpinTF::compute(ENERGYVTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__, + const bool atomic) { + int nall = datype_.size(); + // if nall==0, unclear nframes, but 1 is ok + int nframes = nall > 0 ? (dcoord_.size() / nall / 3) : 1; + int nloc = nall - nghost; + + std::vector virtual_len; + std::vector spin_norm; + std::vector extend_dcoord; + get_vector(virtual_len, "spin_attr/virtual_len"); + get_vector(spin_norm, "spin_attr/spin_norm"); + extend(extend_inum, extend_ilist, extend_numneigh, extend_neigh, + extend_firstneigh, extend_dcoord, extend_dtype, extend_nghost, + new_idx_map, old_idx_map, lmp_list, dcoord_, datype_, nghost, dspin_, + ntypes, ntypes_spin, virtual_len, spin_norm); + InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], &extend_numneigh[0], + &extend_firstneigh[0]); + std::vector fparam; + std::vector aparam_; + validate_fparam_aparam(nframes, (aparam_nall ? nall : nloc), fparam_, + aparam__); + tile_fparam_aparam(fparam, nframes, dfparam, fparam_); + tile_fparam_aparam(aparam_, nframes, (aparam_nall ? nall : nloc) * daparam, + aparam__); + std::vector> input_tensors; + // select real atoms + std::vector dcoord, dforce, aparam, datom_energy, datom_virial; + std::vector datype, fwd_map, bkw_map; + int nghost_real, nall_real, nloc_real; + select_real_atoms_coord(dcoord, datype, aparam, nghost_real, fwd_map, bkw_map, + nall_real, nloc_real, extend_dcoord, extend_dtype, + aparam_, extend_nghost, ntypes, nframes, daparam, + nall, aparam_nall); + + if (ago == 0) { + atommap = deepmd::AtomMap(datype.begin(), datype.begin() + nloc_real); + assert(nloc_real == atommap.get_type().size()); + + nlist_data.copy_from_nlist(extend_lmp_list); + nlist_data.shuffle_exclude_empty(fwd_map); + nlist_data.shuffle(atommap); + nlist_data.make_inlist(nlist); + } + + if (dtype == tensorflow::DT_DOUBLE) { + int ret = session_input_tensors( + input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, + atommap, nghost_real, ago, "", aparam_nall); + assert(nloc_real == ret); + if (atomic) { + run_model(dener, dforce, dvirial, datom_energy, datom_virial, + session, input_tensors, atommap, nframes, nghost_real); + } else { + run_model(dener, dforce, dvirial, session, input_tensors, atommap, + nframes, nghost_real); + } + } else { + int ret = session_input_tensors( + input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, + atommap, nghost_real, ago, "", aparam_nall); + assert(nloc_real == ret); + if (atomic) { + run_model(dener, dforce, dvirial, datom_energy, datom_virial, + session, input_tensors, atommap, nframes, nghost_real); + } else { + run_model(dener, dforce, dvirial, session, input_tensors, atommap, + nframes, nghost_real); + } + } + + // bkw map + std::vector dforce_tmp, datom_energy_tmp, datom_virial_tmp; + dforce_tmp.resize(static_cast(nframes) * fwd_map.size() * 3); + datom_energy_tmp.resize(static_cast(nframes) * fwd_map.size()); + datom_virial_tmp.resize(static_cast(nframes) * fwd_map.size() * 9); + select_map(dforce_tmp, dforce, bkw_map, 3, nframes, fwd_map.size(), + nall_real); + select_map(datom_energy_tmp, datom_energy, bkw_map, 1, nframes, + fwd_map.size(), nall_real); + select_map(datom_virial_tmp, datom_virial, bkw_map, 9, nframes, + fwd_map.size(), nall_real); + // backward force and mag. + dforce_.resize(static_cast(nframes) * nall * 3); + dforce_mag_.resize(static_cast(nframes) * nall * 3); + datom_energy_.resize(static_cast(nframes) * nall); + datom_virial_.resize(static_cast(nframes) * nall * 9); + for (int ii = 0; ii < nall; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + int new_idx = new_idx_map[ii]; + dforce_[3 * ii + dd] = dforce_tmp[3 * new_idx + dd]; + datom_energy_[ii] = datom_energy_tmp[new_idx]; + datom_virial_[ii] = datom_virial_tmp[new_idx]; + if (datype_[ii] < ntypes_spin && ii < nloc) { + dforce_mag_[3 * ii + dd] = dforce_tmp[3 * (new_idx + nloc) + dd]; + } else if (datype_[ii] < ntypes_spin) { + dforce_mag_[3 * ii + dd] = dforce_tmp[3 * (new_idx + nghost) + dd]; + } else { + dforce_mag_[3 * ii + dd] = 0.0; + } + } + } +} + +template void DeepSpinTF::compute( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_, + const bool atomic); + +template void DeepSpinTF::compute( + ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_, + const bool atomic); + +template void DeepSpinTF::compute>( + std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_, + const bool atomic); + +template void DeepSpinTF::compute>( + std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_, + const bool atomic); + +// end support spin + +void DeepSpinTF::get_type_map(std::string& type_map) { + type_map = get_scalar("model_attr/tmap"); +} + +// forward to template method +// support spin +void DeepSpinTF::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, + atype, box, fparam, aparam, atomic); +} +void DeepSpinTF::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, + atype, box, fparam, aparam, atomic); +} +// support spin +void DeepSpinTF::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, + atype, box, nghost, inlist, ago, fparam, aparam, atomic); +} +void DeepSpinTF::computew(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam, + const std::vector& aparam, + const bool atomic) { + compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, + atype, box, nghost, inlist, ago, fparam, aparam, atomic); +} + +void DeepSpinTF::cum_sum(std::map& sum, std::map& vec) { + sum[0] = 0; + for (int ii = 1; ii < vec.size(); ++ii) { + sum[ii] = sum[ii - 1] + vec[ii - 1]; + } +} + +template +void DeepSpinTF::extend(int& extend_inum, + std::vector& extend_ilist, + std::vector& extend_numneigh, + std::vector>& extend_neigh, + std::vector& extend_firstneigh, + std::vector& extend_dcoord, + std::vector& extend_atype, + int& extend_nghost, + std::map& new_idx_map, + std::map& old_idx_map, + const InputNlist& lmp_list, + const std::vector& dcoord, + const std::vector& atype, + const int nghost, + const std::vector& spin, + const int numb_types, + const int numb_types_spin, + const std::vector& virtual_len, + const std::vector& spin_norm) { + extend_ilist.clear(); + extend_numneigh.clear(); + extend_neigh.clear(); + extend_firstneigh.clear(); + extend_dcoord.clear(); + extend_atype.clear(); + + int nall = dcoord.size() / 3; + int nloc = nall - nghost; + assert(nloc == lmp_list.inum); + + // record numb_types_real and nloc_virt + int numb_types_real = numb_types - numb_types_spin; + std::map loc_type_count; + std::map::iterator iter = loc_type_count.begin(); + for (int i = 0; i < nloc; i++) { + iter = loc_type_count.find(atype[i]); + if (iter != loc_type_count.end()) { + iter->second += 1; + } else { + loc_type_count.insert(std::pair(atype[i], 1)); + } + } + assert(numb_types_real - 1 == loc_type_count.rbegin()->first); + int nloc_virt = 0; + for (int i = 0; i < numb_types_spin; i++) { + nloc_virt += loc_type_count[i]; + } + + // record nghost_virt + std::map ghost_type_count; + for (int i = nloc; i < nall; i++) { + iter = ghost_type_count.find(atype[i]); + if (iter != ghost_type_count.end()) { + iter->second += 1; + } else { + ghost_type_count.insert(std::pair(atype[i], 1)); + } + } + int nghost_virt = 0; + for (int i = 0; i < numb_types_spin; i++) { + nghost_virt += ghost_type_count[i]; + } + + // for extended system, search new index by old index, and vice versa + extend_nghost = nghost + nghost_virt; + int extend_nloc = nloc + nloc_virt; + int extend_nall = extend_nloc + extend_nghost; + std::map cum_loc_type_count; + std::map cum_ghost_type_count; + cum_sum(cum_loc_type_count, loc_type_count); + cum_sum(cum_ghost_type_count, ghost_type_count); + std::vector loc_type_reset(numb_types_real, 0); + std::vector ghost_type_reset(numb_types_real, 0); + + new_idx_map.clear(); + old_idx_map.clear(); + for (int ii = 0; ii < nloc; ii++) { + int new_idx = cum_loc_type_count[atype[ii]] + loc_type_reset[atype[ii]]; + new_idx_map[ii] = new_idx; + old_idx_map[new_idx] = ii; + loc_type_reset[atype[ii]]++; + } + for (int ii = nloc; ii < nall; ii++) { + int new_idx = cum_ghost_type_count[atype[ii]] + + ghost_type_reset[atype[ii]] + extend_nloc; + new_idx_map[ii] = new_idx; + old_idx_map[new_idx] = ii; + ghost_type_reset[atype[ii]]++; + } + + // extend lmp_list + extend_inum = extend_nloc; + + extend_ilist.resize(extend_nloc); + for (int ii = 0; ii < extend_nloc; ii++) { + extend_ilist[ii] = ii; + } + + extend_neigh.resize(extend_nloc); + for (int ii = 0; ii < nloc; ii++) { + int jnum = lmp_list.numneigh[old_idx_map[ii]]; + const int* jlist = lmp_list.firstneigh[old_idx_map[ii]]; + if (atype[old_idx_map[ii]] < numb_types_spin) { + extend_neigh[ii].push_back(ii + nloc); + } + for (int jj = 0; jj < jnum; jj++) { + int new_idx = new_idx_map[jlist[jj]]; + extend_neigh[ii].push_back(new_idx); + if (atype[jlist[jj]] < numb_types_spin && jlist[jj] < nloc) { + extend_neigh[ii].push_back(new_idx + nloc); + } else if (atype[jlist[jj]] < numb_types_spin && jlist[jj] < nall) { + extend_neigh[ii].push_back(new_idx + nghost); + } + } + } + for (int ii = nloc; ii < extend_nloc; ii++) { + extend_neigh[ii].assign(extend_neigh[ii - nloc].begin(), + extend_neigh[ii - nloc].end()); + std::vector::iterator it = + find(extend_neigh[ii].begin(), extend_neigh[ii].end(), ii); + *it = ii - nloc; + } + + extend_firstneigh.resize(extend_nloc); + extend_numneigh.resize(extend_nloc); + for (int ii = 0; ii < extend_nloc; ii++) { + extend_firstneigh[ii] = &extend_neigh[ii][0]; + extend_numneigh[ii] = extend_neigh[ii].size(); + } + + // extend coord + extend_dcoord.resize(static_cast(extend_nall) * 3); + for (int ii = 0; ii < nloc; ii++) { + for (int jj = 0; jj < 3; jj++) { + extend_dcoord[new_idx_map[ii] * 3 + jj] = dcoord[ii * 3 + jj]; + if (atype[ii] < numb_types_spin) { + double temp_dcoord = dcoord[ii * 3 + jj] + spin[ii * 3 + jj] / + spin_norm[atype[ii]] * + virtual_len[atype[ii]]; + extend_dcoord[(new_idx_map[ii] + nloc) * 3 + jj] = temp_dcoord; + } + } + } + for (int ii = nloc; ii < nall; ii++) { + for (int jj = 0; jj < 3; jj++) { + extend_dcoord[new_idx_map[ii] * 3 + jj] = dcoord[ii * 3 + jj]; + if (atype[ii] < numb_types_spin) { + double temp_dcoord = dcoord[ii * 3 + jj] + spin[ii * 3 + jj] / + spin_norm[atype[ii]] * + virtual_len[atype[ii]]; + extend_dcoord[(new_idx_map[ii] + nghost) * 3 + jj] = temp_dcoord; + } + } + } + + // extend atype + extend_atype.resize(extend_nall); + for (int ii = 0; ii < nall; ii++) { + extend_atype[new_idx_map[ii]] = atype[ii]; + if (atype[ii] < numb_types_spin) { + if (ii < nloc) { + extend_atype[new_idx_map[ii] + nloc] = atype[ii] + numb_types_real; + } else { + extend_atype[new_idx_map[ii] + nghost] = atype[ii] + numb_types_real; + } + } + } +} + +template void DeepSpinTF::extend( + int& extend_inum, + std::vector& extend_ilist, + std::vector& extend_numneigh, + std::vector>& extend_neigh, + std::vector& extend_firstneigh, + std::vector& extend_dcoord, + std::vector& extend_atype, + int& extend_nghost, + std::map& new_idx_map, + std::map& old_idx_map, + const InputNlist& lmp_list, + const std::vector& dcoord, + const std::vector& atype, + const int nghost, + const std::vector& spin, + const int numb_types, + const int numb_types_spin, + const std::vector& virtual_len, + const std::vector& spin_norm); + +template void DeepSpinTF::extend( + int& extend_inum, + std::vector& extend_ilist, + std::vector& extend_numneigh, + std::vector>& extend_neigh, + std::vector& extend_firstneigh, + std::vector& extend_dcoord, + std::vector& extend_atype, + int& extend_nghost, + std::map& new_idx_map, + std::map& old_idx_map, + const InputNlist& lmp_list, + const std::vector& dcoord, + const std::vector& atype, + const int nghost, + const std::vector& spin, + const int numb_types, + const int numb_types_spin, + const std::vector& virtual_len, + const std::vector& spin_norm); + +template +void DeepSpinTF::extend_nlist(std::vector& extend_dcoord, + std::vector& extend_atype, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_) { + if (dtype == tensorflow::DT_DOUBLE) { + get_vector(virtual_len, "spin_attr/virtual_len"); + get_vector(spin_norm, "spin_attr/spin_norm"); + } else { + std::vector virtual_len; + std::vector spin_norm; + get_vector(virtual_len, "spin_attr/virtual_len"); + get_vector(spin_norm, "spin_attr/spin_norm"); + } + // extend coord and atype + int nloc = datype_.size(); + int nloc_spin = 0; + for (int ii = 0; ii < nloc; ii++) { + if (datype_[ii] < ntypes_spin) { + nloc_spin += 1; + } + } + int extend_nall = nloc + nloc_spin; + extend_dcoord.resize(static_cast(extend_nall) * 3); + extend_atype.resize(extend_nall); + for (int ii = 0; ii < nloc; ii++) { + extend_atype[ii] = datype_[ii]; + if (datype_[ii] < ntypes_spin) { + extend_atype[ii + nloc] = datype_[ii] + ntypes - ntypes_spin; + } + for (int jj = 0; jj < 3; jj++) { + extend_dcoord[ii * 3 + jj] = dcoord_[ii * 3 + jj]; + if (datype_[ii] < ntypes_spin) { + extend_dcoord[(ii + nloc) * 3 + jj] = + dcoord_[ii * 3 + jj] + dspin_[ii * 3 + jj] / + spin_norm[datype_[ii]] * + virtual_len[datype_[ii]]; + } + } + } +} + +template void DeepSpinTF::extend_nlist( + std::vector& extend_dcoord, + std::vector& extend_atype, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_); + +template void DeepSpinTF::extend_nlist(std::vector& extend_dcoord, + std::vector& extend_atype, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_); +#endif diff --git a/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc b/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc index 4a40dffde2..9276489c7b 100644 --- a/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc +++ b/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc @@ -9,7 +9,7 @@ #include #include -#include "DeepPot.h" +#include "DeepSpin.h" #include "neighbor_list.h" #include "test_utils.h" @@ -82,7 +82,7 @@ class TestInferDeepPotDpaPtSpin : public ::testing::Test { double expected_tot_e; // std::vector expected_tot_v; - deepmd::DeepPot dp; + deepmd::DeepSpin dp; void SetUp() override { dp.init("../../tests/infer/deeppot_dpa_spin.pth"); @@ -122,7 +122,7 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist) { int& natoms = this->natoms; double& expected_tot_e = this->expected_tot_e; // std::vector& expected_tot_v = this->expected_tot_v; - deepmd::DeepPot& dp = this->dp; + deepmd::DeepSpin& dp = this->dp; double ener; std::vector force, force_mag, virial; dp.compute_spin(ener, force, force_mag, virial, coord, spin, atype, box); @@ -154,7 +154,7 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { int& natoms = this->natoms; double& expected_tot_e = this->expected_tot_e; // std::vector& expected_tot_v = this->expected_tot_v; - deepmd::DeepPot& dp = this->dp; + deepmd::DeepSpin& dp = this->dp; double ener; std::vector force, force_mag, virial, atom_ener, atom_vir; dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, @@ -181,3 +181,246 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); // } } + +// template +// class TestInferDeepPotDpaPtSpinNopbc : public ::testing::Test { +// protected: +// std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, +// 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, +// 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; +// std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., +// 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; +// std::vector atype = {0, 1, 1, 0, 1, 1}; +// std::vector box = {}; +// // Generated by the following Python code: +// // import numpy as np +// // from deepmd.infer import DeepPot +// // coord = np.array([ +// // 12.83, 2.56, 2.18, 12.09, 2.87, 2.74, +// // 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, +// // 3.51, 2.51, 2.60, 4.27, 3.22, 1.56 +// // ]).reshape(1, -1) +// // spin = np.array([ +// // 0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., +// // 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0. +// // ]).reshape(1, -1) +// // atype = np.array([0, 1, 1, 0, 1, 1]) +// // box = None +// // dp = DeepPot("deeppot_dpa_spin.pth") +// // e, f, _, ae, _, fm, _ = dp.eval(coord, box, atype, atomic=True, +// spin=spin) +// // np.set_printoptions(precision=16) +// // print(f"{e.ravel()=} {f.ravel()=} {fm.ravel()=} {ae.ravel()=}") + +// std::vector expected_e = { +// -5.921669893870771 , -5.1676693791758685, -5.205933794558385 , +// -5.58688965168251 , -5.080322972018686 , -5.08213772482076}; +// std::vector expected_f = { +// -0.2929142244191496, 0.0801070990501456, 0.148216178514704 , +// 0.2929142244191503, -0.0801070990501454, -0.1482161785147037, +// -0.2094984819251435, 0.0241594118950041, -0.0215199116994508, +// 0.3068843038300324, -0.001620530344866 , 0.1508093841389746, +// -0.0122719879278721, 0.0186341247897136, -0.1137104245023705, +// -0.0851138339770169, -0.0411730063398516, -0.0155790479371533}; +// std::vector expected_fm = { +// 1.5298530476860008, 0.0071315024546899, 0.0650492472558729, +// 0. , 0. , 0. , +// 0. , 0. , 0. , +// -0.6212052813442365, -0.2290265978320395, -0.5101405083352206, +// 0. , 0. , 0. , +// 0. , 0. , 0.}; + +// int natoms; +// double expected_tot_e; +// // std::vector expected_tot_v; + +// deepmd::DeepPot dp; + +// void SetUp() override { +// dp.init("../../tests/infer/deeppot_dpa_spin.pth"); + +// natoms = expected_e.size(); +// EXPECT_EQ(natoms * 3, expected_f.size()); +// EXPECT_EQ(natoms * 3, expected_fm.size()); +// // EXPECT_EQ(natoms * 9, expected_v.size()); +// expected_tot_e = 0.; +// // expected_tot_v.resize(9); +// // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); +// for (int ii = 0; ii < natoms; ++ii) { +// expected_tot_e += expected_e[ii]; +// } +// // for (int ii = 0; ii < natoms; ++ii) { +// // for (int dd = 0; dd < 9; ++dd) { +// // expected_tot_v[dd] += expected_v[ii * 9 + dd]; +// // } +// // } +// }; + +// void TearDown() override {}; +// }; + +// TYPED_TEST_SUITE(TestInferDeepPotDpaPtSpinNopbc, ValueTypes); + +// TYPED_TEST(TestInferDeepPotDpaPtSpinNopbc, cpu_build_nlist) { +// using VALUETYPE = TypeParam; +// const std::vector& coord = this->coord; +// const std::vector& spin = this->spin; +// std::vector& atype = this->atype; +// std::vector& box = this->box; +// std::vector& expected_e = this->expected_e; +// std::vector& expected_f = this->expected_f; +// std::vector& expected_fm = this->expected_fm; +// // std::vector& expected_v = this->expected_v; +// int& natoms = this->natoms; +// double& expected_tot_e = this->expected_tot_e; +// // std::vector& expected_tot_v = this->expected_tot_v; +// deepmd::DeepPot& dp = this->dp; +// double ener; +// std::vector force, force_mag, virial; +// dp.compute_spin(ener, force, force_mag, virial, coord, spin, atype, box); + +// EXPECT_EQ(force.size(), natoms * 3); +// EXPECT_EQ(force_mag.size(), natoms * 3); +// // EXPECT_EQ(virial.size(), 9); + +// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); +// for (int ii = 0; ii < natoms * 3; ++ii) { +// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); +// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); +// } +// // for (int ii = 0; ii < 3 * 3; ++ii) { +// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); +// // } +// } + +// TYPED_TEST(TestInferDeepPotDpaPtSpinNopbc, cpu_build_nlist_atomic) { +// using VALUETYPE = TypeParam; +// const std::vector& coord = this->coord; +// const std::vector& spin = this->spin; +// std::vector& atype = this->atype; +// std::vector& box = this->box; +// std::vector& expected_e = this->expected_e; +// std::vector& expected_f = this->expected_f; +// std::vector& expected_fm = this->expected_fm; +// // std::vector& expected_v = this->expected_v; +// int& natoms = this->natoms; +// double& expected_tot_e = this->expected_tot_e; +// // std::vector& expected_tot_v = this->expected_tot_v; +// deepmd::DeepPot& dp = this->dp; +// double ener; +// std::vector force, force_mag, virial, atom_ener, atom_vir; +// dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, +// spin, atype, box); + +// EXPECT_EQ(force.size(), natoms * 3); +// EXPECT_EQ(force_mag.size(), natoms * 3); +// // EXPECT_EQ(virial.size(), 9); +// EXPECT_EQ(atom_ener.size(), natoms); +// // EXPECT_EQ(atom_vir.size(), natoms * 9); + +// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); +// for (int ii = 0; ii < natoms * 3; ++ii) { +// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); +// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); +// } +// // for (int ii = 0; ii < 3 * 3; ++ii) { +// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); +// // } +// for (int ii = 0; ii < natoms; ++ii) { +// EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); +// } +// // for (int ii = 0; ii < natoms * 9; ++ii) { +// // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); +// // } +// } + +// TYPED_TEST(TestInferDeepPotDpaPtSpinNopbc, cpu_lmp_nlist) { +// using VALUETYPE = TypeParam; +// const std::vector& coord = this->coord; +// const std::vector& spin = this->spin; +// std::vector& atype = this->atype; +// std::vector& box = this->box; +// std::vector& expected_e = this->expected_e; +// std::vector& expected_f = this->expected_f; +// std::vector& expected_fm = this->expected_fm; +// // std::vector& expected_v = this->expected_v; +// int& natoms = this->natoms; +// double& expected_tot_e = this->expected_tot_e; +// // std::vector& expected_tot_v = this->expected_tot_v; +// deepmd::DeepPot& dp = this->dp; +// double ener; +// std::vector force, force_mag, virial; + +// std::vector > nlist_data = { +// {1, 2, 3, 4, 5}, {0, 2, 3, 4, 5}, {0, 1, 3, 4, 5}, +// {0, 1, 2, 4, 5}, {0, 1, 2, 3, 5}, {0, 1, 2, 3, 4}}; +// std::vector ilist(natoms), numneigh(natoms); +// std::vector firstneigh(natoms); +// deepmd::InputNlist inlist(natoms, &ilist[0], &numneigh[0], &firstneigh[0]); +// convert_nlist(inlist, nlist_data); +// dp.compute_spin(ener, force, force_mag, virial, coord, spin, atype, box, 0, +// inlist, 0); + +// EXPECT_EQ(force.size(), natoms * 3); +// EXPECT_EQ(force_mag.size(), natoms * 3); +// // EXPECT_EQ(virial.size(), 9); + +// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); +// for (int ii = 0; ii < natoms * 3; ++ii) { +// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); +// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); +// } +// // for (int ii = 0; ii < 3 * 3; ++ii) { +// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); +// // } +// } + +// TYPED_TEST(TestInferDeepPotDpaPtSpinNopbc, cpu_lmp_nlist_atomic) { +// using VALUETYPE = TypeParam; +// const std::vector& coord = this->coord; +// const std::vector& spin = this->spin; +// std::vector& atype = this->atype; +// std::vector& box = this->box; +// std::vector& expected_e = this->expected_e; +// std::vector& expected_f = this->expected_f; +// std::vector& expected_fm = this->expected_fm; +// // std::vector& expected_v = this->expected_v; +// int& natoms = this->natoms; +// double& expected_tot_e = this->expected_tot_e; +// // std::vector& expected_tot_v = this->expected_tot_v; +// deepmd::DeepPot& dp = this->dp; +// double ener; +// std::vector force, force_mag, virial, atom_ener, atom_vir; + +// std::vector > nlist_data = { +// {1, 2, 3, 4, 5}, {0, 2, 3, 4, 5}, {0, 1, 3, 4, 5}, +// {0, 1, 2, 4, 5}, {0, 1, 2, 3, 5}, {0, 1, 2, 3, 4}}; +// std::vector ilist(natoms), numneigh(natoms); +// std::vector firstneigh(natoms); +// deepmd::InputNlist inlist(natoms, &ilist[0], &numneigh[0], &firstneigh[0]); +// convert_nlist(inlist, nlist_data); +// dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, +// spin, atype, box, 0, +// inlist, 0); + +// EXPECT_EQ(force.size(), natoms * 3); +// EXPECT_EQ(force_mag.size(), natoms * 3); +// // EXPECT_EQ(virial.size(), 9); +// EXPECT_EQ(atom_ener.size(), natoms); +// // EXPECT_EQ(atom_vir.size(), natoms * 9); + +// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); +// for (int ii = 0; ii < natoms * 3; ++ii) { +// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); +// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); +// } +// // for (int ii = 0; ii < 3 * 3; ++ii) { +// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); +// // } +// for (int ii = 0; ii < natoms; ++ii) { +// EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); +// } +// // for (int ii = 0; ii < natoms * 9; ++ii) { +// // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); +// // } +// } diff --git a/source/api_cc/tests/test_deeppot_tf_spin.cc b/source/api_cc/tests/test_deeppot_tf_spin.cc index 23b79b64d7..1cab895e04 100644 --- a/source/api_cc/tests/test_deeppot_tf_spin.cc +++ b/source/api_cc/tests/test_deeppot_tf_spin.cc @@ -9,7 +9,7 @@ #include #include -#include "DeepPot.h" +#include "DeepSpin.h" #include "neighbor_list.h" #include "test_utils.h" @@ -37,7 +37,7 @@ class TestInferDeepPotSpin : public ::testing::Test { int natoms; double expected_tot_e; - deepmd::DeepPot dp; + deepmd::DeepSpin dp; void SetUp() override { std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; @@ -71,7 +71,7 @@ TYPED_TEST(TestInferDeepPotSpin, cpu_build_nlist) { std::vector& expected_fm = this->expected_fm; int& natoms = this->natoms; double& expected_tot_e = this->expected_tot_e; - deepmd::DeepPot& dp = this->dp; + deepmd::DeepSpin& dp = this->dp; double ener; std::vector force, force_mag, virial; dp.compute_spin(ener, force, force_mag, virial, coord, spin, atype, box); @@ -95,7 +95,7 @@ TYPED_TEST(TestInferDeepPotSpin, cpu_build_nlist_atomic) { std::vector& expected_fm = this->expected_fm; int& natoms = this->natoms; double& expected_tot_e = this->expected_tot_e; - deepmd::DeepPot& dp = this->dp; + deepmd::DeepSpin& dp = this->dp; double ener; std::vector force, force_mag, virial, atom_ener, atom_vir; dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, diff --git a/source/lmp/pair_base.cpp b/source/lmp/pair_base.cpp index 9f83e5b040..74501e705a 100644 --- a/source/lmp/pair_base.cpp +++ b/source/lmp/pair_base.cpp @@ -282,8 +282,14 @@ void PairDeepMDBase::cum_sum(std::map &sum, std::map &vec) { } } -PairDeepMDBase::PairDeepMDBase(LAMMPS *lmp, const char *cite_user_package) - : Pair(lmp) +PairDeepMDBase::PairDeepMDBase( + LAMMPS *lmp, + const char *cite_user_package, + deepmd_compat::DeepBaseModel &deep_model, + deepmd_compat::DeepBaseModelDevi &deep_model_devi) + : Pair(lmp), + deep_base(deep_model), + deep_base_model_devi(deep_model_devi) { if (lmp->citeme) { @@ -347,7 +353,7 @@ void PairDeepMDBase::print_summary(const string pre) const { cout << "Summary of lammps deepmd module ..." << endl; cout << pre << ">>> Info of deepmd-kit:" << endl; - deep_pot.print_summary(pre); + deep_base.print_summary(pre); cout << pre << ">>> Info of lammps module:" << endl; cout << pre << "use deepmd-kit at: " << STR_DEEPMD_ROOT << endl; cout << pre << "source: " << STR_GIT_SUMM << endl; @@ -398,289 +404,6 @@ void PairDeepMDBase::allocate() { } } -static bool is_key(const string &input) { - vector keys; - keys.push_back("out_freq"); - keys.push_back("out_file"); - keys.push_back("fparam"); - keys.push_back("aparam"); - keys.push_back("fparam_from_compute"); - keys.push_back("aparam_from_compute"); - keys.push_back("ttm"); - keys.push_back("atomic"); - keys.push_back("relative"); - keys.push_back("relative_v"); - keys.push_back("virtual_len"); - keys.push_back("spin_norm"); - - for (int ii = 0; ii < keys.size(); ++ii) { - if (input == keys[ii]) { - return true; - } - } - return false; -} - -void PairDeepMDBase::settings(int narg, char **arg) { - if (narg <= 0) { - error->all(FLERR, "Illegal pair_style command"); - } - - vector models; - int iarg = 0; - while (iarg < narg) { - if (is_key(arg[iarg])) { - break; - } - iarg++; - } - for (int ii = 0; ii < iarg; ++ii) { - models.push_back(arg[ii]); - } - numb_models = models.size(); - if (numb_models == 1) { - try { - deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - cutoff = deep_pot.cutoff() * dist_unit_cvt_factor; - numb_types = deep_pot.numb_types(); - numb_types_spin = deep_pot.numb_types_spin(); - dim_fparam = deep_pot.dim_fparam(); - dim_aparam = deep_pot.dim_aparam(); - } else { - try { - deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); - deep_pot_model_devi.init(models, get_node_rank(), - get_file_content(models)); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - cutoff = deep_pot_model_devi.cutoff() * dist_unit_cvt_factor; - numb_types = deep_pot_model_devi.numb_types(); - numb_types_spin = deep_pot_model_devi.numb_types_spin(); - dim_fparam = deep_pot_model_devi.dim_fparam(); - dim_aparam = deep_pot_model_devi.dim_aparam(); - assert(cutoff == deep_pot.cutoff() * dist_unit_cvt_factor); - assert(numb_types == deep_pot.numb_types()); - assert(numb_types_spin == deep_pot.numb_types_spin()); - assert(dim_fparam == deep_pot.dim_fparam()); - assert(dim_aparam == deep_pot.dim_aparam()); - } - - out_freq = 100; - out_file = "model_devi.out"; - out_each = 0; - out_rel = 0; - eps = 0.; - fparam.clear(); - aparam.clear(); - while (iarg < narg) { - if (!is_key(arg[iarg])) { - error->all(FLERR, - "Illegal pair_style command\nwrong number of parameters\n"); - } - if (string(arg[iarg]) == string("out_freq")) { - if (iarg + 1 >= narg) { - error->all(FLERR, "Illegal out_freq, not provided"); - } - out_freq = atoi(arg[iarg + 1]); - iarg += 2; - } else if (string(arg[iarg]) == string("out_file")) { - if (iarg + 1 >= narg) { - error->all(FLERR, "Illegal out_file, not provided"); - } - out_file = string(arg[iarg + 1]); - iarg += 2; - } else if (string(arg[iarg]) == string("fparam")) { - for (int ii = 0; ii < dim_fparam; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - char tmp[1024]; - sprintf(tmp, "Illegal fparam, the dimension should be %d", - dim_fparam); - error->all(FLERR, tmp); - } - fparam.push_back(atof(arg[iarg + 1 + ii])); - } - iarg += 1 + dim_fparam; - } else if (string(arg[iarg]) == string("aparam")) { - for (int ii = 0; ii < dim_aparam; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - char tmp[1024]; - sprintf(tmp, "Illegal aparam, the dimension should be %d", - dim_aparam); - error->all(FLERR, tmp); - } - aparam.push_back(atof(arg[iarg + 1 + ii])); - } - iarg += 1 + dim_aparam; - } else if (string(arg[iarg]) == string("ttm")) { -#ifdef USE_TTM - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, "invalid ttm key: should be ttm ttm_fix_id(str)"); - } - } - do_ttm = true; - ttm_fix_id = arg[iarg + 1]; - iarg += 1 + 1; -#else - error->all(FLERR, - "The deepmd-kit was compiled without support for TTM, please " - "rebuild it with LAMMPS version >=20210831"); -#endif - } - - /////////////////////////////////////////////// - // pair_style deepmd cp.pb fparam_from_compute TEMP - // compute TEMP all temp - ////////////////////////////////////////////// - else if (string(arg[iarg]) == string("fparam_from_compute")) { - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, - "invalid fparam_from_compute key: should be " - "fparam_from_compute compute_fparam_id(str)"); - } - } - do_compute_fparam = true; - compute_fparam_id = arg[iarg + 1]; - iarg += 1 + 1; - } else if (string(arg[iarg]) == string("aparam_from_compute")) { - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, - "invalid aparam_from_compute key: should be " - "aparam_from_compute compute_aparam_id(str)"); - } - } - do_compute_aparam = true; - compute_aparam_id = arg[iarg + 1]; - iarg += 1 + 1; - } else if (string(arg[iarg]) == string("atomic")) { - out_each = 1; - iarg += 1; - } else if (string(arg[iarg]) == string("relative")) { - out_rel = 1; - eps = atof(arg[iarg + 1]) / ener_unit_cvt_factor; - iarg += 2; - } else if (string(arg[iarg]) == string("relative_v")) { - out_rel_v = 1; - eps_v = atof(arg[iarg + 1]) / ener_unit_cvt_factor; - iarg += 2; - } else if (string(arg[iarg]) == string("virtual_len")) { - virtual_len.resize(numb_types_spin); - for (int ii = 0; ii < numb_types_spin; ++ii) { - virtual_len[ii] = atof(arg[iarg + ii + 1]); - } - iarg += numb_types_spin + 1; - } else if (string(arg[iarg]) == string("spin_norm")) { - spin_norm.resize(numb_types_spin); - for (int ii = 0; ii < numb_types_spin; ++ii) { - spin_norm[ii] = atof(arg[iarg + ii + 1]); - } - iarg += numb_types_spin + 1; - } - } - - if (out_freq < 0) { - error->all(FLERR, "Illegal out_freq, should be >= 0"); - } - if ((int)do_ttm + (int)do_compute_aparam + (int)(aparam.size() > 0) > 1) { - error->all(FLERR, - "aparam, aparam_from_compute, and ttm should NOT be set " - "simultaneously"); - } - if (do_compute_fparam && fparam.size() > 0) { - error->all( - FLERR, - "fparam and fparam_from_compute should NOT be set simultaneously"); - } - - if (comm->me == 0) { - if (numb_models > 1 && out_freq > 0) { - if (!is_restart) { - fp.open(out_file); - fp << scientific; - if (!atom->sp_flag) { - fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" - << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" - << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" - << setw(18 + 1) << "avg_devi_f"; - if (out_each) { - // at this time, we don't know how many atoms - fp << setw(18 + 1) << "atm_devi_f(N)"; - } - fp << endl; - } else { - fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" - << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" - << setw(18 + 1) << "max_devi_fr" << setw(18 + 1) << "min_devi_fr" - << setw(18 + 1) << "avg_devi_fr" << setw(18 + 1) << "max_devi_fm" - << setw(18 + 1) << "min_devi_fm" << setw(18 + 1) << "avg_devi_fm" - << endl; - } - } else { - fp.open(out_file, std::ofstream::out | std::ofstream::app); - fp << scientific; - } - } - string pre = " "; - cout << pre << ">>> Info of model(s):" << endl - << pre << "using " << setw(3) << numb_models << " model(s): "; - if (narg == 1) { - cout << arg[0] << " "; - } else { - for (int ii = 0; ii < models.size(); ++ii) { - cout << models[ii] << " "; - } - } - cout << endl - << pre << "rcut in model: " << cutoff << endl - << pre << "ntypes in model: " << numb_types << endl; - if (fparam.size() > 0) { - cout << pre << "using fparam(s): "; - for (int ii = 0; ii < dim_fparam; ++ii) { - cout << fparam[ii] << " "; - } - cout << endl; - } - if (do_compute_fparam) { - cout << pre << "using compute id (fparam): "; - cout << compute_fparam_id << " " << endl; - } - if (do_compute_aparam) { - cout << pre << "using compute id (aparam): "; - cout << compute_aparam_id << " " << endl; - } - if (aparam.size() > 0) { - cout << pre << "using aparam(s): "; - for (int ii = 0; ii < aparam.size(); ++ii) { - cout << aparam[ii] << " "; - } - cout << endl; - } - if (do_ttm) { - cout << pre << "using ttm fix: "; - cout << ttm_fix_id << " "; - if (dim_fparam > 0) { - cout << "(fparam)" << endl; - } else if (dim_aparam > 0) { - cout << "(aparam)" << endl; - } - } - } - - // comm_reverse = numb_models * 3; - if (atom->sp_flag) { - comm_reverse = numb_models * 3 * 2; - } else { - comm_reverse = numb_models * 3; - } - all_force.resize(numb_models); -} - void PairDeepMDBase::read_restart(FILE *) { is_restart = true; } void PairDeepMDBase::write_restart(FILE *) { @@ -724,7 +447,7 @@ void PairDeepMDBase::coeff(int narg, char **arg) { // the number of types in the system matches that in the model std::vector type_map; std::string type_map_str; - deep_pot.get_type_map(type_map_str); + deep_base.get_type_map(type_map_str); // convert the string to a vector of strings std::istringstream iss(type_map_str); std::string type_name; diff --git a/source/lmp/pair_base.h b/source/lmp/pair_base.h index 68fc4c1bde..47d97591cd 100644 --- a/source/lmp/pair_base.h +++ b/source/lmp/pair_base.h @@ -9,9 +9,9 @@ #include "pair.h" #ifdef DP_USE_CXX_API #ifdef LMPPLUGIN -#include "DeepPot.h" +#include "DeepBaseModel.h" #else -#include "deepmd/DeepPot.h" +#include "deepmd/DeepBaseModel.h" #endif namespace deepmd_compat = deepmd; #else @@ -30,10 +30,12 @@ namespace deepmd_compat = deepmd::hpp; namespace LAMMPS_NS { class PairDeepMDBase : public Pair { public: - PairDeepMDBase(class LAMMPS *, const char *); + PairDeepMDBase(class LAMMPS *, + const char *, + deepmd_compat::DeepBaseModel &, + deepmd_compat::DeepBaseModelDevi &); ~PairDeepMDBase() override; void *extract(const char *, int &) override; - void settings(int, char **) override; void coeff(int, char **) override; void init_style() override; void write_restart(FILE *) override; @@ -50,8 +52,8 @@ class PairDeepMDBase : public Pair { double ener_unit_cvt_factor, dist_unit_cvt_factor, force_unit_cvt_factor; protected: - deepmd_compat::DeepPot deep_pot; - deepmd_compat::DeepPotModelDevi deep_pot_model_devi; + deepmd_compat::DeepBaseModel deep_base; + deepmd_compat::DeepBaseModelDevi deep_base_model_devi; virtual void allocate(); double **scale; unsigned numb_models; @@ -59,7 +61,6 @@ class PairDeepMDBase : public Pair { int numb_types; int numb_types_spin; std::vector > all_force; - std::vector > all_force_mag; std::ofstream fp; int out_freq; std::string out_file; diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index d05e0df626..573d6a63b6 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -85,7 +85,8 @@ static const char cite_user_deepmd_package[] = "}\n\n"; PairDeepMD::PairDeepMD(LAMMPS *lmp) - : PairDeepMDBase(lmp, cite_user_deepmd_package) { + : PairDeepMDBase( + lmp, cite_user_deepmd_package, deep_pot, deep_pot_model_devi) { // Constructor body can be empty } @@ -93,6 +94,289 @@ PairDeepMD::~PairDeepMD() { // Ensure base class destructor is called } +static bool is_key(const string &input) { + vector keys; + keys.push_back("out_freq"); + keys.push_back("out_file"); + keys.push_back("fparam"); + keys.push_back("aparam"); + keys.push_back("fparam_from_compute"); + keys.push_back("aparam_from_compute"); + keys.push_back("ttm"); + keys.push_back("atomic"); + keys.push_back("relative"); + keys.push_back("relative_v"); + keys.push_back("virtual_len"); + keys.push_back("spin_norm"); + + for (int ii = 0; ii < keys.size(); ++ii) { + if (input == keys[ii]) { + return true; + } + } + return false; +} + +void PairDeepMD::settings(int narg, char **arg) { + if (narg <= 0) { + error->all(FLERR, "Illegal pair_style command"); + } + + vector models; + int iarg = 0; + while (iarg < narg) { + if (is_key(arg[iarg])) { + break; + } + iarg++; + } + for (int ii = 0; ii < iarg; ++ii) { + models.push_back(arg[ii]); + } + numb_models = models.size(); + if (numb_models == 1) { + try { + deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + cutoff = deep_pot.cutoff() * dist_unit_cvt_factor; + numb_types = deep_pot.numb_types(); + numb_types_spin = deep_pot.numb_types_spin(); + dim_fparam = deep_pot.dim_fparam(); + dim_aparam = deep_pot.dim_aparam(); + } else { + try { + deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); + deep_pot_model_devi.init(models, get_node_rank(), + get_file_content(models)); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + cutoff = deep_pot_model_devi.cutoff() * dist_unit_cvt_factor; + numb_types = deep_pot_model_devi.numb_types(); + numb_types_spin = deep_pot_model_devi.numb_types_spin(); + dim_fparam = deep_pot_model_devi.dim_fparam(); + dim_aparam = deep_pot_model_devi.dim_aparam(); + assert(cutoff == deep_pot.cutoff() * dist_unit_cvt_factor); + assert(numb_types == deep_pot.numb_types()); + assert(numb_types_spin == deep_pot.numb_types_spin()); + assert(dim_fparam == deep_pot.dim_fparam()); + assert(dim_aparam == deep_pot.dim_aparam()); + } + + out_freq = 100; + out_file = "model_devi.out"; + out_each = 0; + out_rel = 0; + eps = 0.; + fparam.clear(); + aparam.clear(); + while (iarg < narg) { + if (!is_key(arg[iarg])) { + error->all(FLERR, + "Illegal pair_style command\nwrong number of parameters\n"); + } + if (string(arg[iarg]) == string("out_freq")) { + if (iarg + 1 >= narg) { + error->all(FLERR, "Illegal out_freq, not provided"); + } + out_freq = atoi(arg[iarg + 1]); + iarg += 2; + } else if (string(arg[iarg]) == string("out_file")) { + if (iarg + 1 >= narg) { + error->all(FLERR, "Illegal out_file, not provided"); + } + out_file = string(arg[iarg + 1]); + iarg += 2; + } else if (string(arg[iarg]) == string("fparam")) { + for (int ii = 0; ii < dim_fparam; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + char tmp[1024]; + sprintf(tmp, "Illegal fparam, the dimension should be %d", + dim_fparam); + error->all(FLERR, tmp); + } + fparam.push_back(atof(arg[iarg + 1 + ii])); + } + iarg += 1 + dim_fparam; + } else if (string(arg[iarg]) == string("aparam")) { + for (int ii = 0; ii < dim_aparam; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + char tmp[1024]; + sprintf(tmp, "Illegal aparam, the dimension should be %d", + dim_aparam); + error->all(FLERR, tmp); + } + aparam.push_back(atof(arg[iarg + 1 + ii])); + } + iarg += 1 + dim_aparam; + } else if (string(arg[iarg]) == string("ttm")) { +#ifdef USE_TTM + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, "invalid ttm key: should be ttm ttm_fix_id(str)"); + } + } + do_ttm = true; + ttm_fix_id = arg[iarg + 1]; + iarg += 1 + 1; +#else + error->all(FLERR, + "The deepmd-kit was compiled without support for TTM, please " + "rebuild it with LAMMPS version >=20210831"); +#endif + } + + /////////////////////////////////////////////// + // pair_style deepmd cp.pb fparam_from_compute TEMP + // compute TEMP all temp + ////////////////////////////////////////////// + else if (string(arg[iarg]) == string("fparam_from_compute")) { + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, + "invalid fparam_from_compute key: should be " + "fparam_from_compute compute_fparam_id(str)"); + } + } + do_compute_fparam = true; + compute_fparam_id = arg[iarg + 1]; + iarg += 1 + 1; + } else if (string(arg[iarg]) == string("aparam_from_compute")) { + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, + "invalid aparam_from_compute key: should be " + "aparam_from_compute compute_aparam_id(str)"); + } + } + do_compute_aparam = true; + compute_aparam_id = arg[iarg + 1]; + iarg += 1 + 1; + } else if (string(arg[iarg]) == string("atomic")) { + out_each = 1; + iarg += 1; + } else if (string(arg[iarg]) == string("relative")) { + out_rel = 1; + eps = atof(arg[iarg + 1]) / ener_unit_cvt_factor; + iarg += 2; + } else if (string(arg[iarg]) == string("relative_v")) { + out_rel_v = 1; + eps_v = atof(arg[iarg + 1]) / ener_unit_cvt_factor; + iarg += 2; + } else if (string(arg[iarg]) == string("virtual_len")) { + virtual_len.resize(numb_types_spin); + for (int ii = 0; ii < numb_types_spin; ++ii) { + virtual_len[ii] = atof(arg[iarg + ii + 1]); + } + iarg += numb_types_spin + 1; + } else if (string(arg[iarg]) == string("spin_norm")) { + spin_norm.resize(numb_types_spin); + for (int ii = 0; ii < numb_types_spin; ++ii) { + spin_norm[ii] = atof(arg[iarg + ii + 1]); + } + iarg += numb_types_spin + 1; + } + } + + if (out_freq < 0) { + error->all(FLERR, "Illegal out_freq, should be >= 0"); + } + if ((int)do_ttm + (int)do_compute_aparam + (int)(aparam.size() > 0) > 1) { + error->all(FLERR, + "aparam, aparam_from_compute, and ttm should NOT be set " + "simultaneously"); + } + if (do_compute_fparam && fparam.size() > 0) { + error->all( + FLERR, + "fparam and fparam_from_compute should NOT be set simultaneously"); + } + + if (comm->me == 0) { + if (numb_models > 1 && out_freq > 0) { + if (!is_restart) { + fp.open(out_file); + fp << scientific; + if (!atom->sp_flag) { + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" + << setw(18 + 1) << "avg_devi_f"; + if (out_each) { + // at this time, we don't know how many atoms + fp << setw(18 + 1) << "atm_devi_f(N)"; + } + fp << endl; + } else { + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_fr" << setw(18 + 1) << "min_devi_fr" + << setw(18 + 1) << "avg_devi_fr" << setw(18 + 1) << "max_devi_fm" + << setw(18 + 1) << "min_devi_fm" << setw(18 + 1) << "avg_devi_fm" + << endl; + } + } else { + fp.open(out_file, std::ofstream::out | std::ofstream::app); + fp << scientific; + } + } + string pre = " "; + cout << pre << ">>> Info of model(s):" << endl + << pre << "using " << setw(3) << numb_models << " model(s): "; + if (narg == 1) { + cout << arg[0] << " "; + } else { + for (int ii = 0; ii < models.size(); ++ii) { + cout << models[ii] << " "; + } + } + cout << endl + << pre << "rcut in model: " << cutoff << endl + << pre << "ntypes in model: " << numb_types << endl; + if (fparam.size() > 0) { + cout << pre << "using fparam(s): "; + for (int ii = 0; ii < dim_fparam; ++ii) { + cout << fparam[ii] << " "; + } + cout << endl; + } + if (do_compute_fparam) { + cout << pre << "using compute id (fparam): "; + cout << compute_fparam_id << " " << endl; + } + if (do_compute_aparam) { + cout << pre << "using compute id (aparam): "; + cout << compute_aparam_id << " " << endl; + } + if (aparam.size() > 0) { + cout << pre << "using aparam(s): "; + for (int ii = 0; ii < aparam.size(); ++ii) { + cout << aparam[ii] << " "; + } + cout << endl; + } + if (do_ttm) { + cout << pre << "using ttm fix: "; + cout << ttm_fix_id << " "; + if (dim_fparam > 0) { + cout << "(fparam)" << endl; + } else if (dim_aparam > 0) { + cout << "(aparam)" << endl; + } + } + } + + // comm_reverse = numb_models * 3; + if (atom->sp_flag) { + comm_reverse = numb_models * 3 * 2; + } else { + comm_reverse = numb_models * 3; + } + all_force.resize(numb_models); +} + void PairDeepMD::compute(int eflag, int vflag) { if (numb_models == 0) { return; @@ -136,7 +420,6 @@ void PairDeepMD::compute(int eflag, int vflag) { double dener(0); vector dforce(nall * 3); - vector dforce_mag(nall * 3); vector dvirial(9, 0); vector dcoord(nall * 3, 0.); vector dbox(9, 0); @@ -290,7 +573,6 @@ void PairDeepMD::compute(int eflag, int vflag) { // deep_pot_model_devi.compute_avg (dvatom, all_atom_virial); dener = all_energy[0]; dforce = all_force[0]; - dforce_mag = all_force_mag[0]; dvirial = all_virial[0]; if (eflag_atom) { deatom = all_atom_energy[0]; diff --git a/source/lmp/pair_deepmd.h b/source/lmp/pair_deepmd.h index 5a9024e3d7..5f29134277 100644 --- a/source/lmp/pair_deepmd.h +++ b/source/lmp/pair_deepmd.h @@ -12,6 +12,22 @@ PairStyle(deepmd, PairDeepMD) #ifndef LMP_PAIR_NNP_H #define LMP_PAIR_NNP_H +#ifdef DP_USE_CXX_API +#ifdef LMPPLUGIN +#include "DeepPot.h" +#else +#include "deepmd/DeepPot.h" +#endif +namespace deepmd_compat = deepmd; +#else +#ifdef LMPPLUGIN +#include "deepmd.hpp" +#else +#include "deepmd/deepmd.hpp" +#endif +namespace deepmd_compat = deepmd::hpp; +#endif + #include #include #include @@ -28,10 +44,15 @@ class PairDeepMD : public PairDeepMDBase { public: PairDeepMD(class LAMMPS *); ~PairDeepMD() override; + void settings(int, char **) override; void compute(int, int) override; int pack_reverse_comm(int, int, double *) override; void unpack_reverse_comm(int, int *, double *) override; + protected: + deepmd_compat::DeepPot deep_pot; + deepmd_compat::DeepPotModelDevi deep_pot_model_devi; + private: CommBrickDeepMD *commdata_; }; diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp index 01ef220586..70b24b058c 100644 --- a/source/lmp/pair_deepspin.cpp +++ b/source/lmp/pair_deepspin.cpp @@ -85,7 +85,8 @@ static const char cite_user_deepmd_package[] = "}\n\n"; PairDeepSpin::PairDeepSpin(LAMMPS *lmp) - : PairDeepMDBase(lmp, cite_user_deepmd_package) { + : PairDeepMDBase( + lmp, cite_user_deepmd_package, deep_spin, deep_spin_model_devi) { // Constructor body can be empty } @@ -93,6 +94,289 @@ PairDeepSpin::~PairDeepSpin() { // Ensure base class destructor is called } +static bool is_key(const string &input) { + vector keys; + keys.push_back("out_freq"); + keys.push_back("out_file"); + keys.push_back("fparam"); + keys.push_back("aparam"); + keys.push_back("fparam_from_compute"); + keys.push_back("aparam_from_compute"); + keys.push_back("ttm"); + keys.push_back("atomic"); + keys.push_back("relative"); + keys.push_back("relative_v"); + keys.push_back("virtual_len"); + keys.push_back("spin_norm"); + + for (int ii = 0; ii < keys.size(); ++ii) { + if (input == keys[ii]) { + return true; + } + } + return false; +} + +void PairDeepSpin::settings(int narg, char **arg) { + if (narg <= 0) { + error->all(FLERR, "Illegal pair_style command"); + } + + vector models; + int iarg = 0; + while (iarg < narg) { + if (is_key(arg[iarg])) { + break; + } + iarg++; + } + for (int ii = 0; ii < iarg; ++ii) { + models.push_back(arg[ii]); + } + numb_models = models.size(); + if (numb_models == 1) { + try { + deep_spin.init(arg[0], get_node_rank(), get_file_content(arg[0])); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + cutoff = deep_spin.cutoff() * dist_unit_cvt_factor; + numb_types = deep_spin.numb_types(); + numb_types_spin = deep_spin.numb_types_spin(); + dim_fparam = deep_spin.dim_fparam(); + dim_aparam = deep_spin.dim_aparam(); + } else { + try { + deep_spin.init(arg[0], get_node_rank(), get_file_content(arg[0])); + deep_spin_model_devi.init(models, get_node_rank(), + get_file_content(models)); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + cutoff = deep_spin_model_devi.cutoff() * dist_unit_cvt_factor; + numb_types = deep_spin_model_devi.numb_types(); + numb_types_spin = deep_spin_model_devi.numb_types_spin(); + dim_fparam = deep_spin_model_devi.dim_fparam(); + dim_aparam = deep_spin_model_devi.dim_aparam(); + assert(cutoff == deep_spin.cutoff() * dist_unit_cvt_factor); + assert(numb_types == deep_spin.numb_types()); + assert(numb_types_spin == deep_spin.numb_types_spin()); + assert(dim_fparam == deep_spin.dim_fparam()); + assert(dim_aparam == deep_spin.dim_aparam()); + } + + out_freq = 100; + out_file = "model_devi.out"; + out_each = 0; + out_rel = 0; + eps = 0.; + fparam.clear(); + aparam.clear(); + while (iarg < narg) { + if (!is_key(arg[iarg])) { + error->all(FLERR, + "Illegal pair_style command\nwrong number of parameters\n"); + } + if (string(arg[iarg]) == string("out_freq")) { + if (iarg + 1 >= narg) { + error->all(FLERR, "Illegal out_freq, not provided"); + } + out_freq = atoi(arg[iarg + 1]); + iarg += 2; + } else if (string(arg[iarg]) == string("out_file")) { + if (iarg + 1 >= narg) { + error->all(FLERR, "Illegal out_file, not provided"); + } + out_file = string(arg[iarg + 1]); + iarg += 2; + } else if (string(arg[iarg]) == string("fparam")) { + for (int ii = 0; ii < dim_fparam; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + char tmp[1024]; + sprintf(tmp, "Illegal fparam, the dimension should be %d", + dim_fparam); + error->all(FLERR, tmp); + } + fparam.push_back(atof(arg[iarg + 1 + ii])); + } + iarg += 1 + dim_fparam; + } else if (string(arg[iarg]) == string("aparam")) { + for (int ii = 0; ii < dim_aparam; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + char tmp[1024]; + sprintf(tmp, "Illegal aparam, the dimension should be %d", + dim_aparam); + error->all(FLERR, tmp); + } + aparam.push_back(atof(arg[iarg + 1 + ii])); + } + iarg += 1 + dim_aparam; + } else if (string(arg[iarg]) == string("ttm")) { +#ifdef USE_TTM + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, "invalid ttm key: should be ttm ttm_fix_id(str)"); + } + } + do_ttm = true; + ttm_fix_id = arg[iarg + 1]; + iarg += 1 + 1; +#else + error->all(FLERR, + "The deepmd-kit was compiled without support for TTM, please " + "rebuild it with LAMMPS version >=20210831"); +#endif + } + + /////////////////////////////////////////////// + // pair_style deepmd cp.pb fparam_from_compute TEMP + // compute TEMP all temp + ////////////////////////////////////////////// + else if (string(arg[iarg]) == string("fparam_from_compute")) { + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, + "invalid fparam_from_compute key: should be " + "fparam_from_compute compute_fparam_id(str)"); + } + } + do_compute_fparam = true; + compute_fparam_id = arg[iarg + 1]; + iarg += 1 + 1; + } else if (string(arg[iarg]) == string("aparam_from_compute")) { + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, + "invalid aparam_from_compute key: should be " + "aparam_from_compute compute_aparam_id(str)"); + } + } + do_compute_aparam = true; + compute_aparam_id = arg[iarg + 1]; + iarg += 1 + 1; + } else if (string(arg[iarg]) == string("atomic")) { + out_each = 1; + iarg += 1; + } else if (string(arg[iarg]) == string("relative")) { + out_rel = 1; + eps = atof(arg[iarg + 1]) / ener_unit_cvt_factor; + iarg += 2; + } else if (string(arg[iarg]) == string("relative_v")) { + out_rel_v = 1; + eps_v = atof(arg[iarg + 1]) / ener_unit_cvt_factor; + iarg += 2; + } else if (string(arg[iarg]) == string("virtual_len")) { + virtual_len.resize(numb_types_spin); + for (int ii = 0; ii < numb_types_spin; ++ii) { + virtual_len[ii] = atof(arg[iarg + ii + 1]); + } + iarg += numb_types_spin + 1; + } else if (string(arg[iarg]) == string("spin_norm")) { + spin_norm.resize(numb_types_spin); + for (int ii = 0; ii < numb_types_spin; ++ii) { + spin_norm[ii] = atof(arg[iarg + ii + 1]); + } + iarg += numb_types_spin + 1; + } + } + + if (out_freq < 0) { + error->all(FLERR, "Illegal out_freq, should be >= 0"); + } + if ((int)do_ttm + (int)do_compute_aparam + (int)(aparam.size() > 0) > 1) { + error->all(FLERR, + "aparam, aparam_from_compute, and ttm should NOT be set " + "simultaneously"); + } + if (do_compute_fparam && fparam.size() > 0) { + error->all( + FLERR, + "fparam and fparam_from_compute should NOT be set simultaneously"); + } + + if (comm->me == 0) { + if (numb_models > 1 && out_freq > 0) { + if (!is_restart) { + fp.open(out_file); + fp << scientific; + if (!atom->sp_flag) { + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" + << setw(18 + 1) << "avg_devi_f"; + if (out_each) { + // at this time, we don't know how many atoms + fp << setw(18 + 1) << "atm_devi_f(N)"; + } + fp << endl; + } else { + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_fr" << setw(18 + 1) << "min_devi_fr" + << setw(18 + 1) << "avg_devi_fr" << setw(18 + 1) << "max_devi_fm" + << setw(18 + 1) << "min_devi_fm" << setw(18 + 1) << "avg_devi_fm" + << endl; + } + } else { + fp.open(out_file, std::ofstream::out | std::ofstream::app); + fp << scientific; + } + } + string pre = " "; + cout << pre << ">>> Info of model(s):" << endl + << pre << "using " << setw(3) << numb_models << " model(s): "; + if (narg == 1) { + cout << arg[0] << " "; + } else { + for (int ii = 0; ii < models.size(); ++ii) { + cout << models[ii] << " "; + } + } + cout << endl + << pre << "rcut in model: " << cutoff << endl + << pre << "ntypes in model: " << numb_types << endl; + if (fparam.size() > 0) { + cout << pre << "using fparam(s): "; + for (int ii = 0; ii < dim_fparam; ++ii) { + cout << fparam[ii] << " "; + } + cout << endl; + } + if (do_compute_fparam) { + cout << pre << "using compute id (fparam): "; + cout << compute_fparam_id << " " << endl; + } + if (do_compute_aparam) { + cout << pre << "using compute id (aparam): "; + cout << compute_aparam_id << " " << endl; + } + if (aparam.size() > 0) { + cout << pre << "using aparam(s): "; + for (int ii = 0; ii < aparam.size(); ++ii) { + cout << aparam[ii] << " "; + } + cout << endl; + } + if (do_ttm) { + cout << pre << "using ttm fix: "; + cout << ttm_fix_id << " "; + if (dim_fparam > 0) { + cout << "(fparam)" << endl; + } else if (dim_aparam > 0) { + cout << "(aparam)" << endl; + } + } + } + + // comm_reverse = numb_models * 3; + if (atom->sp_flag) { + comm_reverse = numb_models * 3 * 2; + } else { + comm_reverse = numb_models * 3; + } + all_force.resize(numb_models); +} + void PairDeepSpin::compute(int eflag, int vflag) { if (numb_models == 0) { return; @@ -212,9 +496,9 @@ void PairDeepSpin::compute(int eflag, int vflag) { // cvflag_atom is the right flag for the cvatom matrix if (!(eflag_atom || cvflag_atom)) { try { - deep_pot.compute_spin(dener, dforce, dforce_mag, dvirial, dcoord, - dspin, dtype, dbox, nghost, lmp_list, ago, - fparam, daparam); + deep_spin.compute_spin(dener, dforce, dforce_mag, dvirial, dcoord, + dspin, dtype, dbox, nghost, lmp_list, ago, + fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -224,9 +508,9 @@ void PairDeepSpin::compute(int eflag, int vflag) { vector deatom(nall * 1, 0); vector dvatom(nall * 9, 0); try { - deep_pot.compute_spin(dener, dforce, dforce_mag, dvirial, deatom, - dvatom, dcoord, dspin, dtype, dbox, nghost, - lmp_list, ago, fparam, daparam); + deep_spin.compute_spin(dener, dforce, dforce_mag, dvirial, deatom, + dvatom, dcoord, dspin, dtype, dbox, nghost, + lmp_list, ago, fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -276,7 +560,7 @@ void PairDeepSpin::compute(int eflag, int vflag) { vector> all_atom_virial; if (!(eflag_atom || cvflag_atom)) { try { - deep_pot_model_devi.compute_spin( + deep_spin_model_devi.compute_spin( all_energy, all_force, all_force_mag, all_virial, dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { @@ -284,7 +568,7 @@ void PairDeepSpin::compute(int eflag, int vflag) { } } else { try { - deep_pot_model_devi.compute_spin( + deep_spin_model_devi.compute_spin( all_energy, all_force, all_force_mag, all_virial, all_atom_energy, all_atom_virial, dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, fparam, daparam); @@ -292,11 +576,11 @@ void PairDeepSpin::compute(int eflag, int vflag) { error->one(FLERR, e.what()); } } - // deep_pot_model_devi.compute_avg (dener, all_energy); - // deep_pot_model_devi.compute_avg (dforce, all_force); - // deep_pot_model_devi.compute_avg (dvirial, all_virial); - // deep_pot_model_devi.compute_avg (deatom, all_atom_energy); - // deep_pot_model_devi.compute_avg (dvatom, all_atom_virial); + // deep_spin_model_devi.compute_avg (dener, all_energy); + // deep_spin_model_devi.compute_avg (dforce, all_force); + // deep_spin_model_devi.compute_avg (dvirial, all_virial); + // deep_spin_model_devi.compute_avg (deatom, all_atom_energy); + // deep_spin_model_devi.compute_avg (dvatom, all_atom_virial); dener = all_energy[0]; dforce = all_force[0]; dforce_mag = all_force_mag[0]; @@ -353,10 +637,10 @@ void PairDeepSpin::compute(int eflag, int vflag) { vector tmp_avg_f; vector std_fm; vector tmp_avg_fm; - deep_pot_model_devi.compute_avg(tmp_avg_f, all_force); - deep_pot_model_devi.compute_std_f(std_f, tmp_avg_f, all_force); + deep_spin_model_devi.compute_avg(tmp_avg_f, all_force); + deep_spin_model_devi.compute_std_f(std_f, tmp_avg_f, all_force); if (out_rel == 1) { - deep_pot_model_devi.compute_relative_std_f(std_f, tmp_avg_f, eps); + deep_spin_model_devi.compute_relative_std_f(std_f, tmp_avg_f, eps); } double min = numeric_limits::max(), max = 0, avg = 0; ana_st(max, min, avg, std_f, nlocal); @@ -366,10 +650,10 @@ void PairDeepSpin::compute(int eflag, int vflag) { MPI_Reduce(&max, &all_f_max, 1, MPI_DOUBLE, MPI_MAX, 0, world); MPI_Reduce(&avg, &all_f_avg, 1, MPI_DOUBLE, MPI_SUM, 0, world); all_f_avg /= double(atom->natoms); - deep_pot_model_devi.compute_avg(tmp_avg_fm, all_force_mag); - deep_pot_model_devi.compute_std_f(std_fm, tmp_avg_fm, all_force_mag); + deep_spin_model_devi.compute_avg(tmp_avg_fm, all_force_mag); + deep_spin_model_devi.compute_std_f(std_fm, tmp_avg_fm, all_force_mag); if (out_rel == 1) { - deep_pot_model_devi.compute_relative_std_f(std_fm, tmp_avg_fm, eps); + deep_spin_model_devi.compute_relative_std_f(std_fm, tmp_avg_fm, eps); } min = numeric_limits::max(), max = 0, avg = 0; ana_st(max, min, avg, std_fm, nlocal); @@ -399,12 +683,12 @@ void PairDeepSpin::compute(int eflag, int vflag) { double all_v_min = numeric_limits::max(), all_v_max = 0, all_v_avg = 0; if (rank == 0) { - deep_pot_model_devi.compute_avg(avg_virial, all_virial_1); - deep_pot_model_devi.compute_std(std_virial, avg_virial, all_virial_1, - 1); + deep_spin_model_devi.compute_avg(avg_virial, all_virial_1); + deep_spin_model_devi.compute_std(std_virial, avg_virial, all_virial_1, + 1); if (out_rel_v == 1) { - deep_pot_model_devi.compute_relative_std(std_virial, avg_virial, - eps_v, 1); + deep_spin_model_devi.compute_relative_std(std_virial, avg_virial, + eps_v, 1); } for (int ii = 0; ii < 9; ++ii) { if (std_virial[ii] > all_v_max) { @@ -483,8 +767,8 @@ void PairDeepSpin::compute(int eflag, int vflag) { } else { if (numb_models == 1) { try { - deep_pot.compute_spin(dener, dforce, dforce_mag, dvirial, dcoord, dspin, - dtype, dbox); + deep_spin.compute_spin(dener, dforce, dforce_mag, dvirial, dcoord, + dspin, dtype, dbox); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } diff --git a/source/lmp/pair_deepspin.h b/source/lmp/pair_deepspin.h index 125caa1b9a..3363185405 100644 --- a/source/lmp/pair_deepspin.h +++ b/source/lmp/pair_deepspin.h @@ -12,6 +12,22 @@ PairStyle(deepspin, PairDeepSpin) #ifndef LMP_PAIR_NNP_SPIN_H #define LMP_PAIR_NNP_SPIN_H +#ifdef DP_USE_CXX_API +#ifdef LMPPLUGIN +#include "DeepSpin.h" +#else +#include "deepmd/DeepSpin.h" +#endif +namespace deepmd_compat = deepmd; +#else +#ifdef LMPPLUGIN +#include "deepmd.hpp" +#else +#include "deepmd/deepmd.hpp" +#endif +namespace deepmd_compat = deepmd::hpp; +#endif + #include #include #include @@ -28,10 +44,16 @@ class PairDeepSpin : public PairDeepMDBase { public: PairDeepSpin(class LAMMPS *); ~PairDeepSpin() override; + void settings(int, char **) override; void compute(int, int) override; int pack_reverse_comm(int, int, double *) override; void unpack_reverse_comm(int, int *, double *) override; + protected: + deepmd_compat::DeepSpin deep_spin; + deepmd_compat::DeepSpinModelDevi deep_spin_model_devi; + std::vector > all_force_mag; + private: CommBrickDeepSpin *commdata_; }; From 799b4e53e729fbe4cc6bf16dbbfbd6e8e9499071 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 5 Nov 2024 00:09:54 +0800 Subject: [PATCH 129/193] rm dead code --- source/api_cc/include/DeepPot.h | 439 +----------- source/api_cc/include/DeepPotPT.h | 144 ---- source/api_cc/include/DeepPotTF.h | 117 --- source/api_cc/src/DeepPot.cc | 520 +------------- source/api_cc/src/DeepPotPT.cc | 443 +----------- source/api_cc/src/DeepPotTF.cc | 666 +----------------- ...pt_spin.cc => test_deeppot_dpa_pt_spin.cc} | 20 +- source/api_cc/tests/test_deeppot_tf_spin.cc | 8 +- source/lmp/plugin/deepmdplugin.cpp | 3 + 9 files changed, 22 insertions(+), 2338 deletions(-) rename source/api_cc/tests/{test_deeppot_dpa1_pt_spin.cc => test_deeppot_dpa_pt_spin.cc} (96%) diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index 86f07d33c4..196b8f2910 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -89,63 +89,6 @@ class DeepPotBase : public DeepBaseModelBase { const bool atomic) = 0; /** @} */ - /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, - *and atomic virial by using this DP with spin input. - * @note The double precision interface is used by i-PI, GROMACS, ABACUS, and - *CP2k. - * @param[out] ener The system energy. - * @param[out] force The force on each atom. - * @param[out] force_mag The magnetic force on each atom. - * @param[out] virial The virial. - * @param[out] atom_energy The atomic energy. - * @param[out] atom_virial The atomic virial. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9. - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. - * @param[in] atomic Request atomic energy and virial if atomic is true. - * @{ - **/ - virtual void computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) = 0; - virtual void computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) = 0; - /** @} */ - /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -204,71 +147,6 @@ class DeepPotBase : public DeepBaseModelBase { const bool atomic) = 0; /** @} */ - /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, - *and atomic virial by using this DP with spin input. - * @note The double precision interface is used by LAMMPS and AMBER. - * @param[out] ener The system energy. - * @param[out] force The force on each atom. - * @param[out] force_mag The magnetic force on each atom. - * @param[out] virial The virial. - * @param[out] atom_energy The atomic energy. - * @param[out] atom_virial The atomic virial. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9. - * @param[in] nghost The number of ghost atoms. - * @param[in] lmp_list The input neighbour list. - * @param[in] ago Update the internal neighbour list if ago is 0. - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. - * @param[in] atomic Request atomic energy and virial if atomic is true. - * @{ - **/ - virtual void computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) = 0; - virtual void computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) = 0; - /** @} */ - /** * @brief Evaluate the energy, force, and virial with the mixed type *by using this DP. @@ -393,55 +271,6 @@ class DeepPot : public DeepBaseModel { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); /** @} */ - /** - * @brief Evaluate the energy, force, magnetic force and virial by using this - *DP with spin input. - * @param[out] ener The system energy. - * @param[out] force The force on each atom. - * @param[out] force_mag The magnetic force on each atom. - * @param[out] virial The virial. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9. - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. - * @{ - **/ - template - void compute_spin( - ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); - template - void compute_spin( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); - /** @} */ /** * @brief Evaluate the energy, force and virial by using this DP. @@ -491,64 +320,7 @@ class DeepPot : public DeepBaseModel { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); /** @} */ - /** - * @brief Evaluate the energy, force, magnetic force and virial by using this - *DP with spin input. - * @param[out] ener The system energy. - * @param[out] force The force on each atom. - * @param[out] force_mag The magnetic force on each atom. - * @param[out] virial The virial. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9. - * @param[in] nghost The number of ghost atoms. - * @param[in] inlist The input neighbour list. - * @param[in] ago Update the internal neighbour list if ago is 0. - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. - * @{ - **/ - template - void compute_spin( - ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); - template - void compute_spin( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); - /** @} */ + /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -596,62 +368,6 @@ class DeepPot : public DeepBaseModel { const std::vector& aparam = std::vector()); /** @} */ - /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, - *and atomic virial by using this DP with spin input. - * @param[out] ener The system energy. - * @param[out] force The force on each atom. - * @param[out] force_mag The magnetic force on each atom. - * @param[out] virial The virial. - * @param[out] atom_energy The atomic energy. - * @param[out] atom_virial The atomic virial. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9. - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. - * @{ - **/ - template - void compute_spin( - ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); - template - void compute_spin( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); - /** @} */ - /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -708,70 +424,6 @@ class DeepPot : public DeepBaseModel { const std::vector& aparam = std::vector()); /** @} */ - /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, - *and atomic virial by using this DP with spin input. - * @param[out] ener The system energy. - * @param[out] force The force on each atom. - * @param[out] force_mag The magnetic force on each atom. - * @param[out] virial The virial. - * @param[out] atom_energy The atomic energy. - * @param[out] atom_virial The atomic virial. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9. - * @param[in] nghost The number of ghost atoms. - * @param[in] lmp_list The input neighbour list. - * @param[in] ago Update the internal neighbour list if ago is 0. - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. - * @{ - **/ - template - void compute_spin( - ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); - template - void compute_spin( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); - /** @} */ /** * @brief Evaluate the energy, force, and virial with the mixed type *by using this DP. @@ -1005,48 +657,6 @@ class DeepPotModelDevi : public DeepBaseModelDevi { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); - /** - * @brief Evaluate the energy, force, magnetic force and virial by using these - *DP models with spin input. - * @param[out] all_ener The system energies of all models. - * @param[out] all_force The forces on each atom of all models. - * @param[out] all_force_mag The magnetic forces on each atom of all models. - * @param[out] all_virial The virials of all models. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9. - * @param[in] nghost The number of ghost atoms. - * @param[in] lmp_list The input neighbour list. - * @param[in] ago Update the internal neighbour list if ago is 0. - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. dim_aparam. Then all frames and atoms are provided with the - *same aparam. - **/ - template - void compute_spin( - std::vector& all_ener, - std::vector>& all_force, - std::vector>& all_force_mag, - std::vector>& all_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using these DP models. @@ -1088,53 +698,6 @@ class DeepPotModelDevi : public DeepBaseModelDevi { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); - /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, - *and atomic virial by using these DP models with spin input. - * @param[out] all_ener The system energies of all models. - * @param[out] all_force The forces on each atom of all models. - * @param[out] all_force_mag The magnetic forces on each atom of all models. - * @param[out] all_virial The virials of all models. - * @param[out] all_atom_energy The atomic energies of all models. - * @param[out] all_atom_virial The atomic virials of all models. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9. - * @param[in] nghost The number of ghost atoms. - * @param[in] lmp_list The input neighbour list. - * @param[in] ago Update the internal neighbour list if ago is 0. - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. dim_aparam. Then all frames and atoms are provided with the - *same aparam. - **/ - template - void compute_spin( - std::vector& all_ener, - std::vector>& all_force, - std::vector>& all_force_mag, - std::vector>& all_virial, - std::vector>& all_atom_energy, - std::vector>& all_atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); - protected: std::vector> dps; }; diff --git a/source/api_cc/include/DeepPotPT.h b/source/api_cc/include/DeepPotPT.h index d77b7fa485..21b78ca550 100644 --- a/source/api_cc/include/DeepPotPT.h +++ b/source/api_cc/include/DeepPotPT.h @@ -75,46 +75,6 @@ class DeepPotPT : public DeepPotBase { const std::vector& aparam, const bool atomic); - /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, - *and atomic virial by using this DP with spin input. - * @param[out] ener The system energy. - * @param[out] force The force on each atom. - * @param[out] force_mag The magnetic force on each atom. - * @param[out] virial The virial. - * @param[out] atom_energy The atomic energy. - * @param[out] atom_virial The atomic virial. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9. - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. - * @param[in] atomic Whether to compute the atomic energy and virial. - **/ - template - void compute(ENERGYVTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -157,52 +117,6 @@ class DeepPotPT : public DeepPotBase { const std::vector& aparam, const bool atomic); - /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, - *and atomic virial by using this DP with spin input. - * @param[out] ener The system energy. - * @param[out] force The force on each atom. - * @param[out] force_mag The magnetic force on each atom. - * @param[out] virial The virial. - * @param[out] atom_energy The atomic energy. - * @param[out] atom_virial The atomic virial. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9. - * @param[in] nghost The number of ghost atoms. - * @param[in] lmp_list The input neighbour list. - * @param[in] ago Update the internal neighbour list if ago is 0. - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. - * @param[in] atomic Whether to compute the atomic energy and virial. - **/ - template - void compute(ENERGYVTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); /** * @brief Evaluate the energy, force, and virial with the mixed type *by using this DP. @@ -358,66 +272,10 @@ class DeepPotPT : public DeepPotBase { const bool atomic); void computew(std::vector& ener, std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - void computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - void computew(std::vector& ener, - std::vector& force, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - void computew(std::vector& ener, - std::vector& force, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - void computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, std::vector& virial, std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, - const std::vector& spin, const std::vector& atype, const std::vector& box, const int nghost, @@ -428,12 +286,10 @@ class DeepPotPT : public DeepPotBase { const bool atomic); void computew(std::vector& ener, std::vector& force, - std::vector& force_mag, std::vector& virial, std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, - const std::vector& spin, const std::vector& atype, const std::vector& box, const int nghost, diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index cd2c376da7..1b6b75bce7 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -74,20 +74,6 @@ class DeepPotTF : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); - template - void compute(ENERGYVTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -129,23 +115,6 @@ class DeepPotTF : public DeepPotBase { const std::vector& fparam, const std::vector& aparam, const bool atomic); - template - void compute(ENERGYVTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); /** * @brief Evaluate the energy, force, and virial with the mixed type *by using this DP. @@ -267,66 +236,10 @@ class DeepPotTF : public DeepPotBase { const bool atomic); void computew(std::vector& ener, std::vector& force, - std::vector& force_mag, std::vector& virial, std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - void computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - void computew(std::vector& ener, - std::vector& force, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - void computew(std::vector& ener, - std::vector& force, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - void computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, const std::vector& atype, const std::vector& box, const int nghost, @@ -337,12 +250,10 @@ class DeepPotTF : public DeepPotBase { const bool atomic); void computew(std::vector& ener, std::vector& force, - std::vector& force_mag, std::vector& virial, std::vector& atom_energy, std::vector& atom_virial, const std::vector& coord, - const std::vector& spin, const std::vector& atype, const std::vector& box, const int nghost, @@ -376,34 +287,6 @@ class DeepPotTF : public DeepPotBase { const std::vector& aparam, const bool atomic); - template - void extend(int& extend_inum, - std::vector& extend_ilist, - std::vector& extend_numneigh, - std::vector>& extend_neigh, - std::vector& extend_firstneigh, - std::vector& extend_dcoord, - std::vector& extend_atype, - int& extend_nghost, - std::map& new_idx_map, - std::map& old_idx_map, - const InputNlist& lmp_list, - const std::vector& dcoord, - const std::vector& atype, - const int nghost, - const std::vector& spin, - const int numb_types, - const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm); - - template - void extend_nlist(std::vector& extend_dcoord, - std::vector& extend_atype, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_); - void cum_sum(std::map&, std::map&); private: diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc index 3f0c374ca8..d8d02aff5c 100644 --- a/source/api_cc/src/DeepPot.cc +++ b/source/api_cc/src/DeepPot.cc @@ -62,8 +62,7 @@ void DeepPot::init(const std::string& model, throw deepmd::deepmd_exception("Unknown file type"); } inited = true; - dpbase = (std::shared_ptr) - dp; // make sure the base funtions work + dpbase = dp; // make sure the base funtions work } // no nlist, no atomic : nframe @@ -134,89 +133,6 @@ template void DeepPot::compute(std::vector& dener, const std::vector& aparam); // above: no nlist, no atomic : nframe * precision -// support spin -// no nlist, no atomic : nframe -template -void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { - std::vector dener_; - std::vector datom_energy_, datom_virial_; - dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, - datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, - false); - dener = dener_[0]; -} - -template -void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { - std::vector datom_energy_, datom_virial_; - dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, - datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, - false); -} - -// no nlist, no atomic : nframe * precision -template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - // nlist, no atomic : nframe template void DeepPot::compute(ENERGYTYPE& dener, @@ -302,107 +218,6 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam_); -// support spin -// nlist, no atomic : nframe -template -void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { - std::vector dener_; - std::vector datom_energy_, datom_virial_; - dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, - datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, - ago, fparam_, aparam__, false); - dener = dener_[0]; -} - -template -void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { - std::vector datom_energy_, datom_virial_; - dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, - datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, - ago, fparam_, aparam__, false); -} - -// nlist, no atomic : nframe * precision -template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - // no nlist, atomic : nframe template void DeepPot::compute(ENERGYTYPE& dener, @@ -480,97 +295,6 @@ template void DeepPot::compute(std::vector& dener, const std::vector& aparam); // above: no nlist, atomic : nframe * precision -// support spin -// no nlist, atomic : nframe -template -void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { - std::vector dener_; - dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, - datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, - true); - dener = dener_[0]; -} -template -void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { - dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, - datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, - true); -} -// no nlist, atomic : nframe * precision -template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - // nlist, atomic : nframe template void DeepPot::compute(ENERGYTYPE& dener, @@ -666,115 +390,6 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam_); -// support spin -// nlist, atomic : nframe -template -void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { - std::vector dener_; - dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, - datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, - ago, fparam_, aparam__, true); - dener = dener_[0]; -} -template -void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { - dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, - datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, - ago, fparam_, aparam__, true); -} -// nlist, atomic : nframe * precision -template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepPot::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepPot::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - // mixed type template void DeepPot::compute_mixed_type(ENERGYTYPE& dener, @@ -1127,68 +742,6 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); -// support spin -// nlist, no atomic -template -void DeepPotModelDevi::compute_spin( - std::vector& all_energy, - std::vector>& all_force, - std::vector>& all_force_mag, - std::vector>& all_virial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_) { - if (numb_models == 0) { - return; - } - all_energy.resize(numb_models); - all_force.resize(numb_models); - all_force_mag.resize(numb_models); - all_virial.resize(numb_models); - for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii]->compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], - all_virial[ii], dcoord_, dspin_, datype_, dbox, - nghost, lmp_list, ago, fparam, aparam_); - } -} - -// nlist, no atomic: precision -template void DeepPotModelDevi::compute_spin( - std::vector& all_energy, - std::vector>& all_force, - std::vector>& all_force_mag, - std::vector>& all_virial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepPotModelDevi::compute_spin( - std::vector& all_energy, - std::vector>& all_force, - std::vector>& all_force_mag, - std::vector>& all_virial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam); - // nlist, atomic template void DeepPotModelDevi::compute( @@ -1250,74 +803,3 @@ template void DeepPotModelDevi::compute( const int& ago, const std::vector& fparam, const std::vector& aparam); - -// support spin -// nlist, atomic -template -void DeepPotModelDevi::compute_spin( - std::vector& all_energy, - std::vector>& all_force, - std::vector>& all_force_mag, - std::vector>& all_virial, - std::vector>& all_atom_energy, - std::vector>& all_atom_virial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_) { - if (numb_models == 0) { - return; - } - all_energy.resize(numb_models); - all_force.resize(numb_models); - all_force_mag.resize(numb_models); - all_virial.resize(numb_models); - all_atom_energy.resize(numb_models); - all_atom_virial.resize(numb_models); - for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii]->compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], - all_virial[ii], all_atom_energy[ii], - all_atom_virial[ii], dcoord_, dspin_, datype_, dbox, - nghost, lmp_list, ago, fparam, aparam_); - } -} - -// nlist, atomic : precision -template void DeepPotModelDevi::compute_spin( - std::vector& all_energy, - std::vector>& all_force, - std::vector>& all_force_mag, - std::vector>& all_virial, - std::vector>& all_atom_energy, - std::vector>& all_atom_virial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepPotModelDevi::compute_spin( - std::vector& all_energy, - std::vector>& all_force, - std::vector>& all_force_mag, - std::vector>& all_virial, - std::vector>& all_atom_energy, - std::vector>& all_atom_virial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam); diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index c56e65bae7..f8b803bad4 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -312,230 +312,6 @@ template void DeepPotPT::compute>( const std::vector& aparam, const bool atomic); -template -void DeepPotPT::compute(ENERGYVTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) { - torch::Device device(torch::kCUDA, gpu_id); - if (!gpu_enabled) { - device = torch::Device(torch::kCPU); - } - int natoms = atype.size(); - auto options = torch::TensorOptions().dtype(torch::kFloat64); - torch::ScalarType floatType = torch::kFloat64; - if (std::is_same_v) { - options = torch::TensorOptions().dtype(torch::kFloat32); - floatType = torch::kFloat32; - } - auto int32_option = - torch::TensorOptions().device(torch::kCPU).dtype(torch::kInt32); - auto int_option = - torch::TensorOptions().device(torch::kCPU).dtype(torch::kInt64); - // select real atoms - std::vector dcoord, dforce, dforce_mag, aparam_, datom_energy, - datom_virial; - std::vector datype, fwd_map, bkw_map; - int nghost_real, nall_real, nloc_real; - int nall = natoms; - select_real_atoms_coord(dcoord, datype, aparam_, nghost_real, fwd_map, - bkw_map, nall_real, nloc_real, coord, atype, aparam, - nghost, ntypes, 1, daparam, nall, aparam_nall); - int nloc = nall_real - nghost_real; - int nframes = 1; - std::vector coord_wrapped = dcoord; - at::Tensor coord_wrapped_Tensor = - torch::from_blob(coord_wrapped.data(), {1, nall_real, 3}, options) - .to(device); - std::vector spin_wrapped = spin; - at::Tensor spin_wrapped_Tensor = - torch::from_blob(spin_wrapped.data(), {1, nall_real, 3}, options) - .to(device); - std::vector atype_64(datype.begin(), datype.end()); - at::Tensor atype_Tensor = - torch::from_blob(atype_64.data(), {1, nall_real}, int_option).to(device); - c10::optional mapping_tensor; - if (ago == 0) { - nlist_data.copy_from_nlist(lmp_list); - nlist_data.shuffle_exclude_empty(fwd_map); - nlist_data.padding(); - if (do_message_passing == 1 && nghost > 0) { - int nswap = lmp_list.nswap; - torch::Tensor sendproc_tensor = - torch::from_blob(lmp_list.sendproc, {nswap}, int32_option); - torch::Tensor recvproc_tensor = - torch::from_blob(lmp_list.recvproc, {nswap}, int32_option); - torch::Tensor firstrecv_tensor = - torch::from_blob(lmp_list.firstrecv, {nswap}, int32_option); - torch::Tensor recvnum_tensor = - torch::from_blob(lmp_list.recvnum, {nswap}, int32_option); - torch::Tensor sendnum_tensor = - torch::from_blob(lmp_list.sendnum, {nswap}, int32_option); - torch::Tensor communicator_tensor = torch::from_blob( - const_cast(lmp_list.world), {1}, torch::kInt64); - // torch::Tensor communicator_tensor = - // torch::tensor(lmp_list.world, int32_option); - torch::Tensor nswap_tensor = torch::tensor(nswap, int32_option); - int total_send = - std::accumulate(lmp_list.sendnum, lmp_list.sendnum + nswap, 0); - torch::Tensor sendlist_tensor = - torch::from_blob(lmp_list.sendlist, {total_send}, int32_option); - torch::Tensor has_spin = torch::tensor({1}, int32_option); - comm_dict.insert("send_list", sendlist_tensor); - comm_dict.insert("send_proc", sendproc_tensor); - comm_dict.insert("recv_proc", recvproc_tensor); - comm_dict.insert("send_num", sendnum_tensor); - comm_dict.insert("recv_num", recvnum_tensor); - comm_dict.insert("communicator", communicator_tensor); - comm_dict.insert("has_spin", has_spin); - } - if (do_message_passing == 1 && nghost == 0) { - // for the situation that no ghost atoms (e.g. serial nopbc) - // set the mapping arange(nloc) is enough - auto option = torch::TensorOptions().device(device).dtype(torch::kInt64); - mapping_tensor = at::arange(nloc_real, option).unsqueeze(0); - } - } - at::Tensor firstneigh = createNlistTensor(nlist_data.jlist); - firstneigh_tensor = firstneigh.to(torch::kInt64).to(device); - bool do_atom_virial_tensor = atomic; - c10::optional fparam_tensor; - if (!fparam.empty()) { - fparam_tensor = - torch::from_blob(const_cast(fparam.data()), - {1, static_cast(fparam.size())}, options) - .to(device); - } - c10::optional aparam_tensor; - if (!aparam_.empty()) { - aparam_tensor = - torch::from_blob( - const_cast(aparam_.data()), - {1, lmp_list.inum, - static_cast(aparam_.size()) / lmp_list.inum}, - options) - .to(device); - } - c10::Dict outputs = - (do_message_passing == 1 && nghost > 0) - ? module - .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, - spin_wrapped_Tensor, firstneigh_tensor, - mapping_tensor, fparam_tensor, aparam_tensor, - do_atom_virial_tensor, comm_dict) - .toGenericDict() - : module - .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, - spin_wrapped_Tensor, firstneigh_tensor, - mapping_tensor, fparam_tensor, aparam_tensor, - do_atom_virial_tensor) - .toGenericDict(); - c10::IValue energy_ = outputs.at("energy"); - c10::IValue force_ = outputs.at("extended_force"); - c10::IValue force_mag_ = outputs.at("extended_force_mag"); - // spin model not suported yet - // c10::IValue virial_ = outputs.at("virial"); - torch::Tensor flat_energy_ = energy_.toTensor().view({-1}); - torch::Tensor cpu_energy_ = flat_energy_.to(torch::kCPU); - ener.assign(cpu_energy_.data_ptr(), - cpu_energy_.data_ptr() + cpu_energy_.numel()); - torch::Tensor flat_force_ = force_.toTensor().view({-1}).to(floatType); - torch::Tensor cpu_force_ = flat_force_.to(torch::kCPU); - dforce.assign(cpu_force_.data_ptr(), - cpu_force_.data_ptr() + cpu_force_.numel()); - torch::Tensor flat_force_mag_ = - force_mag_.toTensor().view({-1}).to(floatType); - torch::Tensor cpu_force_mag_ = flat_force_mag_.to(torch::kCPU); - dforce_mag.assign( - cpu_force_mag_.data_ptr(), - cpu_force_mag_.data_ptr() + cpu_force_mag_.numel()); - // spin model not suported yet - // torch::Tensor flat_virial_ = virial_.toTensor().view({-1}).to(floatType); - // torch::Tensor cpu_virial_ = flat_virial_.to(torch::kCPU); - // virial.assign(cpu_virial_.data_ptr(), - // cpu_virial_.data_ptr() + cpu_virial_.numel()); - - // bkw map - force.resize(static_cast(nframes) * fwd_map.size() * 3); - force_mag.resize(static_cast(nframes) * fwd_map.size() * 3); - select_map(force, dforce, bkw_map, 3, nframes, fwd_map.size(), - nall_real); - select_map(force_mag, dforce_mag, bkw_map, 3, nframes, - fwd_map.size(), nall_real); - if (atomic) { - // spin model not suported yet - // c10::IValue atom_virial_ = outputs.at("extended_virial"); - c10::IValue atom_energy_ = outputs.at("atom_energy"); - torch::Tensor flat_atom_energy_ = - atom_energy_.toTensor().view({-1}).to(floatType); - torch::Tensor cpu_atom_energy_ = flat_atom_energy_.to(torch::kCPU); - datom_energy.resize(nall_real, - 0.0); // resize to nall to be consistenet with TF. - datom_energy.assign( - cpu_atom_energy_.data_ptr(), - cpu_atom_energy_.data_ptr() + cpu_atom_energy_.numel()); - // spin model not suported yet - // torch::Tensor flat_atom_virial_ = - // atom_virial_.toTensor().view({-1}).to(floatType); - // torch::Tensor cpu_atom_virial_ = flat_atom_virial_.to(torch::kCPU); - // datom_virial.assign( - // cpu_atom_virial_.data_ptr(), - // cpu_atom_virial_.data_ptr() + cpu_atom_virial_.numel()); - atom_energy.resize(static_cast(nframes) * fwd_map.size()); - // atom_virial.resize(static_cast(nframes) * fwd_map.size() * 9); - select_map(atom_energy, datom_energy, bkw_map, 1, nframes, - fwd_map.size(), nall_real); - // select_map(atom_virial, datom_virial, bkw_map, 9, nframes, - // fwd_map.size(), nall_real); - } -} -template void DeepPotPT::compute>( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); -template void DeepPotPT::compute>( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - template void DeepPotPT::compute(ENERGYVTYPE& ener, std::vector& force, @@ -658,146 +434,6 @@ template void DeepPotPT::compute>( const std::vector& aparam, const bool atomic); -template -void DeepPotPT::compute(ENERGYVTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) { - torch::Device device(torch::kCUDA, gpu_id); - if (!gpu_enabled) { - device = torch::Device(torch::kCPU); - } - std::vector coord_wrapped = coord; - std::vector spin_wrapped = spin; - int natoms = atype.size(); - auto options = torch::TensorOptions().dtype(torch::kFloat64); - torch::ScalarType floatType = torch::kFloat64; - if (std::is_same_v) { - options = torch::TensorOptions().dtype(torch::kFloat32); - floatType = torch::kFloat32; - } - auto int_options = torch::TensorOptions().dtype(torch::kInt64); - int nframes = 1; - std::vector inputs; - at::Tensor coord_wrapped_Tensor = - torch::from_blob(coord_wrapped.data(), {1, natoms, 3}, options) - .to(device); - inputs.push_back(coord_wrapped_Tensor); - std::vector atype_64(atype.begin(), atype.end()); - at::Tensor atype_Tensor = - torch::from_blob(atype_64.data(), {1, natoms}, int_options).to(device); - inputs.push_back(atype_Tensor); - at::Tensor spin_wrapped_Tensor = - torch::from_blob(spin_wrapped.data(), {1, natoms, 3}, options).to(device); - inputs.push_back(spin_wrapped_Tensor); - c10::optional box_Tensor; - if (!box.empty()) { - box_Tensor = - torch::from_blob(const_cast(box.data()), {1, 9}, options) - .to(device); - } - inputs.push_back(box_Tensor); - c10::optional fparam_tensor; - if (!fparam.empty()) { - fparam_tensor = - torch::from_blob(const_cast(fparam.data()), - {1, static_cast(fparam.size())}, options) - .to(device); - } - inputs.push_back(fparam_tensor); - c10::optional aparam_tensor; - if (!aparam.empty()) { - aparam_tensor = - torch::from_blob( - const_cast(aparam.data()), - {1, natoms, static_cast(aparam.size()) / natoms}, - options) - .to(device); - } - inputs.push_back(aparam_tensor); - bool do_atom_virial_tensor = atomic; - inputs.push_back(do_atom_virial_tensor); - c10::Dict outputs = - module.forward(inputs).toGenericDict(); - c10::IValue energy_ = outputs.at("energy"); - c10::IValue force_ = outputs.at("force"); - c10::IValue force_mag_ = outputs.at("force_mag"); - // spin model not suported yet - // c10::IValue virial_ = outputs.at("virial"); - torch::Tensor flat_energy_ = energy_.toTensor().view({-1}); - torch::Tensor cpu_energy_ = flat_energy_.to(torch::kCPU); - ener.assign(cpu_energy_.data_ptr(), - cpu_energy_.data_ptr() + cpu_energy_.numel()); - torch::Tensor flat_force_ = force_.toTensor().view({-1}).to(floatType); - torch::Tensor cpu_force_ = flat_force_.to(torch::kCPU); - force.assign(cpu_force_.data_ptr(), - cpu_force_.data_ptr() + cpu_force_.numel()); - torch::Tensor flat_force_mag_ = - force_mag_.toTensor().view({-1}).to(floatType); - torch::Tensor cpu_force_mag_ = flat_force_mag_.to(torch::kCPU); - force_mag.assign( - cpu_force_mag_.data_ptr(), - cpu_force_mag_.data_ptr() + cpu_force_mag_.numel()); - // spin model not suported yet - // torch::Tensor flat_virial_ = virial_.toTensor().view({-1}).to(floatType); - // torch::Tensor cpu_virial_ = flat_virial_.to(torch::kCPU); - // virial.assign(cpu_virial_.data_ptr(), - // cpu_virial_.data_ptr() + cpu_virial_.numel()); - if (atomic) { - // c10::IValue atom_virial_ = outputs.at("atom_virial"); - c10::IValue atom_energy_ = outputs.at("atom_energy"); - torch::Tensor flat_atom_energy_ = - atom_energy_.toTensor().view({-1}).to(floatType); - torch::Tensor cpu_atom_energy_ = flat_atom_energy_.to(torch::kCPU); - atom_energy.assign( - cpu_atom_energy_.data_ptr(), - cpu_atom_energy_.data_ptr() + cpu_atom_energy_.numel()); - // torch::Tensor flat_atom_virial_ = - // atom_virial_.toTensor().view({-1}).to(floatType); - // torch::Tensor cpu_atom_virial_ = flat_atom_virial_.to(torch::kCPU); - // atom_virial.assign( - // cpu_atom_virial_.data_ptr(), - // cpu_atom_virial_.data_ptr() + cpu_atom_virial_.numel()); - } -} - -template void DeepPotPT::compute>( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); -template void DeepPotPT::compute>( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); void DeepPotPT::get_type_map(std::string& type_map) { auto ret = module.run_method("get_type_map").toList(); for (const torch::IValue& element : ret) { @@ -839,42 +475,6 @@ void DeepPotPT::computew(std::vector& ener, fparam, aparam, atomic); }); } -void DeepPotPT::computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) { - translate_error([&] { - compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, - spin, atype, box, fparam, aparam, atomic); - }); -} -void DeepPotPT::computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) { - translate_error([&] { - compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, - spin, atype, box, fparam, aparam, atomic); - }); -} void DeepPotPT::computew(std::vector& ener, std::vector& force, std::vector& virial, @@ -913,48 +513,7 @@ void DeepPotPT::computew(std::vector& ener, nghost, inlist, ago, fparam, aparam, atomic); }); } -void DeepPotPT::computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) { - translate_error([&] { - compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, - spin, atype, box, nghost, inlist, ago, fparam, aparam, atomic); - }); -} -void DeepPotPT::computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) { - translate_error([&] { - compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, - spin, atype, box, nghost, inlist, ago, fparam, aparam, atomic); - }); -} + void DeepPotPT::computew_mixed_type(std::vector& ener, std::vector& force, std::vector& virial, diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index f8ad1a5b68..586bf02021 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -680,137 +680,6 @@ template void DeepPotTF::compute>( const std::vector& fparam, const std::vector& aparam, const bool atomic); -// support spin -template -void DeepPotTF::compute(ENERGYVTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_, - const bool atomic) { - // if datype.size is 0, not clear nframes; but 1 is just ok - int nframes = datype_.size() > 0 ? (dcoord_.size() / 3 / datype_.size()) : 1; - int nloc = datype_.size(); - std::vector fparam; - std::vector aparam; - validate_fparam_aparam(nframes, nloc, fparam_, aparam_); - tile_fparam_aparam(fparam, nframes, dfparam, fparam_); - tile_fparam_aparam(aparam, nframes, nloc * daparam, aparam_); - - std::vector extend_dcoord; - std::vector extend_atype; - extend_nlist(extend_dcoord, extend_atype, dcoord_, dspin_, datype_); - - atommap = deepmd::AtomMap(extend_atype.begin(), extend_atype.end()); - - std::vector> input_tensors; - std::vector dforce_tmp; - - if (dtype == tensorflow::DT_DOUBLE) { - int ret = session_input_tensors( - input_tensors, extend_dcoord, ntypes, extend_atype, dbox, cell_size, - fparam, aparam, atommap, "", aparam_nall); - if (atomic) { - run_model(dener, dforce_tmp, dvirial, datom_energy_, - datom_virial_, session, input_tensors, atommap, - nframes); - } else { - run_model(dener, dforce_tmp, dvirial, session, input_tensors, - atommap, nframes); - } - } else { - int ret = session_input_tensors( - input_tensors, extend_dcoord, ntypes, extend_atype, dbox, cell_size, - fparam, aparam, atommap, "", aparam_nall); - if (atomic) { - run_model(dener, dforce_tmp, dvirial, datom_energy_, datom_virial_, - session, input_tensors, atommap, nframes); - } else { - run_model(dener, dforce_tmp, dvirial, session, input_tensors, - atommap, nframes); - } - } - // backward force and mag. - dforce_.resize(static_cast(nframes) * nloc * 3); - dforce_mag_.resize(static_cast(nframes) * nloc * 3); - for (int ii = 0; ii < nloc; ++ii) { - for (int dd = 0; dd < 3; ++dd) { - dforce_[3 * ii + dd] = dforce_tmp[3 * ii + dd]; - if (datype_[ii] < ntypes_spin) { - dforce_mag_[3 * ii + dd] = dforce_tmp[3 * (ii + nloc) + dd]; - } else { - dforce_mag_[3 * ii + dd] = 0.0; - } - } - } -} - -template void DeepPotTF::compute( - ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - -template void DeepPotTF::compute( - ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - -template void DeepPotTF::compute>( - std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); - -template void DeepPotTF::compute>( - std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic); template void DeepPotTF::compute(ENERGYVTYPE& dener, @@ -959,200 +828,6 @@ template void DeepPotTF::compute>( const std::vector& aparam_, const bool atomic); -// support spin -template -void DeepPotTF::compute(ENERGYVTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__, - const bool atomic) { - int nall = datype_.size(); - // if nall==0, unclear nframes, but 1 is ok - int nframes = nall > 0 ? (dcoord_.size() / nall / 3) : 1; - int nloc = nall - nghost; - - std::vector virtual_len; - std::vector spin_norm; - std::vector extend_dcoord; - get_vector(virtual_len, "spin_attr/virtual_len"); - get_vector(spin_norm, "spin_attr/spin_norm"); - extend(extend_inum, extend_ilist, extend_numneigh, extend_neigh, - extend_firstneigh, extend_dcoord, extend_dtype, extend_nghost, - new_idx_map, old_idx_map, lmp_list, dcoord_, datype_, nghost, dspin_, - ntypes, ntypes_spin, virtual_len, spin_norm); - InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], &extend_numneigh[0], - &extend_firstneigh[0]); - std::vector fparam; - std::vector aparam_; - validate_fparam_aparam(nframes, (aparam_nall ? nall : nloc), fparam_, - aparam__); - tile_fparam_aparam(fparam, nframes, dfparam, fparam_); - tile_fparam_aparam(aparam_, nframes, (aparam_nall ? nall : nloc) * daparam, - aparam__); - std::vector> input_tensors; - // select real atoms - std::vector dcoord, dforce, aparam, datom_energy, datom_virial; - std::vector datype, fwd_map, bkw_map; - int nghost_real, nall_real, nloc_real; - select_real_atoms_coord(dcoord, datype, aparam, nghost_real, fwd_map, bkw_map, - nall_real, nloc_real, extend_dcoord, extend_dtype, - aparam_, extend_nghost, ntypes, nframes, daparam, - nall, aparam_nall); - - if (ago == 0) { - atommap = deepmd::AtomMap(datype.begin(), datype.begin() + nloc_real); - assert(nloc_real == atommap.get_type().size()); - - nlist_data.copy_from_nlist(extend_lmp_list); - nlist_data.shuffle_exclude_empty(fwd_map); - nlist_data.shuffle(atommap); - nlist_data.make_inlist(nlist); - } - - if (dtype == tensorflow::DT_DOUBLE) { - int ret = session_input_tensors( - input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, - atommap, nghost_real, ago, "", aparam_nall); - assert(nloc_real == ret); - if (atomic) { - run_model(dener, dforce, dvirial, datom_energy, datom_virial, - session, input_tensors, atommap, nframes, nghost_real); - } else { - run_model(dener, dforce, dvirial, session, input_tensors, atommap, - nframes, nghost_real); - } - } else { - int ret = session_input_tensors( - input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, - atommap, nghost_real, ago, "", aparam_nall); - assert(nloc_real == ret); - if (atomic) { - run_model(dener, dforce, dvirial, datom_energy, datom_virial, - session, input_tensors, atommap, nframes, nghost_real); - } else { - run_model(dener, dforce, dvirial, session, input_tensors, atommap, - nframes, nghost_real); - } - } - - // bkw map - std::vector dforce_tmp, datom_energy_tmp, datom_virial_tmp; - dforce_tmp.resize(static_cast(nframes) * fwd_map.size() * 3); - datom_energy_tmp.resize(static_cast(nframes) * fwd_map.size()); - datom_virial_tmp.resize(static_cast(nframes) * fwd_map.size() * 9); - select_map(dforce_tmp, dforce, bkw_map, 3, nframes, fwd_map.size(), - nall_real); - select_map(datom_energy_tmp, datom_energy, bkw_map, 1, nframes, - fwd_map.size(), nall_real); - select_map(datom_virial_tmp, datom_virial, bkw_map, 9, nframes, - fwd_map.size(), nall_real); - // backward force and mag. - dforce_.resize(static_cast(nframes) * nall * 3); - dforce_mag_.resize(static_cast(nframes) * nall * 3); - datom_energy_.resize(static_cast(nframes) * nall); - datom_virial_.resize(static_cast(nframes) * nall * 9); - for (int ii = 0; ii < nall; ++ii) { - for (int dd = 0; dd < 3; ++dd) { - int new_idx = new_idx_map[ii]; - dforce_[3 * ii + dd] = dforce_tmp[3 * new_idx + dd]; - datom_energy_[ii] = datom_energy_tmp[new_idx]; - datom_virial_[ii] = datom_virial_tmp[new_idx]; - if (datype_[ii] < ntypes_spin && ii < nloc) { - dforce_mag_[3 * ii + dd] = dforce_tmp[3 * (new_idx + nloc) + dd]; - } else if (datype_[ii] < ntypes_spin) { - dforce_mag_[3 * ii + dd] = dforce_tmp[3 * (new_idx + nghost) + dd]; - } else { - dforce_mag_[3 * ii + dd] = 0.0; - } - } - } -} - -template void DeepPotTF::compute( - ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_, - const bool atomic); - -template void DeepPotTF::compute( - ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_, - const bool atomic); - -template void DeepPotTF::compute>( - std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_, - const bool atomic); - -template void DeepPotTF::compute>( - std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_, - const bool atomic); - -// end support spin - // mixed type template @@ -1293,39 +968,7 @@ void DeepPotTF::computew(std::vector& ener, compute(ener, force, virial, atom_energy, atom_virial, coord, atype, box, fparam, aparam, atomic); } -// support spin -void DeepPotTF::computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) { - compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, - atype, box, fparam, aparam, atomic); -} -void DeepPotTF::computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) { - compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, - atype, box, fparam, aparam, atomic); -} + void DeepPotTF::computew(std::vector& ener, std::vector& force, std::vector& virial, @@ -1360,45 +1003,7 @@ void DeepPotTF::computew(std::vector& ener, compute(ener, force, virial, atom_energy, atom_virial, coord, atype, box, nghost, inlist, ago, fparam, aparam, atomic); } -// support spin -void DeepPotTF::computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) { - compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, - atype, box, nghost, inlist, ago, fparam, aparam, atomic); -} -void DeepPotTF::computew(std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam, - const std::vector& aparam, - const bool atomic) { - compute(ener, force, force_mag, virial, atom_energy, atom_virial, coord, spin, - atype, box, nghost, inlist, ago, fparam, aparam, atomic); -} + void DeepPotTF::computew_mixed_type(std::vector& ener, std::vector& force, std::vector& virial, @@ -1437,271 +1042,4 @@ void DeepPotTF::cum_sum(std::map& sum, std::map& vec) { } } -template -void DeepPotTF::extend(int& extend_inum, - std::vector& extend_ilist, - std::vector& extend_numneigh, - std::vector>& extend_neigh, - std::vector& extend_firstneigh, - std::vector& extend_dcoord, - std::vector& extend_atype, - int& extend_nghost, - std::map& new_idx_map, - std::map& old_idx_map, - const InputNlist& lmp_list, - const std::vector& dcoord, - const std::vector& atype, - const int nghost, - const std::vector& spin, - const int numb_types, - const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm) { - extend_ilist.clear(); - extend_numneigh.clear(); - extend_neigh.clear(); - extend_firstneigh.clear(); - extend_dcoord.clear(); - extend_atype.clear(); - - int nall = dcoord.size() / 3; - int nloc = nall - nghost; - assert(nloc == lmp_list.inum); - - // record numb_types_real and nloc_virt - int numb_types_real = numb_types - numb_types_spin; - std::map loc_type_count; - std::map::iterator iter = loc_type_count.begin(); - for (int i = 0; i < nloc; i++) { - iter = loc_type_count.find(atype[i]); - if (iter != loc_type_count.end()) { - iter->second += 1; - } else { - loc_type_count.insert(std::pair(atype[i], 1)); - } - } - assert(numb_types_real - 1 == loc_type_count.rbegin()->first); - int nloc_virt = 0; - for (int i = 0; i < numb_types_spin; i++) { - nloc_virt += loc_type_count[i]; - } - - // record nghost_virt - std::map ghost_type_count; - for (int i = nloc; i < nall; i++) { - iter = ghost_type_count.find(atype[i]); - if (iter != ghost_type_count.end()) { - iter->second += 1; - } else { - ghost_type_count.insert(std::pair(atype[i], 1)); - } - } - int nghost_virt = 0; - for (int i = 0; i < numb_types_spin; i++) { - nghost_virt += ghost_type_count[i]; - } - - // for extended system, search new index by old index, and vice versa - extend_nghost = nghost + nghost_virt; - int extend_nloc = nloc + nloc_virt; - int extend_nall = extend_nloc + extend_nghost; - std::map cum_loc_type_count; - std::map cum_ghost_type_count; - cum_sum(cum_loc_type_count, loc_type_count); - cum_sum(cum_ghost_type_count, ghost_type_count); - std::vector loc_type_reset(numb_types_real, 0); - std::vector ghost_type_reset(numb_types_real, 0); - - new_idx_map.clear(); - old_idx_map.clear(); - for (int ii = 0; ii < nloc; ii++) { - int new_idx = cum_loc_type_count[atype[ii]] + loc_type_reset[atype[ii]]; - new_idx_map[ii] = new_idx; - old_idx_map[new_idx] = ii; - loc_type_reset[atype[ii]]++; - } - for (int ii = nloc; ii < nall; ii++) { - int new_idx = cum_ghost_type_count[atype[ii]] + - ghost_type_reset[atype[ii]] + extend_nloc; - new_idx_map[ii] = new_idx; - old_idx_map[new_idx] = ii; - ghost_type_reset[atype[ii]]++; - } - - // extend lmp_list - extend_inum = extend_nloc; - - extend_ilist.resize(extend_nloc); - for (int ii = 0; ii < extend_nloc; ii++) { - extend_ilist[ii] = ii; - } - - extend_neigh.resize(extend_nloc); - for (int ii = 0; ii < nloc; ii++) { - int jnum = lmp_list.numneigh[old_idx_map[ii]]; - const int* jlist = lmp_list.firstneigh[old_idx_map[ii]]; - if (atype[old_idx_map[ii]] < numb_types_spin) { - extend_neigh[ii].push_back(ii + nloc); - } - for (int jj = 0; jj < jnum; jj++) { - int new_idx = new_idx_map[jlist[jj]]; - extend_neigh[ii].push_back(new_idx); - if (atype[jlist[jj]] < numb_types_spin && jlist[jj] < nloc) { - extend_neigh[ii].push_back(new_idx + nloc); - } else if (atype[jlist[jj]] < numb_types_spin && jlist[jj] < nall) { - extend_neigh[ii].push_back(new_idx + nghost); - } - } - } - for (int ii = nloc; ii < extend_nloc; ii++) { - extend_neigh[ii].assign(extend_neigh[ii - nloc].begin(), - extend_neigh[ii - nloc].end()); - std::vector::iterator it = - find(extend_neigh[ii].begin(), extend_neigh[ii].end(), ii); - *it = ii - nloc; - } - - extend_firstneigh.resize(extend_nloc); - extend_numneigh.resize(extend_nloc); - for (int ii = 0; ii < extend_nloc; ii++) { - extend_firstneigh[ii] = &extend_neigh[ii][0]; - extend_numneigh[ii] = extend_neigh[ii].size(); - } - - // extend coord - extend_dcoord.resize(static_cast(extend_nall) * 3); - for (int ii = 0; ii < nloc; ii++) { - for (int jj = 0; jj < 3; jj++) { - extend_dcoord[new_idx_map[ii] * 3 + jj] = dcoord[ii * 3 + jj]; - if (atype[ii] < numb_types_spin) { - double temp_dcoord = dcoord[ii * 3 + jj] + spin[ii * 3 + jj] / - spin_norm[atype[ii]] * - virtual_len[atype[ii]]; - extend_dcoord[(new_idx_map[ii] + nloc) * 3 + jj] = temp_dcoord; - } - } - } - for (int ii = nloc; ii < nall; ii++) { - for (int jj = 0; jj < 3; jj++) { - extend_dcoord[new_idx_map[ii] * 3 + jj] = dcoord[ii * 3 + jj]; - if (atype[ii] < numb_types_spin) { - double temp_dcoord = dcoord[ii * 3 + jj] + spin[ii * 3 + jj] / - spin_norm[atype[ii]] * - virtual_len[atype[ii]]; - extend_dcoord[(new_idx_map[ii] + nghost) * 3 + jj] = temp_dcoord; - } - } - } - - // extend atype - extend_atype.resize(extend_nall); - for (int ii = 0; ii < nall; ii++) { - extend_atype[new_idx_map[ii]] = atype[ii]; - if (atype[ii] < numb_types_spin) { - if (ii < nloc) { - extend_atype[new_idx_map[ii] + nloc] = atype[ii] + numb_types_real; - } else { - extend_atype[new_idx_map[ii] + nghost] = atype[ii] + numb_types_real; - } - } - } -} - -template void DeepPotTF::extend( - int& extend_inum, - std::vector& extend_ilist, - std::vector& extend_numneigh, - std::vector>& extend_neigh, - std::vector& extend_firstneigh, - std::vector& extend_dcoord, - std::vector& extend_atype, - int& extend_nghost, - std::map& new_idx_map, - std::map& old_idx_map, - const InputNlist& lmp_list, - const std::vector& dcoord, - const std::vector& atype, - const int nghost, - const std::vector& spin, - const int numb_types, - const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm); - -template void DeepPotTF::extend( - int& extend_inum, - std::vector& extend_ilist, - std::vector& extend_numneigh, - std::vector>& extend_neigh, - std::vector& extend_firstneigh, - std::vector& extend_dcoord, - std::vector& extend_atype, - int& extend_nghost, - std::map& new_idx_map, - std::map& old_idx_map, - const InputNlist& lmp_list, - const std::vector& dcoord, - const std::vector& atype, - const int nghost, - const std::vector& spin, - const int numb_types, - const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm); - -template -void DeepPotTF::extend_nlist(std::vector& extend_dcoord, - std::vector& extend_atype, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_) { - if (dtype == tensorflow::DT_DOUBLE) { - get_vector(virtual_len, "spin_attr/virtual_len"); - get_vector(spin_norm, "spin_attr/spin_norm"); - } else { - std::vector virtual_len; - std::vector spin_norm; - get_vector(virtual_len, "spin_attr/virtual_len"); - get_vector(spin_norm, "spin_attr/spin_norm"); - } - // extend coord and atype - int nloc = datype_.size(); - int nloc_spin = 0; - for (int ii = 0; ii < nloc; ii++) { - if (datype_[ii] < ntypes_spin) { - nloc_spin += 1; - } - } - int extend_nall = nloc + nloc_spin; - extend_dcoord.resize(static_cast(extend_nall) * 3); - extend_atype.resize(extend_nall); - for (int ii = 0; ii < nloc; ii++) { - extend_atype[ii] = datype_[ii]; - if (datype_[ii] < ntypes_spin) { - extend_atype[ii + nloc] = datype_[ii] + ntypes - ntypes_spin; - } - for (int jj = 0; jj < 3; jj++) { - extend_dcoord[ii * 3 + jj] = dcoord_[ii * 3 + jj]; - if (datype_[ii] < ntypes_spin) { - extend_dcoord[(ii + nloc) * 3 + jj] = - dcoord_[ii * 3 + jj] + dspin_[ii * 3 + jj] / - spin_norm[datype_[ii]] * - virtual_len[datype_[ii]]; - } - } - } -} - -template void DeepPotTF::extend_nlist( - std::vector& extend_dcoord, - std::vector& extend_atype, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_); - -template void DeepPotTF::extend_nlist(std::vector& extend_dcoord, - std::vector& extend_atype, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_); #endif diff --git a/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc b/source/api_cc/tests/test_deeppot_dpa_pt_spin.cc similarity index 96% rename from source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc rename to source/api_cc/tests/test_deeppot_dpa_pt_spin.cc index 9276489c7b..d0cc7f35df 100644 --- a/source/api_cc/tests/test_deeppot_dpa1_pt_spin.cc +++ b/source/api_cc/tests/test_deeppot_dpa_pt_spin.cc @@ -18,7 +18,7 @@ #define EPSILON (std::is_same::value ? 1e-7 : 1e-1) template -class TestInferDeepPotDpaPtSpin : public ::testing::Test { +class TestInferDeepSpinDpaPt : public ::testing::Test { protected: std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, @@ -107,9 +107,9 @@ class TestInferDeepPotDpaPtSpin : public ::testing::Test { void TearDown() override {}; }; -TYPED_TEST_SUITE(TestInferDeepPotDpaPtSpin, ValueTypes); +TYPED_TEST_SUITE(TestInferDeepSpinDpaPt, ValueTypes); -TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist) { +TYPED_TEST(TestInferDeepSpinDpaPt, cpu_build_nlist) { using VALUETYPE = TypeParam; const std::vector& coord = this->coord; const std::vector& spin = this->spin; @@ -141,7 +141,7 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist) { // } } -TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { +TYPED_TEST(TestInferDeepSpinDpaPt, cpu_build_nlist_atomic) { using VALUETYPE = TypeParam; const std::vector& coord = this->coord; const std::vector& spin = this->spin; @@ -183,7 +183,7 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { } // template -// class TestInferDeepPotDpaPtSpinNopbc : public ::testing::Test { +// class TestInferDeepSpinDpaPtNopbc : public ::testing::Test { // protected: // std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, // 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, @@ -259,9 +259,9 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { // void TearDown() override {}; // }; -// TYPED_TEST_SUITE(TestInferDeepPotDpaPtSpinNopbc, ValueTypes); +// TYPED_TEST_SUITE(TestInferDeepSpinDpaPtNopbc, ValueTypes); -// TYPED_TEST(TestInferDeepPotDpaPtSpinNopbc, cpu_build_nlist) { +// TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_build_nlist) { // using VALUETYPE = TypeParam; // const std::vector& coord = this->coord; // const std::vector& spin = this->spin; @@ -293,7 +293,7 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { // // } // } -// TYPED_TEST(TestInferDeepPotDpaPtSpinNopbc, cpu_build_nlist_atomic) { +// TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_build_nlist_atomic) { // using VALUETYPE = TypeParam; // const std::vector& coord = this->coord; // const std::vector& spin = this->spin; @@ -334,7 +334,7 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { // // } // } -// TYPED_TEST(TestInferDeepPotDpaPtSpinNopbc, cpu_lmp_nlist) { +// TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_lmp_nlist) { // using VALUETYPE = TypeParam; // const std::vector& coord = this->coord; // const std::vector& spin = this->spin; @@ -375,7 +375,7 @@ TYPED_TEST(TestInferDeepPotDpaPtSpin, cpu_build_nlist_atomic) { // // } // } -// TYPED_TEST(TestInferDeepPotDpaPtSpinNopbc, cpu_lmp_nlist_atomic) { +// TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_lmp_nlist_atomic) { // using VALUETYPE = TypeParam; // const std::vector& coord = this->coord; // const std::vector& spin = this->spin; diff --git a/source/api_cc/tests/test_deeppot_tf_spin.cc b/source/api_cc/tests/test_deeppot_tf_spin.cc index 1cab895e04..a7a542f532 100644 --- a/source/api_cc/tests/test_deeppot_tf_spin.cc +++ b/source/api_cc/tests/test_deeppot_tf_spin.cc @@ -14,7 +14,7 @@ #include "test_utils.h" template -class TestInferDeepPotSpin : public ::testing::Test { +class TestInferDeepSpin : public ::testing::Test { protected: std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; @@ -58,9 +58,9 @@ class TestInferDeepPotSpin : public ::testing::Test { void TearDown() override { remove("deepspin_nlist.pb"); }; }; -TYPED_TEST_SUITE(TestInferDeepPotSpin, ValueTypes); +TYPED_TEST_SUITE(TestInferDeepSpin, ValueTypes); -TYPED_TEST(TestInferDeepPotSpin, cpu_build_nlist) { +TYPED_TEST(TestInferDeepSpin, cpu_build_nlist) { using VALUETYPE = TypeParam; const std::vector& coord = this->coord; const std::vector& spin = this->spin; @@ -84,7 +84,7 @@ TYPED_TEST(TestInferDeepPotSpin, cpu_build_nlist) { } } -TYPED_TEST(TestInferDeepPotSpin, cpu_build_nlist_atomic) { +TYPED_TEST(TestInferDeepSpin, cpu_build_nlist_atomic) { using VALUETYPE = TypeParam; const std::vector& coord = this->coord; const std::vector& spin = this->spin; diff --git a/source/lmp/plugin/deepmdplugin.cpp b/source/lmp/plugin/deepmdplugin.cpp index 007d02855b..4f62cb3944 100644 --- a/source/lmp/plugin/deepmdplugin.cpp +++ b/source/lmp/plugin/deepmdplugin.cpp @@ -55,12 +55,14 @@ extern "C" void lammpsplugin_init(void *lmp, void *handle, void *regfunc) { plugin.style = "compute"; plugin.name = "deeptensor/atom"; plugin.info = "compute deeptensor/atom " STR_GIT_SUMM; + plugin.author = "Han Wang"; plugin.creator.v2 = (lammpsplugin_factory2 *)&computedeepmdtensoratom; (*register_plugin)(&plugin, lmp); plugin.style = "fix"; plugin.name = "dplr"; plugin.info = "fix dplr " STR_GIT_SUMM; + plugin.author = "Han Wang"; plugin.creator.v2 = (lammpsplugin_factory2 *)&fixdplr; (*register_plugin)(&plugin, lmp); @@ -69,6 +71,7 @@ extern "C" void lammpsplugin_init(void *lmp, void *handle, void *regfunc) { plugin.style = "kspace"; plugin.name = "pppm/dplr"; plugin.info = "kspace pppm/dplr " STR_GIT_SUMM; + plugin.author = "Han Wang"; plugin.creator.v1 = (lammpsplugin_factory1 *)&pppmdplr; (*register_plugin)(&plugin, lmp); #endif From 643e20213ff5e21d612b57313c19a27c1277e936 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 5 Nov 2024 00:28:55 +0800 Subject: [PATCH 130/193] fix ut --- source/api_cc/include/DeepPotTF.h | 2 +- source/api_cc/include/DeepSpinTF.h | 2 +- source/api_cc/src/DeepSpin.cc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index 1b6b75bce7..f020e8c92a 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -16,7 +16,7 @@ class DeepPotTF : public DeepPotBase { * @brief DP constructor without initialization. **/ DeepPotTF(); - ~DeepPotTF(); + virtual ~DeepPotTF(); /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. diff --git a/source/api_cc/include/DeepSpinTF.h b/source/api_cc/include/DeepSpinTF.h index bcad6ef7df..6c8da772c6 100644 --- a/source/api_cc/include/DeepSpinTF.h +++ b/source/api_cc/include/DeepSpinTF.h @@ -16,7 +16,7 @@ class DeepSpinTF : public DeepSpinBase { * @brief DP constructor without initialization. **/ DeepSpinTF(); - ~DeepSpinTF(); + virtual ~DeepSpinTF(); /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. diff --git a/source/api_cc/src/DeepSpin.cc b/source/api_cc/src/DeepSpin.cc index b79e166efe..e62f0df5f9 100644 --- a/source/api_cc/src/DeepSpin.cc +++ b/source/api_cc/src/DeepSpin.cc @@ -62,7 +62,7 @@ void DeepSpin::init(const std::string& model, throw deepmd::deepmd_exception("Unknown file type"); } inited = true; - dpbase = dp; + dpbase = dp; // make sure the base funtions work } // support spin From fb4dfe051102ba4efb874db08eca16de4baaad5b Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 5 Nov 2024 00:39:01 +0800 Subject: [PATCH 131/193] add virtual methods --- source/api_cc/include/DeepBaseModel.h | 4 ++-- source/api_cc/include/DeepPot.h | 4 ++-- source/api_cc/include/DeepPotPT.h | 2 +- source/api_cc/include/DeepSpin.h | 4 ++-- source/api_cc/include/DeepSpinPT.h | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/source/api_cc/include/DeepBaseModel.h b/source/api_cc/include/DeepBaseModel.h index 72c54f65e4..18bc7091f5 100644 --- a/source/api_cc/include/DeepBaseModel.h +++ b/source/api_cc/include/DeepBaseModel.h @@ -86,7 +86,7 @@ class DeepBaseModel { * @brief DP constructor without initialization. **/ DeepBaseModel(); - ~DeepBaseModel(); + virtual ~DeepBaseModel(); /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. @@ -153,7 +153,7 @@ class DeepBaseModelDevi { * @brief DP model deviation constructor without initialization. **/ DeepBaseModelDevi(); - ~DeepBaseModelDevi(); + virtual ~DeepBaseModelDevi(); /** * @brief Get the cutoff radius. diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index 196b8f2910..a74923fb31 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -210,7 +210,7 @@ class DeepPot : public DeepBaseModel { * @brief DP constructor without initialization. **/ DeepPot(); - ~DeepPot(); + virtual ~DeepPot(); /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. @@ -532,7 +532,7 @@ class DeepPotModelDevi : public DeepBaseModelDevi { * @brief DP model deviation constructor without initialization. **/ DeepPotModelDevi(); - ~DeepPotModelDevi(); + virtual ~DeepPotModelDevi(); /** * @brief DP model deviation constructor with initialization. * @param[in] models The names of the frozen model files. diff --git a/source/api_cc/include/DeepPotPT.h b/source/api_cc/include/DeepPotPT.h index 21b78ca550..9683813093 100644 --- a/source/api_cc/include/DeepPotPT.h +++ b/source/api_cc/include/DeepPotPT.h @@ -16,7 +16,7 @@ class DeepPotPT : public DeepPotBase { * @brief DP constructor without initialization. **/ DeepPotPT(); - ~DeepPotPT(); + virtual ~DeepPotPT(); /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. diff --git a/source/api_cc/include/DeepSpin.h b/source/api_cc/include/DeepSpin.h index babf1efaae..3a095f75bb 100644 --- a/source/api_cc/include/DeepSpin.h +++ b/source/api_cc/include/DeepSpin.h @@ -171,7 +171,7 @@ class DeepSpin : public DeepBaseModel { * @brief DP constructor without initialization. **/ DeepSpin(); - ~DeepSpin(); + virtual ~DeepSpin(); /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. @@ -432,7 +432,7 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { * @brief DP model deviation constructor without initialization. **/ DeepSpinModelDevi(); - ~DeepSpinModelDevi(); + virtual ~DeepSpinModelDevi(); /** * @brief DP model deviation constructor with initialization. * @param[in] models The names of the frozen model files. diff --git a/source/api_cc/include/DeepSpinPT.h b/source/api_cc/include/DeepSpinPT.h index 778c69758b..20a1e7303f 100644 --- a/source/api_cc/include/DeepSpinPT.h +++ b/source/api_cc/include/DeepSpinPT.h @@ -16,7 +16,7 @@ class DeepSpinPT : public DeepSpinBase { * @brief DP constructor without initialization. **/ DeepSpinPT(); - ~DeepSpinPT(); + virtual ~DeepSpinPT(); /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. From 38815b371162a2153b0c2b24a38867825665c7a3 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 4 Nov 2024 15:15:03 -0500 Subject: [PATCH 132/193] feat(jax): export call_lower to SavedModel via jax2tf (#4254) ## Summary by CodeRabbit ## Release Notes - **New Features** - Added support for the TensorFlow SavedModel format, allowing users to handle additional model file types. - Introduced a new TensorFlow model wrapper class for enhanced integration with JAX functionalities. - **Bug Fixes** - Improved error handling for unsupported file formats during model deserialization. - **Documentation** - Updated backend documentation to reflect new file extensions and clarify backend capabilities. - **Tests** - Enhanced test structure for better clarity and maintainability regarding backend handling. - Added a new job for testing TensorFlow 2 in eager mode within the testing workflow. - Introduced a conditional skip for tests based on TensorFlow 2 compatibility. --------- Signed-off-by: Jinzhe Zeng --- .github/workflows/test_python.yml | 18 +- deepmd/backend/jax.py | 2 +- deepmd/jax/infer/deep_eval.py | 27 ++- deepmd/jax/jax2tf/__init__.py | 11 + deepmd/jax/jax2tf/serialization.py | 172 ++++++++++++++ deepmd/jax/jax2tf/tfmodel.py | 325 ++++++++++++++++++++++++++ deepmd/jax/utils/serialization.py | 12 +- doc/backend.md | 3 +- pyproject.toml | 1 + source/tests/consistent/io/test_io.py | 18 +- source/tests/utils.py | 1 + 11 files changed, 568 insertions(+), 22 deletions(-) create mode 100644 deepmd/jax/jax2tf/__init__.py create mode 100644 deepmd/jax/jax2tf/serialization.py create mode 100644 deepmd/jax/jax2tf/tfmodel.py diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index e46bddd98a..422dcb5f17 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -25,19 +25,23 @@ jobs: python-version: ${{ matrix.python }} - run: python -m pip install -U uv - run: | - source/install/uv_with_retry.sh pip install --system mpich + source/install/uv_with_retry.sh pip install --system openmpi tensorflow-cpu source/install/uv_with_retry.sh pip install --system torch -i https://download.pytorch.org/whl/cpu + export TENSORFLOW_ROOT=$(python -c 'import tensorflow;print(tensorflow.__path__[0])') export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') - source/install/uv_with_retry.sh pip install --system --only-binary=horovod -e .[cpu,test,jax] horovod[tensorflow-cpu] mpi4py + source/install/uv_with_retry.sh pip install --system -e .[test,jax] mpi4py + source/install/uv_with_retry.sh pip install --system horovod --no-build-isolation env: # Please note that uv has some issues with finding # existing TensorFlow package. Currently, it uses # TensorFlow in the build dependency, but if it # changes, setting `TENSORFLOW_ROOT`. - TENSORFLOW_VERSION: 2.16.1 DP_ENABLE_PYTORCH: 1 DP_BUILD_TESTING: 1 - UV_EXTRA_INDEX_URL: "https://pypi.anaconda.org/njzjz/simple https://pypi.anaconda.org/mpi4py/simple" + UV_EXTRA_INDEX_URL: "https://pypi.anaconda.org/mpi4py/simple" + HOROVOD_WITH_TENSORFLOW: 1 + HOROVOD_WITHOUT_PYTORCH: 1 + HOROVOD_WITH_MPI: 1 - run: dp --version - name: Get durations from cache uses: actions/cache@v4 @@ -53,6 +57,12 @@ jobs: - run: pytest --cov=deepmd source/tests --durations=0 --splits 6 --group ${{ matrix.group }} --store-durations --durations-path=.test_durations --splitting-algorithm least_duration env: NUM_WORKERS: 0 + - name: Test TF2 eager mode + run: pytest --cov=deepmd source/tests/consistent/io/test_io.py --durations=0 + env: + NUM_WORKERS: 0 + DP_TEST_TF2_ONLY: 1 + if: matrix.group == 1 - run: mv .test_durations .test_durations_${{ matrix.group }} - name: Upload partial durations uses: actions/upload-artifact@v4 diff --git a/deepmd/backend/jax.py b/deepmd/backend/jax.py index cfb0936bda..7a714c2090 100644 --- a/deepmd/backend/jax.py +++ b/deepmd/backend/jax.py @@ -38,7 +38,7 @@ class JAXBackend(Backend): | Backend.Feature.NEIGHBOR_STAT ) """The features of the backend.""" - suffixes: ClassVar[list[str]] = [".hlo", ".jax"] + suffixes: ClassVar[list[str]] = [".hlo", ".jax", ".savedmodel"] """The suffixes of the backend.""" def is_available(self) -> bool: diff --git a/deepmd/jax/infer/deep_eval.py b/deepmd/jax/infer/deep_eval.py index b60076c68c..fc526a502e 100644 --- a/deepmd/jax/infer/deep_eval.py +++ b/deepmd/jax/infer/deep_eval.py @@ -90,15 +90,24 @@ def __init__( self.output_def = output_def self.model_path = model_file - model_data = load_dp_model(model_file) - self.dp = HLO( - stablehlo=model_data["@variables"]["stablehlo"].tobytes(), - stablehlo_atomic_virial=model_data["@variables"][ - "stablehlo_atomic_virial" - ].tobytes(), - model_def_script=model_data["model_def_script"], - **model_data["constants"], - ) + if model_file.endswith(".hlo"): + model_data = load_dp_model(model_file) + self.dp = HLO( + stablehlo=model_data["@variables"]["stablehlo"].tobytes(), + stablehlo_atomic_virial=model_data["@variables"][ + "stablehlo_atomic_virial" + ].tobytes(), + model_def_script=model_data["model_def_script"], + **model_data["constants"], + ) + elif model_file.endswith(".savedmodel"): + from deepmd.jax.jax2tf.tfmodel import ( + TFModelWrapper, + ) + + self.dp = TFModelWrapper(model_file) + else: + raise ValueError("Unsupported file extension") self.rcut = self.dp.get_rcut() self.type_map = self.dp.get_type_map() if isinstance(auto_batch_size, bool): diff --git a/deepmd/jax/jax2tf/__init__.py b/deepmd/jax/jax2tf/__init__.py new file mode 100644 index 0000000000..88a928f04d --- /dev/null +++ b/deepmd/jax/jax2tf/__init__.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import tensorflow as tf + +if not tf.executing_eagerly(): + # TF disallow temporary eager execution + raise RuntimeError( + "Unfortunatly, jax2tf (requires eager execution) cannot be used with the " + "TensorFlow backend (disables eager execution). " + "If you are converting a model between different backends, " + "considering converting to the `.dp` format first." + ) diff --git a/deepmd/jax/jax2tf/serialization.py b/deepmd/jax/jax2tf/serialization.py new file mode 100644 index 0000000000..dff43a11fc --- /dev/null +++ b/deepmd/jax/jax2tf/serialization.py @@ -0,0 +1,172 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json + +import tensorflow as tf +from jax.experimental import ( + jax2tf, +) + +from deepmd.jax.model.base_model import ( + BaseModel, +) + + +def deserialize_to_file(model_file: str, data: dict) -> None: + """Deserialize the dictionary to a model file. + + Parameters + ---------- + model_file : str + The model file to be saved. + data : dict + The dictionary to be deserialized. + """ + if model_file.endswith(".savedmodel"): + model = BaseModel.deserialize(data["model"]) + model_def_script = data["model_def_script"] + call_lower = model.call_lower + + tf_model = tf.Module() + + def exported_whether_do_atomic_virial(do_atomic_virial): + def call_lower_with_fixed_do_atomic_virial( + coord, atype, nlist, mapping, fparam, aparam + ): + return call_lower( + coord, + atype, + nlist, + mapping, + fparam, + aparam, + do_atomic_virial=do_atomic_virial, + ) + + return jax2tf.convert( + call_lower_with_fixed_do_atomic_virial, + polymorphic_shapes=[ + "(nf, nloc + nghost, 3)", + "(nf, nloc + nghost)", + f"(nf, nloc, {model.get_nnei()})", + "(nf, nloc + nghost)", + f"(nf, {model.get_dim_fparam()})", + f"(nf, nloc, {model.get_dim_aparam()})", + ], + with_gradient=True, + ) + + # Save a function that can take scalar inputs. + # We need to explicit set the function name, so C++ can find it. + @tf.function( + autograph=False, + input_signature=[ + tf.TensorSpec([None, None, 3], tf.float64), + tf.TensorSpec([None, None], tf.int32), + tf.TensorSpec([None, None, model.get_nnei()], tf.int64), + tf.TensorSpec([None, None], tf.int64), + tf.TensorSpec([None, model.get_dim_fparam()], tf.float64), + tf.TensorSpec([None, None, model.get_dim_aparam()], tf.float64), + ], + ) + def call_lower_without_atomic_virial( + coord, atype, nlist, mapping, fparam, aparam + ): + return exported_whether_do_atomic_virial(do_atomic_virial=False)( + coord, atype, nlist, mapping, fparam, aparam + ) + + tf_model.call_lower = call_lower_without_atomic_virial + + @tf.function( + autograph=False, + input_signature=[ + tf.TensorSpec([None, None, 3], tf.float64), + tf.TensorSpec([None, None], tf.int32), + tf.TensorSpec([None, None, model.get_nnei()], tf.int64), + tf.TensorSpec([None, None], tf.int64), + tf.TensorSpec([None, model.get_dim_fparam()], tf.float64), + tf.TensorSpec([None, None, model.get_dim_aparam()], tf.float64), + ], + ) + def call_lower_with_atomic_virial(coord, atype, nlist, mapping, fparam, aparam): + return exported_whether_do_atomic_virial(do_atomic_virial=True)( + coord, atype, nlist, mapping, fparam, aparam + ) + + tf_model.call_lower_atomic_virial = call_lower_with_atomic_virial + + # set functions to export other attributes + @tf.function + def get_type_map(): + return tf.constant(model.get_type_map(), dtype=tf.string) + + tf_model.get_type_map = get_type_map + + @tf.function + def get_rcut(): + return tf.constant(model.get_rcut(), dtype=tf.double) + + tf_model.get_rcut = get_rcut + + @tf.function + def get_dim_fparam(): + return tf.constant(model.get_dim_fparam(), dtype=tf.int64) + + tf_model.get_dim_fparam = get_dim_fparam + + @tf.function + def get_dim_aparam(): + return tf.constant(model.get_dim_aparam(), dtype=tf.int64) + + tf_model.get_dim_aparam = get_dim_aparam + + @tf.function + def get_sel_type(): + return tf.constant(model.get_sel_type(), dtype=tf.int64) + + tf_model.get_sel_type = get_sel_type + + @tf.function + def is_aparam_nall(): + return tf.constant(model.is_aparam_nall(), dtype=tf.bool) + + tf_model.is_aparam_nall = is_aparam_nall + + @tf.function + def model_output_type(): + return tf.constant(model.model_output_type(), dtype=tf.string) + + tf_model.model_output_type = model_output_type + + @tf.function + def mixed_types(): + return tf.constant(model.mixed_types(), dtype=tf.bool) + + tf_model.mixed_types = mixed_types + + if model.get_min_nbor_dist() is not None: + + @tf.function + def get_min_nbor_dist(): + return tf.constant(model.get_min_nbor_dist(), dtype=tf.double) + + tf_model.get_min_nbor_dist = get_min_nbor_dist + + @tf.function + def get_sel(): + return tf.constant(model.get_sel(), dtype=tf.int64) + + tf_model.get_sel = get_sel + + @tf.function + def get_model_def_script(): + return tf.constant( + json.dumps(model_def_script, separators=(",", ":")), dtype=tf.string + ) + + tf_model.get_model_def_script = get_model_def_script + tf.saved_model.save( + tf_model, + model_file, + options=tf.saved_model.SaveOptions(experimental_custom_gradients=True), + ) diff --git a/deepmd/jax/jax2tf/tfmodel.py b/deepmd/jax/jax2tf/tfmodel.py new file mode 100644 index 0000000000..8f04014a97 --- /dev/null +++ b/deepmd/jax/jax2tf/tfmodel.py @@ -0,0 +1,325 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Any, + Optional, +) + +import jax.experimental.jax2tf as jax2tf +import tensorflow as tf + +from deepmd.dpmodel.model.make_model import ( + model_call_from_call_lower, +) +from deepmd.dpmodel.output_def import ( + FittingOutputDef, + ModelOutputDef, + OutputVariableDef, +) +from deepmd.jax.env import ( + jnp, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) + +OUTPUT_DEFS = { + "energy": OutputVariableDef( + "energy", + shape=[1], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + "mask": OutputVariableDef( + "mask", + shape=[1], + reducible=False, + r_differentiable=False, + c_differentiable=False, + ), +} + + +def decode_list_of_bytes(list_of_bytes: list[bytes]) -> list[str]: + """Decode a list of bytes to a list of strings.""" + return [x.decode() for x in list_of_bytes] + + +class TFModelWrapper(tf.Module): + def __init__( + self, + model, + ) -> None: + self.model = tf.saved_model.load(model) + self._call_lower = jax2tf.call_tf(self.model.call_lower) + self._call_lower_atomic_virial = jax2tf.call_tf( + self.model.call_lower_atomic_virial + ) + self.type_map = decode_list_of_bytes(self.model.get_type_map().numpy().tolist()) + self.rcut = self.model.get_rcut().numpy().item() + self.dim_fparam = self.model.get_dim_fparam().numpy().item() + self.dim_aparam = self.model.get_dim_aparam().numpy().item() + self.sel_type = self.model.get_sel_type().numpy().tolist() + self._is_aparam_nall = self.model.is_aparam_nall().numpy().item() + self._model_output_type = decode_list_of_bytes( + self.model.model_output_type().numpy().tolist() + ) + self._mixed_types = self.model.mixed_types().numpy().item() + if hasattr(self.model, "get_min_nbor_dist"): + self.min_nbor_dist = self.model.get_min_nbor_dist().numpy().item() + else: + self.min_nbor_dist = None + self.sel = self.model.get_sel().numpy().tolist() + self.model_def_script = self.model.get_model_def_script().numpy().decode() + + def __call__( + self, + coord: jnp.ndarray, + atype: jnp.ndarray, + box: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + do_atomic_virial: bool = False, + ) -> Any: + """Return model prediction. + + Parameters + ---------- + coord + The coordinates of the atoms. + shape: nf x (nloc x 3) + atype + The type of atoms. shape: nf x nloc + box + The simulation box. shape: nf x 9 + fparam + frame parameter. nf x ndf + aparam + atomic parameter. nf x nloc x nda + do_atomic_virial + If calculate the atomic virial. + + Returns + ------- + ret_dict + The result dict of type dict[str,jnp.ndarray]. + The keys are defined by the `ModelOutputDef`. + + """ + return self.call(coord, atype, box, fparam, aparam, do_atomic_virial) + + def call( + self, + coord: jnp.ndarray, + atype: jnp.ndarray, + box: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + do_atomic_virial: bool = False, + ): + """Return model prediction. + + Parameters + ---------- + coord + The coordinates of the atoms. + shape: nf x (nloc x 3) + atype + The type of atoms. shape: nf x nloc + box + The simulation box. shape: nf x 9 + fparam + frame parameter. nf x ndf + aparam + atomic parameter. nf x nloc x nda + do_atomic_virial + If calculate the atomic virial. + + Returns + ------- + ret_dict + The result dict of type dict[str,jnp.ndarray]. + The keys are defined by the `ModelOutputDef`. + + """ + return model_call_from_call_lower( + call_lower=self.call_lower, + rcut=self.get_rcut(), + sel=self.get_sel(), + mixed_types=self.mixed_types(), + model_output_def=self.model_output_def(), + coord=coord, + atype=atype, + box=box, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) + + def model_output_def(self): + return ModelOutputDef( + FittingOutputDef([OUTPUT_DEFS[tt] for tt in self.model_output_type()]) + ) + + def call_lower( + self, + extended_coord: jnp.ndarray, + extended_atype: jnp.ndarray, + nlist: jnp.ndarray, + mapping: Optional[jnp.ndarray] = None, + fparam: Optional[jnp.ndarray] = None, + aparam: Optional[jnp.ndarray] = None, + do_atomic_virial: bool = False, + ): + if do_atomic_virial: + call_lower = self._call_lower_atomic_virial + else: + call_lower = self._call_lower + # Attempt to convert a value (None) with an unsupported type () to a Tensor. + if fparam is None: + fparam = jnp.empty( + (extended_coord.shape[0], self.get_dim_fparam()), dtype=jnp.float64 + ) + if aparam is None: + aparam = jnp.empty( + (extended_coord.shape[0], nlist.shape[1], self.get_dim_aparam()), + dtype=jnp.float64, + ) + return call_lower( + extended_coord, + extended_atype, + nlist, + mapping, + fparam, + aparam, + ) + + def get_type_map(self) -> list[str]: + """Get the type map.""" + return self.type_map + + def get_rcut(self): + """Get the cut-off radius.""" + return self.rcut + + def get_dim_fparam(self): + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.dim_fparam + + def get_dim_aparam(self): + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.dim_aparam + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return self.sel_type + + def is_aparam_nall(self) -> bool: + """Check whether the shape of atomic parameters is (nframes, nall, ndim). + + If False, the shape is (nframes, nloc, ndim). + """ + return self._is_aparam_nall + + def model_output_type(self) -> list[str]: + """Get the output type for the model.""" + return self._model_output_type + + def serialize(self) -> dict: + """Serialize the model. + + Returns + ------- + dict + The serialized data + """ + raise NotImplementedError("Not implemented") + + @classmethod + def deserialize(cls, data: dict) -> "TFModelWrapper": + """Deserialize the model. + + Parameters + ---------- + data : dict + The serialized data + + Returns + ------- + BaseModel + The deserialized model + """ + raise NotImplementedError("Not implemented") + + def get_model_def_script(self) -> str: + """Get the model definition script.""" + return self.model_def_script + + def get_min_nbor_dist(self) -> Optional[float]: + """Get the minimum distance between two atoms.""" + return self.min_nbor_dist + + def get_nnei(self) -> int: + """Returns the total number of selected neighboring atoms in the cut-off radius.""" + return self.get_nsel() + + def get_sel(self) -> list[int]: + return self.sel + + def get_nsel(self) -> int: + """Returns the total number of selected neighboring atoms in the cut-off radius.""" + return sum(self.sel) + + def mixed_types(self) -> bool: + return self._mixed_types + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. + + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statictics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + raise NotImplementedError("Not implemented") + + @classmethod + def get_model(cls, model_params: dict) -> "TFModelWrapper": + """Get the model by the parameters. + + By default, all the parameters are directly passed to the constructor. + If not, override this method. + + Parameters + ---------- + model_params : dict + The model parameters + + Returns + ------- + BaseBaseModel + The model + """ + raise NotImplementedError("Not implemented") diff --git a/deepmd/jax/utils/serialization.py b/deepmd/jax/utils/serialization.py index ec2de3060e..6ab99a81f0 100644 --- a/deepmd/jax/utils/serialization.py +++ b/deepmd/jax/utils/serialization.py @@ -55,13 +55,13 @@ def deserialize_to_file(model_file: str, data: dict) -> None: def exported_whether_do_atomic_virial(do_atomic_virial): def call_lower_with_fixed_do_atomic_virial( - coord, atype, nlist, nlist_start, fparam, aparam + coord, atype, nlist, mapping, fparam, aparam ): return call_lower( coord, atype, nlist, - nlist_start, + mapping, fparam, aparam, do_atomic_virial=do_atomic_virial, @@ -107,8 +107,14 @@ def call_lower_with_fixed_do_atomic_virial( "sel": model.get_sel(), } save_dp_model(filename=model_file, model_dict=data) + elif model_file.endswith(".savedmodel"): + from deepmd.jax.jax2tf.serialization import ( + deserialize_to_file as deserialize_to_savedmodel, + ) + + return deserialize_to_savedmodel(model_file, data) else: - raise ValueError("JAX backend only supports converting .jax directory") + raise ValueError("Unsupported file extension") def serialize_from_file(model_file: str) -> dict: diff --git a/doc/backend.md b/doc/backend.md index cf99eea9cb..3fb70bee90 100644 --- a/doc/backend.md +++ b/doc/backend.md @@ -25,11 +25,12 @@ While `.pth` and `.pt` are the same in the PyTorch package, they have different ### JAX {{ jax_icon }} -- Model filename extension: `.xlo` +- Model filename extension: `.xlo`, `.savedmodel` - Checkpoint filename extension: `.jax` [JAX](https://jax.readthedocs.io/) 0.4.33 (which requires Python 3.10 or above) or above is required. Both `.xlo` and `.jax` are customized format extensions defined in DeePMD-kit, since JAX has no convention for file extensions. +`.savedmodel` is the TensorFlow [SavedModel format](https://www.tensorflow.org/guide/saved_model) generated by [JAX2TF](https://www.tensorflow.org/guide/jax2tf), which needs the installation of TensorFlow. Currently, this backend is developed actively, and has no support for training and the C++ interface. ### DP {{ dpmodel_icon }} diff --git a/pyproject.toml b/pyproject.toml index 1faacb973c..802e920014 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -444,6 +444,7 @@ select = [ [tool.uv.sources] mpich = { index = "mpi4py" } +openmpi = { index = "mpi4py" } [[tool.uv.index]] name = "mpi4py" diff --git a/source/tests/consistent/io/test_io.py b/source/tests/consistent/io/test_io.py index 91cd391322..ca213da13c 100644 --- a/source/tests/consistent/io/test_io.py +++ b/source/tests/consistent/io/test_io.py @@ -23,6 +23,7 @@ from ...utils import ( CI, + DP_TEST_TF2_ONLY, TEST_DEVICE, ) @@ -72,6 +73,7 @@ def tearDown(self): shutil.rmtree(ii) @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") + @unittest.skipIf(DP_TEST_TF2_ONLY, "Conflict with TF2 eager mode.") def test_data_equal(self): prefix = "test_consistent_io_" + self.__class__.__name__.lower() for backend_name, suffix_idx in ( @@ -140,13 +142,21 @@ def test_deep_eval(self): nframes = self.atype.shape[0] prefix = "test_consistent_io_" + self.__class__.__name__.lower() rets = [] - for backend_name in ("tensorflow", "pytorch", "dpmodel", "jax"): + for backend_name, suffix_idx in ( + # unfortunately, jax2tf cannot work with tf v1 behaviors + ("jax", 2) if DP_TEST_TF2_ONLY else ("tensorflow", 0), + ("pytorch", 0), + ("dpmodel", 0), + ("jax", 0), + ): backend = Backend.get_backend(backend_name)() if not backend.is_available(): continue reference_data = copy.deepcopy(self.data) - self.save_data_to_model(prefix + backend.suffixes[0], reference_data) - deep_eval = DeepEval(prefix + backend.suffixes[0]) + self.save_data_to_model( + prefix + backend.suffixes[suffix_idx], reference_data + ) + deep_eval = DeepEval(prefix + backend.suffixes[suffix_idx]) if deep_eval.get_dim_fparam() > 0: fparam = np.ones((nframes, deep_eval.get_dim_fparam())) else: @@ -169,7 +179,7 @@ def test_deep_eval(self): self.atype, fparam=fparam, aparam=aparam, - do_atomic_virial=True, + atomic=True, ) rets.append(ret) for ret in rets[1:]: diff --git a/source/tests/utils.py b/source/tests/utils.py index bfb3d445af..a9bf0f11ea 100644 --- a/source/tests/utils.py +++ b/source/tests/utils.py @@ -8,3 +8,4 @@ # see https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/store-information-in-variables#default-environment-variables CI = os.environ.get("CI") == "true" +DP_TEST_TF2_ONLY = os.environ.get("DP_TEST_TF2_ONLY") == "1" From 4b73fbe54d50546980cdd9a71f9e39c564cf75a8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 01:23:00 +0000 Subject: [PATCH 133/193] [pre-commit.ci] pre-commit autoupdate (#4310) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.1 → v0.7.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.1...v0.7.2) - [github.com/pre-commit/mirrors-clang-format: v19.1.2 → v19.1.3](https://github.com/pre-commit/mirrors-clang-format/compare/v19.1.2...v19.1.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6cb534fd22..721a0cd6eb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.7.1 + rev: v0.7.2 hooks: - id: ruff args: ["--fix"] @@ -60,7 +60,7 @@ repos: - id: blacken-docs # C++ - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v19.1.2 + rev: v19.1.3 hooks: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) From 9ed039765465229768535dda6d28f60888b2f42d Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 4 Nov 2024 21:46:39 -0500 Subject: [PATCH 134/193] fix(cmake): Replace deprecated `FetchContent_Populate` with `FetchContent_MakeAvailable` (#4309) Update `source/lmp/plugin/CMakeLists.txt` to use `FetchContent_MakeAvailable` instead of `FetchContent_Populate`. * Replace `FetchContent_Populate(lammps_download)` with `FetchContent_MakeAvailable(lammps_download)` on line 13. * Remove `FetchContent_GetProperties` and `if(NOT lammps_download_POPULATED)` block. This fixes a CMake warning: ``` CMake Warning (dev) at /home/runner/work/_temp/-111029589/cmake-3.30.5-linux-x86_64/share/cmake-3.30/Modules/FetchContent.cmake:1953 (message): Calling FetchContent_Populate(lammps_download) is deprecated, call FetchContent_MakeAvailable(lammps_download) instead. Policy CMP0169 can be set to OLD to allow FetchContent_Populate(lammps_download) to be called directly for now, but the ability to call it with declared details will be removed completely in a future version. Call Stack (most recent call first): lmp/plugin/CMakeLists.txt:13 (FetchContent_Populate) This warning is for project developers. Use -Wno-dev to suppress it. ``` --- For more details, open the [Copilot Workspace session](https://copilot-workspace.githubnext.com/njzjz/deepmd-kit?shareId=32a460fb-6c67-4397-b000-6f36e9841970). ## Summary by CodeRabbit - **Chores** - Simplified CMake configuration for the LAMMPS plugin, ensuring consistent availability of LAMMPS source. - Streamlined handling of LAMMPS versioning and installation logic. - Updated minimum required CMake version from 3.11 to 3.14. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- source/lmp/plugin/CMakeLists.txt | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/source/lmp/plugin/CMakeLists.txt b/source/lmp/plugin/CMakeLists.txt index f912059261..13da3d7114 100644 --- a/source/lmp/plugin/CMakeLists.txt +++ b/source/lmp/plugin/CMakeLists.txt @@ -2,17 +2,14 @@ if(DEFINED LAMMPS_SOURCE_ROOT OR DEFINED LAMMPS_VERSION) message(STATUS "enable LAMMPS plugin mode") add_library(lammps_interface INTERFACE) if(DEFINED LAMMPS_VERSION) - cmake_minimum_required(VERSION 3.11) + cmake_minimum_required(VERSION 3.14) include(FetchContent) FetchContent_Declare( lammps_download GIT_REPOSITORY https://github.com/lammps/lammps GIT_TAG ${LAMMPS_VERSION}) - FetchContent_GetProperties(lammps_download) - if(NOT lammps_download_POPULATED) - FetchContent_Populate(lammps_download) - set(LAMMPS_SOURCE_ROOT ${lammps_download_SOURCE_DIR}) - endif() + FetchContent_MakeAvailable(lammps_download) + set(LAMMPS_SOURCE_ROOT ${lammps_download_SOURCE_DIR}) endif() set(LAMMPS_HEADER_DIR ${LAMMPS_SOURCE_ROOT}/src) message(STATUS "LAMMPS_HEADER_DIR is ${LAMMPS_HEADER_DIR}") From dabedd230cd4541750707a9accbf729c50325d86 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 4 Nov 2024 23:23:30 -0500 Subject: [PATCH 135/193] fix(jax): calculate virial in `call_lower` (#4304) ## Summary by CodeRabbit - **New Features** - Enhanced output of the model by providing a reduced form of the virial tensor, improving usability for further calculations and analyses. - Introduced a new test class, `TestEnerLower`, to evaluate lower-level energy models, excluding TensorFlow functionality. --------- Signed-off-by: Jinzhe Zeng --- deepmd/jax/model/base_model.py | 2 + source/tests/consistent/model/test_ener.py | 220 ++++++++++++++++++++- 2 files changed, 221 insertions(+), 1 deletion(-) diff --git a/deepmd/jax/model/base_model.py b/deepmd/jax/model/base_model.py index 1e880700a2..44152a4c26 100644 --- a/deepmd/jax/model/base_model.py +++ b/deepmd/jax/model/base_model.py @@ -152,4 +152,6 @@ def eval_ce( avr, [0, def_ndim + 1, *range(1, def_ndim + 1), def_ndim + 2] ) model_predict[kk_derv_c] = extended_virial + # [nf, *def, 9] + model_predict[kk_derv_c + "_redu"] = jnp.sum(extended_virial, axis=1) return model_predict diff --git a/source/tests/consistent/model/test_ener.py b/source/tests/consistent/model/test_ener.py index ec73c57fa8..5d0253c5e8 100644 --- a/source/tests/consistent/model/test_ener.py +++ b/source/tests/consistent/model/test_ener.py @@ -6,8 +6,18 @@ import numpy as np +from deepmd.dpmodel.common import ( + to_numpy_array, +) from deepmd.dpmodel.model.ener_model import EnergyModel as EnergyModelDP from deepmd.dpmodel.model.model import get_model as get_model_dp +from deepmd.dpmodel.utils.nlist import ( + build_neighbor_list, + extend_coord_with_ghosts, +) +from deepmd.dpmodel.utils.region import ( + normalize_coord, +) from deepmd.env import ( GLOBAL_NP_FLOAT_PRECISION, ) @@ -27,7 +37,8 @@ if INSTALLED_PT: from deepmd.pt.model.model import get_model as get_model_pt from deepmd.pt.model.model.ener_model import EnergyModel as EnergyModelPT - + from deepmd.pt.utils.utils import to_numpy_array as torch_to_numpy + from deepmd.pt.utils.utils import to_torch_tensor as numpy_to_torch else: EnergyModelPT = None if INSTALLED_TF: @@ -39,6 +50,9 @@ ) if INSTALLED_JAX: + from deepmd.jax.common import ( + to_jax_array, + ) from deepmd.jax.model.ener_model import EnergyModel as EnergyModelJAX from deepmd.jax.model.model import get_model as get_model_jax else: @@ -243,3 +257,207 @@ def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: ret["energy_derv_c"].ravel(), ) raise ValueError(f"Unknown backend: {backend}") + + +@parameterized( + ( + [], + [[0, 1]], + ), + ( + [], + [1], + ), +) +class TestEnerLower(CommonTest, ModelTest, unittest.TestCase): + @property + def data(self) -> dict: + pair_exclude_types, atom_exclude_types = self.param + return { + "type_map": ["O", "H"], + "pair_exclude_types": pair_exclude_types, + "atom_exclude_types": atom_exclude_types, + "descriptor": { + "type": "se_e2_a", + "sel": [20, 20], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 3, + 6, + ], + "resnet_dt": False, + "axis_neuron": 2, + "precision": "float64", + "type_one_side": True, + "seed": 1, + }, + "fitting_net": { + "neuron": [ + 5, + 5, + ], + "resnet_dt": True, + "precision": "float64", + "seed": 1, + }, + } + + tf_class = EnergyModelTF + dp_class = EnergyModelDP + pt_class = EnergyModelPT + jax_class = EnergyModelJAX + args = model_args() + + def get_reference_backend(self): + """Get the reference backend. + + We need a reference backend that can reproduce forces. + """ + if not self.skip_pt: + return self.RefBackend.PT + if not self.skip_jax: + return self.RefBackend.JAX + if not self.skip_dp: + return self.RefBackend.DP + raise ValueError("No available reference") + + @property + def skip_tf(self): + # TF does not have lower interface + return True + + @property + def skip_jax(self): + return not INSTALLED_JAX + + def pass_data_to_cls(self, cls, data) -> Any: + """Pass data to the class.""" + data = data.copy() + if cls is EnergyModelDP: + return get_model_dp(data) + elif cls is EnergyModelPT: + return get_model_pt(data) + elif cls is EnergyModelJAX: + return get_model_jax(data) + return cls(**data, **self.additional_data) + + def setUp(self): + CommonTest.setUp(self) + + self.ntypes = 2 + coords = np.array( + [ + 12.83, + 2.56, + 2.18, + 12.09, + 2.87, + 2.74, + 00.25, + 3.32, + 1.68, + 3.36, + 3.00, + 1.81, + 3.51, + 2.51, + 2.60, + 4.27, + 3.22, + 1.56, + ], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ).reshape(1, -1, 3) + atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32).reshape(1, -1) + box = np.array( + [13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ).reshape(1, 9) + + rcut = 6.0 + nframes, nloc = atype.shape[:2] + coord_normalized = normalize_coord( + coords.reshape(nframes, nloc, 3), + box.reshape(nframes, 3, 3), + ) + extended_coord, extended_atype, mapping = extend_coord_with_ghosts( + coord_normalized, atype, box, rcut + ) + nlist = build_neighbor_list( + extended_coord, + extended_atype, + nloc, + 6.0, + [20, 20], + distinguish_types=True, + ) + extended_coord = extended_coord.reshape(nframes, -1, 3) + self.nlist = nlist + self.extended_coord = extended_coord + self.extended_atype = extended_atype + self.mapping = mapping + + def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]: + raise NotImplementedError("no TF in this test") + + def eval_dp(self, dp_obj: Any) -> Any: + return dp_obj.call_lower( + self.extended_coord, + self.extended_atype, + self.nlist, + self.mapping, + do_atomic_virial=True, + ) + + def eval_pt(self, pt_obj: Any) -> Any: + return { + kk: torch_to_numpy(vv) + for kk, vv in pt_obj.forward_lower( + numpy_to_torch(self.extended_coord), + numpy_to_torch(self.extended_atype), + numpy_to_torch(self.nlist), + numpy_to_torch(self.mapping), + do_atomic_virial=True, + ).items() + } + + def eval_jax(self, jax_obj: Any) -> Any: + return { + kk: to_numpy_array(vv) + for kk, vv in jax_obj.call_lower( + to_jax_array(self.extended_coord), + to_jax_array(self.extended_atype), + to_jax_array(self.nlist), + to_jax_array(self.mapping), + do_atomic_virial=True, + ).items() + } + + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: + # shape not matched. ravel... + if backend is self.RefBackend.DP: + return ( + ret["energy_redu"].ravel(), + ret["energy"].ravel(), + SKIP_FLAG, + SKIP_FLAG, + SKIP_FLAG, + ) + elif backend is self.RefBackend.PT: + return ( + ret["energy"].ravel(), + ret["atom_energy"].ravel(), + ret["extended_force"].ravel(), + ret["virial"].ravel(), + ret["extended_virial"].ravel(), + ) + elif backend is self.RefBackend.JAX: + return ( + ret["energy_redu"].ravel(), + ret["energy"].ravel(), + ret["energy_derv_r"].ravel(), + ret["energy_derv_c_redu"].ravel(), + ret["energy_derv_c"].ravel(), + ) + raise ValueError(f"Unknown backend: {backend}") From d1fd284aba79068591ff174fd9fc9bff85eb6d3d Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 5 Nov 2024 13:36:44 +0800 Subject: [PATCH 136/193] fix memory leak --- source/api_c/include/deepmd.hpp | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 98c46eb04a..e12d56ea30 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -845,7 +845,7 @@ class DeepBaseModel { * @brief DP Base Model constructor without initialization. **/ DeepBaseModel() : dpbase(nullptr) {}; - ~DeepBaseModel() { DP_DeleteDeepBaseModel(dpbase); }; + virtual ~DeepBaseModel() {}; /** * @brief Get the cutoff radius. @@ -956,7 +956,8 @@ class DeepPot : public DeepBaseModel { **/ DeepPot() : dp(nullptr) {}; ~DeepPot() { - // the base destructor will be called + DP_DeleteDeepPot(dp); + dpbase = nullptr; }; /** * @brief DP constructor with initialization. @@ -1429,7 +1430,8 @@ class DeepSpin : public DeepBaseModel { **/ DeepSpin() : dp(nullptr) {}; ~DeepSpin() { - // the base destructor will be called + DP_DeleteDeepSpin(dp); + dpbase = nullptr; }; /** * @brief DP constructor with initialization. @@ -1734,7 +1736,7 @@ class DeepBaseModelDevi { * @brief DP model deviation constructor without initialization. **/ DeepBaseModelDevi() : dpbase(nullptr) {}; - ~DeepBaseModelDevi() { DP_DeleteDeepBaseModelDevi(dpbase); }; + virtual ~DeepBaseModelDevi() {}; /** * @brief Get the cutoff radius. @@ -1944,7 +1946,8 @@ class DeepPotModelDevi : public DeepBaseModelDevi { **/ DeepPotModelDevi() : dp(nullptr) {}; ~DeepPotModelDevi() { - // the base destructor will be called + DP_DeleteDeepPotModelDevi(dp); + dpbase = nullptr; }; /** * @brief DP model deviation constructor with initialization. @@ -2539,7 +2542,8 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { **/ DeepSpinModelDevi() : dp(nullptr) {}; ~DeepSpinModelDevi() { - // the base destructor will be called + DP_DeleteDeepSpinModelDevi(dp); + dpbase = nullptr; }; /** * @brief DP model deviation constructor with initialization. From 99e1e05bf5a1394d0e47c6c45582239613f3ab2e Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 5 Nov 2024 15:10:58 +0800 Subject: [PATCH 137/193] add virtual methods --- source/api_c/include/c_api_internal.h | 2 ++ source/api_c/include/deepmd.hpp | 24 ++++++------------------ source/lmp/pair_base.h | 2 +- 3 files changed, 9 insertions(+), 19 deletions(-) diff --git a/source/api_c/include/c_api_internal.h b/source/api_c/include/c_api_internal.h index 1310c46487..9824be76ab 100644 --- a/source/api_c/include/c_api_internal.h +++ b/source/api_c/include/c_api_internal.h @@ -38,6 +38,7 @@ struct DP_Nlist { struct DP_DeepBaseModel { DP_DeepBaseModel(); DP_DeepBaseModel(deepmd::DeepBaseModel& dpbase); + virtual ~DP_DeepBaseModel() {}; deepmd::DeepBaseModel dpbase; std::string exception; @@ -49,6 +50,7 @@ struct DP_DeepBaseModel { struct DP_DeepBaseModelDevi { DP_DeepBaseModelDevi(); DP_DeepBaseModelDevi(deepmd::DeepBaseModelDevi& dpbase); + virtual ~DP_DeepBaseModelDevi() {}; deepmd::DeepBaseModelDevi dpbase; std::string exception; diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index e12d56ea30..53f8ed1c02 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -845,7 +845,7 @@ class DeepBaseModel { * @brief DP Base Model constructor without initialization. **/ DeepBaseModel() : dpbase(nullptr) {}; - virtual ~DeepBaseModel() {}; + virtual ~DeepBaseModel() { DP_DeleteDeepBaseModel(dpbase); }; /** * @brief Get the cutoff radius. @@ -955,10 +955,7 @@ class DeepPot : public DeepBaseModel { * @brief DP constructor without initialization. **/ DeepPot() : dp(nullptr) {}; - ~DeepPot() { - DP_DeleteDeepPot(dp); - dpbase = nullptr; - }; + ~DeepPot() {}; /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. @@ -1429,10 +1426,7 @@ class DeepSpin : public DeepBaseModel { * @brief DP constructor without initialization. **/ DeepSpin() : dp(nullptr) {}; - ~DeepSpin() { - DP_DeleteDeepSpin(dp); - dpbase = nullptr; - }; + ~DeepSpin() {}; /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. @@ -1736,7 +1730,7 @@ class DeepBaseModelDevi { * @brief DP model deviation constructor without initialization. **/ DeepBaseModelDevi() : dpbase(nullptr) {}; - virtual ~DeepBaseModelDevi() {}; + virtual ~DeepBaseModelDevi() { DP_DeleteDeepBaseModelDevi(dpbase); }; /** * @brief Get the cutoff radius. @@ -1945,10 +1939,7 @@ class DeepPotModelDevi : public DeepBaseModelDevi { * @brief DP model deviation constructor without initialization. **/ DeepPotModelDevi() : dp(nullptr) {}; - ~DeepPotModelDevi() { - DP_DeleteDeepPotModelDevi(dp); - dpbase = nullptr; - }; + ~DeepPotModelDevi() {}; /** * @brief DP model deviation constructor with initialization. * @param[in] models The names of the frozen model file. @@ -2541,10 +2532,7 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { * @brief DP model deviation constructor without initialization. **/ DeepSpinModelDevi() : dp(nullptr) {}; - ~DeepSpinModelDevi() { - DP_DeleteDeepSpinModelDevi(dp); - dpbase = nullptr; - }; + ~DeepSpinModelDevi() {}; /** * @brief DP model deviation constructor with initialization. * @param[in] models The names of the frozen model file. diff --git a/source/lmp/pair_base.h b/source/lmp/pair_base.h index 47d97591cd..f19c09edff 100644 --- a/source/lmp/pair_base.h +++ b/source/lmp/pair_base.h @@ -34,7 +34,7 @@ class PairDeepMDBase : public Pair { const char *, deepmd_compat::DeepBaseModel &, deepmd_compat::DeepBaseModelDevi &); - ~PairDeepMDBase() override; + virtual ~PairDeepMDBase() override; void *extract(const char *, int &) override; void coeff(int, char **) override; void init_style() override; From ae989644c1a9fbd89cb1031bc94086e6c74ae924 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 5 Nov 2024 15:15:38 +0800 Subject: [PATCH 138/193] Update deepmd.hpp --- source/api_c/include/deepmd.hpp | 161 -------------------------------- 1 file changed, 161 deletions(-) diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 53f8ed1c02..35ceab05d9 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -2256,78 +2256,6 @@ class DeepPotModelDevi : public DeepBaseModelDevi { } } }; - // support spin - template - void compute_spin( - std::vector &ener, - std::vector> &force, - std::vector> &force_mag, - std::vector> &virial, - const std::vector &coord, - const std::vector &spin, - const std::vector &atype, - const std::vector &box, - const int nghost, - const InputNlist &lmp_list, - const int &ago, - const std::vector &fparam = std::vector(), - const std::vector &aparam = std::vector()) { - unsigned int natoms = atype.size(); - unsigned int nframes = 1; - assert(natoms * 3 == coord.size()); - if (!box.empty()) { - assert(box.size() == 9); - } - const VALUETYPE *coord_ = &coord[0]; - const VALUETYPE *spin_ = &spin[0]; - const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; - const int *atype_ = &atype[0]; - // memory will be continous for std::vector but not std::vector - std::vector energy_flat(numb_models); - std::vector force_flat(static_cast(numb_models) * - natoms * 3); - std::vector force_mag_flat(static_cast(numb_models) * - natoms * 3); - std::vector virial_flat(numb_models * 9); - double *ener_ = &energy_flat[0]; - VALUETYPE *force_ = &force_flat[0]; - VALUETYPE *force_mag_ = &force_mag_flat[0]; - VALUETYPE *virial_ = &virial_flat[0]; - std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), - fparam, aparam); - tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, - (aparam_nall ? natoms : (natoms - nghost)) * daparam, - aparam); - const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; - const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotModelDeviComputeNListSP( - dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, - fparam__, aparam__, ener_, force_, force_mag_, virial_, nullptr, - nullptr); - DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); - // reshape - ener.resize(numb_models); - force.resize(numb_models); - force_mag.resize(numb_models); - virial.resize(numb_models); - for (int i = 0; i < numb_models; i++) { - ener[i] = energy_flat[i]; - force[i].resize(static_cast(natoms) * 3); - force_mag[i].resize(static_cast(natoms) * 3); - virial[i].resize(9); - for (int j = 0; j < natoms * 3; j++) { - force[i][j] = force_flat[i * natoms * 3 + j]; - } - for (int j = 0; j < natoms * 3; j++) { - force_mag[i][j] = force_mag_flat[i * natoms * 3 + j]; - } - for (int j = 0; j < 9; j++) { - virial[i][j] = virial_flat[i * 9 + j]; - } - } - }; /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP model deviation. @@ -2432,95 +2360,6 @@ class DeepPotModelDevi : public DeepBaseModelDevi { } } }; - // support spin - template - void compute_spin( - std::vector &ener, - std::vector> &force, - std::vector> &force_mag, - std::vector> &virial, - std::vector> &atom_energy, - std::vector> &atom_virial, - const std::vector &coord, - const std::vector &spin, - const std::vector &atype, - const std::vector &box, - const int nghost, - const InputNlist &lmp_list, - const int &ago, - const std::vector &fparam = std::vector(), - const std::vector &aparam = std::vector()) { - unsigned int natoms = atype.size(); - unsigned int nframes = 1; - assert(natoms * 3 == coord.size()); - if (!box.empty()) { - assert(box.size() == 9); - } - const VALUETYPE *coord_ = &coord[0]; - const VALUETYPE *spin_ = &spin[0]; - const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; - const int *atype_ = &atype[0]; - std::vector energy_flat(numb_models); - std::vector force_flat(static_cast(numb_models) * - natoms * 3); - std::vector force_mag_flat(static_cast(numb_models) * - natoms * 3); - std::vector virial_flat(numb_models * 9); - std::vector atom_energy_flat(static_cast(numb_models) * - natoms); - std::vector atom_virial_flat(static_cast(numb_models) * - natoms * 9); - double *ener_ = &energy_flat[0]; - VALUETYPE *force_ = &force_flat[0]; - VALUETYPE *force_mag_ = &force_mag_flat[0]; - VALUETYPE *virial_ = &virial_flat[0]; - VALUETYPE *atomic_ener_ = &atom_energy_flat[0]; - VALUETYPE *atomic_virial_ = &atom_virial_flat[0]; - std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), - fparam, aparam); - tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, - (aparam_nall ? natoms : (natoms - nghost)) * daparam, - aparam); - const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; - const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotModelDeviComputeNListSP( - dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, - fparam__, aparam__, ener_, force_, force_mag_, virial_, atomic_ener_, - atomic_virial_); - DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); - // reshape - ener.resize(numb_models); - force.resize(numb_models); - force_mag.resize(numb_models); - virial.resize(numb_models); - atom_energy.resize(numb_models); - atom_virial.resize(numb_models); - for (int i = 0; i < numb_models; i++) { - ener[i] = energy_flat[i]; - force[i].resize(static_cast(natoms) * 3); - force_mag[i].resize(static_cast(natoms) * 3); - virial[i].resize(9); - atom_energy[i].resize(natoms); - atom_virial[i].resize(static_cast(natoms) * 9); - for (int j = 0; j < natoms * 3; j++) { - force[i][j] = force_flat[i * natoms * 3 + j]; - } - for (int j = 0; j < natoms * 3; j++) { - force_mag[i][j] = force_mag_flat[i * natoms * 3 + j]; - } - for (int j = 0; j < 9; j++) { - virial[i][j] = virial_flat[i * 9 + j]; - } - for (int j = 0; j < natoms; j++) { - atom_energy[i][j] = atom_energy_flat[i * natoms + j]; - } - for (int j = 0; j < natoms * 9; j++) { - atom_virial[i][j] = atom_virial_flat[i * natoms * 9 + j]; - } - } - }; private: DP_DeepPotModelDevi *dp; From 3e7501e5d7bf324bac40e54b1bc1ec866c6d1096 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 5 Nov 2024 15:29:48 +0800 Subject: [PATCH 139/193] rename compute_spin to compute --- source/api_c/include/deepmd.hpp | 12 +- source/api_c/src/c_api.cc | 18 +- source/api_cc/include/DeepSpin.h | 266 ++++--- source/api_cc/src/DeepSpin.cc | 654 +++++++++--------- .../api_cc/tests/test_deeppot_dpa_pt_spin.cc | 14 +- source/api_cc/tests/test_deeppot_tf_spin.cc | 6 +- source/lmp/pair_deepspin.cpp | 24 +- 7 files changed, 490 insertions(+), 504 deletions(-) diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 35ceab05d9..55e4257d95 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -1497,7 +1497,7 @@ class DeepSpin : public DeepBaseModel { * @warning Natoms should not be zero when computing multiple frames. **/ template - void compute_spin( + void compute( ENERGYVTYPE &ener, std::vector &force, std::vector &force_mag, @@ -1565,7 +1565,7 @@ class DeepSpin : public DeepBaseModel { * @warning Natoms should not be zero when computing multiple frames. **/ template - void compute_spin( + void compute( ENERGYVTYPE &ener, std::vector &force, std::vector &force_mag, @@ -1615,7 +1615,7 @@ class DeepSpin : public DeepBaseModel { // support spin template - void compute_spin( + void compute( ENERGYVTYPE &ener, std::vector &force, std::vector &force_mag, @@ -1664,7 +1664,7 @@ class DeepSpin : public DeepBaseModel { // support spin template - void compute_spin( + void compute( ENERGYVTYPE &ener, std::vector &force, std::vector &force_mag, @@ -2430,7 +2430,7 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { }; // support spin template - void compute_spin( + void compute( std::vector &ener, std::vector> &force, std::vector> &force_mag, @@ -2503,7 +2503,7 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { // support spin template - void compute_spin( + void compute( std::vector &ener, std::vector> &force, std::vector> &force_mag, diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index fe8873d18b..e0bb82841d 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -352,8 +352,8 @@ inline void DP_DeepSpinCompute_variant(DP_DeepSpin* dp, std::vector e; std::vector f, fm, v, ae, av; - DP_REQUIRES_OK(dp, dp->dp.compute_spin(e, f, fm, v, ae, av, coord_, spin_, - atype_, cell_, fparam_, aparam_)); + DP_REQUIRES_OK(dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, + cell_, fparam_, aparam_)); // copy from C++ vectors to C arrays, if not NULL pointer if (energy) { std::copy(e.begin(), e.end(), energy); @@ -549,8 +549,8 @@ inline void DP_DeepSpinComputeNList_variant(DP_DeepSpin* dp, std::vector e; std::vector f, fm, v, ae, av; DP_REQUIRES_OK( - dp, dp->dp.compute_spin(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, - nghost, nlist->nl, ago, fparam_, aparam_)); + dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, + nghost, nlist->nl, ago, fparam_, aparam_)); // copy from C++ vectors to C arrays, if not NULL pointer if (energy) { std::copy(e.begin(), e.end(), energy); @@ -956,13 +956,13 @@ void DP_DeepSpinModelDeviComputeNList_variant(DP_DeepSpinModelDevi* dp, std::vector e; std::vector> f, fm, v, ae, av; if (atomic_energy || atomic_virial) { - DP_REQUIRES_OK(dp, dp->dp.compute_spin(e, f, fm, v, ae, av, coord_, spin_, - atype_, cell_, nghost, nlist->nl, - ago, fparam_, aparam_)); + DP_REQUIRES_OK( + dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, atype_, cell_, + nghost, nlist->nl, ago, fparam_, aparam_)); } else { DP_REQUIRES_OK( - dp, dp->dp.compute_spin(e, f, fm, v, coord_, spin_, atype_, cell_, - nghost, nlist->nl, ago, fparam_, aparam_)); + dp, dp->dp.compute(e, f, fm, v, coord_, spin_, atype_, cell_, nghost, + nlist->nl, ago, fparam_, aparam_)); } // 2D vector to 2D array, flatten first if (energy) { diff --git a/source/api_cc/include/DeepSpin.h b/source/api_cc/include/DeepSpin.h index 3a095f75bb..9ebe6604e6 100644 --- a/source/api_cc/include/DeepSpin.h +++ b/source/api_cc/include/DeepSpin.h @@ -218,29 +218,27 @@ class DeepSpin : public DeepBaseModel { * @{ **/ template - void compute_spin( - ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute(ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); template - void compute_spin( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** @} */ /** @@ -271,35 +269,33 @@ class DeepSpin : public DeepBaseModel { * @{ **/ template - void compute_spin( - ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute(ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); template - void compute_spin( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& inlist, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& inlist, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** @} */ /** @@ -329,33 +325,31 @@ class DeepSpin : public DeepBaseModel { * @{ **/ template - void compute_spin( - ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute(ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); template - void compute_spin( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** @} */ /** @@ -388,39 +382,37 @@ class DeepSpin : public DeepBaseModel { * @{ **/ template - void compute_spin( - ENERGYTYPE& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute(ENERGYTYPE& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); template - void compute_spin( - std::vector& ener, - std::vector& force, - std::vector& force_mag, - std::vector& virial, - std::vector& atom_energy, - std::vector& atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute(std::vector& ener, + std::vector& force, + std::vector& force_mag, + std::vector& virial, + std::vector& atom_energy, + std::vector& atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** @} */ protected: std::shared_ptr dp; @@ -484,20 +476,19 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { *same aparam. **/ template - void compute_spin( - std::vector& all_ener, - std::vector>& all_force, - std::vector>& all_force_mag, - std::vector>& all_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute(std::vector& all_ener, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, @@ -529,22 +520,21 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { *same aparam. **/ template - void compute_spin( - std::vector& all_ener, - std::vector>& all_force, - std::vector>& all_force_mag, - std::vector>& all_virial, - std::vector>& all_atom_energy, - std::vector>& all_atom_virial, - const std::vector& coord, - const std::vector& spin, - const std::vector& atype, - const std::vector& box, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam = std::vector(), - const std::vector& aparam = std::vector()); + void compute(std::vector& all_ener, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); protected: std::vector> dps; diff --git a/source/api_cc/src/DeepSpin.cc b/source/api_cc/src/DeepSpin.cc index e62f0df5f9..1702e8a45d 100644 --- a/source/api_cc/src/DeepSpin.cc +++ b/source/api_cc/src/DeepSpin.cc @@ -68,16 +68,16 @@ void DeepSpin::init(const std::string& model, // support spin // no nlist, no atomic : nframe template -void DeepSpin::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { +void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { std::vector dener_; std::vector datom_energy_, datom_virial_; dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, @@ -87,16 +87,16 @@ void DeepSpin::compute_spin(ENERGYTYPE& dener, } template -void DeepSpin::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { +void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { std::vector datom_energy_, datom_virial_; dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, @@ -104,66 +104,66 @@ void DeepSpin::compute_spin(std::vector& dener, } // no nlist, no atomic : nframe * precision -template void DeepSpin::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepSpin::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepSpin::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepSpin::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); +template void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); // support spin // nlist, no atomic : nframe template -void DeepSpin::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { +void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { std::vector dener_; std::vector datom_energy_, datom_virial_; dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, @@ -173,19 +173,19 @@ void DeepSpin::compute_spin(ENERGYTYPE& dener, } template -void DeepSpin::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { +void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { std::vector datom_energy_, datom_virial_; dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, @@ -193,79 +193,77 @@ void DeepSpin::compute_spin(std::vector& dener, } // nlist, no atomic : nframe * precision -template void DeepSpin::compute_spin( - ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepSpin::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepSpin::compute_spin( - std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepSpin::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); +template void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); // support spin // no nlist, atomic : nframe template -void DeepSpin::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { +void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { std::vector dener_; dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, @@ -273,93 +271,93 @@ void DeepSpin::compute_spin(ENERGYTYPE& dener, dener = dener_[0]; } template -void DeepSpin::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam_, - const std::vector& aparam_) { +void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam_, + const std::vector& aparam_) { dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, fparam_, aparam_, true); } // no nlist, atomic : nframe * precision -template void DeepSpin::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepSpin::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepSpin::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); - -template void DeepSpin::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const std::vector& fparam, - const std::vector& aparam); +template void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); // support spin // nlist, atomic : nframe template -void DeepSpin::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { +void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { std::vector dener_; dp->computew(dener_, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, @@ -367,91 +365,89 @@ void DeepSpin::compute_spin(ENERGYTYPE& dener, dener = dener_[0]; } template -void DeepSpin::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam_, - const std::vector& aparam__) { +void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam_, + const std::vector& aparam__) { dp->computew(dener, dforce_, dforce_mag_, dvirial, datom_energy_, datom_virial_, dcoord_, dspin_, datype_, dbox, nghost, lmp_list, ago, fparam_, aparam__, true); } // nlist, atomic : nframe * precision -template void DeepSpin::compute_spin( - ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepSpin::compute_spin(ENERGYTYPE& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepSpin::compute_spin( - std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); - -template void DeepSpin::compute_spin(std::vector& dener, - std::vector& dforce_, - std::vector& dforce_mag_, - std::vector& dvirial, - std::vector& datom_energy_, - std::vector& datom_virial_, - const std::vector& dcoord_, - const std::vector& dspin_, - const std::vector& datype_, - const std::vector& dbox, - const int nghost, - const InputNlist& lmp_list, - const int& ago, - const std::vector& fparam, - const std::vector& aparam_); +template void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute(ENERGYTYPE& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); + +template void DeepSpin::compute(std::vector& dener, + std::vector& dforce_, + std::vector& dforce_mag_, + std::vector& dvirial, + std::vector& datom_energy_, + std::vector& datom_virial_, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const int nghost, + const InputNlist& lmp_list, + const int& ago, + const std::vector& fparam, + const std::vector& aparam_); DeepSpinModelDevi::DeepSpinModelDevi() { inited = false; @@ -496,7 +492,7 @@ void DeepSpinModelDevi::init(const std::vector& models, // support spin // nlist, no atomic template -void DeepSpinModelDevi::compute_spin( +void DeepSpinModelDevi::compute( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, @@ -518,14 +514,14 @@ void DeepSpinModelDevi::compute_spin( all_force_mag.resize(numb_models); all_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii]->compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], - all_virial[ii], dcoord_, dspin_, datype_, dbox, - nghost, lmp_list, ago, fparam, aparam_); + dps[ii]->compute(all_energy[ii], all_force[ii], all_force_mag[ii], + all_virial[ii], dcoord_, dspin_, datype_, dbox, nghost, + lmp_list, ago, fparam, aparam_); } } // nlist, no atomic: precision -template void DeepSpinModelDevi::compute_spin( +template void DeepSpinModelDevi::compute( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, @@ -540,7 +536,7 @@ template void DeepSpinModelDevi::compute_spin( const std::vector& fparam, const std::vector& aparam); -template void DeepSpinModelDevi::compute_spin( +template void DeepSpinModelDevi::compute( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, @@ -558,7 +554,7 @@ template void DeepSpinModelDevi::compute_spin( // support spin // nlist, atomic template -void DeepSpinModelDevi::compute_spin( +void DeepSpinModelDevi::compute( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, @@ -584,15 +580,15 @@ void DeepSpinModelDevi::compute_spin( all_atom_energy.resize(numb_models); all_atom_virial.resize(numb_models); for (unsigned ii = 0; ii < numb_models; ++ii) { - dps[ii]->compute_spin(all_energy[ii], all_force[ii], all_force_mag[ii], - all_virial[ii], all_atom_energy[ii], - all_atom_virial[ii], dcoord_, dspin_, datype_, dbox, - nghost, lmp_list, ago, fparam, aparam_); + dps[ii]->compute(all_energy[ii], all_force[ii], all_force_mag[ii], + all_virial[ii], all_atom_energy[ii], all_atom_virial[ii], + dcoord_, dspin_, datype_, dbox, nghost, lmp_list, ago, + fparam, aparam_); } } // nlist, atomic : precision -template void DeepSpinModelDevi::compute_spin( +template void DeepSpinModelDevi::compute( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, @@ -609,7 +605,7 @@ template void DeepSpinModelDevi::compute_spin( const std::vector& fparam, const std::vector& aparam); -template void DeepSpinModelDevi::compute_spin( +template void DeepSpinModelDevi::compute( std::vector& all_energy, std::vector>& all_force, std::vector>& all_force_mag, diff --git a/source/api_cc/tests/test_deeppot_dpa_pt_spin.cc b/source/api_cc/tests/test_deeppot_dpa_pt_spin.cc index d0cc7f35df..5e3c2e7c65 100644 --- a/source/api_cc/tests/test_deeppot_dpa_pt_spin.cc +++ b/source/api_cc/tests/test_deeppot_dpa_pt_spin.cc @@ -125,7 +125,7 @@ TYPED_TEST(TestInferDeepSpinDpaPt, cpu_build_nlist) { deepmd::DeepSpin& dp = this->dp; double ener; std::vector force, force_mag, virial; - dp.compute_spin(ener, force, force_mag, virial, coord, spin, atype, box); + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); @@ -157,8 +157,8 @@ TYPED_TEST(TestInferDeepSpinDpaPt, cpu_build_nlist_atomic) { deepmd::DeepSpin& dp = this->dp; double ener; std::vector force, force_mag, virial, atom_ener, atom_vir; - dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, - spin, atype, box); + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box); EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); @@ -277,7 +277,7 @@ TYPED_TEST(TestInferDeepSpinDpaPt, cpu_build_nlist_atomic) { // deepmd::DeepPot& dp = this->dp; // double ener; // std::vector force, force_mag, virial; -// dp.compute_spin(ener, force, force_mag, virial, coord, spin, atype, box); +// dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); // EXPECT_EQ(force.size(), natoms * 3); // EXPECT_EQ(force_mag.size(), natoms * 3); @@ -309,7 +309,7 @@ TYPED_TEST(TestInferDeepSpinDpaPt, cpu_build_nlist_atomic) { // deepmd::DeepPot& dp = this->dp; // double ener; // std::vector force, force_mag, virial, atom_ener, atom_vir; -// dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, +// dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, // spin, atype, box); // EXPECT_EQ(force.size(), natoms * 3); @@ -358,7 +358,7 @@ TYPED_TEST(TestInferDeepSpinDpaPt, cpu_build_nlist_atomic) { // std::vector firstneigh(natoms); // deepmd::InputNlist inlist(natoms, &ilist[0], &numneigh[0], &firstneigh[0]); // convert_nlist(inlist, nlist_data); -// dp.compute_spin(ener, force, force_mag, virial, coord, spin, atype, box, 0, +// dp.compute(ener, force, force_mag, virial, coord, spin, atype, box, 0, // inlist, 0); // EXPECT_EQ(force.size(), natoms * 3); @@ -399,7 +399,7 @@ TYPED_TEST(TestInferDeepSpinDpaPt, cpu_build_nlist_atomic) { // std::vector firstneigh(natoms); // deepmd::InputNlist inlist(natoms, &ilist[0], &numneigh[0], &firstneigh[0]); // convert_nlist(inlist, nlist_data); -// dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, +// dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, // spin, atype, box, 0, // inlist, 0); diff --git a/source/api_cc/tests/test_deeppot_tf_spin.cc b/source/api_cc/tests/test_deeppot_tf_spin.cc index a7a542f532..4c432af597 100644 --- a/source/api_cc/tests/test_deeppot_tf_spin.cc +++ b/source/api_cc/tests/test_deeppot_tf_spin.cc @@ -74,7 +74,7 @@ TYPED_TEST(TestInferDeepSpin, cpu_build_nlist) { deepmd::DeepSpin& dp = this->dp; double ener; std::vector force, force_mag, virial; - dp.compute_spin(ener, force, force_mag, virial, coord, spin, atype, box); + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); @@ -98,8 +98,8 @@ TYPED_TEST(TestInferDeepSpin, cpu_build_nlist_atomic) { deepmd::DeepSpin& dp = this->dp; double ener; std::vector force, force_mag, virial, atom_ener, atom_vir; - dp.compute_spin(ener, force, force_mag, virial, atom_ener, atom_vir, coord, - spin, atype, box); + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box); EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); // EXPECT_EQ(atom_ener.size(), natoms); diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp index 70b24b058c..d156700c2b 100644 --- a/source/lmp/pair_deepspin.cpp +++ b/source/lmp/pair_deepspin.cpp @@ -496,9 +496,9 @@ void PairDeepSpin::compute(int eflag, int vflag) { // cvflag_atom is the right flag for the cvatom matrix if (!(eflag_atom || cvflag_atom)) { try { - deep_spin.compute_spin(dener, dforce, dforce_mag, dvirial, dcoord, - dspin, dtype, dbox, nghost, lmp_list, ago, - fparam, daparam); + deep_spin.compute(dener, dforce, dforce_mag, dvirial, dcoord, dspin, + dtype, dbox, nghost, lmp_list, ago, fparam, + daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -508,9 +508,9 @@ void PairDeepSpin::compute(int eflag, int vflag) { vector deatom(nall * 1, 0); vector dvatom(nall * 9, 0); try { - deep_spin.compute_spin(dener, dforce, dforce_mag, dvirial, deatom, - dvatom, dcoord, dspin, dtype, dbox, nghost, - lmp_list, ago, fparam, daparam); + deep_spin.compute(dener, dforce, dforce_mag, dvirial, deatom, dvatom, + dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, + fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } @@ -560,15 +560,15 @@ void PairDeepSpin::compute(int eflag, int vflag) { vector> all_atom_virial; if (!(eflag_atom || cvflag_atom)) { try { - deep_spin_model_devi.compute_spin( - all_energy, all_force, all_force_mag, all_virial, dcoord, dspin, - dtype, dbox, nghost, lmp_list, ago, fparam, daparam); + deep_spin_model_devi.compute(all_energy, all_force, all_force_mag, + all_virial, dcoord, dspin, dtype, dbox, + nghost, lmp_list, ago, fparam, daparam); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } } else { try { - deep_spin_model_devi.compute_spin( + deep_spin_model_devi.compute( all_energy, all_force, all_force_mag, all_virial, all_atom_energy, all_atom_virial, dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, fparam, daparam); @@ -767,8 +767,8 @@ void PairDeepSpin::compute(int eflag, int vflag) { } else { if (numb_models == 1) { try { - deep_spin.compute_spin(dener, dforce, dforce_mag, dvirial, dcoord, - dspin, dtype, dbox); + deep_spin.compute(dener, dforce, dforce_mag, dvirial, dcoord, dspin, + dtype, dbox); } catch (deepmd_compat::deepmd_exception &e) { error->one(FLERR, e.what()); } From 2c4ca0ddedb0c08310c48c90490598bd2b7981f0 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 5 Nov 2024 17:03:43 +0800 Subject: [PATCH 140/193] update nopbc test --- .../api_cc/tests/test_deeppot_dpa_pt_spin.cc | 494 +++++++++--------- 1 file changed, 252 insertions(+), 242 deletions(-) diff --git a/source/api_cc/tests/test_deeppot_dpa_pt_spin.cc b/source/api_cc/tests/test_deeppot_dpa_pt_spin.cc index 5e3c2e7c65..f14aa7a52e 100644 --- a/source/api_cc/tests/test_deeppot_dpa_pt_spin.cc +++ b/source/api_cc/tests/test_deeppot_dpa_pt_spin.cc @@ -182,245 +182,255 @@ TYPED_TEST(TestInferDeepSpinDpaPt, cpu_build_nlist_atomic) { // } } -// template -// class TestInferDeepSpinDpaPtNopbc : public ::testing::Test { -// protected: -// std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, -// 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, -// 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; -// std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., -// 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; -// std::vector atype = {0, 1, 1, 0, 1, 1}; -// std::vector box = {}; -// // Generated by the following Python code: -// // import numpy as np -// // from deepmd.infer import DeepPot -// // coord = np.array([ -// // 12.83, 2.56, 2.18, 12.09, 2.87, 2.74, -// // 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, -// // 3.51, 2.51, 2.60, 4.27, 3.22, 1.56 -// // ]).reshape(1, -1) -// // spin = np.array([ -// // 0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., -// // 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0. -// // ]).reshape(1, -1) -// // atype = np.array([0, 1, 1, 0, 1, 1]) -// // box = None -// // dp = DeepPot("deeppot_dpa_spin.pth") -// // e, f, _, ae, _, fm, _ = dp.eval(coord, box, atype, atomic=True, -// spin=spin) -// // np.set_printoptions(precision=16) -// // print(f"{e.ravel()=} {f.ravel()=} {fm.ravel()=} {ae.ravel()=}") - -// std::vector expected_e = { -// -5.921669893870771 , -5.1676693791758685, -5.205933794558385 , -// -5.58688965168251 , -5.080322972018686 , -5.08213772482076}; -// std::vector expected_f = { -// -0.2929142244191496, 0.0801070990501456, 0.148216178514704 , -// 0.2929142244191503, -0.0801070990501454, -0.1482161785147037, -// -0.2094984819251435, 0.0241594118950041, -0.0215199116994508, -// 0.3068843038300324, -0.001620530344866 , 0.1508093841389746, -// -0.0122719879278721, 0.0186341247897136, -0.1137104245023705, -// -0.0851138339770169, -0.0411730063398516, -0.0155790479371533}; -// std::vector expected_fm = { -// 1.5298530476860008, 0.0071315024546899, 0.0650492472558729, -// 0. , 0. , 0. , -// 0. , 0. , 0. , -// -0.6212052813442365, -0.2290265978320395, -0.5101405083352206, -// 0. , 0. , 0. , -// 0. , 0. , 0.}; - -// int natoms; -// double expected_tot_e; -// // std::vector expected_tot_v; - -// deepmd::DeepPot dp; - -// void SetUp() override { -// dp.init("../../tests/infer/deeppot_dpa_spin.pth"); - -// natoms = expected_e.size(); -// EXPECT_EQ(natoms * 3, expected_f.size()); -// EXPECT_EQ(natoms * 3, expected_fm.size()); -// // EXPECT_EQ(natoms * 9, expected_v.size()); -// expected_tot_e = 0.; -// // expected_tot_v.resize(9); -// // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); -// for (int ii = 0; ii < natoms; ++ii) { -// expected_tot_e += expected_e[ii]; -// } -// // for (int ii = 0; ii < natoms; ++ii) { -// // for (int dd = 0; dd < 9; ++dd) { -// // expected_tot_v[dd] += expected_v[ii * 9 + dd]; -// // } -// // } -// }; - -// void TearDown() override {}; -// }; - -// TYPED_TEST_SUITE(TestInferDeepSpinDpaPtNopbc, ValueTypes); - -// TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_build_nlist) { -// using VALUETYPE = TypeParam; -// const std::vector& coord = this->coord; -// const std::vector& spin = this->spin; -// std::vector& atype = this->atype; -// std::vector& box = this->box; -// std::vector& expected_e = this->expected_e; -// std::vector& expected_f = this->expected_f; -// std::vector& expected_fm = this->expected_fm; -// // std::vector& expected_v = this->expected_v; -// int& natoms = this->natoms; -// double& expected_tot_e = this->expected_tot_e; -// // std::vector& expected_tot_v = this->expected_tot_v; -// deepmd::DeepPot& dp = this->dp; -// double ener; -// std::vector force, force_mag, virial; -// dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); - -// EXPECT_EQ(force.size(), natoms * 3); -// EXPECT_EQ(force_mag.size(), natoms * 3); -// // EXPECT_EQ(virial.size(), 9); - -// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); -// for (int ii = 0; ii < natoms * 3; ++ii) { -// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); -// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); -// } -// // for (int ii = 0; ii < 3 * 3; ++ii) { -// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); -// // } -// } - -// TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_build_nlist_atomic) { -// using VALUETYPE = TypeParam; -// const std::vector& coord = this->coord; -// const std::vector& spin = this->spin; -// std::vector& atype = this->atype; -// std::vector& box = this->box; -// std::vector& expected_e = this->expected_e; -// std::vector& expected_f = this->expected_f; -// std::vector& expected_fm = this->expected_fm; -// // std::vector& expected_v = this->expected_v; -// int& natoms = this->natoms; -// double& expected_tot_e = this->expected_tot_e; -// // std::vector& expected_tot_v = this->expected_tot_v; -// deepmd::DeepPot& dp = this->dp; -// double ener; -// std::vector force, force_mag, virial, atom_ener, atom_vir; -// dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, -// spin, atype, box); - -// EXPECT_EQ(force.size(), natoms * 3); -// EXPECT_EQ(force_mag.size(), natoms * 3); -// // EXPECT_EQ(virial.size(), 9); -// EXPECT_EQ(atom_ener.size(), natoms); -// // EXPECT_EQ(atom_vir.size(), natoms * 9); - -// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); -// for (int ii = 0; ii < natoms * 3; ++ii) { -// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); -// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); -// } -// // for (int ii = 0; ii < 3 * 3; ++ii) { -// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); -// // } -// for (int ii = 0; ii < natoms; ++ii) { -// EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); -// } -// // for (int ii = 0; ii < natoms * 9; ++ii) { -// // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); -// // } -// } - -// TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_lmp_nlist) { -// using VALUETYPE = TypeParam; -// const std::vector& coord = this->coord; -// const std::vector& spin = this->spin; -// std::vector& atype = this->atype; -// std::vector& box = this->box; -// std::vector& expected_e = this->expected_e; -// std::vector& expected_f = this->expected_f; -// std::vector& expected_fm = this->expected_fm; -// // std::vector& expected_v = this->expected_v; -// int& natoms = this->natoms; -// double& expected_tot_e = this->expected_tot_e; -// // std::vector& expected_tot_v = this->expected_tot_v; -// deepmd::DeepPot& dp = this->dp; -// double ener; -// std::vector force, force_mag, virial; - -// std::vector > nlist_data = { -// {1, 2, 3, 4, 5}, {0, 2, 3, 4, 5}, {0, 1, 3, 4, 5}, -// {0, 1, 2, 4, 5}, {0, 1, 2, 3, 5}, {0, 1, 2, 3, 4}}; -// std::vector ilist(natoms), numneigh(natoms); -// std::vector firstneigh(natoms); -// deepmd::InputNlist inlist(natoms, &ilist[0], &numneigh[0], &firstneigh[0]); -// convert_nlist(inlist, nlist_data); -// dp.compute(ener, force, force_mag, virial, coord, spin, atype, box, 0, -// inlist, 0); - -// EXPECT_EQ(force.size(), natoms * 3); -// EXPECT_EQ(force_mag.size(), natoms * 3); -// // EXPECT_EQ(virial.size(), 9); - -// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); -// for (int ii = 0; ii < natoms * 3; ++ii) { -// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); -// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); -// } -// // for (int ii = 0; ii < 3 * 3; ++ii) { -// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); -// // } -// } - -// TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_lmp_nlist_atomic) { -// using VALUETYPE = TypeParam; -// const std::vector& coord = this->coord; -// const std::vector& spin = this->spin; -// std::vector& atype = this->atype; -// std::vector& box = this->box; -// std::vector& expected_e = this->expected_e; -// std::vector& expected_f = this->expected_f; -// std::vector& expected_fm = this->expected_fm; -// // std::vector& expected_v = this->expected_v; -// int& natoms = this->natoms; -// double& expected_tot_e = this->expected_tot_e; -// // std::vector& expected_tot_v = this->expected_tot_v; -// deepmd::DeepPot& dp = this->dp; -// double ener; -// std::vector force, force_mag, virial, atom_ener, atom_vir; - -// std::vector > nlist_data = { -// {1, 2, 3, 4, 5}, {0, 2, 3, 4, 5}, {0, 1, 3, 4, 5}, -// {0, 1, 2, 4, 5}, {0, 1, 2, 3, 5}, {0, 1, 2, 3, 4}}; -// std::vector ilist(natoms), numneigh(natoms); -// std::vector firstneigh(natoms); -// deepmd::InputNlist inlist(natoms, &ilist[0], &numneigh[0], &firstneigh[0]); -// convert_nlist(inlist, nlist_data); -// dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, -// spin, atype, box, 0, -// inlist, 0); - -// EXPECT_EQ(force.size(), natoms * 3); -// EXPECT_EQ(force_mag.size(), natoms * 3); -// // EXPECT_EQ(virial.size(), 9); -// EXPECT_EQ(atom_ener.size(), natoms); -// // EXPECT_EQ(atom_vir.size(), natoms * 9); - -// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); -// for (int ii = 0; ii < natoms * 3; ++ii) { -// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); -// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); -// } -// // for (int ii = 0; ii < 3 * 3; ++ii) { -// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); -// // } -// for (int ii = 0; ii < natoms; ++ii) { -// EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); -// } -// // for (int ii = 0; ii < natoms * 9; ++ii) { -// // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); -// // } -// } +template +class TestInferDeepSpinDpaPtNopbc : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 1, 1, 0, 1, 1}; + std::vector box = {}; + // Generated by the following Python code: + // import numpy as np + // from deepmd.infer import DeepPot + // coord = np.array([ + // 12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + // 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, + // 3.51, 2.51, 2.60, 4.27, 3.22, 1.56 + // ]).reshape(1, -1) + // spin = np.array([ + // 0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + // 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0. + // ]).reshape(1, -1) + // atype = np.array([0, 1, 1, 0, 1, 1]) + // box = None + // dp = DeepPot("deeppot_dpa_spin.pth") + // e, f, _, ae, _, fm, _ = dp.eval(coord, box, atype, atomic=True, + // spin=spin) + // np.set_printoptions(precision=16) + // print(f"{e.ravel()=} {f.ravel()=} {fm.ravel()=} {ae.ravel()=}") + + std::vector expected_e = {-5.921669893870771, -5.1676693791758685, + -5.205933794558385, -5.58688965168251, + -5.080322972018686, -5.08213772482076}; + std::vector expected_f = { + -0.2929142244191496, 0.0801070990501456, 0.148216178514704, + 0.2929142244191503, -0.0801070990501454, -0.1482161785147037, + -0.2094984819251435, 0.0241594118950041, -0.0215199116994508, + 0.3068843038300324, -0.001620530344866, 0.1508093841389746, + -0.0122719879278721, 0.0186341247897136, -0.1137104245023705, + -0.0851138339770169, -0.0411730063398516, -0.0155790479371533}; + std::vector expected_fm = {-1.5298530476860008, + 0.0071315024546899, + 0.0650492472558729, + 0., + 0., + 0., + 0., + 0., + 0., + -0.6212052813442365, + -0.2290265978320395, + -0.5101405083352206, + 0., + 0., + 0., + 0., + 0., + 0.}; + + int natoms; + double expected_tot_e; + // std::vector expected_tot_v; + + deepmd::DeepSpin dp; + + void SetUp() override { + dp.init("../../tests/infer/deeppot_dpa_spin.pth"); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); + expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); + for (int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + // for (int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } + }; + + void TearDown() override {}; +}; + +TYPED_TEST_SUITE(TestInferDeepSpinDpaPtNopbc, ValueTypes); + +TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_build_nlist) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + EXPECT_EQ(atom_ener.size(), natoms); + // EXPECT_EQ(atom_vir.size(), natoms * 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_lmp_nlist) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + + std::vector > nlist_data = { + {1, 2, 3, 4, 5}, {0, 2, 3, 4, 5}, {0, 1, 3, 4, 5}, + {0, 1, 2, 4, 5}, {0, 1, 2, 3, 5}, {0, 1, 2, 3, 4}}; + std::vector ilist(natoms), numneigh(natoms); + std::vector firstneigh(natoms); + deepmd::InputNlist inlist(natoms, &ilist[0], &numneigh[0], &firstneigh[0]); + convert_nlist(inlist, nlist_data); + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box, 0, inlist, + 0); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepSpinDpaPtNopbc, cpu_lmp_nlist_atomic) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + + std::vector > nlist_data = { + {1, 2, 3, 4, 5}, {0, 2, 3, 4, 5}, {0, 1, 3, 4, 5}, + {0, 1, 2, 4, 5}, {0, 1, 2, 3, 5}, {0, 1, 2, 3, 4}}; + std::vector ilist(natoms), numneigh(natoms); + std::vector firstneigh(natoms); + deepmd::InputNlist inlist(natoms, &ilist[0], &numneigh[0], &firstneigh[0]); + convert_nlist(inlist, nlist_data); + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box, 0, inlist, 0); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + EXPECT_EQ(atom_ener.size(), natoms); + // EXPECT_EQ(atom_vir.size(), natoms * 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); + // } +} From 914b1f9b5746d629158d24c656f36c2fb057ab2a Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 5 Nov 2024 22:49:58 -0500 Subject: [PATCH 141/193] fix(jax): fix several serialization and jit issues for DPA-2 (#4315) - `deepmd/jax/descriptor/__init__.py` imports SeT and DPA-2 to let them found by the plugin; - `deepmd/dpmodel/descriptor/dpa1.py` fixes the jit issue regarding to the shape generated by `jnp.prod`. The shape should be static by using `math.prod`. - `deepmd/jax/model/ener_model.py` and `deepmd/jax/model/dp_zbl_model.py` stop the graident of coordinates when rebuilding the neighbor list. The graient of sort causes an error due to https://github.com/jax-ml/jax/issues/24730. ## Summary by CodeRabbit - **New Features** - Introduced new methods `format_nlist` in `DPZBLModel` and `EnergyModel` classes for improved neighbor list formatting. - Added new descriptors `DescrptDPA2` and `DescrptSeTTebd` to the public API. - **Bug Fixes** - Enhanced attribute handling in `DPZBLModel` and `EnergyModel` to ensure proper serialization and deserialization of `atomic_model`. - **Documentation** - Updated the public API to reflect new additions and maintain existing documentation accuracy. Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/descriptor/dpa1.py | 5 +++-- deepmd/jax/descriptor/__init__.py | 8 ++++++++ deepmd/jax/model/dp_zbl_model.py | 16 ++++++++++++++++ deepmd/jax/model/ener_model.py | 16 ++++++++++++++++ 4 files changed, 43 insertions(+), 2 deletions(-) diff --git a/deepmd/dpmodel/descriptor/dpa1.py b/deepmd/dpmodel/descriptor/dpa1.py index b033811507..a84cc18882 100644 --- a/deepmd/dpmodel/descriptor/dpa1.py +++ b/deepmd/dpmodel/descriptor/dpa1.py @@ -1,4 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import math from typing import ( Any, Callable, @@ -852,7 +853,7 @@ def cal_g( ): xp = array_api_compat.array_namespace(ss) nfnl, nnei = ss.shape[0:2] - shape2 = xp.prod(xp.asarray(ss.shape[2:])) + shape2 = math.prod(ss.shape[2:]) ss = xp.reshape(ss, (nfnl, nnei, shape2)) # nfnl x nnei x ng gg = self.embeddings[embedding_idx].call(ss) @@ -866,7 +867,7 @@ def cal_g_strip( assert self.embeddings_strip is not None xp = array_api_compat.array_namespace(ss) nfnl, nnei = ss.shape[0:2] - shape2 = xp.prod(xp.asarray(ss.shape[2:])) + shape2 = math.prod(ss.shape[2:]) ss = xp.reshape(ss, (nfnl, nnei, shape2)) # nfnl x nnei x ng gg = self.embeddings_strip[embedding_idx].call(ss) diff --git a/deepmd/jax/descriptor/__init__.py b/deepmd/jax/descriptor/__init__.py index 4e55bc7659..91a3032f8b 100644 --- a/deepmd/jax/descriptor/__init__.py +++ b/deepmd/jax/descriptor/__init__.py @@ -2,6 +2,9 @@ from deepmd.jax.descriptor.dpa1 import ( DescrptDPA1, ) +from deepmd.jax.descriptor.dpa2 import ( + DescrptDPA2, +) from deepmd.jax.descriptor.hybrid import ( DescrptHybrid, ) @@ -14,11 +17,16 @@ from deepmd.jax.descriptor.se_t import ( DescrptSeT, ) +from deepmd.jax.descriptor.se_t_tebd import ( + DescrptSeTTebd, +) __all__ = [ "DescrptSeA", "DescrptSeR", "DescrptSeT", + "DescrptSeTTebd", "DescrptDPA1", + "DescrptDPA2", "DescrptHybrid", ] diff --git a/deepmd/jax/model/dp_zbl_model.py b/deepmd/jax/model/dp_zbl_model.py index 028fa8593b..babbc65233 100644 --- a/deepmd/jax/model/dp_zbl_model.py +++ b/deepmd/jax/model/dp_zbl_model.py @@ -12,6 +12,7 @@ flax_module, ) from deepmd.jax.env import ( + jax, jnp, ) from deepmd.jax.model.base_model import ( @@ -48,3 +49,18 @@ def forward_common_atomic( aparam=aparam, do_atomic_virial=do_atomic_virial, ) + + def format_nlist( + self, + extended_coord: jnp.ndarray, + extended_atype: jnp.ndarray, + nlist: jnp.ndarray, + extra_nlist_sort: bool = False, + ): + return DPZBLModelDP.format_nlist( + self, + jax.lax.stop_gradient(extended_coord), + extended_atype, + nlist, + extra_nlist_sort=extra_nlist_sort, + ) diff --git a/deepmd/jax/model/ener_model.py b/deepmd/jax/model/ener_model.py index b1bf568544..a1865f5635 100644 --- a/deepmd/jax/model/ener_model.py +++ b/deepmd/jax/model/ener_model.py @@ -12,6 +12,7 @@ flax_module, ) from deepmd.jax.env import ( + jax, jnp, ) from deepmd.jax.model.base_model import ( @@ -48,3 +49,18 @@ def forward_common_atomic( aparam=aparam, do_atomic_virial=do_atomic_virial, ) + + def format_nlist( + self, + extended_coord: jnp.ndarray, + extended_atype: jnp.ndarray, + nlist: jnp.ndarray, + extra_nlist_sort: bool = False, + ): + return EnergyModelDP.format_nlist( + self, + jax.lax.stop_gradient(extended_coord), + extended_atype, + nlist, + extra_nlist_sort=extra_nlist_sort, + ) From 65aac64db089746fa374124795c4708c61968f94 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 6 Nov 2024 02:26:24 -0500 Subject: [PATCH 142/193] fix(lmp): add `pair_deepmd_index` key to `is_key` function in dplr (#4313) Fix #4273. Tests are added in this PR. * Modify `is_key` function to include `keys.push_back("pair_deepmd_index")` Update tests in `test_dplr.py` to include `pair_deepmd_index` command * Add `pair_deepmd_index 0` to various `lammps.fix` commands in the test cases ## Summary by CodeRabbit - **New Features** - Introduced a new key, `pair_deepmd_index`, enhancing the `FixDPLR` class for improved pair validation in simulations. - **Bug Fixes** - Updated error handling to ensure robustness when the new `pair_deepmd_index` is not provided. - **Tests** - Modified test parameters to include `pair_deepmd_index 0`, ensuring compatibility with the new functionality while maintaining existing validations. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- source/lmp/fix_dplr.cpp | 1 + source/lmp/tests/test_dplr.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/source/lmp/fix_dplr.cpp b/source/lmp/fix_dplr.cpp index 34fd2515ed..3e59c6b2db 100644 --- a/source/lmp/fix_dplr.cpp +++ b/source/lmp/fix_dplr.cpp @@ -30,6 +30,7 @@ static bool is_key(const string &input) { keys.push_back("type_associate"); keys.push_back("bond_type"); keys.push_back("efield"); + keys.push_back("pair_deepmd_index"); for (int ii = 0; ii < keys.size(); ++ii) { if (input == keys[ii]) { return true; diff --git a/source/lmp/tests/test_dplr.py b/source/lmp/tests/test_dplr.py index 2dd3531894..ed28bbd6d4 100644 --- a/source/lmp/tests/test_dplr.py +++ b/source/lmp/tests/test_dplr.py @@ -387,7 +387,9 @@ def test_pair_deepmd_lr(lammps): lammps.special_bonds("lj/coul 1 1 1 angle no") lammps.kspace_style("pppm/dplr 1e-5") lammps.kspace_modify(f"gewald {beta:.2f} diff ik mesh {mesh:d} {mesh:d} {mesh:d}") - lammps.fix(f"0 all dplr model {pb_file.resolve()} type_associate 1 3 bond_type 1") + lammps.fix( + f"0 all dplr model {pb_file.resolve()} type_associate 1 3 bond_type 1 pair_deepmd_index 0" + ) lammps.fix_modify("0 virial yes") lammps.run(0) for ii in range(8): From 430dfa9ded456a5c096f4979a0981f53fe78d260 Mon Sep 17 00:00:00 2001 From: zhenyu <76582286+wangzyphysics@users.noreply.github.com> Date: Wed, 6 Nov 2024 17:23:18 +0800 Subject: [PATCH 143/193] fix: make sure `head` can be used in `DeepPot` (#4312) Following the example mentioned [here](https://www.aissquare.com/models/detail?pageType=models&name=DPA-2.3.0-v3.0.0b4&id=279) , I first select a `head` and put it into `DPCalculator` but encounter the error like this: ```shell (/public/home/mzq001/soft/deepmd-kit) [mzq001@login01 ~]$ python test.py To get the best performance, it is recommended to adjust the number of threads by setting the environment variables OMP_NUM_THREADS, DP_INTRA_OP_PARALLELISM_THREADS, and DP_INTER_OP_PARALLELISM_THREADS. See https://deepmd.rtfd.io/parallelism/ for more information. Traceback (most recent call last): File "/public/home/mzq001/test.py", line 4, in dp = DPCalculator("DPA2_medium_28_10M_beta4.pt", head="H2O_H2O-PD") ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/public/home/mzq001/soft/deepmd-kit/lib/python3.11/site-packages/deepmd/calculator.py", line 92, in __init__ self.dp = DeepPot(str(Path(model).resolve()), neighbor_list=neighbor_list) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/public/home/mzq001/soft/deepmd-kit/lib/python3.11/site-packages/deepmd/infer/deep_eval.py", line 334, in __init__ self.deep_eval = DeepEvalBackend( ^^^^^^^^^^^^^^^^ File "/public/home/mzq001/soft/deepmd-kit/lib/python3.11/site-packages/deepmd/pt/infer/deep_eval.py", line 121, in __init__ head is not None AssertionError: Head must be set for multitask model! Available heads are: ['Domains_Alloy', 'Domains_Anode', 'Domains_Cluster', 'Domains_Drug', 'Domains_FerroEle', 'Domains_OC2M', 'Domains_SSE-PBE', 'Domains_SemiCond', 'H2O_H2O-PD', 'Metals_AgAu-PBE', 'Metals_AlMgCu', 'Metals_Cu', 'Metals_Sn', 'Metals_Ti', 'Metals_V', 'Metals_W', 'Others_C12H26', 'Others_HfO2', 'Domains_ANI', 'Domains_SSE-PBESol', 'Domains_Transition1x', 'H2O_H2O-DPLR', 'H2O_H2O-PBE0TS-MD', 'H2O_H2O-PBE0TS', 'H2O_H2O-SCAN0', 'Metals_AgAu-PBED3', 'Others_In2Se3', 'MP_traj_v024_alldata_mixu'] ``` ```python ## Compute potential energy from ase import Atoms from deepmd.calculator import DP as DPCalculator dp = DPCalculator("DPA2_medium_28_10M_beta4.pt", head="H2O_H2O-PD") water = Atoms('H2O', positions=[(0.7601, 1.9270, 1), (1.9575, 1, 1), (1., 1., 1.)], cell=[100, 100, 100]) water.calc = dp print(water.get_potential_energy()) print(water.get_forces()) ## Run BFGS structure optimization from ase.optimize import BFGS dyn = BFGS(water) dyn.run(fmax=1e-6) print(water.get_positions()) ``` The variable named `head` of `DPCalculator` is not being used in `DeepPot` which directly leads to this error. ## Summary by CodeRabbit - **New Features** - Enhanced configurability of the DP class with an additional `head` parameter for initialization. - Updated default behavior in the calculate method to include stress calculations alongside energy and forces. - **Bug Fixes** - Clarified handling of stress property in the context of lattice relaxation, improving accuracy in calculations. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/calculator.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/deepmd/calculator.py b/deepmd/calculator.py index 6f863ab09b..0fbc447aaa 100644 --- a/deepmd/calculator.py +++ b/deepmd/calculator.py @@ -45,6 +45,8 @@ class DP(Calculator): will infer this information from model, by default None neighbor_list : ase.neighborlist.NeighborList, optional The neighbor list object. If None, then build the native neighbor list. + head : Union[str, None], optional + a specific model branch choosing from pretrained model, by default None Examples -------- @@ -84,10 +86,15 @@ def __init__( label: str = "DP", type_dict: Optional[dict[str, int]] = None, neighbor_list=None, + head=None, **kwargs, ) -> None: Calculator.__init__(self, label=label, **kwargs) - self.dp = DeepPot(str(Path(model).resolve()), neighbor_list=neighbor_list) + self.dp = DeepPot( + str(Path(model).resolve()), + neighbor_list=neighbor_list, + head=head, + ) if type_dict: self.type_dict = type_dict else: From 7eab6cc0cf59074e44476d4bdc54c9ef77a2cc61 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Wed, 6 Nov 2024 11:25:20 +0000 Subject: [PATCH 144/193] fix lmp uts and rename pair base --- source/api_c/include/deepmd.hpp | 12 +-- source/lmp/pair_base.cpp | 128 +++++--------------------------- source/lmp/pair_base.h | 13 ++-- source/lmp/pair_deepmd.cpp | 95 +++++++++++++++++++++++- source/lmp/pair_deepmd.h | 3 +- source/lmp/pair_deepspin.cpp | 95 +++++++++++++++++++++++- source/lmp/pair_deepspin.h | 3 +- 7 files changed, 222 insertions(+), 127 deletions(-) diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 55e4257d95..6d54cbdfa2 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -845,7 +845,7 @@ class DeepBaseModel { * @brief DP Base Model constructor without initialization. **/ DeepBaseModel() : dpbase(nullptr) {}; - virtual ~DeepBaseModel() { DP_DeleteDeepBaseModel(dpbase); }; + virtual ~DeepBaseModel() {}; /** * @brief Get the cutoff radius. @@ -955,7 +955,7 @@ class DeepPot : public DeepBaseModel { * @brief DP constructor without initialization. **/ DeepPot() : dp(nullptr) {}; - ~DeepPot() {}; + ~DeepPot() { DP_DeleteDeepPot(dp); }; /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. @@ -1426,7 +1426,7 @@ class DeepSpin : public DeepBaseModel { * @brief DP constructor without initialization. **/ DeepSpin() : dp(nullptr) {}; - ~DeepSpin() {}; + ~DeepSpin() { DP_DeleteDeepSpin(dp); }; /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. @@ -1730,7 +1730,7 @@ class DeepBaseModelDevi { * @brief DP model deviation constructor without initialization. **/ DeepBaseModelDevi() : dpbase(nullptr) {}; - virtual ~DeepBaseModelDevi() { DP_DeleteDeepBaseModelDevi(dpbase); }; + virtual ~DeepBaseModelDevi() {}; /** * @brief Get the cutoff radius. @@ -1939,7 +1939,7 @@ class DeepPotModelDevi : public DeepBaseModelDevi { * @brief DP model deviation constructor without initialization. **/ DeepPotModelDevi() : dp(nullptr) {}; - ~DeepPotModelDevi() {}; + ~DeepPotModelDevi() { DP_DeleteDeepPotModelDevi(dp); }; /** * @brief DP model deviation constructor with initialization. * @param[in] models The names of the frozen model file. @@ -2371,7 +2371,7 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { * @brief DP model deviation constructor without initialization. **/ DeepSpinModelDevi() : dp(nullptr) {}; - ~DeepSpinModelDevi() {}; + ~DeepSpinModelDevi() { DP_DeleteDeepSpinModelDevi(dp); }; /** * @brief DP model deviation constructor with initialization. * @param[in] models The names of the frozen model file. diff --git a/source/lmp/pair_base.cpp b/source/lmp/pair_base.cpp index 74501e705a..082d47237a 100644 --- a/source/lmp/pair_base.cpp +++ b/source/lmp/pair_base.cpp @@ -51,7 +51,7 @@ static int stringCmp(const void *a, const void *b) { return sum; } -int PairDeepMDBase::get_node_rank() { +int PairDeepBaseModel::get_node_rank() { char host_name[MPI_MAX_PROCESSOR_NAME]; memset(host_name, '\0', sizeof(char) * MPI_MAX_PROCESSOR_NAME); char(*host_names)[MPI_MAX_PROCESSOR_NAME]; @@ -98,7 +98,7 @@ int PairDeepMDBase::get_node_rank() { return looprank; } -std::string PairDeepMDBase::get_file_content(const std::string &model) { +std::string PairDeepBaseModel::get_file_content(const std::string &model) { int myrank = 0, root = 0; MPI_Comm_rank(MPI_COMM_WORLD, &myrank); int nchar = 0; @@ -121,7 +121,7 @@ std::string PairDeepMDBase::get_file_content(const std::string &model) { return file_content; } -std::vector PairDeepMDBase::get_file_content( +std::vector PairDeepBaseModel::get_file_content( const std::vector &models) { std::vector file_contents(models.size()); for (unsigned ii = 0; ii < models.size(); ++ii) { @@ -130,7 +130,7 @@ std::vector PairDeepMDBase::get_file_content( return file_contents; } -void PairDeepMDBase::make_fparam_from_compute(vector &fparam) { +void PairDeepBaseModel::make_fparam_from_compute(vector &fparam) { assert(do_compute_fparam); int icompute = modify->find_compute(compute_fparam_id); @@ -159,7 +159,7 @@ void PairDeepMDBase::make_fparam_from_compute(vector &fparam) { } } -void PairDeepMDBase::make_aparam_from_compute(vector &aparam) { +void PairDeepBaseModel::make_aparam_from_compute(vector &aparam) { assert(do_compute_aparam); int icompute = modify->find_compute(compute_aparam_id); @@ -189,7 +189,7 @@ void PairDeepMDBase::make_aparam_from_compute(vector &aparam) { } #ifdef USE_TTM -void PairDeepMDBase::make_ttm_fparam(vector &fparam) { +void PairDeepBaseModel::make_ttm_fparam(vector &fparam) { assert(do_ttm); // get ttm_fix const FixTTMDP *ttm_fix = NULL; @@ -230,7 +230,7 @@ void PairDeepMDBase::make_ttm_fparam(vector &fparam) { #endif #ifdef USE_TTM -void PairDeepMDBase::make_ttm_aparam(vector &daparam) { +void PairDeepBaseModel::make_ttm_aparam(vector &daparam) { assert(do_ttm); // get ttm_fix const FixTTMDP *ttm_fix = NULL; @@ -275,14 +275,15 @@ void PairDeepMDBase::make_ttm_aparam(vector &daparam) { } #endif -void PairDeepMDBase::cum_sum(std::map &sum, std::map &vec) { +void PairDeepBaseModel::cum_sum(std::map &sum, + std::map &vec) { sum[0] = 0; for (int ii = 1; ii < vec.size(); ++ii) { sum[ii] = sum[ii - 1] + vec[ii - 1]; } } -PairDeepMDBase::PairDeepMDBase( +PairDeepBaseModel::PairDeepBaseModel( LAMMPS *lmp, const char *cite_user_package, deepmd_compat::DeepBaseModel &deep_model, @@ -343,7 +344,7 @@ PairDeepMDBase::PairDeepMDBase( print_summary(" "); } -void PairDeepMDBase::print_summary(const string pre) const { +void PairDeepBaseModel::print_summary(const string pre) const { if (comm->me == 0) { // capture cout to a string, then call LAMMPS's utils::logmesg // https://stackoverflow.com/a/4043813/9567349 @@ -368,7 +369,7 @@ void PairDeepMDBase::print_summary(const string pre) const { } } -PairDeepMDBase::~PairDeepMDBase() { +PairDeepBaseModel::~PairDeepBaseModel() { if (allocated) { memory->destroy(setflag); memory->destroy(cutsq); @@ -376,7 +377,7 @@ PairDeepMDBase::~PairDeepMDBase() { } } -void PairDeepMDBase::allocate() { +void PairDeepBaseModel::allocate() { allocated = 1; int n = atom->ntypes; @@ -404,106 +405,13 @@ void PairDeepMDBase::allocate() { } } -void PairDeepMDBase::read_restart(FILE *) { is_restart = true; } +void PairDeepBaseModel::read_restart(FILE *) { is_restart = true; } -void PairDeepMDBase::write_restart(FILE *) { +void PairDeepBaseModel::write_restart(FILE *) { // pass } -/* ---------------------------------------------------------------------- - set coeffs for one or more type pairs -------------------------------------------------------------------------- */ - -void PairDeepMDBase::coeff(int narg, char **arg) { - if (!allocated) { - allocate(); - } - - int n = atom->ntypes; - int ilo, ihi, jlo, jhi; - ilo = 0; - jlo = 0; - ihi = n; - jhi = n; - if (narg >= 2) { - utils::bounds(FLERR, arg[0], 1, atom->ntypes, ilo, ihi, error); - utils::bounds(FLERR, arg[1], 1, atom->ntypes, jlo, jhi, error); - if (ilo != 1 || jlo != 1 || ihi != n || jhi != n) { - error->all(FLERR, - "deepmd requires that the scale should be set to all atom " - "types, i.e. pair_coeff * *."); - } - } - if (narg <= 2) { - type_idx_map.resize(n); - for (int ii = 0; ii < n; ++ii) { - type_idx_map[ii] = ii; - } - } else { - int iarg = 2; - - // type_map is a list of strings with undetermined length - // note: although we have numb_types from the model, we do not require - // the number of types in the system matches that in the model - std::vector type_map; - std::string type_map_str; - deep_base.get_type_map(type_map_str); - // convert the string to a vector of strings - std::istringstream iss(type_map_str); - std::string type_name; - while (iss >> type_name) { - type_map.push_back(type_name); - } - - type_idx_map.clear(); - type_names.clear(); - while (iarg < narg) { - std::string type_name = arg[iarg]; - type_names.push_back(type_name); - bool found_element = false; - for (int ii = 0; ii < type_map.size(); ++ii) { - if (type_map[ii] == type_name) { - type_idx_map.push_back(ii); - found_element = true; - break; - } - } - if (!found_element && "NULL" == type_name) { - type_idx_map.push_back(type_map.size()); // ghost type - found_element = true; - } - if (!found_element) { - error->all(FLERR, "pair_coeff: element " + type_name + - " not found in the model"); - } - iarg += 1; - } - numb_types = type_idx_map.size(); - if (numb_types < n) { - type_idx_map.resize(n); - for (int ii = numb_types; ii < n; ++ii) { - type_idx_map[ii] = -1; - } - } - } - for (int i = ilo; i <= ihi; i++) { - for (int j = MAX(jlo, i); j <= jhi; j++) { - setflag[i][j] = 1; - scale[i][j] = 1.0; - if (i > numb_types || j > numb_types) { - char warning_msg[1024]; - sprintf(warning_msg, - "Interaction between types %d and %d is set with deepmd, but " - "will be ignored.\n Deepmd model has only %d types, it only " - "computes the mulitbody interaction of types: 1-%d.", - i, j, numb_types, numb_types); - error->warning(FLERR, warning_msg); - } - } - } -} - -void PairDeepMDBase::init_style() { +void PairDeepBaseModel::init_style() { #if LAMMPS_VERSION_NUMBER >= 20220324 neighbor->add_request(this, NeighConst::REQ_FULL); #else @@ -527,7 +435,7 @@ void PairDeepMDBase::init_style() { } } -double PairDeepMDBase::init_one(int i, int j) { +double PairDeepBaseModel::init_one(int i, int j) { if (i > numb_types || j > numb_types) { char warning_msg[1024]; sprintf(warning_msg, @@ -546,7 +454,7 @@ double PairDeepMDBase::init_one(int i, int j) { return cutoff; } -void *PairDeepMDBase::extract(const char *str, int &dim) { +void *PairDeepBaseModel::extract(const char *str, int &dim) { if (strcmp(str, "cut_coul") == 0) { dim = 0; return (void *)&cutoff; diff --git a/source/lmp/pair_base.h b/source/lmp/pair_base.h index f19c09edff..055b45d20e 100644 --- a/source/lmp/pair_base.h +++ b/source/lmp/pair_base.h @@ -28,15 +28,14 @@ namespace deepmd_compat = deepmd::hpp; #define FLOAT_PREC double namespace LAMMPS_NS { -class PairDeepMDBase : public Pair { +class PairDeepBaseModel : public Pair { public: - PairDeepMDBase(class LAMMPS *, - const char *, - deepmd_compat::DeepBaseModel &, - deepmd_compat::DeepBaseModelDevi &); - virtual ~PairDeepMDBase() override; + PairDeepBaseModel(class LAMMPS *, + const char *, + deepmd_compat::DeepBaseModel &, + deepmd_compat::DeepBaseModelDevi &); + virtual ~PairDeepBaseModel() override; void *extract(const char *, int &) override; - void coeff(int, char **) override; void init_style() override; void write_restart(FILE *) override; void read_restart(FILE *) override; diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 573d6a63b6..e1231f9ed6 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -85,7 +85,7 @@ static const char cite_user_deepmd_package[] = "}\n\n"; PairDeepMD::PairDeepMD(LAMMPS *lmp) - : PairDeepMDBase( + : PairDeepBaseModel( lmp, cite_user_deepmd_package, deep_pot, deep_pot_model_devi) { // Constructor body can be empty } @@ -377,6 +377,99 @@ void PairDeepMD::settings(int narg, char **arg) { all_force.resize(numb_models); } +/* ---------------------------------------------------------------------- + set coeffs for one or more type pairs +------------------------------------------------------------------------- */ + +void PairDeepMD::coeff(int narg, char **arg) { + if (!allocated) { + allocate(); + } + + int n = atom->ntypes; + int ilo, ihi, jlo, jhi; + ilo = 0; + jlo = 0; + ihi = n; + jhi = n; + if (narg >= 2) { + utils::bounds(FLERR, arg[0], 1, atom->ntypes, ilo, ihi, error); + utils::bounds(FLERR, arg[1], 1, atom->ntypes, jlo, jhi, error); + if (ilo != 1 || jlo != 1 || ihi != n || jhi != n) { + error->all(FLERR, + "deepmd requires that the scale should be set to all atom " + "types, i.e. pair_coeff * *."); + } + } + if (narg <= 2) { + type_idx_map.resize(n); + for (int ii = 0; ii < n; ++ii) { + type_idx_map[ii] = ii; + } + } else { + int iarg = 2; + + // type_map is a list of strings with undetermined length + // note: although we have numb_types from the model, we do not require + // the number of types in the system matches that in the model + std::vector type_map; + std::string type_map_str; + deep_pot.get_type_map(type_map_str); + // convert the string to a vector of strings + std::istringstream iss(type_map_str); + std::string type_name; + while (iss >> type_name) { + type_map.push_back(type_name); + } + + type_idx_map.clear(); + type_names.clear(); + while (iarg < narg) { + std::string type_name = arg[iarg]; + type_names.push_back(type_name); + bool found_element = false; + for (int ii = 0; ii < type_map.size(); ++ii) { + if (type_map[ii] == type_name) { + type_idx_map.push_back(ii); + found_element = true; + break; + } + } + if (!found_element && "NULL" == type_name) { + type_idx_map.push_back(type_map.size()); // ghost type + found_element = true; + } + if (!found_element) { + error->all(FLERR, "pair_coeff: element " + type_name + + " not found in the model"); + } + iarg += 1; + } + numb_types = type_idx_map.size(); + if (numb_types < n) { + type_idx_map.resize(n); + for (int ii = numb_types; ii < n; ++ii) { + type_idx_map[ii] = -1; + } + } + } + for (int i = ilo; i <= ihi; i++) { + for (int j = MAX(jlo, i); j <= jhi; j++) { + setflag[i][j] = 1; + scale[i][j] = 1.0; + if (i > numb_types || j > numb_types) { + char warning_msg[1024]; + sprintf(warning_msg, + "Interaction between types %d and %d is set with deepmd, but " + "will be ignored.\n Deepmd model has only %d types, it only " + "computes the mulitbody interaction of types: 1-%d.", + i, j, numb_types, numb_types); + error->warning(FLERR, warning_msg); + } + } + } +} + void PairDeepMD::compute(int eflag, int vflag) { if (numb_models == 0) { return; diff --git a/source/lmp/pair_deepmd.h b/source/lmp/pair_deepmd.h index 5f29134277..a8b3c13f4c 100644 --- a/source/lmp/pair_deepmd.h +++ b/source/lmp/pair_deepmd.h @@ -40,11 +40,12 @@ namespace LAMMPS_NS { class CommBrickDeepMD : public CommBrick { friend class PairDeepMD; }; -class PairDeepMD : public PairDeepMDBase { +class PairDeepMD : public PairDeepBaseModel { public: PairDeepMD(class LAMMPS *); ~PairDeepMD() override; void settings(int, char **) override; + void coeff(int, char **) override; void compute(int, int) override; int pack_reverse_comm(int, int, double *) override; void unpack_reverse_comm(int, int *, double *) override; diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp index d156700c2b..83f65052ce 100644 --- a/source/lmp/pair_deepspin.cpp +++ b/source/lmp/pair_deepspin.cpp @@ -85,7 +85,7 @@ static const char cite_user_deepmd_package[] = "}\n\n"; PairDeepSpin::PairDeepSpin(LAMMPS *lmp) - : PairDeepMDBase( + : PairDeepBaseModel( lmp, cite_user_deepmd_package, deep_spin, deep_spin_model_devi) { // Constructor body can be empty } @@ -377,6 +377,99 @@ void PairDeepSpin::settings(int narg, char **arg) { all_force.resize(numb_models); } +/* ---------------------------------------------------------------------- + set coeffs for one or more type pairs +------------------------------------------------------------------------- */ + +void PairDeepSpin::coeff(int narg, char **arg) { + if (!allocated) { + allocate(); + } + + int n = atom->ntypes; + int ilo, ihi, jlo, jhi; + ilo = 0; + jlo = 0; + ihi = n; + jhi = n; + if (narg >= 2) { + utils::bounds(FLERR, arg[0], 1, atom->ntypes, ilo, ihi, error); + utils::bounds(FLERR, arg[1], 1, atom->ntypes, jlo, jhi, error); + if (ilo != 1 || jlo != 1 || ihi != n || jhi != n) { + error->all(FLERR, + "deepmd requires that the scale should be set to all atom " + "types, i.e. pair_coeff * *."); + } + } + if (narg <= 2) { + type_idx_map.resize(n); + for (int ii = 0; ii < n; ++ii) { + type_idx_map[ii] = ii; + } + } else { + int iarg = 2; + + // type_map is a list of strings with undetermined length + // note: although we have numb_types from the model, we do not require + // the number of types in the system matches that in the model + std::vector type_map; + std::string type_map_str; + deep_spin.get_type_map(type_map_str); + // convert the string to a vector of strings + std::istringstream iss(type_map_str); + std::string type_name; + while (iss >> type_name) { + type_map.push_back(type_name); + } + + type_idx_map.clear(); + type_names.clear(); + while (iarg < narg) { + std::string type_name = arg[iarg]; + type_names.push_back(type_name); + bool found_element = false; + for (int ii = 0; ii < type_map.size(); ++ii) { + if (type_map[ii] == type_name) { + type_idx_map.push_back(ii); + found_element = true; + break; + } + } + if (!found_element && "NULL" == type_name) { + type_idx_map.push_back(type_map.size()); // ghost type + found_element = true; + } + if (!found_element) { + error->all(FLERR, "pair_coeff: element " + type_name + + " not found in the model"); + } + iarg += 1; + } + numb_types = type_idx_map.size(); + if (numb_types < n) { + type_idx_map.resize(n); + for (int ii = numb_types; ii < n; ++ii) { + type_idx_map[ii] = -1; + } + } + } + for (int i = ilo; i <= ihi; i++) { + for (int j = MAX(jlo, i); j <= jhi; j++) { + setflag[i][j] = 1; + scale[i][j] = 1.0; + if (i > numb_types || j > numb_types) { + char warning_msg[1024]; + sprintf(warning_msg, + "Interaction between types %d and %d is set with deepmd, but " + "will be ignored.\n Deepmd model has only %d types, it only " + "computes the mulitbody interaction of types: 1-%d.", + i, j, numb_types, numb_types); + error->warning(FLERR, warning_msg); + } + } + } +} + void PairDeepSpin::compute(int eflag, int vflag) { if (numb_models == 0) { return; diff --git a/source/lmp/pair_deepspin.h b/source/lmp/pair_deepspin.h index 3363185405..47d6678441 100644 --- a/source/lmp/pair_deepspin.h +++ b/source/lmp/pair_deepspin.h @@ -40,11 +40,12 @@ namespace LAMMPS_NS { class CommBrickDeepSpin : public CommBrick { friend class PairDeepSpin; }; -class PairDeepSpin : public PairDeepMDBase { +class PairDeepSpin : public PairDeepBaseModel { public: PairDeepSpin(class LAMMPS *); ~PairDeepSpin() override; void settings(int, char **) override; + void coeff(int, char **) override; void compute(int, int) override; int pack_reverse_comm(int, int, double *) override; void unpack_reverse_comm(int, int *, double *) override; From 0965a702cf93c4b11aa962c8f1c77a5b563d1611 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Wed, 6 Nov 2024 12:07:04 +0000 Subject: [PATCH 145/193] add old c api --- source/api_c/include/c_api.h | 228 ++++++++++++++++++++++++++++++++++- source/api_c/src/c_api.cc | 203 +++++++++++++++++++++++++------ 2 files changed, 394 insertions(+), 37 deletions(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index b72a3fcc7c..a1d61de50a 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -1131,7 +1131,7 @@ void DP_DeepSpinModelDeviComputeNListf2(DP_DeepSpinModelDevi* dp, // Deep Base Model methods /** - * @brief Get the type map of a DP. + * @brief Get the cutoff of a DP. * @param[in] dpbase The DP to use. * @return The cutoff radius. */ @@ -1242,6 +1242,232 @@ int DP_DeepBaseModelDeviGetNumbTypesSpin(DP_DeepBaseModelDevi* dpbase); */ const char* DP_DeepBaseModelDeviCheckOK(DP_DeepBaseModelDevi* dpbase); +// DeepPot methods for c_api +/** + * @brief Get the cutoff of a DP. + * @param[in] dp The DP to use. + * @return The cutoff radius. + */ +double DP_DeepPotGetCutoff(DP_DeepPot* dp); + +/** + * @brief Get the number of types of a DP. + * @param[in] dp The DP to use. + * @return The number of types of the DP. + */ +int DP_DeepPotGetNumbTypes(DP_DeepPot* dp); + +/** + * @brief Get the number of types with spin of a DP. + * @param[in] dp The DP to use. + * @return The number of types with spin of the DP. + */ +int DP_DeepPotGetNumbTypesSpin(DP_DeepPot* dp); + +/** + * @brief Get the dimension of frame parameters of a DP. + * @param[in] dp The DP to use. + * @return The dimension of frame parameters of the DP. + */ +int DP_DeepPotGetDimFParam(DP_DeepPot* dp); + +/** + * @brief Get the dimension of atomic parameters of a DP. + * @param[in] dp The DP to use. + * @return The dimension of atomic parameters of the DP. + */ +int DP_DeepPotGetDimAParam(DP_DeepPot* dp); + +/** + * @brief Check whether the atomic dimension of atomic parameters is nall + * instead of nloc. + * + * @param[in] dp The DP to use. + * @return true the atomic dimension of atomic parameters is nall + * @return false the atomic dimension of atomic parameters is nloc + */ +bool DP_DeepPotIsAParamNAll(DP_DeepPot* dp); + +/** + * @brief Get the type map of a DP. + * @param[in] dp The DP to use. + * @return The type map of the DP. + */ +const char* DP_DeepPotGetTypeMap(DP_DeepPot* dp); + +/** + * @brief Check if there is any exceptions throw. + * + * @param dp The DP to use. + * @return const char* error message. + */ +const char* DP_DeepPotCheckOK(DP_DeepPot* dp); + +/** + * @brief Get the dimension of frame parameters of a DP Model Deviation. + * @param[in] dp The DP Model Deviation to use. + * @return The dimension of frame parameters of the DP Model Deviation. + */ +int DP_DeepPotModelDeviGetDimFParam(DP_DeepPotModelDevi* dp); +/** + * @brief Get the dimension of atomic parameters of a DP Model Deviation. + * @param[in] dp The DP Model Deviation to use. + * @return The dimension of atomic parameters of the DP Model Deviation. + */ +int DP_DeepPotModelDeviGetDimAParam(DP_DeepPotModelDevi* dp); + +/** + * @brief Check whether the atomic dimension of atomic parameters is nall + * instead of nloc. + * + * @param[in] dp The DP Model Deviation to use. + * @return true the atomic dimension of atomic parameters is nall + * @return false the atomic dimension of atomic parameters is nloc + */ +bool DP_DeepPotModelDeviIsAParamNAll(DP_DeepPotModelDevi* dp); + +/** + * @brief Get the type map of a DP model deviation. + * @param[in] dp The DP model deviation to use. + * @return The cutoff radius. + */ +double DP_DeepPotModelDeviGetCutoff(DP_DeepPotModelDevi* dp); + +/** + * @brief Get the number of types of a DP model deviation. + * @param[in] dp The DP model deviation to use. + * @return The number of types of the DP model deviation. + */ +int DP_DeepPotModelDeviGetNumbTypes(DP_DeepPotModelDevi* dp); + +/** + * @brief Get the number of types with spin of a DP model deviation. + * @param[in] dp The DP model deviation to use. + * @return The number of types with spin of the DP model deviation. + */ +int DP_DeepPotModelDeviGetNumbTypesSpin(DP_DeepPotModelDevi* dp); + +/** + * @brief Check if there is any exceptions throw. + * + * @param dp The DP model deviation to use. + * @return const char* error message. + */ +const char* DP_DeepPotModelDeviCheckOK(DP_DeepPotModelDevi* dp); + +// DeepSpin methods for c_api +/** + * @brief Get the cutoff of a DP. + * @param[in] dp The DP to use. + * @return The cutoff radius. + */ +double DP_DeepSpinGetCutoff(DP_DeepSpin* dp); + +/** + * @brief Get the number of types of a DP. + * @param[in] dp The DP to use. + * @return The number of types of the DP. + */ +int DP_DeepSpinGetNumbTypes(DP_DeepSpin* dp); + +/** + * @brief Get the number of types with spin of a DP. + * @param[in] dp The DP to use. + * @return The number of types with spin of the DP. + */ +int DP_DeepSpinGetNumbTypesSpin(DP_DeepSpin* dp); + +/** + * @brief Get the dimension of frame parameters of a DP. + * @param[in] dp The DP to use. + * @return The dimension of frame parameters of the DP. + */ +int DP_DeepSpinGetDimFParam(DP_DeepSpin* dp); + +/** + * @brief Get the dimension of atomic parameters of a DP. + * @param[in] dp The DP to use. + * @return The dimension of atomic parameters of the DP. + */ +int DP_DeepSpinGetDimAParam(DP_DeepSpin* dp); + +/** + * @brief Check whether the atomic dimension of atomic parameters is nall + * instead of nloc. + * + * @param[in] dp The DP to use. + * @return true the atomic dimension of atomic parameters is nall + * @return false the atomic dimension of atomic parameters is nloc + */ +bool DP_DeepSpinIsAParamNAll(DP_DeepSpin* dp); + +/** + * @brief Get the type map of a DP. + * @param[in] dp The DP to use. + * @return The type map of the DP. + */ +const char* DP_DeepSpinGetTypeMap(DP_DeepSpin* dp); + +/** + * @brief Check if there is any exceptions throw. + * + * @param dp The DP to use. + * @return const char* error message. + */ +const char* DP_DeepSpinCheckOK(DP_DeepSpin* dp); + +/** + * @brief Get the dimension of frame parameters of a DP Model Deviation. + * @param[in] dp The DP Model Deviation to use. + * @return The dimension of frame parameters of the DP Model Deviation. + */ +int DP_DeepSpinModelDeviGetDimFParam(DP_DeepSpinModelDevi* dp); +/** + * @brief Get the dimension of atomic parameters of a DP Model Deviation. + * @param[in] dp The DP Model Deviation to use. + * @return The dimension of atomic parameters of the DP Model Deviation. + */ +int DP_DeepSpinModelDeviGetDimAParam(DP_DeepSpinModelDevi* dp); + +/** + * @brief Check whether the atomic dimension of atomic parameters is nall + * instead of nloc. + * + * @param[in] dp The DP Model Deviation to use. + * @return true the atomic dimension of atomic parameters is nall + * @return false the atomic dimension of atomic parameters is nloc + */ +bool DP_DeepSpinModelDeviIsAParamNAll(DP_DeepSpinModelDevi* dp); + +/** + * @brief Get the type map of a DP model deviation. + * @param[in] dp The DP model deviation to use. + * @return The cutoff radius. + */ +double DP_DeepSpinModelDeviGetCutoff(DP_DeepSpinModelDevi* dp); + +/** + * @brief Get the number of types of a DP model deviation. + * @param[in] dp The DP model deviation to use. + * @return The number of types of the DP model deviation. + */ +int DP_DeepSpinModelDeviGetNumbTypes(DP_DeepSpinModelDevi* dp); + +/** + * @brief Get the number of types with spin of a DP model deviation. + * @param[in] dp The DP model deviation to use. + * @return The number of types with spin of the DP model deviation. + */ +int DP_DeepSpinModelDeviGetNumbTypesSpin(DP_DeepSpinModelDevi* dp); + +/** + * @brief Check if there is any exceptions throw. + * + * @param dp The DP model deviation to use. + * @return const char* error message. + */ +const char* DP_DeepSpinModelDeviCheckOK(DP_DeepSpinModelDevi* dp); + /** * @brief The deep tensor. **/ diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index e0bb82841d..e42fa16e93 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -1638,41 +1638,6 @@ void DP_DeepPotComputeMixedTypef(DP_DeepPot* dp, virial, atomic_energy, atomic_virial); } -// base model methods -const char* DP_DeepBaseModelGetTypeMap(DP_DeepBaseModel* dpbase) { - std::string type_map; - dpbase->dpbase.get_type_map(type_map); - return string_to_char(type_map); -} - -double DP_DeepBaseModelGetCutoff(DP_DeepBaseModel* dpbase) { - return dpbase->dpbase.cutoff(); -} - -int DP_DeepBaseModelGetNumbTypes(DP_DeepBaseModel* dpbase) { - return dpbase->dpbase.numb_types(); -} - -int DP_DeepBaseModelGetNumbTypesSpin(DP_DeepBaseModel* dpbase) { - return dpbase->dpbase.numb_types_spin(); -} - -int DP_DeepBaseModelGetDimFParam(DP_DeepBaseModel* dpbase) { - return dpbase->dfparam; -} - -int DP_DeepBaseModelGetDimAParam(DP_DeepBaseModel* dpbase) { - return dpbase->daparam; -} - -bool DP_DeepBaseModelIsAParamNAll(DP_DeepBaseModel* dpbase) { - return dpbase->aparam_nall; -} - -const char* DP_DeepBaseModelCheckOK(DP_DeepBaseModel* dpbase) { - return string_to_char(dpbase->exception); -} - void DP_DeepPotModelDeviCompute(DP_DeepPotModelDevi* dp, const int natoms, const double* coord, @@ -1903,7 +1868,41 @@ void DP_DeepSpinModelDeviComputeNListf2(DP_DeepSpinModelDevi* dp, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); } -// base model +// base model methods +const char* DP_DeepBaseModelGetTypeMap(DP_DeepBaseModel* dpbase) { + std::string type_map; + dpbase->dpbase.get_type_map(type_map); + return string_to_char(type_map); +} + +double DP_DeepBaseModelGetCutoff(DP_DeepBaseModel* dpbase) { + return dpbase->dpbase.cutoff(); +} + +int DP_DeepBaseModelGetNumbTypes(DP_DeepBaseModel* dpbase) { + return dpbase->dpbase.numb_types(); +} + +int DP_DeepBaseModelGetNumbTypesSpin(DP_DeepBaseModel* dpbase) { + return dpbase->dpbase.numb_types_spin(); +} + +int DP_DeepBaseModelGetDimFParam(DP_DeepBaseModel* dpbase) { + return dpbase->dfparam; +} + +int DP_DeepBaseModelGetDimAParam(DP_DeepBaseModel* dpbase) { + return dpbase->daparam; +} + +bool DP_DeepBaseModelIsAParamNAll(DP_DeepBaseModel* dpbase) { + return dpbase->aparam_nall; +} + +const char* DP_DeepBaseModelCheckOK(DP_DeepBaseModel* dpbase) { + return string_to_char(dpbase->exception); +} + double DP_DeepBaseModelDeviGetCutoff(DP_DeepBaseModelDevi* dpbase) { return dpbase->dpbase.cutoff(); } @@ -1932,6 +1931,138 @@ const char* DP_DeepBaseModelDeviCheckOK(DP_DeepBaseModelDevi* dpbase) { return string_to_char(dpbase->exception); } +// DeepPot methods +const char* DP_DeepPotGetTypeMap(DP_DeepPot* dp) { + return DP_DeepBaseModelGetTypeMap(static_cast(dp)); +} + +double DP_DeepPotGetCutoff(DP_DeepPot* dp) { + return DP_DeepBaseModelGetCutoff(static_cast(dp)); +} + +int DP_DeepPotGetNumbTypes(DP_DeepPot* dp) { + return DP_DeepBaseModelGetNumbTypes(static_cast(dp)); +} + +int DP_DeepPotGetNumbTypesSpin(DP_DeepPot* dp) { + return DP_DeepBaseModelGetNumbTypesSpin(static_cast(dp)); +} + +int DP_DeepPotGetDimFParam(DP_DeepPot* dp) { + return DP_DeepBaseModelGetDimFParam(static_cast(dp)); +} + +int DP_DeepPotGetDimAParam(DP_DeepPot* dp) { + return DP_DeepBaseModelGetDimAParam(static_cast(dp)); +} + +bool DP_DeepPotIsAParamNAll(DP_DeepPot* dp) { + return DP_DeepBaseModelIsAParamNAll(static_cast(dp)); +} + +const char* DP_DeepPotCheckOK(DP_DeepPot* dp) { + return DP_DeepBaseModelCheckOK(static_cast(dp)); +} + +double DP_DeepPotModelDeviGetCutoff(DP_DeepPotModelDevi* dp) { + return DP_DeepBaseModelDeviGetCutoff(static_cast(dp)); +} + +int DP_DeepPotModelDeviGetNumbTypes(DP_DeepPotModelDevi* dp) { + return DP_DeepBaseModelDeviGetNumbTypes( + static_cast(dp)); +} + +int DP_DeepPotModelDeviGetNumbTypesSpin(DP_DeepPotModelDevi* dp) { + return DP_DeepBaseModelDeviGetNumbTypesSpin( + static_cast(dp)); +} + +int DP_DeepPotModelDeviGetDimFParam(DP_DeepPotModelDevi* dp) { + return DP_DeepBaseModelDeviGetDimFParam( + static_cast(dp)); +} + +int DP_DeepPotModelDeviGetDimAParam(DP_DeepPotModelDevi* dp) { + return DP_DeepBaseModelDeviGetDimAParam( + static_cast(dp)); +} + +bool DP_DeepPotModelDeviIsAParamNAll(DP_DeepPotModelDevi* dp) { + return DP_DeepBaseModelDeviIsAParamNAll( + static_cast(dp)); +} + +const char* DP_DeepPotModelDeviCheckOK(DP_DeepPotModelDevi* dp) { + return DP_DeepBaseModelDeviCheckOK(static_cast(dp)); +} + +// DeepSpin methods +const char* DP_DeepSpinGetTypeMap(DP_DeepSpin* dp) { + return DP_DeepBaseModelGetTypeMap(static_cast(dp)); +} + +double DP_DeepSpinGetCutoff(DP_DeepSpin* dp) { + return DP_DeepBaseModelGetCutoff(static_cast(dp)); +} + +int DP_DeepSpinGetNumbTypes(DP_DeepSpin* dp) { + return DP_DeepBaseModelGetNumbTypes(static_cast(dp)); +} + +int DP_DeepSpinGetNumbTypesSpin(DP_DeepSpin* dp) { + return DP_DeepBaseModelGetNumbTypesSpin(static_cast(dp)); +} + +int DP_DeepSpinGetDimFParam(DP_DeepSpin* dp) { + return DP_DeepBaseModelGetDimFParam(static_cast(dp)); +} + +int DP_DeepSpinGetDimAParam(DP_DeepSpin* dp) { + return DP_DeepBaseModelGetDimAParam(static_cast(dp)); +} + +bool DP_DeepSpinIsAParamNAll(DP_DeepSpin* dp) { + return DP_DeepBaseModelIsAParamNAll(static_cast(dp)); +} + +const char* DP_DeepSpinCheckOK(DP_DeepSpin* dp) { + return DP_DeepBaseModelCheckOK(static_cast(dp)); +} + +double DP_DeepSpinModelDeviGetCutoff(DP_DeepSpinModelDevi* dp) { + return DP_DeepBaseModelDeviGetCutoff(static_cast(dp)); +} + +int DP_DeepSpinModelDeviGetNumbTypes(DP_DeepSpinModelDevi* dp) { + return DP_DeepBaseModelDeviGetNumbTypes( + static_cast(dp)); +} + +int DP_DeepSpinModelDeviGetNumbTypesSpin(DP_DeepSpinModelDevi* dp) { + return DP_DeepBaseModelDeviGetNumbTypesSpin( + static_cast(dp)); +} + +int DP_DeepSpinModelDeviGetDimFParam(DP_DeepSpinModelDevi* dp) { + return DP_DeepBaseModelDeviGetDimFParam( + static_cast(dp)); +} + +int DP_DeepSpinModelDeviGetDimAParam(DP_DeepSpinModelDevi* dp) { + return DP_DeepBaseModelDeviGetDimAParam( + static_cast(dp)); +} + +bool DP_DeepSpinModelDeviIsAParamNAll(DP_DeepSpinModelDevi* dp) { + return DP_DeepBaseModelDeviIsAParamNAll( + static_cast(dp)); +} + +const char* DP_DeepSpinModelDeviCheckOK(DP_DeepSpinModelDevi* dp) { + return DP_DeepBaseModelDeviCheckOK(static_cast(dp)); +} + void DP_DeepTensorComputeTensor(DP_DeepTensor* dt, const int natoms, const double* coord, From af09efd7580fc80d863fcf8a4e89d6b0e9603eec Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Wed, 6 Nov 2024 12:10:14 +0000 Subject: [PATCH 146/193] rename base to backend --- source/api_cc/include/DeepBaseModel.h | 14 +++++++------- source/api_cc/include/DeepPot.h | 14 +++++++------- source/api_cc/include/DeepPotPT.h | 2 +- source/api_cc/include/DeepPotTF.h | 2 +- source/api_cc/include/DeepSpin.h | 14 +++++++------- source/api_cc/include/DeepSpinPT.h | 2 +- source/api_cc/include/DeepSpinTF.h | 2 +- 7 files changed, 25 insertions(+), 25 deletions(-) diff --git a/source/api_cc/include/DeepBaseModel.h b/source/api_cc/include/DeepBaseModel.h index 18bc7091f5..68e2c8069b 100644 --- a/source/api_cc/include/DeepBaseModel.h +++ b/source/api_cc/include/DeepBaseModel.h @@ -10,13 +10,13 @@ namespace deepmd { /** * @brief Deep Potential Base Model. **/ -class DeepBaseModelBase { +class DeepBaseModelBackend { public: /** * @brief DP constructor without initialization. **/ - DeepBaseModelBase() {}; - virtual ~DeepBaseModelBase() {}; + DeepBaseModelBackend() {}; + virtual ~DeepBaseModelBackend() {}; /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. @@ -24,9 +24,9 @@ class DeepBaseModelBase { * @param[in] file_content The content of the model file. If it is not empty, *DP will read from the string instead of the file. **/ - DeepBaseModelBase(const std::string& model, - const int& gpu_rank = 0, - const std::string& file_content = ""); + DeepBaseModelBackend(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); /** * @brief Initialize the DP. * @param[in] model The name of the frozen model file. @@ -144,7 +144,7 @@ class DeepBaseModel { protected: bool inited; - std::shared_ptr dpbase; + std::shared_ptr dpbase; }; class DeepBaseModelDevi { diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index a74923fb31..06423d38c8 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -11,13 +11,13 @@ namespace deepmd { /** * @brief Deep Potential. **/ -class DeepPotBase : public DeepBaseModelBase { +class DeepPotBackend : public DeepBaseModelBackend { public: /** * @brief DP constructor without initialization. **/ - DeepPotBase() {}; - virtual ~DeepPotBase() {}; + DeepPotBackend() {}; + virtual ~DeepPotBackend() {}; /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. @@ -25,9 +25,9 @@ class DeepPotBase : public DeepBaseModelBase { * @param[in] file_content The content of the model file. If it is not empty, *DP will read from the string instead of the file. **/ - DeepPotBase(const std::string& model, - const int& gpu_rank = 0, - const std::string& file_content = ""); + DeepPotBackend(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); /** * @brief Initialize the DP. * @param[in] model The name of the frozen model file. @@ -523,7 +523,7 @@ class DeepPot : public DeepBaseModel { const std::vector& aparam = std::vector()); /** @} */ protected: - std::shared_ptr dp; + std::shared_ptr dp; }; class DeepPotModelDevi : public DeepBaseModelDevi { diff --git a/source/api_cc/include/DeepPotPT.h b/source/api_cc/include/DeepPotPT.h index 9683813093..f440b15a1b 100644 --- a/source/api_cc/include/DeepPotPT.h +++ b/source/api_cc/include/DeepPotPT.h @@ -10,7 +10,7 @@ namespace deepmd { /** * @brief PyTorch implementation for Deep Potential. **/ -class DeepPotPT : public DeepPotBase { +class DeepPotPT : public DeepPotBackend { public: /** * @brief DP constructor without initialization. diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index f020e8c92a..a36ae10495 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -10,7 +10,7 @@ namespace deepmd { /** * @brief TensorFlow implementation for Deep Potential. **/ -class DeepPotTF : public DeepPotBase { +class DeepPotTF : public DeepPotBackend { public: /** * @brief DP constructor without initialization. diff --git a/source/api_cc/include/DeepSpin.h b/source/api_cc/include/DeepSpin.h index 9ebe6604e6..8b1e896e73 100644 --- a/source/api_cc/include/DeepSpin.h +++ b/source/api_cc/include/DeepSpin.h @@ -11,13 +11,13 @@ namespace deepmd { /** * @brief Deep Potential. **/ -class DeepSpinBase : public DeepBaseModelBase { +class DeepSpinBackend : public DeepBaseModelBackend { public: /** * @brief DP constructor without initialization. **/ - DeepSpinBase() {}; - virtual ~DeepSpinBase() {}; + DeepSpinBackend() {}; + virtual ~DeepSpinBackend() {}; /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. @@ -25,9 +25,9 @@ class DeepSpinBase : public DeepBaseModelBase { * @param[in] file_content The content of the model file. If it is not empty, *DP will read from the string instead of the file. **/ - DeepSpinBase(const std::string& model, - const int& gpu_rank = 0, - const std::string& file_content = ""); + DeepSpinBackend(const std::string& model, + const int& gpu_rank = 0, + const std::string& file_content = ""); /** * @brief Initialize the DP. * @param[in] model The name of the frozen model file. @@ -415,7 +415,7 @@ class DeepSpin : public DeepBaseModel { const std::vector& aparam = std::vector()); /** @} */ protected: - std::shared_ptr dp; + std::shared_ptr dp; }; class DeepSpinModelDevi : public DeepBaseModelDevi { diff --git a/source/api_cc/include/DeepSpinPT.h b/source/api_cc/include/DeepSpinPT.h index 20a1e7303f..643557eb07 100644 --- a/source/api_cc/include/DeepSpinPT.h +++ b/source/api_cc/include/DeepSpinPT.h @@ -10,7 +10,7 @@ namespace deepmd { /** * @brief PyTorch implementation for Deep Potential. **/ -class DeepSpinPT : public DeepSpinBase { +class DeepSpinPT : public DeepSpinBackend { public: /** * @brief DP constructor without initialization. diff --git a/source/api_cc/include/DeepSpinTF.h b/source/api_cc/include/DeepSpinTF.h index 6c8da772c6..444f88187a 100644 --- a/source/api_cc/include/DeepSpinTF.h +++ b/source/api_cc/include/DeepSpinTF.h @@ -10,7 +10,7 @@ namespace deepmd { /** * @brief TensorFlow implementation for Deep Potential. **/ -class DeepSpinTF : public DeepSpinBase { +class DeepSpinTF : public DeepSpinBackend { public: /** * @brief DP constructor without initialization. From a532c33b368146cacdf29fabd27b81cc8c510492 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 6 Nov 2024 22:42:30 +0000 Subject: [PATCH 147/193] rename model filename in lammps tests Signed-off-by: Jinzhe Zeng --- source/lmp/tests/test_lammps_spin.py | 4 ++-- source/lmp/tests/test_lammps_spin_pt.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/lmp/tests/test_lammps_spin.py b/source/lmp/tests/test_lammps_spin.py index 31f5b41c98..aff80c52f6 100644 --- a/source/lmp/tests/test_lammps_spin.py +++ b/source/lmp/tests/test_lammps_spin.py @@ -24,8 +24,8 @@ pbtxt_file2 = ( Path(__file__).parent.parent.parent / "tests" / "infer" / "deepspin_nlist-2.pbtxt" ) -pb_file = Path(__file__).parent / "graph.pb" -pb_file2 = Path(__file__).parent / "graph2.pb" +pb_file = Path(__file__).parent / "deepspin_nlist.pb" +pb_file2 = Path(__file__).parent / "deepspin_nlist-2.pb" system_file = Path(__file__).parent.parent.parent / "tests" data_file = Path(__file__).parent / "data.lmp" data_file_si = Path(__file__).parent / "data.si" diff --git a/source/lmp/tests/test_lammps_spin_pt.py b/source/lmp/tests/test_lammps_spin_pt.py index e215358d59..dad99ddec8 100644 --- a/source/lmp/tests/test_lammps_spin_pt.py +++ b/source/lmp/tests/test_lammps_spin_pt.py @@ -24,7 +24,7 @@ pb_file = ( Path(__file__).parent.parent.parent / "tests" / "infer" / "deeppot_dpa_spin.pth" ) -pb_file2 = Path(__file__).parent / "graph2.pb" +pb_file2 = Path(__file__).parent / "deepspin_nlist-2.pb" system_file = Path(__file__).parent.parent.parent / "tests" data_file = Path(__file__).parent / "data.lmp" data_file_si = Path(__file__).parent / "data.si" From 969934983483d91e4d74dced78196534ceb27eb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yifan=20Li=E6=9D=8E=E4=B8=80=E5=B8=86?= Date: Thu, 7 Nov 2024 01:20:35 -0500 Subject: [PATCH 148/193] fix dplr: support initializing from a restart file (#4084) Thie PR aims at fixing #3679. The `get_valid_pairs` will raise an error when 1) reading a restart file written from previous dplr jobs 2) reading a data file written from previous dplr jobs 3) executing 'run 0' for twice The origin is that the check in `get_valid_pairs` is too strict for the initialization process. The check requires that a pair of real and virtual atoms found by iterating over the bondlist to be the local atoms of the same processor. This requirement is automatically satisfied during the running of MD because the positions of virtual atoms are set equal to those of real atoms before rebuilding bondlists. However, during initialization (`Verlet::setup(int flag)`), the bondlist is built based on the initial configuration provided by the user. If the user provides a configuration in which the virtual atoms are placed at the Wannier centroids, it is possible that a bond formed by a pair of real and virtual atoms are between a local and a ghost atom, which triggers this error. This PR loosens the requirement on the initial configuration provided by the user. In the `setup_post_neighbor` function, the `get_valid_pairs` function searches for bonded pairs but skips the aforementioned check. It sets the coordinates of the virtual atoms to the coordinates of the real atoms, and then rebuilds the bondlist. Afterwards, the check can be passed. ## Summary by CodeRabbit - **New Features** - Introduced a new method for setting up pre-exchange operations, enhancing simulation lifecycle management. - **Changes** - Renamed the `post_integrate` method to `pre_exchange` to better reflect its role in the simulation process. - Updated mask settings to improve clarity and context within the simulation lifecycle. - **Chores** - Updated ownership context for Docker image in the build workflow configuration. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- source/lmp/fix_dplr.cpp | 83 +++++++++++++++++++++++------------ source/lmp/fix_dplr.h | 6 ++- source/lmp/tests/test_dplr.py | 27 ++++++++++++ 3 files changed, 86 insertions(+), 30 deletions(-) diff --git a/source/lmp/fix_dplr.cpp b/source/lmp/fix_dplr.cpp index 3e59c6b2db..8e54410d0a 100644 --- a/source/lmp/fix_dplr.cpp +++ b/source/lmp/fix_dplr.cpp @@ -236,7 +236,8 @@ int FixDPLR::setmask() { // THERMO_ENERGY removed in lammps/lammps#2560 mask |= THERMO_ENERGY; #endif - mask |= POST_INTEGRATE; + mask |= PRE_EXCHANGE; + mask |= POST_NEIGHBOR; mask |= PRE_FORCE; mask |= POST_FORCE; mask |= MIN_PRE_EXCHANGE; @@ -248,19 +249,9 @@ int FixDPLR::setmask() { /* ---------------------------------------------------------------------- */ void FixDPLR::init() { - // double **xx = atom->x; - // double **vv = atom->v; - // int nlocal = atom->nlocal; - // for (int ii = 0; ii < nlocal; ++ii){ - // cout << xx[ii][0] << " " - // << xx[ii][1] << " " - // << xx[ii][2] << " " - // << vv[ii][0] << " " - // << vv[ii][1] << " " - // << vv[ii][2] << " " - // << endl; - // } - // check variables + if (atom->map_style == Atom::MAP_NONE) { + error->all(FLERR, "Fix dplr requires an atom map, see atom_modify"); + } if (xstr) { xvar = input->variable->find(xstr); @@ -313,17 +304,53 @@ void FixDPLR::init() { /* ---------------------------------------------------------------------- */ +void FixDPLR::setup_post_neighbor() { + double **x = atom->x; + + vector > valid_pairs; + get_valid_pairs(valid_pairs, true); + + for (int ii = 0; ii < valid_pairs.size(); ++ii) { + int idx0 = valid_pairs[ii].first; + int idx1 = valid_pairs[ii].second; + int idx0_local = atom->map(atom->tag[idx0]); + int idx1_local = atom->map(atom->tag[idx1]); + + for (int dd = 0; dd < 3; ++dd) { + x[idx1][dd] = x[idx0][dd]; + x[idx0_local][dd] = x[idx0][dd]; + x[idx1_local][dd] = x[idx0][dd]; + } + } + int triclinic; + triclinic = domain->triclinic; + if (triclinic) { + domain->x2lamda(atom->nlocal); + } + domain->pbc(); + domain->reset_box(); + comm->setup(); + neighbor->setup_bins(); + comm->exchange(); + comm->borders(); + if (triclinic) { + domain->lamda2x(atom->nlocal + atom->nghost); + } + + neighbor->build(1); +} + +/* ---------------------------------------------------------------------- */ + +void FixDPLR::setup_pre_exchange() {} + +/* ---------------------------------------------------------------------- */ + void FixDPLR::setup_pre_force(int vflag) { pre_force(vflag); } /* ---------------------------------------------------------------------- */ -void FixDPLR::setup(int vflag) { - // if (strstr(update->integrate_style,"verlet")) - post_force(vflag); - // else { - // error->all(FLERR, "respa is not supported by this fix"); - // } -} +void FixDPLR::setup(int vflag) { post_force(vflag); } /* ---------------------------------------------------------------------- */ @@ -331,7 +358,7 @@ void FixDPLR::min_setup(int vflag) { setup(vflag); } /* ---------------------------------------------------------------------- */ -void FixDPLR::get_valid_pairs(vector > &pairs) { +void FixDPLR::get_valid_pairs(vector > &pairs, bool is_setup) { pairs.clear(); int nlocal = atom->nlocal; @@ -398,7 +425,7 @@ void FixDPLR::get_valid_pairs(vector > &pairs) { error->all(FLERR, str); } } - if (!(idx0 < nlocal && idx1 < nlocal)) { + if (!(idx0 < nlocal && idx1 < nlocal) && (!is_setup)) { error->all(FLERR, "find a bonded pair that is not on the same processor, " "something should not happen"); @@ -409,7 +436,7 @@ void FixDPLR::get_valid_pairs(vector > &pairs) { /* ---------------------------------------------------------------------- */ -void FixDPLR::post_integrate() { +void FixDPLR::pre_exchange() { double **x = atom->x; double **v = atom->v; int *type = atom->type; @@ -418,7 +445,7 @@ void FixDPLR::post_integrate() { int nall = nlocal + nghost; vector > valid_pairs; - get_valid_pairs(valid_pairs); + get_valid_pairs(valid_pairs, false); for (int ii = 0; ii < valid_pairs.size(); ++ii) { int idx0 = valid_pairs[ii].first; @@ -519,7 +546,7 @@ void FixDPLR::pre_force(int vflag) { // vector & sort_fwd_map(atom_map.get_fwd_map()); vector > valid_pairs; - get_valid_pairs(valid_pairs); + get_valid_pairs(valid_pairs, false); int odim = dpt.output_dim(); assert(odim == 3); @@ -642,7 +669,7 @@ void FixDPLR::post_force(int vflag) { list->firstneigh); // bonded pairs vector > valid_pairs; - get_valid_pairs(valid_pairs); + get_valid_pairs(valid_pairs, false); // output vects vector dfcorr, dvcorr; // compute @@ -728,7 +755,7 @@ void FixDPLR::post_force(int vflag) { /* ---------------------------------------------------------------------- */ -void FixDPLR::min_pre_exchange() { post_integrate(); } +void FixDPLR::min_pre_exchange() { pre_exchange(); } /* ---------------------------------------------------------------------- */ diff --git a/source/lmp/fix_dplr.h b/source/lmp/fix_dplr.h index c43296e611..5f1161fda6 100644 --- a/source/lmp/fix_dplr.h +++ b/source/lmp/fix_dplr.h @@ -42,9 +42,11 @@ class FixDPLR : public Fix { int setmask() override; void init() override; void setup(int) override; + void setup_pre_exchange() override; void setup_pre_force(int) override; + void setup_post_neighbor() override; void min_setup(int) override; - void post_integrate() override; + void pre_exchange() override; void pre_force(int) override; void post_force(int) override; void min_pre_exchange() override; @@ -72,7 +74,7 @@ class FixDPLR : public Fix { std::vector efield; std::vector efield_fsum, efield_fsum_all; int efield_force_flag; - void get_valid_pairs(std::vector > &pairs); + void get_valid_pairs(std::vector > &pairs, bool is_setup); int varflag; char *xstr, *ystr, *zstr; int xvar, yvar, zvar, xstyle, ystyle, zstyle; diff --git a/source/lmp/tests/test_dplr.py b/source/lmp/tests/test_dplr.py index ed28bbd6d4..4750849860 100644 --- a/source/lmp/tests/test_dplr.py +++ b/source/lmp/tests/test_dplr.py @@ -21,6 +21,7 @@ dipole_pbtxt_file = Path(__file__).parent / "lrdipole.pbtxt" dipole_pb_file = Path(__file__).parent / "lrdipole.pb" data_file = Path(__file__).parent / "data.lmp" +data_file2 = Path(__file__).parent / "data.lmp2" data_file_si = Path(__file__).parent / "data.si" data_type_map_file = Path(__file__).parent / "data_type_map.lmp" @@ -241,6 +242,7 @@ ) box = np.array([0, 20, 0, 20, 0, 20, 0, 0, 0]) +box2 = np.array([0, 20, 0, 3.2575, 0, 20, 0, 0, 0]) coord = np.array( [ [1.25545000, 1.27562200, 0.98873000], @@ -272,6 +274,9 @@ def setup_module(): write_lmp_data_full( box, coord, mol_list, type_OH, charge, data_file, bond_list, mass_list ) + write_lmp_data_full( + box2, coord, mol_list, type_OH, charge, data_file2, bond_list, mass_list + ) write_lmp_data_full( box, coord, mol_list, type_HO, charge, data_type_map_file, bond_list, mass_list ) @@ -289,6 +294,7 @@ def setup_module(): def teardown_module(): os.remove(data_file) + os.remove(data_file2) os.remove(data_type_map_file) os.remove(data_file_si) @@ -325,6 +331,13 @@ def lammps(): lmp.close() +@pytest.fixture +def lammps2(): + lmp = _lammps(data_file=data_file2) + yield lmp + lmp.close() + + @pytest.fixture def lammps_type_map(): lmp = _lammps(data_file=data_type_map_file, exclude_type="2 3") @@ -407,6 +420,20 @@ def test_pair_deepmd_lr(lammps): lammps.run(1) +def test_pair_deepmd_lr_run0(lammps2): + lammps2.pair_style(f"deepmd {pb_file.resolve()}") + lammps2.pair_coeff("* *") + lammps2.bond_style("zero") + lammps2.bond_coeff("*") + lammps2.special_bonds("lj/coul 1 1 1 angle no") + lammps2.kspace_style("pppm/dplr 1e-5") + lammps2.kspace_modify(f"gewald {beta:.2f} diff ik mesh {mesh:d} {mesh:d} {mesh:d}") + lammps2.fix(f"0 all dplr model {pb_file.resolve()} type_associate 1 3 bond_type 1") + lammps2.fix_modify("0 virial yes") + lammps2.run(0) + lammps2.run(0) + + def test_pair_deepmd_lr_efield_constant(lammps): lammps.pair_style(f"deepmd {pb_file.resolve()}") lammps.pair_coeff("* *") From 919654e233984ca3686178afcbcd5a1f34144839 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 15:19:29 +0800 Subject: [PATCH 149/193] add tf nlist nopbc UT for spin --- source/api_cc/include/DeepPotTF.h | 2 - source/api_cc/include/DeepSpinTF.h | 4 +- source/api_cc/src/DeepSpinTF.cc | 27 ++- source/api_cc/tests/test_deeppot_tf_spin.cc | 242 ++++++++++++++++++++ 4 files changed, 256 insertions(+), 19 deletions(-) diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index a36ae10495..b2e7b12487 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -306,8 +306,6 @@ class DeepPotTF : public DeepPotBackend { std::string model_version; int ntypes; int ntypes_spin; - std::vector virtual_len; - std::vector spin_norm; int extend_inum; std::vector extend_ilist; std::vector extend_numneigh; diff --git a/source/api_cc/include/DeepSpinTF.h b/source/api_cc/include/DeepSpinTF.h index 444f88187a..05f5ec7382 100644 --- a/source/api_cc/include/DeepSpinTF.h +++ b/source/api_cc/include/DeepSpinTF.h @@ -254,9 +254,7 @@ class DeepSpinTF : public DeepSpinBackend { const int nghost, const std::vector& spin, const int numb_types, - const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm); + const int numb_types_spin); template void extend_nlist(std::vector& extend_dcoord, diff --git a/source/api_cc/src/DeepSpinTF.cc b/source/api_cc/src/DeepSpinTF.cc index ea110ebbf7..416fc226ff 100644 --- a/source/api_cc/src/DeepSpinTF.cc +++ b/source/api_cc/src/DeepSpinTF.cc @@ -735,15 +735,11 @@ void DeepSpinTF::compute(ENERGYVTYPE& dener, int nframes = nall > 0 ? (dcoord_.size() / nall / 3) : 1; int nloc = nall - nghost; - std::vector virtual_len; - std::vector spin_norm; std::vector extend_dcoord; - get_vector(virtual_len, "spin_attr/virtual_len"); - get_vector(spin_norm, "spin_attr/spin_norm"); extend(extend_inum, extend_ilist, extend_numneigh, extend_neigh, extend_firstneigh, extend_dcoord, extend_dtype, extend_nghost, new_idx_map, old_idx_map, lmp_list, dcoord_, datype_, nghost, dspin_, - ntypes, ntypes_spin, virtual_len, spin_norm); + ntypes, ntypes_spin); InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], &extend_numneigh[0], &extend_firstneigh[0]); std::vector fparam; @@ -1008,15 +1004,22 @@ void DeepSpinTF::extend(int& extend_inum, const int nghost, const std::vector& spin, const int numb_types, - const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm) { + const int numb_types_spin) { extend_ilist.clear(); extend_numneigh.clear(); extend_neigh.clear(); extend_firstneigh.clear(); extend_dcoord.clear(); extend_atype.clear(); + if (dtype == tensorflow::DT_DOUBLE) { + get_vector(virtual_len, "spin_attr/virtual_len"); + get_vector(spin_norm, "spin_attr/spin_norm"); + } else { + std::vector virtual_len; + std::vector spin_norm; + get_vector(virtual_len, "spin_attr/virtual_len"); + get_vector(spin_norm, "spin_attr/spin_norm"); + } int nall = dcoord.size() / 3; int nloc = nall - nghost; @@ -1178,9 +1181,7 @@ template void DeepSpinTF::extend( const int nghost, const std::vector& spin, const int numb_types, - const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm); + const int numb_types_spin); template void DeepSpinTF::extend( int& extend_inum, @@ -1199,9 +1200,7 @@ template void DeepSpinTF::extend( const int nghost, const std::vector& spin, const int numb_types, - const int numb_types_spin, - const std::vector& virtual_len, - const std::vector& spin_norm); + const int numb_types_spin); template void DeepSpinTF::extend_nlist(std::vector& extend_dcoord, diff --git a/source/api_cc/tests/test_deeppot_tf_spin.cc b/source/api_cc/tests/test_deeppot_tf_spin.cc index 4c432af597..d03a9b0b57 100644 --- a/source/api_cc/tests/test_deeppot_tf_spin.cc +++ b/source/api_cc/tests/test_deeppot_tf_spin.cc @@ -36,6 +36,7 @@ class TestInferDeepSpin : public ::testing::Test { 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; int natoms; double expected_tot_e; + // std::vector expected_tot_v; deepmd::DeepSpin dp; @@ -49,10 +50,18 @@ class TestInferDeepSpin : public ::testing::Test { natoms = expected_e.size(); EXPECT_EQ(natoms * 3, expected_f.size()); EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); for (int ii = 0; ii < natoms; ++ii) { expected_tot_e += expected_e[ii]; } + // for (int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } }; void TearDown() override { remove("deepspin_nlist.pb"); }; @@ -69,19 +78,26 @@ TYPED_TEST(TestInferDeepSpin, cpu_build_nlist) { std::vector& expected_e = this->expected_e; std::vector& expected_f = this->expected_f; std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; int& natoms = this->natoms; double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; deepmd::DeepSpin& dp = this->dp; double ener; std::vector force, force_mag, virial; dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); for (int ii = 0; ii < natoms * 3; ++ii) { EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } } TYPED_TEST(TestInferDeepSpin, cpu_build_nlist_atomic) { @@ -93,22 +109,248 @@ TYPED_TEST(TestInferDeepSpin, cpu_build_nlist_atomic) { std::vector& expected_e = this->expected_e; std::vector& expected_f = this->expected_f; std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box); + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + // EXPECT_EQ(atom_ener.size(), natoms); + // EXPECT_EQ(atom_vir.size(), natoms * 9); + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); + // } +} + +template +class TestInferDeepSpinNopbc : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, + 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 0, 1, 1}; + std::vector box = {100., 0., 0., 0., 100., 0., 0., 0., 100.}; + std::vector expected_e = {-7.313160384523243, -7.312173646552338, + -2.8984477845267067, + -2.8984477845267067}; + std::vector expected_f = { + 0.0277100137316238, -0.0116082489956803, -0.0211484273275705, + -0.0277100137316238, 0.0116082489956803, 0.0211484273275705, + 0.0097588349924651, 0.0091168063745397, -0.0133541952528469, + -0.0097588349924651, -0.0091168063745397, 0.0133541952528469}; + std::vector expected_fm = { + 0.0058990325687816, -0.0024712163463815, 0.0296682261295907, + -0.0060028470719556, 0.0025147062058193, 0.0321884178873188, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + int natoms; + double expected_tot_e; + // std::vector expected_tot_v; + + deepmd::DeepSpin dp; + + void SetUp() override { + std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; + deepmd::convert_pbtxt_to_pb("../../tests/infer/deepspin_nlist.pbtxt", + "deepspin_nlist.pb"); + + dp.init("deepspin_nlist.pb"); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); + expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); + for (int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + // for (int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } + }; + + void TearDown() override { remove("deepspin_nlist.pb"); }; +}; + +TYPED_TEST_SUITE(TestInferDeepSpinNopbc, ValueTypes); + +TYPED_TEST(TestInferDeepSpinNopbc, cpu_build_nlist) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepSpinNopbc, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; int& natoms = this->natoms; double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; deepmd::DeepSpin& dp = this->dp; double ener; std::vector force, force_mag, virial, atom_ener, atom_vir; dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, atype, box); + EXPECT_EQ(force.size(), natoms * 3); EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); // EXPECT_EQ(atom_ener.size(), natoms); + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + // EXPECT_EQ(atom_vir.size(), natoms * 9); + + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepSpinNopbc, cpu_lmp_nlist) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + + std::vector > nlist_data = {{1}, {0}, {3}, {2}}; + std::vector ilist(natoms), numneigh(natoms); + std::vector firstneigh(natoms); + deepmd::InputNlist inlist(natoms, &ilist[0], &numneigh[0], &firstneigh[0]); + convert_nlist(inlist, nlist_data); + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box, 0, inlist, + 0); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepSpinNopbc, cpu_lmp_nlist_atomic) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + + std::vector > nlist_data = {{1}, {0}, {3}, {2}}; + std::vector ilist(natoms), numneigh(natoms); + std::vector firstneigh(natoms); + deepmd::InputNlist inlist(natoms, &ilist[0], &numneigh[0], &firstneigh[0]); + convert_nlist(inlist, nlist_data); + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box, 0, inlist, 0); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + EXPECT_EQ(atom_ener.size(), natoms); + // EXPECT_EQ(atom_vir.size(), natoms * 9); + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); for (int ii = 0; ii < natoms * 3; ++ii) { EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } for (int ii = 0; ii < natoms; ++ii) { EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); + // } } From c30091b3d0e49a8978879668852bb25a3e59f630 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 15:22:07 +0800 Subject: [PATCH 150/193] add tf lmp nopbc UT for spin --- source/lmp/tests/test_lammps_spin_nopbc.py | 250 +++++++++++++++++++++ 1 file changed, 250 insertions(+) create mode 100644 source/lmp/tests/test_lammps_spin_nopbc.py diff --git a/source/lmp/tests/test_lammps_spin_nopbc.py b/source/lmp/tests/test_lammps_spin_nopbc.py new file mode 100644 index 0000000000..bdbd98f258 --- /dev/null +++ b/source/lmp/tests/test_lammps_spin_nopbc.py @@ -0,0 +1,250 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import importlib +import os +import shutil +import subprocess as sp +import sys +import tempfile +from pathlib import ( + Path, +) + +import numpy as np +import pytest +from lammps import ( + PyLammps, +) +from write_lmp_data import ( + write_lmp_data_spin, +) + +pbtxt_file = ( + Path(__file__).parent.parent.parent / "tests" / "infer" / "deepspin_nlist.pbtxt" +) +pbtxt_file2 = ( + Path(__file__).parent.parent.parent / "tests" / "infer" / "deepspin_nlist-2.pbtxt" +) +pb_file = Path(__file__).parent / "deepspin_nlist.pb" +pb_file2 = Path(__file__).parent / "deepspin_nlist-2.pb" +system_file = Path(__file__).parent.parent.parent / "tests" +data_file = Path(__file__).parent / "data.lmp" +data_file_si = Path(__file__).parent / "data.si" +data_type_map_file = Path(__file__).parent / "data_type_map.lmp" +md_file = Path(__file__).parent / "md.out" + +expected_ae = np.array( + [-7.313160384523243, -7.312173646552338, -2.8984477845267067, -2.8984477845267067] +) +expected_e = np.sum(expected_ae) +expected_f = np.array( + [ + [0.0277100137316238, -0.0116082489956803, -0.0211484273275705], + [-0.0277100137316238, 0.0116082489956803, 0.0211484273275705], + [0.0097588349924651, 0.0091168063745397, -0.0133541952528469], + [-0.0097588349924651, -0.0091168063745397, 0.0133541952528469], + ] +) +expected_fm = np.array( + [ + [0.0058990325687816, -0.0024712163463815, 0.0296682261295907], + [-0.0060028470719556, 0.0025147062058193, 0.0321884178873188], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + ] +) + +expected_f2 = np.array( + [ + [-0.0020912362538459, 0.0008760584306652, -0.0002029714364812], + [0.0020912362538459, -0.0008760584306652, 0.0002029714364812], + [0.0020348523962324, 0.0019009805280592, -0.0027845348580022], + [-0.0020348523962324, -0.0019009805280592, 0.0027845348580022], + ] +) + +expected_fm2 = np.array( + [ + [0.0020796789544968, -0.0008712168593162, 0.0269545489546998], + [-0.0031170434556743, 0.0013057884746744, 0.0295063550138163], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + ] +) + +box = np.array([0, 100, 0, 100, 0, 100, 0, 0, 0]) +coord = np.array( + [ + [12.83, 2.56, 2.18], + [12.09, 2.87, 2.74], + [3.51, 2.51, 2.60], + [4.27, 3.22, 1.56], + ] +) +spin = np.array( + [ + [0, 0, 1.2737], + [0, 0, 1.2737], + [0, 0, 0], + [0, 0, 0], + ] +) +type_NiO = np.array([1, 1, 2, 2]) + +sp.check_output( + f"{sys.executable} -m deepmd convert-from pbtxt -i {pbtxt_file.resolve()} -o {pb_file.resolve()}".split() +) +sp.check_output( + f"{sys.executable} -m deepmd convert-from pbtxt -i {pbtxt_file2.resolve()} -o {pb_file2.resolve()}".split() +) + + +def setup_module(): + write_lmp_data_spin(box, coord, spin, type_NiO, data_file) + + +def teardown_module(): + os.remove(data_file) + + +def _lammps(data_file, units="metal") -> PyLammps: + lammps = PyLammps() + lammps.plugin("load libdeepmd_lmp.so") + lammps.units(units) + lammps.boundary("f f f") + lammps.atom_style("spin") + if units == "metal": + lammps.neighbor("2.0 bin") + else: + raise ValueError("units for spin should be metal") + lammps.neigh_modify("every 10 delay 0 check no") + lammps.read_data(data_file.resolve()) + if units == "metal": + lammps.mass("1 58") + lammps.mass("2 16") + else: + raise ValueError("units for spin should be metal") + if units == "metal": + lammps.timestep(0.0005) + else: + raise ValueError("units for spin should be metal") + lammps.fix("1 all nve") + return lammps + + +@pytest.fixture +def lammps(): + lmp = _lammps(data_file=data_file) + yield lmp + lmp.close() + + +def test_pair_deepmd(lammps): + lammps.pair_style(f"deepspin {pb_file.resolve()}") + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + lammps.run(1) + + +def test_pair_deepmd_model_devi(lammps): + lammps.pair_style( + f"deepspin {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1" + ) + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) + + +def test_pair_deepmd_model_devi_atomic_relative(lammps): + relative = 1.0 + lammps.pair_style( + f"deepspin {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative {relative}" + ) + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + norm_spin = np.linalg.norm(np.mean([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_f /= norm + relative + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_fm /= norm_spin + relative + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) + + +@pytest.mark.skipif( + shutil.which("mpirun") is None, reason="MPI is not installed on this system" +) +@pytest.mark.skipif( + importlib.util.find_spec("mpi4py") is None, reason="mpi4py is not installed" +) +@pytest.mark.parametrize( + ("balance_args",), + [(["--balance"],), ([],)], +) +def test_pair_deepmd_mpi(balance_args: list): + with tempfile.NamedTemporaryFile() as f: + sp.check_call( + [ + "mpirun", + "-n", + "2", + sys.executable, + Path(__file__).parent / "run_mpi_pair_deepmd_spin.py", + data_file, + pb_file, + pb_file2, + md_file, + f.name, + *balance_args, + "--nopbc", + ] + ) + arr = np.loadtxt(f.name, ndmin=1) + pe = arr[0] + + relative = 1.0 + assert pe == pytest.approx(expected_e) + # load model devi + md = np.loadtxt(md_file.resolve()) + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + norm_spin = np.linalg.norm(np.mean([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_f /= norm + relative + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_fm /= norm_spin + relative + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) From 10b163eca9a512cc108c2d72744fa16727dc7358 Mon Sep 17 00:00:00 2001 From: hztttt <940755193@qq.com> Date: Thu, 7 Nov 2024 12:38:37 +0800 Subject: [PATCH 151/193] fix torch lmp UT bug --- source/lmp/tests/write_lmp_data.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/lmp/tests/write_lmp_data.py b/source/lmp/tests/write_lmp_data.py index 2b64ccfbea..fd86c8d61a 100644 --- a/source/lmp/tests/write_lmp_data.py +++ b/source/lmp/tests/write_lmp_data.py @@ -75,8 +75,8 @@ def write_lmp_data_spin(box, coord, spin, type_list, file_name): natom = coord.shape[0] ntype = np.unique(type_list).shape[0] sp_norm = np.linalg.norm(spin, axis=1, keepdims=True) - sp_norm = np.where(sp_norm == 0, 1, sp_norm) - sp_unit = spin / sp_norm + sp_unit = spin / np.where(sp_norm == 0, 1, sp_norm) + sp_unit = np.where(sp_norm == 0, 1, sp_unit) with open(file_name, "w") as f: f.write(comment_lmp_data + "\n") f.write("%d atoms\n" % (natom)) From 0039aa41b187a23546469de1c5d9eea63d8e7ce7 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 08:42:06 +0000 Subject: [PATCH 152/193] fix nopbc spin test --- source/api_cc/src/DeepSpinPT.cc | 21 +- source/lmp/tests/test_lammps_spin_nopbc.py | 1 - source/lmp/tests/test_lammps_spin_nopbc_pt.py | 245 ++++++++++++++++++ 3 files changed, 254 insertions(+), 13 deletions(-) create mode 100644 source/lmp/tests/test_lammps_spin_nopbc_pt.py diff --git a/source/api_cc/src/DeepSpinPT.cc b/source/api_cc/src/DeepSpinPT.cc index 08e9a3023e..3ae0eb3bb7 100644 --- a/source/api_cc/src/DeepSpinPT.cc +++ b/source/api_cc/src/DeepSpinPT.cc @@ -179,7 +179,7 @@ void DeepSpinPT::compute(ENERGYVTYPE& ener, nlist_data.copy_from_nlist(lmp_list); nlist_data.shuffle_exclude_empty(fwd_map); nlist_data.padding(); - if (do_message_passing == 1 && nghost > 0) { + if (do_message_passing == 1) { int nswap = lmp_list.nswap; torch::Tensor sendproc_tensor = torch::from_blob(lmp_list.sendproc, {nswap}, int32_option); @@ -191,10 +191,13 @@ void DeepSpinPT::compute(ENERGYVTYPE& ener, torch::from_blob(lmp_list.recvnum, {nswap}, int32_option); torch::Tensor sendnum_tensor = torch::from_blob(lmp_list.sendnum, {nswap}, int32_option); - torch::Tensor communicator_tensor = torch::from_blob( - const_cast(lmp_list.world), {1}, torch::kInt64); - // torch::Tensor communicator_tensor = - // torch::tensor(lmp_list.world, int32_option); + torch::Tensor communicator_tensor; + if (lmp_list.world == 0) { + communicator_tensor = torch::empty({1}, torch::kInt64); + } else { + communicator_tensor = torch::from_blob( + const_cast(lmp_list.world), {1}, torch::kInt64); + } torch::Tensor nswap_tensor = torch::tensor(nswap, int32_option); int total_send = std::accumulate(lmp_list.sendnum, lmp_list.sendnum + nswap, 0); @@ -209,12 +212,6 @@ void DeepSpinPT::compute(ENERGYVTYPE& ener, comm_dict.insert("communicator", communicator_tensor); comm_dict.insert("has_spin", has_spin); } - if (do_message_passing == 1 && nghost == 0) { - // for the situation that no ghost atoms (e.g. serial nopbc) - // set the mapping arange(nloc) is enough - auto option = torch::TensorOptions().device(device).dtype(torch::kInt64); - mapping_tensor = at::arange(nloc_real, option).unsqueeze(0); - } } at::Tensor firstneigh = createNlistTensor2(nlist_data.jlist); firstneigh_tensor = firstneigh.to(torch::kInt64).to(device); @@ -237,7 +234,7 @@ void DeepSpinPT::compute(ENERGYVTYPE& ener, .to(device); } c10::Dict outputs = - (do_message_passing == 1 && nghost > 0) + (do_message_passing == 1) ? module .run_method("forward_lower", coord_wrapped_Tensor, atype_Tensor, spin_wrapped_Tensor, firstneigh_tensor, diff --git a/source/lmp/tests/test_lammps_spin_nopbc.py b/source/lmp/tests/test_lammps_spin_nopbc.py index bdbd98f258..e507a61add 100644 --- a/source/lmp/tests/test_lammps_spin_nopbc.py +++ b/source/lmp/tests/test_lammps_spin_nopbc.py @@ -108,7 +108,6 @@ def teardown_module(): def _lammps(data_file, units="metal") -> PyLammps: lammps = PyLammps() - lammps.plugin("load libdeepmd_lmp.so") lammps.units(units) lammps.boundary("f f f") lammps.atom_style("spin") diff --git a/source/lmp/tests/test_lammps_spin_nopbc_pt.py b/source/lmp/tests/test_lammps_spin_nopbc_pt.py new file mode 100644 index 0000000000..b34d2281a5 --- /dev/null +++ b/source/lmp/tests/test_lammps_spin_nopbc_pt.py @@ -0,0 +1,245 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import importlib +import os +import shutil +import subprocess as sp +import sys +import tempfile +from pathlib import ( + Path, +) + +import numpy as np +import pytest +from lammps import ( + PyLammps, +) +from write_lmp_data import ( + write_lmp_data_spin, +) + +pbtxt_file2 = ( + Path(__file__).parent.parent.parent / "tests" / "infer" / "deepspin_nlist-2.pbtxt" +) +pb_file = ( + Path(__file__).parent.parent.parent / "tests" / "infer" / "deeppot_dpa_spin.pth" +) +pb_file2 = Path(__file__).parent / "deepspin_nlist-2.pb" +system_file = Path(__file__).parent.parent.parent / "tests" +data_file = Path(__file__).parent / "data.lmp" +data_file_si = Path(__file__).parent / "data.si" +data_type_map_file = Path(__file__).parent / "data_type_map.lmp" +md_file = Path(__file__).parent / "md.out" + +expected_ae = np.array( + [-5.452114789070532, -5.480146653237549, -5.196470063744647, -5.196470063744647] +) +expected_e = np.sum(expected_ae) +expected_f = np.array( + [ + [0.1005891161568464, -0.0421386837954357, -0.1035159238420185], + [-0.1005891161568464, 0.0421386837954357, 0.1035159238420185], + [-0.0874023630887424, -0.0816522076223778, 0.1196032337003844], + [0.0874023630887424, 0.0816522076223778, -0.1196032337003844], + ] +) +expected_fm = np.array( + [ + [0.0248296941890119, -0.0104016286467482, 0.0166496777995534], + [-0.0407454346265244, 0.0170690334246251, 0.0337262181162752], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + ] +) + +expected_f2 = np.array( + [ + [-0.0020912362538459, 0.0008760584306652, -0.0002029714364812], + [0.0020912362538459, -0.0008760584306652, 0.0002029714364812], + [0.0020348523962324, 0.0019009805280592, -0.0027845348580022], + [-0.0020348523962324, -0.0019009805280592, 0.0027845348580022], + ] +) + +expected_fm2 = np.array( + [ + [0.0020796789544968, -0.0008712168593162, 0.0269545489546998], + [-0.0031170434556743, 0.0013057884746744, 0.0295063550138163], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + [0.0000000000000000, 0.00000000000000000, 0.00000000000000000], + ] +) + +box = np.array([0, 100, 0, 100, 0, 100, 0, 0, 0]) +coord = np.array( + [ + [12.83, 2.56, 2.18], + [12.09, 2.87, 2.74], + [3.51, 2.51, 2.60], + [4.27, 3.22, 1.56], + ] +) +spin = np.array( + [ + [0, 0, 1.2737], + [0, 0, 1.2737], + [0, 0, 0], + [0, 0, 0], + ] +) +type_NiO = np.array([1, 1, 2, 2]) + + +sp.check_output( + f"{sys.executable} -m deepmd convert-from pbtxt -i {pbtxt_file2.resolve()} -o {pb_file2.resolve()}".split() +) + + +def setup_module(): + write_lmp_data_spin(box, coord, spin, type_NiO, data_file) + + +def teardown_module(): + os.remove(data_file) + + +def _lammps(data_file, units="metal") -> PyLammps: + lammps = PyLammps() + lammps.units(units) + lammps.boundary("f f f") + lammps.atom_style("spin") + if units == "metal": + lammps.neighbor("2.0 bin") + else: + raise ValueError("units for spin should be metal") + lammps.neigh_modify("every 10 delay 0 check no") + lammps.read_data(data_file.resolve()) + if units == "metal": + lammps.mass("1 58") + lammps.mass("2 16") + else: + raise ValueError("units for spin should be metal") + if units == "metal": + lammps.timestep(0.0005) + else: + raise ValueError("units for spin should be metal") + lammps.fix("1 all nve") + return lammps + + +@pytest.fixture +def lammps(): + lmp = _lammps(data_file=data_file) + yield lmp + lmp.close() + + +def test_pair_deepmd(lammps): + lammps.pair_style(f"deepspin {pb_file.resolve()}") + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + lammps.run(1) + + +def test_pair_deepmd_model_devi(lammps): + lammps.pair_style( + f"deepspin {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1" + ) + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) + + +def test_pair_deepmd_model_devi_atomic_relative(lammps): + relative = 1.0 + lammps.pair_style( + f"deepspin {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1 atomic relative {relative}" + ) + lammps.pair_coeff("* *") + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + # load model devi + md = np.loadtxt(md_file.resolve()) + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + norm_spin = np.linalg.norm(np.mean([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_f /= norm + relative + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_fm /= norm_spin + relative + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) + + +@pytest.mark.skipif( + shutil.which("mpirun") is None, reason="MPI is not installed on this system" +) +@pytest.mark.skipif( + importlib.util.find_spec("mpi4py") is None, reason="mpi4py is not installed" +) +@pytest.mark.parametrize( + ("balance_args",), + [(["--balance"],), ([],)], +) +def test_pair_deepmd_mpi(balance_args: list): + with tempfile.NamedTemporaryFile() as f: + sp.check_call( + [ + "mpirun", + "-n", + "2", + sys.executable, + Path(__file__).parent / "run_mpi_pair_deepmd_spin.py", + data_file, + pb_file, + pb_file2, + md_file, + f.name, + *balance_args, + ] + ) + arr = np.loadtxt(f.name, ndmin=1) + pe = arr[0] + + relative = 1.0 + assert pe == pytest.approx(expected_e) + # load model devi + md = np.loadtxt(md_file.resolve()) + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + norm_spin = np.linalg.norm(np.mean([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1) + expected_md_f /= norm + relative + expected_md_fm = np.linalg.norm(np.std([expected_fm, expected_fm2], axis=0), axis=1) + expected_md_fm /= norm_spin + relative + assert md[4] == pytest.approx(np.max(expected_md_f)) + assert md[5] == pytest.approx(np.min(expected_md_f)) + assert md[6] == pytest.approx(np.mean(expected_md_f)) + assert md[7] == pytest.approx(np.max(expected_md_fm)) + assert md[8] == pytest.approx(np.min(expected_md_fm)) + assert md[9] == pytest.approx(np.mean(expected_md_fm)) From e572e376c93f1095d84552e4516048c1906fe3b5 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 16:51:07 +0800 Subject: [PATCH 153/193] Update c_api.h --- source/api_c/include/c_api.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index a1d61de50a..5638126e80 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -10,9 +10,9 @@ extern "C" { /** @file */ /** C API version. Bumped whenever the API is changed. - * @since API version 23 + * @since API version 22 */ -#define DP_C_API_VERSION 23 +#define DP_C_API_VERSION 24 /** * @brief Neighbor list. From 01e77454327607fef682945682a6f69cc07aaee9 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 16:58:14 +0800 Subject: [PATCH 154/193] Update test_deeppot_a.cc --- source/api_c/tests/test_deeppot_a.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/api_c/tests/test_deeppot_a.cc b/source/api_c/tests/test_deeppot_a.cc index e3a1f6aa66..92968024bf 100644 --- a/source/api_c/tests/test_deeppot_a.cc +++ b/source/api_c/tests/test_deeppot_a.cc @@ -168,22 +168,22 @@ TEST_F(TestInferDeepPotA, float_infer) { } TEST_F(TestInferDeepPotA, cutoff) { - double cutoff = DP_DeepBaseModelGetCutoff((DP_DeepBaseModel*)dp); + double cutoff = DP_DeepPotGetCutoff(dp); EXPECT_EQ(cutoff, 6.0); } TEST_F(TestInferDeepPotA, numb_types) { - int numb_types = DP_DeepBaseModelGetNumbTypes((DP_DeepBaseModel*)dp); + int numb_types = DP_DeepPotGetNumbTypes(dp); EXPECT_EQ(numb_types, 2); } TEST_F(TestInferDeepPotA, numb_types_spin) { - int numb_types_spin = DP_DeepBaseModelGetNumbTypesSpin((DP_DeepBaseModel*)dp); + int numb_types_spin = DP_DeepPotGetNumbTypesSpin(dp); EXPECT_EQ(numb_types_spin, 0); } TEST_F(TestInferDeepPotA, type_map) { - const char* type_map = DP_DeepBaseModelGetTypeMap((DP_DeepBaseModel*)dp); + const char* type_map = DP_DeepPotGetTypeMap((dp); char expected_type_map[] = "O H"; EXPECT_EQ(strcmp(type_map, expected_type_map), 0); DP_DeleteChar(type_map); From 3d1fce69cf068d0dd8e932e7f7d7249f7a65c248 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 16:59:28 +0800 Subject: [PATCH 155/193] Update test_deeppot_a.cc --- source/api_c/tests/test_deeppot_a.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/api_c/tests/test_deeppot_a.cc b/source/api_c/tests/test_deeppot_a.cc index 92968024bf..b4a9a81f92 100644 --- a/source/api_c/tests/test_deeppot_a.cc +++ b/source/api_c/tests/test_deeppot_a.cc @@ -183,7 +183,7 @@ TEST_F(TestInferDeepPotA, numb_types_spin) { } TEST_F(TestInferDeepPotA, type_map) { - const char* type_map = DP_DeepPotGetTypeMap((dp); + const char* type_map = DP_DeepPotGetTypeMap(dp); char expected_type_map[] = "O H"; EXPECT_EQ(strcmp(type_map, expected_type_map), 0); DP_DeleteChar(type_map); From 960f71a33306f1764c765d230cffb69d715c11b7 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:36:14 +0800 Subject: [PATCH 156/193] fix error handle --- source/lmp/pair_deepmd.cpp | 18 +++++++++--------- source/lmp/pair_deepspin.cpp | 18 +++++++++--------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index e1231f9ed6..46d88bfed5 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -501,9 +501,9 @@ void PairDeepMD::compute(int eflag, int vflag) { double **sp = atom->sp; double **fm = atom->fm; if (atom->sp_flag) { - std::cout << "Pair style 'deepmd' does not support spin atoms, please use " - "pair style 'deepspin' instead." - << std::endl; + throw std::runtime_error( + "Pair style 'deepmd' does not support spin atoms, please use pair " + "style 'deepspin' instead."); } vector dtype(nall); @@ -871,9 +871,9 @@ int PairDeepMD::pack_reverse_comm(int n, int first, double *buf) { m = 0; last = first + n; if (atom->sp_flag) { - std::cout << "Pair style 'deepmd' does not support spin atoms, please use " - "pair style 'deepspin' instead." - << std::endl; + throw std::runtime_error( + "Pair style 'deepmd' does not support spin atoms, please use pair " + "style 'deepspin' instead."); } else { for (i = first; i < last; i++) { for (int dd = 0; dd < numb_models; ++dd) { @@ -893,9 +893,9 @@ void PairDeepMD::unpack_reverse_comm(int n, int *list, double *buf) { m = 0; if (atom->sp_flag) { - std::cout << "Pair style 'deepmd' does not support spin atoms, please use " - "pair style 'deepspin' instead." - << std::endl; + throw std::runtime_error( + "Pair style 'deepmd' does not support spin atoms, please use pair " + "style 'deepspin' instead."); } else { for (i = 0; i < n; i++) { j = list[i]; diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp index 83f65052ce..0af1da05c1 100644 --- a/source/lmp/pair_deepspin.cpp +++ b/source/lmp/pair_deepspin.cpp @@ -509,9 +509,9 @@ void PairDeepSpin::compute(int eflag, int vflag) { } } } else { - std::cout << "Pair style 'deepspin' only supports spin atoms, please use " - "pair style 'deepmd' instead." - << std::endl; + throw std::runtime_error( + "Pair style 'deepspin' only supports spin atoms, please use pair style " + "'deepmd' instead."); } vector dtype(nall); @@ -907,9 +907,9 @@ int PairDeepSpin::pack_reverse_comm(int n, int first, double *buf) { m = 0; last = first + n; if (!atom->sp_flag) { - std::cout << "Pair style 'deepspin' only supports spin atoms, please use " - "pair style 'deepmd' instead." - << std::endl; + throw std::runtime_error( + "Pair style 'deepspin' only supports spin atoms, please use pair style " + "'deepmd' instead."); } else { for (i = first; i < last; i++) { for (int dd = 0; dd < numb_models; ++dd) { @@ -932,9 +932,9 @@ void PairDeepSpin::unpack_reverse_comm(int n, int *list, double *buf) { m = 0; if (!atom->sp_flag) { - std::cout << "Pair style 'deepspin' only supports spin atoms, please use " - "pair style 'deepmd' instead." - << std::endl; + throw std::runtime_error( + "Pair style 'deepspin' only supports spin atoms, please use pair style " + "'deepmd' instead."); } else { for (i = 0; i < n; i++) { j = list[i]; From 4d71247cd9d616a38d757c350b8414747c39ca19 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:39:40 +0800 Subject: [PATCH 157/193] rm spin from pairdeepmd --- source/lmp/pair_deepmd.cpp | 32 +++++++++----------------------- source/lmp/pair_deepspin.cpp | 31 +++++++------------------------ 2 files changed, 16 insertions(+), 47 deletions(-) diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 46d88bfed5..f428c62cd4 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -299,24 +299,15 @@ void PairDeepMD::settings(int narg, char **arg) { if (!is_restart) { fp.open(out_file); fp << scientific; - if (!atom->sp_flag) { - fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" - << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" - << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" - << setw(18 + 1) << "avg_devi_f"; - if (out_each) { - // at this time, we don't know how many atoms - fp << setw(18 + 1) << "atm_devi_f(N)"; - } - fp << endl; - } else { - fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" - << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" - << setw(18 + 1) << "max_devi_fr" << setw(18 + 1) << "min_devi_fr" - << setw(18 + 1) << "avg_devi_fr" << setw(18 + 1) << "max_devi_fm" - << setw(18 + 1) << "min_devi_fm" << setw(18 + 1) << "avg_devi_fm" - << endl; + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" + << setw(18 + 1) << "avg_devi_f"; + if (out_each) { + // at this time, we don't know how many atoms + fp << setw(18 + 1) << "atm_devi_f(N)"; } + fp << endl; } else { fp.open(out_file, std::ofstream::out | std::ofstream::app); fp << scientific; @@ -368,12 +359,7 @@ void PairDeepMD::settings(int narg, char **arg) { } } - // comm_reverse = numb_models * 3; - if (atom->sp_flag) { - comm_reverse = numb_models * 3 * 2; - } else { - comm_reverse = numb_models * 3; - } + comm_reverse = numb_models * 3; all_force.resize(numb_models); } diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp index 0af1da05c1..99eb546f2f 100644 --- a/source/lmp/pair_deepspin.cpp +++ b/source/lmp/pair_deepspin.cpp @@ -299,24 +299,12 @@ void PairDeepSpin::settings(int narg, char **arg) { if (!is_restart) { fp.open(out_file); fp << scientific; - if (!atom->sp_flag) { - fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" - << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" - << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" - << setw(18 + 1) << "avg_devi_f"; - if (out_each) { - // at this time, we don't know how many atoms - fp << setw(18 + 1) << "atm_devi_f(N)"; - } - fp << endl; - } else { - fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" - << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" - << setw(18 + 1) << "max_devi_fr" << setw(18 + 1) << "min_devi_fr" - << setw(18 + 1) << "avg_devi_fr" << setw(18 + 1) << "max_devi_fm" - << setw(18 + 1) << "min_devi_fm" << setw(18 + 1) << "avg_devi_fm" - << endl; - } + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_fr" << setw(18 + 1) << "min_devi_fr" + << setw(18 + 1) << "avg_devi_fr" << setw(18 + 1) << "max_devi_fm" + << setw(18 + 1) << "min_devi_fm" << setw(18 + 1) << "avg_devi_fm" + << endl; } else { fp.open(out_file, std::ofstream::out | std::ofstream::app); fp << scientific; @@ -368,12 +356,7 @@ void PairDeepSpin::settings(int narg, char **arg) { } } - // comm_reverse = numb_models * 3; - if (atom->sp_flag) { - comm_reverse = numb_models * 3 * 2; - } else { - comm_reverse = numb_models * 3; - } + comm_reverse = numb_models * 3 * 2; all_force.resize(numb_models); } From 41ad70815d7489b77e7036fa54cd25be8722d74f Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:44:44 +0800 Subject: [PATCH 158/193] make pair modification readable --- source/lmp/pair_deepmd.cpp | 1070 +++++++++++++++++----------------- source/lmp/pair_deepspin.cpp | 1036 ++++++++++++++++---------------- 2 files changed, 1053 insertions(+), 1053 deletions(-) diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index f428c62cd4..3351da2aca 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -94,530 +94,168 @@ PairDeepMD::~PairDeepMD() { // Ensure base class destructor is called } -static bool is_key(const string &input) { - vector keys; - keys.push_back("out_freq"); - keys.push_back("out_file"); - keys.push_back("fparam"); - keys.push_back("aparam"); - keys.push_back("fparam_from_compute"); - keys.push_back("aparam_from_compute"); - keys.push_back("ttm"); - keys.push_back("atomic"); - keys.push_back("relative"); - keys.push_back("relative_v"); - keys.push_back("virtual_len"); - keys.push_back("spin_norm"); +void PairDeepMD::compute(int eflag, int vflag) { + if (numb_models == 0) { + return; + } + // See + // https://docs.lammps.org/Developer_updating.html#use-ev-init-to-initialize-variables-derived-from-eflag-and-vflag + ev_init(eflag, vflag); + if (vflag_atom) { + error->all(FLERR, + "6-element atomic virial is not supported. Use compute " + "centroid/stress/atom command for 9-element atomic virial."); + } + bool do_ghost = true; + // dpa2 communication + commdata_ = (CommBrickDeepMD *)comm; + double **x = atom->x; + double **f = atom->f; + int *type = atom->type; + int nlocal = atom->nlocal; + int nghost = 0; + if (do_ghost) { + nghost = atom->nghost; + } + int nall = nlocal + nghost; + int newton_pair = force->newton_pair; - for (int ii = 0; ii < keys.size(); ++ii) { - if (input == keys[ii]) { - return true; - } + vector dspin(nall * 3, 0.); + vector dfm(nall * 3, 0.); + double **sp = atom->sp; + double **fm = atom->fm; + if (atom->sp_flag) { + throw std::runtime_error( + "Pair style 'deepmd' does not support spin atoms, please use pair " + "style 'deepspin' instead."); } - return false; -} -void PairDeepMD::settings(int narg, char **arg) { - if (narg <= 0) { - error->all(FLERR, "Illegal pair_style command"); + vector dtype(nall); + for (int ii = 0; ii < nall; ++ii) { + dtype[ii] = type_idx_map[type[ii] - 1]; } - vector models; - int iarg = 0; - while (iarg < narg) { - if (is_key(arg[iarg])) { - break; + double dener(0); + vector dforce(nall * 3); + vector dvirial(9, 0); + vector dcoord(nall * 3, 0.); + vector dbox(9, 0); + vector daparam; + + // get box + dbox[0] = domain->h[0] / dist_unit_cvt_factor; // xx + dbox[4] = domain->h[1] / dist_unit_cvt_factor; // yy + dbox[8] = domain->h[2] / dist_unit_cvt_factor; // zz + dbox[7] = domain->h[3] / dist_unit_cvt_factor; // zy + dbox[6] = domain->h[4] / dist_unit_cvt_factor; // zx + dbox[3] = domain->h[5] / dist_unit_cvt_factor; // yx + + // get coord + for (int ii = 0; ii < nall; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + dcoord[ii * 3 + dd] = + (x[ii][dd] - domain->boxlo[dd]) / dist_unit_cvt_factor; } - iarg++; - } - for (int ii = 0; ii < iarg; ++ii) { - models.push_back(arg[ii]); } - numb_models = models.size(); - if (numb_models == 1) { - try { - deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - cutoff = deep_pot.cutoff() * dist_unit_cvt_factor; - numb_types = deep_pot.numb_types(); - numb_types_spin = deep_pot.numb_types_spin(); - dim_fparam = deep_pot.dim_fparam(); - dim_aparam = deep_pot.dim_aparam(); - } else { - try { - deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); - deep_pot_model_devi.init(models, get_node_rank(), - get_file_content(models)); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); + + if (do_compute_aparam) { + make_aparam_from_compute(daparam); + } else if (aparam.size() > 0) { + // uniform aparam + make_uniform_aparam(daparam, aparam, nlocal); + } else if (do_ttm) { +#ifdef USE_TTM + if (dim_aparam > 0) { + make_ttm_aparam(daparam); + } else if (dim_fparam > 0) { + make_ttm_fparam(fparam); } - cutoff = deep_pot_model_devi.cutoff() * dist_unit_cvt_factor; - numb_types = deep_pot_model_devi.numb_types(); - numb_types_spin = deep_pot_model_devi.numb_types_spin(); - dim_fparam = deep_pot_model_devi.dim_fparam(); - dim_aparam = deep_pot_model_devi.dim_aparam(); - assert(cutoff == deep_pot.cutoff() * dist_unit_cvt_factor); - assert(numb_types == deep_pot.numb_types()); - assert(numb_types_spin == deep_pot.numb_types_spin()); - assert(dim_fparam == deep_pot.dim_fparam()); - assert(dim_aparam == deep_pot.dim_aparam()); +#endif } - out_freq = 100; - out_file = "model_devi.out"; - out_each = 0; - out_rel = 0; - eps = 0.; - fparam.clear(); - aparam.clear(); - while (iarg < narg) { - if (!is_key(arg[iarg])) { - error->all(FLERR, - "Illegal pair_style command\nwrong number of parameters\n"); + if (do_compute_fparam) { + make_fparam_from_compute(fparam); + } + + // int ago = numb_models > 1 ? 0 : neighbor->ago; + int ago = neighbor->ago; + if (numb_models > 1) { + if (multi_models_no_mod_devi && + (out_freq > 0 && update->ntimestep % out_freq == 0)) { + ago = 0; + } else if (multi_models_mod_devi && + (out_freq == 0 || update->ntimestep % out_freq != 0)) { + ago = 0; } - if (string(arg[iarg]) == string("out_freq")) { - if (iarg + 1 >= narg) { - error->all(FLERR, "Illegal out_freq, not provided"); - } - out_freq = atoi(arg[iarg + 1]); - iarg += 2; - } else if (string(arg[iarg]) == string("out_file")) { - if (iarg + 1 >= narg) { - error->all(FLERR, "Illegal out_file, not provided"); - } - out_file = string(arg[iarg + 1]); - iarg += 2; - } else if (string(arg[iarg]) == string("fparam")) { - for (int ii = 0; ii < dim_fparam; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - char tmp[1024]; - sprintf(tmp, "Illegal fparam, the dimension should be %d", - dim_fparam); - error->all(FLERR, tmp); + } + // compute + single_model = (numb_models == 1); + multi_models_no_mod_devi = + (numb_models > 1 && (out_freq == 0 || update->ntimestep % out_freq != 0)); + multi_models_mod_devi = + (numb_models > 1 && (out_freq > 0 && update->ntimestep % out_freq == 0)); + if (do_ghost) { + deepmd_compat::InputNlist lmp_list( + list->inum, list->ilist, list->numneigh, list->firstneigh, + commdata_->nswap, commdata_->sendnum, commdata_->recvnum, + commdata_->firstrecv, commdata_->sendlist, commdata_->sendproc, + commdata_->recvproc, &world); + deepmd_compat::InputNlist extend_lmp_list; + if (single_model || multi_models_no_mod_devi) { + // cvflag_atom is the right flag for the cvatom matrix + if (!(eflag_atom || cvflag_atom)) { + try { + deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox, nghost, + lmp_list, ago, fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); } - fparam.push_back(atof(arg[iarg + 1 + ii])); } - iarg += 1 + dim_fparam; - } else if (string(arg[iarg]) == string("aparam")) { - for (int ii = 0; ii < dim_aparam; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - char tmp[1024]; - sprintf(tmp, "Illegal aparam, the dimension should be %d", - dim_aparam); - error->all(FLERR, tmp); + // do atomic energy and virial + else { + vector deatom(nall * 1, 0); + vector dvatom(nall * 9, 0); + try { + deep_pot.compute(dener, dforce, dvirial, deatom, dvatom, dcoord, + dtype, dbox, nghost, lmp_list, ago, fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); } - aparam.push_back(atof(arg[iarg + 1 + ii])); - } - iarg += 1 + dim_aparam; - } else if (string(arg[iarg]) == string("ttm")) { -#ifdef USE_TTM - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, "invalid ttm key: should be ttm ttm_fix_id(str)"); - } - } - do_ttm = true; - ttm_fix_id = arg[iarg + 1]; - iarg += 1 + 1; -#else - error->all(FLERR, - "The deepmd-kit was compiled without support for TTM, please " - "rebuild it with LAMMPS version >=20210831"); -#endif - } - - /////////////////////////////////////////////// - // pair_style deepmd cp.pb fparam_from_compute TEMP - // compute TEMP all temp - ////////////////////////////////////////////// - else if (string(arg[iarg]) == string("fparam_from_compute")) { - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, - "invalid fparam_from_compute key: should be " - "fparam_from_compute compute_fparam_id(str)"); - } - } - do_compute_fparam = true; - compute_fparam_id = arg[iarg + 1]; - iarg += 1 + 1; - } else if (string(arg[iarg]) == string("aparam_from_compute")) { - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, - "invalid aparam_from_compute key: should be " - "aparam_from_compute compute_aparam_id(str)"); - } - } - do_compute_aparam = true; - compute_aparam_id = arg[iarg + 1]; - iarg += 1 + 1; - } else if (string(arg[iarg]) == string("atomic")) { - out_each = 1; - iarg += 1; - } else if (string(arg[iarg]) == string("relative")) { - out_rel = 1; - eps = atof(arg[iarg + 1]) / ener_unit_cvt_factor; - iarg += 2; - } else if (string(arg[iarg]) == string("relative_v")) { - out_rel_v = 1; - eps_v = atof(arg[iarg + 1]) / ener_unit_cvt_factor; - iarg += 2; - } else if (string(arg[iarg]) == string("virtual_len")) { - virtual_len.resize(numb_types_spin); - for (int ii = 0; ii < numb_types_spin; ++ii) { - virtual_len[ii] = atof(arg[iarg + ii + 1]); - } - iarg += numb_types_spin + 1; - } else if (string(arg[iarg]) == string("spin_norm")) { - spin_norm.resize(numb_types_spin); - for (int ii = 0; ii < numb_types_spin; ++ii) { - spin_norm[ii] = atof(arg[iarg + ii + 1]); - } - iarg += numb_types_spin + 1; - } - } - - if (out_freq < 0) { - error->all(FLERR, "Illegal out_freq, should be >= 0"); - } - if ((int)do_ttm + (int)do_compute_aparam + (int)(aparam.size() > 0) > 1) { - error->all(FLERR, - "aparam, aparam_from_compute, and ttm should NOT be set " - "simultaneously"); - } - if (do_compute_fparam && fparam.size() > 0) { - error->all( - FLERR, - "fparam and fparam_from_compute should NOT be set simultaneously"); - } - - if (comm->me == 0) { - if (numb_models > 1 && out_freq > 0) { - if (!is_restart) { - fp.open(out_file); - fp << scientific; - fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" - << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" - << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" - << setw(18 + 1) << "avg_devi_f"; - if (out_each) { - // at this time, we don't know how many atoms - fp << setw(18 + 1) << "atm_devi_f(N)"; - } - fp << endl; - } else { - fp.open(out_file, std::ofstream::out | std::ofstream::app); - fp << scientific; - } - } - string pre = " "; - cout << pre << ">>> Info of model(s):" << endl - << pre << "using " << setw(3) << numb_models << " model(s): "; - if (narg == 1) { - cout << arg[0] << " "; - } else { - for (int ii = 0; ii < models.size(); ++ii) { - cout << models[ii] << " "; - } - } - cout << endl - << pre << "rcut in model: " << cutoff << endl - << pre << "ntypes in model: " << numb_types << endl; - if (fparam.size() > 0) { - cout << pre << "using fparam(s): "; - for (int ii = 0; ii < dim_fparam; ++ii) { - cout << fparam[ii] << " "; - } - cout << endl; - } - if (do_compute_fparam) { - cout << pre << "using compute id (fparam): "; - cout << compute_fparam_id << " " << endl; - } - if (do_compute_aparam) { - cout << pre << "using compute id (aparam): "; - cout << compute_aparam_id << " " << endl; - } - if (aparam.size() > 0) { - cout << pre << "using aparam(s): "; - for (int ii = 0; ii < aparam.size(); ++ii) { - cout << aparam[ii] << " "; - } - cout << endl; - } - if (do_ttm) { - cout << pre << "using ttm fix: "; - cout << ttm_fix_id << " "; - if (dim_fparam > 0) { - cout << "(fparam)" << endl; - } else if (dim_aparam > 0) { - cout << "(aparam)" << endl; - } - } - } - - comm_reverse = numb_models * 3; - all_force.resize(numb_models); -} - -/* ---------------------------------------------------------------------- - set coeffs for one or more type pairs -------------------------------------------------------------------------- */ - -void PairDeepMD::coeff(int narg, char **arg) { - if (!allocated) { - allocate(); - } - - int n = atom->ntypes; - int ilo, ihi, jlo, jhi; - ilo = 0; - jlo = 0; - ihi = n; - jhi = n; - if (narg >= 2) { - utils::bounds(FLERR, arg[0], 1, atom->ntypes, ilo, ihi, error); - utils::bounds(FLERR, arg[1], 1, atom->ntypes, jlo, jhi, error); - if (ilo != 1 || jlo != 1 || ihi != n || jhi != n) { - error->all(FLERR, - "deepmd requires that the scale should be set to all atom " - "types, i.e. pair_coeff * *."); - } - } - if (narg <= 2) { - type_idx_map.resize(n); - for (int ii = 0; ii < n; ++ii) { - type_idx_map[ii] = ii; - } - } else { - int iarg = 2; - - // type_map is a list of strings with undetermined length - // note: although we have numb_types from the model, we do not require - // the number of types in the system matches that in the model - std::vector type_map; - std::string type_map_str; - deep_pot.get_type_map(type_map_str); - // convert the string to a vector of strings - std::istringstream iss(type_map_str); - std::string type_name; - while (iss >> type_name) { - type_map.push_back(type_name); - } - - type_idx_map.clear(); - type_names.clear(); - while (iarg < narg) { - std::string type_name = arg[iarg]; - type_names.push_back(type_name); - bool found_element = false; - for (int ii = 0; ii < type_map.size(); ++ii) { - if (type_map[ii] == type_name) { - type_idx_map.push_back(ii); - found_element = true; - break; - } - } - if (!found_element && "NULL" == type_name) { - type_idx_map.push_back(type_map.size()); // ghost type - found_element = true; - } - if (!found_element) { - error->all(FLERR, "pair_coeff: element " + type_name + - " not found in the model"); - } - iarg += 1; - } - numb_types = type_idx_map.size(); - if (numb_types < n) { - type_idx_map.resize(n); - for (int ii = numb_types; ii < n; ++ii) { - type_idx_map[ii] = -1; - } - } - } - for (int i = ilo; i <= ihi; i++) { - for (int j = MAX(jlo, i); j <= jhi; j++) { - setflag[i][j] = 1; - scale[i][j] = 1.0; - if (i > numb_types || j > numb_types) { - char warning_msg[1024]; - sprintf(warning_msg, - "Interaction between types %d and %d is set with deepmd, but " - "will be ignored.\n Deepmd model has only %d types, it only " - "computes the mulitbody interaction of types: 1-%d.", - i, j, numb_types, numb_types); - error->warning(FLERR, warning_msg); - } - } - } -} - -void PairDeepMD::compute(int eflag, int vflag) { - if (numb_models == 0) { - return; - } - // See - // https://docs.lammps.org/Developer_updating.html#use-ev-init-to-initialize-variables-derived-from-eflag-and-vflag - ev_init(eflag, vflag); - if (vflag_atom) { - error->all(FLERR, - "6-element atomic virial is not supported. Use compute " - "centroid/stress/atom command for 9-element atomic virial."); - } - bool do_ghost = true; - // dpa2 communication - commdata_ = (CommBrickDeepMD *)comm; - double **x = atom->x; - double **f = atom->f; - int *type = atom->type; - int nlocal = atom->nlocal; - int nghost = 0; - if (do_ghost) { - nghost = atom->nghost; - } - int nall = nlocal + nghost; - int newton_pair = force->newton_pair; - - vector dspin(nall * 3, 0.); - vector dfm(nall * 3, 0.); - double **sp = atom->sp; - double **fm = atom->fm; - if (atom->sp_flag) { - throw std::runtime_error( - "Pair style 'deepmd' does not support spin atoms, please use pair " - "style 'deepspin' instead."); - } - - vector dtype(nall); - for (int ii = 0; ii < nall; ++ii) { - dtype[ii] = type_idx_map[type[ii] - 1]; - } - - double dener(0); - vector dforce(nall * 3); - vector dvirial(9, 0); - vector dcoord(nall * 3, 0.); - vector dbox(9, 0); - vector daparam; - - // get box - dbox[0] = domain->h[0] / dist_unit_cvt_factor; // xx - dbox[4] = domain->h[1] / dist_unit_cvt_factor; // yy - dbox[8] = domain->h[2] / dist_unit_cvt_factor; // zz - dbox[7] = domain->h[3] / dist_unit_cvt_factor; // zy - dbox[6] = domain->h[4] / dist_unit_cvt_factor; // zx - dbox[3] = domain->h[5] / dist_unit_cvt_factor; // yx - - // get coord - for (int ii = 0; ii < nall; ++ii) { - for (int dd = 0; dd < 3; ++dd) { - dcoord[ii * 3 + dd] = - (x[ii][dd] - domain->boxlo[dd]) / dist_unit_cvt_factor; - } - } - - if (do_compute_aparam) { - make_aparam_from_compute(daparam); - } else if (aparam.size() > 0) { - // uniform aparam - make_uniform_aparam(daparam, aparam, nlocal); - } else if (do_ttm) { -#ifdef USE_TTM - if (dim_aparam > 0) { - make_ttm_aparam(daparam); - } else if (dim_fparam > 0) { - make_ttm_fparam(fparam); - } -#endif - } - - if (do_compute_fparam) { - make_fparam_from_compute(fparam); - } - - // int ago = numb_models > 1 ? 0 : neighbor->ago; - int ago = neighbor->ago; - if (numb_models > 1) { - if (multi_models_no_mod_devi && - (out_freq > 0 && update->ntimestep % out_freq == 0)) { - ago = 0; - } else if (multi_models_mod_devi && - (out_freq == 0 || update->ntimestep % out_freq != 0)) { - ago = 0; - } - } - // compute - single_model = (numb_models == 1); - multi_models_no_mod_devi = - (numb_models > 1 && (out_freq == 0 || update->ntimestep % out_freq != 0)); - multi_models_mod_devi = - (numb_models > 1 && (out_freq > 0 && update->ntimestep % out_freq == 0)); - if (do_ghost) { - deepmd_compat::InputNlist lmp_list( - list->inum, list->ilist, list->numneigh, list->firstneigh, - commdata_->nswap, commdata_->sendnum, commdata_->recvnum, - commdata_->firstrecv, commdata_->sendlist, commdata_->sendproc, - commdata_->recvproc, &world); - deepmd_compat::InputNlist extend_lmp_list; - if (single_model || multi_models_no_mod_devi) { - // cvflag_atom is the right flag for the cvatom matrix - if (!(eflag_atom || cvflag_atom)) { - try { - deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox, nghost, - lmp_list, ago, fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - } - // do atomic energy and virial - else { - vector deatom(nall * 1, 0); - vector dvatom(nall * 9, 0); - try { - deep_pot.compute(dener, dforce, dvirial, deatom, dvatom, dcoord, - dtype, dbox, nghost, lmp_list, ago, fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - if (eflag_atom) { - for (int ii = 0; ii < nlocal; ++ii) { - eatom[ii] += scale[1][1] * deatom[ii] * ener_unit_cvt_factor; - } - } - // Added by Davide Tisi 2020 - // interface the atomic virial computed by DeepMD - // with the one used in centroid atoms - if (cvflag_atom) { - for (int ii = 0; ii < nall; ++ii) { - // vatom[ii][0] += 1.0 * dvatom[9*ii+0]; - // vatom[ii][1] += 1.0 * dvatom[9*ii+4]; - // vatom[ii][2] += 1.0 * dvatom[9*ii+8]; - // vatom[ii][3] += 1.0 * dvatom[9*ii+3]; - // vatom[ii][4] += 1.0 * dvatom[9*ii+6]; - // vatom[ii][5] += 1.0 * dvatom[9*ii+7]; - cvatom[ii][0] += - scale[1][1] * dvatom[9 * ii + 0] * ener_unit_cvt_factor; // xx - cvatom[ii][1] += - scale[1][1] * dvatom[9 * ii + 4] * ener_unit_cvt_factor; // yy - cvatom[ii][2] += - scale[1][1] * dvatom[9 * ii + 8] * ener_unit_cvt_factor; // zz - cvatom[ii][3] += - scale[1][1] * dvatom[9 * ii + 3] * ener_unit_cvt_factor; // xy - cvatom[ii][4] += - scale[1][1] * dvatom[9 * ii + 6] * ener_unit_cvt_factor; // xz - cvatom[ii][5] += - scale[1][1] * dvatom[9 * ii + 7] * ener_unit_cvt_factor; // yz - cvatom[ii][6] += - scale[1][1] * dvatom[9 * ii + 1] * ener_unit_cvt_factor; // yx - cvatom[ii][7] += - scale[1][1] * dvatom[9 * ii + 2] * ener_unit_cvt_factor; // zx - cvatom[ii][8] += - scale[1][1] * dvatom[9 * ii + 5] * ener_unit_cvt_factor; // zy - } + if (eflag_atom) { + for (int ii = 0; ii < nlocal; ++ii) { + eatom[ii] += scale[1][1] * deatom[ii] * ener_unit_cvt_factor; + } + } + // Added by Davide Tisi 2020 + // interface the atomic virial computed by DeepMD + // with the one used in centroid atoms + if (cvflag_atom) { + for (int ii = 0; ii < nall; ++ii) { + // vatom[ii][0] += 1.0 * dvatom[9*ii+0]; + // vatom[ii][1] += 1.0 * dvatom[9*ii+4]; + // vatom[ii][2] += 1.0 * dvatom[9*ii+8]; + // vatom[ii][3] += 1.0 * dvatom[9*ii+3]; + // vatom[ii][4] += 1.0 * dvatom[9*ii+6]; + // vatom[ii][5] += 1.0 * dvatom[9*ii+7]; + cvatom[ii][0] += + scale[1][1] * dvatom[9 * ii + 0] * ener_unit_cvt_factor; // xx + cvatom[ii][1] += + scale[1][1] * dvatom[9 * ii + 4] * ener_unit_cvt_factor; // yy + cvatom[ii][2] += + scale[1][1] * dvatom[9 * ii + 8] * ener_unit_cvt_factor; // zz + cvatom[ii][3] += + scale[1][1] * dvatom[9 * ii + 3] * ener_unit_cvt_factor; // xy + cvatom[ii][4] += + scale[1][1] * dvatom[9 * ii + 6] * ener_unit_cvt_factor; // xz + cvatom[ii][5] += + scale[1][1] * dvatom[9 * ii + 7] * ener_unit_cvt_factor; // yz + cvatom[ii][6] += + scale[1][1] * dvatom[9 * ii + 1] * ener_unit_cvt_factor; // yx + cvatom[ii][7] += + scale[1][1] * dvatom[9 * ii + 2] * ener_unit_cvt_factor; // zx + cvatom[ii][8] += + scale[1][1] * dvatom[9 * ii + 5] * ener_unit_cvt_factor; // zy + } } } } else if (multi_models_mod_devi) { @@ -809,43 +447,405 @@ void PairDeepMD::compute(int eflag, int vflag) { } } } - if (rank == 0) { - fp << endl; + if (rank == 0) { + fp << endl; + } + } + } else { + error->all(FLERR, "unknown computational branch"); + } + } else { + if (numb_models == 1) { + try { + deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + } else { + error->all(FLERR, "Serial version does not support model devi"); + } + } + + // get force + for (int ii = 0; ii < nall; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + f[ii][dd] += scale[1][1] * dforce[3 * ii + dd] * force_unit_cvt_factor; + } + } + + // accumulate energy and virial + if (eflag) { + eng_vdwl += scale[1][1] * dener * ener_unit_cvt_factor; + } + if (vflag) { + virial[0] += 1.0 * dvirial[0] * scale[1][1] * ener_unit_cvt_factor; + virial[1] += 1.0 * dvirial[4] * scale[1][1] * ener_unit_cvt_factor; + virial[2] += 1.0 * dvirial[8] * scale[1][1] * ener_unit_cvt_factor; + virial[3] += 1.0 * dvirial[3] * scale[1][1] * ener_unit_cvt_factor; + virial[4] += 1.0 * dvirial[6] * scale[1][1] * ener_unit_cvt_factor; + virial[5] += 1.0 * dvirial[7] * scale[1][1] * ener_unit_cvt_factor; + } +} + +static bool is_key(const string &input) { + vector keys; + keys.push_back("out_freq"); + keys.push_back("out_file"); + keys.push_back("fparam"); + keys.push_back("aparam"); + keys.push_back("fparam_from_compute"); + keys.push_back("aparam_from_compute"); + keys.push_back("ttm"); + keys.push_back("atomic"); + keys.push_back("relative"); + keys.push_back("relative_v"); + keys.push_back("virtual_len"); + keys.push_back("spin_norm"); + + for (int ii = 0; ii < keys.size(); ++ii) { + if (input == keys[ii]) { + return true; + } + } + return false; +} + +void PairDeepMD::settings(int narg, char **arg) { + if (narg <= 0) { + error->all(FLERR, "Illegal pair_style command"); + } + + vector models; + int iarg = 0; + while (iarg < narg) { + if (is_key(arg[iarg])) { + break; + } + iarg++; + } + for (int ii = 0; ii < iarg; ++ii) { + models.push_back(arg[ii]); + } + numb_models = models.size(); + if (numb_models == 1) { + try { + deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + cutoff = deep_pot.cutoff() * dist_unit_cvt_factor; + numb_types = deep_pot.numb_types(); + numb_types_spin = deep_pot.numb_types_spin(); + dim_fparam = deep_pot.dim_fparam(); + dim_aparam = deep_pot.dim_aparam(); + } else { + try { + deep_pot.init(arg[0], get_node_rank(), get_file_content(arg[0])); + deep_pot_model_devi.init(models, get_node_rank(), + get_file_content(models)); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + cutoff = deep_pot_model_devi.cutoff() * dist_unit_cvt_factor; + numb_types = deep_pot_model_devi.numb_types(); + numb_types_spin = deep_pot_model_devi.numb_types_spin(); + dim_fparam = deep_pot_model_devi.dim_fparam(); + dim_aparam = deep_pot_model_devi.dim_aparam(); + assert(cutoff == deep_pot.cutoff() * dist_unit_cvt_factor); + assert(numb_types == deep_pot.numb_types()); + assert(numb_types_spin == deep_pot.numb_types_spin()); + assert(dim_fparam == deep_pot.dim_fparam()); + assert(dim_aparam == deep_pot.dim_aparam()); + } + + out_freq = 100; + out_file = "model_devi.out"; + out_each = 0; + out_rel = 0; + eps = 0.; + fparam.clear(); + aparam.clear(); + while (iarg < narg) { + if (!is_key(arg[iarg])) { + error->all(FLERR, + "Illegal pair_style command\nwrong number of parameters\n"); + } + if (string(arg[iarg]) == string("out_freq")) { + if (iarg + 1 >= narg) { + error->all(FLERR, "Illegal out_freq, not provided"); + } + out_freq = atoi(arg[iarg + 1]); + iarg += 2; + } else if (string(arg[iarg]) == string("out_file")) { + if (iarg + 1 >= narg) { + error->all(FLERR, "Illegal out_file, not provided"); + } + out_file = string(arg[iarg + 1]); + iarg += 2; + } else if (string(arg[iarg]) == string("fparam")) { + for (int ii = 0; ii < dim_fparam; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + char tmp[1024]; + sprintf(tmp, "Illegal fparam, the dimension should be %d", + dim_fparam); + error->all(FLERR, tmp); + } + fparam.push_back(atof(arg[iarg + 1 + ii])); + } + iarg += 1 + dim_fparam; + } else if (string(arg[iarg]) == string("aparam")) { + for (int ii = 0; ii < dim_aparam; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + char tmp[1024]; + sprintf(tmp, "Illegal aparam, the dimension should be %d", + dim_aparam); + error->all(FLERR, tmp); + } + aparam.push_back(atof(arg[iarg + 1 + ii])); + } + iarg += 1 + dim_aparam; + } else if (string(arg[iarg]) == string("ttm")) { +#ifdef USE_TTM + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, "invalid ttm key: should be ttm ttm_fix_id(str)"); + } + } + do_ttm = true; + ttm_fix_id = arg[iarg + 1]; + iarg += 1 + 1; +#else + error->all(FLERR, + "The deepmd-kit was compiled without support for TTM, please " + "rebuild it with LAMMPS version >=20210831"); +#endif + } + + /////////////////////////////////////////////// + // pair_style deepmd cp.pb fparam_from_compute TEMP + // compute TEMP all temp + ////////////////////////////////////////////// + else if (string(arg[iarg]) == string("fparam_from_compute")) { + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, + "invalid fparam_from_compute key: should be " + "fparam_from_compute compute_fparam_id(str)"); } } - } else { - error->all(FLERR, "unknown computational branch"); + do_compute_fparam = true; + compute_fparam_id = arg[iarg + 1]; + iarg += 1 + 1; + } else if (string(arg[iarg]) == string("aparam_from_compute")) { + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, + "invalid aparam_from_compute key: should be " + "aparam_from_compute compute_aparam_id(str)"); + } + } + do_compute_aparam = true; + compute_aparam_id = arg[iarg + 1]; + iarg += 1 + 1; + } else if (string(arg[iarg]) == string("atomic")) { + out_each = 1; + iarg += 1; + } else if (string(arg[iarg]) == string("relative")) { + out_rel = 1; + eps = atof(arg[iarg + 1]) / ener_unit_cvt_factor; + iarg += 2; + } else if (string(arg[iarg]) == string("relative_v")) { + out_rel_v = 1; + eps_v = atof(arg[iarg + 1]) / ener_unit_cvt_factor; + iarg += 2; + } else if (string(arg[iarg]) == string("virtual_len")) { + virtual_len.resize(numb_types_spin); + for (int ii = 0; ii < numb_types_spin; ++ii) { + virtual_len[ii] = atof(arg[iarg + ii + 1]); + } + iarg += numb_types_spin + 1; + } else if (string(arg[iarg]) == string("spin_norm")) { + spin_norm.resize(numb_types_spin); + for (int ii = 0; ii < numb_types_spin; ++ii) { + spin_norm[ii] = atof(arg[iarg + ii + 1]); + } + iarg += numb_types_spin + 1; } - } else { - if (numb_models == 1) { - try { - deep_pot.compute(dener, dforce, dvirial, dcoord, dtype, dbox); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); + } + + if (out_freq < 0) { + error->all(FLERR, "Illegal out_freq, should be >= 0"); + } + if ((int)do_ttm + (int)do_compute_aparam + (int)(aparam.size() > 0) > 1) { + error->all(FLERR, + "aparam, aparam_from_compute, and ttm should NOT be set " + "simultaneously"); + } + if (do_compute_fparam && fparam.size() > 0) { + error->all( + FLERR, + "fparam and fparam_from_compute should NOT be set simultaneously"); + } + + if (comm->me == 0) { + if (numb_models > 1 && out_freq > 0) { + if (!is_restart) { + fp.open(out_file); + fp << scientific; + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_f" << setw(18 + 1) << "min_devi_f" + << setw(18 + 1) << "avg_devi_f"; + if (out_each) { + // at this time, we don't know how many atoms + fp << setw(18 + 1) << "atm_devi_f(N)"; + } + fp << endl; + } else { + fp.open(out_file, std::ofstream::out | std::ofstream::app); + fp << scientific; } + } + string pre = " "; + cout << pre << ">>> Info of model(s):" << endl + << pre << "using " << setw(3) << numb_models << " model(s): "; + if (narg == 1) { + cout << arg[0] << " "; } else { - error->all(FLERR, "Serial version does not support model devi"); + for (int ii = 0; ii < models.size(); ++ii) { + cout << models[ii] << " "; + } + } + cout << endl + << pre << "rcut in model: " << cutoff << endl + << pre << "ntypes in model: " << numb_types << endl; + if (fparam.size() > 0) { + cout << pre << "using fparam(s): "; + for (int ii = 0; ii < dim_fparam; ++ii) { + cout << fparam[ii] << " "; + } + cout << endl; + } + if (do_compute_fparam) { + cout << pre << "using compute id (fparam): "; + cout << compute_fparam_id << " " << endl; + } + if (do_compute_aparam) { + cout << pre << "using compute id (aparam): "; + cout << compute_aparam_id << " " << endl; + } + if (aparam.size() > 0) { + cout << pre << "using aparam(s): "; + for (int ii = 0; ii < aparam.size(); ++ii) { + cout << aparam[ii] << " "; + } + cout << endl; + } + if (do_ttm) { + cout << pre << "using ttm fix: "; + cout << ttm_fix_id << " "; + if (dim_fparam > 0) { + cout << "(fparam)" << endl; + } else if (dim_aparam > 0) { + cout << "(aparam)" << endl; + } } } - // get force - for (int ii = 0; ii < nall; ++ii) { - for (int dd = 0; dd < 3; ++dd) { - f[ii][dd] += scale[1][1] * dforce[3 * ii + dd] * force_unit_cvt_factor; + comm_reverse = numb_models * 3; + all_force.resize(numb_models); +} + +/* ---------------------------------------------------------------------- + set coeffs for one or more type pairs +------------------------------------------------------------------------- */ + +void PairDeepMD::coeff(int narg, char **arg) { + if (!allocated) { + allocate(); + } + + int n = atom->ntypes; + int ilo, ihi, jlo, jhi; + ilo = 0; + jlo = 0; + ihi = n; + jhi = n; + if (narg >= 2) { + utils::bounds(FLERR, arg[0], 1, atom->ntypes, ilo, ihi, error); + utils::bounds(FLERR, arg[1], 1, atom->ntypes, jlo, jhi, error); + if (ilo != 1 || jlo != 1 || ihi != n || jhi != n) { + error->all(FLERR, + "deepmd requires that the scale should be set to all atom " + "types, i.e. pair_coeff * *."); } } + if (narg <= 2) { + type_idx_map.resize(n); + for (int ii = 0; ii < n; ++ii) { + type_idx_map[ii] = ii; + } + } else { + int iarg = 2; - // accumulate energy and virial - if (eflag) { - eng_vdwl += scale[1][1] * dener * ener_unit_cvt_factor; + // type_map is a list of strings with undetermined length + // note: although we have numb_types from the model, we do not require + // the number of types in the system matches that in the model + std::vector type_map; + std::string type_map_str; + deep_pot.get_type_map(type_map_str); + // convert the string to a vector of strings + std::istringstream iss(type_map_str); + std::string type_name; + while (iss >> type_name) { + type_map.push_back(type_name); + } + + type_idx_map.clear(); + type_names.clear(); + while (iarg < narg) { + std::string type_name = arg[iarg]; + type_names.push_back(type_name); + bool found_element = false; + for (int ii = 0; ii < type_map.size(); ++ii) { + if (type_map[ii] == type_name) { + type_idx_map.push_back(ii); + found_element = true; + break; + } + } + if (!found_element && "NULL" == type_name) { + type_idx_map.push_back(type_map.size()); // ghost type + found_element = true; + } + if (!found_element) { + error->all(FLERR, "pair_coeff: element " + type_name + + " not found in the model"); + } + iarg += 1; + } + numb_types = type_idx_map.size(); + if (numb_types < n) { + type_idx_map.resize(n); + for (int ii = numb_types; ii < n; ++ii) { + type_idx_map[ii] = -1; + } + } } - if (vflag) { - virial[0] += 1.0 * dvirial[0] * scale[1][1] * ener_unit_cvt_factor; - virial[1] += 1.0 * dvirial[4] * scale[1][1] * ener_unit_cvt_factor; - virial[2] += 1.0 * dvirial[8] * scale[1][1] * ener_unit_cvt_factor; - virial[3] += 1.0 * dvirial[3] * scale[1][1] * ener_unit_cvt_factor; - virial[4] += 1.0 * dvirial[6] * scale[1][1] * ener_unit_cvt_factor; - virial[5] += 1.0 * dvirial[7] * scale[1][1] * ener_unit_cvt_factor; + for (int i = ilo; i <= ihi; i++) { + for (int j = MAX(jlo, i); j <= jhi; j++) { + setflag[i][j] = 1; + scale[i][j] = 1.0; + if (i > numb_types || j > numb_types) { + char warning_msg[1024]; + sprintf(warning_msg, + "Interaction between types %d and %d is set with deepmd, but " + "will be ignored.\n Deepmd model has only %d types, it only " + "computes the mulitbody interaction of types: 1-%d.", + i, j, numb_types, numb_types); + error->warning(FLERR, warning_msg); + } + } } } diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp index 99eb546f2f..8c289dcb97 100644 --- a/source/lmp/pair_deepspin.cpp +++ b/source/lmp/pair_deepspin.cpp @@ -94,506 +94,147 @@ PairDeepSpin::~PairDeepSpin() { // Ensure base class destructor is called } -static bool is_key(const string &input) { - vector keys; - keys.push_back("out_freq"); - keys.push_back("out_file"); - keys.push_back("fparam"); - keys.push_back("aparam"); - keys.push_back("fparam_from_compute"); - keys.push_back("aparam_from_compute"); - keys.push_back("ttm"); - keys.push_back("atomic"); - keys.push_back("relative"); - keys.push_back("relative_v"); - keys.push_back("virtual_len"); - keys.push_back("spin_norm"); +void PairDeepSpin::compute(int eflag, int vflag) { + if (numb_models == 0) { + return; + } + // See + // https://docs.lammps.org/Developer_updating.html#use-ev-init-to-initialize-variables-derived-from-eflag-and-vflag + ev_init(eflag, vflag); + if (vflag_atom) { + error->all(FLERR, + "6-element atomic virial is not supported. Use compute " + "centroid/stress/atom command for 9-element atomic virial."); + } + bool do_ghost = true; + // dpa2 communication + commdata_ = (CommBrickDeepSpin *)comm; + double **x = atom->x; + double **f = atom->f; + int *type = atom->type; + int nlocal = atom->nlocal; + int nghost = 0; + if (do_ghost) { + nghost = atom->nghost; + } + int nall = nlocal + nghost; + int newton_pair = force->newton_pair; - for (int ii = 0; ii < keys.size(); ++ii) { - if (input == keys[ii]) { - return true; + vector dspin(nall * 3, 0.); + vector dfm(nall * 3, 0.); + double **sp = atom->sp; + double **fm = atom->fm; + // spin initialize + if (atom->sp_flag) { + // get spin + for (int ii = 0; ii < nall; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + dspin[ii * 3 + dd] = sp[ii][dd] * sp[ii][3]; // get real spin vector + } } + } else { + throw std::runtime_error( + "Pair style 'deepspin' only supports spin atoms, please use pair style " + "'deepmd' instead."); } - return false; -} -void PairDeepSpin::settings(int narg, char **arg) { - if (narg <= 0) { - error->all(FLERR, "Illegal pair_style command"); + vector dtype(nall); + for (int ii = 0; ii < nall; ++ii) { + dtype[ii] = type_idx_map[type[ii] - 1]; } - vector models; - int iarg = 0; - while (iarg < narg) { - if (is_key(arg[iarg])) { - break; + double dener(0); + vector dforce(nall * 3); + vector dforce_mag(nall * 3); + vector dvirial(9, 0); + vector dcoord(nall * 3, 0.); + vector dbox(9, 0); + vector daparam; + + // get box + dbox[0] = domain->h[0] / dist_unit_cvt_factor; // xx + dbox[4] = domain->h[1] / dist_unit_cvt_factor; // yy + dbox[8] = domain->h[2] / dist_unit_cvt_factor; // zz + dbox[7] = domain->h[3] / dist_unit_cvt_factor; // zy + dbox[6] = domain->h[4] / dist_unit_cvt_factor; // zx + dbox[3] = domain->h[5] / dist_unit_cvt_factor; // yx + + // get coord + for (int ii = 0; ii < nall; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + dcoord[ii * 3 + dd] = + (x[ii][dd] - domain->boxlo[dd]) / dist_unit_cvt_factor; } - iarg++; } - for (int ii = 0; ii < iarg; ++ii) { - models.push_back(arg[ii]); - } - numb_models = models.size(); - if (numb_models == 1) { - try { - deep_spin.init(arg[0], get_node_rank(), get_file_content(arg[0])); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - cutoff = deep_spin.cutoff() * dist_unit_cvt_factor; - numb_types = deep_spin.numb_types(); - numb_types_spin = deep_spin.numb_types_spin(); - dim_fparam = deep_spin.dim_fparam(); - dim_aparam = deep_spin.dim_aparam(); - } else { - try { - deep_spin.init(arg[0], get_node_rank(), get_file_content(arg[0])); - deep_spin_model_devi.init(models, get_node_rank(), - get_file_content(models)); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); + + if (do_compute_aparam) { + make_aparam_from_compute(daparam); + } else if (aparam.size() > 0) { + // uniform aparam + make_uniform_aparam(daparam, aparam, nlocal); + } else if (do_ttm) { +#ifdef USE_TTM + if (dim_aparam > 0) { + make_ttm_aparam(daparam); + } else if (dim_fparam > 0) { + make_ttm_fparam(fparam); } - cutoff = deep_spin_model_devi.cutoff() * dist_unit_cvt_factor; - numb_types = deep_spin_model_devi.numb_types(); - numb_types_spin = deep_spin_model_devi.numb_types_spin(); - dim_fparam = deep_spin_model_devi.dim_fparam(); - dim_aparam = deep_spin_model_devi.dim_aparam(); - assert(cutoff == deep_spin.cutoff() * dist_unit_cvt_factor); - assert(numb_types == deep_spin.numb_types()); - assert(numb_types_spin == deep_spin.numb_types_spin()); - assert(dim_fparam == deep_spin.dim_fparam()); - assert(dim_aparam == deep_spin.dim_aparam()); +#endif } - out_freq = 100; - out_file = "model_devi.out"; - out_each = 0; - out_rel = 0; - eps = 0.; - fparam.clear(); - aparam.clear(); - while (iarg < narg) { - if (!is_key(arg[iarg])) { - error->all(FLERR, - "Illegal pair_style command\nwrong number of parameters\n"); + if (do_compute_fparam) { + make_fparam_from_compute(fparam); + } + + // int ago = numb_models > 1 ? 0 : neighbor->ago; + int ago = neighbor->ago; + if (numb_models > 1) { + if (multi_models_no_mod_devi && + (out_freq > 0 && update->ntimestep % out_freq == 0)) { + ago = 0; + } else if (multi_models_mod_devi && + (out_freq == 0 || update->ntimestep % out_freq != 0)) { + ago = 0; } - if (string(arg[iarg]) == string("out_freq")) { - if (iarg + 1 >= narg) { - error->all(FLERR, "Illegal out_freq, not provided"); - } - out_freq = atoi(arg[iarg + 1]); - iarg += 2; - } else if (string(arg[iarg]) == string("out_file")) { - if (iarg + 1 >= narg) { - error->all(FLERR, "Illegal out_file, not provided"); - } - out_file = string(arg[iarg + 1]); - iarg += 2; - } else if (string(arg[iarg]) == string("fparam")) { - for (int ii = 0; ii < dim_fparam; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - char tmp[1024]; - sprintf(tmp, "Illegal fparam, the dimension should be %d", - dim_fparam); - error->all(FLERR, tmp); - } - fparam.push_back(atof(arg[iarg + 1 + ii])); - } - iarg += 1 + dim_fparam; - } else if (string(arg[iarg]) == string("aparam")) { - for (int ii = 0; ii < dim_aparam; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - char tmp[1024]; - sprintf(tmp, "Illegal aparam, the dimension should be %d", - dim_aparam); - error->all(FLERR, tmp); + } + // compute + single_model = (numb_models == 1); + multi_models_no_mod_devi = + (numb_models > 1 && (out_freq == 0 || update->ntimestep % out_freq != 0)); + multi_models_mod_devi = + (numb_models > 1 && (out_freq > 0 && update->ntimestep % out_freq == 0)); + if (do_ghost) { + deepmd_compat::InputNlist lmp_list( + list->inum, list->ilist, list->numneigh, list->firstneigh, + commdata_->nswap, commdata_->sendnum, commdata_->recvnum, + commdata_->firstrecv, commdata_->sendlist, commdata_->sendproc, + commdata_->recvproc, &world); + if (single_model || multi_models_no_mod_devi) { + // cvflag_atom is the right flag for the cvatom matrix + if (!(eflag_atom || cvflag_atom)) { + try { + deep_spin.compute(dener, dforce, dforce_mag, dvirial, dcoord, dspin, + dtype, dbox, nghost, lmp_list, ago, fparam, + daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); } - aparam.push_back(atof(arg[iarg + 1 + ii])); } - iarg += 1 + dim_aparam; - } else if (string(arg[iarg]) == string("ttm")) { -#ifdef USE_TTM - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, "invalid ttm key: should be ttm ttm_fix_id(str)"); + // do atomic energy and virial + else { + vector deatom(nall * 1, 0); + vector dvatom(nall * 9, 0); + try { + deep_spin.compute(dener, dforce, dforce_mag, dvirial, deatom, dvatom, + dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, + fparam, daparam); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); } - } - do_ttm = true; - ttm_fix_id = arg[iarg + 1]; - iarg += 1 + 1; -#else - error->all(FLERR, - "The deepmd-kit was compiled without support for TTM, please " - "rebuild it with LAMMPS version >=20210831"); -#endif - } - - /////////////////////////////////////////////// - // pair_style deepmd cp.pb fparam_from_compute TEMP - // compute TEMP all temp - ////////////////////////////////////////////// - else if (string(arg[iarg]) == string("fparam_from_compute")) { - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, - "invalid fparam_from_compute key: should be " - "fparam_from_compute compute_fparam_id(str)"); - } - } - do_compute_fparam = true; - compute_fparam_id = arg[iarg + 1]; - iarg += 1 + 1; - } else if (string(arg[iarg]) == string("aparam_from_compute")) { - for (int ii = 0; ii < 1; ++ii) { - if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { - error->all(FLERR, - "invalid aparam_from_compute key: should be " - "aparam_from_compute compute_aparam_id(str)"); - } - } - do_compute_aparam = true; - compute_aparam_id = arg[iarg + 1]; - iarg += 1 + 1; - } else if (string(arg[iarg]) == string("atomic")) { - out_each = 1; - iarg += 1; - } else if (string(arg[iarg]) == string("relative")) { - out_rel = 1; - eps = atof(arg[iarg + 1]) / ener_unit_cvt_factor; - iarg += 2; - } else if (string(arg[iarg]) == string("relative_v")) { - out_rel_v = 1; - eps_v = atof(arg[iarg + 1]) / ener_unit_cvt_factor; - iarg += 2; - } else if (string(arg[iarg]) == string("virtual_len")) { - virtual_len.resize(numb_types_spin); - for (int ii = 0; ii < numb_types_spin; ++ii) { - virtual_len[ii] = atof(arg[iarg + ii + 1]); - } - iarg += numb_types_spin + 1; - } else if (string(arg[iarg]) == string("spin_norm")) { - spin_norm.resize(numb_types_spin); - for (int ii = 0; ii < numb_types_spin; ++ii) { - spin_norm[ii] = atof(arg[iarg + ii + 1]); - } - iarg += numb_types_spin + 1; - } - } - - if (out_freq < 0) { - error->all(FLERR, "Illegal out_freq, should be >= 0"); - } - if ((int)do_ttm + (int)do_compute_aparam + (int)(aparam.size() > 0) > 1) { - error->all(FLERR, - "aparam, aparam_from_compute, and ttm should NOT be set " - "simultaneously"); - } - if (do_compute_fparam && fparam.size() > 0) { - error->all( - FLERR, - "fparam and fparam_from_compute should NOT be set simultaneously"); - } - - if (comm->me == 0) { - if (numb_models > 1 && out_freq > 0) { - if (!is_restart) { - fp.open(out_file); - fp << scientific; - fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" - << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" - << setw(18 + 1) << "max_devi_fr" << setw(18 + 1) << "min_devi_fr" - << setw(18 + 1) << "avg_devi_fr" << setw(18 + 1) << "max_devi_fm" - << setw(18 + 1) << "min_devi_fm" << setw(18 + 1) << "avg_devi_fm" - << endl; - } else { - fp.open(out_file, std::ofstream::out | std::ofstream::app); - fp << scientific; - } - } - string pre = " "; - cout << pre << ">>> Info of model(s):" << endl - << pre << "using " << setw(3) << numb_models << " model(s): "; - if (narg == 1) { - cout << arg[0] << " "; - } else { - for (int ii = 0; ii < models.size(); ++ii) { - cout << models[ii] << " "; - } - } - cout << endl - << pre << "rcut in model: " << cutoff << endl - << pre << "ntypes in model: " << numb_types << endl; - if (fparam.size() > 0) { - cout << pre << "using fparam(s): "; - for (int ii = 0; ii < dim_fparam; ++ii) { - cout << fparam[ii] << " "; - } - cout << endl; - } - if (do_compute_fparam) { - cout << pre << "using compute id (fparam): "; - cout << compute_fparam_id << " " << endl; - } - if (do_compute_aparam) { - cout << pre << "using compute id (aparam): "; - cout << compute_aparam_id << " " << endl; - } - if (aparam.size() > 0) { - cout << pre << "using aparam(s): "; - for (int ii = 0; ii < aparam.size(); ++ii) { - cout << aparam[ii] << " "; - } - cout << endl; - } - if (do_ttm) { - cout << pre << "using ttm fix: "; - cout << ttm_fix_id << " "; - if (dim_fparam > 0) { - cout << "(fparam)" << endl; - } else if (dim_aparam > 0) { - cout << "(aparam)" << endl; - } - } - } - - comm_reverse = numb_models * 3 * 2; - all_force.resize(numb_models); -} - -/* ---------------------------------------------------------------------- - set coeffs for one or more type pairs -------------------------------------------------------------------------- */ - -void PairDeepSpin::coeff(int narg, char **arg) { - if (!allocated) { - allocate(); - } - - int n = atom->ntypes; - int ilo, ihi, jlo, jhi; - ilo = 0; - jlo = 0; - ihi = n; - jhi = n; - if (narg >= 2) { - utils::bounds(FLERR, arg[0], 1, atom->ntypes, ilo, ihi, error); - utils::bounds(FLERR, arg[1], 1, atom->ntypes, jlo, jhi, error); - if (ilo != 1 || jlo != 1 || ihi != n || jhi != n) { - error->all(FLERR, - "deepmd requires that the scale should be set to all atom " - "types, i.e. pair_coeff * *."); - } - } - if (narg <= 2) { - type_idx_map.resize(n); - for (int ii = 0; ii < n; ++ii) { - type_idx_map[ii] = ii; - } - } else { - int iarg = 2; - - // type_map is a list of strings with undetermined length - // note: although we have numb_types from the model, we do not require - // the number of types in the system matches that in the model - std::vector type_map; - std::string type_map_str; - deep_spin.get_type_map(type_map_str); - // convert the string to a vector of strings - std::istringstream iss(type_map_str); - std::string type_name; - while (iss >> type_name) { - type_map.push_back(type_name); - } - - type_idx_map.clear(); - type_names.clear(); - while (iarg < narg) { - std::string type_name = arg[iarg]; - type_names.push_back(type_name); - bool found_element = false; - for (int ii = 0; ii < type_map.size(); ++ii) { - if (type_map[ii] == type_name) { - type_idx_map.push_back(ii); - found_element = true; - break; - } - } - if (!found_element && "NULL" == type_name) { - type_idx_map.push_back(type_map.size()); // ghost type - found_element = true; - } - if (!found_element) { - error->all(FLERR, "pair_coeff: element " + type_name + - " not found in the model"); - } - iarg += 1; - } - numb_types = type_idx_map.size(); - if (numb_types < n) { - type_idx_map.resize(n); - for (int ii = numb_types; ii < n; ++ii) { - type_idx_map[ii] = -1; - } - } - } - for (int i = ilo; i <= ihi; i++) { - for (int j = MAX(jlo, i); j <= jhi; j++) { - setflag[i][j] = 1; - scale[i][j] = 1.0; - if (i > numb_types || j > numb_types) { - char warning_msg[1024]; - sprintf(warning_msg, - "Interaction between types %d and %d is set with deepmd, but " - "will be ignored.\n Deepmd model has only %d types, it only " - "computes the mulitbody interaction of types: 1-%d.", - i, j, numb_types, numb_types); - error->warning(FLERR, warning_msg); - } - } - } -} - -void PairDeepSpin::compute(int eflag, int vflag) { - if (numb_models == 0) { - return; - } - // See - // https://docs.lammps.org/Developer_updating.html#use-ev-init-to-initialize-variables-derived-from-eflag-and-vflag - ev_init(eflag, vflag); - if (vflag_atom) { - error->all(FLERR, - "6-element atomic virial is not supported. Use compute " - "centroid/stress/atom command for 9-element atomic virial."); - } - bool do_ghost = true; - // dpa2 communication - commdata_ = (CommBrickDeepSpin *)comm; - double **x = atom->x; - double **f = atom->f; - int *type = atom->type; - int nlocal = atom->nlocal; - int nghost = 0; - if (do_ghost) { - nghost = atom->nghost; - } - int nall = nlocal + nghost; - int newton_pair = force->newton_pair; - - vector dspin(nall * 3, 0.); - vector dfm(nall * 3, 0.); - double **sp = atom->sp; - double **fm = atom->fm; - // spin initialize - if (atom->sp_flag) { - // get spin - for (int ii = 0; ii < nall; ++ii) { - for (int dd = 0; dd < 3; ++dd) { - dspin[ii * 3 + dd] = sp[ii][dd] * sp[ii][3]; // get real spin vector - } - } - } else { - throw std::runtime_error( - "Pair style 'deepspin' only supports spin atoms, please use pair style " - "'deepmd' instead."); - } - - vector dtype(nall); - for (int ii = 0; ii < nall; ++ii) { - dtype[ii] = type_idx_map[type[ii] - 1]; - } - - double dener(0); - vector dforce(nall * 3); - vector dforce_mag(nall * 3); - vector dvirial(9, 0); - vector dcoord(nall * 3, 0.); - vector dbox(9, 0); - vector daparam; - - // get box - dbox[0] = domain->h[0] / dist_unit_cvt_factor; // xx - dbox[4] = domain->h[1] / dist_unit_cvt_factor; // yy - dbox[8] = domain->h[2] / dist_unit_cvt_factor; // zz - dbox[7] = domain->h[3] / dist_unit_cvt_factor; // zy - dbox[6] = domain->h[4] / dist_unit_cvt_factor; // zx - dbox[3] = domain->h[5] / dist_unit_cvt_factor; // yx - - // get coord - for (int ii = 0; ii < nall; ++ii) { - for (int dd = 0; dd < 3; ++dd) { - dcoord[ii * 3 + dd] = - (x[ii][dd] - domain->boxlo[dd]) / dist_unit_cvt_factor; - } - } - - if (do_compute_aparam) { - make_aparam_from_compute(daparam); - } else if (aparam.size() > 0) { - // uniform aparam - make_uniform_aparam(daparam, aparam, nlocal); - } else if (do_ttm) { -#ifdef USE_TTM - if (dim_aparam > 0) { - make_ttm_aparam(daparam); - } else if (dim_fparam > 0) { - make_ttm_fparam(fparam); - } -#endif - } - - if (do_compute_fparam) { - make_fparam_from_compute(fparam); - } - - // int ago = numb_models > 1 ? 0 : neighbor->ago; - int ago = neighbor->ago; - if (numb_models > 1) { - if (multi_models_no_mod_devi && - (out_freq > 0 && update->ntimestep % out_freq == 0)) { - ago = 0; - } else if (multi_models_mod_devi && - (out_freq == 0 || update->ntimestep % out_freq != 0)) { - ago = 0; - } - } - // compute - single_model = (numb_models == 1); - multi_models_no_mod_devi = - (numb_models > 1 && (out_freq == 0 || update->ntimestep % out_freq != 0)); - multi_models_mod_devi = - (numb_models > 1 && (out_freq > 0 && update->ntimestep % out_freq == 0)); - if (do_ghost) { - deepmd_compat::InputNlist lmp_list( - list->inum, list->ilist, list->numneigh, list->firstneigh, - commdata_->nswap, commdata_->sendnum, commdata_->recvnum, - commdata_->firstrecv, commdata_->sendlist, commdata_->sendproc, - commdata_->recvproc, &world); - if (single_model || multi_models_no_mod_devi) { - // cvflag_atom is the right flag for the cvatom matrix - if (!(eflag_atom || cvflag_atom)) { - try { - deep_spin.compute(dener, dforce, dforce_mag, dvirial, dcoord, dspin, - dtype, dbox, nghost, lmp_list, ago, fparam, - daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - } - // do atomic energy and virial - else { - vector deatom(nall * 1, 0); - vector dvatom(nall * 9, 0); - try { - deep_spin.compute(dener, dforce, dforce_mag, dvirial, deatom, dvatom, - dcoord, dspin, dtype, dbox, nghost, lmp_list, ago, - fparam, daparam); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); - } - if (eflag_atom) { - for (int ii = 0; ii < nlocal; ++ii) { - eatom[ii] += scale[1][1] * deatom[ii] * ener_unit_cvt_factor; - } + if (eflag_atom) { + for (int ii = 0; ii < nlocal; ++ii) { + eatom[ii] += scale[1][1] * deatom[ii] * ener_unit_cvt_factor; + } } // Added by Davide Tisi 2020 // interface the atomic virial computed by DeepMD @@ -833,52 +474,411 @@ void PairDeepSpin::compute(int eflag, int vflag) { } } } - if (rank == 0) { - fp << endl; + if (rank == 0) { + fp << endl; + } + } + } else { + error->all(FLERR, "unknown computational branch"); + } + } else { + if (numb_models == 1) { + try { + deep_spin.compute(dener, dforce, dforce_mag, dvirial, dcoord, dspin, + dtype, dbox); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + } else { + error->all(FLERR, "Serial version does not support model devi"); + } + } + + // get force + // unit_factor = hbar / spin_norm; + const double hbar = 6.5821191e-04; + for (int ii = 0; ii < nall; ++ii) { + for (int dd = 0; dd < 3; ++dd) { + f[ii][dd] += scale[1][1] * dforce[3 * ii + dd] * force_unit_cvt_factor; + fm[ii][dd] += scale[1][1] * dforce_mag[3 * ii + dd] / (hbar / sp[ii][3]) * + force_unit_cvt_factor; + } + } + + std::map().swap(new_idx_map); + std::map().swap(old_idx_map); + // malloc_trim(0); + + // accumulate energy and virial + if (eflag) { + eng_vdwl += scale[1][1] * dener * ener_unit_cvt_factor; + } + if (vflag) { + virial[0] += 1.0 * dvirial[0] * scale[1][1] * ener_unit_cvt_factor; + virial[1] += 1.0 * dvirial[4] * scale[1][1] * ener_unit_cvt_factor; + virial[2] += 1.0 * dvirial[8] * scale[1][1] * ener_unit_cvt_factor; + virial[3] += 1.0 * dvirial[3] * scale[1][1] * ener_unit_cvt_factor; + virial[4] += 1.0 * dvirial[6] * scale[1][1] * ener_unit_cvt_factor; + virial[5] += 1.0 * dvirial[7] * scale[1][1] * ener_unit_cvt_factor; + } +} + +static bool is_key(const string &input) { + vector keys; + keys.push_back("out_freq"); + keys.push_back("out_file"); + keys.push_back("fparam"); + keys.push_back("aparam"); + keys.push_back("fparam_from_compute"); + keys.push_back("aparam_from_compute"); + keys.push_back("ttm"); + keys.push_back("atomic"); + keys.push_back("relative"); + keys.push_back("relative_v"); + keys.push_back("virtual_len"); + keys.push_back("spin_norm"); + + for (int ii = 0; ii < keys.size(); ++ii) { + if (input == keys[ii]) { + return true; + } + } + return false; +} + +void PairDeepSpin::settings(int narg, char **arg) { + if (narg <= 0) { + error->all(FLERR, "Illegal pair_style command"); + } + + vector models; + int iarg = 0; + while (iarg < narg) { + if (is_key(arg[iarg])) { + break; + } + iarg++; + } + for (int ii = 0; ii < iarg; ++ii) { + models.push_back(arg[ii]); + } + numb_models = models.size(); + if (numb_models == 1) { + try { + deep_spin.init(arg[0], get_node_rank(), get_file_content(arg[0])); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + cutoff = deep_spin.cutoff() * dist_unit_cvt_factor; + numb_types = deep_spin.numb_types(); + numb_types_spin = deep_spin.numb_types_spin(); + dim_fparam = deep_spin.dim_fparam(); + dim_aparam = deep_spin.dim_aparam(); + } else { + try { + deep_spin.init(arg[0], get_node_rank(), get_file_content(arg[0])); + deep_spin_model_devi.init(models, get_node_rank(), + get_file_content(models)); + } catch (deepmd_compat::deepmd_exception &e) { + error->one(FLERR, e.what()); + } + cutoff = deep_spin_model_devi.cutoff() * dist_unit_cvt_factor; + numb_types = deep_spin_model_devi.numb_types(); + numb_types_spin = deep_spin_model_devi.numb_types_spin(); + dim_fparam = deep_spin_model_devi.dim_fparam(); + dim_aparam = deep_spin_model_devi.dim_aparam(); + assert(cutoff == deep_spin.cutoff() * dist_unit_cvt_factor); + assert(numb_types == deep_spin.numb_types()); + assert(numb_types_spin == deep_spin.numb_types_spin()); + assert(dim_fparam == deep_spin.dim_fparam()); + assert(dim_aparam == deep_spin.dim_aparam()); + } + + out_freq = 100; + out_file = "model_devi.out"; + out_each = 0; + out_rel = 0; + eps = 0.; + fparam.clear(); + aparam.clear(); + while (iarg < narg) { + if (!is_key(arg[iarg])) { + error->all(FLERR, + "Illegal pair_style command\nwrong number of parameters\n"); + } + if (string(arg[iarg]) == string("out_freq")) { + if (iarg + 1 >= narg) { + error->all(FLERR, "Illegal out_freq, not provided"); + } + out_freq = atoi(arg[iarg + 1]); + iarg += 2; + } else if (string(arg[iarg]) == string("out_file")) { + if (iarg + 1 >= narg) { + error->all(FLERR, "Illegal out_file, not provided"); + } + out_file = string(arg[iarg + 1]); + iarg += 2; + } else if (string(arg[iarg]) == string("fparam")) { + for (int ii = 0; ii < dim_fparam; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + char tmp[1024]; + sprintf(tmp, "Illegal fparam, the dimension should be %d", + dim_fparam); + error->all(FLERR, tmp); + } + fparam.push_back(atof(arg[iarg + 1 + ii])); + } + iarg += 1 + dim_fparam; + } else if (string(arg[iarg]) == string("aparam")) { + for (int ii = 0; ii < dim_aparam; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + char tmp[1024]; + sprintf(tmp, "Illegal aparam, the dimension should be %d", + dim_aparam); + error->all(FLERR, tmp); + } + aparam.push_back(atof(arg[iarg + 1 + ii])); + } + iarg += 1 + dim_aparam; + } else if (string(arg[iarg]) == string("ttm")) { +#ifdef USE_TTM + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, "invalid ttm key: should be ttm ttm_fix_id(str)"); + } + } + do_ttm = true; + ttm_fix_id = arg[iarg + 1]; + iarg += 1 + 1; +#else + error->all(FLERR, + "The deepmd-kit was compiled without support for TTM, please " + "rebuild it with LAMMPS version >=20210831"); +#endif + } + + /////////////////////////////////////////////// + // pair_style deepmd cp.pb fparam_from_compute TEMP + // compute TEMP all temp + ////////////////////////////////////////////// + else if (string(arg[iarg]) == string("fparam_from_compute")) { + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, + "invalid fparam_from_compute key: should be " + "fparam_from_compute compute_fparam_id(str)"); } } - } else { - error->all(FLERR, "unknown computational branch"); + do_compute_fparam = true; + compute_fparam_id = arg[iarg + 1]; + iarg += 1 + 1; + } else if (string(arg[iarg]) == string("aparam_from_compute")) { + for (int ii = 0; ii < 1; ++ii) { + if (iarg + 1 + ii >= narg || is_key(arg[iarg + 1 + ii])) { + error->all(FLERR, + "invalid aparam_from_compute key: should be " + "aparam_from_compute compute_aparam_id(str)"); + } + } + do_compute_aparam = true; + compute_aparam_id = arg[iarg + 1]; + iarg += 1 + 1; + } else if (string(arg[iarg]) == string("atomic")) { + out_each = 1; + iarg += 1; + } else if (string(arg[iarg]) == string("relative")) { + out_rel = 1; + eps = atof(arg[iarg + 1]) / ener_unit_cvt_factor; + iarg += 2; + } else if (string(arg[iarg]) == string("relative_v")) { + out_rel_v = 1; + eps_v = atof(arg[iarg + 1]) / ener_unit_cvt_factor; + iarg += 2; + } else if (string(arg[iarg]) == string("virtual_len")) { + virtual_len.resize(numb_types_spin); + for (int ii = 0; ii < numb_types_spin; ++ii) { + virtual_len[ii] = atof(arg[iarg + ii + 1]); + } + iarg += numb_types_spin + 1; + } else if (string(arg[iarg]) == string("spin_norm")) { + spin_norm.resize(numb_types_spin); + for (int ii = 0; ii < numb_types_spin; ++ii) { + spin_norm[ii] = atof(arg[iarg + ii + 1]); + } + iarg += numb_types_spin + 1; } - } else { - if (numb_models == 1) { - try { - deep_spin.compute(dener, dforce, dforce_mag, dvirial, dcoord, dspin, - dtype, dbox); - } catch (deepmd_compat::deepmd_exception &e) { - error->one(FLERR, e.what()); + } + + if (out_freq < 0) { + error->all(FLERR, "Illegal out_freq, should be >= 0"); + } + if ((int)do_ttm + (int)do_compute_aparam + (int)(aparam.size() > 0) > 1) { + error->all(FLERR, + "aparam, aparam_from_compute, and ttm should NOT be set " + "simultaneously"); + } + if (do_compute_fparam && fparam.size() > 0) { + error->all( + FLERR, + "fparam and fparam_from_compute should NOT be set simultaneously"); + } + + if (comm->me == 0) { + if (numb_models > 1 && out_freq > 0) { + if (!is_restart) { + fp.open(out_file); + fp << scientific; + fp << "#" << setw(12 - 1) << "step" << setw(18 + 1) << "max_devi_v" + << setw(18 + 1) << "min_devi_v" << setw(18 + 1) << "avg_devi_v" + << setw(18 + 1) << "max_devi_fr" << setw(18 + 1) << "min_devi_fr" + << setw(18 + 1) << "avg_devi_fr" << setw(18 + 1) << "max_devi_fm" + << setw(18 + 1) << "min_devi_fm" << setw(18 + 1) << "avg_devi_fm" + << endl; + } else { + fp.open(out_file, std::ofstream::out | std::ofstream::app); + fp << scientific; } + } + string pre = " "; + cout << pre << ">>> Info of model(s):" << endl + << pre << "using " << setw(3) << numb_models << " model(s): "; + if (narg == 1) { + cout << arg[0] << " "; } else { - error->all(FLERR, "Serial version does not support model devi"); + for (int ii = 0; ii < models.size(); ++ii) { + cout << models[ii] << " "; + } + } + cout << endl + << pre << "rcut in model: " << cutoff << endl + << pre << "ntypes in model: " << numb_types << endl; + if (fparam.size() > 0) { + cout << pre << "using fparam(s): "; + for (int ii = 0; ii < dim_fparam; ++ii) { + cout << fparam[ii] << " "; + } + cout << endl; + } + if (do_compute_fparam) { + cout << pre << "using compute id (fparam): "; + cout << compute_fparam_id << " " << endl; + } + if (do_compute_aparam) { + cout << pre << "using compute id (aparam): "; + cout << compute_aparam_id << " " << endl; + } + if (aparam.size() > 0) { + cout << pre << "using aparam(s): "; + for (int ii = 0; ii < aparam.size(); ++ii) { + cout << aparam[ii] << " "; + } + cout << endl; + } + if (do_ttm) { + cout << pre << "using ttm fix: "; + cout << ttm_fix_id << " "; + if (dim_fparam > 0) { + cout << "(fparam)" << endl; + } else if (dim_aparam > 0) { + cout << "(aparam)" << endl; + } } } - // get force - // unit_factor = hbar / spin_norm; - const double hbar = 6.5821191e-04; - for (int ii = 0; ii < nall; ++ii) { - for (int dd = 0; dd < 3; ++dd) { - f[ii][dd] += scale[1][1] * dforce[3 * ii + dd] * force_unit_cvt_factor; - fm[ii][dd] += scale[1][1] * dforce_mag[3 * ii + dd] / (hbar / sp[ii][3]) * - force_unit_cvt_factor; + comm_reverse = numb_models * 3 * 2; + all_force.resize(numb_models); +} + +/* ---------------------------------------------------------------------- + set coeffs for one or more type pairs +------------------------------------------------------------------------- */ + +void PairDeepSpin::coeff(int narg, char **arg) { + if (!allocated) { + allocate(); + } + + int n = atom->ntypes; + int ilo, ihi, jlo, jhi; + ilo = 0; + jlo = 0; + ihi = n; + jhi = n; + if (narg >= 2) { + utils::bounds(FLERR, arg[0], 1, atom->ntypes, ilo, ihi, error); + utils::bounds(FLERR, arg[1], 1, atom->ntypes, jlo, jhi, error); + if (ilo != 1 || jlo != 1 || ihi != n || jhi != n) { + error->all(FLERR, + "deepmd requires that the scale should be set to all atom " + "types, i.e. pair_coeff * *."); } } + if (narg <= 2) { + type_idx_map.resize(n); + for (int ii = 0; ii < n; ++ii) { + type_idx_map[ii] = ii; + } + } else { + int iarg = 2; - std::map().swap(new_idx_map); - std::map().swap(old_idx_map); - // malloc_trim(0); + // type_map is a list of strings with undetermined length + // note: although we have numb_types from the model, we do not require + // the number of types in the system matches that in the model + std::vector type_map; + std::string type_map_str; + deep_spin.get_type_map(type_map_str); + // convert the string to a vector of strings + std::istringstream iss(type_map_str); + std::string type_name; + while (iss >> type_name) { + type_map.push_back(type_name); + } - // accumulate energy and virial - if (eflag) { - eng_vdwl += scale[1][1] * dener * ener_unit_cvt_factor; + type_idx_map.clear(); + type_names.clear(); + while (iarg < narg) { + std::string type_name = arg[iarg]; + type_names.push_back(type_name); + bool found_element = false; + for (int ii = 0; ii < type_map.size(); ++ii) { + if (type_map[ii] == type_name) { + type_idx_map.push_back(ii); + found_element = true; + break; + } + } + if (!found_element && "NULL" == type_name) { + type_idx_map.push_back(type_map.size()); // ghost type + found_element = true; + } + if (!found_element) { + error->all(FLERR, "pair_coeff: element " + type_name + + " not found in the model"); + } + iarg += 1; + } + numb_types = type_idx_map.size(); + if (numb_types < n) { + type_idx_map.resize(n); + for (int ii = numb_types; ii < n; ++ii) { + type_idx_map[ii] = -1; + } + } } - if (vflag) { - virial[0] += 1.0 * dvirial[0] * scale[1][1] * ener_unit_cvt_factor; - virial[1] += 1.0 * dvirial[4] * scale[1][1] * ener_unit_cvt_factor; - virial[2] += 1.0 * dvirial[8] * scale[1][1] * ener_unit_cvt_factor; - virial[3] += 1.0 * dvirial[3] * scale[1][1] * ener_unit_cvt_factor; - virial[4] += 1.0 * dvirial[6] * scale[1][1] * ener_unit_cvt_factor; - virial[5] += 1.0 * dvirial[7] * scale[1][1] * ener_unit_cvt_factor; + for (int i = ilo; i <= ihi; i++) { + for (int j = MAX(jlo, i); j <= jhi; j++) { + setflag[i][j] = 1; + scale[i][j] = 1.0; + if (i > numb_types || j > numb_types) { + char warning_msg[1024]; + sprintf(warning_msg, + "Interaction between types %d and %d is set with deepmd, but " + "will be ignored.\n Deepmd model has only %d types, it only " + "computes the mulitbody interaction of types: 1-%d.", + i, j, numb_types, numb_types); + error->warning(FLERR, warning_msg); + } + } } } From dc0f496c4105b9d84e8485221ba1f5389fa13124 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 18:02:36 +0800 Subject: [PATCH 159/193] add #4269 --- source/api_cc/src/DeepSpinTF.cc | 2 ++ source/lmp/pair_deepmd.cpp | 1 + source/lmp/pair_deepspin.cpp | 1 + 3 files changed, 4 insertions(+) diff --git a/source/api_cc/src/DeepSpinTF.cc b/source/api_cc/src/DeepSpinTF.cc index 416fc226ff..924eb7aea5 100644 --- a/source/api_cc/src/DeepSpinTF.cc +++ b/source/api_cc/src/DeepSpinTF.cc @@ -8,6 +8,7 @@ #include "AtomMap.h" #include "common.h" #include "device.h" +#include "neigh_list.h" using namespace tensorflow; using namespace deepmd; @@ -742,6 +743,7 @@ void DeepSpinTF::compute(ENERGYVTYPE& dener, ntypes, ntypes_spin); InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], &extend_numneigh[0], &extend_firstneigh[0]); + extend_lmp_list.set_mask(NEIGHMASK); std::vector fparam; std::vector aparam_; validate_fparam_aparam(nframes, (aparam_nall ? nall : nloc), fparam_, diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 3351da2aca..9d0402ae4f 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -200,6 +200,7 @@ void PairDeepMD::compute(int eflag, int vflag) { commdata_->nswap, commdata_->sendnum, commdata_->recvnum, commdata_->firstrecv, commdata_->sendlist, commdata_->sendproc, commdata_->recvproc, &world); + lmp_list.set_mask(NEIGHMASK); deepmd_compat::InputNlist extend_lmp_list; if (single_model || multi_models_no_mod_devi) { // cvflag_atom is the right flag for the cvatom matrix diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp index 8c289dcb97..aea410b284 100644 --- a/source/lmp/pair_deepspin.cpp +++ b/source/lmp/pair_deepspin.cpp @@ -209,6 +209,7 @@ void PairDeepSpin::compute(int eflag, int vflag) { commdata_->nswap, commdata_->sendnum, commdata_->recvnum, commdata_->firstrecv, commdata_->sendlist, commdata_->sendproc, commdata_->recvproc, &world); + lmp_list.set_mask(NEIGHMASK); if (single_model || multi_models_no_mod_devi) { // cvflag_atom is the right flag for the cvatom matrix if (!(eflag_atom || cvflag_atom)) { From 8fd95f8a3fe8bfcc99d5765fc317028f5f35238c Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 18:06:13 +0800 Subject: [PATCH 160/193] Update pair_deepmd.cpp --- source/lmp/pair_deepmd.cpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 9d0402ae4f..35f67ea014 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -120,10 +120,6 @@ void PairDeepMD::compute(int eflag, int vflag) { int nall = nlocal + nghost; int newton_pair = force->newton_pair; - vector dspin(nall * 3, 0.); - vector dfm(nall * 3, 0.); - double **sp = atom->sp; - double **fm = atom->fm; if (atom->sp_flag) { throw std::runtime_error( "Pair style 'deepmd' does not support spin atoms, please use pair " @@ -342,8 +338,6 @@ void PairDeepMD::compute(int eflag, int vflag) { } vector std_f; vector tmp_avg_f; - vector std_fm; - vector tmp_avg_fm; deep_pot_model_devi.compute_avg(tmp_avg_f, all_force); deep_pot_model_devi.compute_std_f(std_f, tmp_avg_f, all_force); if (out_rel == 1) { @@ -352,7 +346,6 @@ void PairDeepMD::compute(int eflag, int vflag) { double min = numeric_limits::max(), max = 0, avg = 0; ana_st(max, min, avg, std_f, nlocal); double all_f_min = 0, all_f_max = 0, all_f_avg = 0; - double all_fm_min = 0, all_fm_max = 0, all_fm_avg = 0; MPI_Reduce(&min, &all_f_min, 1, MPI_DOUBLE, MPI_MIN, 0, world); MPI_Reduce(&max, &all_f_max, 1, MPI_DOUBLE, MPI_MAX, 0, world); MPI_Reduce(&avg, &all_f_avg, 1, MPI_DOUBLE, MPI_SUM, 0, world); @@ -409,7 +402,6 @@ void PairDeepMD::compute(int eflag, int vflag) { << " " << setw(18) << all_f_avg; } if (out_each == 1) { - // need support for spin atomic force. vector std_f_all(atom->natoms); // Gather std_f and tags tagint *tag = atom->tag; From edb1e9fecd448512c87806da97b3732c44a0cc2b Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 18:13:23 +0800 Subject: [PATCH 161/193] Update DeepSpinTF.cc --- source/api_cc/src/DeepSpinTF.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/api_cc/src/DeepSpinTF.cc b/source/api_cc/src/DeepSpinTF.cc index 924eb7aea5..caff84255e 100644 --- a/source/api_cc/src/DeepSpinTF.cc +++ b/source/api_cc/src/DeepSpinTF.cc @@ -8,7 +8,6 @@ #include "AtomMap.h" #include "common.h" #include "device.h" -#include "neigh_list.h" using namespace tensorflow; using namespace deepmd; @@ -743,7 +742,7 @@ void DeepSpinTF::compute(ENERGYVTYPE& dener, ntypes, ntypes_spin); InputNlist extend_lmp_list(extend_inum, &extend_ilist[0], &extend_numneigh[0], &extend_firstneigh[0]); - extend_lmp_list.set_mask(NEIGHMASK); + extend_lmp_list.set_mask(lmp_list.mask); std::vector fparam; std::vector aparam_; validate_fparam_aparam(nframes, (aparam_nall ? nall : nloc), fparam_, From 5c9fda1fbd6ab4344861ad231c0e9f37deca8e2b Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 18:32:47 +0800 Subject: [PATCH 162/193] rm spin args from deeppottf --- source/api_cc/include/DeepPotTF.h | 15 --------------- source/api_cc/src/DeepPotTF.cc | 13 ------------- 2 files changed, 28 deletions(-) diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index b2e7b12487..020a096394 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -287,8 +287,6 @@ class DeepPotTF : public DeepPotBackend { const std::vector& aparam, const bool atomic); - void cum_sum(std::map&, std::map&); - private: tensorflow::Session* session; int num_intra_nthreads, num_inter_nthreads; @@ -296,8 +294,6 @@ class DeepPotTF : public DeepPotBackend { bool inited; template VT get_scalar(const std::string& name) const; - template - void get_vector(std::vector& vec, const std::string& name) const; double rcut; int dtype; @@ -306,17 +302,6 @@ class DeepPotTF : public DeepPotBackend { std::string model_version; int ntypes; int ntypes_spin; - int extend_inum; - std::vector extend_ilist; - std::vector extend_numneigh; - std::vector> extend_neigh; - std::vector extend_firstneigh; - // std::vector extend_dcoord; - std::vector extend_dtype; - int extend_nghost; - // for spin systems, search new index of atoms by their old index - std::map new_idx_map; - std::map old_idx_map; int dfparam; int daparam; bool aparam_nall; diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index 586bf02021..12c23970e3 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -511,12 +511,6 @@ VT DeepPotTF::get_scalar(const std::string& name) const { return session_get_scalar(session, name); } -template -void DeepPotTF::get_vector(std::vector& vec, - const std::string& name) const { - session_get_vector(vec, session, name); -} - template void DeepPotTF::validate_fparam_aparam( const int& nframes, @@ -1035,11 +1029,4 @@ void DeepPotTF::computew_mixed_type(std::vector& ener, coord, atype, box, fparam, aparam, atomic); } -void DeepPotTF::cum_sum(std::map& sum, std::map& vec) { - sum[0] = 0; - for (int ii = 1; ii < vec.size(); ++ii) { - sum[ii] = sum[ii - 1] + vec[ii - 1]; - } -} - #endif From 24896f0fe2d69c458cbd02c54967940485def7f0 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 18:34:45 +0800 Subject: [PATCH 163/193] rm black space --- source/api_cc/include/DeepPotTF.h | 1 - source/api_cc/src/DeepPotTF.cc | 3 --- 2 files changed, 4 deletions(-) diff --git a/source/api_cc/include/DeepPotTF.h b/source/api_cc/include/DeepPotTF.h index 020a096394..10d33e8216 100644 --- a/source/api_cc/include/DeepPotTF.h +++ b/source/api_cc/include/DeepPotTF.h @@ -294,7 +294,6 @@ class DeepPotTF : public DeepPotBackend { bool inited; template VT get_scalar(const std::string& name) const; - double rcut; int dtype; double cell_size; diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index 12c23970e3..a990cecf8d 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -962,7 +962,6 @@ void DeepPotTF::computew(std::vector& ener, compute(ener, force, virial, atom_energy, atom_virial, coord, atype, box, fparam, aparam, atomic); } - void DeepPotTF::computew(std::vector& ener, std::vector& force, std::vector& virial, @@ -997,7 +996,6 @@ void DeepPotTF::computew(std::vector& ener, compute(ener, force, virial, atom_energy, atom_virial, coord, atype, box, nghost, inlist, ago, fparam, aparam, atomic); } - void DeepPotTF::computew_mixed_type(std::vector& ener, std::vector& force, std::vector& virial, @@ -1028,5 +1026,4 @@ void DeepPotTF::computew_mixed_type(std::vector& ener, compute_mixed_type(ener, force, virial, atom_energy, atom_virial, nframes, coord, atype, box, fparam, aparam, atomic); } - #endif From ba46f5430eb83dc2710b9636bcb0979212740463 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 18:40:21 +0800 Subject: [PATCH 164/193] rm black space and comment --- source/api_cc/include/DeepPot.h | 5 ----- source/api_cc/include/DeepPotPT.h | 2 -- source/api_cc/src/DeepPot.cc | 19 +++---------------- source/api_cc/src/DeepPotPT.cc | 3 --- 4 files changed, 3 insertions(+), 26 deletions(-) diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index 06423d38c8..68fdc57b60 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -88,7 +88,6 @@ class DeepPotBackend : public DeepBaseModelBackend { const std::vector& aparam, const bool atomic) = 0; /** @} */ - /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -271,7 +270,6 @@ class DeepPot : public DeepBaseModel { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); /** @} */ - /** * @brief Evaluate the energy, force and virial by using this DP. * @param[out] ener The system energy. @@ -320,7 +318,6 @@ class DeepPot : public DeepBaseModel { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); /** @} */ - /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -423,7 +420,6 @@ class DeepPot : public DeepBaseModel { const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); /** @} */ - /** * @brief Evaluate the energy, force, and virial with the mixed type *by using this DP. @@ -656,7 +652,6 @@ class DeepPotModelDevi : public DeepBaseModelDevi { const int& ago, const std::vector& fparam = std::vector(), const std::vector& aparam = std::vector()); - /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using these DP models. diff --git a/source/api_cc/include/DeepPotPT.h b/source/api_cc/include/DeepPotPT.h index f440b15a1b..8f69168b5a 100644 --- a/source/api_cc/include/DeepPotPT.h +++ b/source/api_cc/include/DeepPotPT.h @@ -74,7 +74,6 @@ class DeepPotPT : public DeepPotBackend { const std::vector& fparam, const std::vector& aparam, const bool atomic); - /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial *by using this DP. @@ -116,7 +115,6 @@ class DeepPotPT : public DeepPotBackend { const std::vector& fparam, const std::vector& aparam, const bool atomic); - /** * @brief Evaluate the energy, force, and virial with the mixed type *by using this DP. diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc index d8d02aff5c..b47c8a9ba1 100644 --- a/source/api_cc/src/DeepPot.cc +++ b/source/api_cc/src/DeepPot.cc @@ -65,7 +65,6 @@ void DeepPot::init(const std::string& model, dpbase = dp; // make sure the base funtions work } -// no nlist, no atomic : nframe template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -131,9 +130,7 @@ template void DeepPot::compute(std::vector& dener, const std::vector& dbox, const std::vector& fparam, const std::vector& aparam); -// above: no nlist, no atomic : nframe * precision -// nlist, no atomic : nframe template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -169,7 +166,7 @@ void DeepPot::compute(std::vector& dener, dp->computew(dener, dforce_, dvirial, datom_energy_, datom_virial_, dcoord_, datype_, dbox, nghost, lmp_list, ago, fparam_, aparam__, false); } -// nlist, no atomic : nframe * precision + template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dvirial, @@ -218,7 +215,6 @@ template void DeepPot::compute(std::vector& dener, const std::vector& fparam, const std::vector& aparam_); -// no nlist, atomic : nframe template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -293,9 +289,7 @@ template void DeepPot::compute(std::vector& dener, const std::vector& dbox, const std::vector& fparam, const std::vector& aparam); -// above: no nlist, atomic : nframe * precision -// nlist, atomic : nframe template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, @@ -333,7 +327,6 @@ void DeepPot::compute(std::vector& dener, datype_, dbox, nghost, lmp_list, ago, fparam_, aparam__, true); } -// nlist, atomic : nframe * precision template void DeepPot::compute(ENERGYTYPE& dener, std::vector& dforce_, std::vector& dvirial, @@ -595,7 +588,6 @@ void DeepPotModelDevi::init(const std::vector& models, inited = true; } -// no nlist, no atomic template void DeepPotModelDevi::compute(std::vector& all_energy, std::vector>& all_force, @@ -618,7 +610,6 @@ void DeepPotModelDevi::compute(std::vector& all_energy, } } -// no nlist, no atomic: precision template void DeepPotModelDevi::compute( std::vector& all_energy, std::vector>& all_force, @@ -639,7 +630,6 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); -// no nlist, atomic template void DeepPotModelDevi::compute( std::vector& all_energy, @@ -666,7 +656,7 @@ void DeepPotModelDevi::compute( dbox, fparam, aparam_); } } -// no nlist, atomic: precision + template void DeepPotModelDevi::compute( std::vector& all_energy, std::vector>& all_force, @@ -691,7 +681,6 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); -// nlist, no atomic template void DeepPotModelDevi::compute(std::vector& all_energy, std::vector>& all_force, @@ -715,7 +704,7 @@ void DeepPotModelDevi::compute(std::vector& all_energy, datype_, dbox, nghost, lmp_list, ago, fparam, aparam_); } } -// nlist, no atomic: precision + template void DeepPotModelDevi::compute( std::vector& all_energy, std::vector>& all_force, @@ -742,7 +731,6 @@ template void DeepPotModelDevi::compute( const std::vector& fparam, const std::vector& aparam); -// nlist, atomic template void DeepPotModelDevi::compute( std::vector& all_energy, @@ -773,7 +761,6 @@ void DeepPotModelDevi::compute( } } -// nlist, atomic : precision template void DeepPotModelDevi::compute( std::vector& all_energy, std::vector>& all_force, diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index f8b803bad4..780a8007f3 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -311,7 +311,6 @@ template void DeepPotPT::compute>( const std::vector& fparam, const std::vector& aparam, const bool atomic); - template void DeepPotPT::compute(ENERGYVTYPE& ener, std::vector& force, @@ -433,7 +432,6 @@ template void DeepPotPT::compute>( const std::vector& fparam, const std::vector& aparam, const bool atomic); - void DeepPotPT::get_type_map(std::string& type_map) { auto ret = module.run_method("get_type_map").toList(); for (const torch::IValue& element : ret) { @@ -513,7 +511,6 @@ void DeepPotPT::computew(std::vector& ener, nghost, inlist, ago, fparam, aparam, atomic); }); } - void DeepPotPT::computew_mixed_type(std::vector& ener, std::vector& force, std::vector& virial, From 809b47150486ffa7e00fc0d3bffc7a5c62d518ae Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 18:44:30 +0800 Subject: [PATCH 165/193] Update DeepPot.h --- source/api_cc/include/DeepPot.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index 68fdc57b60..9411684462 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -574,8 +574,8 @@ class DeepPotModelDevi : public DeepBaseModelDevi { **/ template void compute(std::vector& all_ener, - std::vector>& all_force, - std::vector>& all_virial, + std::vector >& all_force, + std::vector >& all_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, @@ -607,10 +607,10 @@ class DeepPotModelDevi : public DeepBaseModelDevi { **/ template void compute(std::vector& all_ener, - std::vector>& all_force, - std::vector>& all_virial, - std::vector>& all_atom_energy, - std::vector>& all_atom_virial, + std::vector >& all_force, + std::vector >& all_virial, + std::vector >& all_atom_energy, + std::vector >& all_atom_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, @@ -642,8 +642,8 @@ class DeepPotModelDevi : public DeepBaseModelDevi { **/ template void compute(std::vector& all_ener, - std::vector>& all_force, - std::vector>& all_virial, + std::vector >& all_force, + std::vector >& all_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, @@ -680,10 +680,10 @@ class DeepPotModelDevi : public DeepBaseModelDevi { **/ template void compute(std::vector& all_ener, - std::vector>& all_force, - std::vector>& all_virial, - std::vector>& all_atom_energy, - std::vector>& all_atom_virial, + std::vector >& all_force, + std::vector >& all_virial, + std::vector >& all_atom_energy, + std::vector >& all_atom_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, From 388bb22e6fb4bb6509c2a8b98ad39b58e9117277 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 7 Nov 2024 10:45:47 +0000 Subject: [PATCH 166/193] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- source/api_cc/include/DeepPot.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index 9411684462..68fdc57b60 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -574,8 +574,8 @@ class DeepPotModelDevi : public DeepBaseModelDevi { **/ template void compute(std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_virial, + std::vector>& all_force, + std::vector>& all_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, @@ -607,10 +607,10 @@ class DeepPotModelDevi : public DeepBaseModelDevi { **/ template void compute(std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_virial, - std::vector >& all_atom_energy, - std::vector >& all_atom_virial, + std::vector>& all_force, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, @@ -642,8 +642,8 @@ class DeepPotModelDevi : public DeepBaseModelDevi { **/ template void compute(std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_virial, + std::vector>& all_force, + std::vector>& all_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, @@ -680,10 +680,10 @@ class DeepPotModelDevi : public DeepBaseModelDevi { **/ template void compute(std::vector& all_ener, - std::vector >& all_force, - std::vector >& all_virial, - std::vector >& all_atom_energy, - std::vector >& all_atom_virial, + std::vector>& all_force, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, const std::vector& coord, const std::vector& atype, const std::vector& box, From d20d66873fe48e97a658b4120461e1d62274843b Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 19:29:46 +0800 Subject: [PATCH 167/193] resolve conversations --- source/api_c/include/c_api.h | 278 +++++++++++++++++++++++++++++------ source/lmp/pair_deepmd.cpp | 6 +- source/lmp/pair_deepspin.cpp | 6 +- 3 files changed, 239 insertions(+), 51 deletions(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index 5638126e80..ded6e638ed 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -99,6 +99,12 @@ const char* DP_NlistCheckOK(DP_Nlist* dp); **/ typedef struct DP_DeepBaseModel DP_DeepBaseModel; +/** + * @brief Delete a Deep Potential Base Model. + * + * @param dp Deep Potential Base Model to delete. + * @since API version 24 + */ extern void DP_DeleteDeepBaseModel(DP_DeepBaseModel* dp); /** @@ -106,6 +112,12 @@ extern void DP_DeleteDeepBaseModel(DP_DeepBaseModel* dp); **/ typedef struct DP_DeepBaseModelDevi DP_DeepBaseModelDevi; +/** + * @brief Delete a Deep Potential Base Model Deviation. + * + * @param dp Deep Potential Base Model Deviation to delete. + * @since API version 24 + */ extern void DP_DeleteDeepBaseModelDevi(DP_DeepBaseModelDevi* dp); /** @@ -155,14 +167,16 @@ extern DP_DeepPot* DP_NewDeepPotWithParam2(const char* c_model, extern void DP_DeleteDeepPot(DP_DeepPot* dp); /** - * @brief The deep potential spin. + * @brief The deep potential spin model. + * @since API version 24 **/ typedef struct DP_DeepSpin DP_DeepSpin; /** * @brief DP constructor with initialization. * @param[in] c_model The name of the frozen model file. - * @returns A pointer to the deep potential. + * @returns A pointer to the deep potential spin model. + * @since API version 24 **/ extern DP_DeepSpin* DP_NewDeepSpin(const char* c_model); @@ -173,7 +187,8 @@ extern DP_DeepSpin* DP_NewDeepSpin(const char* c_model); * @param gpu_rank The rank of the GPU. * @param c_file_content The content of the model file. * @param size_file_content The size of the model file. - * @return DP_DeepSpin* A pointer to the deep potential. + * @return DP_DeepSpin* A pointer to the deep potential spin model. + * @since API version 24 */ extern DP_DeepSpin* DP_NewDeepSpinWithParam2(const char* c_model, const int gpu_rank, @@ -181,9 +196,10 @@ extern DP_DeepSpin* DP_NewDeepSpinWithParam2(const char* c_model, const int size_file_content); /** - * @brief Delete a Deep Potential. + * @brief Delete a Deep Potential Spin Model. * - * @param dp Deep Potential to delete. + * @param dp Deep Potential Spin Model to delete. + * @since API version 24 */ extern void DP_DeleteDeepSpin(DP_DeepSpin* dp); @@ -340,7 +356,7 @@ extern void DP_DeepPotComputeNListf(DP_DeepPot* dp, * @param[in] fparam The frame parameters. The array can be of size nframes x *dim_fparam. * @param[in] aparam The atom parameters. The array can be of size nframes x - *dim_aparam. + * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. @@ -366,10 +382,10 @@ extern void DP_DeepPotCompute2(DP_DeepPot* dp, double* atomic_virial); /** - * @brief Evaluate the energy, force, magnetic force and virial by using a DP - *with spin input. (double version) + * @brief Evaluate the energy, force, magnetic force and virial by using a DP + * spin model. (double version) * @version 2 - * @param[in] dp The DP to use. + * @param[in] dp The DP spin model to use. * @param[in] nframes The number of frames. * @param[in] natoms The number of atoms. * @param[in] coord The coordinates of atoms. The array should be of size natoms @@ -382,10 +398,11 @@ extern void DP_DeepPotCompute2(DP_DeepPot* dp, * @param[in] fparam The frame parameters. The array can be of size nframes x *dim_fparam. * @param[in] aparam The atom parameters. The array can be of size nframes x - *dim_aparam. + * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag The magnetic force on each atom. + * @param[out] force_mag Output magnetic force on each atom. The array should be + * of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. * @param[out] atomic_energy Output atomic energy. The array should be of size *natoms. @@ -393,6 +410,7 @@ extern void DP_DeepPotCompute2(DP_DeepPot* dp, *natoms x 9. * @warning The output arrays should be allocated before calling this function. *Pass NULL if not required. + * @since API version 24 **/ extern void DP_DeepSpinCompute2(DP_DeepSpin* dp, const int nframes, @@ -424,7 +442,7 @@ extern void DP_DeepSpinCompute2(DP_DeepSpin* dp, * @param[in] fparam The frame parameters. The array can be of size nframes x *dim_fparam. * @param[in] aparam The atom parameters. The array can be of size nframes x - *dim_aparam. + * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. @@ -451,7 +469,7 @@ extern void DP_DeepPotComputef2(DP_DeepPot* dp, /** * @brief Evaluate the energy, force, magnetic force and virial by using a DP - *with spin input. (float version) + * spin model. (float version) * @version 2 * @param[in] dp The DP to use. * @param[in] nframes The number of frames. @@ -466,10 +484,11 @@ extern void DP_DeepPotComputef2(DP_DeepPot* dp, * @param[in] fparam The frame parameters. The array can be of size nframes x *dim_fparam. * @param[in] aparam The atom parameters. The array can be of size nframes x - *dim_aparam. + * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag The magnetic force on each atom. + * @param[out] force_mag Output magnetic force on each atom. The array should be + * of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. * @param[out] atomic_energy Output atomic energy. The array should be of size *natoms. @@ -477,6 +496,7 @@ extern void DP_DeepPotComputef2(DP_DeepPot* dp, *natoms x 9. * @warning The output arrays should be allocated before calling this function. *Pass NULL if not required. + * @since API version 24 **/ extern void DP_DeepSpinComputef2(DP_DeepSpin* dp, const int nframes, @@ -512,7 +532,7 @@ extern void DP_DeepSpinComputef2(DP_DeepSpin* dp, * @param[in] fparam The frame parameters. The array can be of size nframes x *dim_fparam. * @param[in] aparam The atom parameters. The array can be of size nframes x - *dim_aparam. + * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. @@ -540,6 +560,40 @@ extern void DP_DeepPotComputeNList2(DP_DeepPot* dp, double* atomic_energy, double* atomic_virial); +/** + * @brief Evaluate the energy, force and virial by using a DP spin model + * with the neighbor list. (double version) + * @version 2 + * @param[in] dp The DP spin model to use. + * @param[in] nframes The number of frames. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size natoms + *x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + *of size nframes x natoms x 3. + * @param[in] atype The atom types. The array should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. Pass + *NULL if pbc is not used. + * @param[in] nghost The number of ghost atoms. + * @param[in] nlist The neighbor list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameters. The array can be of size nframes x + *dim_fparam. + * @param[in] aparam The atom parameters. The array can be of size nframes x + * natoms x dim_aparam. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] force_mag Output magnetic force on each atom. The array should be + * of size natoms x 3. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of size + *natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of size + *natoms x 9. + * @warning The output arrays should be allocated before calling this function. + *Pass NULL if not required. + * @since API version 24 + **/ extern void DP_DeepSpinComputeNList2(DP_DeepSpin* dp, const int nframes, const int natoms, @@ -577,7 +631,7 @@ extern void DP_DeepSpinComputeNList2(DP_DeepSpin* dp, * @param[in] fparam The frame parameters. The array can be of size nframes x *dim_fparam. * @param[in] aparam The atom parameters. The array can be of size nframes x - *dim_aparam. + * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. @@ -605,6 +659,40 @@ extern void DP_DeepPotComputeNListf2(DP_DeepPot* dp, float* atomic_energy, float* atomic_virial); +/** + * @brief Evaluate the energy, force and virial by using a DP spin model + * with the neighbor list. (float version) + * @version 2 + * @param[in] dp The DP spin model to use. + * @param[in] nframes The number of frames. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size natoms + *x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + *of size nframes x natoms x 3. + * @param[in] atype The atom types. The array should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. Pass + *NULL if pbc is not used. + * @param[in] nghost The number of ghost atoms. + * @param[in] nlist The neighbor list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameters. The array can be of size nframes x + *dim_fparam. + * @param[in] aparam The atom parameters. The array can be of size nframes x + * natoms x dim_aparam. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] force_mag Output magnetic force on each atom. The array should be + * of size natoms x 3. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of size + *natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of size + *natoms x 9. + * @warning The output arrays should be allocated before calling this function. + *Pass NULL if not required. + * @since API version 24 + **/ extern void DP_DeepSpinComputeNListf2(DP_DeepSpin* dp, const int nframes, const int natoms, @@ -639,7 +727,7 @@ extern void DP_DeepSpinComputeNListf2(DP_DeepSpin* dp, * @param[in] fparam The frame parameters. The array can be of size nframes x *dim_fparam. * @param[in] aparam The atom parameters. The array can be of size nframes x - *dim_aparam. + * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. @@ -678,7 +766,7 @@ extern void DP_DeepPotComputeMixedType(DP_DeepPot* dp, * @param[in] fparam The frame parameters. The array can be of size nframes x *dim_fparam. * @param[in] aparam The atom parameters. The array can be of size nframes x - *dim_aparam. + * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. @@ -744,6 +832,7 @@ extern void DP_DeleteDeepPotModelDevi(DP_DeepPotModelDevi* dp); /** * @brief The deep potential spin model deviation. + * @since API version 24 **/ typedef struct DP_DeepSpinModelDevi DP_DeepSpinModelDevi; @@ -751,6 +840,7 @@ typedef struct DP_DeepSpinModelDevi DP_DeepSpinModelDevi; * @brief DP spin model deviation constructor with initialization. * @param[in] c_models The array of the name of the frozen model file. * @param[in] nmodels The number of models. + * @since API version 24 **/ extern DP_DeepSpinModelDevi* DP_NewDeepSpinModelDevi(const char** c_models, int n_models); @@ -766,6 +856,7 @@ extern DP_DeepSpinModelDevi* DP_NewDeepSpinModelDevi(const char** c_models, * @param[in] size_file_contents The sizes of the contents of the model file. * @return DP_DeepSpinModelDevi* A pointer to the deep potential model * deviation. + * @since API version 24 */ extern DP_DeepSpinModelDevi* DP_NewDeepSpinModelDeviWithParam( const char** c_model, @@ -779,6 +870,7 @@ extern DP_DeepSpinModelDevi* DP_NewDeepSpinModelDeviWithParam( * @brief Delete a Deep Potential Spin Model Deviation. * * @param dp Deep Potential Spin Model to delete. + * @since API version 24 */ extern void DP_DeleteDeepSpinModelDevi(DP_DeepSpinModelDevi* dp); @@ -1045,6 +1137,39 @@ void DP_DeepPotModelDeviComputeNList2(DP_DeepPotModelDevi* dp, double* atomic_energy, double* atomic_virial); +/** + * @brief Evaluate the energy, force and virial by using a DP spin model deviation + * with neighbor list. (double version) + * @version 2 + * @param[in] dp The DP model deviation to use. + * @param[in] nframes The number of frames. Only support 1 for now. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size natoms + *x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + *of size nframes x natoms x 3. + * @param[in] atype The atom types. The array should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. Pass + *NULL if pbc is not used. + * @param[in] nghost The number of ghost atoms. + * @param[in] nlist The neighbor list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameters. The array can be of size nframes x + *dim_fparam. + * @param[in] aparam The atom parameters. The array can be of size nframes x + *natoms x dim_aparam. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] force_mag Output magnetic force on each atom. The array should be + * of size natoms x 3. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of size + *natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of size + *natoms x 9. + * @warning The output arrays should be allocated before calling this function. + *Pass NULL if not required. + **/ void DP_DeepSpinModelDeviComputeNList2(DP_DeepSpinModelDevi* dp, const int nframes, const int natoms, @@ -1110,6 +1235,39 @@ void DP_DeepPotModelDeviComputeNListf2(DP_DeepPotModelDevi* dp, float* atomic_energy, float* atomic_virial); +/** + * @brief Evaluate the energy, force and virial by using a DP spin model deviation + * with neighbor list. (float version) + * @version 2 + * @param[in] dp The DP model deviation to use. + * @param[in] nframes The number of frames. Only support 1 for now. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size natoms + *x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + *of size nframes x natoms x 3. + * @param[in] atype The atom types. The array should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. Pass + *NULL if pbc is not used. + * @param[in] nghost The number of ghost atoms. + * @param[in] nlist The neighbor list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameters. The array can be of size nframes x + *dim_fparam. + * @param[in] aparam The atom parameters. The array can be of size nframes x + *natoms x dim_aparam. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] force_mag Output magnetic force on each atom. The array should be + * of size natoms x 3. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of size + *natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of size + *natoms x 9. + * @warning The output arrays should be allocated before calling this function. + *Pass NULL if not required. + **/ void DP_DeepSpinModelDeviComputeNListf2(DP_DeepSpinModelDevi* dp, const int nframes, const int natoms, @@ -1134,6 +1292,7 @@ void DP_DeepSpinModelDeviComputeNListf2(DP_DeepSpinModelDevi* dp, * @brief Get the cutoff of a DP. * @param[in] dpbase The DP to use. * @return The cutoff radius. + * @since API version 24 */ double DP_DeepBaseModelGetCutoff(DP_DeepBaseModel* dpbase); @@ -1141,6 +1300,7 @@ double DP_DeepBaseModelGetCutoff(DP_DeepBaseModel* dpbase); * @brief Get the number of types of a DP. * @param[in] dpbase The DP to use. * @return The number of types of the DP. + * @since API version 24 */ int DP_DeepBaseModelGetNumbTypes(DP_DeepBaseModel* dpbase); @@ -1148,6 +1308,7 @@ int DP_DeepBaseModelGetNumbTypes(DP_DeepBaseModel* dpbase); * @brief Get the number of types with spin of a DP. * @param[in] dpbase The DP to use. * @return The number of types with spin of the DP. + * @since API version 24 */ int DP_DeepBaseModelGetNumbTypesSpin(DP_DeepBaseModel* dpbase); @@ -1155,6 +1316,7 @@ int DP_DeepBaseModelGetNumbTypesSpin(DP_DeepBaseModel* dpbase); * @brief Get the dimension of frame parameters of a DP. * @param[in] dpbase The DP to use. * @return The dimension of frame parameters of the DP. + * @since API version 24 */ int DP_DeepBaseModelGetDimFParam(DP_DeepBaseModel* dpbase); @@ -1162,6 +1324,7 @@ int DP_DeepBaseModelGetDimFParam(DP_DeepBaseModel* dpbase); * @brief Get the dimension of atomic parameters of a DP. * @param[in] dpbase The DP to use. * @return The dimension of atomic parameters of the DP. + * @since API version 24 */ int DP_DeepBaseModelGetDimAParam(DP_DeepBaseModel* dpbase); @@ -1172,6 +1335,7 @@ int DP_DeepBaseModelGetDimAParam(DP_DeepBaseModel* dpbase); * @param[in] dpbase The DP to use. * @return true the atomic dimension of atomic parameters is nall * @return false the atomic dimension of atomic parameters is nloc + * @since API version 24 */ bool DP_DeepBaseModelIsAParamNAll(DP_DeepBaseModel* dpbase); @@ -1179,6 +1343,7 @@ bool DP_DeepBaseModelIsAParamNAll(DP_DeepBaseModel* dpbase); * @brief Get the type map of a DP. * @param[in] dpbase The DP to use. * @return The type map of the DP. + * @since API version 24 */ const char* DP_DeepBaseModelGetTypeMap(DP_DeepBaseModel* dpbase); @@ -1187,6 +1352,7 @@ const char* DP_DeepBaseModelGetTypeMap(DP_DeepBaseModel* dpbase); * * @param dpbase The DP to use. * @return const char* error message. + * @since API version 24 */ const char* DP_DeepBaseModelCheckOK(DP_DeepBaseModel* dpbase); @@ -1194,12 +1360,14 @@ const char* DP_DeepBaseModelCheckOK(DP_DeepBaseModel* dpbase); * @brief Get the dimension of frame parameters of a DP Model Deviation. * @param[in] dpbase The DP Model Deviation to use. * @return The dimension of frame parameters of the DP Model Deviation. + * @since API version 24 */ int DP_DeepBaseModelDeviGetDimFParam(DP_DeepBaseModelDevi* dpbase); /** * @brief Get the dimension of atomic parameters of a DP Model Deviation. * @param[in] dpbase The DP Model Deviation to use. * @return The dimension of atomic parameters of the DP Model Deviation. + * @since API version 24 */ int DP_DeepBaseModelDeviGetDimAParam(DP_DeepBaseModelDevi* dpbase); @@ -1210,6 +1378,7 @@ int DP_DeepBaseModelDeviGetDimAParam(DP_DeepBaseModelDevi* dpbase); * @param[in] dpbase The DP Model Deviation to use. * @return true the atomic dimension of atomic parameters is nall * @return false the atomic dimension of atomic parameters is nloc + * @since API version 24 */ bool DP_DeepBaseModelDeviIsAParamNAll(DP_DeepBaseModelDevi* dpbase); @@ -1217,6 +1386,7 @@ bool DP_DeepBaseModelDeviIsAParamNAll(DP_DeepBaseModelDevi* dpbase); * @brief Get the type map of a DP model deviation. * @param[in] dpbase The DP model deviation to use. * @return The cutoff radius. + * @since API version 24 */ double DP_DeepBaseModelDeviGetCutoff(DP_DeepBaseModelDevi* dpbase); @@ -1224,6 +1394,7 @@ double DP_DeepBaseModelDeviGetCutoff(DP_DeepBaseModelDevi* dpbase); * @brief Get the number of types of a DP model deviation. * @param[in] dpbase The DP model deviation to use. * @return The number of types of the DP model deviation. + * @since API version 24 */ int DP_DeepBaseModelDeviGetNumbTypes(DP_DeepBaseModelDevi* dpbase); @@ -1231,6 +1402,7 @@ int DP_DeepBaseModelDeviGetNumbTypes(DP_DeepBaseModelDevi* dpbase); * @brief Get the number of types with spin of a DP model deviation. * @param[in] dpbase The DP model deviation to use. * @return The number of types with spin of the DP model deviation. + * @since API version 24 */ int DP_DeepBaseModelDeviGetNumbTypesSpin(DP_DeepBaseModelDevi* dpbase); @@ -1239,6 +1411,7 @@ int DP_DeepBaseModelDeviGetNumbTypesSpin(DP_DeepBaseModelDevi* dpbase); * * @param dpbase The DP model deviation to use. * @return const char* error message. + * @since API version 24 */ const char* DP_DeepBaseModelDeviCheckOK(DP_DeepBaseModelDevi* dpbase); @@ -1357,37 +1530,42 @@ const char* DP_DeepPotModelDeviCheckOK(DP_DeepPotModelDevi* dp); // DeepSpin methods for c_api /** - * @brief Get the cutoff of a DP. - * @param[in] dp The DP to use. + * @brief Get the cutoff of a DP Spin Model. + * @param[in] dp The DP Spin Model to use. * @return The cutoff radius. + * @since API version 24 */ double DP_DeepSpinGetCutoff(DP_DeepSpin* dp); /** - * @brief Get the number of types of a DP. - * @param[in] dp The DP to use. - * @return The number of types of the DP. + * @brief Get the number of types of a DP Spin Model. + * @param[in] dp The DP Spin Model to use. + * @return The number of types of the DP Spin Model. + * @since API version 24 */ int DP_DeepSpinGetNumbTypes(DP_DeepSpin* dp); /** - * @brief Get the number of types with spin of a DP. - * @param[in] dp The DP to use. - * @return The number of types with spin of the DP. + * @brief Get the number of types with spin of a DP Spin Model. + * @param[in] dp The DP Spin Model to use. + * @return The number of types with spin of the DP Spin Model. + * @since API version 24 */ int DP_DeepSpinGetNumbTypesSpin(DP_DeepSpin* dp); /** - * @brief Get the dimension of frame parameters of a DP. - * @param[in] dp The DP to use. - * @return The dimension of frame parameters of the DP. + * @brief Get the dimension of frame parameters of a DP Spin Model. + * @param[in] dp The DP Spin Model to use. + * @return The dimension of frame parameters of the DP Spin Model. + * @since API version 24 */ int DP_DeepSpinGetDimFParam(DP_DeepSpin* dp); /** - * @brief Get the dimension of atomic parameters of a DP. - * @param[in] dp The DP to use. - * @return The dimension of atomic parameters of the DP. + * @brief Get the dimension of atomic parameters of a DP Spin Model. + * @param[in] dp The DP Spin Model to use. + * @return The dimension of atomic parameters of the DP Spin Model. + * @since API version 24 */ int DP_DeepSpinGetDimAParam(DP_DeepSpin* dp); @@ -1395,37 +1573,42 @@ int DP_DeepSpinGetDimAParam(DP_DeepSpin* dp); * @brief Check whether the atomic dimension of atomic parameters is nall * instead of nloc. * - * @param[in] dp The DP to use. + * @param[in] dp The DP Spin Model to use. * @return true the atomic dimension of atomic parameters is nall * @return false the atomic dimension of atomic parameters is nloc + * @since API version 24 */ bool DP_DeepSpinIsAParamNAll(DP_DeepSpin* dp); /** - * @brief Get the type map of a DP. - * @param[in] dp The DP to use. - * @return The type map of the DP. + * @brief Get the type map of a DP Spin Model. + * @param[in] dp The DP Spin Model to use. + * @return The type map of the DP Spin Model. + * @since API version 24 */ const char* DP_DeepSpinGetTypeMap(DP_DeepSpin* dp); /** * @brief Check if there is any exceptions throw. * - * @param dp The DP to use. + * @param dp The DP Spin Model to use. * @return const char* error message. + * @since API version 24 */ const char* DP_DeepSpinCheckOK(DP_DeepSpin* dp); /** - * @brief Get the dimension of frame parameters of a DP Model Deviation. - * @param[in] dp The DP Model Deviation to use. - * @return The dimension of frame parameters of the DP Model Deviation. + * @brief Get the dimension of frame parameters of a DP Spin Model Deviation. + * @param[in] dp The DP Spin Model Deviation to use. + * @return The dimension of frame parameters of the DP Spin Model Deviation. + * @since API version 24 */ int DP_DeepSpinModelDeviGetDimFParam(DP_DeepSpinModelDevi* dp); /** - * @brief Get the dimension of atomic parameters of a DP Model Deviation. - * @param[in] dp The DP Model Deviation to use. - * @return The dimension of atomic parameters of the DP Model Deviation. + * @brief Get the dimension of atomic parameters of a DP Spin Model Deviation. + * @param[in] dp The DP Spin Model Deviation to use. + * @return The dimension of atomic parameters of the DP Spin Model Deviation. + * @since API version 24 */ int DP_DeepSpinModelDeviGetDimAParam(DP_DeepSpinModelDevi* dp); @@ -1433,9 +1616,10 @@ int DP_DeepSpinModelDeviGetDimAParam(DP_DeepSpinModelDevi* dp); * @brief Check whether the atomic dimension of atomic parameters is nall * instead of nloc. * - * @param[in] dp The DP Model Deviation to use. + * @param[in] dp The DP Spin Model Deviation to use. * @return true the atomic dimension of atomic parameters is nall * @return false the atomic dimension of atomic parameters is nloc + * @since API version 24 */ bool DP_DeepSpinModelDeviIsAParamNAll(DP_DeepSpinModelDevi* dp); @@ -1443,6 +1627,7 @@ bool DP_DeepSpinModelDeviIsAParamNAll(DP_DeepSpinModelDevi* dp); * @brief Get the type map of a DP model deviation. * @param[in] dp The DP model deviation to use. * @return The cutoff radius. + * @since API version 24 */ double DP_DeepSpinModelDeviGetCutoff(DP_DeepSpinModelDevi* dp); @@ -1450,6 +1635,7 @@ double DP_DeepSpinModelDeviGetCutoff(DP_DeepSpinModelDevi* dp); * @brief Get the number of types of a DP model deviation. * @param[in] dp The DP model deviation to use. * @return The number of types of the DP model deviation. + * @since API version 24 */ int DP_DeepSpinModelDeviGetNumbTypes(DP_DeepSpinModelDevi* dp); @@ -1457,6 +1643,7 @@ int DP_DeepSpinModelDeviGetNumbTypes(DP_DeepSpinModelDevi* dp); * @brief Get the number of types with spin of a DP model deviation. * @param[in] dp The DP model deviation to use. * @return The number of types with spin of the DP model deviation. + * @since API version 24 */ int DP_DeepSpinModelDeviGetNumbTypesSpin(DP_DeepSpinModelDevi* dp); @@ -1465,6 +1652,7 @@ int DP_DeepSpinModelDeviGetNumbTypesSpin(DP_DeepSpinModelDevi* dp); * * @param dp The DP model deviation to use. * @return const char* error message. + * @since API version 24 */ const char* DP_DeepSpinModelDeviCheckOK(DP_DeepSpinModelDevi* dp); diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 35f67ea014..9cd51705d9 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -121,7 +121,7 @@ void PairDeepMD::compute(int eflag, int vflag) { int newton_pair = force->newton_pair; if (atom->sp_flag) { - throw std::runtime_error( + error->all(FLERR, "Pair style 'deepmd' does not support spin atoms, please use pair " "style 'deepspin' instead."); } @@ -850,7 +850,7 @@ int PairDeepMD::pack_reverse_comm(int n, int first, double *buf) { m = 0; last = first + n; if (atom->sp_flag) { - throw std::runtime_error( + error->all(FLERR, "Pair style 'deepmd' does not support spin atoms, please use pair " "style 'deepspin' instead."); } else { @@ -872,7 +872,7 @@ void PairDeepMD::unpack_reverse_comm(int n, int *list, double *buf) { m = 0; if (atom->sp_flag) { - throw std::runtime_error( + error->all(FLERR, "Pair style 'deepmd' does not support spin atoms, please use pair " "style 'deepspin' instead."); } else { diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp index aea410b284..0f2497fb72 100644 --- a/source/lmp/pair_deepspin.cpp +++ b/source/lmp/pair_deepspin.cpp @@ -133,7 +133,7 @@ void PairDeepSpin::compute(int eflag, int vflag) { } } } else { - throw std::runtime_error( + error->all(FLERR, "Pair style 'deepspin' only supports spin atoms, please use pair style " "'deepmd' instead."); } @@ -891,7 +891,7 @@ int PairDeepSpin::pack_reverse_comm(int n, int first, double *buf) { m = 0; last = first + n; if (!atom->sp_flag) { - throw std::runtime_error( + error->all(FLERR, "Pair style 'deepspin' only supports spin atoms, please use pair style " "'deepmd' instead."); } else { @@ -916,7 +916,7 @@ void PairDeepSpin::unpack_reverse_comm(int n, int *list, double *buf) { m = 0; if (!atom->sp_flag) { - throw std::runtime_error( + error->all(FLERR, "Pair style 'deepspin' only supports spin atoms, please use pair style " "'deepmd' instead."); } else { From e09bf5a96198223a73ebaa39ac633b7dc46ed95a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 7 Nov 2024 11:31:02 +0000 Subject: [PATCH 168/193] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- source/api_c/include/c_api.h | 26 +++++++++++++------------- source/lmp/pair_deepmd.cpp | 9 ++++++--- source/lmp/pair_deepspin.cpp | 9 ++++++--- 3 files changed, 25 insertions(+), 19 deletions(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index ded6e638ed..e3ba57370e 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -382,7 +382,7 @@ extern void DP_DeepPotCompute2(DP_DeepPot* dp, double* atomic_virial); /** - * @brief Evaluate the energy, force, magnetic force and virial by using a DP + * @brief Evaluate the energy, force, magnetic force and virial by using a DP * spin model. (double version) * @version 2 * @param[in] dp The DP spin model to use. @@ -401,7 +401,7 @@ extern void DP_DeepPotCompute2(DP_DeepPot* dp, * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag Output magnetic force on each atom. The array should be + * @param[out] force_mag Output magnetic force on each atom. The array should be * of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. * @param[out] atomic_energy Output atomic energy. The array should be of size @@ -487,7 +487,7 @@ extern void DP_DeepPotComputef2(DP_DeepPot* dp, * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag Output magnetic force on each atom. The array should be + * @param[out] force_mag Output magnetic force on each atom. The array should be * of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. * @param[out] atomic_energy Output atomic energy. The array should be of size @@ -561,7 +561,7 @@ extern void DP_DeepPotComputeNList2(DP_DeepPot* dp, double* atomic_virial); /** - * @brief Evaluate the energy, force and virial by using a DP spin model + * @brief Evaluate the energy, force and virial by using a DP spin model * with the neighbor list. (double version) * @version 2 * @param[in] dp The DP spin model to use. @@ -583,7 +583,7 @@ extern void DP_DeepPotComputeNList2(DP_DeepPot* dp, * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag Output magnetic force on each atom. The array should be + * @param[out] force_mag Output magnetic force on each atom. The array should be * of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. * @param[out] atomic_energy Output atomic energy. The array should be of size @@ -660,7 +660,7 @@ extern void DP_DeepPotComputeNListf2(DP_DeepPot* dp, float* atomic_virial); /** - * @brief Evaluate the energy, force and virial by using a DP spin model + * @brief Evaluate the energy, force and virial by using a DP spin model * with the neighbor list. (float version) * @version 2 * @param[in] dp The DP spin model to use. @@ -682,7 +682,7 @@ extern void DP_DeepPotComputeNListf2(DP_DeepPot* dp, * natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag Output magnetic force on each atom. The array should be + * @param[out] force_mag Output magnetic force on each atom. The array should be * of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. * @param[out] atomic_energy Output atomic energy. The array should be of size @@ -1138,8 +1138,8 @@ void DP_DeepPotModelDeviComputeNList2(DP_DeepPotModelDevi* dp, double* atomic_virial); /** - * @brief Evaluate the energy, force and virial by using a DP spin model deviation - * with neighbor list. (double version) + * @brief Evaluate the energy, force and virial by using a DP spin model + *deviation with neighbor list. (double version) * @version 2 * @param[in] dp The DP model deviation to use. * @param[in] nframes The number of frames. Only support 1 for now. @@ -1160,7 +1160,7 @@ void DP_DeepPotModelDeviComputeNList2(DP_DeepPotModelDevi* dp, *natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag Output magnetic force on each atom. The array should be + * @param[out] force_mag Output magnetic force on each atom. The array should be * of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. * @param[out] atomic_energy Output atomic energy. The array should be of size @@ -1236,8 +1236,8 @@ void DP_DeepPotModelDeviComputeNListf2(DP_DeepPotModelDevi* dp, float* atomic_virial); /** - * @brief Evaluate the energy, force and virial by using a DP spin model deviation - * with neighbor list. (float version) + * @brief Evaluate the energy, force and virial by using a DP spin model + *deviation with neighbor list. (float version) * @version 2 * @param[in] dp The DP model deviation to use. * @param[in] nframes The number of frames. Only support 1 for now. @@ -1258,7 +1258,7 @@ void DP_DeepPotModelDeviComputeNListf2(DP_DeepPotModelDevi* dp, *natoms x dim_aparam. * @param[out] energy Output energy. * @param[out] force Output force. The array should be of size natoms x 3. - * @param[out] force_mag Output magnetic force on each atom. The array should be + * @param[out] force_mag Output magnetic force on each atom. The array should be * of size natoms x 3. * @param[out] virial Output virial. The array should be of size 9. * @param[out] atomic_energy Output atomic energy. The array should be of size diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 9cd51705d9..6d12fda20a 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -121,7 +121,8 @@ void PairDeepMD::compute(int eflag, int vflag) { int newton_pair = force->newton_pair; if (atom->sp_flag) { - error->all(FLERR, + error->all( + FLERR, "Pair style 'deepmd' does not support spin atoms, please use pair " "style 'deepspin' instead."); } @@ -850,7 +851,8 @@ int PairDeepMD::pack_reverse_comm(int n, int first, double *buf) { m = 0; last = first + n; if (atom->sp_flag) { - error->all(FLERR, + error->all( + FLERR, "Pair style 'deepmd' does not support spin atoms, please use pair " "style 'deepspin' instead."); } else { @@ -872,7 +874,8 @@ void PairDeepMD::unpack_reverse_comm(int n, int *list, double *buf) { m = 0; if (atom->sp_flag) { - error->all(FLERR, + error->all( + FLERR, "Pair style 'deepmd' does not support spin atoms, please use pair " "style 'deepspin' instead."); } else { diff --git a/source/lmp/pair_deepspin.cpp b/source/lmp/pair_deepspin.cpp index 0f2497fb72..5e7d4474b9 100644 --- a/source/lmp/pair_deepspin.cpp +++ b/source/lmp/pair_deepspin.cpp @@ -133,7 +133,8 @@ void PairDeepSpin::compute(int eflag, int vflag) { } } } else { - error->all(FLERR, + error->all( + FLERR, "Pair style 'deepspin' only supports spin atoms, please use pair style " "'deepmd' instead."); } @@ -891,7 +892,8 @@ int PairDeepSpin::pack_reverse_comm(int n, int first, double *buf) { m = 0; last = first + n; if (!atom->sp_flag) { - error->all(FLERR, + error->all( + FLERR, "Pair style 'deepspin' only supports spin atoms, please use pair style " "'deepmd' instead."); } else { @@ -916,7 +918,8 @@ void PairDeepSpin::unpack_reverse_comm(int n, int *list, double *buf) { m = 0; if (!atom->sp_flag) { - error->all(FLERR, + error->all( + FLERR, "Pair style 'deepspin' only supports spin atoms, please use pair style " "'deepmd' instead."); } else { From 5f53a46ded7618c9a0b4a6c85424e3d86da1bdbf Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 22:19:54 +0800 Subject: [PATCH 169/193] update docs --- source/api_c/include/c_api.h | 18 +- source/api_c/include/deepmd.hpp | 464 +++++++++++++++++++------------- source/api_c/src/c_api.cc | 40 --- 3 files changed, 281 insertions(+), 241 deletions(-) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index e3ba57370e..b214d3c7a9 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -96,6 +96,7 @@ const char* DP_NlistCheckOK(DP_Nlist* dp); /** * @brief The deep potential base model. + * @since API version 24 **/ typedef struct DP_DeepBaseModel DP_DeepBaseModel; @@ -109,6 +110,7 @@ extern void DP_DeleteDeepBaseModel(DP_DeepBaseModel* dp); /** * @brief The deep potential base model deviation. + * @since API version 24 **/ typedef struct DP_DeepBaseModelDevi DP_DeepBaseModelDevi; @@ -561,8 +563,8 @@ extern void DP_DeepPotComputeNList2(DP_DeepPot* dp, double* atomic_virial); /** - * @brief Evaluate the energy, force and virial by using a DP spin model - * with the neighbor list. (double version) + * @brief Evaluate the energy, force, magnetic force and virial by using a DP + *spin model with the neighbor list. (double version) * @version 2 * @param[in] dp The DP spin model to use. * @param[in] nframes The number of frames. @@ -660,8 +662,8 @@ extern void DP_DeepPotComputeNListf2(DP_DeepPot* dp, float* atomic_virial); /** - * @brief Evaluate the energy, force and virial by using a DP spin model - * with the neighbor list. (float version) + * @brief Evaluate the energy, force, magnetic force and virial by using a DP + *spin model with the neighbor list. (float version) * @version 2 * @param[in] dp The DP spin model to use. * @param[in] nframes The number of frames. @@ -1138,8 +1140,8 @@ void DP_DeepPotModelDeviComputeNList2(DP_DeepPotModelDevi* dp, double* atomic_virial); /** - * @brief Evaluate the energy, force and virial by using a DP spin model - *deviation with neighbor list. (double version) + * @brief Evaluate the energy, force, magnetic force and virial by using a DP + *spin model deviation with neighbor list. (double version) * @version 2 * @param[in] dp The DP model deviation to use. * @param[in] nframes The number of frames. Only support 1 for now. @@ -1236,8 +1238,8 @@ void DP_DeepPotModelDeviComputeNListf2(DP_DeepPotModelDevi* dp, float* atomic_virial); /** - * @brief Evaluate the energy, force and virial by using a DP spin model - *deviation with neighbor list. (float version) + * @brief Evaluate the energy, force, magnetic force and virial by using a DP + *spin model deviation with neighbor list. (float version) * @version 2 * @param[in] dp The DP model deviation to use. * @param[in] nframes The number of frames. Only support 1 for now. diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 6d54cbdfa2..ee22cf7ce7 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -97,59 +97,59 @@ inline void _DP_DeepPotCompute(DP_DeepPot *dp, // support spin template -inline void _DP_DeepPotComputeSP(DP_DeepSpin *dp, - const int nframes, - const int natom, - const FPTYPE *coord, - const FPTYPE *spin, - const int *atype, - const FPTYPE *cell, - const FPTYPE *fparam, - const FPTYPE *aparam, - double *energy, - FPTYPE *force, - FPTYPE *force_mag, - FPTYPE *virial, - FPTYPE *atomic_energy, - FPTYPE *atomic_virial); +inline void _DP_DeepSpinCompute(DP_DeepSpin *dp, + const int nframes, + const int natom, + const FPTYPE *coord, + const FPTYPE *spin, + const int *atype, + const FPTYPE *cell, + const FPTYPE *fparam, + const FPTYPE *aparam, + double *energy, + FPTYPE *force, + FPTYPE *force_mag, + FPTYPE *virial, + FPTYPE *atomic_energy, + FPTYPE *atomic_virial); template <> -inline void _DP_DeepPotComputeSP(DP_DeepSpin *dp, - const int nframes, - const int natom, - const double *coord, - const double *spin, - const int *atype, - const double *cell, - const double *fparam, - const double *aparam, - double *energy, - double *force, - double *force_mag, - double *virial, - double *atomic_energy, - double *atomic_virial) { +inline void _DP_DeepSpinCompute(DP_DeepSpin *dp, + const int nframes, + const int natom, + const double *coord, + const double *spin, + const int *atype, + const double *cell, + const double *fparam, + const double *aparam, + double *energy, + double *force, + double *force_mag, + double *virial, + double *atomic_energy, + double *atomic_virial) { DP_DeepSpinCompute2(dp, nframes, natom, coord, spin, atype, cell, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); } template <> -inline void _DP_DeepPotComputeSP(DP_DeepSpin *dp, - const int nframes, - const int natom, - const float *coord, - const float *spin, - const int *atype, - const float *cell, - const float *fparam, - const float *aparam, - double *energy, - float *force, - float *force_mag, - float *virial, - float *atomic_energy, - float *atomic_virial) { +inline void _DP_DeepSpinCompute(DP_DeepSpin *dp, + const int nframes, + const int natom, + const float *coord, + const float *spin, + const int *atype, + const float *cell, + const float *fparam, + const float *aparam, + double *energy, + float *force, + float *force_mag, + float *virial, + float *atomic_energy, + float *atomic_virial) { DP_DeepSpinComputef2(dp, nframes, natom, coord, spin, atype, cell, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); @@ -219,68 +219,68 @@ inline void _DP_DeepPotComputeNList(DP_DeepPot *dp, // support spin template -inline void _DP_DeepPotComputeNListSP(DP_DeepSpin *dp, - const int nframes, - const int natom, - const FPTYPE *coord, - const FPTYPE *spin, - const int *atype, - const FPTYPE *cell, - const int nghost, - const DP_Nlist *nlist, - const int ago, - const FPTYPE *fparam, - const FPTYPE *aparam, - double *energy, - FPTYPE *force, - FPTYPE *force_mag, - FPTYPE *virial, - FPTYPE *atomic_energy, - FPTYPE *atomic_virial); +inline void _DP_DeepSpinComputeNList(DP_DeepSpin *dp, + const int nframes, + const int natom, + const FPTYPE *coord, + const FPTYPE *spin, + const int *atype, + const FPTYPE *cell, + const int nghost, + const DP_Nlist *nlist, + const int ago, + const FPTYPE *fparam, + const FPTYPE *aparam, + double *energy, + FPTYPE *force, + FPTYPE *force_mag, + FPTYPE *virial, + FPTYPE *atomic_energy, + FPTYPE *atomic_virial); template <> -inline void _DP_DeepPotComputeNListSP(DP_DeepSpin *dp, - const int nframes, - const int natom, - const double *coord, - const double *spin, - const int *atype, - const double *cell, - const int nghost, - const DP_Nlist *nlist, - const int ago, - const double *fparam, - const double *aparam, - double *energy, - double *force, - double *force_mag, - double *virial, - double *atomic_energy, - double *atomic_virial) { - DP_DeepSpinComputeNList2(dp, nframes, natom, coord, spin, atype, cell, nghost, - nlist, ago, fparam, aparam, energy, force, force_mag, - virial, atomic_energy, atomic_virial); -} - -template <> -inline void _DP_DeepPotComputeNListSP(DP_DeepSpin *dp, +inline void _DP_DeepSpinComputeNList(DP_DeepSpin *dp, const int nframes, const int natom, - const float *coord, - const float *spin, + const double *coord, + const double *spin, const int *atype, - const float *cell, + const double *cell, const int nghost, const DP_Nlist *nlist, const int ago, - const float *fparam, - const float *aparam, + const double *fparam, + const double *aparam, double *energy, - float *force, - float *force_mag, - float *virial, - float *atomic_energy, - float *atomic_virial) { + double *force, + double *force_mag, + double *virial, + double *atomic_energy, + double *atomic_virial) { + DP_DeepSpinComputeNList2(dp, nframes, natom, coord, spin, atype, cell, nghost, + nlist, ago, fparam, aparam, energy, force, force_mag, + virial, atomic_energy, atomic_virial); +} + +template <> +inline void _DP_DeepSpinComputeNList(DP_DeepSpin *dp, + const int nframes, + const int natom, + const float *coord, + const float *spin, + const int *atype, + const float *cell, + const int nghost, + const DP_Nlist *nlist, + const int ago, + const float *fparam, + const float *aparam, + double *energy, + float *force, + float *force_mag, + float *virial, + float *atomic_energy, + float *atomic_virial) { DP_DeepSpinComputeNListf2(dp, nframes, natom, coord, spin, atype, cell, nghost, nlist, ago, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); @@ -449,63 +449,63 @@ inline void _DP_DeepPotModelDeviComputeNList(DP_DeepPotModelDevi *dp, } template -inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepSpinModelDevi *dp, - const int natom, - const FPTYPE *coord, - const FPTYPE *spin, - const int *atype, - const FPTYPE *cell, - const int nghost, - const DP_Nlist *nlist, - const int ago, - const FPTYPE *fparam, - const FPTYPE *aparam, - double *energy, - FPTYPE *force, - FPTYPE *force_mag, - FPTYPE *virial, - FPTYPE *atomic_energy, - FPTYPE *atomic_virial); -template <> -inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepSpinModelDevi *dp, - const int natom, - const double *coord, - const double *spin, - const int *atype, - const double *cell, - const int nghost, - const DP_Nlist *nlist, - const int ago, - const double *fparam, - const double *aparam, - double *energy, - double *force, - double *force_mag, - double *virial, - double *atomic_energy, - double *atomic_virial) { - DP_DeepSpinModelDeviComputeNList2( - dp, 1, natom, coord, spin, atype, cell, nghost, nlist, ago, fparam, - aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); -} +inline void _DP_DeepSpinModelDeviComputeNList(DP_DeepSpinModelDevi *dp, + const int natom, + const FPTYPE *coord, + const FPTYPE *spin, + const int *atype, + const FPTYPE *cell, + const int nghost, + const DP_Nlist *nlist, + const int ago, + const FPTYPE *fparam, + const FPTYPE *aparam, + double *energy, + FPTYPE *force, + FPTYPE *force_mag, + FPTYPE *virial, + FPTYPE *atomic_energy, + FPTYPE *atomic_virial); template <> -inline void _DP_DeepPotModelDeviComputeNListSP(DP_DeepSpinModelDevi *dp, +inline void _DP_DeepSpinModelDeviComputeNList(DP_DeepSpinModelDevi *dp, const int natom, - const float *coord, - const float *spin, + const double *coord, + const double *spin, const int *atype, - const float *cell, + const double *cell, const int nghost, const DP_Nlist *nlist, const int ago, - const float *fparam, - const float *aparam, + const double *fparam, + const double *aparam, double *energy, - float *force, - float *force_mag, - float *virial, - float *atomic_energy, - float *atomic_virial) { + double *force, + double *force_mag, + double *virial, + double *atomic_energy, + double *atomic_virial) { + DP_DeepSpinModelDeviComputeNList2( + dp, 1, natom, coord, spin, atype, cell, nghost, nlist, ago, fparam, + aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); +} +template <> +inline void _DP_DeepSpinModelDeviComputeNList(DP_DeepSpinModelDevi *dp, + const int natom, + const float *coord, + const float *spin, + const int *atype, + const float *cell, + const int nghost, + const DP_Nlist *nlist, + const int ago, + const float *fparam, + const float *aparam, + double *energy, + float *force, + float *force_mag, + float *virial, + float *atomic_energy, + float *atomic_virial) { DP_DeepSpinModelDeviComputeNListf2( dp, 1, natom, coord, spin, atype, cell, nghost, nlist, ago, fparam, aparam, energy, force, force_mag, virial, atomic_energy, atomic_virial); @@ -1123,33 +1123,6 @@ class DeepPot : public DeepBaseModel { DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; - /** - * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, - *and atomic virial by using this DP with spin input. - * @param[out] ener The system energy. - * @param[out] force The force on each atom. - * @param[out] force_mag The magnetic force on each atom. - * @param[out] virial The virial. - * @param[out] atom_energy The atomic energy. - * @param[out] atom_virial The atomic virial. - * @param[in] coord The coordinates of atoms. The array should be of size - *nframes x natoms x 3. - * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should - *be of size nframes x natoms x 3. - * @param[in] atype The atom types. The list should contain natoms ints. - * @param[in] box The cell of the region. The array should be of size nframes - *x 9 (PBC) or empty (no PBC). - * @param[in] fparam The frame parameter. The array can be of size : - * nframes x dim_fparam. - * dim_fparam. Then all frames are assumed to be provided with the same - *fparam. - * @param[in] aparam The atomic parameter The array can be of size : - * nframes x natoms x dim_aparam. - * natoms x dim_aparam. Then all frames are assumed to be provided with the - *same aparam. - * @warning Natoms should not be zero when computing multiple frames. - **/ - /** * @brief Evaluate the energy, force and virial by using this DP with the *neighbor list. @@ -1471,10 +1444,9 @@ class DeepSpin : public DeepBaseModel { dpbase = (DP_DeepBaseModel *)dp; }; - // support spin /** * @brief Evaluate the energy, force, magnetic force and virial by using this - *DP with spin input. + *DP spin model. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] force_mag The magnetic force on each atom. @@ -1532,15 +1504,15 @@ class DeepSpin : public DeepBaseModel { const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeSP(dp, nframes, natoms, coord_, spin_, atype_, - box_, fparam__, aparam__, ener_, force_, - force_mag_, virial_, nullptr, nullptr); + _DP_DeepSpinCompute(dp, nframes, natoms, coord_, spin_, atype_, + box_, fparam__, aparam__, ener_, force_, + force_mag_, virial_, nullptr, nullptr); DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; /** * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, - *and atomic virial by using this DP with spin input. + *and atomic virial by using this DP spin model. * @param[out] ener The system energy. * @param[out] force The force on each atom. * @param[out] force_mag The magnetic force on each atom. @@ -1607,13 +1579,39 @@ class DeepSpin : public DeepBaseModel { const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeSP( + _DP_DeepSpinCompute( dp, nframes, natoms, coord_, spin_, atype_, box_, fparam__, aparam__, ener_, force_, force_mag_, virial_, atomic_ener_, atomic_virial_); DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; - // support spin + /** + * @brief Evaluate the energy, force, magnetic force and virial by using this + * DP spin model with the neighbor list. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9 (PBC) or empty (no PBC). + * @param[in] nghost The number of ghost atoms. + * @param[in] nlist The neighbor list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @warning Natoms should not be zero when computing multiple frames. + **/ template void compute( ENERGYVTYPE &ener, @@ -1655,14 +1653,42 @@ class DeepSpin : public DeepBaseModel { aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeNListSP(dp, nframes, natoms, coord_, spin_, - atype_, box_, nghost, lmp_list.nl, ago, - fparam__, aparam__, ener_, force_, - force_mag_, virial_, nullptr, nullptr); + _DP_DeepSpinComputeNList(dp, nframes, natoms, coord_, spin_, + atype_, box_, nghost, lmp_list.nl, ago, + fparam__, aparam__, ener_, force_, + force_mag_, virial_, nullptr, nullptr); DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); }; - // support spin + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + * and atomic virial by using this DP spin model with the neighbor list. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9 (PBC) or empty (no PBC). + * @param[in] nghost The number of ghost atoms. + * @param[in] nlist The neighbor list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + * @warning Natoms should not be zero when computing multiple frames. + **/ template void compute( ENERGYVTYPE &ener, @@ -1710,7 +1736,7 @@ class DeepSpin : public DeepBaseModel { aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotComputeNListSP( + _DP_DeepSpinComputeNList( dp, nframes, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, atomic_ener_, atomic_virial_); @@ -2428,7 +2454,32 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { aparam_nall = DP_DeepBaseModelDeviIsAParamNAll((DP_DeepBaseModelDevi *)dp); dpbase = (DP_DeepBaseModelDevi *)dp; }; - // support spin + /** + * @brief Evaluate the energy, force, magnetic force and virial by using this + * DP spin model deviation. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9 (PBC) or empty (no PBC). + * @param[in] nghost The number of ghost atoms. + * @param[in] nlist The neighbor list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + **/ template void compute( std::vector &ener, @@ -2474,7 +2525,7 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotModelDeviComputeNListSP( + _DP_DeepSpinModelDeviComputeNList( dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, nullptr, nullptr); @@ -2501,7 +2552,34 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { } }; - // support spin + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + * and atomic virial by using this DP spin model deviation. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9 (PBC) or empty (no PBC). + * @param[in] nghost The number of ghost atoms. + * @param[in] nlist The neighbor list. + * @param[in] ago Update the internal neighbour list if ago is 0. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + **/ template void compute( std::vector &ener, @@ -2554,7 +2632,7 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; - _DP_DeepPotModelDeviComputeNListSP( + _DP_DeepSpinModelDeviComputeNList( dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, atomic_ener_, atomic_virial_); diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index e42fa16e93..5f453cccd1 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -1722,26 +1722,6 @@ void DP_DeepPotModelDeviComputeNList(DP_DeepPotModelDevi* dp, force, virial, atomic_energy, atomic_virial); } -void DP_DeepSpinModelDeviComputeNListSP(DP_DeepSpinModelDevi* dp, - const int natoms, - const double* coord, - const double* spin, - const int* atype, - const double* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - double* energy, - double* force, - double* force_mag, - double* virial, - double* atomic_energy, - double* atomic_virial) { - DP_DeepSpinModelDeviComputeNList_variant( - dp, 1, natoms, coord, spin, atype, cell, nghost, nlist, ago, NULL, NULL, - energy, force, force_mag, virial, atomic_energy, atomic_virial); -} - void DP_DeepPotModelDeviComputeNListf(DP_DeepPotModelDevi* dp, const int natoms, const float* coord, @@ -1760,26 +1740,6 @@ void DP_DeepPotModelDeviComputeNListf(DP_DeepPotModelDevi* dp, force, virial, atomic_energy, atomic_virial); } -void DP_DeepSpinModelDeviComputeNListfSP(DP_DeepSpinModelDevi* dp, - const int natoms, - const float* coord, - const float* spin, - const int* atype, - const float* cell, - const int nghost, - const DP_Nlist* nlist, - const int ago, - double* energy, - float* force, - float* force_mag, - float* virial, - float* atomic_energy, - float* atomic_virial) { - DP_DeepSpinModelDeviComputeNList_variant( - dp, 1, natoms, coord, spin, atype, cell, nghost, nlist, ago, NULL, NULL, - energy, force, force_mag, virial, atomic_energy, atomic_virial); -} - void DP_DeepPotModelDeviComputeNList2(DP_DeepPotModelDevi* dp, const int nframes, const int natoms, From 223502de67482793970c444a88e54ddbebb62b26 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 7 Nov 2024 22:28:49 +0800 Subject: [PATCH 170/193] Update deepmd.hpp --- source/api_c/include/deepmd.hpp | 64 ++++++++++++++++----------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index ee22cf7ce7..37a2d89aa1 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -993,10 +993,10 @@ class DeepPot : public DeepBaseModel { } dp = DP_NewDeepPotWithParam2(model.c_str(), gpu_rank, file_content.c_str(), file_content.size()); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); - dfparam = DP_DeepBaseModelGetDimFParam((DP_DeepBaseModel *)dp); - daparam = DP_DeepBaseModelGetDimAParam((DP_DeepBaseModel *)dp); - aparam_nall = DP_DeepBaseModelIsAParamNAll((DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepPotCheckOK, dp); + dfparam = DP_DeepPotGetDimFParam(dp); + daparam = DP_DeepPotGetDimAParam(dp); + aparam_nall = DP_DeepPotIsAParamNAll(dp); dpbase = (DP_DeepBaseModel *)dp; }; @@ -1054,7 +1054,7 @@ class DeepPot : public DeepBaseModel { _DP_DeepPotCompute(dp, nframes, natoms, coord_, atype_, box_, fparam__, aparam__, ener_, force_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial @@ -1120,7 +1120,7 @@ class DeepPot : public DeepBaseModel { _DP_DeepPotCompute(dp, nframes, natoms, coord_, atype_, box_, fparam__, aparam__, ener_, force_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; /** @@ -1187,7 +1187,7 @@ class DeepPot : public DeepBaseModel { _DP_DeepPotComputeNList( dp, nframes, natoms, coord_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial @@ -1263,7 +1263,7 @@ class DeepPot : public DeepBaseModel { box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; /** * @brief Evaluate the energy, force and virial by using this DP with the @@ -1320,7 +1320,7 @@ class DeepPot : public DeepBaseModel { _DP_DeepPotComputeMixedType(dp, nframes, natoms, coord_, atype_, box_, fparam__, aparam__, ener_, force_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; /** * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial @@ -1386,7 +1386,7 @@ class DeepPot : public DeepBaseModel { _DP_DeepPotComputeMixedType( dp, nframes, natoms, coord_, atype_, box_, fparam__, aparam__, ener_, force_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepPotCheckOK, dp); }; private: @@ -1437,10 +1437,10 @@ class DeepSpin : public DeepBaseModel { } dp = DP_NewDeepSpinWithParam2(model.c_str(), gpu_rank, file_content.c_str(), file_content.size()); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); - dfparam = DP_DeepBaseModelGetDimFParam((DP_DeepBaseModel *)dp); - daparam = DP_DeepBaseModelGetDimAParam((DP_DeepBaseModel *)dp); - aparam_nall = DP_DeepBaseModelIsAParamNAll((DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepSpinCheckOK, dp); + dfparam = DP_DeepSpinGetDimFParam(dp); + daparam = DP_DeepSpinGetDimAParam(dp); + aparam_nall = DP_DeepSpinIsAParamNAll(dp); dpbase = (DP_DeepBaseModel *)dp; }; @@ -1507,7 +1507,7 @@ class DeepSpin : public DeepBaseModel { _DP_DeepSpinCompute(dp, nframes, natoms, coord_, spin_, atype_, box_, fparam__, aparam__, ener_, force_, force_mag_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepSpinCheckOK, dp); }; /** @@ -1582,7 +1582,7 @@ class DeepSpin : public DeepBaseModel { _DP_DeepSpinCompute( dp, nframes, natoms, coord_, spin_, atype_, box_, fparam__, aparam__, ener_, force_, force_mag_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepSpinCheckOK, dp); }; /** @@ -1657,7 +1657,7 @@ class DeepSpin : public DeepBaseModel { atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepSpinCheckOK, dp); }; /** @@ -1740,7 +1740,7 @@ class DeepSpin : public DeepBaseModel { dp, nframes, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepBaseModelCheckOK, (DP_DeepBaseModel *)dp); + DP_CHECK_OK(DP_DeepSpinCheckOK, dp); }; private: @@ -2015,11 +2015,11 @@ class DeepPotModelDevi : public DeepBaseModelDevi { dp = DP_NewDeepPotModelDeviWithParam( cstrings.data(), cstrings.size(), gpu_rank, c_file_contents.data(), c_file_contents.size(), size_file_contents.data()); - DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); + DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); numb_models = models.size(); - dfparam = DP_DeepBaseModelDeviGetDimFParam((DP_DeepBaseModelDevi *)dp); - daparam = DP_DeepBaseModelDeviGetDimAParam((DP_DeepBaseModelDevi *)dp); - aparam_nall = DP_DeepBaseModelDeviIsAParamNAll((DP_DeepBaseModelDevi *)dp); + dfparam = DP_DeepPotModelDeviGetDimFParam(dp); + daparam = DP_DeepPotModelDeviGetDimAParam(dp); + aparam_nall = DP_DeepPotModelDeviIsAParamNAll(dp); dpbase = (DP_DeepBaseModelDevi *)dp; }; @@ -2082,7 +2082,7 @@ class DeepPotModelDevi : public DeepBaseModelDevi { _DP_DeepPotModelDeviCompute(dp, natoms, coord_, atype_, box_, fparam__, aparam__, ener_, force_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); + DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); // reshape ener.resize(numb_models); @@ -2167,7 +2167,7 @@ class DeepPotModelDevi : public DeepBaseModelDevi { _DP_DeepPotModelDeviCompute( dp, natoms, coord_, atype_, box_, fparam__, aparam__, ener_, force_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); + DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); // reshape ener.resize(numb_models); @@ -2264,7 +2264,7 @@ class DeepPotModelDevi : public DeepBaseModelDevi { _DP_DeepPotModelDeviComputeNList( dp, natoms, coord_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); + DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); // reshape ener.resize(numb_models); @@ -2358,7 +2358,7 @@ class DeepPotModelDevi : public DeepBaseModelDevi { _DP_DeepPotModelDeviComputeNList( dp, natoms, coord_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); + DP_CHECK_OK(DP_DeepPotModelDeviCheckOK, dp); // reshape ener.resize(numb_models); @@ -2447,11 +2447,11 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { dp = DP_NewDeepSpinModelDeviWithParam( cstrings.data(), cstrings.size(), gpu_rank, c_file_contents.data(), c_file_contents.size(), size_file_contents.data()); - DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); + DP_CHECK_OK(DP_DeepSpinModelDeviCheckOK, dp); numb_models = models.size(); - dfparam = DP_DeepBaseModelDeviGetDimFParam((DP_DeepBaseModelDevi *)dp); - daparam = DP_DeepBaseModelDeviGetDimAParam((DP_DeepBaseModelDevi *)dp); - aparam_nall = DP_DeepBaseModelDeviIsAParamNAll((DP_DeepBaseModelDevi *)dp); + dfparam = DP_DeepSpinModelDeviGetDimFParam(dp); + daparam = DP_DeepSpinModelDeviGetDimAParam(dp); + aparam_nall = DP_DeepSpinModelDeviIsAParamNAll(dp); dpbase = (DP_DeepBaseModelDevi *)dp; }; /** @@ -2529,7 +2529,7 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, nullptr, nullptr); - DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); + DP_CHECK_OK(DP_DeepSpinModelDeviCheckOK, dp); // reshape ener.resize(numb_models); force.resize(numb_models); @@ -2636,7 +2636,7 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { dp, natoms, coord_, spin_, atype_, box_, nghost, lmp_list.nl, ago, fparam__, aparam__, ener_, force_, force_mag_, virial_, atomic_ener_, atomic_virial_); - DP_CHECK_OK(DP_DeepBaseModelDeviCheckOK, (DP_DeepBaseModelDevi *)dp); + DP_CHECK_OK(DP_DeepSpinModelDeviCheckOK, dp); // reshape ener.resize(numb_models); force.resize(numb_models); From 121509764e9f6b6c663eff2a43cc88267982fb06 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 8 Nov 2024 19:46:20 +0800 Subject: [PATCH 171/193] add uts --- source/api_c/tests/test_deepspin_a.cc | 325 ++++++++++++++++++++++ source/api_c/tests/test_deepspin_a_hpp.cc | 239 ++++++++++++++++ 2 files changed, 564 insertions(+) create mode 100644 source/api_c/tests/test_deepspin_a.cc create mode 100644 source/api_c/tests/test_deepspin_a_hpp.cc diff --git a/source/api_c/tests/test_deepspin_a.cc b/source/api_c/tests/test_deepspin_a.cc new file mode 100644 index 0000000000..2f0c0d52fd --- /dev/null +++ b/source/api_c/tests/test_deepspin_a.cc @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include + +#include +#include + +#include "c_api.h" + +class TestInferDeepSpinA : public ::testing::Test { + protected: + double coord[12] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + float coordf[12] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + double spin[12] = {0., 0., 1.2737, 0., 0., 1.2737, 0., 0., 0., 0., 0., 0.}; + float spinf[12] = {0., 0., 1.2737, 0., 0., 1.2737, 0., 0., 0., 0., 0., 0.}; + int atype[4] = {0, 0, 1, 1}; + double box[9] = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; + float boxf[9] = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; + std::vector expected_e = {-7.314365618560289, -7.313531316181837, + -2.8980532245013997, -2.897373810282277}; + std::vector expected_f = { + 0.0275132293555514, -0.0112057401883111, -0.0212278132621243, + -0.0229926640905535, 0.0114378553363334, 0.019670014885563, + 0.0086502856137601, 0.0088926283192558, -0.0127014507822769, + -0.013170850878758, -0.009124743467278, 0.0142592491588383}; + std::vector expected_fm = { + 0.0066245455049449, -0.0023055088004378, 0.0294608578045521, + -0.0041979452385972, 0.0025775020220167, 0.0316295420619988, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + int natoms; + double expected_tot_e; + // std::vector expected_tot_v; + + DP_DeepSpin* dp; + + void SetUp() override { + const char* file_name = "../../tests/infer/deepspin_nlist.pbtxt"; + const char* model_file = "deepspin_nlist.pb"; + DP_ConvertPbtxtToPb(file_name, model_file); + + dp = DP_NewDeepSpin(model_file); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); + expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); + for (int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + // for (int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } + }; + + void TearDown() override { + remove("deepspin_nlist.pb"); + DP_DeleteDeepSpin(dp); + }; +}; + +TEST_F(TestInferDeepSpinA, double_infer) { + double* ener_ = new double; + double* force_ = new double[natoms * 3]; + double* force_mag_ = new double[natoms * 3]; + double* virial_ = new double[9]; + double* atomic_ener_ = new double[natoms]; + double* atomic_virial_ = new double[natoms * 9]; + + DP_DeepSpinCompute2(dp, 1, natoms, coord, spin, atype, box, nullptr, nullptr, + ener_, force_, force_mag_, virial_, atomic_ener_, + atomic_virial_); + + double ener = *ener_; + std::vector force(force_, force_ + natoms * 3); + std::vector force_mag(force_mag_, force_mag_ + natoms * 3); + // std::vector virial(virial_, virial_ + 9); + std::vector atomic_ener(atomic_ener_, atomic_ener_ + natoms); + // std::vector atomic_virial(atomic_virial_, + // atomic_virial_ + natoms * 9); + + EXPECT_LT(fabs(ener - expected_tot_e), 1e-10); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), 1e-10); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), 1e-10); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), 1e-10); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atomic_ener[ii] - expected_e[ii]), 1e-10); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atomic_virial[ii] - expected_v[ii]), 1e-10); + // } + + delete ener_; + delete[] force_; + delete[] force_mag_; + delete[] virial_; + delete[] atomic_ener_; + delete[] atomic_virial_; +} + +TEST_F(TestInferDeepSpinA, float_infer) { + double* ener_ = new double; + float* force_ = new float[natoms * 3]; + float* force_mag_ = new float[natoms * 3]; + float* virial_ = new float[9]; + float* atomic_ener_ = new float[natoms]; + float* atomic_virial_ = new float[natoms * 9]; + + DP_DeepSpinComputef2(dp, 1, natoms, coordf, spinf, atype, boxf, nullptr, + nullptr, ener_, force_, force_mag_, virial_, + atomic_ener_, atomic_virial_); + + double ener = *ener_; + std::vector force(force_, force_ + natoms * 3); + std::vector force_mag(force_mag_, force_mag_ + natoms * 3); + // std::vector virial(virial_, virial_ + 9); + std::vector atomic_ener(atomic_ener_, atomic_ener_ + natoms); + // std::vector atomic_virial(atomic_virial_, + // atomic_virial_ + natoms * 9); + + EXPECT_LT(fabs(ener - expected_tot_e), 1e-6); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), 1e-6); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), 1e-6); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), 1e-6); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atomic_ener[ii] - expected_e[ii]), 1e-5); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atomic_virial[ii] - expected_v[ii]), 1e-6); + // } + + delete ener_; + delete[] force_; + delete[] force_mag_; + delete[] virial_; + delete[] atomic_ener_; + delete[] atomic_virial_; +} + +TEST_F(TestInferDeepSpinA, cutoff) { + double cutoff = DP_DeepSpinGetCutoff(dp); + EXPECT_EQ(cutoff, 6.0); +} + +TEST_F(TestInferDeepSpinA, numb_types) { + int numb_types = DP_DeepSpinGetNumbTypes(dp); + EXPECT_EQ(numb_types, 2); +} + +TEST_F(TestInferDeepSpinA, numb_types_spin) { + int numb_types_spin = DP_DeepSpinGetNumbTypesSpin(dp); + EXPECT_EQ(numb_types_spin, 1); +} + +TEST_F(TestInferDeepSpinA, type_map) { + const char* type_map = DP_DeepSpinGetTypeMap(dp); + char expected_type_map[] = "O H"; + EXPECT_EQ(strcmp(type_map, expected_type_map), 0); + DP_DeleteChar(type_map); +} + +class TestInferDeepSpinANoPBC : public ::testing::Test { + protected: + double coord[12] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + float coordf[12] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + double spin[12] = {0., 0., 1.2737, 0., 0., 1.2737, 0., 0., 0., 0., 0., 0.}; + float spinf[12] = {0., 0., 1.2737, 0., 0., 1.2737, 0., 0., 0., 0., 0., 0.}; + int atype[4] = {0, 0, 1, 1}; + std::vector expected_e = {-7.313160384523243, -7.312173646552338, + -2.8984477845267067, -2.8984477845267067}; + std::vector expected_f = { + 0.0277100137316238, -0.0116082489956803, -0.0211484273275705, + -0.0277100137316238, 0.0116082489956803, 0.0211484273275705, + 0.0097588349924651, 0.0091168063745397, -0.0133541952528469, + -0.0097588349924651, -0.0091168063745397, 0.0133541952528469}; + std::vector expected_fm = { + 0.0058990325687816, -0.0024712163463815, 0.0296682261295907, + -0.0060028470719556, 0.0025147062058193, 0.0321884178873188, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + int natoms; + double expected_tot_e; + // std::vector expected_tot_v; + + DP_DeepSpin* dp; + + void SetUp() override { + const char* file_name = "../../tests/infer/deepspin_nlist.pbtxt"; + const char* model_file = "deepspin_nlist.pb"; + DP_ConvertPbtxtToPb(file_name, model_file); + + dp = DP_NewDeepSpin(model_file); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); + expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); + for (int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + // for (int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } + }; + + void TearDown() override { + remove("deepspin_nlist.pb"); + DP_DeleteDeepSpin(dp); + }; +}; + +TEST_F(TestInferDeepSpinANoPBC, double_infer) { + double* ener_ = new double; + double* force_ = new double[natoms * 3]; + double* force_mag_ = new double[natoms * 3]; + double* virial_ = new double[9]; + double* atomic_ener_ = new double[natoms]; + double* atomic_virial_ = new double[natoms * 9]; + + DP_DeepSpinCompute2(dp, 1, natoms, coord, spin, atype, nullptr, nullptr, + nullptr, ener_, force_, force_mag_, virial_, atomic_ener_, + atomic_virial_); + + double ener = *ener_; + std::vector force(force_, force_ + natoms * 3); + std::vector force_mag(force_mag_, force_mag_ + natoms * 3); + // std::vector virial(virial_, virial_ + 9); + std::vector atomic_ener(atomic_ener_, atomic_ener_ + natoms); + // std::vector atomic_virial(atomic_virial_, + // atomic_virial_ + natoms * 9); + + EXPECT_LT(fabs(ener - expected_tot_e), 1e-10); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), 1e-10); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), 1e-10); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), 1e-10); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atomic_ener[ii] - expected_e[ii]), 1e-10); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atomic_virial[ii] - expected_v[ii]), 1e-10); + // } + + delete ener_; + delete[] force_; + delete[] force_mag_; + delete[] virial_; + delete[] atomic_ener_; + delete[] atomic_virial_; +} + +TEST_F(TestInferDeepSpinANoPBC, float_infer) { + double* ener_ = new double; + float* force_ = new float[natoms * 3]; + float* force_mag_ = new float[natoms * 3]; + float* virial_ = new float[9]; + float* atomic_ener_ = new float[natoms]; + float* atomic_virial_ = new float[natoms * 9]; + + DP_DeepSpinComputef2(dp, 1, natoms, coordf, spinf, atype, nullptr, nullptr, + nullptr, ener_, force_, force_mag_, virial_, + atomic_ener_, atomic_virial_); + + double ener = *ener_; + std::vector force(force_, force_ + natoms * 3); + std::vector force_mag(force_mag_, force_mag_ + natoms * 3); + // std::vector virial(virial_, virial_ + 9); + std::vector atomic_ener(atomic_ener_, atomic_ener_ + natoms); + // std::vector atomic_virial(atomic_virial_, + // atomic_virial_ + natoms * 9); + + EXPECT_LT(fabs(ener - expected_tot_e), 1e-6); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), 1e-6); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), 1e-6); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), 1e-6); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atomic_ener[ii] - expected_e[ii]), 1e-5); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atomic_virial[ii] - expected_v[ii]), 1e-6); + // } + + delete ener_; + delete[] force_; + delete[] force_mag_; + delete[] virial_; + delete[] atomic_ener_; + delete[] atomic_virial_; +} diff --git a/source/api_c/tests/test_deepspin_a_hpp.cc b/source/api_c/tests/test_deepspin_a_hpp.cc new file mode 100644 index 0000000000..36f8d7c289 --- /dev/null +++ b/source/api_c/tests/test_deepspin_a_hpp.cc @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include + +#include +#include +#include +#include + +#include "deepmd.hpp" +#include "test_utils.h" + +template +class TestInferDeepSpinAHPP : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, + 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 0, 1, 1}; + std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; + std::vector expected_e = {-7.314365618560289, -7.313531316181837, + -2.8980532245013997, -2.897373810282277}; + std::vector expected_f = { + 0.0275132293555514, -0.0112057401883111, -0.0212278132621243, + -0.0229926640905535, 0.0114378553363334, 0.019670014885563, + 0.0086502856137601, 0.0088926283192558, -0.0127014507822769, + -0.013170850878758, -0.009124743467278, 0.0142592491588383}; + std::vector expected_fm = { + 0.0066245455049449, -0.0023055088004378, 0.0294608578045521, + -0.0041979452385972, 0.0025775020220167, 0.0316295420619988, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + unsigned int natoms; + double expected_tot_e; + // std::vector expected_tot_v; + + deepmd::hpp::DeepSpin dp; + + void SetUp() override { + std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; + deepmd::hpp::convert_pbtxt_to_pb("../../tests/infer/deepspin_nlist.pbtxt", + "deepspin_nlist.pb"); + + dp.init("deepspin_nlist.pb"); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); + expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); + for (unsigned int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + // for (unsigned int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } + }; + + void TearDown() override { remove("deepspin_nlist.pb"); }; +}; + +TYPED_TEST_SUITE(TestInferDeepSpinAHPP, ValueTypes); + +TYPED_TEST(TestInferDeepSpinAHPP, cpu_build_nlist) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepSpinAHPP, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + EXPECT_EQ(atom_ener.size(), natoms); + // EXPECT_EQ(atom_vir.size(), natoms * 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepSpinAHPP, print_summary) { + deepmd::hpp::DeepSpin& dp = this->dp; + dp.print_summary(""); +} + +template +class TestInferDeepSpinANoPbcHPP : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, + 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 0, 1, 1}; + std::vector box = {}; + std::vector expected_e = {-7.313160384523243, -7.312173646552338, + -2.8984477845267067, + -2.8984477845267067}; + std::vector expected_f = { + 0.0277100137316238, -0.0116082489956803, -0.0211484273275705, + -0.0277100137316238, 0.0116082489956803, 0.0211484273275705, + 0.0097588349924651, 0.0091168063745397, -0.0133541952528469, + -0.0097588349924651, -0.0091168063745397, 0.0133541952528469}; + std::vector expected_fm = { + 0.0058990325687816, -0.0024712163463815, 0.0296682261295907, + -0.0060028470719556, 0.0025147062058193, 0.0321884178873188, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + unsigned int natoms; + double expected_tot_e; + // std::vector expected_tot_v; + + deepmd::hpp::DeepSpin dp; + + void SetUp() override { + std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; + deepmd::hpp::convert_pbtxt_to_pb(file_name, "deepspin_nlist.pb"); + + dp.init("deepspin_nlist.pb"); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); + expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); + for (unsigned int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + // for (unsigned int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } + }; + + void TearDown() override { remove("deepspin_nlist.pb"); }; +}; + +TYPED_TEST_SUITE(TestInferDeepSpinANoPbcHPP, ValueTypes); + +TYPED_TEST(TestInferDeepSpinANoPbcHPP, cpu_build_nlist) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (unsigned int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (unsigned int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (unsigned int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} From 292a68fe217467d87b7e51c9035cd578b8827e2b Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 8 Nov 2024 19:57:57 +0800 Subject: [PATCH 172/193] Update test_deepspin_a_hpp.cc --- source/api_c/tests/test_deepspin_a_hpp.cc | 41 +++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/source/api_c/tests/test_deepspin_a_hpp.cc b/source/api_c/tests/test_deepspin_a_hpp.cc index 36f8d7c289..493f1dbd0e 100644 --- a/source/api_c/tests/test_deepspin_a_hpp.cc +++ b/source/api_c/tests/test_deepspin_a_hpp.cc @@ -237,3 +237,44 @@ TYPED_TEST(TestInferDeepSpinANoPbcHPP, cpu_build_nlist) { // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); // } } + +TYPED_TEST(TestInferDeepSpinANoPbcHPP, cpu_lmp_nlist) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + std::vector > nlist_data = {{1}, {0}, {3}, {2}}; + std::vector ilist(natoms), numneigh(natoms); + std::vector firstneigh(natoms); + deepmd::hpp::InputNlist inlist(natoms, &ilist[0], &numneigh[0], + &firstneigh[0]); + deepmd::hpp::convert_nlist(inlist, nlist_data); + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box, 0, inlist, + 0); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} From 665f00174931e24f4ddbc40014b63032c46da820 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 8 Nov 2024 22:14:35 +0800 Subject: [PATCH 173/193] update uts --- source/api_c/tests/test_deepspin_a.cc | 184 ++++++++++++---------- source/api_c/tests/test_deepspin_a_hpp.cc | 156 +++++++++++++----- 2 files changed, 221 insertions(+), 119 deletions(-) diff --git a/source/api_c/tests/test_deepspin_a.cc b/source/api_c/tests/test_deepspin_a.cc index 2f0c0d52fd..0852418b19 100644 --- a/source/api_c/tests/test_deepspin_a.cc +++ b/source/api_c/tests/test_deepspin_a.cc @@ -8,27 +8,47 @@ class TestInferDeepSpinA : public ::testing::Test { protected: - double coord[12] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, - 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; - float coordf[12] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, - 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; - double spin[12] = {0., 0., 1.2737, 0., 0., 1.2737, 0., 0., 0., 0., 0., 0.}; - float spinf[12] = {0., 0., 1.2737, 0., 0., 1.2737, 0., 0., 0., 0., 0., 0.}; - int atype[4] = {0, 0, 1, 1}; + double coord[18] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, 00.25, 3.32, 1.68, + 3.36, 3.00, 1.81, 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + float coordf[18] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, 00.25, 3.32, 1.68, + 3.36, 3.00, 1.81, 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + double spin[18] = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; + float spinf[18] = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; + int atype[6] = {0, 1, 1, 0, 1, 1}; double box[9] = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; float boxf[9] = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; - std::vector expected_e = {-7.314365618560289, -7.313531316181837, - -2.8980532245013997, -2.897373810282277}; + std::vector expected_e = {-5.835211567762678, -5.071189078159807, + -5.044361601406714, -5.582324154346981, + -5.059906899269188, -5.074135576182056}; std::vector expected_f = { - 0.0275132293555514, -0.0112057401883111, -0.0212278132621243, - -0.0229926640905535, 0.0114378553363334, 0.019670014885563, - 0.0086502856137601, 0.0088926283192558, -0.0127014507822769, - -0.013170850878758, -0.009124743467278, 0.0142592491588383}; + -0.0619881702551019, 0.0646720543680939, 0.2137632336140025, + 0.037800173877136, -0.096327623008356, -0.1531911892384847, + -0.112204927558682, 0.0299145670766557, -0.0589474826303666, + 0.2278904556868233, 0.0382061907026398, 0.0888060647788163, + -0.0078898845686437, 0.0019385598635839, -0.0791616129664364, + -0.083607647181527, -0.0384037490026167, -0.0112690135575317}; std::vector expected_fm = { - 0.0066245455049449, -0.0023055088004378, 0.0294608578045521, - -0.0041979452385972, 0.0025775020220167, 0.0316295420619988, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + -3.0778301386623275, + -1.3135930534661662, + -0.8332043979367366, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + -0.5452347545527696, + -0.2051506559632127, + -0.4908015055951312, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + }; int natoms; double expected_tot_e; // std::vector expected_tot_v; @@ -36,11 +56,7 @@ class TestInferDeepSpinA : public ::testing::Test { DP_DeepSpin* dp; void SetUp() override { - const char* file_name = "../../tests/infer/deepspin_nlist.pbtxt"; - const char* model_file = "deepspin_nlist.pb"; - DP_ConvertPbtxtToPb(file_name, model_file); - - dp = DP_NewDeepSpin(model_file); + dp = DP_NewDeepSpin("../../tests/infer/deeppot_dpa_spin.pth"); natoms = expected_e.size(); EXPECT_EQ(natoms * 3, expected_f.size()); @@ -59,10 +75,7 @@ class TestInferDeepSpinA : public ::testing::Test { // } }; - void TearDown() override { - remove("deepspin_nlist.pb"); - DP_DeleteDeepSpin(dp); - }; + void TearDown() override { DP_DeleteDeepSpin(dp); }; }; TEST_F(TestInferDeepSpinA, double_infer) { @@ -179,60 +192,73 @@ TEST_F(TestInferDeepSpinA, type_map) { class TestInferDeepSpinANoPBC : public ::testing::Test { protected: - double coord[12] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, - 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; - float coordf[12] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, - 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; - double spin[12] = {0., 0., 1.2737, 0., 0., 1.2737, 0., 0., 0., 0., 0., 0.}; - float spinf[12] = {0., 0., 1.2737, 0., 0., 1.2737, 0., 0., 0., 0., 0., 0.}; - int atype[4] = {0, 0, 1, 1}; - std::vector expected_e = {-7.313160384523243, -7.312173646552338, - -2.8984477845267067, -2.8984477845267067}; - std::vector expected_f = { - 0.0277100137316238, -0.0116082489956803, -0.0211484273275705, - -0.0277100137316238, 0.0116082489956803, 0.0211484273275705, - 0.0097588349924651, 0.0091168063745397, -0.0133541952528469, - -0.0097588349924651, -0.0091168063745397, 0.0133541952528469}; - std::vector expected_fm = { - 0.0058990325687816, -0.0024712163463815, 0.0296682261295907, - -0.0060028470719556, 0.0025147062058193, 0.0321884178873188, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; - int natoms; - double expected_tot_e; - // std::vector expected_tot_v; - - DP_DeepSpin* dp; - - void SetUp() override { - const char* file_name = "../../tests/infer/deepspin_nlist.pbtxt"; - const char* model_file = "deepspin_nlist.pb"; - DP_ConvertPbtxtToPb(file_name, model_file); - - dp = DP_NewDeepSpin(model_file); - - natoms = expected_e.size(); - EXPECT_EQ(natoms * 3, expected_f.size()); - EXPECT_EQ(natoms * 3, expected_fm.size()); - // EXPECT_EQ(natoms * 9, expected_v.size()); - expected_tot_e = 0.; - // expected_tot_v.resize(9); - // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); - for (int ii = 0; ii < natoms; ++ii) { - expected_tot_e += expected_e[ii]; - } - // for (int ii = 0; ii < natoms; ++ii) { - // for (int dd = 0; dd < 9; ++dd) { - // expected_tot_v[dd] += expected_v[ii * 9 + dd]; - // } - // } - }; - - void TearDown() override { - remove("deepspin_nlist.pb"); - DP_DeleteDeepSpin(dp); - }; + double coord[18] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, 00.25, 3.32, 1.68, + 3.36, 3.00, 1.81, 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + float coordf[18] = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, 00.25, 3.32, 1.68, + 3.36, 3.00, 1.81, 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + double spin[18] = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; + float spinf[18] = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; + int atype[6] = {0, 1, 1, 0, 1, 1}; + std::vector expected_e = {-5.921669893870771, -5.1676693791758685, + -5.205933794558385, -5.58688965168251, + -5.080322972018686, -5.08213772482076} }; +std::vector expected_f = { + -0.2929142244191496, 0.0801070990501456, 0.148216178514704, + 0.2929142244191503, -0.0801070990501454, -0.1482161785147037, + -0.2094984819251435, 0.0241594118950041, -0.0215199116994508, + 0.3068843038300324, -0.001620530344866, 0.1508093841389746, + -0.0122719879278721, 0.0186341247897136, -0.1137104245023705, + -0.0851138339770169, -0.0411730063398516, -0.0155790479371533}; +std::vector expected_fm = {-1.5298530476860008, + 0.0071315024546899, + 0.0650492472558729, + 0., + 0., + 0., + 0., + 0., + 0., + -0.6212052813442365, + -0.2290265978320395, + -0.5101405083352206, + 0., + 0., + 0., + 0., + 0., + 0.}; +int natoms; +double expected_tot_e; +// std::vector expected_tot_v; + +DP_DeepSpin* dp; + +void SetUp() override { + dp = DP_NewDeepSpin("../../tests/infer/deeppot_dpa_spin.pth"); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); + expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); + for (int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + // for (int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } +}; + +void TearDown() override { DP_DeleteDeepSpin(dp); }; +} +; TEST_F(TestInferDeepSpinANoPBC, double_infer) { double* ener_ = new double; diff --git a/source/api_c/tests/test_deepspin_a_hpp.cc b/source/api_c/tests/test_deepspin_a_hpp.cc index 493f1dbd0e..701d517690 100644 --- a/source/api_c/tests/test_deepspin_a_hpp.cc +++ b/source/api_c/tests/test_deepspin_a_hpp.cc @@ -13,23 +13,42 @@ template class TestInferDeepSpinAHPP : public ::testing::Test { protected: std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; - std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, - 0., 0., 0., 0., 0., 0.}; - std::vector atype = {0, 0, 1, 1}; + std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 1, 1, 0, 1, 1}; std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; - std::vector expected_e = {-7.314365618560289, -7.313531316181837, - -2.8980532245013997, -2.897373810282277}; + std::vector expected_e = {-5.835211567762678, -5.071189078159807, + -5.044361601406714, -5.582324154346981, + -5.059906899269188, -5.074135576182056}; std::vector expected_f = { - 0.0275132293555514, -0.0112057401883111, -0.0212278132621243, - -0.0229926640905535, 0.0114378553363334, 0.019670014885563, - 0.0086502856137601, 0.0088926283192558, -0.0127014507822769, - -0.013170850878758, -0.009124743467278, 0.0142592491588383}; + -0.0619881702551019, 0.0646720543680939, 0.2137632336140025, + 0.037800173877136, -0.096327623008356, -0.1531911892384847, + -0.112204927558682, 0.0299145670766557, -0.0589474826303666, + 0.2278904556868233, 0.0382061907026398, 0.0888060647788163, + -0.0078898845686437, 0.0019385598635839, -0.0791616129664364, + -0.083607647181527, -0.0384037490026167, -0.0112690135575317}; std::vector expected_fm = { - 0.0066245455049449, -0.0023055088004378, 0.0294608578045521, - -0.0041979452385972, 0.0025775020220167, 0.0316295420619988, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + -3.0778301386623275, + -1.3135930534661662, + -0.8332043979367366, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + -0.5452347545527696, + -0.2051506559632127, + -0.4908015055951312, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + }; unsigned int natoms; double expected_tot_e; // std::vector expected_tot_v; @@ -37,11 +56,7 @@ class TestInferDeepSpinAHPP : public ::testing::Test { deepmd::hpp::DeepSpin dp; void SetUp() override { - std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; - deepmd::hpp::convert_pbtxt_to_pb("../../tests/infer/deepspin_nlist.pbtxt", - "deepspin_nlist.pb"); - - dp.init("deepspin_nlist.pb"); + dp.init("../../tests/infer/deeppot_dpa_spin.pth"); natoms = expected_e.size(); EXPECT_EQ(natoms * 3, expected_f.size()); @@ -60,7 +75,7 @@ class TestInferDeepSpinAHPP : public ::testing::Test { // } }; - void TearDown() override { remove("deepspin_nlist.pb"); }; + void TearDown() override {}; }; TYPED_TEST_SUITE(TestInferDeepSpinAHPP, ValueTypes); @@ -152,24 +167,40 @@ template class TestInferDeepSpinANoPbcHPP : public ::testing::Test { protected: std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; - std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, - 0., 0., 0., 0., 0., 0.}; - std::vector atype = {0, 0, 1, 1}; + std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 1, 1, 0, 1, 1}; std::vector box = {}; - std::vector expected_e = {-7.313160384523243, -7.312173646552338, - -2.8984477845267067, - -2.8984477845267067}; + std::vector expected_e = {-5.921669893870771, -5.1676693791758685, + -5.205933794558385, -5.58688965168251, + -5.080322972018686, -5.08213772482076}; std::vector expected_f = { - 0.0277100137316238, -0.0116082489956803, -0.0211484273275705, - -0.0277100137316238, 0.0116082489956803, 0.0211484273275705, - 0.0097588349924651, 0.0091168063745397, -0.0133541952528469, - -0.0097588349924651, -0.0091168063745397, 0.0133541952528469}; - std::vector expected_fm = { - 0.0058990325687816, -0.0024712163463815, 0.0296682261295907, - -0.0060028470719556, 0.0025147062058193, 0.0321884178873188, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + -0.2929142244191496, 0.0801070990501456, 0.148216178514704, + 0.2929142244191503, -0.0801070990501454, -0.1482161785147037, + -0.2094984819251435, 0.0241594118950041, -0.0215199116994508, + 0.3068843038300324, -0.001620530344866, 0.1508093841389746, + -0.0122719879278721, 0.0186341247897136, -0.1137104245023705, + -0.0851138339770169, -0.0411730063398516, -0.0155790479371533}; + std::vector expected_fm = {-1.5298530476860008, + 0.0071315024546899, + 0.0650492472558729, + 0., + 0., + 0., + 0., + 0., + 0., + -0.6212052813442365, + -0.2290265978320395, + -0.5101405083352206, + 0., + 0., + 0., + 0., + 0., + 0.}; unsigned int natoms; double expected_tot_e; // std::vector expected_tot_v; @@ -177,10 +208,7 @@ class TestInferDeepSpinANoPbcHPP : public ::testing::Test { deepmd::hpp::DeepSpin dp; void SetUp() override { - std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; - deepmd::hpp::convert_pbtxt_to_pb(file_name, "deepspin_nlist.pb"); - - dp.init("deepspin_nlist.pb"); + dp.init("../../tests/infer/deeppot_dpa_spin.pth"); natoms = expected_e.size(); EXPECT_EQ(natoms * 3, expected_f.size()); @@ -199,7 +227,7 @@ class TestInferDeepSpinANoPbcHPP : public ::testing::Test { // } }; - void TearDown() override { remove("deepspin_nlist.pb"); }; + void TearDown() override {}; }; TYPED_TEST_SUITE(TestInferDeepSpinANoPbcHPP, ValueTypes); @@ -254,7 +282,9 @@ TYPED_TEST(TestInferDeepSpinANoPbcHPP, cpu_lmp_nlist) { deepmd::hpp::DeepSpin& dp = this->dp; double ener; std::vector force, force_mag, virial; - std::vector > nlist_data = {{1}, {0}, {3}, {2}}; + std::vector > nlist_data = { + {1, 2, 3, 4, 5}, {0, 2, 3, 4, 5}, {0, 1, 3, 4, 5}, + {0, 1, 2, 4, 5}, {0, 1, 2, 3, 5}, {0, 1, 2, 3, 4}}; std::vector ilist(natoms), numneigh(natoms); std::vector firstneigh(natoms); deepmd::hpp::InputNlist inlist(natoms, &ilist[0], &numneigh[0], @@ -278,3 +308,49 @@ TYPED_TEST(TestInferDeepSpinANoPbcHPP, cpu_lmp_nlist) { // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); // } } + +TYPED_TEST(TestInferDeepSpinANoPbcHPP, cpu_lmp_nlist_atomic) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + std::vector > nlist_data = { + {1, 2, 3, 4, 5}, {0, 2, 3, 4, 5}, {0, 1, 3, 4, 5}, + {0, 1, 2, 4, 5}, {0, 1, 2, 3, 5}, {0, 1, 2, 3, 4}}; + std::vector ilist(natoms), numneigh(natoms); + std::vector firstneigh(natoms); + deepmd::hpp::InputNlist inlist(natoms, &ilist[0], &numneigh[0], + &firstneigh[0]); + deepmd::hpp::convert_nlist(inlist, nlist_data); + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box, 0, inlist, 0); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} From 58c15eda2ca1c5603f0262100a7f5f729cf200e2 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 8 Nov 2024 22:16:12 +0800 Subject: [PATCH 174/193] Update test_deepspin_a.cc --- source/api_c/tests/test_deepspin_a.cc | 108 +++++++++++++------------- 1 file changed, 53 insertions(+), 55 deletions(-) diff --git a/source/api_c/tests/test_deepspin_a.cc b/source/api_c/tests/test_deepspin_a.cc index 0852418b19..2e74fa1659 100644 --- a/source/api_c/tests/test_deepspin_a.cc +++ b/source/api_c/tests/test_deepspin_a.cc @@ -203,62 +203,60 @@ class TestInferDeepSpinANoPBC : public ::testing::Test { int atype[6] = {0, 1, 1, 0, 1, 1}; std::vector expected_e = {-5.921669893870771, -5.1676693791758685, -5.205933794558385, -5.58688965168251, - -5.080322972018686, -5.08213772482076} -}; -std::vector expected_f = { - -0.2929142244191496, 0.0801070990501456, 0.148216178514704, - 0.2929142244191503, -0.0801070990501454, -0.1482161785147037, - -0.2094984819251435, 0.0241594118950041, -0.0215199116994508, - 0.3068843038300324, -0.001620530344866, 0.1508093841389746, - -0.0122719879278721, 0.0186341247897136, -0.1137104245023705, - -0.0851138339770169, -0.0411730063398516, -0.0155790479371533}; -std::vector expected_fm = {-1.5298530476860008, - 0.0071315024546899, - 0.0650492472558729, - 0., - 0., - 0., - 0., - 0., - 0., - -0.6212052813442365, - -0.2290265978320395, - -0.5101405083352206, - 0., - 0., - 0., - 0., - 0., - 0.}; -int natoms; -double expected_tot_e; -// std::vector expected_tot_v; - -DP_DeepSpin* dp; - -void SetUp() override { - dp = DP_NewDeepSpin("../../tests/infer/deeppot_dpa_spin.pth"); - - natoms = expected_e.size(); - EXPECT_EQ(natoms * 3, expected_f.size()); - EXPECT_EQ(natoms * 3, expected_fm.size()); - // EXPECT_EQ(natoms * 9, expected_v.size()); - expected_tot_e = 0.; - // expected_tot_v.resize(9); - // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); - for (int ii = 0; ii < natoms; ++ii) { - expected_tot_e += expected_e[ii]; - } - // for (int ii = 0; ii < natoms; ++ii) { - // for (int dd = 0; dd < 9; ++dd) { - // expected_tot_v[dd] += expected_v[ii * 9 + dd]; - // } - // } -}; + -5.080322972018686, -5.08213772482076}; + std::vector expected_f = { + -0.2929142244191496, 0.0801070990501456, 0.148216178514704, + 0.2929142244191503, -0.0801070990501454, -0.1482161785147037, + -0.2094984819251435, 0.0241594118950041, -0.0215199116994508, + 0.3068843038300324, -0.001620530344866, 0.1508093841389746, + -0.0122719879278721, 0.0186341247897136, -0.1137104245023705, + -0.0851138339770169, -0.0411730063398516, -0.0155790479371533}; + std::vector expected_fm = {-1.5298530476860008, + 0.0071315024546899, + 0.0650492472558729, + 0., + 0., + 0., + 0., + 0., + 0., + -0.6212052813442365, + -0.2290265978320395, + -0.5101405083352206, + 0., + 0., + 0., + 0., + 0., + 0.}; + int natoms; + double expected_tot_e; + // std::vector expected_tot_v; -void TearDown() override { DP_DeleteDeepSpin(dp); }; -} -; + DP_DeepSpin* dp; + + void SetUp() override { + dp = DP_NewDeepSpin("../../tests/infer/deeppot_dpa_spin.pth"); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); + expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); + for (int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + // for (int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } + }; + + void TearDown() override { DP_DeleteDeepSpin(dp); }; +}; TEST_F(TestInferDeepSpinANoPBC, double_infer) { double* ener_ = new double; From b481274390da9cdbb843dd580ed1a85a06a55c92 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 8 Nov 2024 22:28:22 +0800 Subject: [PATCH 175/193] Update test_deepspin_a.cc --- source/api_c/tests/test_deepspin_a.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/api_c/tests/test_deepspin_a.cc b/source/api_c/tests/test_deepspin_a.cc index 2e74fa1659..7c0a606034 100644 --- a/source/api_c/tests/test_deepspin_a.cc +++ b/source/api_c/tests/test_deepspin_a.cc @@ -180,12 +180,12 @@ TEST_F(TestInferDeepSpinA, numb_types) { TEST_F(TestInferDeepSpinA, numb_types_spin) { int numb_types_spin = DP_DeepSpinGetNumbTypesSpin(dp); - EXPECT_EQ(numb_types_spin, 1); + EXPECT_EQ(numb_types_spin, 0); } TEST_F(TestInferDeepSpinA, type_map) { const char* type_map = DP_DeepSpinGetTypeMap(dp); - char expected_type_map[] = "O H"; + char expected_type_map[] = "O H O_spin H_spin"; EXPECT_EQ(strcmp(type_map, expected_type_map), 0); DP_DeleteChar(type_map); } From 29ace48e85dea15ad22ac51808c62e023a5a12f1 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 8 Nov 2024 22:37:33 +0800 Subject: [PATCH 176/193] Update test_deepspin_a.cc --- source/api_c/tests/test_deepspin_a.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/api_c/tests/test_deepspin_a.cc b/source/api_c/tests/test_deepspin_a.cc index 7c0a606034..ccffd7221f 100644 --- a/source/api_c/tests/test_deepspin_a.cc +++ b/source/api_c/tests/test_deepspin_a.cc @@ -185,7 +185,7 @@ TEST_F(TestInferDeepSpinA, numb_types_spin) { TEST_F(TestInferDeepSpinA, type_map) { const char* type_map = DP_DeepSpinGetTypeMap(dp); - char expected_type_map[] = "O H O_spin H_spin"; + char expected_type_map[] = "Ni O"; EXPECT_EQ(strcmp(type_map, expected_type_map), 0); DP_DeleteChar(type_map); } From e68de42084f61cbf5ae6afbbb3b990a25d2bd2b1 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 8 Nov 2024 22:54:25 +0800 Subject: [PATCH 177/193] Update test_deepspin_a.cc --- source/api_c/tests/test_deepspin_a.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/api_c/tests/test_deepspin_a.cc b/source/api_c/tests/test_deepspin_a.cc index ccffd7221f..6c9ea0955c 100644 --- a/source/api_c/tests/test_deepspin_a.cc +++ b/source/api_c/tests/test_deepspin_a.cc @@ -186,6 +186,8 @@ TEST_F(TestInferDeepSpinA, numb_types_spin) { TEST_F(TestInferDeepSpinA, type_map) { const char* type_map = DP_DeepSpinGetTypeMap(dp); char expected_type_map[] = "Ni O"; + printf("type_map: %s\n", type_map); + printf("expected_type_map: %s\n", expected_type_map); EXPECT_EQ(strcmp(type_map, expected_type_map), 0); DP_DeleteChar(type_map); } From cef58173d57963608f635db297757570f015208e Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Fri, 8 Nov 2024 23:12:47 +0800 Subject: [PATCH 178/193] fix space --- source/api_c/src/c_api.cc | 5 +++++ source/api_c/tests/test_deepspin_a.cc | 2 -- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index 5f453cccd1..0021254ab4 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -1355,6 +1355,11 @@ template void DP_DipoleChargeModifierComputeNList_variant( * @return const char* */ const char* string_to_char(std::string& str) { + // Remove trailing spaces + str.erase(std::find_if(str.rbegin(), str.rend(), + [](unsigned char ch) { return !std::isspace(ch); }) + .base(), + str.end()); // copy from string to char* const std::string::size_type size = str.size(); // +1 for '\0' diff --git a/source/api_c/tests/test_deepspin_a.cc b/source/api_c/tests/test_deepspin_a.cc index 6c9ea0955c..ccffd7221f 100644 --- a/source/api_c/tests/test_deepspin_a.cc +++ b/source/api_c/tests/test_deepspin_a.cc @@ -186,8 +186,6 @@ TEST_F(TestInferDeepSpinA, numb_types_spin) { TEST_F(TestInferDeepSpinA, type_map) { const char* type_map = DP_DeepSpinGetTypeMap(dp); char expected_type_map[] = "Ni O"; - printf("type_map: %s\n", type_map); - printf("expected_type_map: %s\n", expected_type_map); EXPECT_EQ(strcmp(type_map, expected_type_map), 0); DP_DeleteChar(type_map); } From 2085804df66ef905d6574d9858ce5d2abaf3cf50 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 9 Nov 2024 02:26:36 +0800 Subject: [PATCH 179/193] Create test_deepspin_a_hpp_tf.cc --- source/api_c/tests/test_deepspin_a_hpp_tf.cc | 280 +++++++++++++++++++ 1 file changed, 280 insertions(+) create mode 100644 source/api_c/tests/test_deepspin_a_hpp_tf.cc diff --git a/source/api_c/tests/test_deepspin_a_hpp_tf.cc b/source/api_c/tests/test_deepspin_a_hpp_tf.cc new file mode 100644 index 0000000000..4dedaa296d --- /dev/null +++ b/source/api_c/tests/test_deepspin_a_hpp_tf.cc @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include + +#include +#include +#include +#include + +#include "deepmd.hpp" +#include "test_utils.h" + +template +class TestInferDeepSpinTFAHPP : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, + 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 0, 1, 1}; + std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; + std::vector expected_e = {-7.314365618560289, -7.313531316181837, + -2.8980532245013997, -2.897373810282277}; + std::vector expected_f = { + 0.0275132293555514, -0.0112057401883111, -0.0212278132621243, + -0.0229926640905535, 0.0114378553363334, 0.019670014885563, + 0.0086502856137601, 0.0088926283192558, -0.0127014507822769, + -0.013170850878758, -0.009124743467278, 0.0142592491588383}; + std::vector expected_fm = { + 0.0066245455049449, -0.0023055088004378, 0.0294608578045521, + -0.0041979452385972, 0.0025775020220167, 0.0316295420619988, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + unsigned int natoms; + double expected_tot_e; + // std::vector expected_tot_v; + + deepmd::hpp::DeepSpin dp; + + void SetUp() override { + std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; + deepmd::hpp::convert_pbtxt_to_pb("../../tests/infer/deepspin_nlist.pbtxt", + "deepspin_nlist_hpp.pb"); + + dp.init("deepspin_nlist_hpp.pb"); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); + expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); + for (unsigned int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + // for (unsigned int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } + }; + + void TearDown() override { remove("deepspin_nlist_hpp.pb"); }; +}; + +TYPED_TEST_SUITE(TestInferDeepSpinTFAHPP, ValueTypes); + +TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + EXPECT_EQ(atom_ener.size(), natoms); + // EXPECT_EQ(atom_vir.size(), natoms * 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepSpinTFAHPP, print_summary) { + deepmd::hpp::DeepSpin& dp = this->dp; + dp.print_summary(""); +} + +template +class TestInferDeepSpinTFANoPbcHPP : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, + 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 0, 1, 1}; + std::vector box = {}; + std::vector expected_e = {-7.313160384523243, -7.312173646552338, + -2.8984477845267067, + -2.8984477845267067}; + std::vector expected_f = { + 0.0277100137316238, -0.0116082489956803, -0.0211484273275705, + -0.0277100137316238, 0.0116082489956803, 0.0211484273275705, + 0.0097588349924651, 0.0091168063745397, -0.0133541952528469, + -0.0097588349924651, -0.0091168063745397, 0.0133541952528469}; + std::vector expected_fm = { + 0.0058990325687816, -0.0024712163463815, 0.0296682261295907, + -0.0060028470719556, 0.0025147062058193, 0.0321884178873188, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, + 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; + unsigned int natoms; + double expected_tot_e; + // std::vector expected_tot_v; + + deepmd::hpp::DeepSpin dp; + + void SetUp() override { + std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; + deepmd::hpp::convert_pbtxt_to_pb(file_name, "deepspin_nlist_hpp.pb"); + + dp.init("deepspin_nlist_hpp.pb"); + + natoms = expected_e.size(); + EXPECT_EQ(natoms * 3, expected_f.size()); + EXPECT_EQ(natoms * 3, expected_fm.size()); + // EXPECT_EQ(natoms * 9, expected_v.size()); + expected_tot_e = 0.; + // expected_tot_v.resize(9); + // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); + for (unsigned int ii = 0; ii < natoms; ++ii) { + expected_tot_e += expected_e[ii]; + } + // for (unsigned int ii = 0; ii < natoms; ++ii) { + // for (int dd = 0; dd < 9; ++dd) { + // expected_tot_v[dd] += expected_v[ii * 9 + dd]; + // } + // } + }; + + void TearDown() override { remove("deepspin_nlist_hpp.pb"); }; +}; + +TYPED_TEST_SUITE(TestInferDeepSpinTFANoPbcHPP, ValueTypes); + +TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_build_nlist) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (unsigned int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (unsigned int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (unsigned int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} + +TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_lmp_nlist) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + std::vector > nlist_data = {{1}, {0}, {3}, {2}}; + std::vector ilist(natoms), numneigh(natoms); + std::vector firstneigh(natoms); + deepmd::hpp::InputNlist inlist(natoms, &ilist[0], &numneigh[0], + &firstneigh[0]); + deepmd::hpp::convert_nlist(inlist, nlist_data); + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box, 0, inlist, + 0); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} From b367f97edb8917abc9e62ad42074309b9d1301bf Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 9 Nov 2024 02:55:16 +0800 Subject: [PATCH 180/193] Update test_deepspin_a_hpp_tf.cc --- source/api_c/tests/test_deepspin_a_hpp_tf.cc | 81 ++++++++++---------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/source/api_c/tests/test_deepspin_a_hpp_tf.cc b/source/api_c/tests/test_deepspin_a_hpp_tf.cc index 4dedaa296d..36c3cb8ba6 100644 --- a/source/api_c/tests/test_deepspin_a_hpp_tf.cc +++ b/source/api_c/tests/test_deepspin_a_hpp_tf.cc @@ -100,48 +100,49 @@ TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist) { // } } -TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist_atomic) { - using VALUETYPE = TypeParam; - std::vector& coord = this->coord; - std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - std::vector& expected_e = this->expected_e; - std::vector& expected_f = this->expected_f; - std::vector& expected_fm = this->expected_fm; - // std::vector& expected_v = this->expected_v; - unsigned int& natoms = this->natoms; - double& expected_tot_e = this->expected_tot_e; - // std::vector& expected_tot_v = this->expected_tot_v; - deepmd::hpp::DeepSpin& dp = this->dp; - double ener; - std::vector force, force_mag, virial, atom_ener, atom_vir; - dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, - atype, box); +// TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist_atomic) { +// using VALUETYPE = TypeParam; +// std::vector& coord = this->coord; +// std::vector& spin = this->spin; +// std::vector& atype = this->atype; +// std::vector& box = this->box; +// std::vector& expected_e = this->expected_e; +// std::vector& expected_f = this->expected_f; +// std::vector& expected_fm = this->expected_fm; +// // std::vector& expected_v = this->expected_v; +// unsigned int& natoms = this->natoms; +// double& expected_tot_e = this->expected_tot_e; +// // std::vector& expected_tot_v = this->expected_tot_v; +// deepmd::hpp::DeepSpin& dp = this->dp; +// double ener; +// std::vector force, force_mag, virial, atom_ener, atom_vir; +// dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, +// spin, +// atype, box); - EXPECT_EQ(force.size(), natoms * 3); - EXPECT_EQ(force_mag.size(), natoms * 3); - // EXPECT_EQ(virial.size(), 9); - EXPECT_EQ(atom_ener.size(), natoms); - // EXPECT_EQ(atom_vir.size(), natoms * 9); +// EXPECT_EQ(force.size(), natoms * 3); +// EXPECT_EQ(force_mag.size(), natoms * 3); +// // EXPECT_EQ(virial.size(), 9); +// EXPECT_EQ(atom_ener.size(), natoms); +// // EXPECT_EQ(atom_vir.size(), natoms * 9); - EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); - } - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); - } - // for (int ii = 0; ii < 3 * 3; ++ii) { - // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); - // } - for (int ii = 0; ii < natoms; ++ii) { - EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); - } - // for (int ii = 0; ii < natoms * 9; ++ii) { - // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); - // } -} +// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); +// for (int ii = 0; ii < natoms * 3; ++ii) { +// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); +// } +// for (int ii = 0; ii < natoms * 3; ++ii) { +// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); +// } +// // for (int ii = 0; ii < 3 * 3; ++ii) { +// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); +// // } +// for (int ii = 0; ii < natoms; ++ii) { +// EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); +// } +// // for (int ii = 0; ii < natoms * 9; ++ii) { +// // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); +// // } +// } TYPED_TEST(TestInferDeepSpinTFAHPP, print_summary) { deepmd::hpp::DeepSpin& dp = this->dp; From b76e272cc13831c4f38eee05df1aea51ee10d847 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 9 Nov 2024 17:00:45 +0800 Subject: [PATCH 181/193] update ntypes_spin --- source/api_cc/src/DeepPotTF.cc | 6 +----- source/api_cc/src/DeepSpinTF.cc | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index a990cecf8d..7656590ea6 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -478,11 +478,7 @@ void DeepPotTF::init(const std::string& model, } cell_size = rcut; ntypes = get_scalar("descrpt_attr/ntypes"); - try { - ntypes_spin = get_scalar("spin_attr/ntypes_spin"); - } catch (const deepmd::deepmd_exception&) { - ntypes_spin = 0; - } + ntypes_spin = 0; dfparam = get_scalar("fitting_attr/dfparam"); daparam = get_scalar("fitting_attr/daparam"); if (dfparam < 0) { diff --git a/source/api_cc/src/DeepSpinTF.cc b/source/api_cc/src/DeepSpinTF.cc index caff84255e..0906bc9289 100644 --- a/source/api_cc/src/DeepSpinTF.cc +++ b/source/api_cc/src/DeepSpinTF.cc @@ -478,11 +478,7 @@ void DeepSpinTF::init(const std::string& model, } cell_size = rcut; ntypes = get_scalar("descrpt_attr/ntypes"); - try { - ntypes_spin = get_scalar("spin_attr/ntypes_spin"); - } catch (const deepmd::deepmd_exception&) { - ntypes_spin = 0; - } + ntypes_spin = get_scalar("spin_attr/ntypes_spin"); dfparam = get_scalar("fitting_attr/dfparam"); daparam = get_scalar("fitting_attr/daparam"); if (dfparam < 0) { From 68cfb947d9ee7a2c739ce41d28c1fcfaa7653a4b Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 9 Nov 2024 17:02:27 +0800 Subject: [PATCH 182/193] Update test_deepspin_a_hpp_tf.cc --- source/api_c/tests/test_deepspin_a_hpp_tf.cc | 77 ++++++++++---------- 1 file changed, 39 insertions(+), 38 deletions(-) diff --git a/source/api_c/tests/test_deepspin_a_hpp_tf.cc b/source/api_c/tests/test_deepspin_a_hpp_tf.cc index 36c3cb8ba6..04c3226692 100644 --- a/source/api_c/tests/test_deepspin_a_hpp_tf.cc +++ b/source/api_c/tests/test_deepspin_a_hpp_tf.cc @@ -239,43 +239,44 @@ TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_build_nlist) { // } } -TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_lmp_nlist) { - using VALUETYPE = TypeParam; - std::vector& coord = this->coord; - std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - std::vector& expected_e = this->expected_e; - std::vector& expected_f = this->expected_f; - std::vector& expected_fm = this->expected_fm; - // std::vector& expected_v = this->expected_v; - unsigned int& natoms = this->natoms; - double& expected_tot_e = this->expected_tot_e; - // std::vector& expected_tot_v = this->expected_tot_v; - deepmd::hpp::DeepSpin& dp = this->dp; - double ener; - std::vector force, force_mag, virial; - std::vector > nlist_data = {{1}, {0}, {3}, {2}}; - std::vector ilist(natoms), numneigh(natoms); - std::vector firstneigh(natoms); - deepmd::hpp::InputNlist inlist(natoms, &ilist[0], &numneigh[0], - &firstneigh[0]); - deepmd::hpp::convert_nlist(inlist, nlist_data); - dp.compute(ener, force, force_mag, virial, coord, spin, atype, box, 0, inlist, - 0); +// TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_lmp_nlist) { +// using VALUETYPE = TypeParam; +// std::vector& coord = this->coord; +// std::vector& spin = this->spin; +// std::vector& atype = this->atype; +// std::vector& box = this->box; +// std::vector& expected_e = this->expected_e; +// std::vector& expected_f = this->expected_f; +// std::vector& expected_fm = this->expected_fm; +// // std::vector& expected_v = this->expected_v; +// unsigned int& natoms = this->natoms; +// double& expected_tot_e = this->expected_tot_e; +// // std::vector& expected_tot_v = this->expected_tot_v; +// deepmd::hpp::DeepSpin& dp = this->dp; +// double ener; +// std::vector force, force_mag, virial; +// std::vector > nlist_data = {{1}, {0}, {3}, {2}}; +// std::vector ilist(natoms), numneigh(natoms); +// std::vector firstneigh(natoms); +// deepmd::hpp::InputNlist inlist(natoms, &ilist[0], &numneigh[0], +// &firstneigh[0]); +// deepmd::hpp::convert_nlist(inlist, nlist_data); +// dp.compute(ener, force, force_mag, virial, coord, spin, atype, box, 0, +// inlist, +// 0); - EXPECT_EQ(force.size(), natoms * 3); - EXPECT_EQ(force_mag.size(), natoms * 3); - // EXPECT_EQ(virial.size(), 9); +// EXPECT_EQ(force.size(), natoms * 3); +// EXPECT_EQ(force_mag.size(), natoms * 3); +// // EXPECT_EQ(virial.size(), 9); - EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); - } - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); - } - // for (int ii = 0; ii < 3 * 3; ++ii) { - // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); - // } -} +// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); +// for (int ii = 0; ii < natoms * 3; ++ii) { +// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); +// } +// for (int ii = 0; ii < natoms * 3; ++ii) { +// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); +// } +// // for (int ii = 0; ii < 3 * 3; ++ii) { +// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); +// // } +// } From ea19b35f3013ed4a5dfbb22b9b8f9c07378f0ad9 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 9 Nov 2024 17:34:41 +0800 Subject: [PATCH 183/193] Update test_deepspin_a_hpp_tf.cc --- source/api_c/tests/test_deepspin_a_hpp_tf.cc | 160 +++++++++---------- 1 file changed, 79 insertions(+), 81 deletions(-) diff --git a/source/api_c/tests/test_deepspin_a_hpp_tf.cc b/source/api_c/tests/test_deepspin_a_hpp_tf.cc index 04c3226692..8087ea17c5 100644 --- a/source/api_c/tests/test_deepspin_a_hpp_tf.cc +++ b/source/api_c/tests/test_deepspin_a_hpp_tf.cc @@ -100,49 +100,48 @@ TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist) { // } } -// TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist_atomic) { -// using VALUETYPE = TypeParam; -// std::vector& coord = this->coord; -// std::vector& spin = this->spin; -// std::vector& atype = this->atype; -// std::vector& box = this->box; -// std::vector& expected_e = this->expected_e; -// std::vector& expected_f = this->expected_f; -// std::vector& expected_fm = this->expected_fm; -// // std::vector& expected_v = this->expected_v; -// unsigned int& natoms = this->natoms; -// double& expected_tot_e = this->expected_tot_e; -// // std::vector& expected_tot_v = this->expected_tot_v; -// deepmd::hpp::DeepSpin& dp = this->dp; -// double ener; -// std::vector force, force_mag, virial, atom_ener, atom_vir; -// dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, -// spin, -// atype, box); +TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box); -// EXPECT_EQ(force.size(), natoms * 3); -// EXPECT_EQ(force_mag.size(), natoms * 3); -// // EXPECT_EQ(virial.size(), 9); -// EXPECT_EQ(atom_ener.size(), natoms); -// // EXPECT_EQ(atom_vir.size(), natoms * 9); + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + EXPECT_EQ(atom_ener.size(), natoms); + // EXPECT_EQ(atom_vir.size(), natoms * 9); -// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); -// for (int ii = 0; ii < natoms * 3; ++ii) { -// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); -// } -// for (int ii = 0; ii < natoms * 3; ++ii) { -// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); -// } -// // for (int ii = 0; ii < 3 * 3; ++ii) { -// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); -// // } -// for (int ii = 0; ii < natoms; ++ii) { -// EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); -// } -// // for (int ii = 0; ii < natoms * 9; ++ii) { -// // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); -// // } -// } + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); + // } +} TYPED_TEST(TestInferDeepSpinTFAHPP, print_summary) { deepmd::hpp::DeepSpin& dp = this->dp; @@ -157,7 +156,7 @@ class TestInferDeepSpinTFANoPbcHPP : public ::testing::Test { std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, 0., 0., 0., 0., 0., 0.}; std::vector atype = {0, 0, 1, 1}; - std::vector box = {}; + std::vector box = {100., 0., 0., 0., 100., 0., 0., 0., 100.}; std::vector expected_e = {-7.313160384523243, -7.312173646552338, -2.8984477845267067, -2.8984477845267067}; @@ -239,44 +238,43 @@ TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_build_nlist) { // } } -// TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_lmp_nlist) { -// using VALUETYPE = TypeParam; -// std::vector& coord = this->coord; -// std::vector& spin = this->spin; -// std::vector& atype = this->atype; -// std::vector& box = this->box; -// std::vector& expected_e = this->expected_e; -// std::vector& expected_f = this->expected_f; -// std::vector& expected_fm = this->expected_fm; -// // std::vector& expected_v = this->expected_v; -// unsigned int& natoms = this->natoms; -// double& expected_tot_e = this->expected_tot_e; -// // std::vector& expected_tot_v = this->expected_tot_v; -// deepmd::hpp::DeepSpin& dp = this->dp; -// double ener; -// std::vector force, force_mag, virial; -// std::vector > nlist_data = {{1}, {0}, {3}, {2}}; -// std::vector ilist(natoms), numneigh(natoms); -// std::vector firstneigh(natoms); -// deepmd::hpp::InputNlist inlist(natoms, &ilist[0], &numneigh[0], -// &firstneigh[0]); -// deepmd::hpp::convert_nlist(inlist, nlist_data); -// dp.compute(ener, force, force_mag, virial, coord, spin, atype, box, 0, -// inlist, -// 0); +TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_lmp_nlist) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial; + std::vector > nlist_data = {{1}, {0}, {3}, {2}}; + std::vector ilist(natoms), numneigh(natoms); + std::vector firstneigh(natoms); + deepmd::hpp::InputNlist inlist(natoms, &ilist[0], &numneigh[0], + &firstneigh[0]); + deepmd::hpp::convert_nlist(inlist, nlist_data); + dp.compute(ener, force, force_mag, virial, coord, spin, atype, box, 0, inlist, + 0); -// EXPECT_EQ(force.size(), natoms * 3); -// EXPECT_EQ(force_mag.size(), natoms * 3); -// // EXPECT_EQ(virial.size(), 9); + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); -// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); -// for (int ii = 0; ii < natoms * 3; ++ii) { -// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); -// } -// for (int ii = 0; ii < natoms * 3; ++ii) { -// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); -// } -// // for (int ii = 0; ii < 3 * 3; ++ii) { -// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); -// // } -// } + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} From 43e8bafaa5089424ad6ae8774946b9fb256a1eaf Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 9 Nov 2024 17:49:29 +0800 Subject: [PATCH 184/193] Update test_deepspin_a_hpp_tf.cc --- source/api_c/tests/test_deepspin_a_hpp_tf.cc | 81 ++++++++++---------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/source/api_c/tests/test_deepspin_a_hpp_tf.cc b/source/api_c/tests/test_deepspin_a_hpp_tf.cc index 8087ea17c5..7b103cd9d7 100644 --- a/source/api_c/tests/test_deepspin_a_hpp_tf.cc +++ b/source/api_c/tests/test_deepspin_a_hpp_tf.cc @@ -100,48 +100,49 @@ TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist) { // } } -TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist_atomic) { - using VALUETYPE = TypeParam; - std::vector& coord = this->coord; - std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - std::vector& expected_e = this->expected_e; - std::vector& expected_f = this->expected_f; - std::vector& expected_fm = this->expected_fm; - // std::vector& expected_v = this->expected_v; - unsigned int& natoms = this->natoms; - double& expected_tot_e = this->expected_tot_e; - // std::vector& expected_tot_v = this->expected_tot_v; - deepmd::hpp::DeepSpin& dp = this->dp; - double ener; - std::vector force, force_mag, virial, atom_ener, atom_vir; - dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, - atype, box); +// TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist_atomic) { +// using VALUETYPE = TypeParam; +// std::vector& coord = this->coord; +// std::vector& spin = this->spin; +// std::vector& atype = this->atype; +// std::vector& box = this->box; +// std::vector& expected_e = this->expected_e; +// std::vector& expected_f = this->expected_f; +// std::vector& expected_fm = this->expected_fm; +// // std::vector& expected_v = this->expected_v; +// unsigned int& natoms = this->natoms; +// double& expected_tot_e = this->expected_tot_e; +// // std::vector& expected_tot_v = this->expected_tot_v; +// deepmd::hpp::DeepSpin& dp = this->dp; +// double ener; +// std::vector force, force_mag, virial, atom_ener, atom_vir; +// dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, +// spin, +// atype, box); - EXPECT_EQ(force.size(), natoms * 3); - EXPECT_EQ(force_mag.size(), natoms * 3); - // EXPECT_EQ(virial.size(), 9); - EXPECT_EQ(atom_ener.size(), natoms); - // EXPECT_EQ(atom_vir.size(), natoms * 9); +// EXPECT_EQ(force.size(), natoms * 3); +// EXPECT_EQ(force_mag.size(), natoms * 3); +// // EXPECT_EQ(virial.size(), 9); +// EXPECT_EQ(atom_ener.size(), natoms); +// // EXPECT_EQ(atom_vir.size(), natoms * 9); - EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); - } - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); - } - // for (int ii = 0; ii < 3 * 3; ++ii) { - // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); - // } - for (int ii = 0; ii < natoms; ++ii) { - EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); - } - // for (int ii = 0; ii < natoms * 9; ++ii) { - // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); - // } -} +// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); +// for (int ii = 0; ii < natoms * 3; ++ii) { +// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); +// } +// for (int ii = 0; ii < natoms * 3; ++ii) { +// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); +// } +// // for (int ii = 0; ii < 3 * 3; ++ii) { +// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); +// // } +// for (int ii = 0; ii < natoms; ++ii) { +// EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); +// } +// // for (int ii = 0; ii < natoms * 9; ++ii) { +// // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); +// // } +// } TYPED_TEST(TestInferDeepSpinTFAHPP, print_summary) { deepmd::hpp::DeepSpin& dp = this->dp; From 82eca9afe8b81241671f2bbaa970823037f6f34e Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 9 Nov 2024 18:54:21 +0800 Subject: [PATCH 185/193] Update test_deepspin_a_hpp_tf.cc --- source/api_c/tests/test_deepspin_a_hpp_tf.cc | 81 ++++++++++---------- 1 file changed, 40 insertions(+), 41 deletions(-) diff --git a/source/api_c/tests/test_deepspin_a_hpp_tf.cc b/source/api_c/tests/test_deepspin_a_hpp_tf.cc index 7b103cd9d7..4d4899543a 100644 --- a/source/api_c/tests/test_deepspin_a_hpp_tf.cc +++ b/source/api_c/tests/test_deepspin_a_hpp_tf.cc @@ -100,49 +100,48 @@ TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist) { // } } -// TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist_atomic) { -// using VALUETYPE = TypeParam; -// std::vector& coord = this->coord; -// std::vector& spin = this->spin; -// std::vector& atype = this->atype; -// std::vector& box = this->box; -// std::vector& expected_e = this->expected_e; -// std::vector& expected_f = this->expected_f; -// std::vector& expected_fm = this->expected_fm; -// // std::vector& expected_v = this->expected_v; -// unsigned int& natoms = this->natoms; -// double& expected_tot_e = this->expected_tot_e; -// // std::vector& expected_tot_v = this->expected_tot_v; -// deepmd::hpp::DeepSpin& dp = this->dp; -// double ener; -// std::vector force, force_mag, virial, atom_ener, atom_vir; -// dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, -// spin, -// atype, box); +TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box); -// EXPECT_EQ(force.size(), natoms * 3); -// EXPECT_EQ(force_mag.size(), natoms * 3); -// // EXPECT_EQ(virial.size(), 9); -// EXPECT_EQ(atom_ener.size(), natoms); -// // EXPECT_EQ(atom_vir.size(), natoms * 9); + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + EXPECT_EQ(atom_ener.size(), natoms); + // EXPECT_EQ(atom_vir.size(), natoms * 9); -// EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); -// for (int ii = 0; ii < natoms * 3; ++ii) { -// EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); -// } -// for (int ii = 0; ii < natoms * 3; ++ii) { -// EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); -// } -// // for (int ii = 0; ii < 3 * 3; ++ii) { -// // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); -// // } -// for (int ii = 0; ii < natoms; ++ii) { -// EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); -// } -// // for (int ii = 0; ii < natoms * 9; ++ii) { -// // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); -// // } -// } + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } + // for (int ii = 0; ii < natoms * 9; ++ii) { + // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); + // } +} TYPED_TEST(TestInferDeepSpinTFAHPP, print_summary) { deepmd::hpp::DeepSpin& dp = this->dp; From 7c6906690df0502ee32d457df62964384e7a720d Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 9 Nov 2024 22:10:40 +0800 Subject: [PATCH 186/193] add uts --- source/api_c/include/c_api.h | 96 +++++++ source/api_c/include/deepmd.hpp | 258 ++++++++++++++++++ source/api_c/src/c_api.cc | 152 +++++++++++ source/api_c/tests/test_deepspin_a_hpp_tf.cc | 44 +++ .../tests/test_deepspin_model_devi_hpp.cc | 162 +++++++++++ source/api_cc/include/DeepSpin.h | 78 +++++- source/api_cc/src/DeepSpin.cc | 109 ++++++++ .../api_cc/tests/test_deepspin_model_devi.cc | 166 +++++++++++ 8 files changed, 1063 insertions(+), 2 deletions(-) create mode 100644 source/api_c/tests/test_deepspin_model_devi_hpp.cc create mode 100644 source/api_cc/tests/test_deepspin_model_devi.cc diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index b214d3c7a9..c34a6909a5 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -978,6 +978,53 @@ void DP_DeepPotModelDeviCompute2(DP_DeepPotModelDevi* dp, double* virial, double* atomic_energy, double* atomic_virial); + +/** + * @brief Evaluate the energy, force, magnetic force and virial by using a DP + * spin model deviation. (double version) + * @version 2 + * @param[in] dp The DP spin model deviation to use. + * @param[in] nframes The number of frames. Only support 1 for now. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size natoms + *x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + *of size nframes x natoms x 3. + * @param[in] atype The atom types. The array should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. Pass + *NULL if pbc is not used. + * @param[in] fparam The frame parameters. The array can be of size nframes x + *dim_fparam. + * @param[in] aparam The atom parameters. The array can be of size nframes x + *natoms x dim_aparam. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] force_mag Output magnetic force on each atom. The array should be + * of size natoms x 3. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of size + *natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of size + *natoms x 9. + * @warning The output arrays should be allocated before calling this function. + *Pass NULL if not required. + * @since API version 24 + **/ +void DP_DeepSpinModelDeviCompute2(DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); /** * @brief Evaluate the energy, force and virial by using a DP model deviation *with neighbor list. (float version) @@ -1018,6 +1065,53 @@ void DP_DeepPotModelDeviComputef2(DP_DeepPotModelDevi* dp, float* atomic_energy, float* atomic_virial); +/** + * @brief Evaluate the energy, force, magnetic force and virial by using a DP + * spin model deviation. (float version) + * @version 2 + * @param[in] dp The DP spin model deviation to use. + * @param[in] nframes The number of frames. Only support 1 for now. + * @param[in] natoms The number of atoms. + * @param[in] coord The coordinates of atoms. The array should be of size natoms + *x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should be + *of size nframes x natoms x 3. + * @param[in] atype The atom types. The array should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size 9. Pass + *NULL if pbc is not used. + * @param[in] fparam The frame parameters. The array can be of size nframes x + *dim_fparam. + * @param[in] aparam The atom parameters. The array can be of size nframes x + *natoms x dim_aparam. + * @param[out] energy Output energy. + * @param[out] force Output force. The array should be of size natoms x 3. + * @param[out] force_mag Output magnetic force on each atom. The array should be + * of size natoms x 3. + * @param[out] virial Output virial. The array should be of size 9. + * @param[out] atomic_energy Output atomic energy. The array should be of size + *natoms. + * @param[out] atomic_virial Output atomic virial. The array should be of size + *natoms x 9. + * @warning The output arrays should be allocated before calling this function. + *Pass NULL if not required. + * @since API version 24 + **/ +void DP_DeepSpinModelDeviComputef2(DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); + // deprecated interface version1 /** * @brief Evaluate the energy, force and virial by using a DP model deviation @@ -1171,6 +1265,7 @@ void DP_DeepPotModelDeviComputeNList2(DP_DeepPotModelDevi* dp, *natoms x 9. * @warning The output arrays should be allocated before calling this function. *Pass NULL if not required. + * @since API version 24 **/ void DP_DeepSpinModelDeviComputeNList2(DP_DeepSpinModelDevi* dp, const int nframes, @@ -1269,6 +1364,7 @@ void DP_DeepPotModelDeviComputeNListf2(DP_DeepPotModelDevi* dp, *natoms x 9. * @warning The output arrays should be allocated before calling this function. *Pass NULL if not required. + * @since API version 24 **/ void DP_DeepSpinModelDeviComputeNListf2(DP_DeepSpinModelDevi* dp, const int nframes, diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 37a2d89aa1..9e1a611869 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -389,6 +389,62 @@ inline void _DP_DeepPotModelDeviCompute(DP_DeepPotModelDevi *dp, atomic_virial); } +template +inline void _DP_DeepSpinModelDeviCompute(DP_DeepSpinModelDevi *dp, + const int natom, + const FPTYPE *coord, + const FPTYPE *spin, + const int *atype, + const FPTYPE *cell, + const FPTYPE *fparam, + const FPTYPE *aparam, + double *energy, + FPTYPE *force, + FPTYPE *force_mag, + FPTYPE *virial, + FPTYPE *atomic_energy, + FPTYPE *atomic_virial); + +template <> +inline void _DP_DeepSpinModelDeviCompute(DP_DeepSpinModelDevi *dp, + const int natom, + const double *coord, + const double *spin, + const int *atype, + const double *cell, + const double *fparam, + const double *aparam, + double *energy, + double *force, + double *force_mag, + double *virial, + double *atomic_energy, + double *atomic_virial) { + DP_DeepSpinModelDeviCompute2(dp, 1, natom, coord, spin, atype, cell, fparam, + aparam, energy, force, force_mag, virial, + atomic_energy, atomic_virial); +} + +template <> +inline void _DP_DeepSpinModelDeviCompute(DP_DeepSpinModelDevi *dp, + const int natom, + const float *coord, + const float *spin, + const int *atype, + const float *cell, + const float *fparam, + const float *aparam, + double *energy, + float *force, + float *force_mag, + float *virial, + float *atomic_energy, + float *atomic_virial) { + DP_DeepSpinModelDeviComputef2(dp, 1, natom, coord, spin, atype, cell, fparam, + aparam, energy, force, force_mag, virial, + atomic_energy, atomic_virial); +} + template inline void _DP_DeepPotModelDeviComputeNList(DP_DeepPotModelDevi *dp, const int natom, @@ -2454,6 +2510,208 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { aparam_nall = DP_DeepSpinModelDeviIsAParamNAll(dp); dpbase = (DP_DeepBaseModelDevi *)dp; }; + + /** + * @brief Evaluate the energy, force, magnetic force and virial by using this + *DP spin model deviation. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9 (PBC) or empty (no PBC). + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + **/ + template + void compute( + std::vector &ener, + std::vector> &force, + std::vector> &force_mag, + std::vector> &virial, + const std::vector &coord, + const std::vector &spin, + const std::vector &atype, + const std::vector &box, + const std::vector &fparam = std::vector(), + const std::vector &aparam = std::vector()) { + unsigned int natoms = atype.size(); + unsigned int nframes = 1; + assert(natoms * 3 == coord.size()); + if (!box.empty()) { + assert(box.size() == 9); + } + const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; + const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; + const int *atype_ = &atype[0]; + + // memory will be continuous for std::vector but not + // std::vector + std::vector energy_flat(numb_models); + std::vector force_flat(static_cast(numb_models) * + natoms * 3); + std::vector force_mag_flat(static_cast(numb_models) * + natoms * 3); + std::vector virial_flat(numb_models * 9); + double *ener_ = &energy_flat[0]; + VALUETYPE *force_ = &force_flat[0]; + VALUETYPE *force_mag_ = &force_mag_flat[0]; + VALUETYPE *virial_ = &virial_flat[0]; + std::vector fparam_, aparam_; + validate_fparam_aparam(nframes, natoms, fparam, aparam); + tile_fparam_aparam(fparam_, nframes, dfparam, fparam); + tile_fparam_aparam(aparam_, nframes, natoms * daparam, aparam); + const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; + const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; + + _DP_DeepSpinModelDeviCompute( + dp, natoms, coord_, spin_, atype_, box_, fparam__, aparam__, ener_, + force_, force_mag_, virial_, nullptr, nullptr); + DP_CHECK_OK(DP_DeepSpinModelDeviCheckOK, dp); + + // reshape + ener.resize(numb_models); + force.resize(numb_models); + force_mag.resize(numb_models); + // virial.resize(numb_models); + for (int i = 0; i < numb_models; i++) { + ener[i] = energy_flat[i]; + force[i].resize(static_cast(natoms) * 3); + force_mag[i].resize(static_cast(natoms) * 3); + virial[i].resize(9); + for (int j = 0; j < natoms * 3; j++) { + force[i][j] = force_flat[i * natoms * 3 + j]; + } + for (int j = 0; j < natoms * 3; j++) { + force_mag[i][j] = force_mag_flat[i * natoms * 3 + j]; + } + // for (int j = 0; j < 9; j++) { + // virial[i][j] = virial_flat[i * 9 + j]; + // } + } + }; + /** + * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, + * and atomic virial by using this DP spin model deviation. + * @param[out] ener The system energy. + * @param[out] force The force on each atom. + * @param[out] force_mag The magnetic force on each atom. + * @param[out] virial The virial. + * @param[out] atom_energy The atomic energy. + * @param[out] atom_virial The atomic virial. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9 (PBC) or empty (no PBC). + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. + **/ + template + void compute( + std::vector &ener, + std::vector> &force, + std::vector> &force_mag, + std::vector> &virial, + std::vector> &atom_energy, + std::vector> &atom_virial, + const std::vector &coord, + const std::vector &spin, + const std::vector &atype, + const std::vector &box, + const std::vector &fparam = std::vector(), + const std::vector &aparam = std::vector()) { + unsigned int natoms = atype.size(); + unsigned int nframes = 1; + assert(natoms * 3 == coord.size()); + if (!box.empty()) { + assert(box.size() == 9); + } + const VALUETYPE *coord_ = &coord[0]; + const VALUETYPE *spin_ = &spin[0]; + const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; + const int *atype_ = &atype[0]; + + std::vector energy_flat(numb_models); + std::vector force_flat(static_cast(numb_models) * + natoms * 3); + std::vector force_mag_flat(static_cast(numb_models) * + natoms * 3); + std::vector virial_flat(numb_models * 9); + std::vector atom_energy_flat(static_cast(numb_models) * + natoms); + std::vector atom_virial_flat(static_cast(numb_models) * + natoms * 9); + double *ener_ = &energy_flat[0]; + VALUETYPE *force_ = &force_flat[0]; + VALUETYPE *force_mag_ = &force_mag_flat[0]; + VALUETYPE *virial_ = &virial_flat[0]; + VALUETYPE *atomic_ener_ = &atom_energy_flat[0]; + VALUETYPE *atomic_virial_ = &atom_virial_flat[0]; + std::vector fparam_, aparam_; + validate_fparam_aparam(nframes, natoms, fparam, aparam); + tile_fparam_aparam(fparam_, nframes, dfparam, fparam); + tile_fparam_aparam(aparam_, nframes, natoms * daparam, aparam); + const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; + const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; + + _DP_DeepSpinModelDeviCompute( + dp, natoms, coord_, spin_, atype_, box_, fparam__, aparam__, ener_, + force_, force_mag_, virial_, atomic_ener_, atomic_virial_); + DP_CHECK_OK(DP_DeepSpinModelDeviCheckOK, dp); + + // reshape + ener.resize(numb_models); + force.resize(numb_models); + force_mag.resize(numb_models); + virial.resize(numb_models); + atom_energy.resize(numb_models); + atom_virial.resize(numb_models); + for (int i = 0; i < numb_models; i++) { + ener[i] = energy_flat[i]; + force[i].resize(static_cast(natoms) * 3); + force_mag[i].resize(static_cast(natoms) * 3); + virial[i].resize(9); + atom_energy[i].resize(natoms); + atom_virial[i].resize(static_cast(natoms) * 9); + for (int j = 0; j < natoms * 3; j++) { + force[i][j] = force_flat[i * natoms * 3 + j]; + } + for (int j = 0; j < natoms * 3; j++) { + force_mag[i][j] = force_mag_flat[i * natoms * 3 + j]; + } + // for (int j = 0; j < 9; j++) { + // virial[i][j] = virial_flat[i * 9 + j]; + // } + for (int j = 0; j < natoms; j++) { + atom_energy[i][j] = atom_energy_flat[i * natoms + j]; + } + // for (int j = 0; j < natoms * 9; j++) { + // atom_virial[i][j] = atom_virial_flat[i * natoms * 9 + j]; + // } + } + }; + /** * @brief Evaluate the energy, force, magnetic force and virial by using this * DP spin model deviation. diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index 0021254ab4..eba9be3664 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -798,6 +798,118 @@ template void DP_DeepPotModelDeviCompute_variant(DP_DeepPotModelDevi* dp, float* atomic_energy, float* atomic_virial); +template +void DP_DeepSpinModelDeviCompute_variant(DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const VALUETYPE* coord, + const VALUETYPE* spin, + const int* atype, + const VALUETYPE* cell, + const VALUETYPE* fparam, + const VALUETYPE* aparam, + double* energy, + VALUETYPE* force, + VALUETYPE* force_mag, + VALUETYPE* virial, + VALUETYPE* atomic_energy, + VALUETYPE* atomic_virial) { + if (nframes > 1) { + throw std::runtime_error("nframes > 1 not supported yet"); + } + // init C++ vectors from C arrays + std::vector coord_(coord, coord + natoms * 3); + std::vector spin_(spin, spin + natoms * 3); + std::vector atype_(atype, atype + natoms); + std::vector cell_; + if (cell) { + // pbc + cell_.assign(cell, cell + 9); + } + std::vector fparam_; + if (fparam) { + fparam_.assign(fparam, fparam + dp->dfparam); + } + std::vector aparam_; + if (aparam) { + aparam_.assign(aparam, aparam + nframes * natoms * dp->daparam); + } + // different from DeepPot + std::vector e; + std::vector> f, fm, v, ae, av; + + if (atomic_energy || atomic_virial) { + DP_REQUIRES_OK(dp, dp->dp.compute(e, f, fm, v, ae, av, coord_, spin_, + atype_, cell_, fparam_, aparam_)); + } else { + DP_REQUIRES_OK(dp, dp->dp.compute(e, f, fm, v, coord_, spin_, atype_, cell_, + fparam_, aparam_)); + } + // 2D vector to 2D array, flatten first + if (energy) { + std::copy(e.begin(), e.end(), energy); + } + if (force) { + std::vector f_flat; + flatten_vector(f_flat, f); + std::copy(f_flat.begin(), f_flat.end(), force); + } + if (force_mag) { + std::vector fm_flat; + flatten_vector(fm_flat, fm); + std::copy(fm_flat.begin(), fm_flat.end(), force_mag); + } + // if (virial) { + // std::vector v_flat; + // flatten_vector(v_flat, v); + // std::copy(v_flat.begin(), v_flat.end(), virial); + // } + if (atomic_energy) { + std::vector ae_flat; + flatten_vector(ae_flat, ae); + std::copy(ae_flat.begin(), ae_flat.end(), atomic_energy); + } + // if (atomic_virial) { + // std::vector av_flat; + // flatten_vector(av_flat, av); + // std::copy(av_flat.begin(), av_flat.end(), atomic_virial); + // } +} + +template void DP_DeepSpinModelDeviCompute_variant( + DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial); + +template void DP_DeepSpinModelDeviCompute_variant( + DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial); + template void DP_DeepPotModelDeviComputeNList_variant(DP_DeepPotModelDevi* dp, const int nframes, @@ -1709,6 +1821,46 @@ void DP_DeepPotModelDeviComputef2(DP_DeepPotModelDevi* dp, virial, atomic_energy, atomic_virial); } +void DP_DeepSpinModelDeviCompute2(DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const double* coord, + const double* spin, + const int* atype, + const double* cell, + const double* fparam, + const double* aparam, + double* energy, + double* force, + double* force_mag, + double* virial, + double* atomic_energy, + double* atomic_virial) { + DP_DeepSpinModelDeviCompute_variant( + dp, nframes, natoms, coord, spin, atype, cell, fparam, aparam, energy, + force, force_mag, virial, atomic_energy, atomic_virial); +} + +void DP_DeepSpinModelDeviComputef2(DP_DeepSpinModelDevi* dp, + const int nframes, + const int natoms, + const float* coord, + const float* spin, + const int* atype, + const float* cell, + const float* fparam, + const float* aparam, + double* energy, + float* force, + float* force_mag, + float* virial, + float* atomic_energy, + float* atomic_virial) { + DP_DeepSpinModelDeviCompute_variant( + dp, nframes, natoms, coord, spin, atype, cell, fparam, aparam, energy, + force, force_mag, virial, atomic_energy, atomic_virial); +} + void DP_DeepPotModelDeviComputeNList(DP_DeepPotModelDevi* dp, const int natoms, const double* coord, diff --git a/source/api_c/tests/test_deepspin_a_hpp_tf.cc b/source/api_c/tests/test_deepspin_a_hpp_tf.cc index 4d4899543a..5a12d2d04d 100644 --- a/source/api_c/tests/test_deepspin_a_hpp_tf.cc +++ b/source/api_c/tests/test_deepspin_a_hpp_tf.cc @@ -278,3 +278,47 @@ TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_lmp_nlist) { // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); // } } + +TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_lmp_nlist_atomic) { + using VALUETYPE = TypeParam; + const std::vector& coord = this->coord; + const std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + std::vector& expected_e = this->expected_e; + std::vector& expected_f = this->expected_f; + std::vector& expected_fm = this->expected_fm; + // std::vector& expected_v = this->expected_v; + unsigned int& natoms = this->natoms; + double& expected_tot_e = this->expected_tot_e; + // std::vector& expected_tot_v = this->expected_tot_v; + deepmd::hpp::DeepSpin& dp = this->dp; + double ener; + std::vector force, force_mag, virial, atom_ener, atom_vir; + std::vector > nlist_data = {{1}, {0}, {3}, {2}}; + std::vector ilist(natoms), numneigh(natoms); + std::vector firstneigh(natoms); + deepmd::hpp::InputNlist inlist(natoms, &ilist[0], &numneigh[0], + &firstneigh[0]); + deepmd::hpp::convert_nlist(inlist, nlist_data); + dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, + atype, box, 0, inlist, 0); + + EXPECT_EQ(force.size(), natoms * 3); + EXPECT_EQ(force_mag.size(), natoms * 3); + // EXPECT_EQ(virial.size(), 9); + + EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); + } + for (int ii = 0; ii < natoms * 3; ++ii) { + EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); + } + for (int ii = 0; ii < natoms; ++ii) { + EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); + } + // for (int ii = 0; ii < 3 * 3; ++ii) { + // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); + // } +} diff --git a/source/api_c/tests/test_deepspin_model_devi_hpp.cc b/source/api_c/tests/test_deepspin_model_devi_hpp.cc new file mode 100644 index 0000000000..c6bbdef8a8 --- /dev/null +++ b/source/api_c/tests/test_deepspin_model_devi_hpp.cc @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include + +#include +#include +#include +#include + +#include "deepmd.hpp" +#include "test_utils.h" + +template +class TestInferDeepSpinModeDevi : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 1, 1, 0, 1, 1}; + std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; + int natoms; + + deepmd::hpp::DeepSpin dp0; + deepmd::hpp::DeepSpin dp1; + deepmd::hpp::DeepSpinModelDevi dp_md; + + void SetUp() override { + { + dp0.init("../../tests/infer/deeppot_dpa_spin.pth"); + } + { + dp1.init("../../tests/infer/deeppot_dpa_spin.pth"); + } + dp_md.init( + std::vector({"../../tests/infer/deeppot_dpa_spin.pth", + "../../tests/infer/deeppot_dpa_spin.pth"})); + }; + + void TearDown() override {}; +}; + +TYPED_TEST_SUITE(TestInferDeepSpinModeDevi, ValueTypes); + +TYPED_TEST(TestInferDeepSpinModeDevi, attrs) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + int& natoms = this->natoms; + deepmd::hpp::DeepSpin& dp0 = this->dp0; + deepmd::hpp::DeepSpin& dp1 = this->dp1; + deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; + EXPECT_EQ(dp0.cutoff(), dp_md.cutoff()); + EXPECT_EQ(dp0.numb_types(), dp_md.numb_types()); + // EXPECT_EQ(dp0.dim_fparam(), dp_md.dim_fparam()); + // EXPECT_EQ(dp0.dim_aparam(), dp_md.dim_aparam()); + EXPECT_EQ(dp1.cutoff(), dp_md.cutoff()); + EXPECT_EQ(dp1.numb_types(), dp_md.numb_types()); + // EXPECT_EQ(dp1.dim_fparam(), dp_md.dim_fparam()); + // EXPECT_EQ(dp1.dim_aparam(), dp_md.dim_aparam()); +} + +TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + int& natoms = this->natoms; + deepmd::hpp::DeepSpin& dp0 = this->dp0; + deepmd::hpp::DeepSpin& dp1 = this->dp1; + deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; + float rc = dp_md.cutoff(); + int nloc = coord.size() / 3; + + int nmodel = 2; + std::vector edir(nmodel), emd; + std::vector > fdir(nmodel), fmagdir(nmodel), + vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd; + dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], coord, spin, atype, box); + dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], coord, spin, atype, box); + dp_md.compute(emd, fmd, fmmagd, vmd, coord, spin, atype, box); + + EXPECT_EQ(edir.size(), emd.size()); + EXPECT_EQ(fdir.size(), fmd.size()); + EXPECT_EQ(fmagdir.size(), fmmagd.size()); + // EXPECT_EQ(vdir.size(), vmd.size()); + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); + EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); + // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); + } + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); + for (int ii = 0; ii < fdir[0].size(); ++ii) { + EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); + } + for (int ii = 0; ii < fmagdir[0].size(); ++ii) { + EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < vdir[0].size(); ++ii) { + // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); + // } + } +} + +TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + int& natoms = this->natoms; + deepmd::hpp::DeepSpin& dp0 = this->dp0; + deepmd::hpp::DeepSpin& dp1 = this->dp1; + deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; + + int nmodel = 2; + std::vector edir(nmodel), emd; + std::vector > fdir(nmodel), fmagdir(nmodel), + vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd, aedir(nmodel), aemd, + avdir(nmodel), avmd(nmodel); + dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], aedir[0], avdir[0], coord, + spin, atype, box); + dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], aedir[1], avdir[1], coord, + spin, atype, box); + dp_md.compute(emd, fmd, fmmagd, vmd, aemd, avmd, coord, spin, atype, box); + + EXPECT_EQ(edir.size(), emd.size()); + EXPECT_EQ(fdir.size(), fmd.size()); + EXPECT_EQ(fmagdir.size(), fmmagd.size()); + // EXPECT_EQ(vdir.size(), vmd.size()); + EXPECT_EQ(aedir.size(), aemd.size()); + // EXPECT_EQ(avdir.size(), avmd.size()); + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); + EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); + // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); + EXPECT_EQ(aedir[kk].size(), aemd[kk].size()); + // EXPECT_EQ(avdir[kk].size(), avmd[kk].size()); + } + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); + for (int ii = 0; ii < fdir[0].size(); ++ii) { + EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); + } + for (int ii = 0; ii < fmagdir[0].size(); ++ii) { + EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < vdir[0].size(); ++ii) { + // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); + // } + for (int ii = 0; ii < aedir[0].size(); ++ii) { + EXPECT_LT(fabs(aedir[kk][ii] - aemd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < avdir[0].size(); ++ii) { + // EXPECT_LT(fabs(avdir[kk][ii] - avmd[kk][ii]), EPSILON); + // } + } +} diff --git a/source/api_cc/include/DeepSpin.h b/source/api_cc/include/DeepSpin.h index 8b1e896e73..4fc9972378 100644 --- a/source/api_cc/include/DeepSpin.h +++ b/source/api_cc/include/DeepSpin.h @@ -447,10 +447,84 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { const int& gpu_rank = 0, const std::vector& file_contents = std::vector()); + /** + * @brief Evaluate the energy, force and virial by using these DP spin models. + * @param[out] all_ener The system energies of all models. + * @param[out] all_force The forces on each atom of all models. + * @param[out] all_force_mag The magnetic forces on each atom of all models. + * @param[out] all_virial The virials of all models. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. dim_aparam. Then all frames and atoms are provided with the + *same aparam. + **/ + template + void compute(std::vector& all_ener, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); + + /** + * @brief Evaluate the energy, force, virial, atomic energy, and atomic virial + *by using these DP spin models. + * @param[out] all_ener The system energies of all models. + * @param[out] all_force The forces on each atom of all models. + * @param[out] all_force_mag The magnetic forces on each atom of all models. + * @param[out] all_virial The virials of all models. + * @param[out] all_atom_energy The atomic energies of all models. + * @param[out] all_atom_virial The atomic virials of all models. + * @param[in] coord The coordinates of atoms. The array should be of size + *nframes x natoms x 3. + * @param[in] spin The spins of atoms, [0, 0, 0] if no spin. The array should + *be of size nframes x natoms x 3. + * @param[in] atype The atom types. The list should contain natoms ints. + * @param[in] box The cell of the region. The array should be of size nframes + *x 9. + * @param[in] fparam The frame parameter. The array can be of size : + * nframes x dim_fparam. + * dim_fparam. Then all frames are assumed to be provided with the same + *fparam. + * @param[in] aparam The atomic parameter The array can be of size : + * nframes x natoms x dim_aparam. + * natoms x dim_aparam. Then all frames are assumed to be provided with the + *same aparam. dim_aparam. Then all frames and atoms are provided with the + *same aparam. + **/ + template + void compute(std::vector& all_ener, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& coord, + const std::vector& spin, + const std::vector& atype, + const std::vector& box, + const std::vector& fparam = std::vector(), + const std::vector& aparam = std::vector()); /** * @brief Evaluate the energy, force, magnetic force and virial by using these - *DP models with spin input. + *DP spin models. * @param[out] all_ener The system energies of all models. * @param[out] all_force The forces on each atom of all models. * @param[out] all_force_mag The magnetic forces on each atom of all models. @@ -492,7 +566,7 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { /** * @brief Evaluate the energy, force, magnetic force, virial, atomic energy, - *and atomic virial by using these DP models with spin input. + *and atomic virial by using these DP spin models. * @param[out] all_ener The system energies of all models. * @param[out] all_force The forces on each atom of all models. * @param[out] all_force_mag The magnetic forces on each atom of all models. diff --git a/source/api_cc/src/DeepSpin.cc b/source/api_cc/src/DeepSpin.cc index 1702e8a45d..d761e9d3c2 100644 --- a/source/api_cc/src/DeepSpin.cc +++ b/source/api_cc/src/DeepSpin.cc @@ -489,6 +489,115 @@ void DeepSpinModelDevi::init(const std::vector& models, inited = true; } +template +void DeepSpinModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam_) { + // without nlist + if (numb_models == 0) { + return; + } + all_energy.resize(numb_models); + all_force.resize(numb_models); + all_force_mag.resize(numb_models); + all_virial.resize(numb_models); + for (unsigned ii = 0; ii < numb_models; ++ii) { + dps[ii]->compute(all_energy[ii], all_force[ii], all_force_mag[ii], + all_virial[ii], dcoord_, dspin_, datype_, dbox, fparam, + aparam_); + } +} + +template void DeepSpinModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpinModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template +void DeepSpinModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam_) { + if (numb_models == 0) { + return; + } + all_energy.resize(numb_models); + all_force.resize(numb_models); + all_force_mag.resize(numb_models); + all_virial.resize(numb_models); + all_atom_energy.resize(numb_models); + all_atom_virial.resize(numb_models); + for (unsigned ii = 0; ii < numb_models; ++ii) { + dps[ii]->compute(all_energy[ii], all_force[ii], all_force_mag[ii], + all_virial[ii], all_atom_energy[ii], all_atom_virial[ii], + dcoord_, dspin_, datype_, dbox, fparam, aparam_); + } +} + +template void DeepSpinModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + +template void DeepSpinModelDevi::compute( + std::vector& all_energy, + std::vector>& all_force, + std::vector>& all_force_mag, + std::vector>& all_virial, + std::vector>& all_atom_energy, + std::vector>& all_atom_virial, + const std::vector& dcoord_, + const std::vector& dspin_, + const std::vector& datype_, + const std::vector& dbox, + const std::vector& fparam, + const std::vector& aparam); + // support spin // nlist, no atomic template diff --git a/source/api_cc/tests/test_deepspin_model_devi.cc b/source/api_cc/tests/test_deepspin_model_devi.cc new file mode 100644 index 0000000000..fcc4a4315d --- /dev/null +++ b/source/api_cc/tests/test_deepspin_model_devi.cc @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "DeepSpin.h" +#include "neighbor_list.h" +#include "test_utils.h" + +template +class TestInferDeepSpinModeDevi : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 1, 1, 0, 1, 1}; + std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; + int natoms; + + deepmd::DeepSpin dp0; + deepmd::DeepSpin dp1; + deepmd::DeepSpinModelDevi dp_md; + + void SetUp() override { + { + dp0.init("../../tests/infer/deeppot_dpa_spin.pth"); + } + { + dp1.init("../../tests/infer/deeppot_dpa_spin.pth"); + } + dp_md.init( + std::vector({"../../tests/infer/deeppot_dpa_spin.pth", + "../../tests/infer/deeppot_dpa_spin.pth"})); + }; + + void TearDown() override {}; +}; + +TYPED_TEST_SUITE(TestInferDeepSpinModeDevi, ValueTypes); + +TYPED_TEST(TestInferDeepSpinModeDevi, attrs) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + int& natoms = this->natoms; + deepmd::DeepSpin& dp0 = this->dp0; + deepmd::DeepSpin& dp1 = this->dp1; + deepmd::DeepSpinModelDevi& dp_md = this->dp_md; + EXPECT_EQ(dp0.cutoff(), dp_md.cutoff()); + EXPECT_EQ(dp0.numb_types(), dp_md.numb_types()); + EXPECT_EQ(dp0.dim_fparam(), dp_md.dim_fparam()); + EXPECT_EQ(dp0.dim_aparam(), dp_md.dim_aparam()); + EXPECT_EQ(dp1.cutoff(), dp_md.cutoff()); + EXPECT_EQ(dp1.numb_types(), dp_md.numb_types()); + EXPECT_EQ(dp1.dim_fparam(), dp_md.dim_fparam()); + EXPECT_EQ(dp1.dim_aparam(), dp_md.dim_aparam()); +} + +TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + int& natoms = this->natoms; + deepmd::DeepSpin& dp0 = this->dp0; + deepmd::DeepSpin& dp1 = this->dp1; + deepmd::DeepSpinModelDevi& dp_md = this->dp_md; + float rc = dp_md.cutoff(); + int nloc = coord.size() / 3; + + int nmodel = 2; + std::vector edir(nmodel), emd; + std::vector > fdir(nmodel), fmagdir(nmodel), + vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd; + dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], coord, spin, atype, box); + dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], coord, spin, atype, box); + dp_md.compute(emd, fmd, fmmagd, vmd, coord, spin, atype, box); + + EXPECT_EQ(edir.size(), emd.size()); + EXPECT_EQ(fdir.size(), fmd.size()); + EXPECT_EQ(fmagdir.size(), fmmagd.size()); + // EXPECT_EQ(vdir.size(), vmd.size()); + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); + EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); + // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); + } + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); + for (int ii = 0; ii < fdir[0].size(); ++ii) { + EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); + } + for (int ii = 0; ii < fmagdir[0].size(); ++ii) { + EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < vdir[0].size(); ++ii) { + // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); + // } + } +} + +TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + int& natoms = this->natoms; + deepmd::DeepSpin& dp0 = this->dp0; + deepmd::DeepSpin& dp1 = this->dp1; + deepmd::DeepSpinModelDevi& dp_md = this->dp_md; + + int nmodel = 2; + std::vector edir(nmodel), emd; + std::vector > fdir(nmodel), fmagdir(nmodel), + vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd, aedir(nmodel), aemd, + avdir(nmodel), avmd(nmodel); + dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], aedir[0], avdir[0], coord, + spin, atype, box); + dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], aedir[1], avdir[1], coord, + spin, atype, box); + dp_md.compute(emd, fmd, fmmagd, vmd, aemd, avmd, coord, spin, atype, box); + + EXPECT_EQ(edir.size(), emd.size()); + EXPECT_EQ(fdir.size(), fmd.size()); + EXPECT_EQ(fmagdir.size(), fmmagd.size()); + // EXPECT_EQ(vdir.size(), vmd.size()); + EXPECT_EQ(aedir.size(), aemd.size()); + // EXPECT_EQ(avdir.size(), avmd.size()); + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); + EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); + // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); + EXPECT_EQ(aedir[kk].size(), aemd[kk].size()); + // EXPECT_EQ(avdir[kk].size(), avmd[kk].size()); + } + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); + for (int ii = 0; ii < fdir[0].size(); ++ii) { + EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); + } + for (int ii = 0; ii < fmagdir[0].size(); ++ii) { + EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < vdir[0].size(); ++ii) { + // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); + // } + for (int ii = 0; ii < aedir[0].size(); ++ii) { + EXPECT_LT(fabs(aedir[kk][ii] - aemd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < avdir[0].size(); ++ii) { + // EXPECT_LT(fabs(avdir[kk][ii] - avmd[kk][ii]), EPSILON); + // } + } +} From 31d69db98e41992d9150ad9379e6606ebc0cf9c6 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 9 Nov 2024 22:41:04 +0800 Subject: [PATCH 187/193] Delete test_deepspin_model_devi_hpp.cc --- .../tests/test_deepspin_model_devi_hpp.cc | 162 ------------------ 1 file changed, 162 deletions(-) delete mode 100644 source/api_c/tests/test_deepspin_model_devi_hpp.cc diff --git a/source/api_c/tests/test_deepspin_model_devi_hpp.cc b/source/api_c/tests/test_deepspin_model_devi_hpp.cc deleted file mode 100644 index c6bbdef8a8..0000000000 --- a/source/api_c/tests/test_deepspin_model_devi_hpp.cc +++ /dev/null @@ -1,162 +0,0 @@ -// SPDX-License-Identifier: LGPL-3.0-or-later -#include - -#include -#include -#include -#include - -#include "deepmd.hpp" -#include "test_utils.h" - -template -class TestInferDeepSpinModeDevi : public ::testing::Test { - protected: - std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, - 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, - 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; - std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., - 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; - std::vector atype = {0, 1, 1, 0, 1, 1}; - std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; - int natoms; - - deepmd::hpp::DeepSpin dp0; - deepmd::hpp::DeepSpin dp1; - deepmd::hpp::DeepSpinModelDevi dp_md; - - void SetUp() override { - { - dp0.init("../../tests/infer/deeppot_dpa_spin.pth"); - } - { - dp1.init("../../tests/infer/deeppot_dpa_spin.pth"); - } - dp_md.init( - std::vector({"../../tests/infer/deeppot_dpa_spin.pth", - "../../tests/infer/deeppot_dpa_spin.pth"})); - }; - - void TearDown() override {}; -}; - -TYPED_TEST_SUITE(TestInferDeepSpinModeDevi, ValueTypes); - -TYPED_TEST(TestInferDeepSpinModeDevi, attrs) { - using VALUETYPE = TypeParam; - std::vector& coord = this->coord; - std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - int& natoms = this->natoms; - deepmd::hpp::DeepSpin& dp0 = this->dp0; - deepmd::hpp::DeepSpin& dp1 = this->dp1; - deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; - EXPECT_EQ(dp0.cutoff(), dp_md.cutoff()); - EXPECT_EQ(dp0.numb_types(), dp_md.numb_types()); - // EXPECT_EQ(dp0.dim_fparam(), dp_md.dim_fparam()); - // EXPECT_EQ(dp0.dim_aparam(), dp_md.dim_aparam()); - EXPECT_EQ(dp1.cutoff(), dp_md.cutoff()); - EXPECT_EQ(dp1.numb_types(), dp_md.numb_types()); - // EXPECT_EQ(dp1.dim_fparam(), dp_md.dim_fparam()); - // EXPECT_EQ(dp1.dim_aparam(), dp_md.dim_aparam()); -} - -TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist) { - using VALUETYPE = TypeParam; - std::vector& coord = this->coord; - std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - int& natoms = this->natoms; - deepmd::hpp::DeepSpin& dp0 = this->dp0; - deepmd::hpp::DeepSpin& dp1 = this->dp1; - deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; - float rc = dp_md.cutoff(); - int nloc = coord.size() / 3; - - int nmodel = 2; - std::vector edir(nmodel), emd; - std::vector > fdir(nmodel), fmagdir(nmodel), - vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd; - dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], coord, spin, atype, box); - dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], coord, spin, atype, box); - dp_md.compute(emd, fmd, fmmagd, vmd, coord, spin, atype, box); - - EXPECT_EQ(edir.size(), emd.size()); - EXPECT_EQ(fdir.size(), fmd.size()); - EXPECT_EQ(fmagdir.size(), fmmagd.size()); - // EXPECT_EQ(vdir.size(), vmd.size()); - for (int kk = 0; kk < nmodel; ++kk) { - EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); - EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); - // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); - } - for (int kk = 0; kk < nmodel; ++kk) { - EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); - for (int ii = 0; ii < fdir[0].size(); ++ii) { - EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); - } - for (int ii = 0; ii < fmagdir[0].size(); ++ii) { - EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); - } - // for (int ii = 0; ii < vdir[0].size(); ++ii) { - // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); - // } - } -} - -TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist_atomic) { - using VALUETYPE = TypeParam; - std::vector& coord = this->coord; - std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - int& natoms = this->natoms; - deepmd::hpp::DeepSpin& dp0 = this->dp0; - deepmd::hpp::DeepSpin& dp1 = this->dp1; - deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; - - int nmodel = 2; - std::vector edir(nmodel), emd; - std::vector > fdir(nmodel), fmagdir(nmodel), - vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd, aedir(nmodel), aemd, - avdir(nmodel), avmd(nmodel); - dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], aedir[0], avdir[0], coord, - spin, atype, box); - dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], aedir[1], avdir[1], coord, - spin, atype, box); - dp_md.compute(emd, fmd, fmmagd, vmd, aemd, avmd, coord, spin, atype, box); - - EXPECT_EQ(edir.size(), emd.size()); - EXPECT_EQ(fdir.size(), fmd.size()); - EXPECT_EQ(fmagdir.size(), fmmagd.size()); - // EXPECT_EQ(vdir.size(), vmd.size()); - EXPECT_EQ(aedir.size(), aemd.size()); - // EXPECT_EQ(avdir.size(), avmd.size()); - for (int kk = 0; kk < nmodel; ++kk) { - EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); - EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); - // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); - EXPECT_EQ(aedir[kk].size(), aemd[kk].size()); - // EXPECT_EQ(avdir[kk].size(), avmd[kk].size()); - } - for (int kk = 0; kk < nmodel; ++kk) { - EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); - for (int ii = 0; ii < fdir[0].size(); ++ii) { - EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); - } - for (int ii = 0; ii < fmagdir[0].size(); ++ii) { - EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); - } - // for (int ii = 0; ii < vdir[0].size(); ++ii) { - // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); - // } - for (int ii = 0; ii < aedir[0].size(); ++ii) { - EXPECT_LT(fabs(aedir[kk][ii] - aemd[kk][ii]), EPSILON); - } - // for (int ii = 0; ii < avdir[0].size(); ++ii) { - // EXPECT_LT(fabs(avdir[kk][ii] - avmd[kk][ii]), EPSILON); - // } - } -} From 8fb64984a68e4c4792de133940ef7bf18b3bc923 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sun, 10 Nov 2024 01:00:28 +0800 Subject: [PATCH 188/193] Delete test_deepspin_a_hpp_tf.cc --- source/api_c/tests/test_deepspin_a_hpp_tf.cc | 324 ------------------- 1 file changed, 324 deletions(-) delete mode 100644 source/api_c/tests/test_deepspin_a_hpp_tf.cc diff --git a/source/api_c/tests/test_deepspin_a_hpp_tf.cc b/source/api_c/tests/test_deepspin_a_hpp_tf.cc deleted file mode 100644 index 5a12d2d04d..0000000000 --- a/source/api_c/tests/test_deepspin_a_hpp_tf.cc +++ /dev/null @@ -1,324 +0,0 @@ -// SPDX-License-Identifier: LGPL-3.0-or-later -#include - -#include -#include -#include -#include - -#include "deepmd.hpp" -#include "test_utils.h" - -template -class TestInferDeepSpinTFAHPP : public ::testing::Test { - protected: - std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, - 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; - std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, - 0., 0., 0., 0., 0., 0.}; - std::vector atype = {0, 0, 1, 1}; - std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; - std::vector expected_e = {-7.314365618560289, -7.313531316181837, - -2.8980532245013997, -2.897373810282277}; - std::vector expected_f = { - 0.0275132293555514, -0.0112057401883111, -0.0212278132621243, - -0.0229926640905535, 0.0114378553363334, 0.019670014885563, - 0.0086502856137601, 0.0088926283192558, -0.0127014507822769, - -0.013170850878758, -0.009124743467278, 0.0142592491588383}; - std::vector expected_fm = { - 0.0066245455049449, -0.0023055088004378, 0.0294608578045521, - -0.0041979452385972, 0.0025775020220167, 0.0316295420619988, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; - unsigned int natoms; - double expected_tot_e; - // std::vector expected_tot_v; - - deepmd::hpp::DeepSpin dp; - - void SetUp() override { - std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; - deepmd::hpp::convert_pbtxt_to_pb("../../tests/infer/deepspin_nlist.pbtxt", - "deepspin_nlist_hpp.pb"); - - dp.init("deepspin_nlist_hpp.pb"); - - natoms = expected_e.size(); - EXPECT_EQ(natoms * 3, expected_f.size()); - EXPECT_EQ(natoms * 3, expected_fm.size()); - // EXPECT_EQ(natoms * 9, expected_v.size()); - expected_tot_e = 0.; - // expected_tot_v.resize(9); - // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); - for (unsigned int ii = 0; ii < natoms; ++ii) { - expected_tot_e += expected_e[ii]; - } - // for (unsigned int ii = 0; ii < natoms; ++ii) { - // for (int dd = 0; dd < 9; ++dd) { - // expected_tot_v[dd] += expected_v[ii * 9 + dd]; - // } - // } - }; - - void TearDown() override { remove("deepspin_nlist_hpp.pb"); }; -}; - -TYPED_TEST_SUITE(TestInferDeepSpinTFAHPP, ValueTypes); - -TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist) { - using VALUETYPE = TypeParam; - std::vector& coord = this->coord; - std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - std::vector& expected_e = this->expected_e; - std::vector& expected_f = this->expected_f; - std::vector& expected_fm = this->expected_fm; - // std::vector& expected_v = this->expected_v; - unsigned int& natoms = this->natoms; - double& expected_tot_e = this->expected_tot_e; - // std::vector& expected_tot_v = this->expected_tot_v; - deepmd::hpp::DeepSpin& dp = this->dp; - double ener; - std::vector force, force_mag, virial; - - dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); - - EXPECT_EQ(force.size(), natoms * 3); - EXPECT_EQ(force_mag.size(), natoms * 3); - // EXPECT_EQ(virial.size(), 9); - - EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); - } - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); - } - // for (int ii = 0; ii < 3 * 3; ++ii) { - // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); - // } -} - -TYPED_TEST(TestInferDeepSpinTFAHPP, cpu_build_nlist_atomic) { - using VALUETYPE = TypeParam; - const std::vector& coord = this->coord; - const std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - std::vector& expected_e = this->expected_e; - std::vector& expected_f = this->expected_f; - std::vector& expected_fm = this->expected_fm; - // std::vector& expected_v = this->expected_v; - unsigned int& natoms = this->natoms; - double& expected_tot_e = this->expected_tot_e; - // std::vector& expected_tot_v = this->expected_tot_v; - deepmd::hpp::DeepSpin& dp = this->dp; - double ener; - std::vector force, force_mag, virial, atom_ener, atom_vir; - dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, - atype, box); - - EXPECT_EQ(force.size(), natoms * 3); - EXPECT_EQ(force_mag.size(), natoms * 3); - // EXPECT_EQ(virial.size(), 9); - EXPECT_EQ(atom_ener.size(), natoms); - // EXPECT_EQ(atom_vir.size(), natoms * 9); - - EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); - } - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); - } - // for (int ii = 0; ii < 3 * 3; ++ii) { - // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); - // } - for (int ii = 0; ii < natoms; ++ii) { - EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); - } - // for (int ii = 0; ii < natoms * 9; ++ii) { - // EXPECT_LT(fabs(atom_vir[ii] - expected_v[ii]), EPSILON); - // } -} - -TYPED_TEST(TestInferDeepSpinTFAHPP, print_summary) { - deepmd::hpp::DeepSpin& dp = this->dp; - dp.print_summary(""); -} - -template -class TestInferDeepSpinTFANoPbcHPP : public ::testing::Test { - protected: - std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, - 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; - std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, - 0., 0., 0., 0., 0., 0.}; - std::vector atype = {0, 0, 1, 1}; - std::vector box = {100., 0., 0., 0., 100., 0., 0., 0., 100.}; - std::vector expected_e = {-7.313160384523243, -7.312173646552338, - -2.8984477845267067, - -2.8984477845267067}; - std::vector expected_f = { - 0.0277100137316238, -0.0116082489956803, -0.0211484273275705, - -0.0277100137316238, 0.0116082489956803, 0.0211484273275705, - 0.0097588349924651, 0.0091168063745397, -0.0133541952528469, - -0.0097588349924651, -0.0091168063745397, 0.0133541952528469}; - std::vector expected_fm = { - 0.0058990325687816, -0.0024712163463815, 0.0296682261295907, - -0.0060028470719556, 0.0025147062058193, 0.0321884178873188, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000, - 0.0000000000000000, 0.00000000000000000, 0.00000000000000000}; - unsigned int natoms; - double expected_tot_e; - // std::vector expected_tot_v; - - deepmd::hpp::DeepSpin dp; - - void SetUp() override { - std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; - deepmd::hpp::convert_pbtxt_to_pb(file_name, "deepspin_nlist_hpp.pb"); - - dp.init("deepspin_nlist_hpp.pb"); - - natoms = expected_e.size(); - EXPECT_EQ(natoms * 3, expected_f.size()); - EXPECT_EQ(natoms * 3, expected_fm.size()); - // EXPECT_EQ(natoms * 9, expected_v.size()); - expected_tot_e = 0.; - // expected_tot_v.resize(9); - // std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); - for (unsigned int ii = 0; ii < natoms; ++ii) { - expected_tot_e += expected_e[ii]; - } - // for (unsigned int ii = 0; ii < natoms; ++ii) { - // for (int dd = 0; dd < 9; ++dd) { - // expected_tot_v[dd] += expected_v[ii * 9 + dd]; - // } - // } - }; - - void TearDown() override { remove("deepspin_nlist_hpp.pb"); }; -}; - -TYPED_TEST_SUITE(TestInferDeepSpinTFANoPbcHPP, ValueTypes); - -TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_build_nlist) { - using VALUETYPE = TypeParam; - std::vector& coord = this->coord; - std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - std::vector& expected_e = this->expected_e; - std::vector& expected_f = this->expected_f; - std::vector& expected_fm = this->expected_fm; - // std::vector& expected_v = this->expected_v; - unsigned int& natoms = this->natoms; - double& expected_tot_e = this->expected_tot_e; - // std::vector& expected_tot_v = this->expected_tot_v; - deepmd::hpp::DeepSpin& dp = this->dp; - double ener; - std::vector force, force_mag, virial; - dp.compute(ener, force, force_mag, virial, coord, spin, atype, box); - - EXPECT_EQ(force.size(), natoms * 3); - EXPECT_EQ(force_mag.size(), natoms * 3); - // EXPECT_EQ(virial.size(), 9); - - EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); - for (unsigned int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); - } - for (unsigned int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); - } - // for (unsigned int ii = 0; ii < 3 * 3; ++ii) { - // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); - // } -} - -TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_lmp_nlist) { - using VALUETYPE = TypeParam; - std::vector& coord = this->coord; - std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - std::vector& expected_e = this->expected_e; - std::vector& expected_f = this->expected_f; - std::vector& expected_fm = this->expected_fm; - // std::vector& expected_v = this->expected_v; - unsigned int& natoms = this->natoms; - double& expected_tot_e = this->expected_tot_e; - // std::vector& expected_tot_v = this->expected_tot_v; - deepmd::hpp::DeepSpin& dp = this->dp; - double ener; - std::vector force, force_mag, virial; - std::vector > nlist_data = {{1}, {0}, {3}, {2}}; - std::vector ilist(natoms), numneigh(natoms); - std::vector firstneigh(natoms); - deepmd::hpp::InputNlist inlist(natoms, &ilist[0], &numneigh[0], - &firstneigh[0]); - deepmd::hpp::convert_nlist(inlist, nlist_data); - dp.compute(ener, force, force_mag, virial, coord, spin, atype, box, 0, inlist, - 0); - - EXPECT_EQ(force.size(), natoms * 3); - EXPECT_EQ(force_mag.size(), natoms * 3); - // EXPECT_EQ(virial.size(), 9); - - EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); - } - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); - } - // for (int ii = 0; ii < 3 * 3; ++ii) { - // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); - // } -} - -TYPED_TEST(TestInferDeepSpinTFANoPbcHPP, cpu_lmp_nlist_atomic) { - using VALUETYPE = TypeParam; - const std::vector& coord = this->coord; - const std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - std::vector& expected_e = this->expected_e; - std::vector& expected_f = this->expected_f; - std::vector& expected_fm = this->expected_fm; - // std::vector& expected_v = this->expected_v; - unsigned int& natoms = this->natoms; - double& expected_tot_e = this->expected_tot_e; - // std::vector& expected_tot_v = this->expected_tot_v; - deepmd::hpp::DeepSpin& dp = this->dp; - double ener; - std::vector force, force_mag, virial, atom_ener, atom_vir; - std::vector > nlist_data = {{1}, {0}, {3}, {2}}; - std::vector ilist(natoms), numneigh(natoms); - std::vector firstneigh(natoms); - deepmd::hpp::InputNlist inlist(natoms, &ilist[0], &numneigh[0], - &firstneigh[0]); - deepmd::hpp::convert_nlist(inlist, nlist_data); - dp.compute(ener, force, force_mag, virial, atom_ener, atom_vir, coord, spin, - atype, box, 0, inlist, 0); - - EXPECT_EQ(force.size(), natoms * 3); - EXPECT_EQ(force_mag.size(), natoms * 3); - // EXPECT_EQ(virial.size(), 9); - - EXPECT_LT(fabs(ener - expected_tot_e), EPSILON); - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON); - } - for (int ii = 0; ii < natoms * 3; ++ii) { - EXPECT_LT(fabs(force_mag[ii] - expected_fm[ii]), EPSILON); - } - for (int ii = 0; ii < natoms; ++ii) { - EXPECT_LT(fabs(atom_ener[ii] - expected_e[ii]), EPSILON); - } - // for (int ii = 0; ii < 3 * 3; ++ii) { - // EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); - // } -} From 4bc0e42aa8bb7ac3b41041f620a89085b2f5e7bb Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sun, 10 Nov 2024 23:31:26 +0800 Subject: [PATCH 189/193] Create test_deepspin_model_devi_hpp.cc --- .../tests/test_deepspin_model_devi_hpp.cc | 166 ++++++++++++++++++ 1 file changed, 166 insertions(+) create mode 100644 source/api_c/tests/test_deepspin_model_devi_hpp.cc diff --git a/source/api_c/tests/test_deepspin_model_devi_hpp.cc b/source/api_c/tests/test_deepspin_model_devi_hpp.cc new file mode 100644 index 0000000000..ef3cbf2644 --- /dev/null +++ b/source/api_c/tests/test_deepspin_model_devi_hpp.cc @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +#include + +#include +#include +#include +#include + +#include "deepmd.hpp" +#include "test_utils.h" + +template +class TestInferDeepSpinModeDevi : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, + 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 0, 1, 1}; + std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; + int natoms; + + deepmd::hpp::DeepSpin dp0; + deepmd::hpp::DeepSpin dp1; + deepmd::hpp::DeepSpinModelDevi dp_md; + + void SetUp() override { + { + std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; + deepmd::hpp::convert_pbtxt_to_pb("../../tests/infer/deepspin_nlist.pbtxt", + "deepspin_nlist.pb"); + dp0.init("deepspin_nlist.pb"); + } + { + std::string file_name = "../../tests/infer/deepspin_nlist-2.pbtxt"; + deepmd::hpp::convert_pbtxt_to_pb( + "../../tests/infer/deepspin_nlist-2.pbtxt", "deepspin_nlist-2.pb"); + dp1.init("deepspin_nlist-2.pb"); + } + dp_md.init( + std::vector({"deepspin_nlist.pb", "deepspin_nlist-2.pb"})); + }; + + void TearDown() override {}; +}; + +TYPED_TEST_SUITE(TestInferDeepSpinModeDevi, ValueTypes); + +TYPED_TEST(TestInferDeepSpinModeDevi, attrs) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + int& natoms = this->natoms; + deepmd::hpp::DeepSpin& dp0 = this->dp0; + deepmd::hpp::DeepSpin& dp1 = this->dp1; + deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; + EXPECT_EQ(dp0.cutoff(), dp_md.cutoff()); + EXPECT_EQ(dp0.numb_types(), dp_md.numb_types()); + // EXPECT_EQ(dp0.dim_fparam(), dp_md.dim_fparam()); + // EXPECT_EQ(dp0.dim_aparam(), dp_md.dim_aparam()); + EXPECT_EQ(dp1.cutoff(), dp_md.cutoff()); + EXPECT_EQ(dp1.numb_types(), dp_md.numb_types()); + // EXPECT_EQ(dp1.dim_fparam(), dp_md.dim_fparam()); + // EXPECT_EQ(dp1.dim_aparam(), dp_md.dim_aparam()); +} + +TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + int& natoms = this->natoms; + deepmd::hpp::DeepSpin& dp0 = this->dp0; + deepmd::hpp::DeepSpin& dp1 = this->dp1; + deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; + float rc = dp_md.cutoff(); + int nloc = coord.size() / 3; + + int nmodel = 2; + std::vector edir(nmodel), emd; + std::vector > fdir(nmodel), fmagdir(nmodel), + vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd; + dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], coord, spin, atype, box); + dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], coord, spin, atype, box); + dp_md.compute(emd, fmd, fmmagd, vmd, coord, spin, atype, box); + + EXPECT_EQ(edir.size(), emd.size()); + EXPECT_EQ(fdir.size(), fmd.size()); + EXPECT_EQ(fmagdir.size(), fmmagd.size()); + // EXPECT_EQ(vdir.size(), vmd.size()); + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); + EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); + // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); + } + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); + for (int ii = 0; ii < fdir[0].size(); ++ii) { + EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); + } + for (int ii = 0; ii < fmagdir[0].size(); ++ii) { + EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < vdir[0].size(); ++ii) { + // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); + // } + } +} + +TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + int& natoms = this->natoms; + deepmd::hpp::DeepSpin& dp0 = this->dp0; + deepmd::hpp::DeepSpin& dp1 = this->dp1; + deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; + + int nmodel = 2; + std::vector edir(nmodel), emd; + std::vector > fdir(nmodel), fmagdir(nmodel), + vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd, aedir(nmodel), aemd, + avdir(nmodel), avmd(nmodel); + dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], aedir[0], avdir[0], coord, + spin, atype, box); + dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], aedir[1], avdir[1], coord, + spin, atype, box); + dp_md.compute(emd, fmd, fmmagd, vmd, aemd, avmd, coord, spin, atype, box); + + EXPECT_EQ(edir.size(), emd.size()); + EXPECT_EQ(fdir.size(), fmd.size()); + EXPECT_EQ(fmagdir.size(), fmmagd.size()); + // EXPECT_EQ(vdir.size(), vmd.size()); + EXPECT_EQ(aedir.size(), aemd.size()); + // EXPECT_EQ(avdir.size(), avmd.size()); + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); + EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); + // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); + EXPECT_EQ(aedir[kk].size(), aemd[kk].size()); + // EXPECT_EQ(avdir[kk].size(), avmd[kk].size()); + } + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); + for (int ii = 0; ii < fdir[0].size(); ++ii) { + EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); + } + for (int ii = 0; ii < fmagdir[0].size(); ++ii) { + EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < vdir[0].size(); ++ii) { + // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); + // } + for (int ii = 0; ii < aedir[0].size(); ++ii) { + EXPECT_LT(fabs(aedir[kk][ii] - aemd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < avdir[0].size(); ++ii) { + // EXPECT_LT(fabs(avdir[kk][ii] - avmd[kk][ii]), EPSILON); + // } + } +} From 1b7c79b7b82fe3f4e920027797656ea22bdbd05d Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sun, 10 Nov 2024 23:57:22 +0800 Subject: [PATCH 190/193] Update test_deepspin_model_devi_hpp.cc --- .../tests/test_deepspin_model_devi_hpp.cc | 150 +++++++++--------- 1 file changed, 76 insertions(+), 74 deletions(-) diff --git a/source/api_c/tests/test_deepspin_model_devi_hpp.cc b/source/api_c/tests/test_deepspin_model_devi_hpp.cc index ef3cbf2644..3b75bf1119 100644 --- a/source/api_c/tests/test_deepspin_model_devi_hpp.cc +++ b/source/api_c/tests/test_deepspin_model_devi_hpp.cc @@ -85,82 +85,84 @@ TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist) { vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd; dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], coord, spin, atype, box); dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], coord, spin, atype, box); - dp_md.compute(emd, fmd, fmmagd, vmd, coord, spin, atype, box); + // dp_md.compute(emd, fmd, fmmagd, vmd, coord, spin, atype, box); - EXPECT_EQ(edir.size(), emd.size()); - EXPECT_EQ(fdir.size(), fmd.size()); - EXPECT_EQ(fmagdir.size(), fmmagd.size()); - // EXPECT_EQ(vdir.size(), vmd.size()); - for (int kk = 0; kk < nmodel; ++kk) { - EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); - EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); - // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); - } - for (int kk = 0; kk < nmodel; ++kk) { - EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); - for (int ii = 0; ii < fdir[0].size(); ++ii) { - EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); - } - for (int ii = 0; ii < fmagdir[0].size(); ++ii) { - EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); - } - // for (int ii = 0; ii < vdir[0].size(); ++ii) { - // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); - // } - } + // EXPECT_EQ(edir.size(), emd.size()); + // EXPECT_EQ(fdir.size(), fmd.size()); + // EXPECT_EQ(fmagdir.size(), fmmagd.size()); + // // EXPECT_EQ(vdir.size(), vmd.size()); + // for (int kk = 0; kk < nmodel; ++kk) { + // EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); + // EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); + // // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); + // } + // for (int kk = 0; kk < nmodel; ++kk) { + // EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); + // for (int ii = 0; ii < fdir[0].size(); ++ii) { + // EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); + // } + // for (int ii = 0; ii < fmagdir[0].size(); ++ii) { + // EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); + // } + // // for (int ii = 0; ii < vdir[0].size(); ++ii) { + // // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); + // // } + // } } -TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist_atomic) { - using VALUETYPE = TypeParam; - std::vector& coord = this->coord; - std::vector& spin = this->spin; - std::vector& atype = this->atype; - std::vector& box = this->box; - int& natoms = this->natoms; - deepmd::hpp::DeepSpin& dp0 = this->dp0; - deepmd::hpp::DeepSpin& dp1 = this->dp1; - deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; +// TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist_atomic) { +// using VALUETYPE = TypeParam; +// std::vector& coord = this->coord; +// std::vector& spin = this->spin; +// std::vector& atype = this->atype; +// std::vector& box = this->box; +// int& natoms = this->natoms; +// deepmd::hpp::DeepSpin& dp0 = this->dp0; +// deepmd::hpp::DeepSpin& dp1 = this->dp1; +// deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; - int nmodel = 2; - std::vector edir(nmodel), emd; - std::vector > fdir(nmodel), fmagdir(nmodel), - vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd, aedir(nmodel), aemd, - avdir(nmodel), avmd(nmodel); - dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], aedir[0], avdir[0], coord, - spin, atype, box); - dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], aedir[1], avdir[1], coord, - spin, atype, box); - dp_md.compute(emd, fmd, fmmagd, vmd, aemd, avmd, coord, spin, atype, box); +// int nmodel = 2; +// std::vector edir(nmodel), emd; +// std::vector > fdir(nmodel), fmagdir(nmodel), +// vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd, aedir(nmodel), aemd, +// avdir(nmodel), avmd(nmodel); +// dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], aedir[0], avdir[0], +// coord, +// spin, atype, box); +// dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], aedir[1], avdir[1], +// coord, +// spin, atype, box); +// dp_md.compute(emd, fmd, fmmagd, vmd, aemd, avmd, coord, spin, atype, box); - EXPECT_EQ(edir.size(), emd.size()); - EXPECT_EQ(fdir.size(), fmd.size()); - EXPECT_EQ(fmagdir.size(), fmmagd.size()); - // EXPECT_EQ(vdir.size(), vmd.size()); - EXPECT_EQ(aedir.size(), aemd.size()); - // EXPECT_EQ(avdir.size(), avmd.size()); - for (int kk = 0; kk < nmodel; ++kk) { - EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); - EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); - // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); - EXPECT_EQ(aedir[kk].size(), aemd[kk].size()); - // EXPECT_EQ(avdir[kk].size(), avmd[kk].size()); - } - for (int kk = 0; kk < nmodel; ++kk) { - EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); - for (int ii = 0; ii < fdir[0].size(); ++ii) { - EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); - } - for (int ii = 0; ii < fmagdir[0].size(); ++ii) { - EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); - } - // for (int ii = 0; ii < vdir[0].size(); ++ii) { - // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); - // } - for (int ii = 0; ii < aedir[0].size(); ++ii) { - EXPECT_LT(fabs(aedir[kk][ii] - aemd[kk][ii]), EPSILON); - } - // for (int ii = 0; ii < avdir[0].size(); ++ii) { - // EXPECT_LT(fabs(avdir[kk][ii] - avmd[kk][ii]), EPSILON); - // } - } -} +// EXPECT_EQ(edir.size(), emd.size()); +// EXPECT_EQ(fdir.size(), fmd.size()); +// EXPECT_EQ(fmagdir.size(), fmmagd.size()); +// // EXPECT_EQ(vdir.size(), vmd.size()); +// EXPECT_EQ(aedir.size(), aemd.size()); +// // EXPECT_EQ(avdir.size(), avmd.size()); +// for (int kk = 0; kk < nmodel; ++kk) { +// EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); +// EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); +// // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); +// EXPECT_EQ(aedir[kk].size(), aemd[kk].size()); +// // EXPECT_EQ(avdir[kk].size(), avmd[kk].size()); +// } +// for (int kk = 0; kk < nmodel; ++kk) { +// EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); +// for (int ii = 0; ii < fdir[0].size(); ++ii) { +// EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); +// } +// for (int ii = 0; ii < fmagdir[0].size(); ++ii) { +// EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); +// } +// // for (int ii = 0; ii < vdir[0].size(); ++ii) { +// // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); +// // } +// for (int ii = 0; ii < aedir[0].size(); ++ii) { +// EXPECT_LT(fabs(aedir[kk][ii] - aemd[kk][ii]), EPSILON); +// } +// // for (int ii = 0; ii < avdir[0].size(); ++ii) { +// // EXPECT_LT(fabs(avdir[kk][ii] - avmd[kk][ii]), EPSILON); +// // } +// } +// } From bb8d38e8c2afdf86f5ee482780c04e77a139b901 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Mon, 11 Nov 2024 00:04:51 +0800 Subject: [PATCH 191/193] Update test_deepspin_model_devi_hpp.cc --- .../tests/test_deepspin_model_devi_hpp.cc | 170 +++++++++--------- 1 file changed, 82 insertions(+), 88 deletions(-) diff --git a/source/api_c/tests/test_deepspin_model_devi_hpp.cc b/source/api_c/tests/test_deepspin_model_devi_hpp.cc index 3b75bf1119..c6bbdef8a8 100644 --- a/source/api_c/tests/test_deepspin_model_devi_hpp.cc +++ b/source/api_c/tests/test_deepspin_model_devi_hpp.cc @@ -13,10 +13,11 @@ template class TestInferDeepSpinModeDevi : public ::testing::Test { protected: std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; - std::vector spin = {0., 0., 1.2737, 0., 0., 1.2737, - 0., 0., 0., 0., 0., 0.}; - std::vector atype = {0, 0, 1, 1}; + std::vector spin = {0.13, 0.02, 0.03, 0., 0., 0., 0., 0., 0., + 0.14, 0.10, 0.12, 0., 0., 0., 0., 0., 0.}; + std::vector atype = {0, 1, 1, 0, 1, 1}; std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; int natoms; @@ -26,19 +27,14 @@ class TestInferDeepSpinModeDevi : public ::testing::Test { void SetUp() override { { - std::string file_name = "../../tests/infer/deepspin_nlist.pbtxt"; - deepmd::hpp::convert_pbtxt_to_pb("../../tests/infer/deepspin_nlist.pbtxt", - "deepspin_nlist.pb"); - dp0.init("deepspin_nlist.pb"); + dp0.init("../../tests/infer/deeppot_dpa_spin.pth"); } { - std::string file_name = "../../tests/infer/deepspin_nlist-2.pbtxt"; - deepmd::hpp::convert_pbtxt_to_pb( - "../../tests/infer/deepspin_nlist-2.pbtxt", "deepspin_nlist-2.pb"); - dp1.init("deepspin_nlist-2.pb"); + dp1.init("../../tests/infer/deeppot_dpa_spin.pth"); } dp_md.init( - std::vector({"deepspin_nlist.pb", "deepspin_nlist-2.pb"})); + std::vector({"../../tests/infer/deeppot_dpa_spin.pth", + "../../tests/infer/deeppot_dpa_spin.pth"})); }; void TearDown() override {}; @@ -85,84 +81,82 @@ TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist) { vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd; dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], coord, spin, atype, box); dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], coord, spin, atype, box); - // dp_md.compute(emd, fmd, fmmagd, vmd, coord, spin, atype, box); + dp_md.compute(emd, fmd, fmmagd, vmd, coord, spin, atype, box); - // EXPECT_EQ(edir.size(), emd.size()); - // EXPECT_EQ(fdir.size(), fmd.size()); - // EXPECT_EQ(fmagdir.size(), fmmagd.size()); - // // EXPECT_EQ(vdir.size(), vmd.size()); - // for (int kk = 0; kk < nmodel; ++kk) { - // EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); - // EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); - // // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); - // } - // for (int kk = 0; kk < nmodel; ++kk) { - // EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); - // for (int ii = 0; ii < fdir[0].size(); ++ii) { - // EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); - // } - // for (int ii = 0; ii < fmagdir[0].size(); ++ii) { - // EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); - // } - // // for (int ii = 0; ii < vdir[0].size(); ++ii) { - // // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); - // // } - // } + EXPECT_EQ(edir.size(), emd.size()); + EXPECT_EQ(fdir.size(), fmd.size()); + EXPECT_EQ(fmagdir.size(), fmmagd.size()); + // EXPECT_EQ(vdir.size(), vmd.size()); + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); + EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); + // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); + } + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); + for (int ii = 0; ii < fdir[0].size(); ++ii) { + EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); + } + for (int ii = 0; ii < fmagdir[0].size(); ++ii) { + EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < vdir[0].size(); ++ii) { + // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); + // } + } } -// TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist_atomic) { -// using VALUETYPE = TypeParam; -// std::vector& coord = this->coord; -// std::vector& spin = this->spin; -// std::vector& atype = this->atype; -// std::vector& box = this->box; -// int& natoms = this->natoms; -// deepmd::hpp::DeepSpin& dp0 = this->dp0; -// deepmd::hpp::DeepSpin& dp1 = this->dp1; -// deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; +TYPED_TEST(TestInferDeepSpinModeDevi, cpu_build_nlist_atomic) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& spin = this->spin; + std::vector& atype = this->atype; + std::vector& box = this->box; + int& natoms = this->natoms; + deepmd::hpp::DeepSpin& dp0 = this->dp0; + deepmd::hpp::DeepSpin& dp1 = this->dp1; + deepmd::hpp::DeepSpinModelDevi& dp_md = this->dp_md; -// int nmodel = 2; -// std::vector edir(nmodel), emd; -// std::vector > fdir(nmodel), fmagdir(nmodel), -// vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd, aedir(nmodel), aemd, -// avdir(nmodel), avmd(nmodel); -// dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], aedir[0], avdir[0], -// coord, -// spin, atype, box); -// dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], aedir[1], avdir[1], -// coord, -// spin, atype, box); -// dp_md.compute(emd, fmd, fmmagd, vmd, aemd, avmd, coord, spin, atype, box); + int nmodel = 2; + std::vector edir(nmodel), emd; + std::vector > fdir(nmodel), fmagdir(nmodel), + vdir(nmodel), fmd(nmodel), fmmagd(nmodel), vmd, aedir(nmodel), aemd, + avdir(nmodel), avmd(nmodel); + dp0.compute(edir[0], fdir[0], fmagdir[0], vdir[0], aedir[0], avdir[0], coord, + spin, atype, box); + dp1.compute(edir[1], fdir[1], fmagdir[1], vdir[1], aedir[1], avdir[1], coord, + spin, atype, box); + dp_md.compute(emd, fmd, fmmagd, vmd, aemd, avmd, coord, spin, atype, box); -// EXPECT_EQ(edir.size(), emd.size()); -// EXPECT_EQ(fdir.size(), fmd.size()); -// EXPECT_EQ(fmagdir.size(), fmmagd.size()); -// // EXPECT_EQ(vdir.size(), vmd.size()); -// EXPECT_EQ(aedir.size(), aemd.size()); -// // EXPECT_EQ(avdir.size(), avmd.size()); -// for (int kk = 0; kk < nmodel; ++kk) { -// EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); -// EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); -// // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); -// EXPECT_EQ(aedir[kk].size(), aemd[kk].size()); -// // EXPECT_EQ(avdir[kk].size(), avmd[kk].size()); -// } -// for (int kk = 0; kk < nmodel; ++kk) { -// EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); -// for (int ii = 0; ii < fdir[0].size(); ++ii) { -// EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); -// } -// for (int ii = 0; ii < fmagdir[0].size(); ++ii) { -// EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); -// } -// // for (int ii = 0; ii < vdir[0].size(); ++ii) { -// // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); -// // } -// for (int ii = 0; ii < aedir[0].size(); ++ii) { -// EXPECT_LT(fabs(aedir[kk][ii] - aemd[kk][ii]), EPSILON); -// } -// // for (int ii = 0; ii < avdir[0].size(); ++ii) { -// // EXPECT_LT(fabs(avdir[kk][ii] - avmd[kk][ii]), EPSILON); -// // } -// } -// } + EXPECT_EQ(edir.size(), emd.size()); + EXPECT_EQ(fdir.size(), fmd.size()); + EXPECT_EQ(fmagdir.size(), fmmagd.size()); + // EXPECT_EQ(vdir.size(), vmd.size()); + EXPECT_EQ(aedir.size(), aemd.size()); + // EXPECT_EQ(avdir.size(), avmd.size()); + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_EQ(fdir[kk].size(), fmd[kk].size()); + EXPECT_EQ(fmagdir[kk].size(), fmmagd[kk].size()); + // EXPECT_EQ(vdir[kk].size(), vmd[kk].size()); + EXPECT_EQ(aedir[kk].size(), aemd[kk].size()); + // EXPECT_EQ(avdir[kk].size(), avmd[kk].size()); + } + for (int kk = 0; kk < nmodel; ++kk) { + EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON); + for (int ii = 0; ii < fdir[0].size(); ++ii) { + EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON); + } + for (int ii = 0; ii < fmagdir[0].size(); ++ii) { + EXPECT_LT(fabs(fmagdir[kk][ii] - fmmagd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < vdir[0].size(); ++ii) { + // EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON); + // } + for (int ii = 0; ii < aedir[0].size(); ++ii) { + EXPECT_LT(fabs(aedir[kk][ii] - aemd[kk][ii]), EPSILON); + } + // for (int ii = 0; ii < avdir[0].size(); ++ii) { + // EXPECT_LT(fabs(avdir[kk][ii] - avmd[kk][ii]), EPSILON); + // } + } +} From e6bfebe107e14919ccd88e70d03864be13b5a274 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Mon, 11 Nov 2024 00:26:01 +0800 Subject: [PATCH 192/193] Update deepmd.hpp --- source/api_c/include/deepmd.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 9e1a611869..dd212e9dec 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -2585,7 +2585,7 @@ class DeepSpinModelDevi : public DeepBaseModelDevi { ener.resize(numb_models); force.resize(numb_models); force_mag.resize(numb_models); - // virial.resize(numb_models); + virial.resize(numb_models); for (int i = 0; i < numb_models; i++) { ener[i] = energy_flat[i]; force[i].resize(static_cast(natoms) * 3); From 117f4c97c8e6f77d95dbce85d52a4fdb1d59ef1d Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Mon, 11 Nov 2024 13:56:39 +0800 Subject: [PATCH 193/193] add ut for lammps atomic energy --- source/lmp/tests/test_lammps_spin.py | 40 +++++++++++++++++++++++++ source/lmp/tests/test_lammps_spin_pt.py | 40 +++++++++++++++++++++++++ 2 files changed, 80 insertions(+) diff --git a/source/lmp/tests/test_lammps_spin.py b/source/lmp/tests/test_lammps_spin.py index aff80c52f6..cd65f6d3ce 100644 --- a/source/lmp/tests/test_lammps_spin.py +++ b/source/lmp/tests/test_lammps_spin.py @@ -150,6 +150,46 @@ def test_pair_deepmd(lammps): lammps.run(1) +def test_pair_deepmd_virial(lammps): + lammps.pair_style(f"deepspin {pb_file.resolve()}") + lammps.pair_coeff("* *") + lammps.compute("peatom all pe/atom pair") + lammps.compute("pressure all pressure NULL pair") + lammps.compute("virial all centroid/stress/atom NULL pair") + lammps.variable("eatom atom c_peatom") + # for ii in range(9): + # jj = [0, 4, 8, 3, 6, 7, 1, 2, 5][ii] + # lammps.variable(f"pressure{jj} equal c_pressure[{ii+1}]") + # for ii in range(9): + # jj = [0, 4, 8, 3, 6, 7, 1, 2, 5][ii] + # lammps.variable(f"virial{jj} atom c_virial[{ii+1}]") + # lammps.dump( + # "1 all custom 1 dump id " + " ".join([f"v_virial{ii}" for ii in range(9)]) + # ) + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + idx_map = lammps.lmp.numpy.extract_atom("id") - 1 + assert np.array(lammps.variables["eatom"].value) == pytest.approx( + expected_ae[idx_map] + ) + # vol = box[1] * box[3] * box[5] + # for ii in range(6): + # jj = [0, 4, 8, 3, 6, 7, 1, 2, 5][ii] + # assert np.array( + # lammps.variables[f"pressure{jj}"].value + # ) / constants.nktv2p == pytest.approx( + # -expected_v[idx_map, jj].sum(axis=0) / vol + # ) + # for ii in range(9): + # assert np.array( + # lammps.variables[f"virial{ii}"].value + # ) / constants.nktv2p == pytest.approx(expected_v[idx_map, ii]) + + def test_pair_deepmd_model_devi(lammps): lammps.pair_style( f"deepspin {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1" diff --git a/source/lmp/tests/test_lammps_spin_pt.py b/source/lmp/tests/test_lammps_spin_pt.py index dad99ddec8..c3bd27be38 100644 --- a/source/lmp/tests/test_lammps_spin_pt.py +++ b/source/lmp/tests/test_lammps_spin_pt.py @@ -146,6 +146,46 @@ def test_pair_deepmd(lammps): lammps.run(1) +def test_pair_deepmd_virial(lammps): + lammps.pair_style(f"deepspin {pb_file.resolve()}") + lammps.pair_coeff("* *") + lammps.compute("peatom all pe/atom pair") + lammps.compute("pressure all pressure NULL pair") + lammps.compute("virial all centroid/stress/atom NULL pair") + lammps.variable("eatom atom c_peatom") + # for ii in range(9): + # jj = [0, 4, 8, 3, 6, 7, 1, 2, 5][ii] + # lammps.variable(f"pressure{jj} equal c_pressure[{ii+1}]") + # for ii in range(9): + # jj = [0, 4, 8, 3, 6, 7, 1, 2, 5][ii] + # lammps.variable(f"virial{jj} atom c_virial[{ii+1}]") + # lammps.dump( + # "1 all custom 1 dump id " + " ".join([f"v_virial{ii}" for ii in range(9)]) + # ) + lammps.run(0) + assert lammps.eval("pe") == pytest.approx(expected_e) + for ii in range(4): + assert lammps.atoms[ii].force == pytest.approx( + expected_f[lammps.atoms[ii].id - 1] + ) + idx_map = lammps.lmp.numpy.extract_atom("id") - 1 + assert np.array(lammps.variables["eatom"].value) == pytest.approx( + expected_ae[idx_map] + ) + # vol = box[1] * box[3] * box[5] + # for ii in range(6): + # jj = [0, 4, 8, 3, 6, 7, 1, 2, 5][ii] + # assert np.array( + # lammps.variables[f"pressure{jj}"].value + # ) / constants.nktv2p == pytest.approx( + # -expected_v[idx_map, jj].sum(axis=0) / vol + # ) + # for ii in range(9): + # assert np.array( + # lammps.variables[f"virial{ii}"].value + # ) / constants.nktv2p == pytest.approx(expected_v[idx_map, ii]) + + def test_pair_deepmd_model_devi(lammps): lammps.pair_style( f"deepspin {pb_file.resolve()} {pb_file2.resolve()} out_file {md_file.resolve()} out_freq 1"