diff --git a/dev_scripts/update_pt_data.py b/dev_scripts/update_pt_data.py
index 9644bd0ac6b..df512cbb7c8 100644
--- a/dev_scripts/update_pt_data.py
+++ b/dev_scripts/update_pt_data.py
@@ -219,7 +219,7 @@ def gen_iupac_ordering():
     order = sum((list(product(x, y)) for x, y in order), [])  # noqa: RUF017
     iupac_ordering_dict = dict(
-        zip([Element.from_row_and_group(row, group) for group, row in order], range(len(order)), strict=False)
+        zip([Element.from_row_and_group(row, group) for group, row in order], range(len(order)), strict=True)
     )
 
     # first clean periodic table of any IUPAC ordering
diff --git a/src/pymatgen/analysis/adsorption.py b/src/pymatgen/analysis/adsorption.py
index 3516c916283..4486e3bc870 100644
--- a/src/pymatgen/analysis/adsorption.py
+++ b/src/pymatgen/analysis/adsorption.py
@@ -178,7 +178,7 @@ def find_surface_sites_by_height(self, slab: Slab, height=0.9, xy_tol=0.05):
         surf_sites = [slab.sites[n] for n in np.where(mask)[0]]
         if xy_tol:
             # sort surface sites by height
-            surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites, strict=False)]
+            surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites, strict=True)]
             surf_sites.reverse()
             unique_sites: list = []
             unique_perp_fracs: list = []
@@ -235,7 +235,7 @@ def find_adsorption_sites(
 
         Args:
             distance (float): distance from the coordinating ensemble
-                of atoms along the miller index for the site (i. e.
+                of atoms along the miller index for the site (i.e.
                 the distance from the slab itself)
             put_inside (bool): whether to put the site inside the cell
             symm_reduce (float): symm reduction threshold
@@ -268,7 +268,7 @@ def find_adsorption_sites(
         for v in dt.simplices:
             if -1 not in v:
                 dots = []
-                for i_corner, i_opp in zip(range(3), ((1, 2), (0, 2), (0, 1)), strict=False):
+                for i_corner, i_opp in zip(range(3), ((1, 2), (0, 2), (0, 1)), strict=True):
                     corner, opp = v[i_corner], [v[o] for o in i_opp]
                     vecs = [mesh[d].coords - mesh[corner].coords for d in opp]
                     vecs = [vec / np.linalg.norm(vec) for vec in vecs]
@@ -701,7 +701,7 @@ def plot_slab(
         ads_sites = asf.find_adsorption_sites()["all"]
         symm_op = get_rot(orig_slab)
         ads_sites = [symm_op.operate(ads_site)[:2].tolist() for ads_site in ads_sites]
-        ax.plot(*zip(*ads_sites, strict=False), color="k", marker="x", markersize=10, mew=1, linestyle="", zorder=10000)
+        ax.plot(*zip(*ads_sites, strict=True), color="k", marker="x", markersize=10, mew=1, linestyle="", zorder=10000)
     # Draw unit cell
     if draw_unit_cell:
         vertices = np.insert(vertices, 1, lattice_sum, axis=0).tolist()
diff --git a/src/pymatgen/analysis/bond_valence.py b/src/pymatgen/analysis/bond_valence.py
index 1b8dd101428..97541da6d7b 100644
--- a/src/pymatgen/analysis/bond_valence.py
+++ b/src/pymatgen/analysis/bond_valence.py
@@ -403,7 +403,7 @@ def _recurse(assigned=None):
         if self._best_vset:
             if structure.is_ordered:
                 assigned = {}
-                for val, sites in zip(self._best_vset, equi_sites, strict=False):
+                for val, sites in zip(self._best_vset, equi_sites, strict=True):
                     for site in sites:
                         assigned[site] = val
 
@@ -414,7 +414,7 @@ def _recurse(assigned=None):
                 new_best_vset.append([])
             for ival, val in enumerate(self._best_vset):
                 new_best_vset[attrib[ival]].append(val)
-            for val, sites in zip(new_best_vset, equi_sites, strict=False):
+            for val, sites in zip(new_best_vset, equi_sites, strict=True):
                 for site in sites:
                     assigned[site] = val
 
diff --git a/src/pymatgen/analysis/chempot_diagram.py b/src/pymatgen/analysis/chempot_diagram.py
index c48d3c26ce4..3a295d54136 100644
--- a/src/pymatgen/analysis/chempot_diagram.py
+++ b/src/pymatgen/analysis/chempot_diagram.py
@@ -213,7 +213,7 @@ def _get_domains(self) -> dict[str, np.ndarray]:
 
         domains: dict[str, list] = {entry.reduced_formula: [] for entry in entries}
 
-        for intersection, facet in zip(hs_int.intersections, hs_int.dual_facets, strict=False):
+        for intersection, facet in zip(hs_int.intersections, hs_int.dual_facets, strict=True):
             for v in facet:
                 if v < len(entries):
                     this_entry = entries[v]
@@ -578,7 +578,7 @@ def get_chempot_axis_title(element) -> str:
             return f"<br> μ<sub>{element}</sub> - μ<sub>{element}</sub><sup>o</sup> (eV)"
 
         axes_layout = {}
-        for ax, el in zip(axes, elements, strict=False):
+        for ax, el in zip(axes, elements, strict=True):
             layout = plotly_layouts[layout_name].copy()
             layout["title"] = get_chempot_axis_title(el)
             axes_layout[ax] = layout
diff --git a/src/pymatgen/analysis/diffraction/core.py b/src/pymatgen/analysis/diffraction/core.py
index 525e8fbee38..2db0996c86e 100644
--- a/src/pymatgen/analysis/diffraction/core.py
+++ b/src/pymatgen/analysis/diffraction/core.py
@@ -105,7 +105,7 @@ def get_plot(
         xrd = self.get_pattern(structure, two_theta_range=two_theta_range)
         imax = max(xrd.y)
 
-        for two_theta, i, hkls in zip(xrd.x, xrd.y, xrd.hkls, strict=False):
+        for two_theta, i, hkls in zip(xrd.x, xrd.y, xrd.hkls, strict=True):
             if two_theta_range[0] <= two_theta <= two_theta_range[1]:
                 hkl_tuples = [hkl["hkl"] for hkl in hkls]
                 label = ", ".join(map(str, hkl_tuples))  # 'full' label
@@ -188,7 +188,7 @@ def plot_structures(self, structures, fontsize=6, **kwargs):
         n_rows = len(structures)
         fig, axes = plt.subplots(nrows=n_rows, ncols=1, sharex=True, squeeze=False)
 
-        for i, (ax, structure) in enumerate(zip(axes.ravel(), structures, strict=False)):
+        for i, (ax, structure) in enumerate(zip(axes.ravel(), structures, strict=True)):
             self.get_plot(structure, fontsize=fontsize, ax=ax, with_labels=i == n_rows - 1, **kwargs)
             spg_symbol, spg_number = structure.get_space_group_info()
             ax.set_title(f"{structure.formula} {spg_symbol} ({spg_number}) ")
diff --git a/src/pymatgen/analysis/diffraction/tem.py b/src/pymatgen/analysis/diffraction/tem.py
index 83cd551b90b..9fc13da76c6 100644
--- a/src/pymatgen/analysis/diffraction/tem.py
+++ b/src/pymatgen/analysis/diffraction/tem.py
@@ -139,7 +139,7 @@ def get_interplanar_spacings(
         if (0, 0, 0) in points_filtered:
             points_filtered.remove((0, 0, 0))
         interplanar_spacings_val = np.array([structure.lattice.d_hkl(x) for x in points_filtered])
-        return dict(zip(points_filtered, interplanar_spacings_val, strict=False))
+        return dict(zip(points_filtered, interplanar_spacings_val, strict=True))
 
     def bragg_angles(self, interplanar_spacings: dict[Tuple3Ints, float]) -> dict[Tuple3Ints, float]:
         """Get the Bragg angles for every hkl point passed in (where n = 1).
@@ -153,7 +153,7 @@ def bragg_angles(self, interplanar_spacings: dict[Tuple3Ints, float]) -> dict[Tu
         plane = list(interplanar_spacings)
         interplanar_spacings_val = np.array(list(interplanar_spacings.values()))
         bragg_angles_val = np.arcsin(self.wavelength_rel() / (2 * interplanar_spacings_val))
-        return dict(zip(plane, bragg_angles_val, strict=False))
+        return dict(zip(plane, bragg_angles_val, strict=True))
 
     def get_s2(self, bragg_angles: dict[Tuple3Ints, float]) -> dict[Tuple3Ints, float]:
         """
@@ -169,7 +169,7 @@ def get_s2(self, bragg_angles: dict[Tuple3Ints, float]) -> dict[Tuple3Ints, floa
         plane = list(bragg_angles)
         bragg_angles_val = np.array(list(bragg_angles.values()))
         s2_val = (np.sin(bragg_angles_val) / self.wavelength_rel()) ** 2
-        return dict(zip(plane, s2_val, strict=False))
+        return dict(zip(plane, s2_val, strict=True))
 
     def x_ray_factors(
         self, structure: Structure, bragg_angles: dict[Tuple3Ints, float]
@@ -269,7 +269,7 @@ def cell_intensity(self, structure: Structure, bragg_angles: dict[Tuple3Ints, fl
         csf = self.cell_scattering_factors(structure, bragg_angles)
         csf_val = np.array(list(csf.values()))
         cell_intensity_val = (csf_val * csf_val.conjugate()).real
-        return dict(zip(bragg_angles, cell_intensity_val, strict=False))
+        return dict(zip(bragg_angles, cell_intensity_val, strict=True))
 
     def get_pattern(
         self,
diff --git a/src/pymatgen/analysis/elasticity/elastic.py b/src/pymatgen/analysis/elasticity/elastic.py
index 852bb45a490..a7386120dd2 100644
--- a/src/pymatgen/analysis/elasticity/elastic.py
+++ b/src/pymatgen/analysis/elasticity/elastic.py
@@ -137,7 +137,7 @@ class ElasticTensor(NthOrderElasticTensor):
     def __new__(cls, input_array, tol: float = 1e-4) -> Self:
         """
         Create an ElasticTensor object. The constructor throws an error if the shape of
-        the input_matrix argument is not 3x3x3x3, i. e. in true tensor notation. Issues a
+        the input_matrix argument is not 3x3x3x3, i.e. in true tensor notation. Issues a
         warning if the input_matrix argument does not satisfy standard symmetries. Note
         that the constructor uses __new__ rather than __init__ according to the standard
         method of subclassing numpy ndarrays.
@@ -564,7 +564,7 @@ def from_diff_fit(cls, strains, stresses, eq_stress=None, tol: float = 1e-10, or
     @property
     def order(self) -> int:
         """
-        Order of the elastic tensor expansion, i. e. the order of the
+        Order of the elastic tensor expansion, i.e. the order of the
         highest included set of elastic constants.
         """
         return self[-1].order
@@ -619,7 +619,7 @@ def get_tgt(self, temperature: float | None = None, structure: Structure = None,
         points = quad["points"]
         weights = quad["weights"]
         num, denom, c = np.zeros((3, 3)), 0, 1
-        for p, w in zip(points, weights, strict=False):
+        for p, w in zip(points, weights, strict=True):
             gk = ElasticTensor(self[0]).green_kristoffel(p)
             _rho_wsquareds, us = np.linalg.eigh(gk)
             us = [u / np.linalg.norm(u) for u in np.transpose(us)]
@@ -856,7 +856,7 @@ def diff_fit(strains, stresses, eq_stress=None, order=2, tol: float = 1e-10):
         stresses (nx3x3 array-like): Array of 3x3 stresses to use in fitting ECs.
            These should be PK2 stresses.
        eq_stress (3x3 array-like): stress corresponding to
-            equilibrium strain (i. e. "0" strain state).
+            equilibrium strain (i.e. "0" strain state).
            If not specified, function will try to find
            the state in the list of provided stresses and strains.
            If not found, defaults to 0.
@@ -882,7 +882,7 @@ def diff_fit(strains, stresses, eq_stress=None, order=2, tol: float = 1e-10):
     for _ord in range(1, order):
         cvec, carr = get_symbol_list(_ord + 1)
         svec = np.ravel(dei_dsi[_ord - 1].T)
-        cmap = dict(zip(cvec, np.dot(m[_ord - 1], svec), strict=False))
+        cmap = dict(zip(cvec, np.dot(m[_ord - 1], svec), strict=True))
         c_list.append(v_subs(carr, cmap))
     return [Tensor.from_voigt(c) for c in c_list]
 
@@ -916,7 +916,7 @@ def find_eq_stress(strains, stresses, tol: float = 1e-10):
 
 def get_strain_state_dict(strains, stresses, eq_stress=None, tol: float = 1e-10, add_eq=True, sort=True):
     """Create a dictionary of voigt notation stress-strain sets
-    keyed by "strain state", i. e. a tuple corresponding to
+    keyed by "strain state", i.e. a tuple corresponding to
     the non-zero entries in ratios to the lowest nonzero value,
     e.g. [0, 0.1, 0, 0.2, 0, 0] -> (0,1,0,2,0,0)
     This allows strains to be collected in stencils as to
@@ -974,7 +974,7 @@ def generate_pseudo(strain_states, order=3):
 
     Args:
         strain_states (6xN array like): a list of Voigt-notation strain-states,
-            i. e. perturbed indices of the strain as a function of the smallest
+            i.e. perturbed indices of the strain as a function of the smallest
            strain e.g. (0, 1, 0, 0, 1, 0)
        order (int): order of pseudo-inverse to calculate
 
@@ -1010,7 +1010,7 @@ def generate_pseudo(strain_states, order=3):
 def get_symbol_list(rank, dim=6):
     """Get a symbolic representation of the Voigt-notation tensor that
     places identical symbols for entries related
-    by index transposition, i. e. C_1121 = C_1211 etc.
+    by index transposition, i.e. C_1121 = C_1211 etc.
 
     Args:
         dim (int): dimension of matrix/tensor, e.g. 6 for
diff --git a/src/pymatgen/analysis/elasticity/strain.py b/src/pymatgen/analysis/elasticity/strain.py
index 959640d4dc3..c60efb181bc 100644
--- a/src/pymatgen/analysis/elasticity/strain.py
+++ b/src/pymatgen/analysis/elasticity/strain.py
@@ -43,7 +43,7 @@ class Deformation(SquareTensor):
     def __new__(cls, deformation_gradient) -> Self:
         """
         Create a Deformation object. Note that the constructor uses __new__ rather than
-        __init__ according to the standard method of subclassing numpy ndarrays.
+        __init__ according to the standard method of subclassing numpy arrays.
 
         Args:
             deformation_gradient (3x3 array-like): the 3x3 array-like
@@ -56,11 +56,11 @@ def is_independent(self, tol: float = 1e-8):
         """Check to determine whether the deformation is independent."""
         return len(self.get_perturbed_indices(tol)) == 1
 
-    def get_perturbed_indices(self, tol: float = 1e-8):
+    def get_perturbed_indices(self, tol: float = 1e-8) -> list[tuple[int, int]]:
         """Get indices of perturbed elements of the deformation gradient,
-        i. e. those that differ from the identity.
+        i.e. those that differ from the identity.
""" - return list(zip(*np.where(abs(self - np.eye(3)) > tol), strict=False)) + return list(zip(*np.where(abs(self - np.eye(3)) > tol), strict=True)) @property def green_lagrange_strain(self): diff --git a/src/pymatgen/analysis/eos.py b/src/pymatgen/analysis/eos.py index 1ec713c654b..f25844eaec7 100644 --- a/src/pymatgen/analysis/eos.py +++ b/src/pymatgen/analysis/eos.py @@ -431,7 +431,7 @@ def get_rms(x, y): return np.sqrt(np.sum((np.array(x) - np.array(y)) ** 2) / len(x)) # list of (energy, volume) tuples - e_v = list(zip(self.energies, self.volumes, strict=False)) + e_v = list(zip(self.energies, self.volumes, strict=True)) n_data = len(e_v) # minimum number of data points used for fitting n_data_min = max(n_data - 2 * min_ndata_factor, min_poly_order + 1) diff --git a/src/pymatgen/analysis/ewald.py b/src/pymatgen/analysis/ewald.py index de1ea68287c..fc12dcab415 100644 --- a/src/pymatgen/analysis/ewald.py +++ b/src/pymatgen/analysis/ewald.py @@ -332,7 +332,7 @@ def _calc_recip(self): s_reals = np.sum(oxi_states[None, :] * np.cos(grs), 1) s_imags = np.sum(oxi_states[None, :] * np.sin(grs), 1) - for g, g2, gr, exp_val, s_real, s_imag in zip(gs, g2s, grs, exp_vals, s_reals, s_imags, strict=False): + for g, g2, gr, exp_val, s_real, s_imag in zip(gs, g2s, grs, exp_vals, s_reals, s_imags, strict=True): # Uses the identity sin(x)+cos(x) = 2**0.5 sin(x + pi/4) m = np.sin((gr[None, :] + math.pi / 4) - gr[:, None]) m *= exp_val / g2 diff --git a/src/pymatgen/analysis/ferroelectricity/polarization.py b/src/pymatgen/analysis/ferroelectricity/polarization.py index 9639d21248a..de54a7a57cd 100644 --- a/src/pymatgen/analysis/ferroelectricity/polarization.py +++ b/src/pymatgen/analysis/ferroelectricity/polarization.py @@ -307,7 +307,7 @@ def get_same_branch_polarization_data(self, convert_to_muC_per_cm2=True, all_in_ sites.append(new_site[0]) adjust_pol = [] - for site, struct in zip(sites, d_structs, strict=False): + for site, struct in zip(sites, d_structs, strict=True): adjust_pol.append(np.multiply(site.frac_coords, np.array(struct.lattice.lengths)).ravel()) return np.array(adjust_pol) diff --git a/src/pymatgen/analysis/interface_reactions.py b/src/pymatgen/analysis/interface_reactions.py index 532f1c24a37..2129d6df6ed 100644 --- a/src/pymatgen/analysis/interface_reactions.py +++ b/src/pymatgen/analysis/interface_reactions.py @@ -186,7 +186,7 @@ def get_kinks(self) -> list[tuple[int, float, float, Reaction, float]]: index_kink = range(1, len(critical_comp) + 1) - return list(zip(index_kink, x_kink, energy_kink, react_kink, energy_per_rxt_formula, strict=False)) + return list(zip(index_kink, x_kink, energy_kink, react_kink, energy_per_rxt_formula, strict=True)) def plot(self, backend: Literal["plotly", "matplotlib"] = "plotly") -> Figure | plt.Figure: """ @@ -326,7 +326,7 @@ def _get_elem_amt_in_rxn(self, rxn: Reaction) -> float: def _get_plotly_figure(self) -> Figure: """Get a Plotly figure of reaction kinks diagram.""" - kinks = map(list, zip(*self.get_kinks(), strict=False)) + kinks = map(list, zip(*self.get_kinks(), strict=True)) _, x, energy, reactions, _ = kinks lines = Scatter( @@ -348,7 +348,7 @@ def _get_plotly_figure(self) -> Figure: labels = [ f"{htmlify(str(r))}
\u0394Erxn = {round(e, 3)} eV/atom" - for r, e in zip(reactions, energy, strict=False) + for r, e in zip(reactions, energy, strict=True) ] markers = Scatter( @@ -392,13 +392,13 @@ def _get_matplotlib_figure(self) -> plt.Figure: ax = pretty_plot(8, 5) plt.xlim([-0.05, 1.05]) # plot boundary is 5% wider on each side - kinks = list(zip(*self.get_kinks(), strict=False)) + kinks = list(zip(*self.get_kinks(), strict=True)) _, x, energy, reactions, _ = kinks plt.plot(x, energy, "o-", markersize=8, c="navy", zorder=1) plt.scatter(self.minimum[0], self.minimum[1], marker="*", c="red", s=400, zorder=2) - for x_coord, y_coord, rxn in zip(x, energy, reactions, strict=False): + for x_coord, y_coord, rxn in zip(x, energy, reactions, strict=True): products = ", ".join( [latexify(p.reduced_formula) for p in rxn.products if not np.isclose(rxn.get_coeff(p), 0)] ) @@ -438,7 +438,7 @@ def _get_xaxis_title(self, latex: bool = True) -> str: def _get_plotly_annotations(x: list[float], y: list[float], reactions: list[Reaction]): """Get dictionary of annotations for the Plotly figure layout.""" annotations = [] - for x_coord, y_coord, rxn in zip(x, y, reactions, strict=False): + for x_coord, y_coord, rxn in zip(x, y, reactions, strict=True): products = ", ".join( [htmlify(p.reduced_formula) for p in rxn.products if not np.isclose(rxn.get_coeff(p), 0)] ) diff --git a/src/pymatgen/analysis/interfaces/coherent_interfaces.py b/src/pymatgen/analysis/interfaces/coherent_interfaces.py index e422b3b45d8..9a4cdda55e0 100644 --- a/src/pymatgen/analysis/interfaces/coherent_interfaces.py +++ b/src/pymatgen/analysis/interfaces/coherent_interfaces.py @@ -143,7 +143,7 @@ def _find_terminations(self): self._terminations = { (film_label, sub_label): (film_shift, sub_shift) for (film_label, film_shift), (sub_label, sub_shift) in product( - zip(film_terminations, film_shifts, strict=False), zip(sub_terminations, sub_shifts, strict=False) + zip(film_terminations, film_shifts, strict=True), zip(sub_terminations, sub_shifts, strict=True) ) } self.terminations = list(self._terminations) diff --git a/src/pymatgen/analysis/interfaces/zsl.py b/src/pymatgen/analysis/interfaces/zsl.py index e0eb264c445..30fd8851671 100644 --- a/src/pymatgen/analysis/interfaces/zsl.py +++ b/src/pymatgen/analysis/interfaces/zsl.py @@ -172,7 +172,7 @@ def get_equiv_transformations(self, transformation_sets, film_vectors, substrate for (f_trans, s_trans), (f, s) in zip( product(film_transformations, substrate_transformations), product(films, substrates), - strict=False, + strict=True, ): if is_same_vectors( f, diff --git a/src/pymatgen/analysis/local_env.py b/src/pymatgen/analysis/local_env.py index d63d6df4f1b..369b2e923fe 100644 --- a/src/pymatgen/analysis/local_env.py +++ b/src/pymatgen/analysis/local_env.py @@ -83,13 +83,13 @@ def __init__(self, structure: Structure) -> None: def radii(self): """List of ionic radii of elements in the order of sites.""" elems = [site.species_string for site in self._structure] - return dict(zip(elems, self._ionic_radii, strict=False)) + return dict(zip(elems, self._ionic_radii, strict=True)) @property def valences(self): """List of oxidation states of elements in the order of sites.""" el = [site.species_string for site in self._structure] - return dict(zip(el, self._valences, strict=False)) + return dict(zip(el, self._valences, strict=True)) @property def structure(self): @@ -522,7 +522,7 @@ def _get_nn_shell_info( # And, different first steps might results in the same neighbor # Now, we condense those neighbors into a 
single entry per neighbor all_sites = {} - for first_site, term_sites in zip(allowed_steps, terminal_neighbors, strict=False): + for first_site, term_sites in zip(allowed_steps, terminal_neighbors, strict=True): for term_site in term_sites: key = (term_site["site_index"], tuple(term_site["image"])) @@ -2398,12 +2398,8 @@ def compute_trigonometric_terms(self, thetas, phis): self._cos_n_p[1] = [math.cos(float(p)) for p in phis] for idx in range(2, self._max_trig_order + 1): - self._pow_sin_t[idx] = [ - e[0] * e[1] for e in zip(self._pow_sin_t[idx - 1], self._pow_sin_t[1], strict=False) - ] - self._pow_cos_t[idx] = [ - e[0] * e[1] for e in zip(self._pow_cos_t[idx - 1], self._pow_cos_t[1], strict=False) - ] + self._pow_sin_t[idx] = [e[0] * e[1] for e in zip(self._pow_sin_t[idx - 1], self._pow_sin_t[1], strict=True)] + self._pow_cos_t[idx] = [e[0] * e[1] for e in zip(self._pow_cos_t[idx - 1], self._pow_cos_t[1], strict=True)] self._sin_n_p[idx] = [math.sin(float(idx) * float(p)) for p in phis] self._cos_n_p[idx] = [math.cos(float(idx) * float(p)) for p in phis] @@ -2433,7 +2429,7 @@ def get_q2(self, thetas=None, phis=None): pre_y_2_2 = [0.25 * sqrt_15_2pi * val for val in self._pow_sin_t[2]] pre_y_2_1 = [ - 0.5 * sqrt_15_2pi * val[0] * val[1] for val in zip(self._pow_sin_t[1], self._pow_cos_t[1], strict=False) + 0.5 * sqrt_15_2pi * val[0] * val[1] for val in zip(self._pow_sin_t[1], self._pow_cos_t[1], strict=True) ] acc = 0.0 @@ -2506,15 +2502,15 @@ def get_q4(self, thetas=None, phis=None): pre_y_4_4 = [i16_3 * sqrt_35_2pi * val for val in self._pow_sin_t[4]] pre_y_4_3 = [ - i8_3 * sqrt_35_pi * val[0] * val[1] for val in zip(self._pow_sin_t[3], self._pow_cos_t[1], strict=False) + i8_3 * sqrt_35_pi * val[0] * val[1] for val in zip(self._pow_sin_t[3], self._pow_cos_t[1], strict=True) ] pre_y_4_2 = [ i8_3 * sqrt_5_2pi * val[0] * (7 * val[1] - 1.0) - for val in zip(self._pow_sin_t[2], self._pow_cos_t[2], strict=False) + for val in zip(self._pow_sin_t[2], self._pow_cos_t[2], strict=True) ] pre_y_4_1 = [ i8_3 * sqrt_5_pi * val[0] * (7 * val[1] - 3 * val[2]) - for val in zip(self._pow_sin_t[1], self._pow_cos_t[3], self._pow_cos_t[1], strict=False) + for val in zip(self._pow_sin_t[1], self._pow_cos_t[3], self._pow_cos_t[1], strict=True) ] acc = 0.0 @@ -2618,19 +2614,19 @@ def get_q6(self, thetas=None, phis=None): pre_y_6_6 = [i64 * sqrt_3003_pi * val for val in self._pow_sin_t[6]] pre_y_6_5 = [ - i32_3 * sqrt_1001_pi * val[0] * val[1] for val in zip(self._pow_sin_t[5], self._pow_cos_t[1], strict=False) + i32_3 * sqrt_1001_pi * val[0] * val[1] for val in zip(self._pow_sin_t[5], self._pow_cos_t[1], strict=True) ] pre_y_6_4 = [ i32_3 * sqrt_91_2pi * val[0] * (11 * val[1] - 1.0) - for val in zip(self._pow_sin_t[4], self._pow_cos_t[2], strict=False) + for val in zip(self._pow_sin_t[4], self._pow_cos_t[2], strict=True) ] pre_y_6_3 = [ i32 * sqrt_1365_pi * val[0] * (11 * val[1] - 3 * val[2]) - for val in zip(self._pow_sin_t[3], self._pow_cos_t[3], self._pow_cos_t[1], strict=False) + for val in zip(self._pow_sin_t[3], self._pow_cos_t[3], self._pow_cos_t[1], strict=True) ] pre_y_6_2 = [ i64 * sqrt_1365_pi * val[0] * (33 * val[1] - 18 * val[2] + 1.0) - for val in zip(self._pow_sin_t[2], self._pow_cos_t[4], self._pow_cos_t[2], strict=False) + for val in zip(self._pow_sin_t[2], self._pow_cos_t[4], self._pow_cos_t[2], strict=True) ] pre_y_6_1 = [ i16 * sqrt_273_2pi * val[0] * (33 * val[1] - 30 * val[2] + 5 * val[3]) @@ -2639,7 +2635,7 @@ def get_q6(self, thetas=None, phis=None): self._pow_cos_t[5], 
                 self._pow_cos_t[3],
                 self._pow_cos_t[1],
-                strict=False,
+                strict=True,
             )
         ]
 
@@ -3694,7 +3690,7 @@ def get_nn_info(self, structure: Structure, n: int):
         mefir = _get_mean_fictive_ionic_radius(firs, minimum_fir=mefir)
 
         siw = []
-        for nn, fir in zip(neighbors, firs, strict=False):
+        for nn, fir in zip(neighbors, firs, strict=True):
            if nn.nn_distance < self.cutoff:
                w = math.exp(1 - (fir / mefir) ** 6)
                if w > self.tol:
diff --git a/src/pymatgen/analysis/magnetism/analyzer.py b/src/pymatgen/analysis/magnetism/analyzer.py
index c4c93ba270b..d8e1491b36e 100644
--- a/src/pymatgen/analysis/magnetism/analyzer.py
+++ b/src/pymatgen/analysis/magnetism/analyzer.py
@@ -226,7 +226,7 @@ def __init__(
             else magmom
             if abs(magmom) > threshold_nonmag and site.species_string not in self.default_magmoms
             else 0
-            for magmom, site in zip(magmoms, structure, strict=False)
+            for magmom, site in zip(magmoms, structure, strict=True)
         ]
 
         # overwrite existing magmoms with default_magmoms
@@ -791,7 +791,7 @@ def _generate_transformations(self, structure: Structure) -> dict[str, MagOrderi
         sga = SpacegroupAnalyzer(structure)
         structure_sym = sga.get_symmetrized_structure()
         wyckoff = ["n/a"] * len(structure)
-        for indices, symbol in zip(structure_sym.equivalent_indices, structure_sym.wyckoff_symbols, strict=False):
+        for indices, symbol in zip(structure_sym.equivalent_indices, structure_sym.wyckoff_symbols, strict=True):
             for index in indices:
                 wyckoff[index] = symbol
         is_magnetic_sites = [site.specie in types_mag_species for site in structure]
@@ -799,7 +799,7 @@ def _generate_transformations(self, structure: Structure) -> dict[str, MagOrderi
         # set these symbols to None to filter them out later
         wyckoff = [
             symbol if is_magnetic_site else "n/a"
-            for symbol, is_magnetic_site in zip(wyckoff, is_magnetic_sites, strict=False)
+            for symbol, is_magnetic_site in zip(wyckoff, is_magnetic_sites, strict=True)
         ]
         structure.add_site_property("wyckoff", wyckoff)
         wyckoff_symbols = set(wyckoff) - {"n/a"}
diff --git a/src/pymatgen/analysis/magnetism/heisenberg.py b/src/pymatgen/analysis/magnetism/heisenberg.py
index b094b9838fe..e8ca908d9fa 100644
--- a/src/pymatgen/analysis/magnetism/heisenberg.py
+++ b/src/pymatgen/analysis/magnetism/heisenberg.py
@@ -148,7 +148,7 @@ def _get_unique_sites(structure):
         unique_site_ids = {}
         wyckoff_ids = {}
 
-        for idx, (indices, symbol) in enumerate(zip(equivalent_indices, wyckoff_symbols, strict=False)):
+        for idx, (indices, symbol) in enumerate(zip(equivalent_indices, wyckoff_symbols, strict=True)):
             unique_site_ids[tuple(indices)] = idx
             wyckoff_ids[idx] = symbol
             for index in indices:
@@ -195,7 +195,7 @@ def _get_nn_dict(self):
             all_dists = all_dists[:3]
             labels = ("nn", "nnn", "nnnn")
-            dists = dict(zip(labels, all_dists, strict=False))
+            dists = dict(zip(labels, all_dists, strict=True))
 
             # Get dictionary keys for interactions
             for k in unique_site_ids:
@@ -376,7 +376,7 @@ def get_exchange(self):
        # Convert J_ij to meV
        j_ij[1:] *= 1000  # J_ij in meV
        j_ij = j_ij.tolist()
-        ex_params = {j_name: j[0] for j_name, j in zip(j_names, j_ij, strict=False)}
+        ex_params = {j_name: j[0] for j_name, j in zip(j_names, j_ij, strict=True)}
 
         self.ex_params = ex_params
 
@@ -401,7 +401,7 @@ def get_low_energy_orderings(self):
 
         # epas = [e / len(s) for (e, s) in zip(self.energies, self.ordered_structures)]
 
-        for s, e in zip(self.ordered_structures, self.energies, strict=False):
+        for s, e in zip(self.ordered_structures, self.energies, strict=True):
             ordering = CollinearMagneticStructureAnalyzer(s, threshold=0, make_primitive=False).ordering
             magmoms = s.site_properties["magmom"]
 
@@ -420,7 +420,7 @@ def get_low_energy_orderings(self):
         # Brute force search for closest thing to FM and AFM
         if not fm_struct or not afm_struct:
-            for s, e in zip(self.ordered_structures, self.energies, strict=False):
+            for s, e in zip(self.ordered_structures, self.energies, strict=True):
                 magmoms = s.site_properties["magmom"]
 
                 if abs(sum(magmoms)) > mag_max:
                     # FM ground state
@@ -723,7 +723,7 @@ def _do_cleanup(structures, energies):
         ]
 
         # Convert to energies / magnetic ion
-        energies = [e / len(s) for (e, s) in zip(energies, ordered_structures, strict=False)]
+        energies = [e / len(s) for (e, s) in zip(energies, ordered_structures, strict=True)]
 
         # Check for duplicate / degenerate states (sometimes different initial
         # configs relax to the same state)
@@ -751,7 +751,7 @@ def _do_cleanup(structures, energies):
         energies = [energy for idx, energy in enumerate(energies) if idx not in remove_list]
 
         # Sort by energy if not already sorted
-        ordered_structures = [s for _, s in sorted(zip(energies, ordered_structures, strict=False), reverse=False)]
+        ordered_structures = [s for _, s in sorted(zip(energies, ordered_structures, strict=True), reverse=False)]
         ordered_energies = sorted(energies, reverse=False)
 
         return ordered_structures, ordered_energies
diff --git a/src/pymatgen/analysis/molecule_matcher.py b/src/pymatgen/analysis/molecule_matcher.py
index ecf976258b8..152de7be790 100644
--- a/src/pymatgen/analysis/molecule_matcher.py
+++ b/src/pymatgen/analysis/molecule_matcher.py
@@ -403,7 +403,7 @@ def _align_heavy_atoms(mol1, mol2, vmol1, vmol2, ilabel1, ilabel2, eq_atoms):
                 canon_label2[c2 - 1] = canon_idx
                 candidates1.remove(canon_idx)
 
-        canon_inchi_orig_map2 = list(zip(canon_label2, list(range(1, n_heavy + 1)), ilabel2, strict=False))
+        canon_inchi_orig_map2 = list(zip(canon_label2, list(range(1, n_heavy + 1)), ilabel2, strict=True))
         canon_inchi_orig_map2.sort(key=lambda m: m[0])
 
         return tuple(x[2] for x in canon_inchi_orig_map2)
@@ -464,7 +464,7 @@ def _align_hydrogen_atoms(mol1, mol2, heavy_indices1, heavy_indices2):
                 hydrogen_label1.remove(idx)
 
         hydrogen_orig_idx2 = label2[len(heavy_indices2) :]
-        hydrogen_canon_orig_map2 = list(zip(hydrogen_label2, hydrogen_orig_idx2, strict=False))
+        hydrogen_canon_orig_map2 = list(zip(hydrogen_label2, hydrogen_orig_idx2, strict=True))
         hydrogen_canon_orig_map2.sort(key=lambda m: m[0])
         hydrogen_canon_indices2 = [x[1] for x in hydrogen_canon_orig_map2]
 
@@ -1100,7 +1100,7 @@ def get_principal_axis(coords, weights):
         """
         Ixx = Iyy = Izz = Ixy = Ixz = Iyz = 0.0
 
-        for (x, y, z), wt in zip(coords, weights, strict=False):
+        for (x, y, z), wt in zip(coords, weights, strict=True):
             Ixx += wt * (y * y + z * z)
             Iyy += wt * (x * x + z * z)
             Izz += wt * (x * x + y * y)
diff --git a/src/pymatgen/analysis/molecule_structure_comparator.py b/src/pymatgen/analysis/molecule_structure_comparator.py
index 7e74aee283e..07fbb1a0935 100644
--- a/src/pymatgen/analysis/molecule_structure_comparator.py
+++ b/src/pymatgen/analysis/molecule_structure_comparator.py
@@ -253,7 +253,7 @@ def _get_bonds(self, mol):
             for p in all_pairs
         ]
 
-        return [bond for bond, dist, cap in zip(all_pairs, pair_dists, max_length, strict=False) if dist <= cap]
+        return [bond for bond, dist, cap in zip(all_pairs, pair_dists, max_length, strict=True) if dist <= cap]
 
     def as_dict(self):
         """Get MSONable dict."""
diff --git a/src/pymatgen/analysis/phase_diagram.py b/src/pymatgen/analysis/phase_diagram.py
index b9142cd208d..48e39f17b86 100644
--- a/src/pymatgen/analysis/phase_diagram.py
+++ b/src/pymatgen/analysis/phase_diagram.py
@@ -527,7 +527,7 @@ def _get_stable_entries_in_space(self, space) -> list[Entry]:
         Returns:
             list[Entry]: stable entries in the space.
         """
-        return [e for e, s in zip(self._stable_entries, self._stable_spaces, strict=False) if space.issuperset(s)]
+        return [e for e, s in zip(self._stable_entries, self._stable_spaces, strict=True) if space.issuperset(s)]
 
     def get_reference_energy(self, comp: Composition) -> float:
         """Sum of elemental reference energies over all elements in a composition.
@@ -594,7 +594,7 @@ def _get_facet_and_simplex(self, comp: Composition) -> tuple[Simplex, Simplex]:
             comp (Composition): A composition
         """
         coord = self.pd_coords(comp)
-        for facet, simplex in zip(self.facets, self.simplexes, strict=False):
+        for facet, simplex in zip(self.facets, self.simplexes, strict=True):
             if simplex.in_simplex(coord, PhaseDiagram.numerical_tol / 10):
                 return facet, simplex
 
@@ -610,7 +610,7 @@ def _get_all_facets_and_simplexes(self, comp):
 
         all_facets = [
             facet
-            for facet, simplex in zip(self.facets, self.simplexes, strict=False)
+            for facet, simplex in zip(self.facets, self.simplexes, strict=True)
             if simplex.in_simplex(coords, PhaseDiagram.numerical_tol / 10)
         ]
 
@@ -634,7 +634,7 @@ def _get_facet_chempots(self, facet: list[int]) -> dict[Element, float]:
         atom_frac_mat = [[c.get_atomic_fraction(e) for e in self.elements] for c in comp_list]
         chempots = np.linalg.solve(atom_frac_mat, energy_list)
 
-        return dict(zip(self.elements, chempots, strict=False))
+        return dict(zip(self.elements, chempots, strict=True))
 
     def _get_simplex_intersections(self, c1, c2):
         """Get coordinates of the intersection of the tie line between two compositions
@@ -669,7 +669,7 @@ def get_decomposition(self, comp: Composition) -> dict[PDEntry, float]:
         decomp_amts = simplex.bary_coords(self.pd_coords(comp))
         return {
             self.qhull_entries[f]: amt
-            for f, amt in zip(facet, decomp_amts, strict=False)
+            for f, amt in zip(facet, decomp_amts, strict=True)
             if abs(amt) > PhaseDiagram.numerical_tol
         }
 
@@ -871,7 +871,7 @@ def get_decomp_and_phase_separation_energy(
             compare_entries = self._get_stable_entries_in_space(entry_elems)
         else:
             compare_entries = [
-                e for e, s in zip(self.qhull_entries, self._qhull_spaces, strict=False) if entry_elems.issuperset(s)
+                e for e, s in zip(self.qhull_entries, self._qhull_spaces, strict=True) if entry_elems.issuperset(s)
             ]
 
        # get memory ids of entries with the same composition.
@@ -1069,7 +1069,7 @@ def get_critical_compositions(self, comp1, comp2):
         num_atoms = n1 + (n2 - n1) * x_unnormalized
         cs *= num_atoms[:, None]
 
-        return [Composition((elem, val) for elem, val in zip(pd_els, m, strict=False)) for m in cs]
+        return [Composition((elem, val) for elem, val in zip(pd_els, m, strict=True)) for m in cs]
 
     def get_element_profile(self, element, comp, comp_tol=1e-5):
         """
@@ -1822,7 +1822,7 @@ def _get_pd_patch_for_space(self, space: frozenset[Element]) -> tuple[frozenset[
         Returns:
             space, PhaseDiagram for the given chemical space
         """
-        space_entries = [e for e, s in zip(self.qhull_entries, self._qhull_spaces, strict=False) if space.issuperset(s)]
+        space_entries = [e for e, s in zip(self.qhull_entries, self._qhull_spaces, strict=True) if space.issuperset(s)]
 
         return space, PhaseDiagram(space_entries)
 
@@ -1966,7 +1966,7 @@ def fmt(fl):
 
             energy = -(x * entry1.energy_per_atom + (1 - x) * entry2.energy_per_atom)
 
-            for c, entry in zip(coeffs[:-1], face_entries, strict=False):
+            for c, entry in zip(coeffs[:-1], face_entries, strict=True):
                 if c > tol:
                     redu_comp = entry.composition.reduced_composition
                     products.append(f"{fmt(c / redu_comp.num_atoms * factor)} {redu_comp.reduced_formula}")
@@ -1976,7 +1976,7 @@ def fmt(fl):
             rxn_str += " + ".join(products)
             comp = x * comp_vec1 + (1 - x) * comp_vec2
             entry = PDEntry(
-                Composition(dict(zip(elements, comp, strict=False))),
+                Composition(dict(zip(elements, comp, strict=True))),
                 energy=energy,
                 attribute=rxn_str,
             )
@@ -2118,7 +2118,7 @@ def _get_slsqp_decomp(
         decomp_amts = solution.x
         return {
             c: amt  # NOTE this is the amount of the fractional composition.
-            for c, amt in zip(competing_entries, decomp_amts, strict=False)
+            for c, amt in zip(competing_entries, decomp_amts, strict=True)
             if amt > PhaseDiagram.numerical_tol
         }
 
@@ -2559,9 +2559,9 @@ def pd_plot_data(self):
             else:
                 coord = tet_coord(data[line, 0:3])
             lines.append(coord)
-            labelcoord = list(zip(*coord, strict=False))
-            stable_entries[labelcoord[0]] = entry1
-            stable_entries[labelcoord[1]] = entry2
+            label_coord = list(zip(*coord, strict=True))
+            stable_entries[label_coord[0]] = entry1
+            stable_entries[label_coord[1]] = entry2
 
         all_entries = pd.all_entries
         all_data = np.array(pd.all_entries_hulldata)
@@ -2578,8 +2578,8 @@ def pd_plot_data(self):
                     coord = triangular_coord([all_data[idx, 0:2], all_data[idx, 0:2]])
                 else:
                     coord = tet_coord([all_data[idx, 0:3], all_data[idx, 0:3], all_data[idx, 0:3]])
-                labelcoord = list(zip(*coord, strict=False))
-                unstable_entries[entry] = labelcoord[0]
+                label_coord = list(zip(*coord, strict=True))
+                unstable_entries[entry] = label_coord[0]
 
         return lines, stable_entries, unstable_entries
 
@@ -2608,7 +2608,7 @@ def _create_plotly_figure_layout(self, label_stable=True):
             layout["annotations"] = annotations
         elif self._dim == 3 and self.ternary_style == "2d":
             layout = plotly_layouts["default_ternary_2d_layout"].copy()
-            for el, axis in zip(self._pd.elements, ["a", "b", "c"], strict=False):
+            for el, axis in zip(self._pd.elements, ["a", "b", "c"], strict=True):
                 el_ref = self._pd.el_refs[el]
                 clean_formula = str(el_ref.elements[0])
                 if hasattr(el_ref, "original_entry"):  # for grand potential PDs, etc.
@@ -2665,14 +2665,14 @@ def _create_plotly_lines(self):
             if self._dim == 3:
                 form_enes = [
                     self._pd.get_form_energy_per_atom(self.pd_plot_data[1][coord])
-                    for coord in zip(line[0], line[1], strict=False)
+                    for coord in zip(line[0], line[1], strict=True)
                 ]
                 z += [*form_enes, None]
 
             elif self._dim == 4:
                 form_enes = [
                     self._pd.get_form_energy_per_atom(self.pd_plot_data[1][coord])
-                    for coord in zip(line[0], line[1], line[2], strict=False)
+                    for coord in zip(line[0], line[1], line[2], strict=True)
                 ]
                 energies += [*form_enes, None]
                 z += [*line[2], None]
@@ -2743,7 +2743,7 @@ def _create_plotly_fill(self):
             coords = np.array(
                 [
                     triangular_coord(c)
-                    for c in zip(self._pd.qhull_data[:-1, 0], self._pd.qhull_data[:-1, 1], strict=False)
+                    for c in zip(self._pd.qhull_data[:-1, 0], self._pd.qhull_data[:-1, 1], strict=True)
                 ]
             )
             energies = np.array([self._pd.get_form_energy_per_atom(entry) for entry in self._pd.qhull_entries])
@@ -2999,7 +2999,7 @@ def get_marker_props(coords, entries):
             x, y, z, texts, energies, uncertainties = [], [], [], [], [], []
             is_stable = [entry in self._pd.stable_entries for entry in entries]
 
-            for coord, entry, stable in zip(coords, entries, is_stable, strict=False):
+            for coord, entry, stable in zip(coords, entries, is_stable, strict=True):
                 energy = round(self._pd.get_form_energy_per_atom(entry), 3)
 
                 entry_id = getattr(entry, "entry_id", "no ID")
@@ -3031,9 +3031,9 @@ def get_marker_props(coords, entries):
                 if self._dim == 3 and self.ternary_style == "2d":
                     label += "<br>"
                     total_sum_el = sum(
-                        entry.composition[el] for el, _axis in zip(self._pd.elements, range(self._dim), strict=False)
+                        entry.composition[el] for el, _axis in zip(self._pd.elements, range(self._dim), strict=True)
                     )
-                    for el, axis in zip(self._pd.elements, range(self._dim), strict=False):
+                    for el, axis in zip(self._pd.elements, range(self._dim), strict=True):
                         _cartesian_positions = [x, y, z]
                         _cartesian_positions[axis].append(entry.composition[el])
                         label += f"<br> {el}: {round(entry.composition[el]/total_sum_el, 6)}"
@@ -3044,9 +3044,9 @@ def get_marker_props(coords, entries):
                     label += "<br>"
                     total_sum_el = sum(
-                        entry.composition[el] for el, _axis in zip(self._pd.elements, range(self._dim), strict=False)
+                        entry.composition[el] for el, _axis in zip(self._pd.elements, range(self._dim), strict=True)
                     )
-                    for el, _axis in zip(self._pd.elements, range(self._dim), strict=False):
+                    for el, _axis in zip(self._pd.elements, range(self._dim), strict=True):
                         label += f"<br> {el}: {round(entry.composition[el]/total_sum_el, 6)}"
                 elif self._dim == 4:
                     x.append(coord[0])
@@ -3055,9 +3055,9 @@ def get_marker_props(coords, entries):
                     label += "<br>"
                     total_sum_el = sum(
-                        entry.composition[el] for el, _axis in zip(self._pd.elements, range(self._dim), strict=False)
+                        entry.composition[el] for el, _axis in zip(self._pd.elements, range(self._dim), strict=True)
                    )
-                    for el, _axis in zip(self._pd.elements, range(self._dim), strict=False):
+                    for el, _axis in zip(self._pd.elements, range(self._dim), strict=True):
                         label += f"<br> {el}: {round(entry.composition[el]/total_sum_el, 6)}"
                 else:
                     x.append(coord[0])
@@ -3074,7 +3074,7 @@ def get_marker_props(coords, entries):
         unstable_coords, unstable_entries = [], []
         highlight_coords, highlight_ents = [], []
 
-        for coord, entry in zip(self.pd_plot_data[1], self.pd_plot_data[1].values(), strict=False):
+        for coord, entry in zip(self.pd_plot_data[1], self.pd_plot_data[1].values(), strict=True):
             if entry in highlight_entries:
                 highlight_coords.append(coord)
                 highlight_ents.append(entry)
@@ -3082,7 +3082,7 @@ def get_marker_props(coords, entries):
                 stable_coords.append(coord)
                 stable_entries.append(entry)
 
-        for coord, entry in zip(self.pd_plot_data[2].values(), self.pd_plot_data[2], strict=False):
+        for coord, entry in zip(self.pd_plot_data[2].values(), self.pd_plot_data[2], strict=True):
             if entry in highlight_entries:
                 highlight_coords.append(coord)
                 highlight_ents.append(entry)
diff --git a/src/pymatgen/analysis/piezo.py b/src/pymatgen/analysis/piezo.py
index 2f168e1eef4..9d7c5793076 100644
--- a/src/pymatgen/analysis/piezo.py
+++ b/src/pymatgen/analysis/piezo.py
@@ -28,7 +28,7 @@ class PiezoTensor(Tensor):
     def __new__(cls, input_array: ArrayLike, tol: float = 1e-3) -> Self:
         """
         Create an PiezoTensor object. The constructor throws an error if
-        the shape of the input_matrix argument is not 3x3x3, i. e. in true
+        the shape of the input_matrix argument is not 3x3x3, i.e. in true
         tensor notation. Note that the constructor uses __new__ rather than
         __init__ according to the standard method of subclassing numpy
         ndarrays.
diff --git a/src/pymatgen/analysis/pourbaix_diagram.py b/src/pymatgen/analysis/pourbaix_diagram.py
index 5ae9740ea2b..92eec2ef917 100644
--- a/src/pymatgen/analysis/pourbaix_diagram.py
+++ b/src/pymatgen/analysis/pourbaix_diagram.py
@@ -276,7 +276,7 @@ def __getattr__(self, attr):
         # TODO: Composition could be changed for compat with sum
         start = Composition() if attr == "composition" else 0
         weighted_values = (
-            getattr(entry, attr) * weight for entry, weight in zip(self.entry_list, self.weights, strict=False)
+            getattr(entry, attr) * weight for entry, weight in zip(self.entry_list, self.weights, strict=True)
         )
         return sum(weighted_values, start)
 
@@ -289,7 +289,7 @@ def __getattr__(self, attr):
 
     @property
     def name(self):
-        """MultiEntry name, i. e. the name of each entry joined by ' + '."""
+        """MultiEntry name, i.e. the name of each entry joined by ' + '."""
         return " + ".join(entry.name for entry in self.entry_list)
 
     def __repr__(self):
@@ -695,7 +695,7 @@ def process_multientry(entry_list, prod_comp, coeff_threshold=1e-4):
 
     @staticmethod
     def get_pourbaix_domains(pourbaix_entries, limits=None):
-        """Get a set of Pourbaix stable domains (i. e. polygons) in
+        """Get a set of Pourbaix stable domains (i.e. polygons) in
         pH-V space from a list of pourbaix_entries.
 
         This function works by using scipy's HalfspaceIntersection
@@ -746,7 +746,7 @@ def get_pourbaix_domains(pourbaix_entries, limits=None):
         # organize the boundary points by entry
         pourbaix_domains = {entry: [] for entry in pourbaix_entries}
 
-        for intersection, facet in zip(hs_int.intersections, hs_int.dual_facets, strict=False):
+        for intersection, facet in zip(hs_int.intersections, hs_int.dual_facets, strict=True):
             for v in facet:
                 if v < len(pourbaix_entries):
                     this_entry = pourbaix_entries[v]
@@ -799,7 +799,7 @@ def get_decomposition_energy(self, entry, pH, V):
             V (float, list[float]): voltage at which to find the decomposition
 
         Returns:
-            Decomposition energy for the entry, i. e. the energy above
+            Decomposition energy for the entry, i.e. the energy above
                 the "Pourbaix hull" in eV/atom at the given conditions
         """
         # Check composition consistency between entry and Pourbaix diagram:
diff --git a/src/pymatgen/analysis/quasiharmonic.py b/src/pymatgen/analysis/quasiharmonic.py
index 5dcdc9a39e5..f137acb635d 100644
--- a/src/pymatgen/analysis/quasiharmonic.py
+++ b/src/pymatgen/analysis/quasiharmonic.py
@@ -350,7 +350,7 @@ def get_summary_dict(self):
         dct["gibbs_free_energy"] = self.gibbs_free_energy
         dct["temperatures"] = self.temperatures
         dct["optimum_volumes"] = self.optimum_volumes
-        for v, t in zip(self.optimum_volumes, self.temperatures, strict=False):
+        for v, t in zip(self.optimum_volumes, self.temperatures, strict=True):
             dct["debye_temperature"].append(self.debye_temperature(v))
             dct["gruneisen_parameter"].append(self.gruneisen_parameter(t, v))
             dct["thermal_conductivity"].append(self.thermal_conductivity(t, v))
diff --git a/src/pymatgen/analysis/reaction_calculator.py b/src/pymatgen/analysis/reaction_calculator.py
index 93740dfa681..6f98b83fe5b 100644
--- a/src/pymatgen/analysis/reaction_calculator.py
+++ b/src/pymatgen/analysis/reaction_calculator.py
@@ -113,7 +113,7 @@ def calculate_energy(self, energies):
         Returns:
             reaction energy as a float.
         """
-        return sum(amt * energies[c] for amt, c in zip(self._coeffs, self._all_comp, strict=False))
+        return sum(amt * energies[c] for amt, c in zip(self._coeffs, self._all_comp, strict=True))
 
     def normalize_to(self, comp: Composition, factor: float = 1) -> None:
         """
@@ -203,7 +203,7 @@ def normalized_repr(self) -> str:
     def _str_from_formulas(cls, coeffs, formulas) -> str:
         reactant_str = []
         product_str = []
-        for amt, formula in zip(coeffs, formulas, strict=False):
+        for amt, formula in zip(coeffs, formulas, strict=True):
             if abs(amt + 1) < cls.TOLERANCE:
                 reactant_str.append(formula)
             elif abs(amt - 1) < cls.TOLERANCE:
@@ -219,7 +219,7 @@ def _str_from_formulas(cls, coeffs, formulas) -> str:
     def _str_from_comp(cls, coeffs, compositions, reduce=False) -> tuple[str, float]:
         r_coeffs = np.zeros(len(coeffs))
         r_formulas = []
-        for idx, (amt, comp) in enumerate(zip(coeffs, compositions, strict=False)):
+        for idx, (amt, comp) in enumerate(zip(coeffs, compositions, strict=True)):
             formula, factor = comp.get_reduced_formula_and_factor()
             r_coeffs[idx] = amt * factor
             r_formulas.append(formula)
@@ -232,7 +232,7 @@ def _str_from_comp(cls, coeffs, compositions, reduce=False) -> tuple[str, float]
 
     def as_entry(self, energies) -> ComputedEntry:
         """Get a ComputedEntry representation of the reaction."""
-        relevant_comp = [comp * abs(coeff) for coeff, comp in zip(self._coeffs, self._all_comp, strict=False)]
+        relevant_comp = [comp * abs(coeff) for coeff, comp in zip(self._coeffs, self._all_comp, strict=True)]
         comp: Composition = sum(relevant_comp, Composition())  # type: ignore[assignment]
         entry = ComputedEntry(0.5 * comp, self.calculate_energy(energies))
diff --git a/src/pymatgen/analysis/structure_analyzer.py b/src/pymatgen/analysis/structure_analyzer.py
index c26f5ce5a84..6b2b5be07ee 100644
--- a/src/pymatgen/analysis/structure_analyzer.py
+++ b/src/pymatgen/analysis/structure_analyzer.py
@@ -154,7 +154,7 @@ def plot_vor_analysis(voronoi_ensemble: list[tuple[str, float]]) -> plt.Axes:
         Returns:
             plt.Axes: Matplotlib Axes object with the plotted Voronoi analysis.
""" - labels, val = zip(*voronoi_ensemble, strict=False) + labels, val = zip(*voronoi_ensemble, strict=True) arr = np.array(val, dtype=float) arr /= np.sum(arr) pos = np.arange(len(arr)) + 0.5 # the bar centers on the y axis diff --git a/src/pymatgen/analysis/structure_matcher.py b/src/pymatgen/analysis/structure_matcher.py index 2ef3dc7b8e1..ca4ed5b949a 100644 --- a/src/pymatgen/analysis/structure_matcher.py +++ b/src/pymatgen/analysis/structure_matcher.py @@ -913,7 +913,7 @@ def _anonymous_match( s2_comp = struct2.composition matches = [] for perm in itertools.permutations(sp2): - sp_mapping = dict(zip(sp1, perm, strict=False)) + sp_mapping = dict(zip(sp1, perm, strict=True)) # do quick check that compositions are compatible mapped_comp = Composition({sp_mapping[k]: v for k, v in s1_comp.items()}) diff --git a/src/pymatgen/analysis/structure_prediction/substitution_probability.py b/src/pymatgen/analysis/structure_prediction/substitution_probability.py index c003684391a..045c0ba6be3 100644 --- a/src/pymatgen/analysis/structure_prediction/substitution_probability.py +++ b/src/pymatgen/analysis/structure_prediction/substitution_probability.py @@ -152,7 +152,7 @@ def cond_prob_list(self, l1, l2): """ assert len(l1) == len(l2) p = 1 - for s1, s2 in zip(l1, l2, strict=False): + for s1, s2 in zip(l1, l2, strict=True): p *= self.cond_prob(s1, s2) return p @@ -231,9 +231,9 @@ def _recurse(output_prob, output_species): if len(output_species) == len(species): odict = {"probability": functools.reduce(mul, best_case_prob)} if to_this_composition: - odict["substitutions"] = dict(zip(output_species, species, strict=False)) + odict["substitutions"] = dict(zip(output_species, species, strict=True)) else: - odict["substitutions"] = dict(zip(species, output_species, strict=False)) + odict["substitutions"] = dict(zip(species, output_species, strict=True)) if len(output_species) == len(set(output_species)): output.append(odict) return diff --git a/src/pymatgen/analysis/structure_prediction/substitutor.py b/src/pymatgen/analysis/structure_prediction/substitutor.py index 187664b9281..1cad9a10645 100644 --- a/src/pymatgen/analysis/structure_prediction/substitutor.py +++ b/src/pymatgen/analysis/structure_prediction/substitutor.py @@ -207,7 +207,7 @@ def _recurse(output_prob, output_species): if functools.reduce(mul, best_case_prob) > self._threshold: if len(output_species) == len(species_list): odict = { - "substitutions": dict(zip(species_list, output_species, strict=False)), + "substitutions": dict(zip(species_list, output_species, strict=True)), "probability": functools.reduce(mul, best_case_prob), } output.append(odict) diff --git a/src/pymatgen/analysis/surface_analysis.py b/src/pymatgen/analysis/surface_analysis.py index 439c21a45ab..08666323732 100644 --- a/src/pymatgen/analysis/surface_analysis.py +++ b/src/pymatgen/analysis/surface_analysis.py @@ -780,7 +780,7 @@ def stable_u_range_dict( # sort the chempot ranges for each facet for entry, v in stable_urange_dict.items(): - se_dict[entry] = [se for idx, se in sorted(zip(v, se_dict[entry], strict=False))] + se_dict[entry] = [se for idx, se in sorted(zip(v, se_dict[entry], strict=True))] stable_urange_dict[entry] = sorted(v) if return_se_dict: @@ -1033,7 +1033,7 @@ def monolayer_vs_BE(self, plot_eads=False): # sort the binding energies and monolayers # in order to properly draw a line plot vals = sorted(ml_be_dict.items()) - monolayers, BEs = zip(*vals, strict=False) + monolayers, BEs = zip(*vals, strict=True) ax.plot(monolayers, BEs, "-o", 
c=self.color_dict[clean_entry], label=hkl) adsorbates = tuple(ads_entry.ads_entries_dict) @@ -1447,7 +1447,7 @@ def get_locpot_along_slab_plot(self, label_energies=True, plt=None, label_fontsi else: yg.append(pot) xg.append(self.along_c[idx]) - xg, yg = zip(*sorted(zip(xg, yg, strict=False)), strict=False) + xg, yg = zip(*sorted(zip(xg, yg, strict=True)), strict=True) plt.plot(xg, yg, "r", linewidth=2.5, zorder=-1) # make it look nice diff --git a/src/pymatgen/analysis/transition_state.py b/src/pymatgen/analysis/transition_state.py index 20e91a2aba4..522e8130617 100644 --- a/src/pymatgen/analysis/transition_state.py +++ b/src/pymatgen/analysis/transition_state.py @@ -109,7 +109,7 @@ def from_outcars(cls, outcars, structures, **kwargs) -> Self: rms_dist = [0] prev = structures[0] for st in structures[1:]: - dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st, strict=False)]) + dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st, strict=True)]) rms_dist.append(np.sqrt(np.sum(dists**2))) prev = st rms_dist = np.cumsum(rms_dist) @@ -173,7 +173,7 @@ def get_plot(self, normalize_rxn_coordinate: bool = True, label_barrier: bool = ax.set_ylabel("Energy (meV)") ax.set_ylim((np.min(ys) - 10, np.max(ys) * 1.02 + 20)) if label_barrier: - data = zip(xs * scale, ys, strict=False) + data = zip(xs * scale, ys, strict=True) barrier = max(data, key=lambda d: d[1]) ax.plot([0, barrier[0]], [barrier[1], barrier[1]], "k--", linewidth=0.5) ax.annotate( diff --git a/src/pymatgen/analysis/wulff.py b/src/pymatgen/analysis/wulff.py index 0e4a0c04b93..ee3dcd34207 100644 --- a/src/pymatgen/analysis/wulff.py +++ b/src/pymatgen/analysis/wulff.py @@ -202,7 +202,7 @@ def _get_all_miller_e(self): recp = self.structure.lattice.reciprocal_lattice_crystallographic recp_symm_ops = self.lattice.get_recp_symmetry_operation(self.symprec) - for i, (hkl, energy) in enumerate(zip(self.hkl_list, self.e_surf_list, strict=False)): + for i, (hkl, energy) in enumerate(zip(self.hkl_list, self.e_surf_list, strict=True)): for op in recp_symm_ops: miller = tuple(int(x) for x in op.operate(hkl)) if miller not in all_hkl: @@ -625,12 +625,12 @@ def volume(self) -> float: @property def miller_area_dict(self) -> dict[tuple, float]: """{hkl: area_hkl on wulff}.""" - return dict(zip(self.miller_list, self.color_area, strict=False)) + return dict(zip(self.miller_list, self.color_area, strict=True)) @property def miller_energy_dict(self) -> dict[tuple, float]: """{hkl: surface energy_hkl}.""" - return dict(zip(self.miller_list, self.e_surf_list, strict=False)) + return dict(zip(self.miller_list, self.e_surf_list, strict=True)) @property def surface_area(self) -> float: diff --git a/src/pymatgen/analysis/xas/spectrum.py b/src/pymatgen/analysis/xas/spectrum.py index 2eb1c94e99a..98de8141e8b 100644 --- a/src/pymatgen/analysis/xas/spectrum.py +++ b/src/pymatgen/analysis/xas/spectrum.py @@ -194,7 +194,7 @@ def stitch(self, other: XAS, num_samples: int = 500, mode: Literal["XAFS", "L23" ) l3_f = interp1d(l3_xanes.x, l3_xanes.y, bounds_error=True, fill_value=0, kind="cubic") energy = list(np.linspace(min(l3_xanes.x), max(l3_xanes.x), num=num_samples)) - mu = [i + j for i, j in zip([max(i, 0) for i in l2_f(energy)], l3_f(energy), strict=False)] + mu = [i + j for i, j in zip([max(i, 0) for i in l2_f(energy)], l3_f(energy), strict=True)] # check for jumps at the onset of L2-edge XANES idx = energy.index(min(energy, key=lambda x: (abs(x - l2_xanes.x[0])))) if abs(mu[idx] - mu[idx - 1]) / (mu[idx - 1]) > 0.1: diff --git 
a/src/pymatgen/apps/borg/hive.py b/src/pymatgen/apps/borg/hive.py index 053d614d05f..1e57a1d5428 100644 --- a/src/pymatgen/apps/borg/hive.py +++ b/src/pymatgen/apps/borg/hive.py @@ -253,7 +253,7 @@ def assimilate(self, path): param = {"hubbards": {}} if "LDAUU" in incar: - param["hubbards"] = dict(zip(poscar.site_symbols, incar["LDAUU"], strict=False)) + param["hubbards"] = dict(zip(poscar.site_symbols, incar["LDAUU"], strict=True)) param["is_hubbard"] = incar.get("LDAU", True) and sum(param["hubbards"].values()) > 0 param["run_type"] = None param["potcar_spec"] = potcar.spec diff --git a/src/pymatgen/cli/pmg_config.py b/src/pymatgen/cli/pmg_config.py index 2be015d72cf..877f055875f 100755 --- a/src/pymatgen/cli/pmg_config.py +++ b/src/pymatgen/cli/pmg_config.py @@ -272,7 +272,7 @@ def add_config_var(tokens: list[str], backup_suffix: str) -> None: print(f"Existing {rc_path} backed up to {rc_path}{backup_suffix}") dct = loadfn(rc_path) special_vals = {"true": True, "false": False, "none": None, "null": None} - for key, val in zip(tokens[::2], tokens[1::2], strict=False): + for key, val in zip(tokens[::2], tokens[1::2], strict=True): dct[key] = special_vals.get(val.lower(), val) dumpfn(dct, rc_path) print(f"New {rc_path} written!") diff --git a/src/pymatgen/command_line/bader_caller.py b/src/pymatgen/command_line/bader_caller.py index 07f3b65f591..58d0364280a 100644 --- a/src/pymatgen/command_line/bader_caller.py +++ b/src/pymatgen/command_line/bader_caller.py @@ -218,7 +218,7 @@ def _parse_acf(self) -> list[dict]: if line.startswith("-"): break vals = map(float, line.split()[1:]) - data.append(dict(zip(headers, vals, strict=False))) + data.append(dict(zip(headers, vals, strict=True))) for line in lines: tokens = line.strip().split(":") @@ -277,7 +277,7 @@ def find_encompassing_vol(data: np.ndarray) -> np.ndarray | None: self.chgcar.structure, self.chgcar.structure.frac_coords, atom_chgcars, - strict=False, + strict=True, ): # Find the index of the atom in the charge density atom index = np.round(np.multiply(loc, chg.dim)) diff --git a/src/pymatgen/command_line/gulp_caller.py b/src/pymatgen/command_line/gulp_caller.py index a2b7e4597fb..0e1ae16fdd6 100644 --- a/src/pymatgen/command_line/gulp_caller.py +++ b/src/pymatgen/command_line/gulp_caller.py @@ -408,12 +408,12 @@ def buckingham_potential(structure, val_dict=None): # If structure is oxidation state decorated, use that first. 
             el = [site.specie.symbol for site in structure]
             valences = [site.specie.oxi_state for site in structure]
-            val_dict = dict(zip(el, valences, strict=False))
+            val_dict = dict(zip(el, valences, strict=True))
         except AttributeError:
             bv = BVAnalyzer()
             el = [site.specie.symbol for site in structure]
             valences = bv.get_valences(structure)
-            val_dict = dict(zip(el, valences, strict=False))
+            val_dict = dict(zip(el, valences, strict=True))
 
     # Try bush library first
     bpb = BuckinghamPotential("bush")
@@ -490,7 +490,7 @@ def tersoff_potential(structure):
     bv = BVAnalyzer()
     el = [site.specie.symbol for site in structure]
     valences = bv.get_valences(structure)
-    el_val_dict = dict(zip(el, valences, strict=False))
+    el_val_dict = dict(zip(el, valences, strict=True))
 
     gin = "species \n"
     qerf_str = "qerfc\n"
diff --git a/src/pymatgen/core/composition.py b/src/pymatgen/core/composition.py
index bd109a8e927..d772480bedf 100644
--- a/src/pymatgen/core/composition.py
+++ b/src/pymatgen/core/composition.py
@@ -726,7 +726,7 @@ def charge(self) -> float | None:
         if {*oxi_states} <= {0, None}:
             # all oxidation states are None or 0
             return None
-        return sum(oxi * amt for oxi, amt in zip(oxi_states, self.values(), strict=False))
+        return sum(oxi * amt for oxi, amt in zip(oxi_states, self.values(), strict=True))
 
     @property
     def charge_balanced(self) -> bool | None:
@@ -1008,7 +1008,7 @@ def _get_oxi_state_guesses(
         for x in product(*el_sums):
             # Each x is a trial of one possible oxidation sum for each element
             if sum(x) == target_charge:  # charge balance condition
-                el_sum_sol = dict(zip(elements, x, strict=False))  # element->oxid_sum
+                el_sum_sol = dict(zip(elements, x, strict=True))  # element->oxid_sum
                 # Normalize oxid_sum by amount to get avg oxid state
                 sol = {el: v / el_amt[el] for el, v in el_sum_sol.items()}
                 # Add the solution to the list of solutions
@@ -1022,7 +1022,7 @@ def _get_oxi_state_guesses(
 
                 # Collect the combination of oxidation states for each site
                 all_oxid_combo.append(
-                    {e: el_best_oxid_combo[idx][v] for idx, (e, v) in enumerate(zip(elements, x, strict=False))}
+                    {e: el_best_oxid_combo[idx][v] for idx, (e, v) in enumerate(zip(elements, x, strict=True))}
                 )
 
         # Sort the solutions from highest to lowest score
@@ -1031,12 +1031,12 @@ def _get_oxi_state_guesses(
             *(
                 (y, x)
                 for (z, y, x) in sorted(
-                    zip(all_scores, all_sols, all_oxid_combo, strict=False),
+                    zip(all_scores, all_sols, all_oxid_combo, strict=True),
                     key=lambda pair: pair[0],
                     reverse=True,
                 )
             ),
-            strict=False,
+            strict=True,
         )
         return tuple(all_sols), tuple(all_oxid_combo)
diff --git a/src/pymatgen/core/interface.py b/src/pymatgen/core/interface.py
index f71e7ac278f..3a2c0122723 100644
--- a/src/pymatgen/core/interface.py
+++ b/src/pymatgen/core/interface.py
@@ -2559,7 +2559,7 @@ def substrate_indices(self) -> list[int]:
     def substrate_sites(self) -> list[Site]:
         """The site objects in the substrate."""
         return [
-            site for site, tag in zip(self, self.site_properties["interface_label"], strict=False) if "substrate" in tag
+            site for site, tag in zip(self, self.site_properties["interface_label"], strict=True) if "substrate" in tag
         ]
 
     @property
@@ -2575,7 +2575,7 @@ def film_indices(self) -> list[int]:
     @property
     def film_sites(self) -> list[Site]:
         """The film sites of the interface."""
-        return [site for site, tag in zip(self, self.site_properties["interface_label"], strict=False) if "film" in tag]
+        return [site for site, tag in zip(self, self.site_properties["interface_label"], strict=True) if "film" in tag]
 
     @property
     def film(self) -> Structure:
@@ -2687,7 +2687,7 @@ def _update_c(self, new_c: float) -> None:
         new_lattice = Lattice(new_latt_matrix)
         self._lattice = new_lattice
 
-        for site, c_coords in zip(self, self.cart_coords, strict=False):
+        for site, c_coords in zip(self, self.cart_coords, strict=True):
             site._lattice = new_lattice  # Update the lattice
             site.coords = c_coords  # Put back into original Cartesian space
diff --git a/src/pymatgen/core/lattice.py b/src/pymatgen/core/lattice.py
index 67263974733..62acc15907e 100644
--- a/src/pymatgen/core/lattice.py
+++ b/src/pymatgen/core/lattice.py
@@ -516,7 +516,7 @@ def parameters(self) -> tuple[float, float, float, float, float, float]:
     @property
     def params_dict(self) -> dict[str, float]:
         """Dictionary of lattice parameters."""
-        return dict(zip("a b c alpha beta gamma".split(), self.parameters, strict=False))
+        return dict(zip("a b c alpha beta gamma".split(), self.parameters, strict=True))
 
     @property
     def reciprocal_lattice(self) -> Self:
@@ -1320,7 +1320,7 @@ def dot(
         cart_a = np.reshape([self.get_cartesian_coords(vec) for vec in coords_a], (-1, 3))
         cart_b = np.reshape([self.get_cartesian_coords(vec) for vec in coords_b], (-1, 3))
 
-        return np.array(list(itertools.starmap(np.dot, zip(cart_a, cart_b, strict=False))))
+        return np.array(list(itertools.starmap(np.dot, zip(cart_a, cart_b, strict=True))))
 
     def norm(self, coords: ArrayLike, frac_coords: bool = True) -> np.ndarray:
         """Compute the norm of vector(s).
@@ -1393,7 +1393,7 @@ def get_points_in_sphere(
             return np.array([]) if zip_results else tuple(np.array([]) for _ in range(4))
         frac_coords = frac_points[indices] + images
         if zip_results:
-            return tuple(zip(frac_coords, distances, indices, images, strict=False))
+            return tuple(zip(frac_coords, distances, indices, images, strict=True))
         return frac_coords, distances, indices, images
 
     def get_points_in_sphere_py(
@@ -1447,7 +1447,7 @@ def get_points_in_sphere_py(
             return [] if zip_results else [()] * 4
         if zip_results:
             return neighbors
-        return [np.array(i) for i in list(zip(*neighbors, strict=False))]
+        return list(map(np.array, zip(*neighbors, strict=True)))
 
     @deprecated(get_points_in_sphere, "This is retained purely for checking purposes.")
     def get_points_in_sphere_old(
@@ -1543,7 +1543,7 @@ def get_points_in_sphere_old(
                     np.sqrt(d_2[within_r]),
                     indices[within_r[0]],
                     images[within_r[1:]],
-                    strict=False,
+                    strict=True,
                 )
             )
         return shifted_coords[within_r], np.sqrt(d_2[within_r]), indices[within_r[0]], images[within_r[1:]]
@@ -1804,7 +1804,7 @@ def get_points_in_spheres(
         nmin[_pbc] = nmin_temp[_pbc]
         nmax = np.ones_like(nmax_temp)
         nmax[_pbc] = nmax_temp[_pbc]
-    all_ranges = [np.arange(x, y, dtype="int64") for x, y in zip(nmin, nmax, strict=False)]
+    all_ranges = [np.arange(x, y, dtype="int64") for x, y in zip(nmin, nmax, strict=True)]
     matrix = lattice.matrix
 
     # Temporarily hold the fractional coordinates
@@ -1856,7 +1856,7 @@ def get_points_in_spheres(
     cube_to_coords: dict[int, list] = defaultdict(list)
     cube_to_images: dict[int, list] = defaultdict(list)
     cube_to_indices: dict[int, list] = defaultdict(list)
-    for ii, jj, kk, ll in zip(all_cube_index.ravel(), valid_coords, valid_images, valid_indices, strict=False):
+    for ii, jj, kk, ll in zip(all_cube_index.ravel(), valid_coords, valid_images, valid_indices, strict=True):
         cube_to_coords[ii].append(jj)
         cube_to_images[ii].append(kk)
         cube_to_indices[ii].append(ll)
@@ -1865,7 +1865,7 @@ def get_points_in_spheres(
     site_neighbors = find_neighbors(site_cube_index, nx, ny, nz)
     neighbors: list[list[tuple[np.ndarray, float, int, np.ndarray]]] = []
-    for ii, jj in
zip(center_coords, site_neighbors, strict=False): + for ii, jj in zip(center_coords, site_neighbors, strict=True): l1 = np.array(_three_to_one(jj, ny, nz), dtype=int).ravel() # Use the cube index map to find the all the neighboring # coords, images, and indices @@ -1878,7 +1878,7 @@ def get_points_in_spheres( nn_indices = itertools.chain(*(cube_to_indices[k] for k in ks)) distances = np.linalg.norm(nn_coords - ii[None, :], axis=1) nns: list[tuple[np.ndarray, float, int, np.ndarray]] = [] - for coord, index, image, dist in zip(nn_coords, nn_indices, nn_images, distances, strict=False): + for coord, index, image, dist in zip(nn_coords, nn_indices, nn_images, distances, strict=True): # Filtering out all sites that are beyond the cutoff # Here there is no filtering of overlapping sites if dist < r + numerical_tol: diff --git a/src/pymatgen/core/operations.py b/src/pymatgen/core/operations.py index 4658dafc565..7dcf9bb3b09 100644 --- a/src/pymatgen/core/operations.py +++ b/src/pymatgen/core/operations.py @@ -159,7 +159,7 @@ def transform_tensor(self, tensor: np.ndarray) -> np.ndarray: # Build einstein sum string lc = string.ascii_lowercase indices = lc[:rank], lc[rank : 2 * rank] - einsum_string = ",".join(a + i for a, i in zip(*indices, strict=False)) + einsum_string = ",".join(a + i for a, i in zip(*indices, strict=True)) einsum_string += f",{indices[::-1][0]}->{indices[::-1][1]}" einsum_args = [self.rotation_matrix] * rank + [tensor] diff --git a/src/pymatgen/core/periodic_table.py b/src/pymatgen/core/periodic_table.py index 21ed8e48532..9d9606704af 100644 --- a/src/pymatgen/core/periodic_table.py +++ b/src/pymatgen/core/periodic_table.py @@ -524,7 +524,7 @@ def term_symbols(self) -> list[list[str]]: # Total ML = sum(ml1, ml2), Total MS = sum(ms1, ms2) TL = [sum(ml_ms[comb[e]][0] for e in range(v_e)) for comb in e_config_combs] TS = [sum(ml_ms[comb[e]][1] for e in range(v_e)) for comb in e_config_combs] - comb_counter = Counter(zip(TL, TS, strict=False)) + comb_counter = Counter(zip(TL, TS, strict=True)) term_symbols = [] L_symbols = "SPDFGHIKLMNOQRTUVWXYZ" diff --git a/src/pymatgen/core/sites.py b/src/pymatgen/core/sites.py index 1e8429ee2c7..e984c4296de 100644 --- a/src/pymatgen/core/sites.py +++ b/src/pymatgen/core/sites.py @@ -329,7 +329,7 @@ def __init__( frac_coords = lattice.get_fractional_coords(coords) if coords_are_cartesian else coords if to_unit_cell: - frac_coords = np.array([np.mod(f, 1) if p else f for p, f in zip(lattice.pbc, frac_coords, strict=False)]) + frac_coords = np.array([np.mod(f, 1) if p else f for p, f in zip(lattice.pbc, frac_coords, strict=True)]) if not skip_checks: frac_coords = np.array(frac_coords) @@ -475,7 +475,7 @@ def z(self, z: float) -> None: def to_unit_cell(self, in_place: bool = False) -> Self | None: """Move frac coords to within the unit cell.""" - frac_coords = [np.mod(f, 1) if p else f for p, f in zip(self.lattice.pbc, self.frac_coords, strict=False)] + frac_coords = [np.mod(f, 1) if p else f for p, f in zip(self.lattice.pbc, self.frac_coords, strict=True)] if in_place: self.frac_coords = np.array(frac_coords) return None diff --git a/src/pymatgen/core/structure.py b/src/pymatgen/core/structure.py index 69dd1ddf37d..31d8e0142b7 100644 --- a/src/pymatgen/core/structure.py +++ b/src/pymatgen/core/structure.py @@ -547,7 +547,7 @@ def add_site_property(self, property_name: str, values: Sequence | np.ndarray) - """ if len(values) != len(self): raise ValueError(f"{len(values)=} must equal sites in structure={len(self)}") - for site, val in zip(self, 
values, strict=False): + for site, val in zip(self, values, strict=True): site.properties[property_name] = val return self @@ -651,7 +651,7 @@ def add_oxidation_state_by_site(self, oxidation_states: list[float]) -> Self: f"Oxidation states of all sites must be specified, expected {len(self)} values, " f"got {len(oxidation_states)}" ) - for site, ox in zip(self, oxidation_states, strict=False): + for site, ox in zip(self, oxidation_states, strict=True): new_sp = {} for el, occu in site.species.items(): sym = el.symbol @@ -712,7 +712,7 @@ def add_spin_by_site(self, spins: Sequence[float]) -> Self: if len(spins) != len(self): raise ValueError(f"Spins for all sites must be specified, expected {len(self)} spins, got {len(spins)}") - for site, spin in zip(self.sites, spins, strict=False): + for site, spin in zip(self.sites, spins, strict=True): new_species = {} for sp, occu in site.species.items(): sym = sp.symbol @@ -1321,7 +1321,7 @@ def from_spacegroup( all_coords: list[list[float]] = [] all_site_properties: dict[str, list] = defaultdict(list) all_labels: list[str | None] = [] - for idx, (sp, c) in enumerate(zip(species, frac_coords, strict=False)): + for idx, (sp, c) in enumerate(zip(species, frac_coords, strict=True)): cc = spg.get_orbit(c, tol=tol) all_sp.extend([sp] * len(cc)) all_coords.extend(cc) @@ -1427,7 +1427,7 @@ def from_magnetic_spacegroup( all_magmoms: list[float] = [] all_site_properties: dict[str, list] = defaultdict(list) all_labels: list[str | None] = [] - for idx, (spec, f_coord, magmom) in enumerate(zip(species, frac_coords, magmoms, strict=False)): + for idx, (spec, f_coord, magmom) in enumerate(zip(species, frac_coords, magmoms, strict=True)): cc, mm = msg.get_orbit(f_coord, magmom, tol=tol) all_sp.extend([spec] * len(cc)) all_coords.extend(cc) @@ -1864,10 +1864,10 @@ def get_symmetric_neighbor_list( # Compare all neighbors pairwise to find the pairs that connect the same # two sites, but with an inverted vector (R=-R) that connects the two and add # one of each pair to the redundant list. 
- for idx, (i, j, R, d) in enumerate(zip(*bonds, strict=False)): + for idx, (i, j, R, d) in enumerate(zip(*bonds, strict=True)): if idx in redundant: continue - for jdx, (i2, j2, R2, d2) in enumerate(zip(*bonds, strict=False)): + for jdx, (i2, j2, R2, d2) in enumerate(zip(*bonds, strict=True)): bool1 = i == j2 bool2 = j == i2 bool3 = (-R2 == R).all() @@ -2005,7 +2005,7 @@ def get_all_neighbors( atol = Site.position_atol all_sites = self.sites for cindex, pindex, image, f_coord, d in zip( - center_indices, points_indices, images, f_coords, distances, strict=False + center_indices, points_indices, images, f_coords, distances, strict=True ): psite = all_sites[pindex] csite = sites[cindex] @@ -2093,7 +2093,7 @@ def get_all_neighbors_py( lattice=self.lattice, ) neighbors: list[list[PeriodicNeighbor]] = [] - for point_neighbor, site in zip(point_neighbors, sites, strict=False): + for point_neighbor, site in zip(point_neighbors, sites, strict=True): nns: list[PeriodicNeighbor] = [] if len(point_neighbor) < 1: neighbors.append([]) @@ -2157,7 +2157,7 @@ def get_all_neighbors_old( nmin = np.floor(np.min(self.frac_coords, axis=0)) - maxr nmax = np.ceil(np.max(self.frac_coords, axis=0)) + maxr - all_ranges = list(itertools.starmap(np.arange, zip(nmin, nmax, strict=False))) + all_ranges = list(itertools.starmap(np.arange, zip(nmin, nmax, strict=True))) lattice = self._lattice matrix = lattice.matrix neighbors: list[list] = [[] for _ in range(len(self))] @@ -2172,7 +2172,7 @@ def get_all_neighbors_old( all_dists = all_distances(coords, site_coords) all_within_r = np.bitwise_and(all_dists <= r, all_dists > 1e-8) - for j, d, within_r in zip(indices, all_dists, all_within_r, strict=False): + for j, d, within_r in zip(indices, all_dists, all_within_r, strict=True): if include_site: nnsite = PeriodicSite( self[j].species, @@ -2604,7 +2604,7 @@ def factors(n: int): any_close = np.any(is_close, axis=-1) inds = np.all(any_close, axis=-1) - for inv_m, latt_mat in zip(inv_ms[inds], ms[inds], strict=False): + for inv_m, latt_mat in zip(inv_ms[inds], ms[inds], strict=True): new_m = np.dot(inv_m, self.lattice.matrix) ftol = np.divide(tolerance, np.sqrt(np.sum(new_m**2, axis=1))) @@ -2614,7 +2614,7 @@ def factors(n: int): new_props = defaultdict(list) new_labels = [] for gsites, gf_coords, non_nbrs in zip( - grouped_sites, grouped_frac_coords, grouped_non_nbrs, strict=False + grouped_sites, grouped_frac_coords, grouped_non_nbrs, strict=True ): all_frac = np.dot(gf_coords, latt_mat) @@ -4395,7 +4395,7 @@ def translate_sites( else: f_coords = self._lattice.get_fractional_coords(site.coords + vector) if to_unit_cell: - f_coords = [np.mod(f, 1) if p else f for p, f in zip(self.lattice.pbc, f_coords, strict=False)] + f_coords = [np.mod(f, 1) if p else f for p, f in zip(self.lattice.pbc, f_coords, strict=True)] self[idx].frac_coords = f_coords return self diff --git a/src/pymatgen/core/surface.py b/src/pymatgen/core/surface.py index 6dfd69751f4..505b4f2d40d 100644 --- a/src/pymatgen/core/surface.py +++ b/src/pymatgen/core/surface.py @@ -557,7 +557,7 @@ def get_equi_index(site: PeriodicSite) -> int: frac_coords.append(struct_matcher.frac_coords + [0, 0, shift]) # noqa: RUF005 # sort by species to put all similar species together. 
- sp_fcoord = sorted(zip(species, frac_coords, strict=False), key=lambda x: x[0]) + sp_fcoord = sorted(zip(species, frac_coords, strict=True), key=lambda x: x[0]) species = [x[0] for x in sp_fcoord] frac_coords = [x[1] for x in sp_fcoord] slab = type(self)( diff --git a/src/pymatgen/core/tensors.py b/src/pymatgen/core/tensors.py index b3c96299abd..162d4229be4 100644 --- a/src/pymatgen/core/tensors.py +++ b/src/pymatgen/core/tensors.py @@ -57,7 +57,7 @@ def __new__( Args: input_array: (array-like with shape 3^N): array-like representing - a tensor quantity in standard (i. e. non-Voigt) notation + a tensor quantity in standard (i.e. non-Voigt) notation vscale: (N x M array-like): a matrix corresponding to the coefficients of the Voigt-notation tensor check_rank: (int): If not None, checks that input_array's rank == check_rank. @@ -181,7 +181,7 @@ def average_over_unit_sphere(self, quad: dict | None = None) -> Self: """ quad = quad or DEFAULT_QUAD weights, points = quad["weights"], quad["points"] - return sum(w * self.project(n) for w, n in zip(weights, points, strict=False)) + return sum(w * self.project(n) for w, n in zip(weights, points, strict=True)) def get_grouped_indices(self, voigt: bool = False, **kwargs) -> list[list]: """Get index sets for equivalent tensor values. @@ -208,11 +208,11 @@ def get_grouped_indices(self, voigt: bool = False, **kwargs) -> list[list]: indices = list(itertools.product(*(range(n) for n in array.shape))) remaining = indices.copy() # Start with everything near zero - grouped = [list(zip(*np.where(np.isclose(array, 0, **kwargs)), strict=False))] + grouped = [list(zip(*np.where(np.isclose(array, 0, **kwargs)), strict=True))] remaining = [i for i in remaining if i not in grouped[0]] # Iteratively run through remaining indices while remaining: - new = list(zip(*np.where(np.isclose(array, array[remaining[0]], **kwargs)), strict=False)) + new = list(zip(*np.where(np.isclose(array, array[remaining[0]], **kwargs)), strict=True)) grouped.append(new) remaining = [i for i in remaining if i not in new] # Don't return any empty lists @@ -283,7 +283,7 @@ def symmetrized(self) -> Self: @property def voigt_symmetrized(self) -> Self: - """A "voigt"-symmetrized tensor, i. e. a Voigt-notation + """A "voigt"-symmetrized tensor, i.e. a Voigt-notation tensor such that it is invariant w.r.t. permutation of indices. 
""" if self.rank % 2 != 0 or self.rank < 2: @@ -431,7 +431,7 @@ def get_ieee_rotation( # IEEE rules: a=b in length; c,a || x3, x1 elif xtal_sys == "tetragonal": rotation = np.array( - [vec / mag for (mag, vec) in sorted(zip(lengths, vecs, strict=False), key=lambda x: x[0])] + [vec / mag for (mag, vec) in sorted(zip(lengths, vecs, strict=True), key=lambda x: x[0])] ) if abs(lengths[2] - lengths[1]) < abs(lengths[1] - lengths[0]): rotation[0], rotation[2] = rotation[2], rotation[0].copy() @@ -439,7 +439,7 @@ def get_ieee_rotation( # IEEE rules: c float: units_old = sorted(old_base.items(), key=lambda d: _UNAME2UTYPE[d[0]]) factor: float = old_factor / new_factor - for old, new in zip(units_old, units_new, strict=False): + for old, new in zip(units_old, units_new, strict=True): if old[1] != new[1]: raise UnitError(f"Units {old} and {new} are not compatible!") c = ALL_UNITS[_UNAME2UTYPE[old[0]]] diff --git a/src/pymatgen/electronic_structure/bandstructure.py b/src/pymatgen/electronic_structure/bandstructure.py index e7b072a6adf..43e8dfa3116 100644 --- a/src/pymatgen/electronic_structure/bandstructure.py +++ b/src/pymatgen/electronic_structure/bandstructure.py @@ -332,7 +332,7 @@ def get_vbm(self) -> dict[str, Any]: max_tmp = -float("inf") index = kpoint_vbm = None for value in self.bands.values(): - for idx, j in zip(*np.where(value < self.efermi), strict=False): + for idx, j in zip(*np.where(value < self.efermi), strict=True): if value[idx, j] > max_tmp: max_tmp = float(value[idx, j]) index = j @@ -398,7 +398,7 @@ def get_cbm(self) -> dict[str, Any]: max_tmp = float("inf") index = kpoint_cbm = None for value in self.bands.values(): - for idx, j in zip(*np.where(value >= self.efermi), strict=False): + for idx, j in zip(*np.where(value >= self.efermi), strict=True): if value[idx, j] < max_tmp: max_tmp = float(value[idx, j]) index = j diff --git a/src/pymatgen/electronic_structure/boltztrap.py b/src/pymatgen/electronic_structure/boltztrap.py index b004e683ac8..3acc8fb7846 100644 --- a/src/pymatgen/electronic_structure/boltztrap.py +++ b/src/pymatgen/electronic_structure/boltztrap.py @@ -852,7 +852,7 @@ def get_symm_bands(self, structure: Structure, efermi, kpt_line=None, labels_dic ] labels_dict = { label: key - for key, label in zip(*kpath.get_kpoints(coords_are_cartesian=False), strict=False) + for key, label in zip(*kpath.get_kpoints(coords_are_cartesian=False), strict=True) if label } kpt_line = [kp.frac_coords for kp in kpt_line] @@ -1388,7 +1388,7 @@ def get_complexity_factor( cond_mass = self.get_average_eff_mass(output=output, doping_levels=True)[dt][temp] if output == "average": - cmplx_fact[dt] = [(m_s / abs(m_c)) ** 1.5 for m_s, m_c in zip(sbk_mass, cond_mass, strict=False)] + cmplx_fact[dt] = [(m_s / abs(m_c)) ** 1.5 for m_s, m_c in zip(sbk_mass, cond_mass, strict=True)] else: cmplx_fact[dt] = [] @@ -1403,7 +1403,7 @@ def get_complexity_factor( cond_mass = self.get_average_eff_mass(output=output, doping_levels=False)[temp] if output == "average": - return [(m_s / abs(m_c)) ** 1.5 for m_s, m_c in zip(sbk_mass, cond_mass, strict=False)] + return [(m_s / abs(m_c)) ** 1.5 for m_s, m_c in zip(sbk_mass, cond_mass, strict=True)] cmplx_fact_list: list = [] for i, sm in enumerate(sbk_mass): diff --git a/src/pymatgen/electronic_structure/boltztrap2.py b/src/pymatgen/electronic_structure/boltztrap2.py index add5eb11a85..243426505a3 100644 --- a/src/pymatgen/electronic_structure/boltztrap2.py +++ b/src/pymatgen/electronic_structure/boltztrap2.py @@ -554,7 +554,7 @@ def get_dos(self, 
partial_dos=False, npts_mu=10000, T=None, progress=False): spins = [Spin.up] energies = [] - for spin, eb, vvb in zip(spins, eband_ud, vvband_ud, strict=False): + for spin, eb, vvb in zip(spins, eband_ud, vvband_ud, strict=True): energies, densities, _vvdos, _cdos = BL.BTPDOS(eb, vvb, npts=npts_mu, erange=enr) if T: @@ -589,7 +589,7 @@ def get_partial_doses(self, tdos, eband_ud, spins, enr, npts_mu, T, progress): else: bar = None - for spin, eb in zip(spins, eband_ud, strict=False): + for spin, eb in zip(spins, eband_ud, strict=True): for idx, site in enumerate(self.data.structure): if site not in pdoss: pdoss[site] = {} diff --git a/src/pymatgen/electronic_structure/plotter.py b/src/pymatgen/electronic_structure/plotter.py index fed6ee72988..01d5a0355d7 100644 --- a/src/pymatgen/electronic_structure/plotter.py +++ b/src/pymatgen/electronic_structure/plotter.py @@ -197,7 +197,7 @@ def get_plot( else: x = energy y = densities - all_pts.extend(list(zip(x, y, strict=False))) + all_pts.extend(list(zip(x, y, strict=True))) if self.stack: ax.fill(x, y, color=colors[idx % n_colors], label=str(key)) elif spin == Spin.down and beta_dashed: @@ -237,7 +237,7 @@ def get_plot( # Remove duplicate labels with a dictionary handles, labels = ax.get_legend_handles_labels() - label_dict = dict(zip(labels, handles, strict=False)) + label_dict = dict(zip(labels, handles, strict=True)) ax.legend(label_dict.values(), label_dict) legend_text = ax.get_legend().get_texts() # all the text.Text instance in the legend plt.setp(legend_text, fontsize=30) @@ -336,7 +336,7 @@ def _make_ticks(self, ax: plt.Axes) -> plt.Axes: # Sanitize only plot the uniq values uniq_d = [] uniq_l = [] - temp_ticks = list(zip(ticks["distance"], ticks["label"], strict=False)) + temp_ticks = list(zip(ticks["distance"], ticks["label"], strict=True)) for idx, t in enumerate(temp_ticks): if idx == 0: uniq_d.append(t[0]) @@ -349,7 +349,7 @@ def _make_ticks(self, ax: plt.Axes) -> plt.Axes: uniq_d.append(t[0]) uniq_l.append(t[1]) - logger.debug(f"Unique labels are {list(zip(uniq_d, uniq_l, strict=False))}") + logger.debug(f"Unique labels are {list(zip(uniq_d, uniq_l, strict=True))}") ax.set_xticks(uniq_d) ax.set_xticklabels(uniq_l) @@ -385,7 +385,7 @@ def _rescale_distances(bs_ref, bs): """ scaled_distances = [] - for br, br2 in zip(bs_ref.branches, bs.branches, strict=False): + for br, br2 in zip(bs_ref.branches, bs.branches, strict=True): start = br["start_index"] end = br["end_index"] max_d = bs_ref.distance[end] @@ -531,7 +531,7 @@ def _interpolate_bands(distances, energies, smooth_tol=0, smooth_k=3, smooth_np= int_energies, int_distances = [], [] smooth_k_orig = smooth_k - for dist, ene in zip(distances, energies, strict=False): + for dist, ene in zip(distances, energies, strict=True): br_en = [] warning_nan = ( f"WARNING! 
Distance / branch, band cannot be " @@ -662,7 +662,7 @@ def get_plot( distances = np.split(distances, steps) energies = np.hsplit(energies, steps) - for dist, ene in zip(distances, energies, strict=False): + for dist, ene in zip(distances, energies, strict=True): ax.plot(dist, ene.T, c=colors[ibs], ls=ls) # plot markers for vbm and cbm @@ -2196,7 +2196,7 @@ def _make_ticks_selected(self, ax: plt.Axes, branches: list[int]) -> tuple[plt.A uniq_d = [] uniq_l = [] - temp_ticks = list(zip(f_distance, f_label, strict=False)) + temp_ticks = list(zip(f_distance, f_label, strict=True)) for idx, tick in enumerate(temp_ticks): if idx == 0: uniq_d.append(tick[0]) @@ -2209,7 +2209,7 @@ def _make_ticks_selected(self, ax: plt.Axes, branches: list[int]) -> tuple[plt.A uniq_d.append(tick[0]) uniq_l.append(tick[1]) - logger.debug(f"Unique labels are {list(zip(uniq_d, uniq_l, strict=False))}") + logger.debug(f"Unique labels are {list(zip(uniq_d, uniq_l, strict=True))}") ax.set_xticks(uniq_d) ax.set_xticklabels(uniq_l) @@ -2584,7 +2584,7 @@ def _rgbline(ax, k, e, red, green, blue, alpha=1, linestyles="solid") -> None: blue = [0.5 * (blue[i] + blue[i + 1]) for i in range(n_seg)] alpha = np.ones(n_seg, float) * alpha lc = LineCollection( - seg, colors=list(zip(red, green, blue, alpha, strict=False)), linewidth=2, linestyles=linestyles + seg, colors=list(zip(red, green, blue, alpha, strict=True)), linewidth=2, linestyles=linestyles ) ax.add_collection(lc) @@ -3136,7 +3136,7 @@ def plot_seebeck_temp(self, doping="all", output: Literal["average", "eigs"] = " for xyz in range(3): ax.plot( temperatures, - list(zip(*sbk_temp, strict=False))[xyz], + list(zip(*sbk_temp, strict=True))[xyz], marker="s", label=f"{xyz} {dop} $cm^{{-3}}$", ) @@ -3190,7 +3190,7 @@ def plot_conductivity_temp( for xyz in range(3): ax.plot( temperatures, - list(zip(*cond_temp, strict=False))[xyz], + list(zip(*cond_temp, strict=True))[xyz], marker="s", label=f"{xyz} {dop} $cm^{{-3}}$", ) @@ -3245,7 +3245,7 @@ def plot_power_factor_temp( for xyz in range(3): ax.plot( temperatures, - list(zip(*pf_temp, strict=False))[xyz], + list(zip(*pf_temp, strict=True))[xyz], marker="s", label=f"{xyz} {dop} $cm^{{-3}}$", ) @@ -3299,7 +3299,7 @@ def plot_zt_temp(self, doping="all", output: Literal["average", "eigs"] = "avera for xyz in range(3): ax.plot( temperatures, - list(zip(*zt_temp, strict=False))[xyz], + list(zip(*zt_temp, strict=True))[xyz], marker="s", label=f"{xyz} {dop} $cm^{{-3}}$", ) @@ -3348,7 +3348,7 @@ def plot_eff_mass_temp(self, doping="all", output: Literal["average", "eigs"] = for xyz in range(3): ax.plot( temperatures, - list(zip(*em_temp, strict=False))[xyz], + list(zip(*em_temp, strict=True))[xyz], marker="s", label=f"{xyz} {dop} $cm^{{-3}}$", ) @@ -3387,7 +3387,7 @@ def plot_seebeck_dop(self, temps="all", output: Literal["average", "eigs"] = "av for xyz in range(3): ax.semilogx( self._bz.doping[dop_type], - list(zip(*sbk[dop_type][temp], strict=False))[xyz], + list(zip(*sbk[dop_type][temp], strict=True))[xyz], marker="s", label=f"{xyz} {temp} K", ) @@ -3436,7 +3436,7 @@ def plot_conductivity_dop(self, temps="all", output: Literal["average", "eigs"] for xyz in range(3): ax.semilogx( self._bz.doping[dop_type], - list(zip(*cond[dop_type][temp], strict=False))[xyz], + list(zip(*cond[dop_type][temp], strict=True))[xyz], marker="s", label=f"{xyz} {temp} K", ) @@ -3488,7 +3488,7 @@ def plot_power_factor_dop(self, temps="all", output: Literal["average", "eigs"] for xyz in range(3): ax.semilogx( self._bz.doping[dop_type], - 
list(zip(*pow_factor[dop_type][temp], strict=False))[xyz], + list(zip(*pow_factor[dop_type][temp], strict=True))[xyz], marker="s", label=f"{xyz} {temp} K", ) @@ -3537,7 +3537,7 @@ def plot_zt_dop(self, temps="all", output: Literal["average", "eigs"] = "average for xyz in range(3): ax.semilogx( self._bz.doping[dop_type], - list(zip(*zt[dop_type][temp], strict=False))[xyz], + list(zip(*zt[dop_type][temp], strict=True))[xyz], marker="s", label=f"{xyz} {temp} K", ) @@ -3591,7 +3591,7 @@ def plot_eff_mass_dop(self, temps="all", output: Literal["average", "eigs"] = "a for xyz in range(3): ax.semilogx( self._bz.doping[dop_type], - list(zip(*em[dop_type][temp], strict=False))[xyz], + list(zip(*em[dop_type][temp], strict=True))[xyz], marker="s", label=f"{xyz} {temp} K", ) @@ -3809,7 +3809,7 @@ def get_plot( else: x = energies y = -populations[spin] if plot_negative else populations[spin] - allpts.extend(list(zip(x, y, strict=False))) + allpts.extend(list(zip(x, y, strict=True))) if spin == Spin.up: ax.plot( x, @@ -4004,7 +4004,7 @@ def plot_fermi_surface( and any(np.all(line[1] == x) for x in bz[jface]) ): mlab.plot3d( - *zip(line[0], line[1], strict=False), + *zip(line[0], line[1], strict=True), color=(0, 0, 0), tube_radius=None, figure=fig, @@ -4040,7 +4040,7 @@ def plot_fermi_surface( and any(np.all(line[1] == x) for x in bz[jface]) ): mlab.plot3d( - *zip(line[0], line[1], strict=False), + *zip(line[0], line[1], strict=True), color=(0, 0, 0), tube_radius=None, figure=fig, @@ -4115,7 +4115,7 @@ def plot_wigner_seitz(lattice, ax: plt.Axes = None, **kwargs): and any(np.all(line[0] == x) for x in bz[jface]) and any(np.all(line[1] == x) for x in bz[jface]) ): - ax.plot(*zip(line[0], line[1], strict=False), **kwargs) + ax.plot(*zip(line[0], line[1], strict=True), **kwargs) return fig, ax @@ -4141,11 +4141,11 @@ def plot_lattice_vectors(lattice, ax: plt.Axes = None, **kwargs): vertex1 = lattice.get_cartesian_coords([0.0, 0.0, 0.0]) vertex2 = lattice.get_cartesian_coords([1.0, 0.0, 0.0]) - ax.plot(*zip(vertex1, vertex2, strict=False), **kwargs) + ax.plot(*zip(vertex1, vertex2, strict=True), **kwargs) vertex2 = lattice.get_cartesian_coords([0.0, 1.0, 0.0]) - ax.plot(*zip(vertex1, vertex2, strict=False), **kwargs) + ax.plot(*zip(vertex1, vertex2, strict=True), **kwargs) vertex2 = lattice.get_cartesian_coords([0.0, 0.0, 1.0]) - ax.plot(*zip(vertex1, vertex2, strict=False), **kwargs) + ax.plot(*zip(vertex1, vertex2, strict=True), **kwargs) return fig, ax @@ -4181,7 +4181,7 @@ def plot_path(line, lattice=None, coords_are_cartesian=False, ax: plt.Axes = Non raise ValueError("coords_are_cartesian False requires the lattice") vertex1 = lattice.get_cartesian_coords(vertex1) vertex2 = lattice.get_cartesian_coords(vertex2) - ax.plot(*zip(vertex1, vertex2, strict=False), **kwargs) + ax.plot(*zip(vertex1, vertex2, strict=True), **kwargs) return fig, ax diff --git a/src/pymatgen/entries/correction_calculator.py b/src/pymatgen/entries/correction_calculator.py index 30b45c602e1..20a5ceffc50 100644 --- a/src/pymatgen/entries/correction_calculator.py +++ b/src/pymatgen/entries/correction_calculator.py @@ -267,7 +267,7 @@ def graph_residual_error(self) -> go.Figure: abs_errors = [abs(i) for i in self.diffs - np.dot(self.coeff_mat, self.corrections)] labels_graph = self.names.copy() abs_errors, labels_graph = ( - list(t) for t in zip(*sorted(zip(abs_errors, labels_graph, strict=False)), strict=False) + list(t) for t in zip(*sorted(zip(abs_errors, labels_graph, strict=True)), strict=True) ) # sort by error n_err = 
len(abs_errors) @@ -333,7 +333,7 @@ def graph_residual_error_per_species(self, specie: str) -> go.Figure: del abs_errors[n_species - idx - 1] del diffs_cpy[n_species - idx - 1] abs_errors, labels_species = ( - list(tup) for tup in zip(*sorted(zip(abs_errors, labels_species, strict=False)), strict=False) + list(tup) for tup in zip(*sorted(zip(abs_errors, labels_species, strict=True)), strict=True) ) # sort by error n_err = len(abs_errors) diff --git a/src/pymatgen/entries/entry_tools.py b/src/pymatgen/entries/entry_tools.py index 72213d99e8a..310dac83be5 100644 --- a/src/pymatgen/entries/entry_tools.py +++ b/src/pymatgen/entries/entry_tools.py @@ -46,7 +46,7 @@ def _perform_grouping(args): entries = json.loads(entries_json, cls=MontyDecoder) hosts = json.loads(hosts_json, cls=MontyDecoder) - unmatched = list(zip(entries, hosts, strict=False)) + unmatched = list(zip(entries, hosts, strict=True)) while len(unmatched) > 0: ref_host = unmatched[0][1] logger.info(f"Reference tid = {unmatched[0][0].entry_id}, formula = {ref_host.formula}") diff --git a/src/pymatgen/ext/matproj_legacy.py b/src/pymatgen/ext/matproj_legacy.py index 268f93b9bc6..b539380d01a 100644 --- a/src/pymatgen/ext/matproj_legacy.py +++ b/src/pymatgen/ext/matproj_legacy.py @@ -1393,7 +1393,7 @@ def get_wulff_shape(self, material_id): # Prefer reconstructed surfaces, which have lower surface energies. if (miller not in miller_energy_map) or surf["is_reconstructed"]: miller_energy_map[miller] = surf["surface_energy"] - millers, energies = zip(*miller_energy_map.items(), strict=False) + millers, energies = zip(*miller_energy_map.items(), strict=True) return WulffShape(lattice, millers, energies) def get_gb_data( diff --git a/src/pymatgen/ext/optimade.py b/src/pymatgen/ext/optimade.py index 2a5f1f6bda4..8052aaed49d 100644 --- a/src/pymatgen/ext/optimade.py +++ b/src/pymatgen/ext/optimade.py @@ -379,7 +379,7 @@ def _sanitize_symbol(symbol): def _get_comp(sp_dict): return { _sanitize_symbol(symbol): conc - for symbol, conc in zip(sp_dict["chemical_symbols"], sp_dict["concentration"], strict=False) + for symbol, conc in zip(sp_dict["chemical_symbols"], sp_dict["concentration"], strict=True) } for data in json["data"]: diff --git a/src/pymatgen/io/abinit/abitimer.py b/src/pymatgen/io/abinit/abitimer.py index 1e0226b16d2..53d309ad545 100644 --- a/src/pymatgen/io/abinit/abitimer.py +++ b/src/pymatgen/io/abinit/abitimer.py @@ -28,7 +28,7 @@ def alternate(*iterables): [1, 2, 3, 4, 5, 6]. 
""" items = [] - for tup in zip(*iterables, strict=False): + for tup in zip(*iterables, strict=True): items.extend(tup) return items @@ -267,11 +267,9 @@ def pefficiency(self): # Compute the parallel efficiency (total and section efficiency) peff = {} ctime_peff = [ - (min_ncpus * ref_t.wall_time) / (t.wall_time * ncp) for (t, ncp) in zip(timers, ncpus, strict=False) - ] - wtime_peff = [ - (min_ncpus * ref_t.cpu_time) / (t.cpu_time * ncp) for (t, ncp) in zip(timers, ncpus, strict=False) + (min_ncpus * ref_t.wall_time) / (t.wall_time * ncp) for (t, ncp) in zip(timers, ncpus, strict=True) ] + wtime_peff = [(min_ncpus * ref_t.cpu_time) / (t.cpu_time * ncp) for (t, ncp) in zip(timers, ncpus, strict=True)] n = len(timers) peff["total"] = {} @@ -285,11 +283,11 @@ def pefficiency(self): sects = [timer.get_section(sect_name) for timer in timers] try: ctime_peff = [ - (min_ncpus * ref_sect.cpu_time) / (s.cpu_time * ncp) for (s, ncp) in zip(sects, ncpus, strict=False) + (min_ncpus * ref_sect.cpu_time) / (s.cpu_time * ncp) for (s, ncp) in zip(sects, ncpus, strict=True) ] wtime_peff = [ (min_ncpus * ref_sect.wall_time) / (s.wall_time * ncp) - for (s, ncp) in zip(sects, ncpus, strict=False) + for (s, ncp) in zip(sects, ncpus, strict=True) ] except ZeroDivisionError: ctime_peff = n * [-1] @@ -738,7 +736,7 @@ def names_and_values(self, key, minval=None, minfract=None, sorted=True): # noq if minval is not None: assert minfract is None - for name, val in zip(names, values, strict=False): + for name, val in zip(names, values, strict=True): if val >= minval: new_names.append(name) new_values.append(val) @@ -753,7 +751,7 @@ def names_and_values(self, key, minval=None, minfract=None, sorted=True): # noq total = self.sum_sections(key) - for name, val in zip(names, values, strict=False): + for name, val in zip(names, values, strict=True): if val / total >= minfract: new_names.append(name) new_values.append(val) @@ -769,7 +767,7 @@ def names_and_values(self, key, minval=None, minfract=None, sorted=True): # noq if sorted: # Sort new_values and rearrange new_names. - nandv = list(zip(new_names, new_values, strict=False)) + nandv = list(zip(new_names, new_values, strict=True)) nandv.sort(key=lambda t: t[1]) new_names, new_values = [n[0] for n in nandv], [n[1] for n in nandv] diff --git a/src/pymatgen/io/abinit/inputs.py b/src/pymatgen/io/abinit/inputs.py index 643076581df..d84a097b8ba 100644 --- a/src/pymatgen/io/abinit/inputs.py +++ b/src/pymatgen/io/abinit/inputs.py @@ -1078,7 +1078,7 @@ def __init__(self, structure: Structure | Sequence[Structure], pseudos, pseudo_d def from_inputs(cls, inputs: list[BasicAbinitInput]) -> Self: """Construct a multidataset from a list of BasicAbinitInputs.""" for inp in inputs: - if any(p1 != p2 for p1, p2 in zip(inputs[0].pseudos, inp.pseudos, strict=False)): + if any(p1 != p2 for p1, p2 in zip(inputs[0].pseudos, inp.pseudos, strict=True)): raise ValueError("Pseudos must be consistent when from_inputs is invoked.") # Build BasicMultiDataset from input structures and pseudos and add inputs. 
@@ -1089,7 +1089,7 @@ def from_inputs(cls, inputs: list[BasicAbinitInput]) -> Self: ) # Add variables - for inp, new_inp in zip(inputs, multi, strict=False): + for inp, new_inp in zip(inputs, multi, strict=True): new_inp.set_vars(**inp) return multi @@ -1184,7 +1184,7 @@ def __radd__(self, other): def append(self, abinit_input): """Add a BasicAbinitInput to the list.""" assert isinstance(abinit_input, BasicAbinitInput) - if any(p1 != p2 for p1, p2 in zip(abinit_input.pseudos, abinit_input.pseudos, strict=False)): + if any(p1 != p2 for p1, p2 in zip(abinit_input.pseudos, abinit_input.pseudos, strict=True)): raise ValueError("Pseudos must be consistent when from_inputs is invoked.") self._inputs.append(abinit_input) @@ -1192,7 +1192,7 @@ def extend(self, abinit_inputs): """Extends self with a list of BasicAbinitInputs.""" assert all(isinstance(inp, BasicAbinitInput) for inp in abinit_inputs) for inp in abinit_inputs: - if any(p1 != p2 for p1, p2 in zip(self[0].pseudos, inp.pseudos, strict=False)): + if any(p1 != p2 for p1, p2 in zip(self[0].pseudos, inp.pseudos, strict=True)): raise ValueError("Pseudos must be consistent when from_inputs is invoked.") self._inputs.extend(abinit_inputs) diff --git a/src/pymatgen/io/abinit/pseudos.py b/src/pymatgen/io/abinit/pseudos.py index a0c63ab3a55..ccd0658f90b 100644 --- a/src/pymatgen/io/abinit/pseudos.py +++ b/src/pymatgen/io/abinit/pseudos.py @@ -623,7 +623,7 @@ def _dict_from_lines(lines, key_nums, sep=None) -> dict: if len(values) != len(keys): raise ValueError(f"{line=}\n {len(keys)=} must equal {len(values)=}") - kwargs.update(zip(keys, values, strict=False)) + kwargs.update(zip(keys, values, strict=True)) return kwargs diff --git a/src/pymatgen/io/aims/inputs.py b/src/pymatgen/io/aims/inputs.py index 85dcb336c45..344cc425bc0 100644 --- a/src/pymatgen/io/aims/inputs.py +++ b/src/pymatgen/io/aims/inputs.py @@ -150,7 +150,7 @@ def from_structure(cls, structure: Structure | Molecule) -> Self: magmoms = structure.site_properties.get("magmom", np.zeros(len(structure.species))) velocities = structure.site_properties.get("velocity", [None for _ in structure.species]) for species, coord, charge, magmom, v in zip( - structure.species, structure.cart_coords, charges, magmoms, velocities, strict=False + structure.species, structure.cart_coords, charges, magmoms, velocities, strict=True ): content_lines.append(f"atom {coord[0]: .12e} {coord[1]: .12e} {coord[2]: .12e} {species}") if charge != 0: @@ -799,7 +799,7 @@ def from_structure(cls, struct: Structure | Molecule, basis_set: str | dict[str, """Initialize species defaults from a structure.""" labels = [] elements = {} - for label, el in sorted(zip(struct.labels, struct.species, strict=False)): + for label, el in sorted(zip(struct.labels, struct.species, strict=True)): if not isinstance(el, Element): raise TypeError("FHI-aims does not support fractional compositions") if (label is None) or (el is None): diff --git a/src/pymatgen/io/aims/sets/bs.py b/src/pymatgen/io/aims/sets/bs.py index f4dfaf88269..275b294d5c6 100644 --- a/src/pymatgen/io/aims/sets/bs.py +++ b/src/pymatgen/io/aims/sets/bs.py @@ -31,7 +31,7 @@ def prepare_band_input(structure: Structure, density: float = 20): points, labels = bp.get_kpoints(line_density=density, coords_are_cartesian=False) lines_and_labels: list[_SegmentDict] = [] current_segment: _SegmentDict | None = None - for label_, coords in zip(labels, points, strict=False): + for label_, coords in zip(labels, points, strict=True): # rename the Gamma point label label = "G" if label_ 
in ("GAMMA", "\\Gamma", "Γ") else label_ if label: diff --git a/src/pymatgen/io/cif.py b/src/pymatgen/io/cif.py index 34f88ebb86b..46f820497f9 100644 --- a/src/pymatgen/io/cif.py +++ b/src/pymatgen/io/cif.py @@ -105,7 +105,7 @@ def _loop_to_str(self, loop: list[str]) -> str: for line in loop: out += "\n " + line - for fields in zip(*(self.data[k] for k in loop), strict=False): + for fields in zip(*(self.data[k] for k in loop), strict=True): line = "\n" for val in map(self._format_field, fields): if val[0] == ";": @@ -228,7 +228,7 @@ def from_str(cls, string: str) -> Self: n = len(items) // len(columns) assert len(items) % n == 0 loops.append(columns) - for k, v in zip(columns * n, items, strict=False): + for k, v in zip(columns * n, items, strict=True): data[k].append(v.strip()) elif issue := "".join(_str).strip(): @@ -610,7 +610,7 @@ def _unique_coords( raise ValueError("Length of magmoms and coords don't match.") magmoms_out: list[Magmom] = [] - for tmp_coord, tmp_magmom in zip(coords, magmoms, strict=False): + for tmp_coord, tmp_magmom in zip(coords, magmoms, strict=True): for op in self.symmetry_operations: coord = op.operate(tmp_coord) coord = np.array([i - math.floor(i) for i in coord]) diff --git a/src/pymatgen/io/cp2k/inputs.py b/src/pymatgen/io/cp2k/inputs.py index a7294532557..0082f0ced2c 100644 --- a/src/pymatgen/io/cp2k/inputs.py +++ b/src/pymatgen/io/cp2k/inputs.py @@ -172,7 +172,7 @@ def from_str(cls, s: str) -> Self: units = re.findall(r"\[(.*)\]", s) or [None] s = re.sub(r"\[(.*)\]", "", s) args: list[Any] = s.split() - args = list(map(postprocessor if args[0].upper() != "ELEMENT" else str, args)) # type: ignore[call-overload] + args = list(map(postprocessor if args[0].upper() != "ELEMENT" else str, args)) args[0] = str(args[0]) return cls(*args, units=units[0], description=description) @@ -204,7 +204,7 @@ def __str__(self): def __eq__(self, other: object) -> bool: if not isinstance(other, type(self)): return NotImplemented - return all(k == o for k, o in zip(self.keywords, other.keywords, strict=False)) + return all(k == o for k, o in zip(self.keywords, other.keywords, strict=True)) def __add__(self, other): return self.extend(other) @@ -611,7 +611,7 @@ def __str__(self): def __eq__(self, other: object) -> bool: if not isinstance(other, SectionList): return NotImplemented - return all(k == o for k, o in zip(self.sections, other.sections, strict=False)) + return all(k == o for k, o in zip(self.sections, other.sections, strict=True)) def __add__(self, other): self.append(other) @@ -1968,7 +1968,7 @@ def __init__( keywords["SCHEME"] = Keyword("SCHEME", scheme, *kpts[0]) elif len(kpts) > 1: keywords["KPOINT"] = KeywordList( - [Keyword("KPOINT", *k, w) for k, w in zip(self.kpts, self.weights, strict=False)] + [Keyword("KPOINT", *k, w) for k, w in zip(self.kpts, self.weights, strict=True)] ) else: raise ValueError("No k-points provided!") @@ -2035,7 +2035,7 @@ def from_kpoints(cls, kpoints: VaspKpoints, structure: Structure | None = None) scheme = "GAMMA" else: sga = SpacegroupAnalyzer(structure) - _kpts, weights = zip(*sga.get_ir_reciprocal_mesh(mesh=kpts), strict=False) # type: ignore[arg-type] + _kpts, weights = zip(*sga.get_ir_reciprocal_mesh(mesh=kpts), strict=True) # type: ignore[arg-type] kpts = list(itertools.chain.from_iterable(_kpts)) scheme = "GENERAL" @@ -2142,7 +2142,7 @@ def from_kpoints(cls, kpoints: VaspKpoints, kpoints_line_density: int = 20) -> S def pairwise(iterable): a = iter(iterable) - return zip(a, a, strict=False) + return zip(a, a, strict=True) 
kpoint_sets = [ KpointSet( @@ -2150,7 +2150,7 @@ def pairwise(iterable): kpoints=[(lbls[0], kpts[0]), (lbls[1], kpts[1])], units="B_VECTOR", ) - for lbls, kpts in zip(pairwise(kpoints.labels), pairwise(kpoints.kpts), strict=False) + for lbls, kpts in zip(pairwise(kpoints.labels), pairwise(kpoints.kpts), strict=True) ] elif kpoints.style in ( KpointsSupportedModes.Reciprocal, diff --git a/src/pymatgen/io/cp2k/outputs.py b/src/pymatgen/io/cp2k/outputs.py index 7766acea1a8..9b07461f761 100644 --- a/src/pymatgen/io/cp2k/outputs.py +++ b/src/pymatgen/io/cp2k/outputs.py @@ -297,7 +297,7 @@ def parse_structures(self, trajectory_file=None, lattice_file=None): self.structures = [] gs = self.initial_structure.site_properties.get("ghost") if not self.is_molecule: - for mol, latt in zip(mols, lattices, strict=False): + for mol, latt in zip(mols, lattices, strict=True): self.structures.append( Structure( lattice=latt, @@ -520,7 +520,7 @@ def parse_ionic_steps(self): if not self.data.get("stress_tensor"): self.parse_stresses() - for i, (structure, energy) in enumerate(zip(self.structures, self.data.get("total_energy"), strict=False)): + for i, (structure, energy) in enumerate(zip(self.structures, self.data.get("total_energy"), strict=True)): self.ionic_steps.append( { "structure": structure, @@ -629,7 +629,7 @@ def parse_dft_params(self): for _possible, _name in zip( ["RVV10", "LMKLL", "DRSLL", "DFT-D3", "DFT-D2"], ["RVV10", "LMKLL", "DRSLL", "D3", "D2"], - strict=False, + strict=True, ): if _possible in ll[0]: found = _name @@ -715,7 +715,7 @@ def parse_cell_params(self): reverse=False, ) i = iter(self.data["lattice"]) - lattices = list(zip(i, i, i, strict=False)) + lattices = list(zip(i, i, i, strict=True)) return lattices[0] def parse_atomic_kind_info(self): @@ -1463,7 +1463,7 @@ def _gauss_smear(densities, energies, npts, width): dct = np.zeros(npts) e_s = np.linspace(min(energies), max(energies), npts) - for e, _pd in zip(energies, densities, strict=False): + for e, _pd in zip(energies, densities, strict=True): weight = np.exp(-(((e_s - e) / width) ** 2)) / (np.sqrt(np.pi) * width) dct += _pd * weight diff --git a/src/pymatgen/io/cp2k/sets.py b/src/pymatgen/io/cp2k/sets.py index 2004c647af5..d08fc0304b1 100644 --- a/src/pymatgen/io/cp2k/sets.py +++ b/src/pymatgen/io/cp2k/sets.py @@ -1075,7 +1075,7 @@ def activate_motion( "LIST": Keyword("LIST", f"{t[0]}..{t[1]}"), }, ) - for t, c in zip(tuples, components, strict=False) + for t, c in zip(tuples, components, strict=True) if c ] ) diff --git a/src/pymatgen/io/feff/inputs.py b/src/pymatgen/io/feff/inputs.py index 0f50d518a98..38cb10b5d23 100644 --- a/src/pymatgen/io/feff/inputs.py +++ b/src/pymatgen/io/feff/inputs.py @@ -677,7 +677,7 @@ def from_file(cls, filename: str = "feff.inp") -> Self: else: eels_keys = ["BEAM_ENERGY", "ANGLES", "MESH", "POSITION"] eels_dict = {"ENERGY": Tags._stringify_val(eels_params[0].split()[1:])} - for k, v in zip(eels_keys, eels_params[1:], strict=False): + for k, v in zip(eels_keys, eels_params[1:], strict=True): eels_dict[k] = str(v) params[str(eels_params[0].split()[0])] = eels_dict diff --git a/src/pymatgen/io/gaussian.py b/src/pymatgen/io/gaussian.py index 7b24a27862d..4b4df3fe5bb 100644 --- a/src/pymatgen/io/gaussian.py +++ b/src/pymatgen/io/gaussian.py @@ -836,23 +836,23 @@ def _parse(self, filename): while "Atom AN" not in line: if "Frequencies --" in line: freqs = map(float, float_patt.findall(line)) - for ifreq, freq in zip(ifreqs, freqs, strict=False): + for ifreq, freq in zip(ifreqs, freqs, strict=True): 
frequencies[ifreq]["frequency"] = freq elif "Red. masses --" in line: r_masses = map(float, float_patt.findall(line)) - for ifreq, r_mass in zip(ifreqs, r_masses, strict=False): + for ifreq, r_mass in zip(ifreqs, r_masses, strict=True): frequencies[ifreq]["r_mass"] = r_mass elif "Frc consts --" in line: f_consts = map(float, float_patt.findall(line)) - for ifreq, f_const in zip(ifreqs, f_consts, strict=False): + for ifreq, f_const in zip(ifreqs, f_consts, strict=True): frequencies[ifreq]["f_constant"] = f_const elif "IR Inten --" in line: IR_intens = map(float, float_patt.findall(line)) - for ifreq, intens in zip(ifreqs, IR_intens, strict=False): + for ifreq, intens in zip(ifreqs, IR_intens, strict=True): frequencies[ifreq]["IR_intensity"] = intens else: syms = line.split()[:3] - for ifreq, sym in zip(ifreqs, syms, strict=False): + for ifreq, sym in zip(ifreqs, syms, strict=True): frequencies[ifreq]["symmetry"] = sym line = file.readline() @@ -860,7 +860,7 @@ def _parse(self, filename): line = file.readline() while normal_mode_patt.search(line): values = list(map(float, float_patt.findall(line))) - for idx, ifreq in zip(range(0, len(values), 3), ifreqs, strict=False): + for idx, ifreq in zip(range(0, len(values), 3), ifreqs, strict=True): frequencies[ifreq]["mode"].extend(values[idx : idx + 3]) line = file.readline() diff --git a/src/pymatgen/io/lammps/data.py b/src/pymatgen/io/lammps/data.py index 4f78cb84c17..03a3fabb88b 100644 --- a/src/pymatgen/io/lammps/data.py +++ b/src/pymatgen/io/lammps/data.py @@ -161,7 +161,7 @@ def get_str(self, significant_figures: int = 6) -> str: """ ph = f"{{:.{significant_figures}f}}" lines = [] - for bound, d in zip(self.bounds, "xyz", strict=False): + for bound, d in zip(self.bounds, "xyz", strict=True): fillers = bound + [d] * 2 bound_format = " ".join([ph] * 2 + [" {}lo {}hi"]) lines.append(bound_format.format(*fillers)) @@ -525,7 +525,7 @@ def disassemble( symbols = [Element(symbols[idx]).symbol for idx in np.argmin(diff, axis=1)] else: symbols = [f"Q{a}" for a in map(chr, range(97, 97 + len(unique_masses)))] - for um, s in zip(unique_masses, symbols, strict=False): + for um, s in zip(unique_masses, symbols, strict=True): masses.loc[masses["mass"] == um, "element"] = s if atom_labels is None: # add unique labels based on elements for el, vc in masses["element"].value_counts().items(): @@ -1007,7 +1007,7 @@ def from_bonding( topologies = { k: v - for k, v in zip(SECTION_KEYWORDS["topology"][:3], [bond_list, angle_list, dihedral_list], strict=False) + for k, v in zip(SECTION_KEYWORDS["topology"][:3], [bond_list, angle_list, dihedral_list], strict=True) if len(v) > 0 } or None return cls(sites=molecule, topologies=topologies, **kwargs) @@ -1478,7 +1478,7 @@ def get_str(self, distance: int = 6, velocity: int = 8, charge: int = 4, hybrid: lines = LammpsData.get_str(self, distance, velocity, charge, hybrid).splitlines() info = "# " + " + ".join( f"{a} {b}" if c == 1 else f"{a}({c}) {b}" - for a, b, c in zip(self.nums, self.names, self.mols_per_data, strict=False) + for a, b, c in zip(self.nums, self.names, self.mols_per_data, strict=True) ) lines.insert(1, info) return "\n".join(lines) diff --git a/src/pymatgen/io/lammps/outputs.py b/src/pymatgen/io/lammps/outputs.py index 271e361ccd4..69121ef3093 100644 --- a/src/pymatgen/io/lammps/outputs.py +++ b/src/pymatgen/io/lammps/outputs.py @@ -184,6 +184,6 @@ def _parse_thermo(lines: list[str]) -> pd.DataFrame: return df runs = [] - for b, e in zip(begins, ends, strict=False): + for b, e in zip(begins, ends, 
strict=True): runs.append(_parse_thermo(lines[b + 1 : e])) return runs diff --git a/src/pymatgen/io/lobster/lobsterenv.py b/src/pymatgen/io/lobster/lobsterenv.py index 65916a7e9b8..dcc91b28003 100644 --- a/src/pymatgen/io/lobster/lobsterenv.py +++ b/src/pymatgen/io/lobster/lobsterenv.py @@ -243,7 +243,7 @@ def anion_types(self) -> set[Element]: raise ValueError("No cations and anions defined") anion_species = [] - for site, val in zip(self.structure, self.valences, strict=False): + for site, val in zip(self.structure, self.valences, strict=True): if val < 0.0: anion_species.append(site.specie) @@ -413,7 +413,7 @@ def get_info_icohps_to_neighbors(self, isites=None, onlycation_isites=True): final_isites = [] for ival, _site in enumerate(self.structure): if ival in isites: - for keys, icohpsum in zip(self.list_keys[ival], self.list_icohps[ival], strict=False): + for keys, icohpsum in zip(self.list_keys[ival], self.list_icohps[ival], strict=True): summed_icohps += icohpsum list_icohps.append(icohpsum) labels.append(keys) @@ -559,7 +559,7 @@ def get_info_cohps_to_neighbors( # iterate through labels and atoms and check which bonds can be included new_labels = [] new_atoms = [] - for key, atompair, isite in zip(labels, atoms, final_isites, strict=False): + for key, atompair, isite in zip(labels, atoms, final_isites, strict=True): present = False for atomtype in only_bonds_to: # This is necessary to identify also bonds between the same elements correctly! diff --git a/src/pymatgen/io/lobster/outputs.py b/src/pymatgen/io/lobster/outputs.py index 26e74ea67da..5f6fea7e7ea 100644 --- a/src/pymatgen/io/lobster/outputs.py +++ b/src/pymatgen/io/lobster/outputs.py @@ -2115,7 +2115,7 @@ def _parse_matrix(file_data, pattern, e_fermi): matrix_real = [] matrix_imag = [] for start_inx_real, end_inx_real, start_inx_imag, end_inx_imag in zip( - start_inxs_real, end_inxs_real, start_inxs_imag, end_inxs_imag, strict=False + start_inxs_real, end_inxs_real, start_inxs_imag, end_inxs_imag, strict=True ): # matrix with text headers matrix_real = file_data[start_inx_real:end_inx_real] @@ -2148,7 +2148,7 @@ def _parse_matrix(file_data, pattern, e_fermi): # get a dict with basis functions as keys and average values as values average_average_matrix_diag_dict = dict( - zip(elements_basis_functions, average_matrix_diagonal_values, strict=False) + zip(elements_basis_functions, average_matrix_diagonal_values, strict=True) ) return matrix_diagonal_values, average_average_matrix_diag_dict, complex_matrices diff --git a/src/pymatgen/io/nwchem.py b/src/pymatgen/io/nwchem.py index c610195b3d2..5aa772591af 100644 --- a/src/pymatgen/io/nwchem.py +++ b/src/pymatgen/io/nwchem.py @@ -767,7 +767,7 @@ def isfloatstring(in_str): else: vibs = [float(vib) for vib in line.strip().split()[1:]] n_vibs = len(vibs) - for mode, dis in zip(normal_frequencies[-n_vibs:], vibs, strict=False): + for mode, dis in zip(normal_frequencies[-n_vibs:], vibs, strict=True): mode[1].append(dis) elif parse_projected_freq: @@ -778,7 +778,7 @@ def isfloatstring(in_str): else: vibs = [float(vib) for vib in line.strip().split()[1:]] n_vibs = len(vibs) - for mode, dis in zip(frequencies[-n_vibs:], vibs, strict=False): + for mode, dis in zip(frequencies[-n_vibs:], vibs, strict=True): mode[1].append(dis) elif parse_bset: @@ -787,7 +787,7 @@ def isfloatstring(in_str): else: tokens = line.split() if tokens[0] != "Tag" and not re.match(r"-+", tokens[0]): - basis_set[tokens[0]] = dict(zip(bset_header[1:], tokens[1:], strict=False)) + basis_set[tokens[0]] = 
dict(zip(bset_header[1:], tokens[1:], strict=True)) elif tokens[0] == "Tag": bset_header = tokens bset_header.pop(4) @@ -895,10 +895,10 @@ def isfloatstring(in_str): if frequencies: for _freq, mode in frequencies: - mode[:] = zip(*[iter(mode)] * 3, strict=False) + mode[:] = zip(*[iter(mode)] * 3, strict=True) if normal_frequencies: for _freq, mode in normal_frequencies: - mode[:] = zip(*[iter(mode)] * 3, strict=False) + mode[:] = zip(*[iter(mode)] * 3, strict=True) if hessian: len_hess = len(hessian) for ii in range(len_hess): diff --git a/src/pymatgen/io/optimade.py b/src/pymatgen/io/optimade.py index c7785f8ac2d..4f098110e44 100644 --- a/src/pymatgen/io/optimade.py +++ b/src/pymatgen/io/optimade.py @@ -56,7 +56,7 @@ def _pymatgen_species( chemical_symbols.append(symbol) concentration.append(current_species["concentration"][index]) - pymatgen_species.append(dict(zip(chemical_symbols, concentration, strict=False))) + pymatgen_species.append(dict(zip(chemical_symbols, concentration, strict=True))) return pymatgen_species @@ -98,9 +98,9 @@ def _optimade_reduce_or_anonymize_formula(formula: str, alphabetize: bool = True species = [s for _, s in zip(numbers, _optimade_anonymous_element_generator(), strict=False)] elif alphabetize: - species, numbers = zip(*sorted(zip(species, numbers, strict=False)), strict=False) # type: ignore[assignment] + species, numbers = zip(*sorted(zip(species, numbers, strict=True)), strict=True) # type: ignore[assignment] - return "".join(f"{s}{n if n != 1 else ''}" for n, s in zip(numbers, species, strict=False)) + return "".join(f"{s}{n if n != 1 else ''}" for n, s in zip(numbers, species, strict=True)) class OptimadeStructureAdapter: diff --git a/src/pymatgen/io/phonopy.py b/src/pymatgen/io/phonopy.py index 492765c40e4..fe9eee93219 100644 --- a/src/pymatgen/io/phonopy.py +++ b/src/pymatgen/io/phonopy.py @@ -224,7 +224,7 @@ def get_complete_ph_dos(partial_dos_path, phonopy_yaml_path): total_dos = PhononDos(arr[0], arr[1:].sum(axis=0)) partial_doses = {} - for site, p_dos in zip(structure, arr[1:], strict=False): + for site, p_dos in zip(structure, arr[1:], strict=True): partial_doses[site] = p_dos.tolist() return CompletePhononDos(structure, total_dos, partial_doses) @@ -330,7 +330,7 @@ def get_phonon_dos_from_fc( phonon.run_projected_dos(freq_min=freq_min, freq_max=freq_max, freq_pitch=freq_pitch) dos_raw = phonon.projected_dos.get_partial_dos() - p_doses = dict(zip(structure, dos_raw[1], strict=False)) + p_doses = dict(zip(structure, dos_raw[1], strict=True)) total_dos = PhononDos(dos_raw[0], dos_raw[1].sum(axis=0)) return CompletePhononDos(structure, total_dos, p_doses) @@ -403,7 +403,7 @@ def get_phonon_band_structure_symm_line_from_fc( phonon.run_qpoints(kpoints) frequencies = phonon.qpoints.get_frequencies().T - labels_dict = {a: k for a, k in zip(labels, kpoints, strict=False) if a != ""} + labels_dict = {a: k for a, k in zip(labels, kpoints, strict=True) if a != ""} return PhononBandStructureSymmLine(kpoints, frequencies, structure.lattice, labels_dict=labels_dict) diff --git a/src/pymatgen/io/qchem/inputs.py b/src/pymatgen/io/qchem/inputs.py index db5e660169f..4fbc69c9514 100644 --- a/src/pymatgen/io/qchem/inputs.py +++ b/src/pymatgen/io/qchem/inputs.py @@ -694,7 +694,7 @@ def cdft_template(cdft: list[list[dict]]) -> str: constraint["first_atoms"], constraint["last_atoms"], type_strings, - strict=False, + strict=True, ): if type_string != "": cdft_list.append(f" {coef} {first} {last} {type_string}") @@ -848,7 +848,7 @@ def read_molecule(string: str) -> 
Molecule | list[Molecule] | Literal["read"]: matches = read_pattern(string, patterns) mol_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer) - for match, table in zip(matches.get("charge_spin"), mol_table, strict=False): + for match, table in zip(matches.get("charge_spin"), mol_table, strict=True): charge = int(match[0]) spin = int(match[1]) species = [val[0] for val in table] diff --git a/src/pymatgen/io/qchem/outputs.py b/src/pymatgen/io/qchem/outputs.py index 9e2015b56ef..84b7efdfcbe 100644 --- a/src/pymatgen/io/qchem/outputs.py +++ b/src/pymatgen/io/qchem/outputs.py @@ -1979,8 +1979,8 @@ def _read_almo_msdft(self): spins_2 = [int(r.strip()) for r in temp_dict["states"][0][3].strip().split("\n")] self.data["almo_coupling_states"] = [ - [[i, j] for i, j in zip(charges_1, spins_1, strict=False)], - [[i, j] for i, j in zip(charges_2, spins_2, strict=False)], + [[i, j] for i, j in zip(charges_1, spins_1, strict=True)], + [[i, j] for i, j in zip(charges_2, spins_2, strict=True)], ] # State energies diff --git a/src/pymatgen/io/shengbte.py b/src/pymatgen/io/shengbte.py index a20f15a8ad7..21cf4103231 100644 --- a/src/pymatgen/io/shengbte.py +++ b/src/pymatgen/io/shengbte.py @@ -213,7 +213,7 @@ def from_structure(cls, structure: Structure, reciprocal_density: int | None = 5 elements = list(map(str, structure.elements)) unique_nums = np.unique(structure.atomic_numbers) - types_dict = dict(zip(unique_nums, range(len(unique_nums)), strict=False)) + types_dict = dict(zip(unique_nums, range(len(unique_nums)), strict=True)) types = [types_dict[i] + 1 for i in structure.atomic_numbers] control_dict = { @@ -250,7 +250,7 @@ def get_structure(self) -> Structure: unique_elements = self["elements"] n_unique_elements = len(unique_elements) - element_map = dict(zip(range(1, n_unique_elements + 1), unique_elements, strict=False)) + element_map = dict(zip(range(1, n_unique_elements + 1), unique_elements, strict=True)) species = [element_map[i] for i in self["types"]] cell = np.array(self["lattvec"]) diff --git a/src/pymatgen/io/vasp/outputs.py b/src/pymatgen/io/vasp/outputs.py index 4ade52f6c9a..b8e503d0060 100644 --- a/src/pymatgen/io/vasp/outputs.py +++ b/src/pymatgen/io/vasp/outputs.py @@ -602,7 +602,7 @@ def optical_absorb_coeff(freq: float, real: float, imag: float) -> float: return list( itertools.starmap( - optical_absorb_coeff, zip(self.dielectric_data["density"][0], real_avg, imag_avg, strict=False) + optical_absorb_coeff, zip(self.dielectric_data["density"][0], real_avg, imag_avg, strict=True) ) ) return None @@ -1024,7 +1024,7 @@ def get_band_structure( "A band structure along symmetry lines requires a label " "for each kpoint. 
Check your KPOINTS file" ) - labels_dict = dict(zip(kpoint_file.labels, kpoint_file.kpts, strict=False)) + labels_dict = dict(zip(kpoint_file.labels, kpoint_file.kpts, strict=True)) labels_dict.pop(None, None) # type: ignore[call-overload] return BandStructureSymmLine( @@ -1991,13 +1991,13 @@ def __init__(self, filename: PathLike) -> None: tokens = [float(i) for i in re.findall(r"[\d\.\-]+", clean)] tokens.pop(0) if read_charge: - charge.append(dict(zip(header, tokens, strict=False))) + charge.append(dict(zip(header, tokens, strict=True))) elif read_mag_x: - mag_x.append(dict(zip(header, tokens, strict=False))) + mag_x.append(dict(zip(header, tokens, strict=True))) elif read_mag_y: - mag_y.append(dict(zip(header, tokens, strict=False))) + mag_y.append(dict(zip(header, tokens, strict=True))) elif read_mag_z: - mag_z.append(dict(zip(header, tokens, strict=False))) + mag_z.append(dict(zip(header, tokens, strict=True))) elif clean.startswith("tot"): read_charge = False read_mag_x = False @@ -3221,7 +3221,7 @@ def zvals(results, match): micro_pyawk(self.filename, search, self) - self.zval_dict = dict(zip(self.atom_symbols, self.zvals, strict=False)) # type: ignore[attr-defined] + self.zval_dict = dict(zip(self.atom_symbols, self.zvals, strict=True)) # type: ignore[attr-defined] # Clean up del self.atom_symbols # type: ignore[attr-defined] diff --git a/src/pymatgen/io/xcrysden.py b/src/pymatgen/io/xcrysden.py index 04858c36d9a..21606474193 100644 --- a/src/pymatgen/io/xcrysden.py +++ b/src/pymatgen/io/xcrysden.py @@ -42,7 +42,7 @@ def to_str(self, atom_symbol: bool = True) -> str: cart_coords = self.structure.cart_coords lines.extend(("# Cartesian coordinates in Angstrom.", "PRIMCOORD", f" {len(cart_coords)} 1")) - for site, coord in zip(self.structure, cart_coords, strict=False): + for site, coord in zip(self.structure, cart_coords, strict=True): sp = site.specie.symbol if atom_symbol else f"{site.specie.Z}" x, y, z = coord lines.append(f"{sp} {x:20.14f} {y:20.14f} {z:20.14f}") diff --git a/src/pymatgen/phonon/dos.py b/src/pymatgen/phonon/dos.py index 0a94285e526..cdef8c7dd36 100644 --- a/src/pymatgen/phonon/dos.py +++ b/src/pymatgen/phonon/dos.py @@ -621,7 +621,7 @@ def from_dict(cls, dct: dict) -> Self: """Get CompleteDos object from dict representation.""" total_dos = PhononDos.from_dict(dct) struct = Structure.from_dict(dct["structure"]) - ph_doses = dict(zip(struct, dct["pdos"], strict=False)) + ph_doses = dict(zip(struct, dct["pdos"], strict=True)) return cls(struct, total_dos, ph_doses) diff --git a/src/pymatgen/phonon/plotter.py b/src/pymatgen/phonon/plotter.py index 729c82216cc..af6ffc6faad 100644 --- a/src/pymatgen/phonon/plotter.py +++ b/src/pymatgen/phonon/plotter.py @@ -189,7 +189,7 @@ def get_plot( all_frequencies.reverse() all_pts = [] colors = ("blue", "red", "green", "orange", "purple", "brown", "pink", "gray", "olive") - for idx, (key, frequencies, densities) in enumerate(zip(keys, all_frequencies, all_densities, strict=False)): + for idx, (key, frequencies, densities) in enumerate(zip(keys, all_frequencies, all_densities, strict=True)): color = self._doses[key].get("color", colors[idx % n_colors]) linewidth = self._doses[key].get("linewidth", 3) kwargs = { @@ -197,7 +197,7 @@ def get_plot( for key, val in self._doses[key].items() if key not in ["frequencies", "densities", "color", "linewidth"] } - all_pts.extend(list(zip(frequencies, densities, strict=False))) + all_pts.extend(list(zip(frequencies, densities, strict=True))) if invert_axes: xs, ys = densities, frequencies 
else: @@ -313,7 +313,7 @@ def _make_ticks(self, ax: Axes) -> Axes: ticks = self.get_ticks() # zip to sanitize, only plot the uniq values - if ticks_labels := list(zip(*zip(ticks["distance"], ticks["label"], strict=False), strict=False)): + if ticks_labels := list(zip(*zip(ticks["distance"], ticks["label"], strict=True), strict=True)): ax.set_xticks(ticks_labels[0]) ax.set_xticklabels(ticks_labels[1]) @@ -374,7 +374,7 @@ def get_plot( data = self.bs_plot_data() kwargs.setdefault("color", "blue") - for dists, freqs in zip(data["distances"], data["frequency"], strict=False): + for dists, freqs in zip(data["distances"], data["frequency"], strict=True): for idx in range(self.n_bands): ys = [freqs[idx][j] * u.factor for j in range(len(dists))] ax.plot(dists, ys, **kwargs) @@ -699,7 +699,7 @@ def plot_compare( color_self = ax.lines[0].get_color() ax.plot([], [], label=self._label or self_label, linewidth=2 * line_width, color=color_self) linestyle = other_kwargs.get("linestyle", "-") - for color_other, label_other in zip(colors_other, other_plotter, strict=False): + for color_other, label_other in zip(colors_other, other_plotter, strict=True): ax.plot([], [], label=label_other, linewidth=2 * line_width, color=color_other, linestyle=linestyle) ax.legend(**legend_kwargs) @@ -964,7 +964,7 @@ def get_plot( ax.set_ylabel(r"$\mathrm{Grüneisen\ parameter}$") n_points = len(ys) - 1 - for idx, (xi, yi) in enumerate(zip(xs, ys, strict=False)): + for idx, (xi, yi) in enumerate(zip(xs, ys, strict=True)): color = (1.0 / n_points * idx, 0, 1.0 / n_points * (n_points - idx)) ax.plot(xi, yi, marker, color=color, markersize=markersize) @@ -1091,7 +1091,7 @@ def get_plot_gs(self, ylim: float | None = None, plot_ph_bs_with_gruneisen: bool sc = None for (dists_inx, dists), (_, freqs) in zip( - enumerate(data["distances"]), enumerate(data["frequency"]), strict=False + enumerate(data["distances"]), enumerate(data["frequency"]), strict=True ): for band_idx in range(self.n_bands): if plot_ph_bs_with_gruneisen: diff --git a/src/pymatgen/phonon/thermal_displacements.py b/src/pymatgen/phonon/thermal_displacements.py index 76fc021fd17..d55e68131a2 100644 --- a/src/pymatgen/phonon/thermal_displacements.py +++ b/src/pymatgen/phonon/thermal_displacements.py @@ -229,7 +229,7 @@ def write_cif(self, filename: str) -> None: file.write("_atom_site_aniso_U_12\n") file.write(f"# Additional Data for U_Aniso: {self.temperature}\n") - for idx, (site, matrix) in enumerate(zip(self.structure, self.Ucif, strict=False)): + for idx, (site, matrix) in enumerate(zip(self.structure, self.Ucif, strict=True)): file.write( f"{site.specie.symbol}{idx} {matrix[0][0]} {matrix[1][1]} {matrix[2][2]}" f" {matrix[1][2]} {matrix[0][2]} {matrix[0][1]}\n" @@ -267,7 +267,7 @@ def compute_directionality_quality_criterion( Vectors are given in Cartesian coordinates """ # compare the atoms string at least - for spec1, spec2 in zip(self.structure.species, other.structure.species, strict=False): + for spec1, spec2 in zip(self.structure.species, other.structure.species, strict=True): if spec1 != spec2: raise ValueError( "Species in both structures are not the same! 
" @@ -282,7 +282,7 @@ def compute_directionality_quality_criterion( for self_Ucart, other_Ucart in zip( self.thermal_displacement_matrix_cart_matrixform, other.thermal_displacement_matrix_cart_matrixform, - strict=False, + strict=True, ): result_dict = {} @@ -363,7 +363,7 @@ def visualize_directionality_quality_criterion( # print all U11s (make sure they are in the correct order) counter = 1 # VESTA order: _U_12 _U_13 _atom_site_aniso_U_23 - for atom_therm, site in zip(matrix_cif, structure, strict=False): + for atom_therm, site in zip(matrix_cif, structure, strict=True): file.write( f"{counter} {site.species_string}{counter} {atom_therm[0]} " f"{atom_therm[1]} {atom_therm[2]} {atom_therm[5]} {atom_therm[4]} {atom_therm[3]}\n" diff --git a/src/pymatgen/symmetry/analyzer.py b/src/pymatgen/symmetry/analyzer.py index fbdf272bf9b..bd56a642ae3 100644 --- a/src/pymatgen/symmetry/analyzer.py +++ b/src/pymatgen/symmetry/analyzer.py @@ -296,7 +296,7 @@ def get_symmetry_operations(self, cartesian: bool = False) -> list[SymmOp]: sym_ops = [] mat = self._structure.lattice.matrix.T inv_mat = np.linalg.inv(mat) - for rot, trans in zip(rotation, translation, strict=False): + for rot, trans in zip(rotation, translation, strict=True): if cartesian: rot = np.dot(mat, np.dot(rot, inv_mat)) trans = np.dot(trans, self._structure.lattice.matrix) @@ -429,7 +429,7 @@ def get_ir_reciprocal_mesh( mapping, grid = spglib.get_ir_reciprocal_mesh(np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec) results = [] - for idx, count in zip(*np.unique(mapping, return_counts=True), strict=False): + for idx, count in zip(*np.unique(mapping, return_counts=True), strict=True): results.append(((grid[idx] + shift * (0.5, 0.5, 0.5)) / mesh, count)) return results @@ -1380,7 +1380,7 @@ def get_clustered_indices(): for index in get_clustered_indices(): sites = self.centered_mol.cart_coords[index] - for i, reference in zip(index, sites, strict=False): + for i, reference in zip(index, sites, strict=True): for op in symm_ops: rotated = np.dot(op, sites.T).T matched_indices = find_in_coord_list(rotated, reference, self.tol) diff --git a/src/pymatgen/symmetry/groups.py b/src/pymatgen/symmetry/groups.py index 18f787cfeeb..21c535c5099 100644 --- a/src/pymatgen/symmetry/groups.py +++ b/src/pymatgen/symmetry/groups.py @@ -468,7 +468,7 @@ def is_compatible(self, lattice: Lattice, tol: float = 1e-5, angle_tol: float = crys_system = self.crystal_system def check(param, ref, tolerance): - return all(abs(i - j) < tolerance for i, j in zip(param, ref, strict=False) if j is not None) + return all(abs(i - j) < tolerance for i, j in zip(param, ref, strict=True) if j is not None) if crys_system == "cubic": a = abc[0] diff --git a/src/pymatgen/symmetry/kpath.py b/src/pymatgen/symmetry/kpath.py index 24ced78f67d..38c21e4914a 100644 --- a/src/pymatgen/symmetry/kpath.py +++ b/src/pymatgen/symmetry/kpath.py @@ -894,7 +894,7 @@ def __init__(self, structure: Structure, symprec: float = 0.01, angle_tolerance= if not system_is_tri: warn("Non-zero 'magmom' data will be used to define unique atoms in the cell.") - site_data = zip(species, [tuple(vec) for vec in sp["magmom"]], strict=False) # type: ignore[assignment] + site_data = zip(species, [tuple(vec) for vec in sp["magmom"]], strict=True) # type: ignore[assignment] unique_species: list[SpeciesLike] = [] numbers = [] @@ -1437,7 +1437,7 @@ def _get_key_points(self): return key_points, bz_as_key_point_inds, face_center_inds def _get_key_point_orbits(self, key_points): - key_points_copy = 
dict(zip(range(len(key_points) - 1), key_points[0 : len(key_points) - 1], strict=False)) + key_points_copy = dict(zip(range(len(key_points) - 1), key_points[0 : len(key_points) - 1], strict=True)) # gamma not equivalent to any in BZ and is last point added to # key_points key_points_inds_orbits = [] @@ -1507,7 +1507,7 @@ def _get_key_lines(key_points, bz_as_key_point_inds): return key_lines def _get_key_line_orbits(self, key_points, key_lines, key_points_inds_orbits): - key_lines_copy = dict(zip(range(len(key_lines)), key_lines, strict=False)) + key_lines_copy = dict(zip(range(len(key_lines)), key_lines, strict=True)) key_lines_inds_orbits = [] i = 0 diff --git a/src/pymatgen/symmetry/maggroups.py b/src/pymatgen/symmetry/maggroups.py index f1b6510e8e0..72df6369ca0 100644 --- a/src/pymatgen/symmetry/maggroups.py +++ b/src/pymatgen/symmetry/maggroups.py @@ -403,7 +403,7 @@ def is_compatible(self, lattice: Lattice, tol: float = 1e-5, angle_tol: float = crys_system = self.crystal_system def check(param, ref, tolerance): - return all(abs(i - j) < tolerance for i, j in zip(param, ref, strict=False) if j is not None) + return all(abs(i - j) < tolerance for i, j in zip(param, ref, strict=True) if j is not None) if crys_system == "cubic": a = abc[0] diff --git a/src/pymatgen/transformations/advanced_transformations.py b/src/pymatgen/transformations/advanced_transformations.py index fe3eed2c70c..86051612305 100644 --- a/src/pymatgen/transformations/advanced_transformations.py +++ b/src/pymatgen/transformations/advanced_transformations.py @@ -721,7 +721,7 @@ def generate_dummy_specie(): DummySpecies(symbol, spin=Spin.up): constraint.order_parameter, DummySpecies(symbol, spin=Spin.down): 1 - constraint.order_parameter, } - for symbol, constraint in zip(dummy_species_symbols, order_parameters, strict=False) + for symbol, constraint in zip(dummy_species_symbols, order_parameters, strict=True) ] for site in dummy_struct: diff --git a/src/pymatgen/transformations/site_transformations.py b/src/pymatgen/transformations/site_transformations.py index d5454a8836e..0f666bb272c 100644 --- a/src/pymatgen/transformations/site_transformations.py +++ b/src/pymatgen/transformations/site_transformations.py @@ -378,7 +378,7 @@ def _fast_ordering(self, structure: Structure, num_remove_dict, num_to_return=1) def _enumerate_ordering(self, structure: Structure): # Generate the disordered structure first. struct = structure.copy() - for indices, fraction in zip(self.indices, self.fractions, strict=False): + for indices, fraction in zip(self.indices, self.fractions, strict=True): for ind in indices: new_sp = {sp: occu * fraction for sp, occu in structure[ind].species.items()} struct[ind] = new_sp @@ -409,7 +409,7 @@ def apply_transformation(self, structure: Structure, return_ranked_list: bool | """ num_remove_dict = {} total_combos = 0 - for idx, frac in zip(self.indices, self.fractions, strict=False): + for idx, frac in zip(self.indices, self.fractions, strict=True): n_to_remove = len(idx) * frac if abs(n_to_remove - int(round(n_to_remove))) > 1e-3: raise ValueError("Fraction to remove must be consistent with integer amounts in structure.") diff --git a/src/pymatgen/util/coord.py b/src/pymatgen/util/coord.py index 6b655c4d42e..a24236c5cbe 100644 --- a/src/pymatgen/util/coord.py +++ b/src/pymatgen/util/coord.py @@ -133,7 +133,7 @@ def get_linear_interpolated_value(x_values: ArrayLike, y_values: ArrayLike, x: f Returns: Value at x. 
""" - arr = np.array(sorted(zip(x_values, y_values, strict=False), key=lambda d: d[0])) + arr = np.array(sorted(zip(x_values, y_values, strict=True), key=lambda d: d[0])) indices = np.where(arr[:, 0] >= x)[0] diff --git a/src/pymatgen/util/plotting.py b/src/pymatgen/util/plotting.py index fdaa013bc69..3bb2ae9a23f 100644 --- a/src/pymatgen/util/plotting.py +++ b/src/pymatgen/util/plotting.py @@ -676,7 +676,7 @@ def wrapper(*args, **kwargs): tags = ascii_letters if len(fig.axes) > len(tags): tags = (1 + len(ascii_letters) // len(fig.axes)) * ascii_letters - for ax, tag in zip(fig.axes, tags, strict=False): + for ax, tag in zip(fig.axes, tags, strict=True): ax.annotate(f"({tag})", xy=(0.05, 0.95), xycoords="axes fraction") if tight_layout: diff --git a/src/pymatgen/util/testing/__init__.py b/src/pymatgen/util/testing/__init__.py index 543cfd9fae4..1842f2a0c02 100644 --- a/src/pymatgen/util/testing/__init__.py +++ b/src/pymatgen/util/testing/__init__.py @@ -117,7 +117,7 @@ def serialize_with_pickle(self, objects: Any, protocols: Sequence[int] | None = # Test for equality if test_eq: - for orig, unpickled in zip(objects, unpickled_objs, strict=False): + for orig, unpickled in zip(objects, unpickled_objs, strict=True): assert ( orig == unpickled ), f"Unpickled and original objects are unequal for {protocol=}\n{orig=}\n{unpickled=}" diff --git a/src/pymatgen/util/testing/aims.py b/src/pymatgen/util/testing/aims.py index ef4105faf41..59ecc49ed3d 100644 --- a/src/pymatgen/util/testing/aims.py +++ b/src/pymatgen/util/testing/aims.py @@ -48,7 +48,7 @@ def compare_files(test_name: str, work_dir: Path, ref_dir: Path) -> None: with gzip.open(f"{ref_dir / test_name / Path(file).name}.gz", "rt") as ref_file: ref_lines = [line.strip() for line in ref_file.readlines() if len(line.strip()) > 0 and line[0] != "#"] - for test_line, ref_line in zip(test_lines, ref_lines, strict=False): + for test_line, ref_line in zip(test_lines, ref_lines, strict=True): if "output" in test_line and "band" in test_line: assert check_band(test_line, ref_line) else: @@ -68,7 +68,7 @@ def compare_files(test_name: str, work_dir: Path, ref_dir: Path) -> None: assert ref == check if check_output: - for ref_out, check_out in zip(ref_output, check_output, strict=False): + for ref_out, check_out in zip(ref_output, check_output, strict=True): if "band" in check_out: assert check_band(check_out, ref_out) else: @@ -138,7 +138,7 @@ def compare_single_files(ref_file: str | Path, test_file: str | Path) -> None: with zopen(f"{ref_file}.gz", mode="rt") as rf: ref_lines = rf.readlines()[5:] - for test_line, ref_line in zip(test_lines, ref_lines, strict=False): + for test_line, ref_line in zip(test_lines, ref_lines, strict=True): if "species_dir" in ref_line: continue if test_line.strip() != ref_line.strip(): diff --git a/tests/analysis/chemenv/coordination_environments/test_coordination_geometries.py b/tests/analysis/chemenv/coordination_environments/test_coordination_geometries.py index d7d080c8db9..631b2151184 100644 --- a/tests/analysis/chemenv/coordination_environments/test_coordination_geometries.py +++ b/tests/analysis/chemenv/coordination_environments/test_coordination_geometries.py @@ -63,7 +63,7 @@ def test_coordination_geometry(self): cg_oct2 = CoordinationGeometry.from_dict(cg_oct.as_dict()) assert cg_oct.central_site == approx(cg_oct2.central_site) - for p1, p2 in zip(cg_oct.points, cg_oct2.points, strict=False): + for p1, p2 in zip(cg_oct.points, cg_oct2.points, strict=True): assert p1 == approx(p2) assert ( str(cg_oct) == 
"Coordination geometry type : Octahedron (IUPAC: OC-6 || IUCr: [6o])\n" diff --git a/tests/analysis/elasticity/test_elastic.py b/tests/analysis/elasticity/test_elastic.py index 2f901c70e26..f131d423895 100644 --- a/tests/analysis/elasticity/test_elastic.py +++ b/tests/analysis/elasticity/test_elastic.py @@ -422,7 +422,7 @@ def test_get_strain_state_dict(self): all_strains = [Strain.from_voigt(v).zeroed() for vec in vecs.values() for v in vec] rng.shuffle(all_strains) all_stresses = [Stress.from_voigt(rng.random(6)).zeroed() for _ in all_strains] - strain_dict = {k.tobytes(): v for k, v in zip(all_strains, all_stresses, strict=False)} + strain_dict = {k.tobytes(): v for k, v in zip(all_strains, all_stresses, strict=True)} ss_dict = get_strain_state_dict(all_strains, all_stresses, add_eq=False) # Check length of ss_dict assert len(strain_inds) == len(ss_dict) @@ -430,7 +430,7 @@ def test_get_strain_state_dict(self): assert set(strain_states) == set(ss_dict) for data in ss_dict.values(): # Check correspondence of strains/stresses - for strain, stress in zip(data["strains"], data["stresses"], strict=False): + for strain, stress in zip(data["strains"], data["stresses"], strict=True): assert_allclose( Stress.from_voigt(stress), strain_dict[Strain.from_voigt(strain).tobytes()], @@ -474,11 +474,11 @@ def test_fit(self): diff_fit(self.strains, self.pk_stresses, self.data_dict["eq_stress"]) reduced = [ (e, pk) - for e, pk in zip(self.strains, self.pk_stresses, strict=False) + for e, pk in zip(self.strains, self.pk_stresses, strict=True) if not (abs(abs(e) - 0.05) < 1e-10).any() ] # Get reduced dataset - r_strains, r_pk_stresses = zip(*reduced, strict=False) + r_strains, r_pk_stresses = zip(*reduced, strict=True) c2 = diff_fit(r_strains, r_pk_stresses, self.data_dict["eq_stress"], order=2) c2, c3, _c4 = diff_fit(r_strains, r_pk_stresses, self.data_dict["eq_stress"], order=4) c2, c3 = diff_fit(self.strains, self.pk_stresses, self.data_dict["eq_stress"], order=3) diff --git a/tests/analysis/elasticity/test_strain.py b/tests/analysis/elasticity/test_strain.py index 7fb2f437ba8..94f66279125 100644 --- a/tests/analysis/elasticity/test_strain.py +++ b/tests/analysis/elasticity/test_strain.py @@ -61,10 +61,10 @@ def test_apply_to_structure(self): assert_allclose(strained_non.sites[1].coords, [3.8872306, 1.224e-6, 2.3516318], atol=1e-7) # Check convention for applying transformation - for vec, defo_vec in zip(self.structure.lattice.matrix, strained_non.lattice.matrix, strict=False): + for vec, defo_vec in zip(self.structure.lattice.matrix, strained_non.lattice.matrix, strict=True): new_vec = np.dot(self.non_ind_defo, np.transpose(vec)) assert_allclose(new_vec, defo_vec) - for coord, defo_coord in zip(self.structure.cart_coords, strained_non.cart_coords, strict=False): + for coord, defo_coord in zip(self.structure.cart_coords, strained_non.cart_coords, strict=True): new_coord = np.dot(self.non_ind_defo, np.transpose(coord)) assert_allclose(new_coord, defo_coord) diff --git a/tests/analysis/magnetism/test_heisenberg.py b/tests/analysis/magnetism/test_heisenberg.py index 4f6e37eaceb..248bb559227 100644 --- a/tests/analysis/magnetism/test_heisenberg.py +++ b/tests/analysis/magnetism/test_heisenberg.py @@ -26,7 +26,7 @@ def setUpClass(cls): ordered_structures = list(c["structure"]) ordered_structures = [Structure.from_dict(d) for d in ordered_structures] epa = list(c["energy_per_atom"]) - energies = [e * len(s) for (e, s) in zip(epa, ordered_structures, strict=False)] + energies = [e * len(s) for (e, s) in 
zip(epa, ordered_structures, strict=True)] hm = HeisenbergMapper(ordered_structures, energies, cutoff=5.0, tol=0.02) cls.hms.append(hm) diff --git a/tests/analysis/test_chempot_diagram.py b/tests/analysis/test_chempot_diagram.py index 4321ff3b645..0cc61c6d63c 100644 --- a/tests/analysis/test_chempot_diagram.py +++ b/tests/analysis/test_chempot_diagram.py @@ -38,7 +38,7 @@ def test_el_refs(self): elems = [Element("Li"), Element("Fe"), Element("O")] energies = [-1.91301487, -6.5961471, -25.54966885] - correct_el_refs = dict(zip(elems, energies, strict=False)) + correct_el_refs = dict(zip(elems, energies, strict=True)) assert el_refs == approx(correct_el_refs) @@ -46,7 +46,7 @@ def test_el_refs_formal(self): el_refs = {elem: entry.energy for elem, entry in self.cpd_ternary_formal.el_refs.items()} elems = [Element("Li"), Element("Fe"), Element("O")] energies = [0, 0, 0] - correct_el_refs = dict(zip(elems, energies, strict=False)) + correct_el_refs = dict(zip(elems, energies, strict=True)) assert el_refs == approx(correct_el_refs) def test_border_hyperplanes(self): diff --git a/tests/analysis/test_interface_reactions.py b/tests/analysis/test_interface_reactions.py index 02b26475f02..fc3bac4e0a6 100644 --- a/tests/analysis/test_interface_reactions.py +++ b/tests/analysis/test_interface_reactions.py @@ -335,7 +335,7 @@ def test_convexity_helper(ir): lst = list(ir.get_kinks()) x_kink = [i[1] for i in lst] energy_kink = [i[2] for i in lst] - points = list(zip(x_kink, energy_kink, strict=False)) + points = list(zip(x_kink, energy_kink, strict=True)) if len(points) >= 3: # To test convexity of the plot, construct convex hull from # the kinks and make sure @@ -343,7 +343,7 @@ def test_convexity_helper(ir): # 2. all points are on the convex hull. relative_vectors_1 = [(x - x_kink[0], e - energy_kink[0]) for x, e in points] relative_vectors_2 = [(x - x_kink[-1], e - energy_kink[-1]) for x, e in points] - relative_vectors = zip(relative_vectors_1, relative_vectors_2, strict=False) + relative_vectors = zip(relative_vectors_1, relative_vectors_2, strict=True) positions = [np.cross(v1, v2) for v1, v2 in relative_vectors] assert np.all(np.array(positions) <= 0) diff --git a/tests/analysis/test_molecule_matcher.py b/tests/analysis/test_molecule_matcher.py index aca10521f0d..2d27fcbd5d4 100644 --- a/tests/analysis/test_molecule_matcher.py +++ b/tests/analysis/test_molecule_matcher.py @@ -55,7 +55,7 @@ def perturb(mol, scale, seed): rng = np.random.default_rng(seed=seed) dV = rng.normal(scale=scale, size=(len(mol), 3)) - for site, dv in zip(mol, dV, strict=False): + for site, dv in zip(mol, dV, strict=True): site.coords += dv diff --git a/tests/analysis/test_phase_diagram.py b/tests/analysis/test_phase_diagram.py index 7e6b831bfad..5380a1e013b 100644 --- a/tests/analysis/test_phase_diagram.py +++ b/tests/analysis/test_phase_diagram.py @@ -447,7 +447,7 @@ def test_get_element_profile(self): {"evolution": -1.0, "chempot": -10.48758201, "reaction": "Li2O -> 2 Li + 0.5 O2"}, ] result = self.pd.get_element_profile(Element("O"), Composition("Li2O")) - for d1, d2 in zip(expected, result, strict=False): + for d1, d2 in zip(expected, result, strict=True): assert d1["evolution"] == approx(d2["evolution"]) assert d1["chempot"] == approx(d2["chempot"]) assert d1["reaction"] == str(d2["reaction"]) @@ -505,7 +505,7 @@ def test_get_critical_compositions_fractional(self): Composition("Li0.3243244Fe0.1621621O0.51351349"), Composition("Li3FeO4").fractional_composition, ] - for crit, exp in zip(comps, expected, strict=False): + 
for crit, exp in zip(comps, expected, strict=True): assert crit.almost_equals(exp, rtol=0, atol=1e-5) comps = self.pd.get_critical_compositions(c1, c3) @@ -515,7 +515,7 @@ def test_get_critical_compositions_fractional(self): Composition("Li5FeO4").fractional_composition, Composition("Li2O").fractional_composition, ] - for crit, exp in zip(comps, expected, strict=False): + for crit, exp in zip(comps, expected, strict=True): assert crit.almost_equals(exp, rtol=0, atol=1e-5) def test_get_critical_compositions(self): @@ -529,7 +529,7 @@ def test_get_critical_compositions(self): Composition("Li0.3243244Fe0.1621621O0.51351349") * 7.4, Composition("Li3FeO4"), ] - for crit, exp in zip(comps, expected, strict=False): + for crit, exp in zip(comps, expected, strict=True): assert crit.almost_equals(exp, rtol=0, atol=1e-5) comps = self.pd.get_critical_compositions(c1, c3) @@ -539,7 +539,7 @@ def test_get_critical_compositions(self): Composition("Li5FeO4") / 3, Composition("Li2O"), ] - for crit, exp in zip(comps, expected, strict=False): + for crit, exp in zip(comps, expected, strict=True): assert crit.almost_equals(exp, rtol=0, atol=1e-5) # Don't fail silently if input compositions aren't in phase diagram @@ -560,7 +560,7 @@ def test_get_critical_compositions(self): Composition("Li0.3243244Fe0.1621621O0.51351349") * 7.4, Composition("Li3FeO4"), ] - for crit, exp in zip(comps, expected, strict=False): + for crit, exp in zip(comps, expected, strict=True): assert crit.almost_equals(exp, rtol=0, atol=1e-5) # case where the endpoints are identical diff --git a/tests/analysis/test_reaction_calculator.py b/tests/analysis/test_reaction_calculator.py index 1fcc128e1ad..dea6c9f6322 100644 --- a/tests/analysis/test_reaction_calculator.py +++ b/tests/analysis/test_reaction_calculator.py @@ -519,7 +519,7 @@ def test_as_from_dict(self): assert str(new_rxn) == "2 Li + O2 -> Li2O2" def test_all_entries(self): - for coeff, entry in zip(self.rxn.coeffs, self.rxn.all_entries, strict=False): + for coeff, entry in zip(self.rxn.coeffs, self.rxn.all_entries, strict=True): if coeff > 0: assert entry.reduced_formula == "Li2O2" assert entry.energy == approx(-959.64693323) diff --git a/tests/analysis/test_structure_matcher.py b/tests/analysis/test_structure_matcher.py index 0a9abee2a2e..76add6c9ee5 100644 --- a/tests/analysis/test_structure_matcher.py +++ b/tests/analysis/test_structure_matcher.py @@ -465,7 +465,7 @@ def test_supercell_subsets(self): # test when s1 is exact supercell of s2 result = sm.get_s2_like_s1(s1, s2) - for a, b in zip(s1, result, strict=False): + for a, b in zip(s1, result, strict=True): assert a.distance(b) < 0.08 assert a.species == b.species @@ -500,7 +500,7 @@ def test_supercell_subsets(self): s2_missing_site = s2.copy() del s2_missing_site[1] result = sm.get_s2_like_s1(s1, s2_missing_site) - for a, b in zip((s1[i] for i in (0, 2, 4, 5)), result, strict=False): + for a, b in zip((s1[i] for i in (0, 2, 4, 5)), result, strict=True): assert a.distance(b) < 0.08 assert a.species == b.species @@ -534,7 +534,7 @@ def test_get_s2_large_s2(self): result = sm.get_s2_like_s1(s1, s2) - for x, y in zip(s1, result, strict=False): + for x, y in zip(s1, result, strict=True): assert x.distance(y) < 0.08 def test_get_mapping(self): diff --git a/tests/command_line/test_gulp_caller.py b/tests/command_line/test_gulp_caller.py index e8fab4a266d..262d2d09af9 100644 --- a/tests/command_line/test_gulp_caller.py +++ b/tests/command_line/test_gulp_caller.py @@ -280,7 +280,7 @@ def setUp(self): bv = BVAnalyzer() val = 
bv.get_valences(self.mgo_uc) el = [site.species_string for site in self.mgo_uc] - self.val_dict = dict(zip(el, val, strict=False)) + self.val_dict = dict(zip(el, val, strict=True)) def test_get_energy_tersoff(self): structure = Structure.from_file(f"{VASP_IN_DIR}/POSCAR_Al12O18") diff --git a/tests/command_line/test_vampire_caller.py b/tests/command_line/test_vampire_caller.py index 9b475345b09..6ae96e0a3c6 100644 --- a/tests/command_line/test_vampire_caller.py +++ b/tests/command_line/test_vampire_caller.py @@ -27,13 +27,13 @@ def setUpClass(cls): ordered_structures = list(c["structure"]) ordered_structures = [Structure.from_dict(d) for d in ordered_structures] epa = list(c["energy_per_atom"]) - energies = [e * len(s) for (e, s) in zip(epa, ordered_structures, strict=False)] + energies = [e * len(s) for (e, s) in zip(epa, ordered_structures, strict=True)] cls.structure_inputs.append(ordered_structures) cls.energy_inputs.append(energies) def test_vampire(self): - for structs, energies in zip(self.structure_inputs, self.energy_inputs, strict=False): + for structs, energies in zip(self.structure_inputs, self.energy_inputs, strict=True): settings = {"start_t": 0, "end_t": 500, "temp_increment": 50} vc = VampireCaller( structs, diff --git a/tests/core/test_composition.py b/tests/core/test_composition.py index fdd9ef7f72d..c0cdb54d5a0 100644 --- a/tests/core/test_composition.py +++ b/tests/core/test_composition.py @@ -173,7 +173,7 @@ def test_average_electroneg(self): 1.21, 2.43, ) - for elem, val in zip(self.comps, electro_negs, strict=False): + for elem, val in zip(self.comps, electro_negs, strict=True): assert elem.average_electroneg == approx(val) def test_total_electrons(self): @@ -406,7 +406,7 @@ def test_from_weight_dict(self): ] formula_list = ["Ti87.6 V5.5 Al6.9", "Ti44.98 Ni55.02", "H2O"] - for weight_dict, formula in zip(weight_dict_list, formula_list, strict=False): + for weight_dict, formula in zip(weight_dict_list, formula_list, strict=True): c1 = Composition(formula).fractional_composition c2 = Composition.from_weight_dict(weight_dict).fractional_composition assert set(c1.elements) == set(c2.elements) diff --git a/tests/core/test_operations.py b/tests/core/test_operations.py index 71b732f707d..48523924080 100644 --- a/tests/core/test_operations.py +++ b/tests/core/test_operations.py @@ -259,7 +259,7 @@ def test_operate_magmom(self): transformed_magmoms = [[1, 2, 3], [-1, -2, -3], [1, -2, 3], [1, 2, -3]] - for xyzt_string, transformed_magmom in zip(xyzt_strings, transformed_magmoms, strict=False): + for xyzt_string, transformed_magmom in zip(xyzt_strings, transformed_magmoms, strict=True): for magmom in magmoms: op = MagSymmOp.from_xyzt_str(xyzt_string) assert_allclose(transformed_magmom, op.operate_magmom(magmom).global_moment) diff --git a/tests/core/test_structure.py b/tests/core/test_structure.py index f37153106e1..29b271ec99d 100644 --- a/tests/core/test_structure.py +++ b/tests/core/test_structure.py @@ -622,7 +622,7 @@ def test_get_all_neighbors_and_get_neighbors(self): assert len(all_nn[idx][0]) == 4 assert len(all_nn[idx]) == len(struct.get_neighbors(site, rand_radius)) - for site, nns in zip(struct, all_nn, strict=False): + for site, nns in zip(struct, all_nn, strict=True): for nn in nns: assert nn[0].is_periodic_image(struct[nn[2]]) dist = sum((site.coords - nn[0].coords) ** 2) ** 0.5 @@ -785,7 +785,7 @@ def test_get_all_neighbors_outside_cell(self): [[3.1] * 3, [0.11] * 3, [-1.91] * 3, [0.5] * 3], ) all_nn = struct.get_all_neighbors(0.2, include_index=True) - for site, 
nns in zip(struct, all_nn, strict=False): + for site, nns in zip(struct, all_nn, strict=True): for nn in nns: assert nn[0].is_periodic_image(struct[nn[2]]) d = sum((site.coords - nn[0].coords) ** 2) ** 0.5 diff --git a/tests/core/test_surface.py b/tests/core/test_surface.py index 5a542e6434d..8fdbc079c9b 100644 --- a/tests/core/test_surface.py +++ b/tests/core/test_surface.py @@ -159,7 +159,7 @@ def test_surface_sites_and_symmetry(self): assert total_surf_sites / 2 == 4 # Test if the ratio of surface sites per area is - # constant, ie are the surface energies the same + # constant, i.e. are the surface energies the same? r1 = total_surf_sites / (2 * slab.surface_area) slab_gen = SlabGenerator(self.ag_fcc, (3, 1, 0), 10, 10, primitive=False) slab = slab_gen.get_slabs()[0] diff --git a/tests/core/test_tensors.py b/tests/core/test_tensors.py index b2730f4db3f..96f72e921d0 100644 --- a/tests/core/test_tensors.py +++ b/tests/core/test_tensors.py @@ -271,7 +271,7 @@ def test_tensor_mapping(self): reduced = symmetry_reduce(tbs, self.get_structure("Sn")) tkey = Tensor.from_values_indices([0.01], [(0, 0)]) tval = reduced[tkey] - for tens_1, tens_2 in zip(tval, reduced[tbs[0]], strict=False): + for tens_1, tens_2 in zip(tval, reduced[tbs[0]], strict=True): assert approx(tens_1) == tens_2 # Test set reduced[tkey] = "test_val" @@ -304,7 +304,7 @@ def test_populate(self): vtens = np.zeros([6] * 3) indices = [(0, 0, 0), (0, 0, 1), (0, 1, 2), (0, 3, 3), (0, 5, 5), (3, 4, 5)] values = [-1271.0, -814.0, -50.0, -3.0, -780.0, -95.0] - for v, idx in zip(values, indices, strict=False): + for v, idx in zip(values, indices, strict=True): vtens[idx] = v toec = Tensor.from_voigt(vtens) toec = toec.populate(sn, prec=1e-3, verbose=True) @@ -382,7 +382,7 @@ class like TensorCollection. 
tc_mod = getattr(tc_orig, attribute) if callable(tc_mod): tc_mod = tc_mod(*args, **kwargs) - for t_orig, t_mod in zip(tc_orig, tc_mod, strict=False): + for t_orig, t_mod in zip(tc_orig, tc_mod, strict=True): this_mod = getattr(t_orig, attribute) if callable(this_mod): this_mod = this_mod(*args, **kwargs) @@ -440,20 +440,20 @@ def test_list_based_functions(self): # from_voigt tc_input = list(np.random.default_rng().random((3, 6, 6))) tc = TensorCollection.from_voigt(tc_input) - for t_input, tensor in zip(tc_input, tc, strict=False): + for t_input, tensor in zip(tc_input, tc, strict=True): assert_allclose(Tensor.from_voigt(t_input), tensor) def test_serialization(self): # Test base serialize-deserialize dct = self.seq_tc.as_dict() new = TensorCollection.from_dict(dct) - for t, t_new in zip(self.seq_tc, new, strict=False): + for t, t_new in zip(self.seq_tc, new, strict=True): assert_allclose(t, t_new) voigt_symmetrized = self.rand_tc.voigt_symmetrized dct = voigt_symmetrized.as_dict(voigt=True) new_vsym = TensorCollection.from_dict(dct) - for t, t_new in zip(voigt_symmetrized, new_vsym, strict=False): + for t, t_new in zip(voigt_symmetrized, new_vsym, strict=True): assert_allclose(t, t_new) diff --git a/tests/core/test_trajectory.py b/tests/core/test_trajectory.py index 6fea3b7dae3..4e3b6b75cfd 100644 --- a/tests/core/test_trajectory.py +++ b/tests/core/test_trajectory.py @@ -390,7 +390,7 @@ def test_extend_frame_props(self): traj_1 = Trajectory(lattice=lattice, species=species, coords=coords, frame_properties=props_1) # energy and pressure properties - props_2 = [{"energy": e, "pressure": p} for e, p in zip(energy_2, pressure_2, strict=False)] + props_2 = [{"energy": e, "pressure": p} for e, p in zip(energy_2, pressure_2, strict=True)] traj_2 = Trajectory(lattice=lattice, species=species, coords=coords, frame_properties=props_2) # no properties diff --git a/tests/electronic_structure/test_boltztrap.py b/tests/electronic_structure/test_boltztrap.py index 46f22a7bb1e..f5762919507 100644 --- a/tests/electronic_structure/test_boltztrap.py +++ b/tests/electronic_structure/test_boltztrap.py @@ -205,7 +205,7 @@ def test_get_symm_bands(self): kpoints = [kp.frac_coords for kp in sbs.kpoints] labels_dict = {k: sbs.labels_dict[k].frac_coords for k in sbs.labels_dict} for kpt_line, label_dict in zip( - [None, sbs.kpoints, kpoints], [None, sbs.labels_dict, labels_dict], strict=False + [None, sbs.kpoints, kpoints], [None, sbs.labels_dict, labels_dict], strict=True ): sbs_bzt = self.bz_bands.get_symm_bands(structure, -5.25204548, kpt_line=kpt_line, labels_dict=label_dict) assert len(sbs_bzt.bands[Spin.up]) == approx(20) diff --git a/tests/electronic_structure/test_cohp.py b/tests/electronic_structure/test_cohp.py index d519230f6ab..22ac40a9f1a 100644 --- a/tests/electronic_structure/test_cohp.py +++ b/tests/electronic_structure/test_cohp.py @@ -900,7 +900,7 @@ def test_average_multi_center_cobi(self): for cohp1, cohp2 in zip( self.cobi_multi_B2H6.get_cohp_by_label("average").cohp[Spin.up], self.cobi_multi_B2H6_average2.get_cohp_by_label("average").cohp[Spin.up], - strict=False, + strict=True, ): print(cohp1) print(cohp2) @@ -909,21 +909,21 @@ def test_average_multi_center_cobi(self): for cohp1, cohp2 in zip( self.cobi_multi_B2H6.get_cohp_by_label("average").cohp[Spin.down], self.cobi_multi_B2H6_average2.get_cohp_by_label("average").cohp[Spin.down], - strict=False, + strict=True, ): assert cohp1 == approx(cohp2, abs=1e-4) for icohp1, icohp2 in zip( 
self.cobi_multi_B2H6.get_cohp_by_label("average").icohp[Spin.up], self.cobi_multi_B2H6_average2.get_cohp_by_label("average").icohp[Spin.up], - strict=False, + strict=True, ): assert icohp1 == approx(icohp2, abs=1e-4) for icohp1, icohp2 in zip( self.cobi_multi_B2H6.get_cohp_by_label("average").icohp[Spin.down], self.cobi_multi_B2H6_average2.get_cohp_by_label("average").icohp[Spin.down], - strict=False, + strict=True, ): assert icohp1 == approx(icohp2, abs=1e-4) diff --git a/tests/electronic_structure/test_dos.py b/tests/electronic_structure/test_dos.py index c3e0c3cab1f..dd9e7c36d80 100644 --- a/tests/electronic_structure/test_dos.py +++ b/tests/electronic_structure/test_dos.py @@ -311,7 +311,7 @@ class TestDOS(PymatgenTest): def setUp(self): with open(f"{TEST_DIR}/complete_dos.json") as file: dct = json.load(file) - ys = list(zip(dct["densities"]["1"], dct["densities"]["-1"], strict=False)) + ys = list(zip(dct["densities"]["1"], dct["densities"]["-1"], strict=True)) self.dos = DOS(dct["energies"], ys, dct["efermi"]) def test_get_gap(self): diff --git a/tests/entries/test_compatibility.py b/tests/entries/test_compatibility.py index 3cfdff54874..41127770c81 100644 --- a/tests/entries/test_compatibility.py +++ b/tests/entries/test_compatibility.py @@ -1018,7 +1018,7 @@ def test_processing_entries_inplace(self): # check whether the compatibility scheme can keep input entries unchanged entries_copy = copy.deepcopy(entries) self.compat.process_entries(entries, inplace=False) - assert all(e.correction == e_copy.correction for e, e_copy in zip(entries, entries_copy, strict=False)) + assert all(e.correction == e_copy.correction for e, e_copy in zip(entries, entries_copy, strict=True)) def test_check_potcar(self): MaterialsProject2020Compatibility(check_potcar=False).process_entries(self.entry1) @@ -1891,7 +1891,7 @@ def test_processing_entries_inplace(self): entries = [h2o_entry, o2_entry] entries_copy = copy.deepcopy(entries) MaterialsProjectAqueousCompatibility().process_entries(entries, inplace=False) - assert all(e.correction == e_copy.correction for e, e_copy in zip(entries, entries_copy, strict=False)) + assert all(e.correction == e_copy.correction for e, e_copy in zip(entries, entries_copy, strict=True)) def test_parallel_process_entries(self): hydrate_entry = ComputedEntry(Composition("FeH4O2"), -10) # nH2O = 2 diff --git a/tests/entries/test_mixing_scheme.py b/tests/entries/test_mixing_scheme.py index 8c29d1524b0..7bde2f6fd27 100644 --- a/tests/entries/test_mixing_scheme.py +++ b/tests/entries/test_mixing_scheme.py @@ -756,7 +756,7 @@ def test_data_ms_complete(ms_complete): ComputedStructureEntry match (or don't match) as intended. 
""" sm = StructureMatcher() - for g, s in zip(ms_complete.gga_entries, ms_complete.scan_entries, strict=False): + for g, s in zip(ms_complete.gga_entries, ms_complete.scan_entries, strict=True): if g.entry_id == "gga-3": assert not sm.fit(g.structure, s.structure) else: @@ -1226,7 +1226,7 @@ def test_processing_entries_inplace(self): # check whether the compatibility scheme can keep input entries unchanged entries_copy = copy.deepcopy(entries) MaterialsProjectDFTMixingScheme().process_entries(entries, inplace=False) - assert all(e.correction == e_copy.correction for e, e_copy in zip(entries, entries_copy, strict=False)) + assert all(e.correction == e_copy.correction for e, e_copy in zip(entries, entries_copy, strict=True)) def test_check_potcar(self, ms_complete): """Entries with invalid or missing POTCAR raise error by default but should be ignored if diff --git a/tests/io/abinit/test_abiobjects.py b/tests/io/abinit/test_abiobjects.py index 0053e3ad3d5..9bbd6a65420 100644 --- a/tests/io/abinit/test_abiobjects.py +++ b/tests/io/abinit/test_abiobjects.py @@ -101,7 +101,7 @@ def test_znucl_typat(self): assert [s.symbol for s in species_by_znucl(gan)] == ["Ga", "N"] - for itype1, itype2 in zip(def_typat, enforce_typat, strict=False): + for itype1, itype2 in zip(def_typat, enforce_typat, strict=True): assert def_znucl[itype1 - 1] == enforce_znucl[itype2 - 1] with pytest.raises(ValueError, match="Both enforce_znucl and enforce_typat are required"): diff --git a/tests/io/abinit/test_inputs.py b/tests/io/abinit/test_inputs.py index f411ff2da3b..762ea2acad8 100644 --- a/tests/io/abinit/test_inputs.py +++ b/tests/io/abinit/test_inputs.py @@ -235,7 +235,7 @@ def test_api(self): assert new_multi.ndtset == multi.ndtset assert new_multi.structure == multi.structure - for old_inp, new_inp in zip(multi, new_multi, strict=False): + for old_inp, new_inp in zip(multi, new_multi, strict=True): assert old_inp is not new_inp assert old_inp.as_dict() == new_inp.as_dict() diff --git a/tests/io/aims/test_aims_inputs.py b/tests/io/aims/test_aims_inputs.py index e578e438435..234700bcc91 100644 --- a/tests/io/aims/test_aims_inputs.py +++ b/tests/io/aims/test_aims_inputs.py @@ -58,7 +58,7 @@ def test_read_h2o_in(tmp_path: Path): [0, -0.763239, -0.477047], ] - assert all(sp.symbol == symb for sp, symb in zip(h2o.structure.species, ["O", "H", "H"], strict=False)) + assert all(sp.symbol == symb for sp, symb in zip(h2o.structure.species, ["O", "H", "H"], strict=True)) assert_allclose(h2o.structure.cart_coords, in_coords) h2o_test_from_struct = AimsGeometryIn.from_structure(h2o.structure) diff --git a/tests/io/exciting/test_inputs.py b/tests/io/exciting/test_inputs.py index 9d41220498b..9859fd4ddf5 100644 --- a/tests/io/exciting/test_inputs.py +++ b/tests/io/exciting/test_inputs.py @@ -70,7 +70,7 @@ def test_write_string(self): ], ) exc_in = ExcitingInput(structure) - for l1, l2 in zip(input_string.split("\n"), exc_in.write_string("unchanged").split("\n"), strict=False): + for l1, l2 in zip(input_string.split("\n"), exc_in.write_string("unchanged").split("\n"), strict=True): if not l1.strip().startswith(" 0.999 else: diff --git a/tests/symmetry/test_analyzer.py b/tests/symmetry/test_analyzer.py index abc8c6b9beb..2024a9dc3ac 100644 --- a/tests/symmetry/test_analyzer.py +++ b/tests/symmetry/test_analyzer.py @@ -93,7 +93,7 @@ def test_get_point_group_operations(self): pg_ops = sg.get_point_group_operations() frac_symm_ops = sg.get_symmetry_operations() symm_ops = sg.get_symmetry_operations(cartesian=True) - for fop, op, 
pgop in zip(frac_symm_ops, symm_ops, pg_ops, strict=False): + for fop, op, pgop in zip(frac_symm_ops, symm_ops, pg_ops, strict=True): # translation vector values should all be 0 or 0.5 t = fop.translation_vector * 2 assert_allclose(t - np.round(t), 0) @@ -610,7 +610,7 @@ def test_get_kpoint_weights(self): ir_mesh = spga.get_ir_reciprocal_mesh((4, 4, 4)) weights = [i[1] for i in ir_mesh] weights = np.array(weights) / sum(weights) - for expected, weight in zip(weights, spga.get_kpoint_weights([i[0] for i in ir_mesh]), strict=False): + for expected, weight in zip(weights, spga.get_kpoint_weights([i[0] for i in ir_mesh]), strict=True): assert weight == approx(expected) for name in ("SrTiO3", "LiFePO4", "Graphite"): @@ -619,14 +619,14 @@ def test_get_kpoint_weights(self): ir_mesh = spga.get_ir_reciprocal_mesh((1, 2, 3)) weights = [i[1] for i in ir_mesh] weights = np.array(weights) / sum(weights) - for expected, weight in zip(weights, spga.get_kpoint_weights([i[0] for i in ir_mesh]), strict=False): + for expected, weight in zip(weights, spga.get_kpoint_weights([i[0] for i in ir_mesh]), strict=True): assert weight == approx(expected) vasp_run = Vasprun(f"{VASP_OUT_DIR}/vasprun.xml.gz") spga = SpacegroupAnalyzer(vasp_run.final_structure) wts = spga.get_kpoint_weights(vasp_run.actual_kpoints) - for w1, w2 in zip(vasp_run.actual_kpoints_weights, wts, strict=False): + for w1, w2 in zip(vasp_run.actual_kpoints_weights, wts, strict=True): assert w1 == approx(w2) kpts = [[0, 0, 0], [0.15, 0.15, 0.15], [0.2, 0.2, 0.2]] diff --git a/tests/symmetry/test_settings.py b/tests/symmetry/test_settings.py index 8bb67fde0d0..5c1e1254632 100644 --- a/tests/symmetry/test_settings.py +++ b/tests/symmetry/test_settings.py @@ -32,7 +32,7 @@ def setUp(self): ] def test_init(self): - for test_str, test_Pp in zip(self.test_strings, self.test_Pps, strict=False): + for test_str, test_Pp in zip(self.test_strings, self.test_Pps, strict=True): jft = JonesFaithfulTransformation.from_transformation_str(test_str) jft2 = JonesFaithfulTransformation(test_Pp[0], test_Pp[1]) assert_allclose(jft.P, jft2.P) @@ -56,7 +56,7 @@ def test_transform_lattice(self): [[5.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 5.0]], ] - for ref_lattice, (P, p) in zip(all_ref_lattices, self.test_Pps, strict=False): + for ref_lattice, (P, p) in zip(all_ref_lattices, self.test_Pps, strict=True): jft = JonesFaithfulTransformation(P, p) assert_allclose(jft.transform_lattice(lattice).matrix, ref_lattice) @@ -70,10 +70,10 @@ def test_transform_coords(self): [[-0.25, -0.5, -0.75], [0.25, 0.0, -0.25]], ] - for ref_coords, (P, p) in zip(all_ref_coords, self.test_Pps, strict=False): + for ref_coords, (P, p) in zip(all_ref_coords, self.test_Pps, strict=True): jft = JonesFaithfulTransformation(P, p) transformed_coords = jft.transform_coords(coords) - for coord, ref_coord in zip(transformed_coords, ref_coords, strict=False): + for coord, ref_coord in zip(transformed_coords, ref_coords, strict=True): assert_allclose(coord, ref_coord) def test_transform_symmops(self): @@ -187,5 +187,5 @@ def test_transform_symmops(self): transformed_symm_ops = [jft.transform_symmop(op) for op in input_symm_ops] - for transformed_op, ref_transformed_op in zip(transformed_symm_ops, ref_transformed_symm_ops, strict=False): + for transformed_op, ref_transformed_op in zip(transformed_symm_ops, ref_transformed_symm_ops, strict=True): assert transformed_op == ref_transformed_op diff --git a/tests/transformations/test_site_transformations.py b/tests/transformations/test_site_transformations.py 
index 3ce5dce0a21..a645030376a 100644 --- a/tests/transformations/test_site_transformations.py +++ b/tests/transformations/test_site_transformations.py @@ -326,7 +326,7 @@ def test(self): trafo = RadialSiteDistortionTransformation(0, 1, nn_only=True) struct = trafo.apply_transformation(self.structure) - for c1, c2 in zip(self.structure[1:7], struct[1:7], strict=False): + for c1, c2 in zip(self.structure[1:7], struct[1:7], strict=True): assert c1.distance(c2) == 1.0 assert np.array_equal(struct[0].coords, [0, 0, 0]) @@ -340,5 +340,5 @@ def test(self): def test_second_nn(self): trafo = RadialSiteDistortionTransformation(0, 1, nn_only=False) struct = trafo.apply_transformation(self.molecule) - for c1, c2 in zip(self.molecule[7:], struct[7:], strict=False): + for c1, c2 in zip(self.molecule[7:], struct[7:], strict=True): assert abs(round(sum(c2.coords - c1.coords), 2)) == 0.33
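For reference, a minimal standalone sketch (not part of the patch above; the names `sites` and `valences` are hypothetical) of the zip() contract these changes opt into, using only Python 3.10+ builtins: with strict=False, unequal-length iterables are silently truncated to the shortest one, whereas strict=True raises ValueError so a length mismatch is surfaced instead of swallowed.

    # Python 3.10+; illustrative only, not taken from the diff above
    sites = ["Li", "Fe", "O"]
    valences = [1, 3]  # one entry short, e.g. from a truncated parse

    # strict=False (old behavior): the unpaired "O" is silently dropped
    assert dict(zip(sites, valences, strict=False)) == {"Li": 1, "Fe": 3}

    # strict=True (new behavior): the mismatch raises at iteration time
    try:
        dict(zip(sites, valences, strict=True))
    except ValueError as exc:
        print(exc)  # e.g. "zip() argument 2 is shorter than argument 1"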