Fix numpy int deprecations. Fixes #2366.
shyuep committed Jan 23, 2022
1 parent ae3f1b7 commit 362e26f
Showing 20 changed files with 67 additions and 66 deletions.
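Note on the pattern applied throughout this commit: NumPy dtype arguments spelled with numpy-specific aliases (np.int_, np.int8, np.int16, np.bool, np.bool_) are replaced by the Python builtins int and bool, which NumPy accepts as dtype specifiers. Of these, np.bool (and np.int) are the builtin aliases deprecated in NumPy 1.20 and removed in NumPy 1.24; np.int_ and np.bool_ remain valid scalar types, and the commit standardizes on the builtins. A minimal sketch, not part of the commit, with made-up variable names:

import numpy as np

# Before (np.bool warns on NumPy 1.20-1.23 and raises AttributeError on 1.24+):
#     mask = np.zeros((3, 3), dtype=np.bool)
#     deltas = np.zeros(3, np.int_)

# After: the builtins map to NumPy's default boolean/integer dtypes.
mask = np.zeros((3, 3), dtype=bool)
deltas = np.zeros(3, int)
image_cell = np.array(np.round([0.2, 1.1, -0.9]), int)  # -> array([ 0,  1, -1])

print(mask.dtype, deltas.dtype, image_cell.dtype)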
10 changes: 5 additions & 5 deletions pymatgen/analysis/chemenv/connectivity/connected_components.py
@@ -458,7 +458,7 @@ def compute_periodicity_all_simple_paths_algorithm(self):
path.append(test_node)
# TODO: there are some paths that appears twice for cycles, and there are some paths that should
# probably not be considered
- this_path_deltas = [np.zeros(3, np.int_)]
+ this_path_deltas = [np.zeros(3, int)]
for (node1, node2) in [(node1, path[inode1 + 1]) for inode1, node1 in enumerate(path[:-1])]:
this_path_deltas_new = []
for key, edge_data in self._connected_subgraph[node1][node2].items():
@@ -499,7 +499,7 @@ def compute_periodicity_cycle_basis(self):
for cyc in cycles:
mycyc = list(cyc)
mycyc.append(cyc[0])
- this_cycle_deltas = [np.zeros(3, np.int_)]
+ this_cycle_deltas = [np.zeros(3, int)]
for (node1, node2) in [(node1, mycyc[inode1 + 1]) for inode1, node1 in enumerate(mycyc[:-1])]:
this_cycle_deltas_new = []
for key, edge_data in self._connected_subgraph[node1][node2].items():
@@ -778,7 +778,7 @@ def elastic_centered_graph(self, start_node=None):
logging.debug(" Edge outside the cell ... getting neighbor back inside")
if (0, 0, 0) in ddeltas:
ddeltas.remove((0, 0, 0))
- myddelta = np.array(ddeltas[0], np.int_)
+ myddelta = np.array(ddeltas[0], int)
node_neighbor_edges = centered_connected_subgraph.edges(
nbunch=[node_neighbor], data=True, keys=True
)
@@ -797,11 +797,11 @@ def elastic_centered_graph(self, start_node=None):
):
if edata["start"] == node_neighbor.isite and edata["end"] != node_neighbor.isite:
centered_connected_subgraph[n1][n2][key]["delta"] = tuple(
- np.array(edata["delta"], np.int_) + myddelta
+ np.array(edata["delta"], int) + myddelta
)
elif edata["end"] == node_neighbor.isite:
centered_connected_subgraph[n1][n2][key]["delta"] = tuple(
- np.array(edata["delta"], np.int_) - myddelta
+ np.array(edata["delta"], int) - myddelta
)
else:
raise ValueError("DUHH")
@@ -1700,7 +1700,7 @@ def coordination_geometry_symmetry_measures_separation_plane_optim(
continue
if sep not in nb_set.separations:
nb_set.separations[sep] = {}
- mysep = [np.array(ss, dtype=np.int8) for ss in separation]
+ mysep = [np.array(ss, dtype=int) for ss in separation]
nb_set.separations[sep][separation] = (plane, mysep)
if sep == separation_plane_algo.separation:
new_seps.append(mysep)
@@ -1648,7 +1648,7 @@ def from_structure_environments(cls, strategy, structure_environments, valences=
raise ValueError(
"Weird, differences between one site in a periodic image cell is not integer ..."
)
- nb_image_cell = np.array(rounddiff, np.int_)
+ nb_image_cell = np.array(rounddiff, int)
nb_allnbs_sites_index = len(_all_nbs_sites)
_all_nbs_sites.append(
{
@@ -2129,13 +2129,13 @@ def from_dict(cls, d):
for nb_site in d["all_nbs_sites"]:
site = dec.process_decoded(nb_site["site"])
if "image_cell" in nb_site:
- image_cell = np.array(nb_site["image_cell"], np.int_)
+ image_cell = np.array(nb_site["image_cell"], int)
else:
diff = site.frac_coords - structure[nb_site["index"]].frac_coords
rounddiff = np.round(diff)
if not np.allclose(diff, rounddiff):
raise ValueError("Weird, differences between one site in a periodic image cell is not integer ...")
- image_cell = np.array(rounddiff, np.int_)
+ image_cell = np.array(rounddiff, int)
all_nbs_sites.append({"site": site, "index": nb_site["index"], "image_cell": image_cell})
neighbors_sets = [
[
8 changes: 4 additions & 4 deletions pymatgen/analysis/chemenv/utils/graph_utils.py
@@ -21,9 +21,9 @@ def get_delta(node1, node2, edge_data):
:return:
"""
if node1.isite == edge_data["start"] and node2.isite == edge_data["end"]:
- return np.array(edge_data["delta"], dtype=np.int_)
+ return np.array(edge_data["delta"], dtype=int)
if node2.isite == edge_data["start"] and node1.isite == edge_data["end"]:
- return -np.array(edge_data["delta"], dtype=np.int_)
+ return -np.array(edge_data["delta"], dtype=int)
raise ValueError("Trying to find a delta between two nodes with an edge that seems not to link these nodes.")


@@ -490,7 +490,7 @@ def get_all_elementary_cycles(graph):
all_edges_dict[(n2, n1)] = nedges
index2edge.append((n1, n2))
nedges += 1
- cycles_matrix = np.zeros(shape=(len(cycle_basis), nedges), dtype=np.bool)
+ cycles_matrix = np.zeros(shape=(len(cycle_basis), nedges), dtype=bool)
for icycle, cycle in enumerate(cycle_basis):
for in1, n1 in enumerate(cycle):
n2 = cycle[(in1 + 1) % len(cycle)]
@@ -502,7 +502,7 @@

for ncycles in range(1, len(cycle_basis) + 1):
for cycles_combination in itertools.combinations(cycles_matrix, ncycles):
- edges_counts = np.array(np.mod(np.sum(cycles_combination, axis=0), 2), dtype=np.bool)
+ edges_counts = np.array(np.mod(np.sum(cycles_combination, axis=0), 2), dtype=bool)
myedges = [edge for iedge, edge in enumerate(index2edge) if edges_counts[iedge]]
# print(myedges)
try:
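As background for the get_all_elementary_cycles hunk above: the boolean rows of cycles_matrix are combined with a mod-2 sum, i.e. an XOR over edge memberships, so edges shared by an even number of basis cycles cancel out. A small self-contained sketch of that step (the incidence rows below are hypothetical, not taken from pymatgen):

import itertools

import numpy as np

# Each row flags the edges used by one basis cycle (made-up example data).
cycles_matrix = np.array([[1, 1, 0, 0],
                          [0, 1, 1, 1]], dtype=bool)

for ncycles in range(1, len(cycles_matrix) + 1):
    for combination in itertools.combinations(cycles_matrix, ncycles):
        # Summing the rows and taking mod 2 leaves the edge set of the
        # combined (symmetric-difference) cycle.
        edges_counts = np.array(np.mod(np.sum(combination, axis=0), 2), dtype=bool)
        print(edges_counts)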
4 changes: 2 additions & 2 deletions pymatgen/analysis/defects/dilute_solution_model.py
@@ -139,7 +139,7 @@ def dilute_solution_model(structure, e0, vac_defs, antisite_defs, T, trial_chem_
dC[i,k,k] = 0 due to no effect on ith type atom
dC[i,j,k] = 0 if i!=j!=k
"""
- dC = np.zeros((n, n, n), dtype=np.int_)
+ dC = np.zeros((n, n, n), dtype=int)
for i in range(n):
for j in range(n):
for k in range(n):
@@ -852,7 +852,7 @@ def solute_site_preference_finder(
dC[i,k,k] = 0 due to no effect on ith type atom
dC[i,j,k] = 0 if i!=j!=k
"""
- dC = np.zeros((n + 1, n + 1, n), dtype=np.int_)
+ dC = np.zeros((n + 1, n + 1, n), dtype=int)
for i in range(n):
for j in range(n):
for k in range(n):
4 changes: 2 additions & 2 deletions pymatgen/analysis/gb/grain.py
@@ -1256,7 +1256,7 @@ def get_trans_mat(
r_matrix = np.dot(np.dot(np.linalg.inv(trans_cry.T), r_matrix), trans_cry.T)
# set one vector of the basis to the rotation axis direction, and
# obtain the corresponding transform matrix
- eye = np.eye(3, dtype=np.int_)
+ eye = np.eye(3, dtype=int)
for h in range(3):
if abs(r_axis[h]) != 0:
eye[h] = np.array(r_axis)
@@ -2107,7 +2107,7 @@ def slab_from_csl(csl, surface, normal, trans_cry, max_search=20, quick_gen=Fals
# quickly generate a supercell, normal is not work in this way
if quick_gen:
scale_factor = []
- eye = np.eye(3, dtype=np.int_)
+ eye = np.eye(3, dtype=int)
for i, j in enumerate(miller):
if j == 0:
scale_factor.append(eye[i])
4 changes: 2 additions & 2 deletions pymatgen/analysis/graphs.py
@@ -1135,9 +1135,9 @@ def __mul__(self, scaling_matrix):
# easier to extend to a general 3x3 scaling matrix.

# code adapted from Structure.__mul__
- scale_matrix = np.array(scaling_matrix, np.int16)
+ scale_matrix = np.array(scaling_matrix, int)
if scale_matrix.shape != (3, 3):
- scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
+ scale_matrix = np.array(scale_matrix * np.eye(3), int)
else:
# TODO: test __mul__ with full 3x3 scaling matrices
raise NotImplementedError("Not tested with 3x3 scaling matrices yet.")
2 changes: 1 addition & 1 deletion pymatgen/analysis/local_env.py
@@ -778,7 +778,7 @@ def get_all_voronoi_polyhedra(self, structure):
indices.extend([(x[2],) + x[3] for x in neighs])

# Get the non-duplicates (using the site indices for numerical stability)
- indices = np.array(indices, dtype=np.int_)
+ indices = np.array(indices, dtype=int)
indices, uniq_inds = np.unique(indices, return_index=True, axis=0)
sites = [sites[i] for i in uniq_inds]

4 changes: 2 additions & 2 deletions pymatgen/analysis/structure_matcher.py
@@ -553,7 +553,7 @@ def _get_mask(self, struct1, struct2, fu, s1_supercell):
Returns:
mask, struct1 translation indices, struct2 translation index
"""
- mask = np.zeros((len(struct2), len(struct1), fu), dtype=np.bool)
+ mask = np.zeros((len(struct2), len(struct1), fu), dtype=bool)

inner = []
for sp2, i in itertools.groupby(enumerate(struct2.species_and_occu), key=lambda x: x[1]):
@@ -580,7 +580,7 @@ def _get_mask(self, struct1, struct2, fu, s1_supercell):
if s1_supercell:
# remove the symmetrically equivalent s1 indices
inds = inds[::fu]
- return np.array(mask, dtype=np.int_), inds, i
+ return np.array(mask, dtype=int), inds, i

def fit(self, struct1, struct2, symmetric=False):
"""
43 changes: 22 additions & 21 deletions pymatgen/core/lattice.py
@@ -186,7 +186,7 @@ class in `pymatgen.analysis.ferroelectricity.polarization`.
Returns:
Lattice coordinates.
"""
- return self.lengths * self.get_fractional_coords(cart_coords)
+ return self.lengths * self.get_fractional_coords(cart_coords) # type: ignore

def d_hkl(self, miller_index: ArrayLike) -> float:
"""
@@ -1028,7 +1028,7 @@ def find_all_mappings(
# this can't be broadcast because they're different lengths
inds = [np.logical_and(dist / l < 1 + ltol, dist / l > 1 / (1 + ltol)) for l in lengths] # type: ignore
c_a, c_b, c_c = (cart[i] for i in inds)
- f_a, f_b, f_c = (frac[i] for i in inds)
+ f_a, f_b, f_c = (frac[i] for i in inds) # type: ignore
l_a, l_b, l_c = (np.sum(c ** 2, axis=-1) ** 0.5 for c in (c_a, c_b, c_c))

def get_angles(v1, v2, l1, l2):
@@ -1045,7 +1045,7 @@ def get_angles(v1, v2, l1, l2):
for i, all_j in enumerate(gammab):
inds = np.logical_and(all_j[:, None], np.logical_and(alphab, betab[i][None, :]))
for j, k in np.argwhere(inds):
- scale_m = np.array((f_a[i], f_b[j], f_c[k]), dtype=np.int_) # type: ignore
+ scale_m = np.array((f_a[i], f_b[j], f_c[k]), dtype=int) # type: ignore
if abs(np.linalg.det(scale_m)) < 1e-8:
continue

@@ -1123,7 +1123,7 @@ def _calculate_lll(self, delta: float = 0.75) -> Tuple[np.ndarray, np.ndarray]:
"""
# Transpose the lattice matrix first so that basis vectors are columns.
# Makes life easier.
- # pylint: disable=E1136,E1137
+ # pylint: disable=E1136,E1137,E1126
a = self._matrix.copy().T

b = np.zeros((3, 3)) # Vectors after the Gram-Schmidt process
@@ -1179,7 +1179,8 @@ def _calculate_lll(self, delta: float = 0.75) -> Tuple[np.ndarray, np.ndarray]:
else:
# We have to do p/q, so do lstsq(q.T, p.T).T instead.
p = dot(a[:, k:3].T, b[:, (k - 2) : k])
- q = np.diag(m[(k - 2) : k])
+ q = np.diag(m[(k - 2) : k]) # type: ignore
+ # pylint: disable=E1101
result = np.linalg.lstsq(q.T, p.T, rcond=None)[0].T # type: ignore
u[k:3, (k - 2) : k] = result

@@ -1436,7 +1437,7 @@ def norm(self, coords: ArrayLike, frac_coords: bool = True) -> float:
Returns:
one-dimensional `numpy` array.
"""
- return np.sqrt(self.dot(coords, coords, frac_coords=frac_coords))
+ return np.sqrt(self.dot(coords, coords, frac_coords=frac_coords)) # type: ignore

def get_points_in_sphere(
self,
@@ -1630,12 +1631,12 @@ def get_points_in_sphere_old(
# Generate all possible images that could be within `r` of `center`
mins = np.floor(pcoords - nmax)
maxes = np.ceil(pcoords + nmax)
- arange = np.arange(start=mins[0], stop=maxes[0], dtype=np.int_)
- brange = np.arange(start=mins[1], stop=maxes[1], dtype=np.int_)
- crange = np.arange(start=mins[2], stop=maxes[2], dtype=np.int_)
- arange = arange[:, None] * np.array([1, 0, 0], dtype=np.int_)[None, :]
- brange = brange[:, None] * np.array([0, 1, 0], dtype=np.int_)[None, :]
- crange = crange[:, None] * np.array([0, 0, 1], dtype=np.int_)[None, :]
+ arange = np.arange(start=mins[0], stop=maxes[0], dtype=int)
+ brange = np.arange(start=mins[1], stop=maxes[1], dtype=int)
+ crange = np.arange(start=mins[2], stop=maxes[2], dtype=int)
+ arange = arange[:, None] * np.array([1, 0, 0], dtype=int)[None, :]
+ brange = brange[:, None] * np.array([0, 1, 0], dtype=int)[None, :]
+ crange = crange[:, None] * np.array([0, 0, 1], dtype=int)[None, :]
images = arange[:, None, None] + brange[None, :, None] + crange[None, None, :]

# Generate the coordinates of all atoms within these images
@@ -1644,7 +1645,7 @@ def get_points_in_sphere_old(
# Determine distance from `center`
cart_coords = self.get_cartesian_coords(fcoords)
cart_images = self.get_cartesian_coords(images)
- coords = cart_coords[:, None, None, None, :] + cart_images[None, :, :, :, :]
+ coords = cart_coords[:, None, None, None, :] + cart_images[None, :, :, :, :] # pylint: disable=E1126
coords -= center[None, None, None, None, :]
coords **= 2
d_2 = np.sum(coords, axis=4)
@@ -1747,13 +1748,13 @@ def get_distance_and_image(
"""
if jimage is None:
v, d2 = pbc_shortest_vectors(self, frac_coords1, frac_coords2, return_d2=True)
- fc = self.get_fractional_coords(v[0][0]) + frac_coords1 - frac_coords2
- fc = np.array(np.round(fc), dtype=np.int_)
+ fc = self.get_fractional_coords(v[0][0]) + frac_coords1 - frac_coords2 # type: ignore
+ fc = np.array(np.round(fc), dtype=int)
return np.sqrt(d2[0, 0]), fc

jimage = np.array(jimage)
- mapped_vec = self.get_cartesian_coords(jimage + frac_coords2 - frac_coords1)
- return np.linalg.norm(mapped_vec), jimage
+ mapped_vec = self.get_cartesian_coords(jimage + frac_coords2 - frac_coords1) # type: ignore
+ return np.linalg.norm(mapped_vec), jimage # type: ignore

def get_miller_index_from_coords(
self,
@@ -1846,14 +1847,14 @@ def get_integer_index(miller_index: Sequence[float], round_dp: int = 4, verbose:
# deal with the case we have nice fractions
md = [Fraction(n).limit_denominator(12).denominator for n in mi]
mi *= reduce(lambda x, y: x * y, md)
- int_miller_index = np.int_(np.round(mi, 1))
+ int_miller_index = np.round(mi, 1).astype(int)
mi /= np.abs(reduce(math.gcd, int_miller_index))

# round to a reasonable precision
mi = np.array([round(h, round_dp) for h in mi])

# need to recalculate this after rounding as values may have changed
- int_miller_index = np.int_(np.round(mi, 1))
+ int_miller_index = np.round(mi, 1).astype(int)
if np.any(np.abs(mi - int_miller_index) > 1e-6) and verbose:
warnings.warn("Non-integer encountered in Miller index")
else:
@@ -1902,7 +1903,7 @@ def get_points_in_spheres(
"""
if isinstance(pbc, bool):
pbc = [pbc] * 3
- pbc = np.array(pbc, dtype=np.bool_) # type: ignore
+ pbc = np.array(pbc, dtype=bool) # type: ignore
if return_fcoords and lattice is None:
raise ValueError("Lattice needs to be supplied to compute fractional coordinates")
center_coords_min = np.min(center_coords, axis=0)
@@ -1959,7 +1960,7 @@ def get_points_in_spheres(
else:
valid_coords = all_coords # type: ignore
valid_images = [[0, 0, 0]] * len(valid_coords)
- valid_indices = np.arange(len(valid_coords))
+ valid_indices = np.arange(len(valid_coords)) # type: ignore

# Divide the valid 3D space into cubes and compute the cube ids
all_cube_index = _compute_cube_index(valid_coords, global_min, r) # type: ignore
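For readers skimming the get_points_in_sphere_old hunk above: the three integer ranges are attached to the unit axes and broadcast together, so that images[i, j, k] enumerates every candidate periodic image translation. A short sketch with hard-coded bounds (the real method derives mins/maxes from the search radius; the values here are assumptions for illustration):

import numpy as np

mins, maxes = np.array([-1.0, -1.0, -1.0]), np.array([2.0, 2.0, 2.0])  # hypothetical bounds

arange = np.arange(start=mins[0], stop=maxes[0], dtype=int)
brange = np.arange(start=mins[1], stop=maxes[1], dtype=int)
crange = np.arange(start=mins[2], stop=maxes[2], dtype=int)

# Attach each range to one axis, then broadcast the three axes together.
arange = arange[:, None] * np.array([1, 0, 0], dtype=int)[None, :]
brange = brange[:, None] * np.array([0, 1, 0], dtype=int)[None, :]
crange = crange[:, None] * np.array([0, 0, 1], dtype=int)[None, :]
images = arange[:, None, None] + brange[None, :, None] + crange[None, None, :]

print(images.shape)  # (3, 3, 3, 3): a 3 x 3 x 3 grid of 3-component image vectors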
4 changes: 2 additions & 2 deletions pymatgen/core/structure.py
@@ -1096,9 +1096,9 @@ def __mul__(self, scaling_matrix: Union[int, Sequence[int], Sequence[Sequence[in
you prefer a subclass to return its own type, you need to override
this method in the subclass.
"""
- scale_matrix = np.array(scaling_matrix, np.int16)
+ scale_matrix = np.array(scaling_matrix, int)
if scale_matrix.shape != (3, 3):
- scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
+ scale_matrix = np.array(scale_matrix * np.eye(3), int)
new_lattice = Lattice(np.dot(scale_matrix, self._lattice.matrix))

f_lat = lattice_points_in_supercell(scale_matrix)
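The __mul__ hunks in graphs.py above and structure.py here share the same promotion step: an integer or length-3 scaling factor is expanded into a 3x3 integer matrix before building the supercell. A minimal sketch of that step; promote_scale_matrix is an illustrative helper name, not a pymatgen function:

import numpy as np

def promote_scale_matrix(scaling_matrix):
    """Return a 3x3 integer scaling matrix from an int, a length-3 sequence or a 3x3 array."""
    scale_matrix = np.array(scaling_matrix, int)
    if scale_matrix.shape != (3, 3):
        # Broadcasting against the identity turns a scalar or a 3-vector into a
        # diagonal matrix; the outer np.array casts the float result back to int.
        scale_matrix = np.array(scale_matrix * np.eye(3), int)
    return scale_matrix

print(promote_scale_matrix(2))          # 2 * identity
print(promote_scale_matrix([1, 2, 3]))  # diag(1, 2, 3)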
2 changes: 1 addition & 1 deletion pymatgen/core/surface.py
@@ -844,7 +844,7 @@ def __init__(

slab_scale_factor = []
non_orth_ind = []
- eye = np.eye(3, dtype=np.int_)
+ eye = np.eye(3, dtype=int)
for i, j in enumerate(miller_index):
if j == 0:
# Lattice vector is perpendicular to surface normal, i.e.,
2 changes: 1 addition & 1 deletion pymatgen/core/tests/test_units.py
@@ -184,7 +184,7 @@ def test_time(self):
Check whether EnergyArray and FloatWithUnit have same behavior.
"""
# here there's a minor difference because we have a ndarray with
- # dtype=np.int_.
+ # dtype=int.
a = TimeArray(20, "h")
self.assertAlmostEqual(a.to("s"), 3600 * 20)
# Test left and right multiplication.
4 changes: 2 additions & 2 deletions pymatgen/io/abinit/abiobjects.py
@@ -159,7 +159,7 @@ def structure_from_abivars(cls=None, *args, **kwargs):
raise ValueError(f"len(typat) != len(coords):\ntypat: {typat}\ncoords: {coords}")

# Note conversion to int and Fortran --> C indexing
- typat = np.array(typat, dtype=np.int_)
+ typat = np.array(typat, dtype=int)
species = [znucl_type[typ - 1] for typ in typat]

return cls(
@@ -244,7 +244,7 @@ def structure_to_abivars(structure, enforce_znucl=None, enforce_typat=None, **kw

# [ntypat] list
znucl_type = [specie.number for specie in types_of_specie]
- typat = np.zeros(natom, np.int_)
+ typat = np.zeros(natom, int)
for atm_idx, site in enumerate(structure):
typat[atm_idx] = types_of_specie.index(site.specie) + 1
else: