diff --git a/README.md b/README.md
index 46b6610..8e8d336 100644
--- a/README.md
+++ b/README.md
@@ -2,14 +2,14 @@


A software package for analyzing Calabi-Yau hypersurfaces in toric varieties.
-[badge: Docker Build Status] [badge: Docker Image Size (tag)] [badge: GitHub]
+[badge: Latest release] [badge: License] [badge: Number of downloads]

------------------------------------------------------------------------------- -### Important: This package is currently in beta. If you want to be a beta tester please contact Mehmet Demirtas (md775@cornell.edu) or Andres Rios-Tascon (ar2285@cornell.edu). +### Important: This package is currently in beta. If you want to be a beta tester please contact Mehmet Demirtas (m.demirtas@northeastern.edu) or Andres Rios-Tascon (ar2285@cornell.edu). CYTools is an open-source software package developed by [Liam McAllister's group](https://liammcallistergroup.com/) with the purpose of studying Calabi-Yau manifolds arising from the Kreuzer-Skarke database. It emerged from several years of effort towards exploring previously uncharted parts of the string landscape. It offers vastly superior computational performance compared to other software that are typically used in the field. Installation instructions and detailed documentation can be found in the [CYTools website](https://cytools.liammcallistergroup.com). @@ -19,4 +19,4 @@ CYTools makes use a variety of open-source projects. It includes a few code snip All original CYTools code is distributed under the terms of the [GNU General Public License version 3](https://www.gnu.org/licenses/gpl-3.0.txt). All other packages and code snippets are redistributed under their respective licenses. -The current lead developers are Mehmet Demirtas (md775@cornell.edu) and Andres Rios-Tascon (ar2285@cornell.edu). Questions, comments and/or suggestions can be directed to either of us. +The current lead developers are Mehmet Demirtas (m.demirtas@northeastern.edu) and Andres Rios-Tascon (ar2285@cornell.edu). Questions, comments and/or suggestions can be directed to either of us. diff --git a/cytools/__init__.py b/cytools/__init__.py index dbf8912..ec4846d 100644 --- a/cytools/__init__.py +++ b/cytools/__init__.py @@ -19,7 +19,7 @@ from cytools.utils import read_polytopes, fetch_polytopes # Latest version -version = "0.1.1" +version = "0.2.0" versions_with_serious_bugs = [] # Check for more recent versions of CYTools @@ -40,13 +40,13 @@ def check_for_updates(): ver = tuple(int(c) for c in version.split(".")) if latest_ver <= ver: continue - print("Info: A more recent version of CYTools is available. " - "We recommend upgrading before continuing.") + print("\nInfo: A more recent version of CYTools is available. " + "We recommend upgrading before continuing.\n") elif not checked_bugs and "versions_with_serious_bugs =" in l: checked_bugs = True bad_versions = literal_eval(l.split("=")[1].strip()) if version in bad_versions: - print("****************************\n" + print("\n****************************\n" "Warning: This version of CYTools contains a serious" " bug. Please upgrade to the latest version.\n" "****************************\n") diff --git a/cytools/calabiyau.py b/cytools/calabiyau.py index d145f2d..d69c958 100644 --- a/cytools/calabiyau.py +++ b/cytools/calabiyau.py @@ -21,6 +21,7 @@ from collections import Counter, defaultdict from itertools import combinations from math import factorial +import copy # Third party imports from flint import fmpz_mat, fmpq_mat, fmpz, fmpq from scipy.sparse import csr_matrix @@ -39,8 +40,8 @@ class CalabiYau: """ This class handles various computations relating to the Calabi-Yau manifold - itself. It can be used to compute intersection numbers, the Kähler cone of - the ambient variety, among other things. + itself. 
It can be used to compute intersection numbers, the toric Mori + and Kähler cones from the ambient variety, among other things. :::important Generally, objects of this class should not be constructed directly by the @@ -50,9 +51,10 @@ class CalabiYau: ::: :::tip experimental feature - This package is focused on computations on Calabi-Yau 3-folds, but there is - experimental support for Calabi-Yaus of other dimensions. See - [Experimental Features](./experimental) for more details. + This package is focused on computations on Calabi-Yau 3-fold hypersurfaces, + but there is experimental support for Calabi-Yaus of other dimensions and + complete intersection. See [Experimental Features](./experimental) for more + details. ::: ## Constructor @@ -64,8 +66,11 @@ class CalabiYau: [```__init__```](#__init__) function. **Arguments:** - - ```frst``` (Triangulation): A fine, regular, star triangularion of a - reflexive polytope. + - ```toric_var``` (ToricVariety): The ambient toric variety of the + Calabi-Yau. + - ```nef_partition``` (list, optional): A list of tuples of indices + specifying a nef-partition of the polytope, and defines a complete + intersection Calabi-Yau. **Example:** We construct a Calabi-Yau from an fine, regular, star triangulation of a @@ -83,39 +88,72 @@ class CalabiYau: ``` """ - def __init__(self, frst): + def __init__(self, toric_var, nef_partition=None): """ **Description:** Initializes a ```CalabiYau``` object. **Arguments:** - - ```frst``` (Triangulation): A fine, regular, star triangularion of a - reflexive polytope. + - ```toric_var``` (ToricVariety): A toric variety. + - ```nef_partition``` (list, optional): A list of tuples of indices + specifying a nef-partition of the polytope, and defines a complete + intersection Calabi-Yau. **Returns:** Nothing. """ # We first make sure that the input triangulation is appropriate. # Regularity is not checked since it is generally slow. 
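(Aside on the new constructor documented just above: a minimal usage sketch, not taken from the package's own examples. The quintic vertex list and the nef-partition indices are illustrative assumptions, and default triangulation settings are assumed to yield a fine, regular, star triangulation.)

```python
from cytools import Polytope
from cytools.calabiyau import CalabiYau

# Hypersurface case: polytope -> FRST -> toric variety -> Calabi-Yau
p = Polytope([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[-1,-1,-1,-1]])
v = p.triangulate().get_toric_variety()
cy = CalabiYau(v)

# CICY case (experimental feature, must be enabled in the configuration):
# cy_cicy = CalabiYau(v, nef_partition=[(1, 2), (3, 4, 5)])  # indices illustrative only
```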
- if not frst._allow_cy or not frst.is_star() or not frst.is_fine(): - raise Exception("The input triangulation is not suitable for " - "constructing a Calabi-Yau.") - if not frst._poly.is_favorable(lattice="N"): - raise Exception("Only favorable CYs are currently supported.") - if frst._all_poly_pts and not config.enable_experimental_features: - raise Exception("Triangulations that include points interior to " - "facets are currently experimental.") - self._frst = frst + if nef_partition is not None: + if not config._exp_features_enabled: + raise Exception("CICYs are an experimental feature and must be enabled.") + # Verify that the input defines a nef-partition + from cytools import Polytope + pts = toric_var.polytope().points() + convpoly = Polytope(pts[list(set.union(*[set(ii) for ii in nef_partition]))]) + if convpoly != toric_var.polytope(): + raise Exception("Input data does not define a nef partition") + polys = [Polytope(pts[[0]+list(ii)]) for ii in nef_partition] + sumpoly = polys[0] + for i in range(1,len(polys)): + sumpoly = sumpoly.minkowski_sum(polys[i]) + if not sumpoly.is_reflexive(): + raise Exception("Input data does not define a nef partition") + triang = toric_var.triangulation() + triangpts = [tuple(pt) for pt in triang.points()] + parts = [tuple(triang.points_to_indices(pt) for pt in pp.points() if any(pt) and tuple(pt) in triangpts) + for pp in polys] + self._nef_part = parts + else: + if not toric_var.triangulation().is_fine(): + raise Exception("Triangulation is non-fine.") + if ((toric_var.dim() != 4 or not toric_var.triangulation().polytope().is_favorable(lattice="N")) + and not config._exp_features_enabled): + raise Exception("Constructing Calabi-Yaus of dimensions other " + "than 3 or that are non-favorable are " + "experimental features and must be enabled.") + if not ((toric_var.triangulation().points().shape == toric_var.triangulation().polytope().points_not_interior_to_facets().shape + and all((toric_var.triangulation().points() == toric_var.triangulation().polytope().points_not_interior_to_facets()).flat)) + or (toric_var.triangulation().points().shape == toric_var.triangulation().polytope().points().shape + and all((toric_var.triangulation().points() == toric_var.triangulation().polytope().points()).flat))): + raise Exception("Calabi-Yau hypersurfaces must be constructed either points not interior to facets or all points.") + self._ambient_var = toric_var + self._optimal_ambient_var = None + self._nef_part = nef_partition + self._is_hypersurface = nef_partition is None # Initialize remaining hidden attributes + self._dim = None self._hash = None + self._glsm_charge_matrix = None + self._glsm_linrels = None self._divisor_basis = None self._mori_cone = [None]*3 - self._ambient_intersection_numbers = [None]*7 self._intersection_numbers = [None]*7 + self._prime_divs = None self._second_chern_class = None self._is_smooth = None - self._ambient_eff_gens = None - self._ambient_eff_cone = None + self._eff_gens = None + self._eff_cone = None def clear_cache(self, recursive=False, only_in_basis=False): """ @@ -133,50 +171,56 @@ def clear_cache(self, recursive=False, only_in_basis=False): Nothing. 
""" self._mori_cone[2] = None - self._ambient_intersection_numbers[2] = None - self._ambient_intersection_numbers[6] = None self._intersection_numbers[2] = None self._intersection_numbers[6] = None - self._ambient_eff_gens = None - self._ambient_eff_cone = None + self._eff_gens = None + self._eff_cone = None if not only_in_basis: + self._dim = None self._hash = None + self._glsm_charge_matrix = None + self._glsm_linrels = None self._divisor_basis = None self._mori_cone = [None]*3 - self._ambient_intersection_numbers = [None]*7 self._intersection_numbers = [None]*7 + self._prime_divs = None self._second_chern_class = None self._is_smooth = None if recursive: - self._frst.clear_cache(recursive=True) + self.ambient_variety().clear_cache(recursive=True) def __repr__(self): """ **Description:** - Returns a string describing the Calabi-Yau hypersurface. + Returns a string describing the Calabi-Yau manifold. **Arguments:** None. **Returns:** - (string) A string describing the Calabi-Yau hypersurface. + (string) A string describing the Calabi-Yau manifold. """ d = self.dim() - if d == 2: - out_str = (f"A K3 hypersurface with h11={self.h11()} in a " - + "3-dimensional toric variety.") - elif d == 3: - out_str = ("A Calabi-Yau 3-fold hypersurface with " - + f"h11={self.h11()} and h21={self.h21()} in a " - + "4-dimensional toric variety.") - elif d == 4: - out_str = ("A Calabi-Yau 4-fold hypersurface with " - + f"h11={self.h11()}, h12={self.h12()}, " - + f"h13={self.h13()}, and h22={self.h22()} in a " - + "5-dimensional toric variety.") + if self._is_hypersurface: + if d == 2: + out_str = (f"A K3 hypersurface with h11={self.h11()} in a " + + "3-dimensional toric variety") + elif d == 3: + out_str = ("A Calabi-Yau 3-fold hypersurface with " + + f"h11={self.h11()} and h21={self.h21()} in a " + + "4-dimensional toric variety") + elif d == 4: + out_str = ("A Calabi-Yau 4-fold hypersurface with " + + f"h11={self.h11()}, h12={self.h12()}, " + + f"h13={self.h13()}, and h22={self.h22()} in a " + + "5-dimensional toric variety") + else: + out_str = (f"A Calabi-Yau {d}-fold hypersurface in a " + + f"{d+1}-dimensional toric variety") else: - out_str = (f"A Calabi-Yau {d}-fold hypersurface in a " - + f"{d+1}-dimensional toric variety.") + dd = self.ambient_variety().dim() + out_str = (f"A complete intersection Calabi-Yau {d}-fold in a " + + f"{dd}-dimensional toric variety") return out_str def __eq__(self, other): @@ -247,19 +291,22 @@ def __hash__(self): """ if self._hash is not None: return self._hash - dim = self.frst().dim() - codim2_faces = [self.frst().points_to_indices(f.points()) - for f in self.polytope().faces(dim-2)] - restr_triang = set() - for f in codim2_faces: - face_pts = set(f) - for s in self.frst().simplices(): - inters = face_pts & set(s) - if len(inters) == dim-1: - ss = tuple(sorted(inters)) - restr_triang.add(ss) - self._hash = hash((hash(self.frst().polytope()),) + - tuple(sorted(restr_triang))) + if self._is_hypersurface: + dim = self.ambient_variety().dim() + codim2_faces = [self.ambient_variety().triangulation().points_to_indices(f.points()) + for f in self.ambient_variety().triangulation().polytope().faces(dim-2)] + restr_triang = set() + for f in codim2_faces: + face_pts = set(f) + for s in self.ambient_variety().triangulation().simplices(): + inters = face_pts & set(s) + if len(inters) == dim-1: + ss = tuple(sorted(inters)) + restr_triang.add(ss) + self._hash = hash((hash(self.ambient_variety().triangulation().polytope()),) + + tuple(sorted(restr_triang))) + else: + self._hash = 
hash((hash(self.ambient_variety()),hash(self._nef_part))) return self._hash def is_trivially_equivalent(self, other): @@ -282,39 +329,41 @@ def is_trivially_equivalent(self, other): **Returns:** (boolean) The truth value of the CYs being equal. """ - if self.frst().polytope() != other.frst().polytope(): + if not self._is_hypersurface: + return False + if self.polytope() != other.polytope(): return False - dim = self.frst().dim() - codim2_faces = [self.frst().points_to_indices(f.points()) + dim = self.ambient_variety().triangulation().dim() + codim2_faces = [self.ambient_variety().triangulation().points_to_indices(f.points()) for f in self.polytope().faces(dim-2)] restr_triang_self = set() restr_triang_other = set() for f in codim2_faces: face_pts = set(f) - for s in self.frst().simplices(): + for s in self.ambient_variety().triangulation().simplices(): inters = face_pts & set(s) if len(inters) == dim-1: ss = tuple(sorted(inters)) restr_triang_self.add(ss) - for s in other.frst().simplices(): + for s in other.ambient_variety().triangulation().simplices(): inters = face_pts & set(s) if len(inters) == dim-1: ss = tuple(sorted(inters)) restr_triang_other.add(ss) return restr_triang_self == restr_triang_other - def frst(self): + def ambient_variety(self): """ **Description:** - Returns the FRST giving rise to the ambient toric variety. + Returns the ambient toric variety. **Arguments:** None. **Returns:** - (Triangulation) The FRST giving rise to the ambient toric variety. + (ToricVariety) The ambient toric variety. """ - return self._frst + return self._ambient_var def polytope(self): """ @@ -329,7 +378,7 @@ def polytope(self): (Polytope) The polytope whose triangulation gives rise to the ambient toric variety. """ - return self._frst.polytope() + return self.ambient_variety().triangulation().polytope() def ambient_dim(self): """ @@ -342,7 +391,7 @@ def ambient_dim(self): **Returns:** (integer) The complex dimension of the ambient toric variety. """ - return self.frst().dim() + return self.ambient_variety().dim() def dim(self): """ @@ -355,7 +404,13 @@ def dim(self): **Returns:** (integer) The complex dimension of the Calabi-Yau hypersurface. """ - return self.frst().dim() - 1 + if self._dim is not None: + return self._dim + if self._is_hypersurface: + self._dim = self.ambient_variety().dim() - 1 + else: + self._dim = self.ambient_variety().triangulation().dim() - len(self._nef_part) + return self._dim def hpq(self, p, q): """ @@ -379,7 +434,9 @@ def hpq(self, p, q): **Returns:** (integer) The Hodge number h^{p,q} of the arising Calabi-Yau manifold. """ - if self.dim() not in (2,3,4): + if not self._is_hypersurface: + raise Exception("Not yet implemented for CICYs.") + if self.dim() not in (2,3,4) and p!=1 and q!=1: raise Exception("Only Calabi-Yaus of dimension 2-4 are currently " "supported.") return self.polytope().hpq(p,q,lattice="N") @@ -399,6 +456,8 @@ def h11(self): **Returns:** (integer) The Hodge number h^{1,1} of Calabi-Yau manifold. """ + if not self._is_hypersurface: + raise Exception("Not yet implemented for CICYs.") if self.dim() not in (2,3,4): raise Exception("Only Calabi-Yaus of dimension 2-4 are currently " "supported.") @@ -419,6 +478,8 @@ def h12(self): **Returns:** (integer) The Hodge number h^{1,2} of Calabi-Yau manifold. 
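(A quick illustrative sketch of the Hodge-number accessors touched in this hunk, assuming a Calabi-Yau 3-fold hypersurface object named `cy`; the relations below hold only for 3-folds.)

```python
h11, h21 = cy.h11(), cy.h21()
assert cy.hpq(1, 1) == h11          # hpq is the generic accessor
assert cy.chi() == 2*(h11 - h21)    # Euler characteristic of a CY 3-fold
```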
""" + if not self._is_hypersurface: + raise Exception("Not yet implemented for CICYs.") if self.dim() not in (2,3,4): raise Exception("Only Calabi-Yaus of dimension 2-4 are currently " "supported.") @@ -439,6 +500,8 @@ def h13(self): **Returns:** (integer) The Hodge number h^{1,3} of Calabi-Yau manifold. """ + if not self._is_hypersurface: + raise Exception("Not yet implemented for CICYs.") if self.dim() not in (2,3,4): raise Exception("Only Calabi-Yaus of dimension 2-4 are currently " "supported.") @@ -505,24 +568,13 @@ def chi(self): **Returns:** (integer) The Euler characteristic of the Calabi-Yau manifold. """ + if not self._is_hypersurface: + raise Exception("Not yet implemented for CICYs.") if self.dim() not in (2,3,4): raise Exception("Only Calabi-Yaus of dimension 2-4 are currently " "supported.") return self.polytope().chi(lattice="N") - def sr_ideal(self): - """ - **Description:** - Returns the Stanley–Reisner ideal of the ambient toric variety. - - **Arguments:** - None. - - **Returns:** - (list) The Stanley–Reisner ideal of the ambient toric variety. - """ - return self.frst().sr_ideal() - def glsm_charge_matrix(self, include_origin=True): """ **Description:** @@ -537,9 +589,14 @@ def glsm_charge_matrix(self, include_origin=True): **Returns:** (list) The GLSM charge matrix. """ - return self.polytope().glsm_charge_matrix( - include_origin=include_origin, - include_points_interior_to_facets=self.frst()._all_poly_pts) + if self._glsm_charge_matrix is not None: + return np.array(self._glsm_charge_matrix)[:,(0 if include_origin else 1):] + toric_divs = [0]+list(self.prime_toric_divisors()) + pts = self.ambient_variety().polytope().points_to_indices(self.ambient_variety().triangulation().points()[toric_divs]) + self._glsm_charge_matrix = self.polytope().glsm_charge_matrix( + include_origin=True, + points=pts) + return np.array(self._glsm_charge_matrix)[:,(0 if include_origin else 1):] def glsm_linear_relations(self, include_origin=True): """ @@ -555,9 +612,14 @@ def glsm_linear_relations(self, include_origin=True): (list) A matrix of linear relations of the columns of the GLSM charge matrix. 
""" - return self.polytope().glsm_linear_relations( - include_origin=include_origin, - include_points_interior_to_facets=self.frst()._all_poly_pts) + if self._glsm_linrels is not None: + return np.array(self._glsm_linrels)[(0 if include_origin else 1):,(0 if include_origin else 1):] + toric_divs = [0]+list(self.prime_toric_divisors()) + pts = self.ambient_variety().polytope().points_to_indices(self.ambient_variety().triangulation().points()[toric_divs]) + self._glsm_linrels = self.polytope().glsm_linear_relations( + include_origin=True, + points=pts) + return np.array(self._glsm_linrels)[(0 if include_origin else 1):,(0 if include_origin else 1):] def divisor_basis(self, include_origin=True, integral=None): """ @@ -597,10 +659,13 @@ def divisor_basis(self, include_origin=True, integral=None): ``` """ if self._divisor_basis is None or integral: - self._divisor_basis = self.polytope().glsm_basis( + toric_divs = [0]+list(self.prime_toric_divisors()) + pts = self.ambient_variety().polytope().points_to_indices(self.ambient_variety().triangulation().points()[toric_divs]) + tmp_divs = self.polytope().glsm_basis( integral=True, include_origin=True, - include_points_interior_to_facets=self.frst()._all_poly_pts) + points=pts) + self._divisor_basis = np.array([toric_divs[i] for i in tmp_divs]) self.clear_cache(only_in_basis=True) if len(self._divisor_basis.shape) == 1: if 0 in self._divisor_basis and not include_origin: @@ -666,7 +731,7 @@ def set_divisor_basis(self, basis, include_origin=True, self._divisor_basis = b # Else if input is a matrix elif len(b.shape) == 2: - if not config.enable_experimental_features: + if not config._exp_features_enabled: raise Exception("Using generic bases is currently an " "experimental feature and must be enabled in " "the configuration.") @@ -761,13 +826,11 @@ def set_curve_basis(self, basis, include_origin=True, if len(b.shape) != 2: raise Exception("Input must be either a vector or a matrix.") # Else input is a matrix - if not config.enable_experimental_features: + if not config._exp_features_enabled: raise Exception("Using generic bases is currently an " "experimental feature and must be enabled in " "the configuration.") - glsm_cm = self.polytope().glsm_charge_matrix( - include_origin=False, - include_points_interior_to_facets=self.frst()._all_poly_pts) + glsm_cm = self.glsm_charge_matrix(include_origin=False) glsm_rnk = np.linalg.matrix_rank(glsm_cm) t = type(b[0,0]) if t in [np.int64, np.float64]: @@ -822,634 +885,6 @@ def set_curve_basis(self, basis, include_origin=True, b_inv = np.linalg.pinv(new_b).T self.set_divisor_basis(b_inv, exact_arithmetic=exact_arithmetic) - def ambient_mori_cone(self, in_basis=False, include_origin=True, - from_intersection_numbers=False): - """ - **Description:** - Returns the Mori cone of the ambient toric variety. - - **Arguments:** - - ```in_basis``` (boolean, optional, default=False): Use the current - basis of curves, which is dual to what the basis returned by the - [```divisor_basis```](#divisor_basis) function. - - ```include_origin``` (boolean, optional, default=True): Includes the - origin of the polytope in the computation, which corresponds to the - canonical divisor. - - ```from_intersection_numbers``` (boolean, optional, default=False): - Compute the rays of the Mori cone using the intersection numbers of - the ambient variety. This can be faster if they are already computed. - The set of rays may be different, but they define the same cone. - - **Returns:** - (Cone) The Mori cone of the ambient toric variety. 
- """ - if self._mori_cone[0] is None: - if from_intersection_numbers: - rays = (self._compute_ambient_mori_rays_from_intersections_4d() - if self.dim() == 3 else - self._compute_ambient_mori_rays_from_intersections()) - self._mori_cone[0] = Cone(rays) - else: - self._mori_cone[0] = self.frst().cpl_cone().dual() - # 0: All divs, 1: No origin, 2: In basis - args_id = ((not include_origin)*1 if not in_basis else 0) + in_basis*2 - if self._mori_cone[args_id] is not None: - return self._mori_cone[args_id] - rays = self.frst().cpl_cone().hyperplanes() - basis = self.divisor_basis() - if include_origin and not in_basis: - new_rays = rays - elif not include_origin and not in_basis: - new_rays = rays[:,1:] - else: - if len(basis.shape) == 2: # If basis is matrix - new_rays = rays.dot(basis.T) - else: - new_rays = rays[:,basis] - c = Cone(new_rays, check=len(basis.shape)==2) - self._mori_cone[args_id] = c - return self._mori_cone[args_id] - - def _compute_ambient_mori_rays_from_intersections(self): - """ - **Description:** - Computes the Mori cone rays of the ambient variety using intersection - numbers. - - :::note - This function should generally not be called by the user. Instead, it - is called by the [ambient_mori_cone](#ambient_mori_cone) function when - the user wants to save some time if the ambient intersection numbers - were already computed. - ::: - - **Arguments:** - None. - - **Returns:** - (list) The list of generating rays of the Mori cone of the ambient - toric variety. - """ - ambient_intnums = self.ambient_intersection_numbers(in_basis=False) - dim = self.dim() - num_divs = self.h11() + dim + 2 - curve_dict = defaultdict(lambda: [[],[]]) - for ii in ambient_intnums: - if 0 in ii[:-1]: - continue - ctr = Counter(ii[:-1]) - if len(ctr) < dim: - continue - for comb in set(combinations(ctr.keys(),dim)): - crv = tuple(sorted(comb)) - curve_dict[crv][0].append(int(sum([i*(ctr[i]-(i in crv)) - for i in ctr]))) - curve_dict[crv][1].append(ii[-1]) - row_set = set() - for crv in curve_dict: - g = gcd_list(curve_dict[crv][1]) - row = np.zeros(num_divs, dtype=int) - for j,jj in enumerate(curve_dict[crv][0]): - row[jj] = int(round(curve_dict[crv][1][j]/g)) - row_set.add(tuple(row)) - mori_rays = np.array(list(row_set), dtype=int) - # Compute column corresponding to the origin - mori_rays[:,0] = -np.sum(mori_rays, axis=1) - return mori_rays - - def _compute_ambient_mori_rays_from_intersections_4d(self): - """ - **Description:** - Computes the Mori cone rays of the ambient variety using intersection - numbers. - - :::note notes - - This function should generally not be called by the user. Instead, - this is called by the [ambient_mori_cone](#ambient_mori_cone) function - when when the user wants to save some time if the ambient intersection - numbers were already computed. - - This function is a more optimized version for 4D toric varieties. - ::: - - **Arguments:** - None. - - **Returns:** - (list) The list of generating rays of the Mori cone of the ambient - toric variety. 
- """ - ambient_intnums = self.ambient_intersection_numbers(in_basis=False) - num_divs = int(max([ii[3] for ii in ambient_intnums])) + 1 - curve_dict = {} - curve_ctr = 0 - curve_sparse_list = [] - for ii in ambient_intnums: - if ii[0] == 0: - continue - if ii[0] == ii[1] == ii[2] == ii[3]: - continue - if ii[0] == ii[1] == ii[2]: - continue - if ii[1] == ii[2] == ii[3]: - continue - if ii[0] == ii[1] and ii[2] == ii[3]: - continue - if ii[0] == ii[1]: - if (ii[0],ii[2],ii[3]) not in curve_dict.keys(): - curve_dict[(ii[0],ii[2],ii[3])] = curve_ctr - curve_sparse_list.append([curve_ctr,ii[1],ii[-1]]) - curve_ctr += 1 - else: - curve_sparse_list.append([curve_dict[(ii[1],ii[2],ii[3])], - ii[0],ii[-1]]) - elif ii[1] == ii[2]: - if (ii[0],ii[1],ii[3]) not in curve_dict.keys(): - curve_dict[(ii[0],ii[1],ii[3])] = curve_ctr - curve_sparse_list.append([curve_ctr,ii[2],ii[-1]]) - curve_ctr += 1 - else: - curve_sparse_list.append([curve_dict[(ii[0],ii[1],ii[3])], - ii[2],ii[-1]]) - elif ii[2] == ii[3]: - if (ii[0],ii[1],ii[2]) not in curve_dict.keys(): - curve_dict[(ii[0],ii[1],ii[2])] = curve_ctr - curve_sparse_list.append([curve_ctr,ii[3],ii[-1]]) - curve_ctr += 1 - else: - curve_sparse_list.append([curve_dict[(ii[0],ii[1],ii[2])], - ii[3],ii[-1]]) - else: - if (ii[0],ii[1],ii[2]) not in curve_dict.keys(): - curve_dict[(ii[0],ii[1],ii[2])] = curve_ctr - curve_sparse_list.append([curve_ctr,ii[3],ii[-1]]) - curve_ctr += 1 - else: - curve_sparse_list.append([curve_dict[(ii[0],ii[1],ii[2])], - ii[3],ii[-1]]) - if (ii[0],ii[1],ii[3]) not in curve_dict.keys(): - curve_dict[(ii[0],ii[1],ii[3])] = curve_ctr - curve_sparse_list.append([curve_ctr,ii[2],ii[-1]]) - curve_ctr += 1 - else: - curve_sparse_list.append([curve_dict[(ii[0],ii[1],ii[3])], - ii[2],ii[-1]]) - if (ii[0],ii[2],ii[3]) not in curve_dict.keys(): - curve_dict[(ii[0],ii[2],ii[3])] = curve_ctr - curve_sparse_list.append([curve_ctr,ii[1],ii[-1]]) - curve_ctr += 1 - else: - curve_sparse_list.append([curve_dict[(ii[0],ii[2],ii[3])], - ii[1],ii[-1]]) - if (ii[1],ii[2],ii[3]) not in curve_dict.keys(): - curve_dict[(ii[1],ii[2],ii[3])] = curve_ctr - curve_sparse_list.append([curve_ctr,ii[0],ii[-1]]) - curve_ctr += 1 - else: - curve_sparse_list.append([curve_dict[(ii[1],ii[2],ii[3])], - ii[0],ii[-1]]) - row_list = [[] for i in range(curve_ctr)] - # Remove zeros - for ii in curve_sparse_list: - if ii[2]!=0: - row_list[ii[0]].append([ii[1],ii[2]]) - # Normalize - for row in row_list: - g = abs(gcd_list([ii[1] for ii in row])) - for ii in row: - ii[1] = int(round(ii[1]/g)) - row_list = set(tuple(tuple(tuple(ii) for ii in sorted(row)) - for row in row_list)) - mori_rays = np.zeros((len(row_list),num_divs), dtype=int) - for i,row in enumerate(row_list): - for ii in row: - mori_rays[i,int(round(ii[0]))] = round(ii[1]) - # Compute column corresponding to the origin - mori_rays[:,0] = -np.sum(mori_rays, axis=1) - return mori_rays - - def ambient_kahler_cone(self): - """ - **Description:** - Returns the Kähler cone of the ambient toric variety in the current - basis of divisors. - - **Arguments:** - None. - - **Returns:** - (Cone) The Kähler cone of the ambient toric variety. - """ - return self.ambient_mori_cone(in_basis=True).dual() - - def _construct_intnum_equations_4d(self): - """ - **Description:** - Auxiliary function used to compute the intersection numbers of the - ambient toric variety. This function is optimized for 4D varieties. - - **Arguments:** - None. 
- - **Returns:** - (tuple) A tuple where the first compotent is a sparse matrix M, the - second is a vector C, which are used to solve the system M*X=C, the - third is the list of intersection numbers not including - self-intersections, and the fourth is the list of intersection numbers - that are used as variables in the equation. - """ - # Origin is at index 0 - pts_ext = np.empty((self.frst().points().shape[0], - self.frst().points().shape[1]+1), - dtype=int) - pts_ext[:,:-1] = self.frst().points() - pts_ext[:,-1] = 1 - linear_relations = self.glsm_linear_relations(include_origin=False) - # First compute the distict intersection numbers - distintnum_array = sorted([ - [c for c in simp if c!=0] - + [1/abs(np.linalg.det([pts_ext[p] for p in simp]))] - for simp in self.frst().simplices()]) - frst = [[c for c in s if c != 0] for s in self.frst().simplices()] - simp_2 = set([j for i in [list(combinations(f,2)) for f in frst] - for j in i]) - simp_3 = set([j for i in [list(combinations(f,3)) for f in frst] - for j in i]) - # We construct and solve the linear system M*x + C = 0, where M is - # a rectangular mxn matrix and C is a vector. - ################################################################### - ### Define dictionaries, to be used to construct the linear system - ################################################################### - ## Dictionary of variables - # Most intersection numbers are trivially zero, find the possibly - # nonzero intersection numbers. - variable_array_1 = [tuple(j) for i in [[[s[0],s[0],s[1],s[2]], - [s[0],s[1],s[1],s[2]], - [s[0],s[1],s[2],s[2]]] - for s in simp_3] - for j in i] - variable_array_2 = [tuple(j) for i in [[[s[0],s[0],s[1],s[1]], - [s[0],s[0],s[0],s[1]], - [s[0],s[1],s[1],s[1]]] - for s in simp_2] - for j in i] - variable_array_3 = [(i,i,i,i) for i in range(1, len(pts_ext))] - variable_array = sorted(variable_array_1 + variable_array_2 - + variable_array_3) - variable_dict = {vv:v for v,vv in enumerate(variable_array)} - ## Dictionary to construct C - # C is constructed by adding/subtracting distinct intersection - # numbers. 
- c_dict = {s:[] for s in simp_3} - for d in distintnum_array: - c_dict[(d[0],d[1],d[2])] += [[d[3],d[4]]] - c_dict[(d[0],d[1],d[3])] += [[d[2],d[4]]] - c_dict[(d[0],d[2],d[3])] += [[d[1],d[4]]] - c_dict[(d[1],d[2],d[3])] += [[d[0],d[4]]] - ## Dictionary to construct M - eqn_array_1 = [tuple(s) for s in simp_3] - eqn_array_2 = [tuple(j) for i in [[[s[0],s[0],s[1]], - [s[0],s[1],s[1]]] - for s in simp_2] for j in i] - eqn_array_3 = [(i,i,i) for i in range(1, len(pts_ext))] - eqn_array = sorted(eqn_array_1 + eqn_array_2 + eqn_array_3) - eqn_dict = {eq:[] for eq in eqn_array} - for v in variable_array: - if v[0]==v[3]: - eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] - elif v[0]==v[2]: - eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] - eqn_dict[(v[0],v[1],v[3])] += [[v[2],variable_dict[v]]] - elif v[0]==v[1] and v[2]==v[3]: - eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] - eqn_dict[(v[0],v[2],v[3])] += [[v[1],variable_dict[v]]] - elif v[0]==v[1]: - eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] - eqn_dict[(v[0],v[1],v[3])] += [[v[2],variable_dict[v]]] - eqn_dict[(v[0],v[2],v[3])] += [[v[1],variable_dict[v]]] - elif v[1]==v[3]: - eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] - eqn_dict[(v[1],v[2],v[3])] += [[v[0],variable_dict[v]]] - elif v[1]==v[2]: - eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] - eqn_dict[(v[0],v[1],v[3])] += [[v[2],variable_dict[v]]] - eqn_dict[(v[1],v[2],v[3])] += [[v[0],variable_dict[v]]] - elif v[2]==v[3]: - eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] - eqn_dict[(v[0],v[2],v[3])] += [[v[1],variable_dict[v]]] - eqn_dict[(v[1],v[2],v[3])] += [[v[0],variable_dict[v]]] - else: - raise Exception("Failed to construct linear system.") - # Construct Linear System - num_rows = len(linear_relations)*len(eqn_array) - C = np.array([0.0]*num_rows) - M_row = [] - M_col = [] - M_val = [] - row_ctr = 0 - for eqn in eqn_array: - for lin in linear_relations: - if eqn[0]!=eqn[1] and eqn[1]!=eqn[2]: - c_temp = c_dict[eqn] - C[row_ctr] = sum([lin[cc[0]-1]*cc[1] for cc in c_temp]) - eqn_temp = eqn_dict[eqn] - for e in eqn_temp: - M_row.append(row_ctr) - M_col.append(e[1]) - M_val.append(lin[e[0]-1]) - row_ctr+=1 - Mat = csr_matrix((M_val,(M_row,M_col)), dtype=np.float64) - return Mat, C, distintnum_array, variable_array - - def _construct_intnum_equations(self): - """ - **Description:** - Auxiliary function used to compute the intersection numbers of the - ambient toric variety. - - **Arguments:** - None. - - **Returns:** - (tuple) A tuple where the first compotent is a sparse matrix M, the - second is a vector C, which are used to solve the system M*X=C, the - third is the list of intersection numbers not including - self-intersections, and the fourth is the list of intersection numbers - that are used as variables in the equation. 
- """ - dim = self.dim() - pts_ext = np.empty((self.frst().points().shape[0],dim+2), dtype=int) - pts_ext[:,:-1] = self.frst().points() - pts_ext[:,-1] = 1 - linear_relations = self.glsm_linear_relations(include_origin=False) - # First compute the distict intersection numbers - distintnum_array = sorted([ - [c for c in simp if c!=0] - + [1/abs(np.linalg.det([pts_ext[p] for p in simp]))] - for simp in self.frst().simplices()]) - frst = [[c for c in s if c != 0] for s in self.frst().simplices()] - simp_n = [set([j for i in [list(combinations(f,n)) for f in frst] - for j in i]) for n in range(2,dim+1)] - simp_n = [[np.array(c) for c in simp_n[n]] for n in range(len(simp_n))] - # We construct and solve the linear system M*x + C = 0, where M is - # a rectangular mxn matrix and C is a vector. - ################################################################### - ### Define dictionaries, to be used to construct the linear system - ################################################################### - ## Dictionary of variables - # Most intersection numbers are trivially zero, find the possibly - # nonzero intersection numbers. - choices_n = [] - for n in range(2,dim+1): - comb = list(combinations(range(dim),dim+1-n)) - choices = np.empty((len(comb),dim+1), dtype=int) - choices[:,0] = 0 - for k,c in enumerate(comb): - for i in range(1,dim+1): - choices[k,i] = choices[k,i-1] + (0 if i-1 in c else 1) - choices_n.append(choices) - variable_array_1 = [(i,)*(dim+1) for i in range(1,len(pts_ext))] - variable_array_n = [tuple(s[ch]) for n in range(len(simp_n)) - for s in simp_n[n] for ch in choices_n[n]] - variable_array = variable_array_1 + variable_array_n - variable_dict = {vv:v for v,vv in enumerate(variable_array)} - ## Dictionary to construct C - # C is constructed by adding/subtracting distinct intersection - # numbers. 
- c_dict = defaultdict(lambda: []) - for d in distintnum_array: - for i in range(len(d)-1): - c_dict[tuple(c for j,c in enumerate(d[:-1]) if j!= i) - ] += [(d[i],d[-1])] - ## Dictionary to construct M - eqn_array_1 = [tuple(s) for s in simp_n[-1]] - eqn_array_2 = [(i,)*dim for i in range(1, len(pts_ext))] - choices_n = [] - for n in range(2,dim): - comb = list(combinations(range(dim-1),dim-n)) - choices = np.empty((len(comb),dim), dtype=int) - choices[:,0] = 0 - for k,c in enumerate(comb): - for i in range(1,dim): - choices[k,i] = choices[k,i-1] + (0 if i-1 in c else 1) - choices_n.append(choices) - eqn_array_n = [tuple(s[ch]) for n in range(len(choices_n)) - for s in simp_n[n] for ch in choices_n[n]] - eqn_array = eqn_array_1 + eqn_array_2 + eqn_array_n - eqn_dict = defaultdict(lambda: []) - for v in variable_array: - for c in set(combinations(v,dim)): - k = None - for i in range(dim+1): - if i == dim or v[i] != c[i]: - k = i - break - eqn_dict[c] += [(v[k],variable_dict[v])] - # Construct Linear System - num_rows = len(linear_relations)*len(eqn_array) - C = np.zeros(num_rows, dtype=float) - M_row = [] - M_col = [] - M_val = [] - row_ctr = 0 - for eqn in eqn_array: - for lin in linear_relations: - if len(set(eqn)) == dim: - c_temp = c_dict[eqn] - C[row_ctr] = sum([lin[cc[0]-1]*cc[1] for cc in c_temp]) - eqn_temp = eqn_dict[eqn] - for e in eqn_temp: - M_row.append(row_ctr) - M_col.append(e[1]) - M_val.append(lin[e[0]-1]) - row_ctr+=1 - Mat = csr_matrix((M_val,(M_row,M_col)), dtype=np.float64) - return Mat, C, distintnum_array, variable_array - - def ambient_intersection_numbers(self, in_basis=False, - zero_as_anticanonical=False, backend="all", - check=True, backend_error_tol=1e-6, - round_to_zero_treshold=1e-3, - round_to_integer_error_tol=2e-5, - verbose=0, exact_arithmetic=False): - """ - **Description:** - Returns the intersection numbers of the ambient toric variety. - - :::tip experimental feature - The intersection numbers are computed as floating-point numbers by - default, but there is the option to turn them into rationals. The - process is fairly quick, but verifying that they are correct becomes - very slow at large $h^{1,1}$. - ::: - - **Arguments:** - - ```in_basis``` (boolean, optional, default=False): Return the - intersection numbers in the current basis of divisors. - - ```zero_as_anticanonical``` (boolean, optional, default=False): Treat - the zeroth index as corresponding to the anticanonical divisor - instead of the canonical divisor. - - ```backend``` (string, optional, default="all"): The sparse linear - solver to use. Options are "all", "sksparse" and "scipy". When set - to "all" every solver is tried in order until one succeeds. - - ```check``` (boolean, optional, default=True): Whether to explicitly - check the solution to the linear system. - - ```backend_error_tol``` (float, optional, default=1e-3): Error - tolerance for the solution of the linear system. - - ```round_to_zero_treshold``` (float, optional, default=1e-3): - Intersection numbers with magnitude smaller than this treshold are - rounded to zero. - - ```round_to_integer_error_tol``` (float, optional, default=1e-3): All - intersection numbers of the Calabi-Yau hypersurface must be integers - up to errors less than this value, when the CY is smooth. - - ```verbose``` (integer, optional, default=0): The verbosity level. - - verbose = 0: Do not print anything. - - verbose = 1: Print linear backend warnings. 
- - ```exact_arithmetic``` (boolean, optional, default=False): Converts - the intersection numbers into exact rational fractions. - - Returns: - (list) A matrix containing nonzero intersection numbers, in the format: - [[A,B,C,...,D,Kappa_ABC...D], ...], where A,B,C,...,D are indices of - divisors and Kappa_ABC...D is the intersection number. - """ - # 0: (canon,float), 1: (anticanon, float), 2: (basis, float) - # 4: (canon,fmpq), 5: (anticanon, fmpq), 6: (basis, fmpq) - args_id = ((1*zero_as_anticanonical if not in_basis else 0) - + 2*in_basis + 4*exact_arithmetic) - if self._ambient_intersection_numbers[args_id] is not None: - return np.array(self._ambient_intersection_numbers[args_id]) - if (self._ambient_intersection_numbers[0] is None - or (self._ambient_intersection_numbers[4] is None - and exact_arithmetic)): - backends = ["all", "sksparse", "scipy"] - if backend not in backends: - raise Exception("Invalid linear system backend. " - f"The options are: {backends}.") - if exact_arithmetic and not config.enable_experimental_features: - raise Exception("Using exact arithmetic is an experimental " - "feature and must be enabled in the " - "configuration.") - # Construct the linear equations - # Note that self.dim gives the dimension of the CY not the of the - # ambient variety - Mat, C, distintnum_array, variable_array = ( - self._construct_intnum_equations_4d() - if self.dim() == 3 else - self._construct_intnum_equations()) - # The system to be solved is Mat*x + C = 0. This is an - # overdetermined but consistent linear system. - # There is a unique solution to this system. We solve it by - # defining MM = Mat.transpose()*Mat and CC = - Mat.transpose()*C, - # and solve - # MM*x = CC - # Since MM is a positive definite full rank matrix, this system can - # be solved using via a Cholesky decomposition. - solution = solve_linear_system(Mat, C, backend=backend, check=check, - backend_error_tol=backend_error_tol, - verbose=verbose) - if solution is None: - raise Exception("Linear system solution failed.") - if exact_arithmetic: - solution_fmpq = fmpq_mat([array_float_to_fmpq(solution) - .tolist()]).transpose() - if check: - Mat_fmpq = fmpq_mat(Mat.shape[0],Mat.shape[1]) - Mat_dok = Mat.todok() - for k in Mat_dok.keys(): - Mat_fmpq[k] = float_to_fmpq(Mat_dok[k]) - C_fmpq = fmpq_mat([array_float_to_fmpq(C) - .tolist()]).transpose() - res = Mat_fmpq*solution_fmpq + C_fmpq - if any(np.array(res.tolist()).flat): - raise Exception("Conversion to rationals failed.") - if exact_arithmetic: - ambient_intnum = ([ii[:-1]+[float_to_fmpq(ii[-1])] - for ii in distintnum_array] - + [list(ii) + [solution_fmpq[i,0]] - for i,ii in enumerate(variable_array)]) - else: - ambient_intnum = (distintnum_array - +[list(ii) + [solution[i]] - for i,ii in enumerate(variable_array) - if abs(solution[i])>round_to_zero_treshold]) - # Add intersections with canonical divisor - # First we only compute intersection numbers with a single index 0 - # This is because precision errors add up significantly for - # intersection numbers with self-intersections of the canonical - # divisor - dim = self.dim() - canon_intnum = defaultdict(lambda: 0) - for ii in ambient_intnum: - choices = set(tuple(c for i,c in enumerate(ii[:-1]) if i!=j) - for j in range(dim+1)) - for c in choices: - canon_intnum[(0,)+c] -= ii[-1] - # Now we round all intersection numbers of the form K_0i...j to - # integers if the CY is smooth. 
Otherwise, we only remove the zero - # elements - if self.is_smooth() and not exact_arithmetic: - for ii in list(canon_intnum.keys()): - val = canon_intnum[ii] - round_val = int(round(val)) - if abs(val-round_val) > round_to_integer_error_tol: - print(ii, val) - raise Exception("Non-integer intersection numbers " - "detected in a smooth CY.") - if round_val != 0: - canon_intnum[ii] = round_val - else: - canon_intnum.pop(ii) - elif not exact_arithmetic: - for ii in list(canon_intnum.keys()): - if abs(canon_intnum[ii]) < round_to_zero_treshold: - canon_intnum.pop(ii) - # Now we compute remaining intersection numbers - canon_intnum_n = [canon_intnum] - for n in range(2,dim+2): - tmp_intnum = defaultdict(lambda: 0) - for ii,ii_val in canon_intnum_n[-1].items(): - choices = set(tuple(c for i,c in enumerate(ii[n-1:]) - if i!=j) for j in range(dim+2-n)) - for c in choices: - tmp_intnum[(0,)*n+c] -= ii_val - if not exact_arithmetic: - for ii in list(tmp_intnum.keys()): - if abs(tmp_intnum[ii]) < round_to_zero_treshold: - tmp_intnum.pop(ii) - canon_intnum_n.append(tmp_intnum) - for i in range(len(canon_intnum_n)): - ambient_intnum.extend([list(ii)+[canon_intnum_n[i][ii]] - for ii in canon_intnum_n[i]]) - ambient_intnum = sorted(ambient_intnum) - if exact_arithmetic: - self._ambient_intersection_numbers[4]= np.array(ambient_intnum) - self._ambient_intersection_numbers[0]= [ - ii[:-1] + [fmpq_to_float(ii[-1])] - for ii in ambient_intnum] - else: - self._ambient_intersection_numbers[0]= np.array(ambient_intnum) - # Now ambient intersection numbers have been computed - if zero_as_anticanonical and not in_basis: - self._ambient_intersection_numbers[args_id] = np.array([ - ii[:-1].tolist() - + [ii[-1]*(-1 if sum(ii[:-1] == 0)%2 == 1 else 1)] for ii in - self._ambient_intersection_numbers[4*exact_arithmetic]]) - elif in_basis: - basis = self.divisor_basis() - if len(basis.shape) == 2: # If basis is matrix - if basis.dtype == float and exact_arithmetic: - basis = array_float_to_fmpq(basis) - self._ambient_intersection_numbers[2+4*exact_arithmetic] = ( - symmetric_sparse_to_dense_in_basis( - self._ambient_intersection_numbers[4*exact_arithmetic], - basis)) - else: - self._ambient_intersection_numbers[2+4*exact_arithmetic] = ( - filter_tensor_indices( - self._ambient_intersection_numbers[4*exact_arithmetic], - basis)) - return np.array(self._ambient_intersection_numbers[args_id]) - def intersection_numbers(self, in_basis=False, zero_as_anticanonical=False, backend="all", check=True, backend_error_tol=1e-6, @@ -1494,11 +929,10 @@ def intersection_numbers(self, in_basis=False, zero_as_anticanonical=False, the intersection numbers into exact rational fractions. Returns: - (list) A matrix containing nonzero intersection numbers, in the format: - [[A,B,C,...,D,Kappa_ABC...D], ...], where A,B,C,...,D are indices of - divisors and Kappa_ABC...D is the intersection number. + (dict) A dictionary containing nonzero intersection numbers. The keys + are divisor indices in ascending order. 
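(Illustrative sketch of the new dictionary return format described above; the keys and values in the comment are made up and only the structure is meant. Assumes a CalabiYau object named `cy`.)

```python
intnums = cy.intersection_numbers(in_basis=True)
# e.g. {(0, 0, 1): 2, (0, 1, 3): 5, ...}: keys are tuples of divisor indices
# in ascending order, values are the corresponding intersection numbers.
kappa_012 = intnums.get((0, 1, 2), 0)   # numbers absent from the dict are zero
```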
""" - if exact_arithmetic and not config.enable_experimental_features: + if exact_arithmetic and not config._exp_features_enabled: raise Exception("Using exact arithmetic is an experimental " "feature and must be enabled in the " "configuration.") @@ -1507,26 +941,42 @@ def intersection_numbers(self, in_basis=False, zero_as_anticanonical=False, args_id = ((1*zero_as_anticanonical if not in_basis else 0) + 2*in_basis + 4*exact_arithmetic) if self._intersection_numbers[args_id] is not None: - return np.array(self._intersection_numbers[args_id]) + return copy.copy(self._intersection_numbers[args_id]) if ((self._intersection_numbers[0] is None and not exact_arithmetic) or (self._intersection_numbers[4] is None and exact_arithmetic)): - ambient_intnums = self.ambient_intersection_numbers(in_basis=False, - backend=backend, check=check, + ambient_intnums = self.ambient_variety().intersection_numbers( + in_basis=False, backend=backend, check=check, backend_error_tol=backend_error_tol, round_to_zero_treshold=round_to_zero_treshold, round_to_integer_error_tol=round_to_integer_error_tol, verbose=verbose, exact_arithmetic=exact_arithmetic) - intnum = np.array([[int(c) for c in ii[1:-1]]+[-ii[-1]] - for ii in ambient_intnums if ii[0]==0], - dtype=(None if exact_arithmetic - or not self.is_smooth() else int)) - self._intersection_numbers[4*exact_arithmetic] = intnum + if self._is_hypersurface: + intnums_cy = {ii[1:]:-ambient_intnums[ii] for ii in ambient_intnums if 0 in ii} + else: + triang_pts = [tuple(pt) for pt in self.ambient_variety().triangulation().points()] + parts = self._nef_part + ambient_dim = self.ambient_dim() + intnums_dict = ambient_intnums + for dd in range(len(parts)): + intnums_dict_tmp = defaultdict(lambda: 0) + for ii in intnums_dict: + choices = set(tuple(sorted(c for i,c in enumerate(ii) if i!=j)) for j in range(ambient_dim-dd) if ii[j] in parts[dd]) + for c in choices: + intnums_dict_tmp[c] += intnums_dict[ii] + intnums_dict = {ii:intnums_dict_tmp[ii] for ii in intnums_dict_tmp if abs(intnums_dict_tmp[ii]) > round_to_zero_treshold} + intnums_cy = intnums_dict + if all(abs(round(intnums_cy[ii])-intnums_cy[ii]) < round_to_integer_error_tol for ii in intnums_cy): + self._is_smooth = True + for ii in intnums_cy: + intnums_cy[ii] = int(round(intnums_cy[ii])) + self._intersection_numbers[4*exact_arithmetic] = intnums_cy # Now intersection numbers have been computed if zero_as_anticanonical and not in_basis: - self._intersection_numbers[args_id] = np.array([ - ii[:-1].tolist() - + [ii[-1]*(-1 if sum(ii[:-1] == 0)%2 == 1 else 1)] - for ii in self._intersection_numbers[4*exact_arithmetic]]) + self._intersection_numbers[args_id] = self._intersection_numbers[4*exact_arithmetic] + for ii in self._intersection_numbers[4*exact_arithmetic]: + if 0 not in ii: + continue + self._intersection_numbers[args_id] *= (-1 if sum(ii == 0)%2 == 1 else 1) elif in_basis: basis = self.divisor_basis() if len(basis.shape) == 2: # If basis is matrix @@ -1539,7 +989,33 @@ def intersection_numbers(self, in_basis=False, zero_as_anticanonical=False, else: self._intersection_numbers[args_id] = filter_tensor_indices( self._intersection_numbers[4*exact_arithmetic], basis) - return np.array(self._intersection_numbers[args_id]) + return copy.copy(self._intersection_numbers[args_id]) + + def prime_toric_divisors(self): + """ + **Description:** + Returns the list of inherited prime toric divisors. 
+ + **Arguments:** + None + + **Returns:** + (list) A list of indices indicating the prime toric divisors of the + ambient variety that intersect the CY. + """ + if self._prime_divs is not None: + return self._prime_divs + intnum_ind = set.union(*[set(ii) for ii in self.intersection_numbers()]) + self._prime_divs = tuple(i for i in intnum_ind if i) + # If there are some non-intersecting divisors we construct a better + # toric variety in the background + if len(self._prime_divs) == self.ambient_variety().triangulation().points().shape[0]-1: + self._optimal_ambient_var = self._ambient_var + else: + heights = self._ambient_var.mori_cone().dual().tip_of_stretched_cone(1)[[0]+list(self._prime_divs)] + self._optimal_ambient_var = self.polytope().triangulate(heights=heights, points=self.polytope().points_to_indices( + self.ambient_variety().triangulation().points()[[0]+list(self._prime_divs)])).get_toric_variety() + return self._prime_divs def second_chern_class(self, in_basis=True): """ @@ -1561,23 +1037,23 @@ def second_chern_class(self, in_basis=True): if self.dim() != 3: raise Exception("This function currently only supports 3-folds.") if self._second_chern_class is None: - int_nums = [[c-1 for c in ii[:-1]] +[ii[-1]] for ii in - self.intersection_numbers(in_basis=False) - if min(ii[:-1]) != 0] c2 = np.zeros(self.h11()+self.dim()+1, dtype=int) - for ii in int_nums: + intnums = self.intersection_numbers(in_basis=False) + for ii in intnums: + if ii[0] == 0: + continue if ii[0] == ii[1] == ii[2]: continue elif ii[0] == ii[1]: - c2[ii[0]] += ii[3] + c2[ii[0]-1] += intnums[ii] elif ii[0] == ii[2]: - c2[ii[0]] += ii[3] + c2[ii[0]-1] += intnums[ii] elif ii[1] == ii[2]: - c2[ii[1]] += ii[3] + c2[ii[1]-1] += intnums[ii] else: - c2[ii[0]] += ii[3] - c2[ii[1]] += ii[3] - c2[ii[2]] += ii[3] + c2[ii[0]-1] += intnums[ii] + c2[ii[1]-1] += intnums[ii] + c2[ii[2]-1] += intnums[ii] if in_basis: basis = self.divisor_basis(include_origin=False) if len(basis.shape) == 2: # If basis is matrix @@ -1598,74 +1074,106 @@ def is_smooth(self): """ if self._is_smooth is not None: return self._is_smooth - sm = (True if self.dim() <= 3 else - all(c.is_smooth() for c in self.frst().fan_cones( - self.dim(),self.dim()-1))) - self._is_smooth = sm + if self._is_hypersurface: + sm = (True if self.dim() <= 3 else + all(c.is_smooth() for c in self.ambient_variety().triangulation().fan_cones(self.dim(),self.dim()-1))) + self._is_smooth = sm + else: + print("Warning: smoothness check is not implemented for CICYs.") + self._is_smooth = False return self._is_smooth - def ambient_effective_generators(self): + def toric_mori_cone(self, in_basis=False, include_origin=True, + from_intersection_numbers=False): """ **Description:** - Returns the rays that generate the effective cone of the ambient - variety. + Returns the Mori cone inferred from toric geometry. **Arguments:** - None. + - ```in_basis``` (boolean, optional, default=False): Use the current + basis of curves, which is dual to what the basis returned by the + [```divisor_basis```](#divisor_basis) function. + - ```include_origin``` (boolean, optional, default=True): Includes the + origin of the polytope in the computation, which corresponds to the + canonical divisor. + - ```from_intersection_numbers``` (boolean, optional, default=False): + Compute the rays of the Mori cone using the intersection numbers of + the variety. This can be faster if they are already computed. + The set of rays may be different, but they define the same cone. 
**Returns:** - (list) The rays that generate the effective cone of the ambient - variety. + (Cone) The Mori cone inferred from toric geometry. """ - if self._ambient_eff_gens is not None: - return np.array(self._ambient_eff_gens) - rays = np.eye(self.h11(), dtype=float).tolist() - linrels = self.glsm_linear_relations(include_origin=True) + if self._mori_cone[0] is None: + prime_toric_divs = self.prime_toric_divisors() + self._mori_cone[0] = self._optimal_ambient_var.mori_cone() + # 0: All divs, 1: No origin, 2: In basis + args_id = ((not include_origin)*1 if not in_basis else 0) + in_basis*2 + if self._mori_cone[args_id] is not None: + return self._mori_cone[args_id] + rays = self._mori_cone[0].rays() basis = self.divisor_basis() - if len(basis.shape) != 1: - raise Exception("Generic bases are not yet supported.") - no_basis = [i for i in range(self.h11()+self.dim()+2) - if i not in basis] - linrels_reord = linrels[:,no_basis+basis.tolist()] - linrels_rref = np.array( - fmpz_mat(linrels_reord.tolist()).rref()[0].tolist(), - dtype=int) - for i in range(linrels_rref.shape[0]): - linrels_rref[i,:] //= int(round(gcd_list(linrels_rref[i,:]))) - for i,ii in enumerate(no_basis): - linrels_reord[:,ii] = linrels_rref[:,i] - for i,ii in enumerate(basis,len(no_basis)): - linrels_reord[:,ii] = linrels_rref[:,i] - for l in linrels_reord: - if l[0] != 0: - continue - for i in no_basis: - if l[i] != 0: - r = [0]*self.h11() - for j,jj in enumerate(basis): - r[j] = l[jj]/(-l[i]) - for j in no_basis: - if l[j] != 0 and j != i: - raise Exception("An unexpected error occured.") - rays.append(r) - self._ambient_eff_gens = np.array(rays, dtype=float) - return np.array(self._ambient_eff_gens) - - def ambient_effective_cone(self): + if include_origin and not in_basis: + new_rays = rays + elif not include_origin and not in_basis: + new_rays = rays[:,1:] + else: + if len(basis.shape) == 2: # If basis is matrix + new_rays = rays.dot(basis.T) + else: + new_rays = rays[:,basis] + c = Cone(new_rays, check=len(basis.shape)==2) + self._mori_cone[args_id] = c + return self._mori_cone[args_id] + + def toric_kahler_cone(self): + """ + **Description:** + Returns the Kähler cone inferred from toric geometry in the current + basis of divisors. + + **Arguments:** + None. + + **Returns:** + (Cone) The Kähler cone inferred from toric geometry. + """ + return self.toric_mori_cone(in_basis=True).dual() + + def toric_effective_generators(self): + """ + **Description:** + Returns the rays that generate the effective cone inferred from toric + geometry. + + **Arguments:** + None. + + **Returns:** + (list) The rays that generate the toric effective cone. + """ + if self._eff_gens is None: + self.prime_toric_divisors() + self._optimal_ambient_var.set_divisor_basis(self.divisor_basis()) + self._eff_gens = self._optimal_ambient_var.effective_generators() + return np.array(self._eff_gens) + + def effective_cone(self): """ **Description:** - Returns the effective cone of the ambient variety. + Returns the effective cone inferred from toric geometry. **Arguments:** None. **Returns:** - (Cone) The effective cone of the ambient variety. + (Cone) The toric effective cone. 
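(Usage sketch for the renamed cone interface in this hunk, assuming a CalabiYau object named `cy`; note that `tip_of_stretched_cone` now returns only the point, per the cone.py change further below.)

```python
mori   = cy.toric_mori_cone(in_basis=True)   # formerly ambient_mori_cone
kahler = cy.toric_kahler_cone()              # dual cone, in the divisor basis
eff    = cy.effective_cone()                 # formerly ambient_effective_cone

t = kahler.tip_of_stretched_cone(1)          # a point inside the Kähler cone
vol = cy.compute_cy_volume(t)                # CY volume at that point
```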
""" - if self._ambient_eff_cone is not None: - return self._ambient_eff_cone - self._ambient_eff_cone = Cone(self.ambient_effective_generators()) - return self._ambient_eff_cone + if self._eff_cone is None: + self.prime_toric_divisors() + self._optimal_ambient_var.set_divisor_basis(self.divisor_basis()) + self._eff_cone = self._optimal_ambient_var.effective_cone() + return self._eff_cone def compute_cy_volume(self, tloc): """ @@ -1694,9 +1202,8 @@ def compute_cy_volume(self, tloc): else: for ii in intnums: mult = np.prod([factorial(c) - for c in Counter(ii[:self.dim()]).values()]) - xvol += (ii[-1]*np.prod([tloc[int(j)] for j in ii[:-1]]) - /mult) + for c in Counter(ii).values()]) + xvol += intnums[ii]*np.prod([tloc[int(j)] for j in ii])/mult return xvol def compute_divisor_volumes(self, tloc): @@ -1727,9 +1234,9 @@ def compute_divisor_volumes(self, tloc): else: tau = np.zeros(len(basis), dtype=float) for ii in intnums: - c = Counter(ii[:-1]) + c = Counter(ii) for j in c.keys(): - tau[j] += ii[-1] * np.prod( + tau[j] += intnums[ii] * np.prod( [tloc[k]**(c[k]-(j==k))/factorial(c[k]-(j==k)) for k in c.keys()]) return np.array(tau) @@ -1762,20 +1269,20 @@ def compute_AA(self, tloc): return AA AA = np.zeros((len(basis),)*2, dtype=float) for ii in intnums: - ii_list = Counter(ii[:-1]).most_common(3) + ii_list = Counter(ii).most_common(3) if len(ii_list)==1: - AA[ii_list[0][0],ii_list[0][0]] += ii[-1]*tloc[ii_list[0][0]] + AA[ii_list[0][0],ii_list[0][0]] += intnums[ii]*tloc[ii_list[0][0]] elif len(ii_list)==2: - AA[ii_list[0][0],ii_list[0][0]] += ii[-1]*tloc[ii_list[1][0]] - AA[ii_list[0][0],ii_list[1][0]] += ii[-1]*tloc[ii_list[0][0]] - AA[ii_list[1][0],ii_list[0][0]] += ii[-1]*tloc[ii_list[0][0]] + AA[ii_list[0][0],ii_list[0][0]] += intnums[ii]*tloc[ii_list[1][0]] + AA[ii_list[0][0],ii_list[1][0]] += intnums[ii]*tloc[ii_list[0][0]] + AA[ii_list[1][0],ii_list[0][0]] += intnums[ii]*tloc[ii_list[0][0]] elif len(ii_list)==3: - AA[ii_list[0][0],ii_list[1][0]] += ii[-1]*tloc[ii_list[2][0]] - AA[ii_list[1][0],ii_list[0][0]] += ii[-1]*tloc[ii_list[2][0]] - AA[ii_list[0][0],ii_list[2][0]] += ii[-1]*tloc[ii_list[1][0]] - AA[ii_list[2][0],ii_list[0][0]] += ii[-1]*tloc[ii_list[1][0]] - AA[ii_list[1][0],ii_list[2][0]] += ii[-1]*tloc[ii_list[0][0]] - AA[ii_list[2][0],ii_list[1][0]] += ii[-1]*tloc[ii_list[0][0]] + AA[ii_list[0][0],ii_list[1][0]] += intnums[ii]*tloc[ii_list[2][0]] + AA[ii_list[1][0],ii_list[0][0]] += intnums[ii]*tloc[ii_list[2][0]] + AA[ii_list[0][0],ii_list[2][0]] += intnums[ii]*tloc[ii_list[1][0]] + AA[ii_list[2][0],ii_list[0][0]] += intnums[ii]*tloc[ii_list[1][0]] + AA[ii_list[1][0],ii_list[2][0]] += intnums[ii]*tloc[ii_list[0][0]] + AA[ii_list[2][0],ii_list[1][0]] += intnums[ii]*tloc[ii_list[0][0]] else: raise Exception("Error: Inconsistent intersection numbers.") return AA diff --git a/cytools/cone.py b/cytools/cone.py index 8c074f3..262b868 100644 --- a/cytools/cone.py +++ b/cytools/cone.py @@ -111,13 +111,13 @@ def __init__(self, rays=None, hyperplanes=None, check=True): raise Exception("Input must be a matrix.") t = type(tmp_rays[0,0]) if t == fmpz: - if not config.enable_experimental_features: + if not config._exp_features_enabled: print("Arbitrary precision data types only have " "experimental support, so experimental features " "must be enabled in the configuration.") rays = array_fmpz_to_int(tmp_rays) elif t == fmpq: - if not config.enable_experimental_features: + if not config._exp_features_enabled: print("Arbitrary precision data types only have " "experimental support, so 
experimental features " "must be enabled in the configuration.") @@ -145,13 +145,13 @@ def __init__(self, rays=None, hyperplanes=None, check=True): raise Exception("Input must be a matrix.") t = type(tmp_hp[0,0]) if t == fmpz: - if not config.enable_experimental_features: + if not config._exp_features_enabled: print("Arbitrary precision data types only have " "experimental support, so experimental features " "must be enabled in the configuration.") hyperplanes = array_fmpz_to_int(tmp_hp) elif t == fmpq: - if not config.enable_experimental_features: + if not config._exp_features_enabled: print("Arbitrary precision data types only have " "experimental support, so experimental features " "must be enabled in the configuration.") @@ -225,9 +225,9 @@ def __repr__(self): if self._rays is not None: return (f"A {self._dim}-dimensional rational polyhedral cone in " f"RR^{self._ambient_dim} generated by {len(self._rays)} " - f"rays.") + f"rays") return (f"A rational polyhedral cone in RR^{self._ambient_dim} " - f"defined by {len(self._hyperplanes)} hyperplanes normals.") + f"defined by {len(self._hyperplanes)} hyperplanes normals") def __eq__(self, other): """ @@ -622,7 +622,7 @@ def tip_of_stretched_cone(self, c, backend="all", check=True, "constraint_error_tol, or using a different " "backend") if optimization_done: - return (np.sqrt(solution_val), np.array(solution_x)) + return np.array(solution_x) def is_solid(self, backend=None, c=0.01): """ diff --git a/cytools/config.py b/cytools/config.py index 72ee59f..2ea002b 100644 --- a/cytools/config.py +++ b/cytools/config.py @@ -36,15 +36,29 @@ def check_mosek_license(): mosek.Env().Task(0,0).optimize() mosek_is_activated = True except mosek.Error as e: - print(e) - print("Info: Mosek is not activated. " - "An alternative optimizer will be used.") + print("\nInfo: Mosek is not activated. " + "An alternative optimizer will be used.\n" + f"Error encountered: {e}\n") mosek_is_activated = False except: - print("Info: There was a problem with Mosek. " - "An alternative optimizer will be used.") + print("\nInfo: There was a problem with Mosek. " + "An alternative optimizer will be used.\n") mosek_is_activated = False check_mosek_license() +def set_mosek_path(path): + global mosek_license + mosek_license = path + check_mosek_license() + # Lock experimental features by default. -enable_experimental_features = False +_exp_features_enabled = False + +def enable_experimental_features(): + global _exp_features_enabled + _exp_features_enabled = True + print("\n**************************************************************\n" + "Warning: You have enabled experimental features of CYTools.\n" + "Some of these features may be broken or not fully tested,\n" + "and they may undergo significant changes in future versions.\n" + "**************************************************************\n") diff --git a/cytools/polytope.py b/cytools/polytope.py index b0a1d14..d92fd12 100644 --- a/cytools/polytope.py +++ b/cytools/polytope.py @@ -44,7 +44,7 @@ class Polytope: This class handles all computations relating to lattice polytopes, such as the computation of its lattice points and faces. When using reflexive polytopes, it also allows the computation of topological properties of the - arising Calabi-Yau manifolds that only depend on the polytope. + arising Calabi-Yau hypersurfaces that only depend on the polytope. 
## Constructor @@ -159,10 +159,9 @@ def __init__(self, points, backend=None): ppl_pt = ppl.point(sum(pt[i]*vrs[i] for i in range(self._dim))) gs.insert(ppl_pt) self._optimal_poly = ppl.C_Polyhedron(gs) - optimal_ineqs = [ - list(ineq.coefficients()) - + [ineq.inhomogeneous_term()] - for ineq in self._optimal_poly.minimized_constraints()] + optimal_ineqs = [list(ineq.coefficients()) + + [ineq.inhomogeneous_term()] + for ineq in self._optimal_poly.minimized_constraints()] self._optimal_ineqs = np.array(optimal_ineqs, dtype=int) elif backend == "qhull": if self._dim == 0: # qhull cannot handle 0-dimensional polytopes @@ -184,10 +183,9 @@ def __init__(self, points, backend=None): if self._dim == 0: # PALP cannot handle 0-dimensional polytopes self._optimal_ineqs = np.array([[0]]) else: - palp = subprocess.Popen( - (config.palp_path + "poly.x", "-e"), - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, universal_newlines=True) + palp = subprocess.Popen((config.palp_path + "poly.x", "-e"), + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) pt_list = "" optimal_pts = {tuple(pt) for pt in self._optimal_pts} for pt in optimal_pts: @@ -205,7 +203,7 @@ def __init__(self, points, backend=None): self._is_reflexive = "Vertices" in line ineqs_shape = [int(c) for c in line.split()[:2]] tmp_ineqs = [[int(c) for c in palp_out[i+j+1].split()] - for j in range(ineqs_shape[0])] + for j in range(ineqs_shape[0])] break if ineqs_shape[0] < ineqs_shape[1]: # Check if transposed tmp_ineqs = np.array(tmp_ineqs).T @@ -213,8 +211,7 @@ def __init__(self, points, backend=None): tmp_ineqs = np.array(tmp_ineqs) if self._is_reflexive: ineqs_shape = tmp_ineqs.shape - tmp_ineqs2 = np.empty((ineqs_shape[0], ineqs_shape[1]+1), - dtype=int) + tmp_ineqs2 = np.empty((ineqs_shape[0], ineqs_shape[1]+1), dtype=int) tmp_ineqs2[:,:-1] = tmp_ineqs tmp_ineqs2[:,-1] = 1 tmp_ineqs = tmp_ineqs2 @@ -226,15 +223,13 @@ def __init__(self, points, backend=None): self._optimal_ineqs_ext[:,self._dim_diff:] = self._optimal_ineqs self._optimal_ineqs_ext[:,:self._dim_diff] = 0 self._input_ineqs = np.empty(shape, dtype=int) - self._input_ineqs[:,:-1] = self._transf_matrix.T.dot( - self._optimal_ineqs_ext[:,:-1].T).T + self._input_ineqs[:,:-1] = self._transf_matrix.T.dot(self._optimal_ineqs_ext[:,:-1].T).T self._input_ineqs[:,-1] = [self._optimal_ineqs[i,-1] - v[:-1].dot(self._transl_vector) for i,v in enumerate(self._input_ineqs)] else: self._input_ineqs = np.empty(self._optimal_ineqs.shape, dtype=int) - self._input_ineqs[:,:-1] = self._transf_matrix.T.dot( - self._optimal_ineqs[:,:-1].T).T + self._input_ineqs[:,:-1] = self._transf_matrix.T.dot(self._optimal_ineqs[:,:-1].T).T self._input_ineqs[:,-1] = self._optimal_ineqs[:,-1] # Initialize remaining hidden attributes self._hash = None @@ -259,9 +254,9 @@ def __init__(self, points, backend=None): self._volume = None self._normal_form = [None]*2 self._autos = [None]*2 - self._glsm_charge_matrix = [None]*2 - self._glsm_linrels = [None]*2 - self._glsm_basis = [None]*4 + self._glsm_charge_matrix = dict() + self._glsm_linrels = dict() + self._glsm_basis = dict() def clear_cache(self): """ @@ -296,9 +291,9 @@ def clear_cache(self): self._volume = None self._normal_form = [None]*2 self._autos = [None]*2 - self._glsm_charge_matrix = [None]*2 - self._glsm_linrels = [None]*2 - self._glsm_basis = [None]*4 + self._glsm_charge_matrix = dict() + self._glsm_linrels = dict() + self._glsm_basis = dict() def __repr__(self): """ @@ -328,8 +323,7 @@ 
def __eq__(self, other): """ if not isinstance(other, Polytope): return NotImplemented - return (sorted(self.vertices().tolist()) - == sorted(other.vertices().tolist())) + return sorted(self.vertices().tolist()) == sorted(other.vertices().tolist()) def __ne__(self, other): """ @@ -344,8 +338,7 @@ def __ne__(self, other): """ if not isinstance(other, Polytope): return NotImplemented - return not (sorted(self.vertices().tolist()) - == sorted(other.vertices().tolist())) + return not sorted(self.vertices().tolist()) == sorted(other.vertices().tolist()) def __hash__(self): """ @@ -378,10 +371,8 @@ def is_linearly_equivalent(self, other, backend="native"): **Returns:** (bool) The truth value of the polytopes being linearly equivalent. """ - return (self.normal_form( - affine_transform=False, backend=backend).tolist() - == other.normal_form( - affine_transform=False, backend=backend).tolist()) + return (self.normal_form(affine_transform=False, backend=backend).tolist() + == other.normal_form(affine_transform=False, backend=backend).tolist()) def is_affinely_equivalent(self, other): """ @@ -476,10 +467,9 @@ def _points_saturated(self): points = [self._optimal_pts[0]] facet_ind = [frozenset([0])] else: - palp = subprocess.Popen( - (config.palp_path + "poly.x", "-p"), - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, universal_newlines=True) + palp = subprocess.Popen((config.palp_path + "poly.x", "-p"), + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) pt_list = "" optimal_pts = {tuple(pt) for pt in self._optimal_pts} for pt in optimal_pts: @@ -497,8 +487,7 @@ def _points_saturated(self): pts_shape = [int(c) for c in line.split()[:2]] tmp_pts = np.empty(pts_shape, dtype=int) for j in range(pts_shape[0]): - tmp_pts[j,:] = ( - [int(c) for c in palp_out[i+j+1].split()]) + tmp_pts[j,:] = [int(c) for c in palp_out[i+j+1].split()] break if pts_shape[0] < pts_shape[1]: # Check if transposed points = tmp_pts.T @@ -506,9 +495,8 @@ def _points_saturated(self): points = tmp_pts # Now we find which inequialities each point saturates ineqs = self._optimal_ineqs - facet_ind = [frozenset(i for i,ii in enumerate(ineqs) - if ii[:-1].dot(pt) + ii[-1] == 0) - for pt in points] + facet_ind = [frozenset(i for i,ii in enumerate(ineqs) if ii[:-1].dot(pt) + ii[-1] == 0) + for pt in points] # Otherwise we use the algorithm by Volker Braun. else: # Find bounding box and sort by decreasing dimension size @@ -535,14 +523,12 @@ def _points_saturated(self): i_max = box_max[0] # Find the lower bound for the allowed region while i_min <= i_max: - if all(i_min*ineqs[i,0] + tmp_v[i] >= 0 - for i in range(len(tmp_v))): + if all(i_min*ineqs[i,0] + tmp_v[i] >= 0 for i in range(len(tmp_v))): break i_min += 1 # Find the upper bound for the allowed region while i_min <= i_max: - if all(i_max*ineqs[i,0] + tmp_v[i] >= 0 - for i in range(len(tmp_v))): + if all(i_max*ineqs[i,0] + tmp_v[i] >= 0 for i in range(len(tmp_v))): break i_max -= 1 # The points i_min .. i_max are contained in the polytope @@ -583,12 +569,8 @@ def _points_saturated(self): for i in range(points_mat.shape[0]): points_mat[i,:] += self._transl_vector # Organize the points as explained above. 
- self._points_sat = sorted( - [(tuple(points_mat[i]), facet_ind[i]) - for i in range(len(points))], - key=(lambda p: - (-(len(p[1]) if len(p[1]) > 0 else 1e9),) - + tuple(p[0]))) + self._points_sat = sorted([(tuple(points_mat[i]), facet_ind[i]) for i in range(len(points))], + key=(lambda p: (-(len(p[1]) if len(p[1]) > 0 else 1e9),) + tuple(p[0]))) self._pts_dict = {ii[0]:i for i,ii in enumerate(self._points_sat)} return copy.copy(self._points_sat) @@ -630,9 +612,7 @@ def interior_points(self, as_indices=False): (list) The list of interior lattice points of the polytope. """ if self._interior_points is None: - self._interior_points = np.array( - [pt[0] for pt in self._points_saturated() - if len(pt[1]) == 0]) + self._interior_points = np.array([pt[0] for pt in self._points_saturated() if len(pt[1]) == 0]) if as_indices: return self.points_to_indices(self._interior_points) return np.array(self._interior_points) @@ -650,9 +630,7 @@ def boundary_points(self, as_indices=False): (list) The list of boundary lattice points of the polytope. """ if self._boundary_points is None: - self._boundary_points = np.array( - [pt[0] for pt in self._points_saturated() - if len(pt[1]) > 0]) + self._boundary_points = np.array([pt[0] for pt in self._points_saturated() if len(pt[1]) > 0]) if as_indices: return self.points_to_indices(self._boundary_points) return np.array(self._boundary_points) @@ -670,9 +648,7 @@ def points_interior_to_facets(self, as_indices=False): (list) The list of lattice points interior to facets of the polytope. """ if self._points_interior_to_facets is None: - self._points_interior_to_facets = np.array([ - pt[0] for pt in self._points_saturated() - if len(pt[1]) == 1]) + self._points_interior_to_facets = np.array([pt[0] for pt in self._points_saturated() if len(pt[1]) == 1]) if as_indices: return self.points_to_indices(self._points_interior_to_facets) return np.array(self._points_interior_to_facets) @@ -691,9 +667,7 @@ def boundary_points_not_interior_to_facets(self, as_indices=False): the polytope. """ if self._boundary_points_not_interior_to_facets is None: - self._boundary_points_not_interior_to_facets = np.array( - [pt[0] for pt in self._points_saturated() - if len(pt[1]) > 1]) + self._boundary_points_not_interior_to_facets = np.array([pt[0] for pt in self._points_saturated() if len(pt[1]) > 1]) if as_indices: return self.points_to_indices( self._boundary_points_not_interior_to_facets) @@ -713,9 +687,7 @@ def points_not_interior_to_facets(self, as_indices=False): polytope. 
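Taken together, these helpers partition the lattice points by how many facet inequalities they saturate (none, at least one, exactly one, or not exactly one). A quick illustrative check (editorial sketch, not part of the patch) for the $\mathbb{P}^4$ polytope used in other examples:
```python
from cytools import Polytope
p = Polytope([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[-1,-1,-1,-1]])
n_all      = len(p.points())
n_interior = len(p.interior_points())             # saturate no inequality
n_boundary = len(p.boundary_points())             # saturate at least one
n_facet    = len(p.points_interior_to_facets())   # saturate exactly one
assert n_all == n_interior + n_boundary
assert len(p.points_not_interior_to_facets()) == n_all - n_facet
```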
""" if self._points_not_interior_to_facets is None: - self._points_not_interior_to_facets = np.array( - [pt[0] for pt in self._points_saturated() - if len(pt[1]) != 1]) + self._points_not_interior_to_facets = np.array([pt[0] for pt in self._points_saturated() if len(pt[1]) != 1]) if as_indices: return self.points_to_indices(self._points_not_interior_to_facets) return np.array(self._points_not_interior_to_facets) @@ -733,8 +705,7 @@ def is_reflexive(self): """ if self._is_reflexive is not None: return self._is_reflexive - self._is_reflexive = (self.is_solid() - and all(c == 1 for c in self._input_ineqs[:,-1])) + self._is_reflexive = self.is_solid() and all(c == 1 for c in self._input_ineqs[:,-1]) return self._is_reflexive def hpq(self, p, q, lattice): @@ -796,8 +767,7 @@ def hpq(self, p, q, lattice): hpq += len(self.dual().points_not_interior_to_facets()) - d - 1 return hpq if p == 2: - hpq = (44 + 4*self.h11(lattice="N") - 2*self.h12(lattice="N") + - 4*self.h13(lattice="N")) + hpq = 44 + 4*self.h11(lattice="N") - 2*self.h12(lattice="N") + 4*self.h13(lattice="N") return hpq raise RuntimeError("Error computing Hodge numbers.") @@ -975,9 +945,7 @@ def chi(self, lattice): elif self.dim() == 4: self._chi = 2*(self.h11(lattice=lattice)-self.h21(lattice=lattice)) elif self.dim() == 5: - self._chi = 48 + 6*(self.h11(lattice=lattice) - - self.h12(lattice=lattice) - + self.h13(lattice=lattice)) + self._chi = 48 + 6*(self.h11(lattice=lattice) - self.h12(lattice=lattice) + self.h13(lattice=lattice)) return self._chi def _faces4d(self): @@ -1093,37 +1061,29 @@ def faces(self, d=None): if d is not None and d not in range(self._dim + 1): raise Exception(f"Polytope does not have faces of dimension {d}") if self._faces is not None: - return copy.copy(self._faces[d] if d is not None else - [copy.copy(ff) for ff in self._faces]) + return np.array(self._faces[d] if d is not None else [np.array(ff) for ff in self._faces]) if self._dual is not None and self._dual._faces is not None: - self._faces = ([[f.dual() for f in ff] - for ff in self._dual._faces[::-1][1:]] - + [[PolytopeFace(self, self.vertices(), - frozenset(), dim=self._dim)]]) - return copy.copy(self._faces[d] if d is not None else - [copy.copy(ff) for ff in self._faces]) + self._faces = ([[f.dual() for f in ff] for ff in self._dual._faces[::-1][1:]] + + [[PolytopeFace(self, self.vertices(), frozenset(), dim=self._dim)]]) + return np.array(self._faces[d] if d is not None else [np.array(ff) for ff in self._faces]) if self._dim == 4: self._faces = self._faces4d() - return copy.copy(self._faces[d] if d is not None else - [copy.copy(ff) for ff in self._faces]) + return np.array(self._faces[d] if d is not None else [np.array(ff) for ff in self._faces]) pts_sat = self._points_saturated() vert = [tuple(pt) for pt in self.vertices()] vert_sat = [tuple(pt) for pt in pts_sat if pt[0] in vert] organized_faces = [] # The list where all face obejcts will be stored # First construct trivial full-dimensional face - organized_faces.append([PolytopeFace(self, vert, frozenset(), - dim=self._dim)]) + organized_faces.append([PolytopeFace(self, vert, frozenset(), dim=self._dim)]) # If thee polytope is zero-dimensional, finish the computation if self._dim == 0: self._faces = organized_faces - return copy.copy(self._faces[d] if d is not None else - [copy.copy(ff) for ff in self._faces]) + return np.array(self._faces[d] if d is not None else [np.array(ff) for ff in self._faces]) # Now construct the facets tmp_facets = [] for j in range(len(self._input_ineqs)): tmp_vert = 
[pt[0] for pt in vert_sat if j in pt[1]] - tmp_facets.append(PolytopeFace(self, tmp_vert, - frozenset([j]), dim=self._dim-1)) + tmp_facets.append(PolytopeFace(self, tmp_vert, frozenset([j]), dim=self._dim-1)) organized_faces.append(tmp_facets) # Then iteratively construct lower-dimensional faces previous_faces = defaultdict(set) @@ -1140,8 +1100,7 @@ def faces(self, d=None): f2 = previous_faces_list[j] inter = previous_faces[f1] & previous_faces[f2] # Check if it has the right dimension - if np.linalg.matrix_rank([tuple(pt[0])+(1,) - for pt in inter])-1 != dd: + if np.linalg.matrix_rank([tuple(pt[0])+(1,) for pt in inter])-1 != dd: continue # Find saturated inequalities f3 = frozenset.intersection(*[pt[1] for pt in inter]) @@ -1156,11 +1115,9 @@ def faces(self, d=None): previous_faces_list = list(previous_faces.keys()) n_previous_faces = len(previous_faces_list) # Finally add vertices - organized_faces.append([PolytopeFace(self, [pt[0]], pt[1], dim=0) - for pt in vert_sat]) + organized_faces.append([PolytopeFace(self, [pt[0]], pt[1], dim=0) for pt in vert_sat]) self._faces = organized_faces[::-1] - return copy.copy(self._faces[d] if d is not None else - [copy.copy(ff) for ff in self._faces]) + return np.array(self._faces[d] if d is not None else [np.array(ff) for ff in self._faces]) def facets(self): """ @@ -1193,13 +1150,11 @@ def vertices(self): self._vertices = np.array([self._input_pts[0]]) elif self._backend == "ppl": points_mat = np.array([tuple(int(i) for i in pt.coefficients()) - for pt in self._optimal_poly.minimized_generators()]) + for pt in self._optimal_poly.minimized_generators()]) if self._ambient_dim > self._dim: - pts_mat_tmp = np.empty((points_mat.shape[0],self._ambient_dim), - dtype=int) + pts_mat_tmp = np.empty((points_mat.shape[0],self._ambient_dim), dtype=int) pts_mat_tmp[:,:self._dim_diff] = 0 - pts_mat_tmp[:,self._dim_diff:] = points_mat.reshape(-1, - self._dim) + pts_mat_tmp[:,self._dim_diff:] = points_mat.reshape(-1, self._dim) points_mat = pts_mat_tmp points_mat = self._inv_transf_matrix.dot(points_mat.T).T if self._ambient_dim > self._dim: @@ -1210,29 +1165,22 @@ def vertices(self): pt_tup = tuple(pt) if pt_tup not in input_pts: input_pts.append(pt_tup) - self._vertices = np.array([list(pt) for pt in input_pts - if pt in tmp_vert]) + self._vertices = np.array([list(pt) for pt in input_pts if pt in tmp_vert]) elif self._backend == "qhull": if self._dim == 1: # QHull cannot handle 1D polytopes - tmp_vert = [tuple(pt[0]) for pt in self._points_saturated() - if len(pt[1]) == 1] - self._vertices = np.array([list(pt) for pt in self._input_pts - if tuple(pt) in tmp_vert]) + tmp_vert = [tuple(pt[0]) for pt in self._points_saturated() if len(pt[1]) == 1] + self._vertices = np.array([list(pt) for pt in self._input_pts if tuple(pt) in tmp_vert]) else: self._vertices = self._input_pts[self._optimal_poly.vertices] else: # Backend is PALP - palp = subprocess.Popen( - (config.palp_path + "poly.x", "-v"), - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, universal_newlines=True) + palp = subprocess.Popen((config.palp_path + "poly.x", "-v"), + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) pt_list = "" optimal_pts = {tuple(pt) for pt in self._optimal_pts} for pt in optimal_pts: - pt_list += (str(pt).replace("(","").replace(")","") - .replace(","," ") + "\n") - palp_out = palp.communicate( - input=f"{len(optimal_pts)} {self._dim}\n" - + pt_list + "\n")[0] + pt_list += 
(str(pt).replace("(","").replace(")","").replace(","," ") + "\n") + palp_out = palp.communicate(input=f"{len(optimal_pts)} {self._dim}\n" + pt_list + "\n")[0] if "Vertices of P" not in palp_out: raise Exception(f"PALP error. Full output: {palp_out}") palp_out = palp_out.split("\n") @@ -1242,14 +1190,11 @@ def vertices(self): pts_shape = [int(c) for c in line.split()[:2]] tmp_pts = np.empty(pts_shape, dtype=int) for j in range(pts_shape[0]): - tmp_pts[j,:] = ( - [int(c) for c in palp_out[i+j+1].split()]) + tmp_pts[j,:] = [int(c) for c in palp_out[i+j+1].split()] break - points = (tmp_pts.T - if pts_shape[0] < pts_shape[1] else tmp_pts) + points = (tmp_pts.T if pts_shape[0] < pts_shape[1] else tmp_pts) if self._ambient_dim > self._dim: - points_mat = np.empty((len(points),self._ambient_dim), - dtype=int) + points_mat = np.empty((len(points),self._ambient_dim), dtype=int) points_mat[:,self._dim_diff:] = points points_mat[:,:self._dim_diff] = 0 else: @@ -1264,8 +1209,7 @@ def vertices(self): pt_tup = tuple(pt) if pt_tup not in input_pts: input_pts.append(pt_tup) - self._vertices = np.array([list(pt) for pt in input_pts - if pt in tmp_vert]) + self._vertices = np.array([list(pt) for pt in input_pts if pt in tmp_vert]) return np.array(self._vertices) def dual(self): @@ -1338,7 +1282,8 @@ def is_favorable(self, lattice): "Options are: \"N\" or \"M\".") def glsm_charge_matrix(self, include_origin=True, - include_points_interior_to_facets=False): + include_points_interior_to_facets=False, + points=None): """ **Description:** Computes the GLSM charge matrix of the theory resulting from this @@ -1352,6 +1297,10 @@ def glsm_charge_matrix(self, include_origin=True, default=False): By default only boundary points not interior to facets are used. If this flag is set to true then points interior to facets are also used. + - ```points``` (list, optional): The list of indices of the points that + will be used. Note that if this option is used then the parameters + ```include_origin``` and ```include_points_interior_to_facets``` are + ignored. **Returns:** (list) The GLSM charge matrix. @@ -1359,34 +1308,38 @@ def glsm_charge_matrix(self, include_origin=True, if not self.is_reflexive(): raise Exception("The GLSM charge matrix can only be computed for " "reflexive polytopes.") - args_id = 1*include_points_interior_to_facets - if self._glsm_charge_matrix[args_id] is not None: - if not include_origin: - return np.array(self._glsm_charge_matrix[args_id][:,1:]) - return np.array(self._glsm_charge_matrix[args_id]) - # Set up the list of points that will be used. We always include the - # origin and discard it at the end if necessary. - if include_points_interior_to_facets: - pts = self.boundary_points() + # Set up the list of points that will be used. 
+ if points is not None: + pts_ind = tuple(set(points)) + if min(pts_ind) < 0 or max(pts_ind) > self.points().shape[0]: + raise Exception("An index is out of the allowed range.") + include_origin = 0 in pts_ind + elif include_points_interior_to_facets: + pts_ind = tuple(range(self.points().shape[0])) else: - pts = self.boundary_points_not_interior_to_facets() - + pts_ind = tuple(range(self.points_not_interior_to_facets().shape[0])) + if pts_ind in self._glsm_charge_matrix: + if not include_origin and points is None: + return np.array(self._glsm_charge_matrix[pts_ind][:,1:]) + return np.array(self._glsm_charge_matrix[pts_ind]) + # If the result is not cached we do the computation + pts = self.points()[[i for i in pts_ind if i]] # Exclude origin pts_norms = [np.linalg.norm(p,1) for p in pts] pts_order = np.argsort(pts_norms) - # Find good lattice basis - good_lattice_basis = pts_order[:1] - current_rank = 1 - for p in pts_order[1:]: - tmp = pts[np.append(good_lattice_basis, p)] + good_lattice_basis = [] + current_rank = 0 + for p in pts_order: + tmp = pts[good_lattice_basis + [p]] rank = np.linalg.matrix_rank(np.dot(tmp.T,tmp)) if rank>current_rank: - good_lattice_basis = np.append(good_lattice_basis, p) + good_lattice_basis.append(p) current_rank = rank if rank==self._dim: break good_lattice_basis = np.sort(good_lattice_basis) - + if len(good_lattice_basis) != self._dim: + raise Exception("Failed to find basis.") glsm_basis = [i for i in range(len(pts)) if i not in good_lattice_basis] M = fmpq_mat(pts[good_lattice_basis].T.tolist()) M_inv = np.array(M.inv().tolist()) @@ -1397,40 +1350,39 @@ def glsm_charge_matrix(self, include_origin=True, extra_rows = np.array([[int(ii.p) for ii in i] for i in extra_rows]) extra_columns = np.multiply(extra_pts.T, column_scalings[:, None]).T extra_columns = np.array([[int(ii.p) for ii in i] for i in extra_columns]) - glsm = np.diag(column_scalings) for p,pp in enumerate(good_lattice_basis): glsm = np.insert(glsm, pp, extra_columns[p], axis=1) - - origin_column = -np.sum(glsm, axis=1) - glsm = np.insert(glsm, 0, origin_column, axis=1) - + if include_origin: + origin_column = -np.sum(glsm, axis=1) + glsm = np.insert(glsm, 0, origin_column, axis=1) linear_relations = extra_rows extra_linear_relation_columns = -1*np.diag(row_scalings) for p,pp in enumerate(good_lattice_basis): linear_relations = np.insert(linear_relations, pp, extra_linear_relation_columns[p], axis=1) - - linear_relations = np.insert(linear_relations, 0, np.ones(len(pts)), axis=0) - linear_relations = np.insert(linear_relations, 0, np.zeros(self._dim+1), axis=1) - linear_relations[0][0] = 1 - + if include_origin: + linear_relations = np.insert(linear_relations, 0, np.ones(len(pts)), axis=0) + linear_relations = np.insert(linear_relations, 0, np.zeros(self._dim+1), axis=1) + linear_relations[0][0] = 1 # Check that everything was computed correctly - if (any(glsm.dot([[0]*self._dim+[1]]+[pt+[1] for pt in pts.tolist()]).flat) + if (any(glsm.dot(([[0]*self._dim+[1]] if include_origin else [])+[pt+[1*include_origin] for pt in pts.tolist()]).flat) or any(glsm.dot(linear_relations.T).flatten()) - or np.linalg.matrix_rank(glsm[:,np.array(glsm_basis)+1]) + or np.linalg.matrix_rank(glsm[:,np.array(glsm_basis)+1*include_origin]) != len(glsm_basis)): + print(glsm.dot(([[0]*self._dim+[1]] if include_origin else [])+[pt+[1] for pt in pts.tolist()])) raise Exception("Error computing GLSM charge matrix.") - - self._glsm_charge_matrix[args_id] = glsm - self._glsm_linrels[args_id] = linear_relations - 
self._glsm_basis[args_id] = np.array(glsm_basis) + 1 - - if not include_origin: - return np.array(self._glsm_charge_matrix[args_id][:,1:]) - return np.array(self._glsm_charge_matrix[args_id]) + # We now cache the results + self._glsm_charge_matrix[pts_ind] = glsm + self._glsm_linrels[pts_ind] = linear_relations + self._glsm_basis[pts_ind] = np.array(glsm_basis) + 1 + # Finally return a copy of the result + if not include_origin and points is None: + return np.array(self._glsm_charge_matrix[pts_ind][:,1:]) + return np.array(self._glsm_charge_matrix[pts_ind]) def glsm_linear_relations(self, include_origin=True, - include_points_interior_to_facets=False): + include_points_interior_to_facets=False, + points=None): """ **Description:** Computes the linear relations of the GLSM charge matrix. @@ -1443,26 +1395,40 @@ def glsm_linear_relations(self, include_origin=True, default=False): By default only boundary points not interior to facets are used. If this flag is set to true then points interior to facets are also used. + - ```points``` (list, optional): The list of indices of the points that + will be used. Note that if this option is used then the parameters + ```include_origin``` and ```include_points_interior_to_facets``` are + ignored. **Returns:** (list) A matrix of linear relations of the columns of the GLSM charge matrix. """ - args_id = 1*include_points_interior_to_facets - if self._glsm_linrels[args_id] is not None: - if not include_origin: - return np.array(self._glsm_linrels[args_id][1:,1:]) - return np.array(self._glsm_linrels[args_id]) - + if points is not None: + pts_ind = tuple(set(points)) + if min(pts_ind) < 0 or max(pts_ind) > self.points().shape[0]: + raise Exception("An index is out of the allowed range.") + include_origin = 0 in pts_ind + elif include_points_interior_to_facets: + pts_ind = tuple(range(self.points().shape[0])) + else: + pts_ind = tuple(range(self.points_not_interior_to_facets().shape[0])) + if pts_ind in self._glsm_linrels: + if not include_origin and points is None: + return np.array(self._glsm_linrels[pts_ind][1:,1:]) + return np.array(self._glsm_linrels[pts_ind]) + # If linear relations are not cached we just call the GLSM charge + # matrix function since they are computed there self.glsm_charge_matrix(include_origin=True, - include_points_interior_to_facets=include_points_interior_to_facets) - - if not include_origin: - return np.array(self._glsm_linrels[args_id][1:,1:]) - return np.array(self._glsm_linrels[args_id]) + include_points_interior_to_facets=include_points_interior_to_facets, + points=points) + if not include_origin and points is None: + return np.array(self._glsm_linrels[pts_ind][1:,1:]) + return np.array(self._glsm_linrels[pts_ind]) def glsm_basis(self, include_origin=True, - include_points_interior_to_facets=False, integral=True): + include_points_interior_to_facets=False, points=None, + integral=True): """ **Description:** Computes a basis of columns of the GLSM charge matrix. @@ -1475,6 +1441,11 @@ def glsm_basis(self, include_origin=True, default=False): By default only boundary points not interior to facets are used. If this flag is set to true then points interior to facets are also used. + - ```points``` (list, optional): The list of indices of the points that + will be used. Note that if this option is used then the parameters + ```include_origin``` and ```include_points_interior_to_facets``` are + ignored. Also, note that the indices returned here will be the + indices of the sorted list of points. 
- ```integral``` (boolean, optional, default=True): Indicates whether to find an integral basis for the columns of the GLSM charge matrix. (i.e. so that remaining columns can be written as an integer linear @@ -1483,13 +1454,22 @@ def glsm_basis(self, include_origin=True, **Returns:** (list) A list of column indices that form a basis. """ - args_id = 1*include_points_interior_to_facets + 2*integral - if self._glsm_basis[args_id] is not None: - if not include_origin: - return np.array(self._glsm_basis[args_id]) - 1 - return np.array(self._glsm_basis[args_id]) + if points is not None: + pts_ind = tuple(set(points)) + if min(pts_ind) < 0 or max(pts_ind) > self.points().shape[0]: + raise Exception("An index is out of the allowed range.") + include_origin = 0 in pts_ind + elif include_points_interior_to_facets: + pts_ind = tuple(range(self.points().shape[0])) + else: + pts_ind = tuple(range(self.points_not_interior_to_facets().shape[0])) + if (pts_ind,integral) in self._glsm_basis: + if not include_origin and points is None: + return np.array(self._glsm_basis[(pts_ind,integral)]) - 1 + return np.array(self._glsm_basis[(pts_ind,integral)]) linrel_np = self.glsm_linear_relations(include_origin=True, - include_points_interior_to_facets=include_points_interior_to_facets) + include_points_interior_to_facets=include_points_interior_to_facets, + points=points) good_exclusions = 0 basis_exc = [] indices = np.arange(linrel_np.shape[1]) @@ -1505,8 +1485,7 @@ def glsm_basis(self, include_origin=True, except: continue linrel_rand = np.array(linrel.tolist(), dtype=int) - linrel_rand = np.array([v//int(round(abs(gcd_list(v)))) - for v in linrel_rand], dtype=int) + linrel_rand = np.array([v//int(round(abs(gcd_list(v)))) for v in linrel_rand], dtype=int) good_exclusions = 0 basis_exc = [] for v in linrel_rand: @@ -1529,29 +1508,26 @@ def glsm_basis(self, include_origin=True, "A non-integral one will be computed. 
Please let the " "developers know about the polytope that caused this " "issue.") - return self.glsm_basis( - include_origin=include_origin, - include_points_interior_to_facets= - include_points_interior_to_facets, - integral=False) + return self.glsm_basis(include_origin=include_origin, + include_points_interior_to_facets=include_points_interior_to_facets, + integral=False) linrel_dict = {ii:i for i,ii in enumerate(indices)} linrel_np = np.array(linrel_rand[:,[linrel_dict[i] for i in range(linrel_rand.shape[1])]]) - basis_ind = np.array(sorted([i for i in range(len(linrel_np[0])) - if linrel_dict[i] not in basis_exc]), - dtype=int) + basis_ind = np.array(sorted([i for i in range(len(linrel_np[0])) if linrel_dict[i] not in basis_exc]), dtype=int) ker_np = self.glsm_charge_matrix(include_origin=True, - include_points_interior_to_facets=include_points_interior_to_facets) + include_points_interior_to_facets=include_points_interior_to_facets, + points=points) if (np.linalg.matrix_rank(ker_np[:,basis_ind]) != len(basis_ind) or any(ker_np.dot(linrel_np.T).flatten())): raise Exception("Error finding basis") if integral: - self._glsm_linrels[1*include_points_interior_to_facets] = linrel_np - self._glsm_basis[1*include_points_interior_to_facets] = basis_ind - self._glsm_basis[args_id] = basis_ind - if not include_origin: - return np.array(self._glsm_basis[args_id]) - 1 - return np.array(self._glsm_basis[args_id]) + self._glsm_linrels[pts_ind] = linrel_np + self._glsm_basis[(pts_ind,integral)] = basis_ind + self._glsm_basis[(pts_ind,False)] = basis_ind + if not include_origin and points is None: + return np.array(self._glsm_basis[(pts_ind,integral)]) - 1 + return np.array(self._glsm_basis[(pts_ind,integral)]) def volume(self): """ @@ -1576,8 +1552,7 @@ def volume(self): elif self._dim == 1: self._volume = max(self._optimal_pts) - min(self._optimal_pts) else: - self._volume = int(round(ConvexHull(self._optimal_pts).volume - * math.factorial(self._dim))) + self._volume = int(round(ConvexHull(self._optimal_pts).volume * math.factorial(self._dim))) return self._volume def points_to_indices(self, points): @@ -1637,11 +1612,9 @@ def normal_form(self, affine_transform=False, backend="native"): "sometimes is incorrect. 
Using native backend.") backend = "native" if backend == "palp": - palp = subprocess.Popen( - (config.palp_path + "poly.x", ("-A" if affine_transform - else "-N")), - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, universal_newlines=True) + palp = subprocess.Popen((config.palp_path + "poly.x", ("-A" if affine_transform else "-N")), + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) pt_list = "" optimal_pts = {tuple(pt) for pt in self._optimal_pts} for pt in optimal_pts: @@ -1659,11 +1632,9 @@ def normal_form(self, affine_transform=False, backend="native"): pts_shape = [int(c) for c in palp_out[i].split()[:2]] tmp_pts = np.empty(pts_shape, dtype=int) for j in range(pts_shape[0]): - tmp_pts[j,:] = ( - [int(c) for c in palp_out[i+j+1].split()]) + tmp_pts[j,:] = [int(c) for c in palp_out[i+j+1].split()] break - points = (tmp_pts.T - if pts_shape[0] < pts_shape[1] else tmp_pts) + points = (tmp_pts.T if pts_shape[0] < pts_shape[1] else tmp_pts) self._normal_form[args_id] = points return np.array(self._normal_form[args_id]) if backend != "native": @@ -1686,8 +1657,7 @@ def PGE(n, u, v): n_s = 1 prm = {0 : [np.eye(n_f, dtype=int), np.eye(n_v, dtype=int)]} for j in range(n_v): - m = np.argmax([PM[0,prm[0][1].dot(range(n_v))][i] - for i in range(j, n_v)]) + m = np.argmax([PM[0,prm[0][1].dot(range(n_v))][i] for i in range(j, n_v)]) if m > 0: prm[0][1] = PGE(n_v, j+1, m+j+1).dot(prm[0][1]) first_row = list(PM[0]) @@ -1698,21 +1668,18 @@ def PGE(n, u, v): m = np.argmax(PM[:,prm[n_s][1].dot(range(n_v))][k]) if m > 0: prm[n_s][1] = PGE(n_v, 1, m+1).dot(prm[n_s][1]) - d = (PM[k,prm[n_s][1].dot(range(n_v))][0] - - prm[0][1].dot(first_row)[0]) + d = PM[k,prm[n_s][1].dot(range(n_v))][0] - prm[0][1].dot(first_row)[0] if d < 0: # The largest elt of this row is smaller than largest elt # in 1st row, so nothing to do continue # otherwise: for i in range(1, n_v): - m = np.argmax([PM[k,prm[n_s][1].dot(range(n_v))][j] - for j in range(i, n_v)]) + m = np.argmax([PM[k,prm[n_s][1].dot(range(n_v))][j] for j in range(i, n_v)]) if m > 0: prm[n_s][1] = PGE(n_v, i+1, m+i+1).dot(prm[n_s][1]) if d == 0: - d = (PM[k,prm[n_s][1].dot(range(n_v))][i] - - prm[0][1].dot(first_row)[i]) + d = PM[k,prm[n_s][1].dot(range(n_v))][i] - prm[0][1].dot(first_row)[i] if d < 0: break if d < 0: @@ -1763,20 +1730,17 @@ def PGE(n, u, v): # between 0 and S(0) for s in range(l, n_f): for j in range(1, S[0]): - v = PM[prmb[n_p][0].dot(range(n_f)), - :][:,prmb[n_p][1].dot(range(n_v))][s] + v = PM[prmb[n_p][0].dot(range(n_f)),:][:,prmb[n_p][1].dot(range(n_v))][s] if v[0] < v[j]: prmb[n_p][1] = PGE(n_v, 1, j+1).dot(prmb[n_p][1]) if ccf == 0: - l_r[0] = PM[prmb[n_p][0].dot(range(n_f)), - :][:,prmb[n_p][1].dot(range(n_v))][s,0] + l_r[0] = PM[prmb[n_p][0].dot(range(n_f)),:][:,prmb[n_p][1].dot(range(n_v))][s,0] prmb[n_p][0] = PGE(n_f, l+1, s+1).dot(prmb[n_p][0]) n_p += 1 ccf = 1 prmb[n_p] = copy.copy(prm[k]) else: - d1 = PM[prmb[n_p][0].dot(range(n_f)), - :][:,prmb[n_p][1].dot(range(n_v))][s,0] + d1 = PM[prmb[n_p][0].dot(range(n_f)),:][:,prmb[n_p][1].dot(range(n_v))][s,0] d = d1 - l_r[0] if d < 0: # We move to the next line @@ -1812,18 +1776,15 @@ def PGE(n, u, v): s -= 1 # Find the largest value in this symmetry block for j in range(c+1, h): - v = PM[prmb[s][0].dot(range(n_f)), - :][:,prmb[s][1].dot(range(n_v))][l] + v = PM[prmb[s][0].dot(range(n_f)),:][:,prmb[s][1].dot(range(n_v))][l] if v[c] < v[j]: prmb[s][1] = PGE(n_v, c+1, j+1).dot(prmb[s][1]) if ccf == 0: # Set reference and 
carry on to next permutation - l_r[c] = PM[prmb[s][0].dot(range(n_f)), - :][:,prmb[s][1].dot(range(n_v))][l,c] + l_r[c] = PM[prmb[s][0].dot(range(n_f)),:][:,prmb[s][1].dot(range(n_v))][l,c] ccf = 1 else: - d1 = PM[prmb[s][0].dot(range(n_f)), - :][:,prmb[s][1].dot(range(n_v))][l,c] + d1 = PM[prmb[s][0].dot(range(n_f)),:][:,prmb[s][1].dot(range(n_v))][l,c] d = d1 - l_r[c] if d < 0: n_p -= 1 @@ -1852,8 +1813,7 @@ def PGE(n, u, v): # the restrictions the last worked out # row imposes. c = 0 - M = PM[prm[0][0].dot(range(n_f)), - :][:,prm[0][1].dot(range(n_v))][l] + M = PM[prm[0][0].dot(range(n_f)),:][:,prm[0][1].dot(range(n_v))][l] while c < n_v: s = S[c] + 1 S[c] = c + 1 @@ -1879,8 +1839,7 @@ def PGE(n, u, v): for i in range(n_v): k = i for j in range(i+1, n_v): - if (M_max[j] < M_max[k] - or (M_max[j] == M_max[k] and S_max[j] < S_max[k])): + if M_max[j] < M_max[k] or (M_max[j] == M_max[k] and S_max[j] < S_max[k]): k = j if not k == i: M_max[i], M_max[k] = M_max[k], M_max[i] @@ -1888,9 +1847,7 @@ def PGE(n, u, v): p_c = PGE(n_v, 1+i, 1+k).dot(p_c) # Create array of possible NFs. prm = [p_c.dot(l[1]) for l in prm.values()] - Vs = [np.array(fmpz_mat(V.T[:,sig.dot(range(n_v))].tolist() - ).hnf().tolist(), dtype=int).tolist() - for sig in prm] + Vs = [np.array(fmpz_mat(V.T[:,sig.dot(range(n_v))].tolist()).hnf().tolist(), dtype=int).tolist() for sig in prm] Vmin = min(Vs) if affine_transform: self._normal_form[args_id] = np.array(Vmin).T[:,:self._dim] @@ -1899,8 +1856,8 @@ def PGE(n, u, v): return np.array(self._normal_form[args_id]) def triangulate(self, heights=None, make_star=None, - include_points_interior_to_facets=None, simplices=None, - check_input_simplices=True, backend="cgal"): + include_points_interior_to_facets=None, points=None, + simplices=None, check_input_simplices=True, backend="cgal"): """ **Description:** Returns a single regular triangulation of the polytope. @@ -1926,6 +1883,9 @@ def triangulate(self, heights=None, make_star=None, to include points interior to facets from the triangulation. If not specified, it is set to False for reflexive polytopes and True otherwise. + - ```points``` (list, optional): The list of indices of the points that + will be used. Note that if this option is used then the parameter + ```include_points_interior_to_facets``` is ignored. - ```simplices``` (list, optional): A list of simplices specifying the triangulation. This is useful when a triangulation was previously computed and it needs to be used again. 
Note that the order of the @@ -1956,12 +1916,20 @@ def triangulate(self, heights=None, make_star=None, if self._ambient_dim > self._dim: raise Exception("Only triangulations of full-dimensional polytopes" "are supported.") - if include_points_interior_to_facets is None: - include_points_interior_to_facets = not self.is_reflexive() - if include_points_interior_to_facets: - triang_pts = self.points() + if points is not None: + pts_ind = tuple(set(points)) + if min(pts_ind) < 0 or max(pts_ind) > self.points().shape[0]: + raise Exception("An index is out of the allowed range.") + include_origin = 0 in pts_ind + elif include_points_interior_to_facets is None: + pts_ind = (tuple(range(self.points_not_interior_to_facets().shape[0])) + if self.is_reflexive() + else tuple(range(self.points().shape[0]))) else: - triang_pts = self.points_not_interior_to_facets() + pts_ind = (tuple(range(self.points().shape[0])) + if include_points_interior_to_facets + else tuple(range(self.points_not_interior_to_facets().shape[0]))) + triang_pts = self.points()[list(pts_ind)] if make_star is None: if heights is None and simplices is None: make_star = self.is_reflexive() @@ -1975,10 +1943,10 @@ def triangulate(self, heights=None, make_star=None, backend=backend) def random_triangulations_fast(self, N=None, c=0.2, max_retries=500, - make_star=True, only_fine=True, - include_points_interior_to_facets=None, - backend="cgal", as_list=False, - progress_bar=True): + make_star=True, only_fine=True, + include_points_interior_to_facets=None, + points=None, backend="cgal", as_list=False, + progress_bar=True): """ Constructs pseudorandom regular (optionally fine and star) triangulations of a given point set. This is done by picking random @@ -2014,6 +1982,9 @@ def random_triangulations_fast(self, N=None, c=0.2, max_retries=500, to include points interior to facets from the triangulation. If not specified, it is set to False for reflexive polytopes and True otherwise. + - ```points``` (list, optional): The list of indices of the points that + will be used. Note that if this option is used then the parameter + ```include_points_interior_to_facets``` is ignored. - ```backend``` (string, optional, default="cgal"): Specifies the backend used to compute the triangulation. The available options are "cgal" and "qhull". 
@@ -2037,13 +2008,20 @@ def random_triangulations_fast(self, N=None, c=0.2, max_retries=500, if N is None and as_list: raise Exception("Number of triangulations must be specified when " "returning a list.") - if include_points_interior_to_facets is None: - include_points_interior_to_facets = not self.is_reflexive() - if include_points_interior_to_facets: - triang_pts = self.points() + if points is not None: + pts_ind = tuple(set(points)) + if min(pts_ind) < 0 or max(pts_ind) > self.points().shape[0]: + raise Exception("An index is out of the allowed range.") + include_origin = 0 in pts_ind + elif include_points_interior_to_facets is None: + pts_ind = (tuple(range(self.points_not_interior_to_facets().shape[0])) + if self.is_reflexive() + else tuple(range(self.points().shape[0]))) else: - triang_pts = self.points_not_interior_to_facets() - triang_pts = [tuple(pt) for pt in triang_pts] + pts_ind = (tuple(range(self.points().shape[0])) + if include_points_interior_to_facets + else tuple(range(self.points_not_interior_to_facets().shape[0]))) + triang_pts = [tuple(pt) for pt in self.points()[list(pts_ind)]] if make_star is None: make_star = self.is_reflexive() if (0,)*self._dim not in triang_pts: @@ -2068,11 +2046,12 @@ def random_triangulations_fast(self, N=None, c=0.2, max_retries=500, return triangs_list def random_triangulations_fair(self, N=None, n_walk=None, n_flip=None, - initial_walk_steps=None, walk_step_size=1e-2, - max_steps_to_wall=25, fine_tune_steps=8, - max_retries=50, make_star=None, - include_points_interior_to_facets=None, backend="cgal", - as_list=False, progress_bar=True): + initial_walk_steps=None, walk_step_size=1e-2, + max_steps_to_wall=25, fine_tune_steps=8, + max_retries=50, make_star=None, + include_points_interior_to_facets=None, + points=None, backend="cgal", as_list=False, + progress_bar=True): """ **Description:** Returns a pseudorandom list of regular triangulations of a given point @@ -2132,6 +2111,9 @@ def random_triangulations_fair(self, N=None, n_walk=None, n_flip=None, to include points interior to facets from the triangulation. If not specified, it is set to False for reflexive polytopes and True otherwise. + - ```points``` (list, optional): The list of indices of the points that + will be used. Note that if this option is used then the parameter + ```include_points_interior_to_facets``` is ignored. - ```backend``` (string, optional, default="cgal"): Specifies the backend used to compute the triangulation. The available options are "cgal" and "qhull". 
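The new ```points``` parameter added throughout this file takes indices into ```self.points()```, with index 0 corresponding to the origin. A minimal sketch of the convention (editorial addition, not part of the patch), using ```triangulate``` for brevity:
```python
from cytools import Polytope
p = Polytope([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[-1,-1,-6,-9]])
# Indices of the points not interior to facets: the default point set for
# triangulations of a reflexive polytope.
pts = p.points_not_interior_to_facets(as_indices=True)
t1 = p.triangulate()                  # default behavior
t2 = p.triangulate(points=list(pts))  # same point set, selected explicitly
print(t1)
print(t2)
```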
@@ -2155,13 +2137,20 @@ def random_triangulations_fair(self, N=None, n_walk=None, n_flip=None, if N is None and as_list: raise Exception("Number of triangulations must be specified when " "returning a list.") - if include_points_interior_to_facets is None: - include_points_interior_to_facets = not self.is_reflexive() - if include_points_interior_to_facets: - triang_pts = self.points() + if points is not None: + pts_ind = tuple(set(points)) + if min(pts_ind) < 0 or max(pts_ind) > self.points().shape[0]: + raise Exception("An index is out of the allowed range.") + include_origin = 0 in pts_ind + elif include_points_interior_to_facets is None: + pts_ind = (tuple(range(self.points_not_interior_to_facets().shape[0])) + if self.is_reflexive() + else tuple(range(self.points().shape[0]))) else: - triang_pts = self.points_not_interior_to_facets() - triang_pts = [tuple(pt) for pt in triang_pts] + pts_ind = (tuple(range(self.points().shape[0])) + if include_points_interior_to_facets + else tuple(range(self.points_not_interior_to_facets().shape[0]))) + triang_pts = [tuple(pt) for pt in self.points()[list(pts_ind)]] if make_star is None: make_star = self.is_reflexive() if (0,)*self._dim not in triang_pts: @@ -2198,7 +2187,7 @@ def random_triangulations_fair(self, N=None, n_walk=None, n_flip=None, def all_triangulations(self, only_fine=True, only_regular=True, only_star=True, star_origin=None, include_points_interior_to_facets=None, - backend=None, as_list=False): + points=None, backend=None, as_list=False): """ **Description:** Computes all triangulations of the polytope using TOPCOM. @@ -2223,6 +2212,9 @@ def all_triangulations(self, only_fine=True, only_regular=True, to include points interior to facets from the triangulation. If not specified, it is set to False for reflexive polytopes and True otherwise. + - ```points``` (list, optional): The list of indices of the points that + will be used. Note that if this option is used then the parameter + ```include_points_interior_to_facets``` is ignored. - ```backend``` (string, optional): The optimizer used to check regularity computation. 
The available options are the backends of the [```is_solid```](./cone#is_solid) function of the @@ -2247,12 +2239,20 @@ def all_triangulations(self, only_fine=True, only_regular=True, raise Exception("The star_origin parameter must be specified " "when finding star triangulations of " "non-reflexive polytopes.") - if include_points_interior_to_facets is None: - include_points_interior_to_facets = not self.is_reflexive() - if include_points_interior_to_facets: - triang_pts = self.points() + if points is not None: + pts_ind = tuple(set(points)) + if min(pts_ind) < 0 or max(pts_ind) > self.points().shape[0]: + raise Exception("An index is out of the allowed range.") + include_origin = 0 in pts_ind + elif include_points_interior_to_facets is None: + pts_ind = (tuple(range(self.points_not_interior_to_facets().shape[0])) + if self.is_reflexive() + else tuple(range(self.points().shape[0]))) else: - triang_pts = self.points_not_interior_to_facets() + pts_ind = (tuple(range(self.points().shape[0])) + if include_points_interior_to_facets + else tuple(range(self.points_not_interior_to_facets().shape[0]))) + triang_pts = [tuple(pt) for pt in self.points()[list(pts_ind)]] if len(triang_pts) >= 15: print("Warning: Polytopes with more than around 15 points usually " "have too many triangulations, so this function may take " @@ -2287,8 +2287,7 @@ def automorphisms(self, square_to_one=False): for f in self.facets(): if f_min is None or len(f.vertices()) < len(f_min.vertices()): f_min = f - f_min_vert_rref = np.array(fmpz_mat(f_min.vertices().T.tolist() - ).rref()[0].tolist(), dtype=int) + f_min_vert_rref = np.array(fmpz_mat(f_min.vertices().T.tolist()).rref()[0].tolist(), dtype=int) pivots = [] for v in f_min_vert_rref: if any(v): @@ -2319,3 +2318,21 @@ def automorphisms(self, square_to_one=False): autos2.append(m) self._autos = [autos, autos2] return self._autos[args_id] + + def minkowski_sum(self, other): + """ + **Description:** + Returns the Minkowski sum of the two polytopes. + + **Arguments:** + - ```other``` (Polytope): The other polytope used for the Minkowski + sum. + + **Returns:** + (Polytope) The Minkowski sum. + """ + points = [] + for p1 in self.vertices(): + for p2 in other.vertices(): + points.append(p1+p2) + return Polytope(points) diff --git a/cytools/toricvariety.py b/cytools/toricvariety.py new file mode 100644 index 0000000..76a9614 --- /dev/null +++ b/cytools/toricvariety.py @@ -0,0 +1,1388 @@ +# This file is part of CYTools. +# +# CYTools is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# CYTools is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with CYTools. If not, see . + +""" +This module contains tools designed for toric variety computations. 
+""" + +#Standard imports +from collections import Counter, defaultdict +from itertools import combinations +import copy +# Third party imports +from flint import fmpz_mat, fmpq_mat, fmpz, fmpq +from scipy.sparse import csr_matrix +import numpy as np +# CYTools imports +from cytools.utils import (gcd_list, solve_linear_system, array_fmpz_to_int, + array_int_to_fmpz, array_float_to_fmpq, + array_fmpq_to_float, filter_tensor_indices, + symmetric_sparse_to_dense_in_basis, float_to_fmpq, + fmpq_to_float) +from cytools.calabiyau import CalabiYau +from cytools.cone import Cone +from cytools import config + + + +class ToricVariety: + """ + This class handles various computations relating to the toric varieties. + It can be used to compute intersection numbers, the Kähler cone, among + other things. + + :::important + Generally, objects of this class should not be constructed directly by the + end user. Instead, they should be created by the + [get_toric_variety](./triangulation#get_toric_variety) function of the + [Triangulation](./triangulation) class. + ::: + + ## Constructor + + ### ```cytools.toricvariety.ToricVariety``` + + **Description:** + Constructs a ```ToricVariety``` object. This is handled by the hidden + [```__init__```](#__init__) function. + + **Arguments:** + - ```triang``` (Triangulation): A star triangularion of a polytope. + + **Example:** + We construct a ToricVariety from a regular, star triangulation of a + polytope. Since this class is not intended to by initialized by the end + user, we create it via the + [```get_toric_variety```](./triangulation#get_toric_variety) function of + the [Triangulation](./triangulation) class. In this example we obtain + $\mathbb{P}^4$. + ```python {4} + from cytools import Polytope + p = Polytope([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[-1,-1,-1,-1]]) + t = p.triangulate() + v = t.get_toric_variety() + # Prints: A 4-dimensional toric variety with 5 affine patches. + ``` + """ + + def __init__(self, triang): + """ + **Description:** + Initializes a ```ToricVariety``` object. + + **Arguments:** + - ```triang``` (Triangulation): A star triangularion of a polytope. + + **Returns:** + Nothing. + """ + # We first make sure that the input triangulation is appropriate. + # Regularity is not checked since it is generally slow. + if not triang.is_star(): + raise Exception("The input triangulation must be star.") + self._triang = triang + # Initialize remaining hidden attributes + self._hash = None + self._glsm_charge_matrix = None + self._glsm_linrels = None + self._divisor_basis = None + self._mori_cone = [None]*3 + self._intersection_numbers = [None]*7 + self._is_compact = None + self._is_smooth = None + self._canon_div_is_smooth = None + self._eff_gens = None + self._eff_cone = None + self._fan_cones = dict() + self._nef_part = None + self._cy = None + if not self.is_compact() and not config._exp_features_enabled: + raise Exception("Non-compact varieties are currently an " + "experimental feature, so they must be enabled.") + + def clear_cache(self, recursive=False, only_in_basis=False): + """ + **Description:** + Clears the cached results of any previous computation. + + **Arguments:** + - ```recursive``` (boolean, optional, default=True): Whether to also + clear the cache of the defining triangulation and polytope. + This is ignored when only_in_basis=True. + - ```only_in_basis``` (boolean, optional, default=False): Only clears + the cache of computations that depend on a choice of basis. + + **Returns:** + Nothing. 
+ """ + self._mori_cone[2] = None + self._intersection_numbers[2] = None + self._intersection_numbers[6] = None + self._eff_gens = None + self._eff_cone = None + if not only_in_basis: + self._hash = None + self._glsm_charge_matrix = None + self._glsm_linrels = None + self._divisor_basis = None + self._mori_cone = [None]*3 + self._intersection_numbers = [None]*7 + self._is_compact = None + self._is_smooth = None + self._canon_div_is_smooth = None + self._fan_cones = dict() + if recursive: + self._triang.clear_cache(recursive=True) + + def __repr__(self): + """ + **Description:** + Returns a string describing the toric variety. + + **Arguments:** + None. + + **Returns:** + (string) A string describing the toric variety. + """ + out_str = (f"A {'smooth' if self.is_smooth() else 'simplicial'} " + f"{'' if self.is_compact() else 'non-'}compact {self.dim()}" + f"-dimensional toric variety with {len(self.triangulation().simplices())}" + f" affine patches") + return out_str + + def __eq__(self, other): + """ + **Description:** + Implements comparison of toric varieties with ==. + + **Arguments:** + - ```other``` (ToricVariety): The other toric variety that is being + compared. + + **Returns:** + (boolean) The truth value of the toric varieties being equal. + """ + if not isinstance(other, ToricVariety): + return NotImplemented + return self.triangulation() == self.triangulation() + + def __ne__(self, other): + """ + **Description:** + Implements comparison of toric varieties with !=. + + **Arguments:** + - ```other``` (ToricVariety): The other toric variety that is being + compared. + + **Returns:** + (boolean) The truth value of the toric varieties being different. + """ + if not isinstance(other, ToricVariety): + return NotImplemented + return not (self == other) + + def __hash__(self): + """ + **Description:** + Implements the ability to obtain hash values from toric varieties. + + **Arguments:** + None. + + **Returns:** + (integer) The hash value of the toric variety. + """ + if self._hash is not None: + return self._hash + self._hash = hash((1,hash(self.triangulation()))) + return self._hash + + def is_compact(self): + """ + **Description:** + Returns True if the variety is compact and False otherwise. + + **Arguments:** + None. + + **Returns:** + (boolean) The truth value of the variety being compact. + """ + if self._is_compact is not None: + return self._is_compact + self._is_compact = (0,)*self.dim() in [tuple(pt) for pt in self.polytope().interior_points()] + return self._is_compact + + def triangulation(self): + """ + **Description:** + Returns the triangulation giving rise to the toric variety. + + **Arguments:** + None. + + **Returns:** + (Triangulation) The triangulation giving rise to the toric variety. + """ + return self._triang + + def polytope(self): + """ + **Description:** + Returns the polytope whose triangulation gives rise to the toric + variety. + + **Arguments:** + None. + + **Returns:** + (Polytope) The polytope whose triangulation gives rise to the toric + variety. + """ + return self._triang.polytope() + + def dim(self): + """ + **Description:** + Returns the complex dimension of the toric variety. + + **Arguments:** + None. + + **Returns:** + (integer) The complex dimension of the toric variety. + """ + return self.triangulation().dim() + + def sr_ideal(self): + """ + **Description:** + Returns the Stanley–Reisner ideal of the toric variety. + + **Arguments:** + None. + + **Returns:** + (list) The Stanley–Reisner ideal of the toric variety. 
+ """ + return self.triangulation().sr_ideal() + + def glsm_charge_matrix(self, include_origin=True): + """ + **Description:** + Computes the GLSM charge matrix of the theory resulting from this + polytope. + + **Arguments:** + - ```include_origin``` (boolean, optional, default=True): Indicates + whether to use the origin in the calculation. This corresponds to the + inclusion of the canonical divisor. + + **Returns:** + (list) The GLSM charge matrix. + """ + if self._glsm_charge_matrix is not None: + return np.array(self._glsm_charge_matrix)[:,(0 if include_origin else 1):] + self._glsm_charge_matrix = self.polytope().glsm_charge_matrix( + include_origin=True, + points=self.polytope().points_to_indices(self.triangulation().points())) + return np.array(self._glsm_charge_matrix)[:,(0 if include_origin else 1):] + + def glsm_linear_relations(self, include_origin=True): + """ + **Description:** + Computes the linear relations of the GLSM charge matrix. + + **Arguments:** + - ```include_origin``` (boolean, optional, default=True): Indicates + whether to use the origin in the calculation. This corresponds to the + inclusion of the canonical divisor. + + **Returns:** + (list) A matrix of linear relations of the columns of the GLSM charge + matrix. + """ + if self._glsm_linrels is not None: + return np.array(self._glsm_linrels)[(0 if include_origin else 1):,(0 if include_origin else 1):] + self._glsm_linrels = self.polytope().glsm_linear_relations( + include_origin=True, + points=self.polytope().points_to_indices(self.triangulation().points())) + return np.array(self._glsm_linrels)[(0 if include_origin else 1):,(0 if include_origin else 1):] + + def divisor_basis(self, include_origin=True, integral=None): + """ + **Description:** + Returns the current basis of divisors of the toric variety. + + **Arguments:** + - ```include_origin``` (boolean, optional, default=True): Whether to + interpret the indexing of the vector as including the origin. + - ```integral``` (boolean, optional): Indicates whether to try to find + an integral basis for the columns of the GLSM charge matrix. (i.e. + so that remaining columns can be written as an integer linear + combination of the basis.) + + **Returns:** + (list) A list of column indices that form a basis. If a more generic + basis has been specified with the + [```set_divisor_basis```](#set_divisor_basis) or + [```set_curve_basis```](#set_curve_basis) functions then it returns a + matrix where the rows are the basis elements specified as a linear + combination of the canonical divisor and the prime toric divisors. + + **Example:** We consider the hypersurface in $\mathbb{P}(1,1,1,6,9)$ + which has $h^{1,1}=2$. If no basis has been specified, then this + function finds one. If a basis has been specified, then this + function returns it. 
+ ```python {5,8} + from cytools import Polytope + p = Polytope([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[-1,-1,-6,-9]]) + t = p.triangulate() + cy = t.get_cy() + cy.divisor_basis() # We haven't specified any basis before + # Prints: array([5, 6]) + cy.set_divisor_basis([1,3]) # Here we specify a basis + cy.divisor_basis() # We have specified a basis + # Prints: array([1, 3]) + ``` + """ + if self._divisor_basis is None or integral: + self._divisor_basis = self.polytope().glsm_basis( + integral=True, + include_origin=True, + points=self.polytope().points_to_indices(self.triangulation().points())) + self.clear_cache(only_in_basis=True) + if len(self._divisor_basis.shape) == 1: + if 0 in self._divisor_basis and not include_origin: + raise Exception("The basis was requested not including the " + "origin, but it is included in the current basis.") + return np.array(self._divisor_basis) - (0 if include_origin else 1) + return np.array(self._divisor_basis) + + def set_divisor_basis(self, basis, include_origin=True, + exact_arithmetic=False): + """ + **Description:** + Specifies a basis of divisors of the toric variety. This can + be done with a vector specifying the indices of the prime toric + divisors. + + :::tip experimental feature + There is also the option of setting a generic basis with a matrix that + specifies basis elements as a linear combination of the h11+4 prime + toric divisors, or the canonical divisor plus the h11+4 prime toric + divisors. When using this kind of bases, 64-bit floating-point + arithmetic is used for things such as intersection numbers since + numbers can be very large and overflow 64-bit integers. There is the + option of using exact rational arithmetic by setting + exact_arithmetic=True, but performance is significantly affected. + ::: + + **Arguments:** + - ```basis``` (list): Vector or matrix specifying a basis. When a + vector is used, the entries will be taken as the indices of points of + the polytope or prime divisors of the toric variety. When a + matrix is used, the rows are taken as linear combinations of the + aforementioned divisors. + - ```include_origin``` (boolean, optional, default=True): Whether to + interpret the indexing specified by the input vector as including the + origin. + - ```exact_arithmetic``` (boolean, optional, default=False): Whether to + use exact rational arithmetic instead of floats when using a generic + basis. + + **Returns:** + Nothing. + + **Example:** + See the example in [```divisor_basis```](#divisor_basis) or in + [Experimental Features](./experimental). 
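A condensed sketch of the simplest (vector) case is reproduced here, assuming ```v``` is the toric variety obtained from the triangulation of the $\mathbb{P}(1,1,1,6,9)$ polytope used in that example, for which the indices [1,3] should form a valid basis.
```python
v.set_divisor_basis([1, 3])  # basis given by indices of prime toric divisors
v.divisor_basis()
# Expected to return array([1, 3])
```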
+ """ + b = np.array(basis) + glsm_cm = self.glsm_charge_matrix(include_origin=True) + glsm_rnk = np.linalg.matrix_rank(glsm_cm) + # Check if the input is a vector + if len(b.shape) == 1: + if b.dtype != int: + raise Exception("Input vector must contain integer entries.") + if not include_origin: + b += 1 + # Check if it is a valid basis + if min(b) < 0 or max(b) >= glsm_cm.shape[1]: + raise Exception("Indices are not in appropriate range.") + if (glsm_rnk != np.linalg.matrix_rank(glsm_cm[:,b]) + or glsm_rnk != len(b)): + raise Exception("Input divisors do not form a basis.") + self._divisor_basis = b + # Else if input is a matrix + elif len(b.shape) == 2: + if not config._exp_features_enabled: + raise Exception("Using generic bases is currently an " + "experimental feature and must be enabled in " + "the configuration.") + # We start by converting the matrix into a common data type + t = type(b[0,0]) + if t in [np.int64, np.float64]: + tmp_b = b + elif t == fmpz: + tmp_b = np.array(b, dtype=int) + elif t == fmpq: + tmp_b = np.empty(b.shape, dtype=float) + for i in range(b.shape[0]): + for j in range(b.shape[1]): + tmp_b[i,j] = int(b[i,j].p)/int(b[i,j].q) + else: + raise Exception("Unsupported data type.") + if np.linalg.matrix_rank(tmp_b) != glsm_rnk: + raise Exception("Input matrix has incorrect rank.") + if b.shape == (glsm_rnk, glsm_cm.shape[1]): + new_b = b + tmp_new_b = tmp_b + elif b.shape == (glsm_rnk, glsm_cm.shape[1]-1): + new_b = np.empty(glsm_cm.shape, dtype=t) + new_b[:,1:] = b + new_b[:,0] = t(0) + tmp_new_b = np.zeros(glsm_cm.shape, dtype=tmp_b.dtype) + tmp_new_b[:,1:] = tmp_b + else: + raise Exception("Input matrix has incorrect shape.") + new_glsm_cm = tmp_new_b.dot(glsm_cm.T).T + if np.linalg.matrix_rank(new_glsm_cm) != glsm_rnk: + raise Exception("Input divisors do not form a basis.") + if exact_arithmetic and t == np.int64: + new_b = array_int_to_fmpz(new_b) + elif exact_arithmetic and t == np.float64: + new_b = array_float_to_fmpq(new_b) + elif t == np.int64: + new_b = np.array(new_b, dtype=float) + self._divisor_basis = new_b + else: + raise Exception("Input must be either a vector or a matrix.") + # Clear the cache of all in-basis computations + self.clear_cache(recursive=False, only_in_basis=True) + + def set_curve_basis(self, basis, include_origin=True, + exact_arithmetic=False): + """ + **Description:** + Specifies a basis of curves of the toric variety, which in turn + induces a basis of divisors. This can be done with a vector specifying + the indices of the standard basis of the lattice dual to the lattice of + prime toric divisors. Note that this case is equivalent to using the + same vector in the [```set_divisor_basis```](#set_divisor_basis) + function. + + :::tip experimental feature + There is also the option of setting a generic basis with a matrix that + specifies basis elements as a linear combination of the dual lattice of + prome toric divisors. When using this kind of bases, 64-bit + floating-point arithmetic is used for things such as intersection + numbers since numbers can be very large and overflow 64-bit integers. + There is the option of using exact rational arithmetic by setting + exact_arithmetic=True, but performance is significantly affected. + ::: + + **Arguments:** + - ```basis``` (list): Vector or matrix specifying a basis. When a + vector is used, the entries will be taken as indices of the standard + basis of the dual to the lattice of prime toric divisors. 
When a + matrix is used, the rows are taken as linear combinations of the + aforementioned elements. + - ```include_origin``` (boolean, optional, default=True): Whether to + interpret the indexing specified by the input vector as including the + origin. + - ```exact_arithmetic``` (boolean, optional, default=False): Whether to + use exact rational arithmetic instead of floats when using a generic + basis. + + **Returns:** + Nothing. + + **Example:** + See the analogous example in [```divisor_basis```](#divisor_basis) or a + more detailed example in [Experimental Features](./experimental). + """ + b = np.array(basis) + # Check if the input is a vector + if len(b.shape) == 1: + self.set_divisor_basis(b, include_origin=include_origin, + exact_arithmetic=exact_arithmetic) + return + if len(b.shape) != 2: + raise Exception("Input must be either a vector or a matrix.") + # Else input is a matrix + if not config._exp_features_enabled: + raise Exception("Using generic bases is currently an " + "experimental feature and must be enabled in " + "the configuration.") + glsm_cm = self.glsm_charge_matrix(include_origin=True) + glsm_rnk = np.linalg.matrix_rank(glsm_cm) + t = type(b[0,0]) + if t in [np.int64, np.float64]: + tmp_b = b + elif t == fmpz: + exact_arithmetic = True + tmp_b = np.array(b, dtype=int) + elif t == fmpq: + exact_arithmetic = True + tmp_b = np.empty(b.shape, dtype=float) + for i in range(b.shape[0]): + for j in range(b.shape[1]): + tmp_b[i,j] = int(b[i,j].p)/int(b[i,j].q) + else: + raise Exception("Unsupported data type.") + if np.linalg.matrix_rank(tmp_b) != glsm_rnk: + raise Exception("Input matrix has incorrect rank.") + if b.shape == (glsm_rnk, glsm_cm.shape[1]): + new_b = b + elif b.shape == (glsm_rnk, glsm_cm.shape[1]-1): + new_b = np.empty(glsm_cm.shape, dtype=t) + new_b[:,1:] = b + new_b[:,0] = t(0) + else: + raise Exception("Input matrix has incorrect shape.") + # Now we convert to exact rationals or integers if necessary + if exact_arithmetic and t == np.int64: + new_b = array_int_to_fmpz(new_b) + elif exact_arithmetic and t == np.float64: + new_b = array_float_to_fmpq(new_b) + # Now we compute the pseudo-inverse that defines a divisors basis. + if exact_arithmetic: + # Flint doesn't have a pseudo-inverse computation so we do this + # by first extending the matrix and finding the inverse, and + # then we truncate the result. + ctr = 0 + while ctr <= 10: + ctr += 1 + b_ext = np.concatenate( + (new_b, np.random.randint(-1, 1, + size=(glsm_cm.shape[1]-glsm_rnk,glsm_cm.shape[1]))), + axis=0) + if np.linalg.matrix_rank(array_fmpq_to_float(np.array(fmpq_mat( + b_ext.tolist()).tolist()))) == glsm_cm.shape[1]: + break + if ctr > 10: + raise Exception("There was a problem finding the inverse " + "matrix") + b_ext_inv = np.array(fmpz_mat(b_ext.tolist()).inv().tolist()) + b_inv = b_ext_inv[:,:glsm_rnk].T + else: + b_inv = np.linalg.pinv(new_b).T + self.set_divisor_basis(b_inv, exact_arithmetic=exact_arithmetic) + + def mori_cone(self, in_basis=False, include_origin=True, + from_intersection_numbers=False): + """ + **Description:** + Returns the Mori cone of the toric variety. + + **Arguments:** + - ```in_basis``` (boolean, optional, default=False): Use the current + basis of curves, which is dual to what the basis returned by the + [```divisor_basis```](#divisor_basis) function. + - ```include_origin``` (boolean, optional, default=True): Includes the + origin of the polytope in the computation, which corresponds to the + canonical divisor. 
+ - ```from_intersection_numbers``` (boolean, optional, default=False): + Compute the rays of the Mori cone using the intersection numbers of + the variety. This can be faster if they are already computed. + The set of rays may be different, but they define the same cone. + + **Returns:** + (Cone) The Mori cone of the toric variety. + """ + if self._mori_cone[0] is None: + if from_intersection_numbers: + rays = (self._compute_mori_rays_from_intersections_4d() + if self.dim() == 4 else + self._compute_mori_rays_from_intersections()) + self._mori_cone[0] = Cone(rays) + else: + self._mori_cone[0] = self.triangulation().cpl_cone().dual() + # 0: All divs, 1: No origin, 2: In basis + args_id = ((not include_origin)*1 if not in_basis else 0) + in_basis*2 + if self._mori_cone[args_id] is not None: + return self._mori_cone[args_id] + rays = self._mori_cone[0].rays() + basis = self.divisor_basis() + if include_origin and not in_basis: + new_rays = rays + elif not include_origin and not in_basis: + new_rays = rays[:,1:] + else: + if len(basis.shape) == 2: # If basis is matrix + new_rays = rays.dot(basis.T) + else: + new_rays = rays[:,basis] + c = Cone(new_rays, check=len(basis.shape)==2) + self._mori_cone[args_id] = c + return self._mori_cone[args_id] + + def _compute_mori_rays_from_intersections(self): + """ + **Description:** + Computes the Mori cone rays of the variety using intersection numbers. + + :::note + This function should generally not be called by the user. Instead, it + is called by the [mori_cone](#mori_cone) function when + the user wants to save some time if the intersection numbers + were already computed. + ::: + + **Arguments:** + None. + + **Returns:** + (list) The list of generating rays of the Mori cone of the toric + variety. + """ + intnums = self.intersection_numbers(in_basis=False) + dim = self.dim() + num_divs = self.h11() + dim + 2 + curve_dict = defaultdict(lambda: [[],[]]) + for ii in intnums: + if 0 in ii: + continue + ctr = Counter(ii) + if len(ctr) < dim: + continue + for comb in set(combinations(ctr.keys(),dim)): + crv = tuple(sorted(comb)) + curve_dict[crv][0].append(int(sum([i*(ctr[i]-(i in crv)) for i in ctr]))) + curve_dict[crv][1].append(intnums[ii]) + row_set = set() + for crv in curve_dict: + g = gcd_list(curve_dict[crv][1]) + row = np.zeros(num_divs, dtype=int) + for j,jj in enumerate(curve_dict[crv][0]): + row[jj] = int(round(curve_dict[crv][1][j]/g)) + row_set.add(tuple(row)) + mori_rays = np.array(list(row_set), dtype=int) + # Compute column corresponding to the origin + mori_rays[:,0] = -np.sum(mori_rays, axis=1) + return mori_rays + + def _compute_mori_rays_from_intersections_4d(self): + """ + **Description:** + Computes the Mori cone rays of the variety using intersection numbers. + + :::note notes + - This function should generally not be called by the user. Instead, + this is called by the [mori_cone](#mori_cone) function + when when the user wants to save some time if the intersection + numbers were already computed. + - This function is a more optimized version for 4D toric varieties. + ::: + + **Arguments:** + None. + + **Returns:** + (list) The list of generating rays of the Mori cone of the toric + variety. 
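**Example:**
A sketch of the public entry point that relies on this helper, assuming ```v``` is a 4-dimensional toric variety whose intersection numbers have already been computed, so that reusing them is cheaper than the default computation.
```python
v.intersection_numbers()                          # computed once and cached
mc = v.mori_cone(from_intersection_numbers=True)  # reuses the cached numbers
mc.rays()  # a generating set of rays (possibly different from, but equivalent to, the default one)
```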
+ """ + intnums = self.intersection_numbers(in_basis=False) + num_divs = int(max([ii[3] for ii in intnums])) + 1 + curve_dict = {} + curve_ctr = 0 + curve_sparse_list = [] + for ii in intnums: + if ii[0] == 0: + continue + if ii[0] == ii[1] == ii[2] == ii[3]: + continue + if ii[0] == ii[1] == ii[2]: + continue + if ii[1] == ii[2] == ii[3]: + continue + if ii[0] == ii[1] and ii[2] == ii[3]: + continue + if ii[0] == ii[1]: + if (ii[0],ii[2],ii[3]) not in curve_dict.keys(): + curve_dict[(ii[0],ii[2],ii[3])] = curve_ctr + curve_sparse_list.append([curve_ctr,ii[1],ii[-1]]) + curve_ctr += 1 + else: + curve_sparse_list.append([curve_dict[(ii[1],ii[2],ii[3])],ii[0],ii[-1]]) + elif ii[1] == ii[2]: + if (ii[0],ii[1],ii[3]) not in curve_dict.keys(): + curve_dict[(ii[0],ii[1],ii[3])] = curve_ctr + curve_sparse_list.append([curve_ctr,ii[2],ii[-1]]) + curve_ctr += 1 + else: + curve_sparse_list.append([curve_dict[(ii[0],ii[1],ii[3])],ii[2],ii[-1]]) + elif ii[2] == ii[3]: + if (ii[0],ii[1],ii[2]) not in curve_dict.keys(): + curve_dict[(ii[0],ii[1],ii[2])] = curve_ctr + curve_sparse_list.append([curve_ctr,ii[3],ii[-1]]) + curve_ctr += 1 + else: + curve_sparse_list.append([curve_dict[(ii[0],ii[1],ii[2])],ii[3],ii[-1]]) + else: + if (ii[0],ii[1],ii[2]) not in curve_dict.keys(): + curve_dict[(ii[0],ii[1],ii[2])] = curve_ctr + curve_sparse_list.append([curve_ctr,ii[3],ii[-1]]) + curve_ctr += 1 + else: + curve_sparse_list.append([curve_dict[(ii[0],ii[1],ii[2])],ii[3],ii[-1]]) + if (ii[0],ii[1],ii[3]) not in curve_dict.keys(): + curve_dict[(ii[0],ii[1],ii[3])] = curve_ctr + curve_sparse_list.append([curve_ctr,ii[2],ii[-1]]) + curve_ctr += 1 + else: + curve_sparse_list.append([curve_dict[(ii[0],ii[1],ii[3])],ii[2],ii[-1]]) + if (ii[0],ii[2],ii[3]) not in curve_dict.keys(): + curve_dict[(ii[0],ii[2],ii[3])] = curve_ctr + curve_sparse_list.append([curve_ctr,ii[1],ii[-1]]) + curve_ctr += 1 + else: + curve_sparse_list.append([curve_dict[(ii[0],ii[2],ii[3])],ii[1],ii[-1]]) + if (ii[1],ii[2],ii[3]) not in curve_dict.keys(): + curve_dict[(ii[1],ii[2],ii[3])] = curve_ctr + curve_sparse_list.append([curve_ctr,ii[0],ii[-1]]) + curve_ctr += 1 + else: + curve_sparse_list.append([curve_dict[(ii[1],ii[2],ii[3])],ii[0],ii[-1]]) + row_list = [[] for i in range(curve_ctr)] + # Remove zeros + for ii in curve_sparse_list: + if ii[2]!=0: + row_list[ii[0]].append([ii[1],ii[2]]) + # Normalize + for row in row_list: + g = abs(gcd_list([ii[1] for ii in row])) + for ii in row: + ii[1] = int(round(ii[1]/g)) + row_list = set(tuple(tuple(tuple(ii) for ii in sorted(row)) for row in row_list)) + mori_rays = np.zeros((len(row_list),num_divs), dtype=int) + for i,row in enumerate(row_list): + for ii in row: + mori_rays[i,int(round(ii[0]))] = round(ii[1]) + # Compute column corresponding to the origin + mori_rays[:,0] = -np.sum(mori_rays, axis=1) + return mori_rays + + def kahler_cone(self): + """ + **Description:** + Returns the Kähler cone of the toric variety in the current + basis of divisors. + + **Arguments:** + None. + + **Returns:** + (Cone) The Kähler cone of the toric variety. + """ + return self.mori_cone(in_basis=True).dual() + + def _construct_intnum_equations_4d(self): + """ + **Description:** + Auxiliary function used to compute the intersection numbers of the + toric variety. This function is optimized for 4D varieties. + + **Arguments:** + None. 
+ + **Returns:** + (tuple) A tuple where the first compotent is a sparse matrix M, the + second is a vector C, which are used to solve the system M*X=C, the + third is the list of intersection numbers not including + self-intersections, and the fourth is the list of intersection numbers + that are used as variables in the equation. + """ + # Origin is at index 0 + pts_ext = np.empty((self.triangulation().points().shape[0], + self.triangulation().points().shape[1]+1), + dtype=int) + pts_ext[:,:-1] = self.triangulation().points() + pts_ext[:,-1] = 1 + linear_relations = self.glsm_linear_relations(include_origin=False) + # First compute the distict intersection numbers + distintnum_array = sorted([ + [c for c in simp if c!=0] + + [1/abs(np.linalg.det([pts_ext[p] for p in simp]))] + for simp in self.triangulation().simplices()]) + frst = [[c for c in s if c != 0] for s in self.triangulation().simplices()] + simp_2 = set([j for i in [list(combinations(f,2)) for f in frst] for j in i]) + simp_3 = set([j for i in [list(combinations(f,3)) for f in frst] for j in i]) + # We construct and solve the linear system M*x + C = 0, where M is + # a rectangular mxn matrix and C is a vector. + ################################################################### + ### Define dictionaries, to be used to construct the linear system + ################################################################### + ## Dictionary of variables + # Most intersection numbers are trivially zero, find the possibly + # nonzero intersection numbers. + variable_array_1 = [tuple(j) for i in [[[s[0],s[0],s[1],s[2]], + [s[0],s[1],s[1],s[2]], + [s[0],s[1],s[2],s[2]]] + for s in simp_3] for j in i] + variable_array_2 = [tuple(j) for i in [[[s[0],s[0],s[1],s[1]], + [s[0],s[0],s[0],s[1]], + [s[0],s[1],s[1],s[1]]] + for s in simp_2] for j in i] + variable_array_3 = [(i,i,i,i) for i in range(1, len(pts_ext))] + variable_array = sorted(variable_array_1 + variable_array_2 + + variable_array_3) + variable_dict = {vv:v for v,vv in enumerate(variable_array)} + ## Dictionary to construct C + # C is constructed by adding/subtracting distinct intersection + # numbers. 
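# As an illustration of the construction below: a distinct intersection
# number stored as [a,b,c,d,val] contributes val to the equations labeled by
# each of its triples (a,b,c), (a,b,d), (a,c,d) and (b,c,d), with the
# left-out index recording which entry of the linear relation it multiplies.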
+ c_dict = {s:[] for s in simp_3} + for d in distintnum_array: + c_dict[(d[0],d[1],d[2])] += [[d[3],d[4]]] + c_dict[(d[0],d[1],d[3])] += [[d[2],d[4]]] + c_dict[(d[0],d[2],d[3])] += [[d[1],d[4]]] + c_dict[(d[1],d[2],d[3])] += [[d[0],d[4]]] + ## Dictionary to construct M + eqn_array_1 = [tuple(s) for s in simp_3] + eqn_array_2 = [tuple(j) for i in [[[s[0],s[0],s[1]], + [s[0],s[1],s[1]]] + for s in simp_2] for j in i] + eqn_array_3 = [(i,i,i) for i in range(1, len(pts_ext))] + eqn_array = sorted(eqn_array_1 + eqn_array_2 + eqn_array_3) + eqn_dict = {eq:[] for eq in eqn_array} + for v in variable_array: + if v[0]==v[3]: + eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] + elif v[0]==v[2]: + eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] + eqn_dict[(v[0],v[1],v[3])] += [[v[2],variable_dict[v]]] + elif v[0]==v[1] and v[2]==v[3]: + eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] + eqn_dict[(v[0],v[2],v[3])] += [[v[1],variable_dict[v]]] + elif v[0]==v[1]: + eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] + eqn_dict[(v[0],v[1],v[3])] += [[v[2],variable_dict[v]]] + eqn_dict[(v[0],v[2],v[3])] += [[v[1],variable_dict[v]]] + elif v[1]==v[3]: + eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] + eqn_dict[(v[1],v[2],v[3])] += [[v[0],variable_dict[v]]] + elif v[1]==v[2]: + eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] + eqn_dict[(v[0],v[1],v[3])] += [[v[2],variable_dict[v]]] + eqn_dict[(v[1],v[2],v[3])] += [[v[0],variable_dict[v]]] + elif v[2]==v[3]: + eqn_dict[(v[0],v[1],v[2])] += [[v[3],variable_dict[v]]] + eqn_dict[(v[0],v[2],v[3])] += [[v[1],variable_dict[v]]] + eqn_dict[(v[1],v[2],v[3])] += [[v[0],variable_dict[v]]] + else: + raise Exception("Failed to construct linear system.") + # Construct Linear System + num_rows = len(linear_relations)*len(eqn_array) + C = np.array([0.0]*num_rows) + M_row = [] + M_col = [] + M_val = [] + row_ctr = 0 + for eqn in eqn_array: + for lin in linear_relations: + if eqn[0]!=eqn[1] and eqn[1]!=eqn[2]: + c_temp = c_dict[eqn] + C[row_ctr] = sum([lin[cc[0]-1]*cc[1] for cc in c_temp]) + eqn_temp = eqn_dict[eqn] + for e in eqn_temp: + M_row.append(row_ctr) + M_col.append(e[1]) + M_val.append(lin[e[0]-1]) + row_ctr+=1 + Mat = csr_matrix((M_val,(M_row,M_col)), dtype=np.float64) + return Mat, C, distintnum_array, variable_array + + def _construct_intnum_equations(self): + """ + **Description:** + Auxiliary function used to compute the intersection numbers of the + toric variety. + + **Arguments:** + None. + + **Returns:** + (tuple) A tuple where the first compotent is a sparse matrix M, the + second is a vector C, which are used to solve the system M*X=C, the + third is the list of intersection numbers not including + self-intersections, and the fourth is the list of intersection numbers + that are used as variables in the equation. 
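:::note
The linear system being assembled can be summarized as follows. Writing each GLSM linear relation as $\sum_i \ell_i D_i = 0$, every $(d-1)$-tuple of prime toric divisors $D_{j_1}\cdots D_{j_{d-1}}$ yields an equation $\sum_i \ell_i\,\kappa_{i j_1 \cdots j_{d-1}} = 0$ among the intersection numbers $\kappa$. The intersection numbers already known from the simplices are collected into the vector $C$, and the remaining unknowns into $x$, giving the overdetermined but consistent system $Mx + C = 0$ that is solved in [```intersection_numbers```](#intersection_numbers).
:::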
+ """ + dim = self.dim() + pts_ext = np.empty((self.triangulation().points().shape[0],dim+1), dtype=int) + pts_ext[:,:-1] = self.triangulation().points() + pts_ext[:,-1] = 1 + linear_relations = self.glsm_linear_relations(include_origin=False) + # First compute the distict intersection numbers + distintnum_array = sorted([ + [c for c in simp if c!=0] + + [1/abs(np.linalg.det([pts_ext[p] for p in simp]))] + for simp in self.triangulation().simplices()]) + frst = [[c for c in s if c != 0] for s in self.triangulation().simplices()] + simp_n = [set([j for i in [list(combinations(f,n)) for f in frst] + for j in i]) for n in range(2,dim)] + simp_n = [[np.array(c) for c in simp_n[n]] for n in range(len(simp_n))] + # We construct and solve the linear system M*x + C = 0, where M is + # a rectangular mxn matrix and C is a vector. + ################################################################### + ### Define dictionaries, to be used to construct the linear system + ################################################################### + ## Dictionary of variables + # Most intersection numbers are trivially zero, find the possibly + # nonzero intersection numbers. + choices_n = [] + for n in range(2,dim): + comb = list(combinations(range(dim-1),dim-n)) + choices = np.empty((len(comb),dim), dtype=int) + choices[:,0] = 0 + for k,c in enumerate(comb): + for i in range(1,dim): + choices[k,i] = choices[k,i-1] + (0 if i-1 in c else 1) + choices_n.append(choices) + variable_array_1 = [(i,)*dim for i in range(1,len(pts_ext))] + variable_array_n = [tuple(s[ch]) for n in range(len(simp_n)) + for s in simp_n[n] for ch in choices_n[n]] + variable_array = variable_array_1 + variable_array_n + variable_dict = {vv:v for v,vv in enumerate(variable_array)} + ## Dictionary to construct C + # C is constructed by adding/subtracting distinct intersection + # numbers. 
+ c_dict = defaultdict(lambda: []) + for d in distintnum_array: + for i in range(len(d)-1): + c_dict[tuple(c for j,c in enumerate(d[:-1]) if j!= i) + ] += [(d[i],d[-1])] + ## Dictionary to construct M + eqn_array_1 = [tuple(s) for s in simp_n[-1]] + eqn_array_2 = [(i,)*(dim-1) for i in range(1, len(pts_ext))] + choices_n = [] + for n in range(2,dim-1): + comb = list(combinations(range(dim-2),dim-1-n)) + choices = np.empty((len(comb),dim-1), dtype=int) + choices[:,0] = 0 + for k,c in enumerate(comb): + for i in range(1,dim-1): + choices[k,i] = choices[k,i-1] + (0 if i-1 in c else 1) + choices_n.append(choices) + eqn_array_n = [tuple(s[ch]) for n in range(len(choices_n)) + for s in simp_n[n] for ch in choices_n[n]] + eqn_array = eqn_array_1 + eqn_array_2 + eqn_array_n + eqn_dict = defaultdict(lambda: []) + for v in variable_array: + for c in set(combinations(v,dim-1)): + k = None + for i in range(dim): + if i == dim-1 or v[i] != c[i]: + k = i + break + eqn_dict[c] += [(v[k],variable_dict[v])] + # Construct Linear System + num_rows = len(linear_relations)*len(eqn_array) + C = np.zeros(num_rows, dtype=float) + M_row = [] + M_col = [] + M_val = [] + row_ctr = 0 + for eqn in eqn_array: + for lin in linear_relations: + if len(set(eqn)) == dim-1: + c_temp = c_dict[eqn] + C[row_ctr] = sum([lin[cc[0]-1]*cc[1] for cc in c_temp]) + eqn_temp = eqn_dict[eqn] + for e in eqn_temp: + M_row.append(row_ctr) + M_col.append(e[1]) + M_val.append(lin[e[0]-1]) + row_ctr+=1 + Mat = csr_matrix((M_val,(M_row,M_col)), dtype=np.float64) + return Mat, C, distintnum_array, variable_array + + def intersection_numbers(self, in_basis=False, + zero_as_anticanonical=False, backend="all", + check=True, backend_error_tol=1e-6, + round_to_zero_treshold=1e-3, + round_to_integer_error_tol=2e-5, + verbose=0, exact_arithmetic=False): + """ + **Description:** + Returns the intersection numbers of the toric variety. + + :::tip experimental feature + The intersection numbers are computed as floating-point numbers by + default, but there is the option to turn them into rationals. The + process is fairly quick, but verifying that they are correct becomes + very slow at large $h^{1,1}$. + ::: + + **Arguments:** + - ```in_basis``` (boolean, optional, default=False): Return the + intersection numbers in the current basis of divisors. + - ```zero_as_anticanonical``` (boolean, optional, default=False): Treat + the zeroth index as corresponding to the anticanonical divisor + instead of the canonical divisor. + - ```backend``` (string, optional, default="all"): The sparse linear + solver to use. Options are "all", "sksparse" and "scipy". When set + to "all" every solver is tried in order until one succeeds. + - ```check``` (boolean, optional, default=True): Whether to explicitly + check the solution to the linear system. + - ```backend_error_tol``` (float, optional, default=1e-3): Error + tolerance for the solution of the linear system. + - ```round_to_zero_treshold``` (float, optional, default=1e-3): + Intersection numbers with magnitude smaller than this treshold are + rounded to zero. + - ```round_to_integer_error_tol``` (float, optional, default=1e-3): All + intersection numbers of the Calabi-Yau hypersurface must be integers + up to errors less than this value, when the CY is smooth. + - ```verbose``` (integer, optional, default=0): The verbosity level. + - verbose = 0: Do not print anything. + - verbose = 1: Print linear backend warnings. 
+ - ```exact_arithmetic``` (boolean, optional, default=False): Converts + the intersection numbers into exact rational fractions. + + Returns: + (dict) A dictionary containing nonzero intersection numbers. The keys + are divisor indices in ascending order. + """ + # 0: (canon,float), 1: (anticanon, float), 2: (basis, float) + # 4: (canon,fmpq), 5: (anticanon, fmpq), 6: (basis, fmpq) + args_id = ((1*zero_as_anticanonical if not in_basis else 0) + + 2*in_basis + 4*exact_arithmetic) + if self._intersection_numbers[args_id] is not None: + return copy.copy(self._intersection_numbers[args_id]) + if (self._intersection_numbers[0] is None + or (self._intersection_numbers[4] is None + and exact_arithmetic)): + backends = ["all", "sksparse", "scipy"] + if backend not in backends: + raise Exception("Invalid linear system backend. " + f"The options are: {backends}.") + if exact_arithmetic and not config._exp_features_enabled: + raise Exception("Using exact arithmetic is an experimental " + "feature and must be enabled in the " + "configuration.") + # Construct the linear equations + # Note that self.dim gives the dimension of the CY not the of the + # variety + Mat, C, distintnum_array, variable_array = (self._construct_intnum_equations_4d() + if self.dim() == 4 else + self._construct_intnum_equations()) + # The system to be solved is Mat*x + C = 0. This is an + # overdetermined but consistent linear system. + # There is a unique solution to this system. We solve it by + # defining MM = Mat.transpose()*Mat and CC = - Mat.transpose()*C, + # and solve + # MM*x = CC + # Since MM is a positive definite full rank matrix, this system can + # be solved using via a Cholesky decomposition. + solution = solve_linear_system(Mat, C, backend=backend, check=check, + backend_error_tol=backend_error_tol, + verbose=verbose) + if solution is None: + raise Exception("Linear system solution failed.") + if exact_arithmetic: + solution_fmpq = fmpq_mat([array_float_to_fmpq(solution).tolist()]).transpose() + if check: + Mat_fmpq = fmpq_mat(Mat.shape[0],Mat.shape[1]) + Mat_dok = Mat.todok() + for k in Mat_dok.keys(): + Mat_fmpq[k] = float_to_fmpq(Mat_dok[k]) + C_fmpq = fmpq_mat([array_float_to_fmpq(C).tolist()]).transpose() + res = Mat_fmpq*solution_fmpq + C_fmpq + if any(np.array(res.tolist()).flat): + raise Exception("Conversion to rationals failed.") + if exact_arithmetic: + intnums = dict() + for ii in distintnum_array: + intnums[tuple(int(round(j) for j in ii[:-1]))] = float_to_fmpq(ii[-1]) + for i,ii in enumerate(variable_array): + if abs(solution[i]) < round_to_zero_treshold: + continue + intnums[tuple(ii)] = float_to_fmpq(solution[i]) + else: + intnums = dict() + for ii in distintnum_array: + intnums[tuple(int(round(j)) for j in ii[:-1])] = ii[-1] + for i,ii in enumerate(variable_array): + if abs(solution[i]) < round_to_zero_treshold: + continue + intnums[tuple(ii)] = solution[i] + if self.is_smooth(): + for ii in intnums: + c = intnums[ii] + if abs(round(c)-c) > round_to_integer_error_tol: + raise Exception("Non-integer intersection numbers " + "detected in a smooth toric variety.") + intnums[ii] = int(round(c)) + # Add intersections with canonical divisor + # First we only compute intersection numbers with a single index 0 + # This is because precision errors add up significantly for + # intersection numbers with self-intersections of the canonical + # divisor + dim = self.dim() + canon_intnum = defaultdict(lambda: 0) + for ii in intnums: + choices = set(tuple(c for i,c in enumerate(ii) if i!=j) for j in range(dim)) 
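# In other words, since index 0 corresponds to the canonical divisor
# K = -sum_i D_i, each known intersection number is subtracted into every
# index tuple obtained by trading one of its indices for a 0.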
+ for c in choices: + canon_intnum[(0,)+c] -= intnums[ii] + # Now we round all intersection numbers of the form K_0i...j to + # integers if the CY is smooth. Otherwise, we only remove the zero + # elements + if self.canonical_divisor_is_smooth() and not exact_arithmetic: + for ii in list(canon_intnum.keys()): + val = canon_intnum[ii] + round_val = int(round(val)) + if abs(val-round_val) > round_to_integer_error_tol: + print(ii, val) + raise Exception("Non-integer intersection numbers " + "detected in a smooth CY.") + if round_val != 0: + canon_intnum[ii] = round_val + else: + canon_intnum.pop(ii) + elif not exact_arithmetic: + for ii in list(canon_intnum.keys()): + if abs(canon_intnum[ii]) < round_to_zero_treshold: + canon_intnum.pop(ii) + # Now we compute remaining intersection numbers + canon_intnum_n = [canon_intnum] + for n in range(2,dim+1): + tmp_intnum = defaultdict(lambda: 0) + for ii,ii_val in canon_intnum_n[-1].items(): + choices = set(tuple(c for i,c in enumerate(ii[n-1:]) if i!=j) for j in range(dim+1-n)) + for c in choices: + tmp_intnum[(0,)*n+c] -= ii_val + if not exact_arithmetic: + for ii in list(tmp_intnum.keys()): + if abs(tmp_intnum[ii]) < round_to_zero_treshold: + tmp_intnum.pop(ii) + canon_intnum_n.append(tmp_intnum) + for i in range(len(canon_intnum_n)): + for ii in canon_intnum_n[i]: + intnums[ii] = canon_intnum_n[i][ii] + if exact_arithmetic: + self._intersection_numbers[4] = intnums + self._intersection_numbers[0] = {ii:fmpq_to_float(intnums[ii]) for ii in intnums} + else: + self._intersection_numbers[0]= intnums + # Now intersection numbers have been computed + if zero_as_anticanonical and not in_basis: + self._intersection_numbers[args_id] = self._intersection_numbers[4*exact_arithmetic] + for ii in self._intersection_numbers[args_id]: + if 0 not in ii: + continue + self._intersection_numbers[args_id][ii] *= (-1 if sum(ii == 0)%2 == 1 else 1) + elif in_basis: + basis = self.divisor_basis() + if len(basis.shape) == 2: # If basis is matrix + if basis.dtype == float and exact_arithmetic: + basis = array_float_to_fmpq(basis) + self._intersection_numbers[2+4*exact_arithmetic] = ( + symmetric_sparse_to_dense_in_basis( + self._intersection_numbers[4*exact_arithmetic], + basis)) + else: + self._intersection_numbers[2+4*exact_arithmetic] = ( + filter_tensor_indices( + self._intersection_numbers[4*exact_arithmetic], + basis)) + return copy.copy(self._intersection_numbers[args_id]) + + def is_smooth(self): + """ + **Description:** + Returns True if the toric variety is smooth. + + **Arguments:** + None. + + **Returns:** + (boolean) The truth value of the toric variety being smooth. + """ + if self._is_smooth is not None: + return self._is_smooth + pts = self.triangulation().points() + pts = np.insert(pts, 0, np.ones(len(pts), dtype=int), axis=1) + simp = self.triangulation().simplices() + self._is_smooth = all(abs(int(round(np.linalg.det(pts[s]))))==1 for s in simp) + return self._is_smooth + + def canonical_divisor_is_smooth(self): + """ + **Description:** + Returns True if the canonical divisor is smooth. + + **Arguments:** + None. + + **Returns:** + (boolean) The truth value of the canonical divisor being smooth. 
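**Example:**
A minimal sketch, assuming ```v``` is a compact toric variety obtained from a fine, star triangulation of a reflexive polytope.
```python
v.is_smooth()                    # smoothness of the toric variety itself
v.canonical_divisor_is_smooth()  # smoothness of the canonical divisor
```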
+ """ + if self._canon_div_is_smooth is not None: + return self._canon_div_is_smooth + pts_mpcp = [tuple(pt) for pt in self.polytope().points_not_interior_to_facets()] + ind_triang = list(set.union(*[set(s) for s in self._triang.simplices()])) + pts_triang = [tuple(pt) for pt in self._triang.points()[ind_triang]] + sm = (all(pt in pts_triang for pt in pts_mpcp) and + (True if self.dim() <= 3 else + all(c.is_smooth() for c in self.fan_cones(self.dim(),self.dim()-2)))) + self._canon_div_is_smooth = sm + return self._canon_div_is_smooth + + def effective_generators(self): + """ + **Description:** + Returns the rays that generate the effective cone of the toric variety. + + **Arguments:** + None. + + **Returns:** + (list) The rays that generate the effective cone of the toric variety. + """ + if self._eff_gens is not None: + return np.array(self._eff_gens) + n_divs = self.triangulation().points().shape[0]-1 + rays = np.eye(n_divs-self.dim(), dtype=float).tolist() + linrels = self.glsm_linear_relations(include_origin=True) + basis = self.divisor_basis() + if len(basis.shape) != 1: + raise Exception("Generic bases are not yet supported.") + no_basis = [i for i in range(n_divs+1) + if i not in basis] + linrels_reord = linrels[:,no_basis+basis.tolist()] + linrels_rref = np.array(fmpz_mat(linrels_reord.tolist()).rref()[0].tolist(), dtype=int) + for i in range(linrels_rref.shape[0]): + linrels_rref[i,:] //= int(round(gcd_list(linrels_rref[i,:]))) + for i,ii in enumerate(no_basis): + linrels_reord[:,ii] = linrels_rref[:,i] + for i,ii in enumerate(basis,len(no_basis)): + linrels_reord[:,ii] = linrels_rref[:,i] + for l in linrels_reord: + if l[0] != 0: + continue + for i in no_basis: + if l[i] != 0: + r = [0]*(n_divs-self.dim()) + for j,jj in enumerate(basis): + r[j] = l[jj]/(-l[i]) + for j in no_basis: + if l[j] != 0 and j != i: + raise Exception("An unexpected error occured.") + rays.append(r) + self._eff_gens = np.array(rays, dtype=float) + return np.array(self._eff_gens) + + def effective_cone(self): + """ + **Description:** + Returns the effective cone of the toric variety. + + **Arguments:** + None. + + **Returns:** + (Cone) The effective cone of the toric variety. + """ + if self._eff_cone is not None: + return self._eff_cone + self._eff_cone = Cone(self.effective_generators()) + return self._eff_cone + + def fan_cones(self, d=None, face_dim=None): + """ + **Description:** + It returns the cones forming a fan defined by a star triangulation of a + reflexive polytope. The dimension of the desired cones can be + specified, and one can also restrict to cones that lie in faces of a + particular dimension. + + **Arguments:** + - ```d``` (integer, optional): The dimension of the desired cones. If + not specified, it returns the full-dimensional cones. + - ```face_dim``` (integer, optional): Restricts to cones that lie on + faces of the polytope of a particular dimension. If not specified, + then no restriction is imposed. + + **Returns:** + (list) The list of cones with the specified properties defined by the + star triangulation. 
+ """ + if d is None: + d = (self.dim() if face_dim is None else face_dim) + if d not in range(1,self.dim()+1): + raise Exception("Only cones of dimension 1 through d are " + "supported.") + if (d,face_dim) in self._fan_cones: + return self._fan_cones[(d,face_dim)] + pts = self.triangulation().points() + cones = set() + triang_pts_tup = [tuple(pt) for pt in self.triangulation().points()] + faces = ([self.triangulation().points_to_indices([tuple(pt) for pt in f.points() if tuple(pt) in triang_pts_tup]) + for f in self.triangulation()._poly.faces(face_dim)] if face_dim is not None else None) + for s in self.triangulation().simplices(): + for c in combinations(s,d): + if (0 not in c and (faces is None or any(all(cc in f for cc in c) for f in faces))): + cones.add(tuple(sorted(c))) + self._fan_cones[(d,face_dim)] = [Cone(pts[list(c)]) for c in cones] + return self._fan_cones[(d,face_dim)] + + def get_cy(self, nef_partition=None): + """ + **Description:** + Returns a CalabiYau object corresponding to the anti-canonical + hypersurface on the toric variety defined by the fine, star, regular + triangulation. If a nef-partition is specified then it returns the + complete intersection Calabi-Yau that it specifies. + :::note + Only Calabi-Yau 3-fold hypersurfaces are fully supported. Other + dimensions require enabling the experimetal features of CYTools in the + [configuration](./configuration). + ::: + **Arguments:** + - ```nef_partition``` (list, optional): A list of tuples of indices + specifying a nef-partition of the polytope, and defines a complete + intersection Calabi-Yau. + **Returns:** + (CalabiYau) The Calabi-Yau arising from the triangulation. + """ + if nef_partition != self._nef_part: # Reset CY if nef partition changes + self._cy = None + if self._cy is not None: + return self._cy + if nef_partition is not None: + if not config._exp_features_enabled: + raise Exception("CICYs are an experimental feature and must be" + " enabled.") + self._cy = CalabiYau(self, nef_partition) + self._nef_part = nef_partition + else: + if not self.triangulation().is_fine(): + raise Exception("Triangulation is non-fine.") + if ((self.dim() != 4 or not self.triangulation().polytope().is_favorable(lattice="N")) + and not config._exp_features_enabled): + raise Exception("Constructing Calabi-Yaus of dimensions other " + "than 3 or that are non-favorable are " + "experimental features and must be enabled.") + if not ((self.triangulation().points().shape == self.triangulation().polytope().points_not_interior_to_facets().shape + and all((self.triangulation().points() == self.triangulation().polytope().points_not_interior_to_facets()).flat)) + or (self.triangulation().points().shape == self.triangulation().polytope().points().shape + and all((self.triangulation().points() == self.triangulation().polytope().points()).flat))): + raise Exception("Calabi-Yau hypersurfaces must be constructed either points not interior to facets or all points.") + self._cy = CalabiYau(self) + self._nef_part = nef_partition + return self._cy diff --git a/cytools/triangulation.py b/cytools/triangulation.py index 0ae8e37..87fc958 100644 --- a/cytools/triangulation.py +++ b/cytools/triangulation.py @@ -28,7 +28,7 @@ from flint import fmpz_mat import numpy as np # CYTools imports -from cytools.calabiyau import CalabiYau +from cytools.toricvariety import ToricVariety from cytools.cone import Cone from cytools.utils import gcd_list from cytools import config @@ -57,13 +57,13 @@ class Triangulation: [```__init__```](#__init__) function. 
:::note - Some checks on the input are always performed, as it is nesessary to have - all the data organized properly so that the results obtained with the - Calabi-Yau class are correct. In particular, the ordering of the points - needs to be consistent with what the the ordering the - [Polytope](./polytope) class uses. For this reason, this function first - attempts to fix discrepancies, and if it fails then it disallows the - creation of a Calabi-Yau object. + If you construct a triangulation object directly by inputting a list of + points they may be reordered to match the ordering of the points from the + [Polytope](./polytope) class. This is to ensure that computations of toric + varieties and Calabi-Yau manifolds are correct. To avoid this subtlety we + discourage users from constructing Triangulation objects directly, and + instead use the triangulation functions in the [Polytope](./polytope) + class. ::: **Arguments:** @@ -147,69 +147,21 @@ def __init__(self, triang_pts, poly=None, heights=None, make_star=False, **Returns:** Nothing. """ - self._triang_pts = np.array(triang_pts, dtype=int) - triang_pts_tup = [tuple(pt) for pt in self._triang_pts] + tmp_triang_pts = [tuple(pt) for pt in np.array(triang_pts, dtype=int)] heights = copy.deepcopy(heights) if poly is None: from cytools.polytope import Polytope - self._poly = Polytope(self._triang_pts) + self._poly = Polytope(tmp_triang_pts) else: self._poly = poly if not self._poly.is_solid(): raise Exception("Only triangulations of full-dimensional point " "configurations are supported.") - self._allow_cy = True - # Find out if all points are being used and check the consistency of - # the input - poly_pts_mpcp = [tuple(pt) for pt in - self._poly.points_not_interior_to_facets()] - if triang_pts_tup == poly_pts_mpcp: - self._all_poly_pts = False - else: - poly_pts = [tuple(pt) for pt in self._poly.points()] - if triang_pts_tup == poly_pts: - self._all_poly_pts = True - else: - # Here we attempt to fix the ordering or else we disallow the - # creation of a Calabi-Yau object - print("Warning: Unsupported point configuration found. " - "Attempting to fix...") - if set(triang_pts_tup) == set(poly_pts_mpcp): - self._all_poly_pts = False - ind_dict = {i:poly_pts_mpcp.index(triang_pts_tup[i]) - for i in range(len(triang_pts_tup))} - triang_pts_tup = poly_pts_mpcp - self._triang_pts = np.array(triang_pts_tup) - if heights is not None: - tmp_heights = [0]*len(heights) - for i,j in ind_dict.items(): - tmp_heights[j] = heights[i] - heights = tmp_heights - if simplices is not None: - simplices = [[ind_dict[i] for i in s] - for s in simplices] - print("Sucessfully fixed the input.") - elif set(triang_pts_tup) == set(poly_pts): - self._all_poly_pts = True - ind_dict = {i:poly_pts.index(triang_pts_tup[i]) - for i in range(len(triang_pts_tup))} - triang_pts_tup = poly_pts - self._triang_pts = np.array(triang_pts_tup) - if heights is not None: - tmp_heights = [0]*len(heights) - for i,j in ind_dict.items(): - tmp_heights[j] = heights[i] - heights = tmp_heights - if simplices is not None: - simplices = [[ind_dict[i] for i in s] - for s in simplices] - print("Sucessfully fixed the input.") - else: - self._all_poly_pts = False - self._allow_cy = False - print("Warning: Failed to fix the input. Creation of a " - "Calabi-Yau object will be disallowed. 
Other " - "functions may also return incorrect results.") + # Now we reorder the points to make sure they match the ordering of + # the Polytope class + poly_pts = [tuple(pt) for pt in self._poly.points()] + self._triang_pts = np.array([pt for pt in poly_pts if pt in tmp_triang_pts]) + triang_pts_tup = [tuple(pt) for pt in self._triang_pts] # Try to find the index of the origin. try: self._origin_index = triang_pts_tup.index((0,)*self._poly.dim()) @@ -217,6 +169,7 @@ def __init__(self, triang_pts, poly=None, heights=None, make_star=False, self._origin_index = -1 make_star = False self._pts_dict = {ii:i for i,ii in enumerate(triang_pts_tup)} + self._pts_triang_to_poly = {i:self._poly.points_to_indices(ii) for i,ii in enumerate(self._triang_pts)} self._backend = backend # Initialize hidden attributes self._hash = None @@ -227,8 +180,7 @@ def __init__(self, triang_pts, poly=None, heights=None, make_star=False, self._gkz_phi = None self._sr_ideal = None self._cpl_cone = [None]*2 - self._fan_cones = dict() - self._cy = None + self._toricvariety = None # Now save input triangulation or construct it if simplices is not None: self._simplices = np.array(sorted([sorted(s) for s in simplices])) @@ -251,8 +203,7 @@ def __init__(self, triang_pts, poly=None, heights=None, make_star=False, "be specified when working without a " "backend") if backend == "qhull": - heights = [np.dot(p,p) + np.random.normal(0,0.05) - for p in self._triang_pts] + heights = [np.dot(p,p) + np.random.normal(0,0.05) for p in self._triang_pts] elif backend == "cgal": heights = [np.dot(p,p) for p in self._triang_pts] elif backend == "topcom": @@ -267,15 +218,11 @@ def __init__(self, triang_pts, poly=None, heights=None, make_star=False, self._heights = heights # Now run the appropriate triangulation function if backend == "qhull": - self._simplices = qhull_triangulate(self._triang_pts, - heights) + self._simplices = qhull_triangulate(self._triang_pts, heights) if make_star: - facets_ind = [[self._pts_dict[tuple(pt)] - for pt in f.boundary_points()] - for f in self._poly.facets()] - self._simplices = convert_to_star(self._simplices, - facets_ind, - self._origin_index) + facets_ind = [[self._pts_dict[tuple(pt)] for pt in f.boundary_points()] + for f in self._poly.facets()] + self._simplices = convert_to_star(self._simplices, facets_ind, self._origin_index) # With CGAL we can obtain a star triangulation more quickly by # setting the height of the origin to be much lower than the # others. 
In theory this can also be done with QHull, but one @@ -288,15 +235,11 @@ def __init__(self, triang_pts, poly=None, heights=None, make_star=False, else: # Use TOPCOM self._simplices = topcom_triangulate(self._triang_pts) if make_star: - facets_ind = [[self._pts_dict[tuple(pt)] - for pt in f.boundary_points()] - for f in self._poly.facets()] - self._simplices = convert_to_star(self._simplices, - facets_ind, - self._origin_index) + facets_ind = [[self._pts_dict[tuple(pt)] for pt in f.boundary_points()] + for f in self._poly.facets()] + self._simplices = convert_to_star(self._simplices, facets_ind, self._origin_index) # Make sure that the simplices are sorted - self._simplices = np.array( - sorted([sorted(s) for s in self._simplices])) + self._simplices = np.array(sorted([sorted(s) for s in self._simplices])) def clear_cache(self, recursive=False): """ @@ -318,8 +261,7 @@ def clear_cache(self, recursive=False): self._gkz_phi = None self._sr_ideal = None self._cpl_cone = [None]*2 - self._fan_cones = dict() - self._cy = None + self._toricvariety = None if recursive: self._poly.clear_cache() @@ -339,7 +281,8 @@ def __repr__(self): if self._is_regular is not None else "") + f"{('star' if self.is_star() else 'non-star')} " f"triangulation of a {self.dim()}-dimensional " - f"polytope in ZZ^{self.dim()}") + f"point configuration with {len(self._triang_pts)} points " + f"in ZZ^{self.dim()}") def __eq__(self, other): """ @@ -356,8 +299,7 @@ def __eq__(self, other): if not isinstance(other, Triangulation): return NotImplemented return (self.polytope() == other.polytope() and - sorted(self.simplices().tolist()) - == sorted(other.simplices().tolist())) + sorted(self.simplices().tolist()) == sorted(other.simplices().tolist())) def __ne__(self, other): """ @@ -374,8 +316,7 @@ def __ne__(self, other): if not isinstance(other, Triangulation): return NotImplemented return not (self.polytope() == other.polytope() and - sorted(self.simplices().tolist()) - == sorted(other.simplices().tolist())) + sorted(self.simplices().tolist()) == sorted(other.simplices().tolist())) def __hash__(self): """ @@ -389,8 +330,8 @@ def __hash__(self): (integer) The hash value of the triangulation. """ if self._hash is None: - self._hash = hash((hash(self.polytope()),) + - tuple(sorted(tuple(s) for s in self.simplices()))) + self._hash = hash((hash(tuple(sorted(tuple(v) for v in self.points()))),) + + tuple(sorted(tuple(sorted(s)) for s in self.simplices()))) return self._hash def polytope(self): @@ -439,6 +380,26 @@ def points_to_indices(self, points): return self._pts_dict[tuple(points)] return np.array([self._pts_dict[tuple(pt)] for pt in points]) + def triangulation_to_polytope_indices(self, points): + """ + **Description:** + Takes a list of indices of points of the triangulation and it returns + the corresponding indices of the polytope. It also accepts a single + entry, in which case it returns the corresponding index. + + **Arguments:** + - ```points``` (list): A list of indices of points. + + **Returns:** + (list or integer) The list of indices corresponding to the given + points. Or the index of the point if only one is given. 
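**Example:**
A brief sketch, assuming ```t``` is a triangulation that only uses the points of the polytope not interior to facets, so that triangulation indices and polytope indices need not coincide.
```python
t.triangulation_to_polytope_indices([0, 1, 2])
# returns the indices of the same three points in the ordering used by the
# Polytope class
```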
+ """ + if len(np.array(points).shape) == 1: + if np.array(points).shape[0] == 0: + return np.zeros(0, dtype=int) + return self._pts_triang_to_poly[points] + return np.array([self._pts_triang_to_poly[pt] for pt in points]) + def simplices(self): """ **Description:** @@ -479,8 +440,7 @@ def is_fine(self): """ if self._is_fine is not None: return self._is_fine - self._is_fine = (len(set.union(*[set(s) for s in self._simplices])) - == len(self._triang_pts)) + self._is_fine = (len(set.union(*[set(s) for s in self._simplices])) == len(self._triang_pts)) return self._is_fine def is_regular(self, backend=None): @@ -501,9 +461,7 @@ def is_regular(self, backend=None): if self._is_regular is not None: return self._is_regular self._is_regular = (True if self.simplices().shape[0] == 1 else - self.cpl_cone( - include_points_not_in_triangulation=False - ).is_solid(backend=backend)) + self.cpl_cone(include_points_not_in_triangulation=False).is_solid(backend=backend)) return self._is_regular def is_star(self, star_origin=0): @@ -554,9 +512,8 @@ def is_valid(self): heights = cpl.tip_of_stretched_cone(0.1)[1] tmp_triang = Triangulation(self.points(), self.polytope(), heights=heights, make_star=False) - self._is_valid = ( - sorted(sorted(s) for s in self.simplices().tolist()) == - sorted(sorted(s) for s in tmp_triang.simplices().tolist())) + self._is_valid = (sorted(sorted(s) for s in self.simplices().tolist()) == + sorted(sorted(s) for s in tmp_triang.simplices().tolist())) return self._is_valid # If it is not regular, then we check this using the definition of a # triangulation. This can be quite slow for large polytopes. @@ -636,10 +593,7 @@ def random_flips(self, N, only_fine=None, only_regular=None, """ current_triang = self for n in range(N): - neighbors = current_triang.neighbor_triangulations( - only_fine=False, - only_regular=False, - only_star=False) + neighbors = current_triang.neighbor_triangulations(only_fine=False, only_regular=False, only_star=False) np.random.shuffle(neighbors) good_pick = False for t in neighbors: @@ -725,15 +679,9 @@ def sr_ideal(self): return copy.deepcopy(self._sr_ideal) if not self.is_star(): raise Exception("Triangulation is not star.") - points = (frozenset(range(len(self._triang_pts))) - - frozenset([self._origin_index])) - simplices = [[i for i in s if i != self._origin_index] - for s in self.simplices()] - simplex_tuples = tuple(frozenset(frozenset(ss) - for s in simplices - for ss in combinations(s, dd)) - for dd in range(1, - self.dim() + 1)) + points = (frozenset(range(len(self._triang_pts))) - frozenset([self._origin_index])) + simplices = [[i for i in s if i != self._origin_index] for s in self.simplices()] + simplex_tuples = tuple(frozenset(frozenset(ss) for s in simplices for ss in combinations(s, dd)) for dd in range(1,self.dim()+1)) SR_ideal = set() checked = set() for i in range(len(simplex_tuples)-1): @@ -750,8 +698,7 @@ def sr_ideal(self): in_SR = True if k not in simplex_tuples[i+1] and not in_SR: SR_ideal.add(k) - self._sr_ideal = sorted([sorted(s)for s in SR_ideal], - key=lambda x: (len(x),x)) + self._sr_ideal = sorted([sorted(s)for s in SR_ideal], key=lambda x: (len(x),x)) return copy.deepcopy(self._sr_ideal) def cpl_cone(self, backend=None, @@ -781,11 +728,8 @@ def cpl_cone(self, backend=None, if backend not in backends: raise Exception(f"Options for backend are: {backends}") if backend is None: - backend = ("native" if self.is_fine() - or not include_points_not_in_triangulation - else "topcom") - if (backend == "native" and not self.is_fine() - and 
not include_points_not_in_triangulation): + backend = ("native" if self.is_fine() or not include_points_not_in_triangulation else "topcom") + if (backend == "native" and not self.is_fine() and not include_points_not_in_triangulation): print("Warning: Native backend is not supported when not excluding" "points that are not in the triangulation. Using TOPCOM...") backend = "topcom" @@ -808,8 +752,7 @@ def cpl_cone(self, backend=None, m[:,j] = pts_ext[pt] for j,pt in enumerate(comm_pts): m[:,j+2] = pts_ext[pt] - v = np.array(fmpz_mat(m.tolist()).nullspace()[0] - .transpose().tolist()[0], dtype=int) + v = np.array(fmpz_mat(m.tolist()).nullspace()[0].transpose().tolist()[0], dtype=int) if v[0] < 0: v = -v # Reduce the vector @@ -825,97 +768,55 @@ def cpl_cone(self, backend=None, full_v = tuple(full_v) if full_v not in null_vecs: null_vecs.add(full_v) - self._cpl_cone[args_id] = Cone(hyperplanes=list(null_vecs), - check=False) + self._cpl_cone[args_id] = Cone(hyperplanes=list(null_vecs), check=False) return self._cpl_cone[args_id] # Otherwise we compute this cone by using differences of GKZ vectors. gkz_phi = self.gkz_phi() diffs = [] - for t in self.neighbor_triangulations(only_fine=False, - only_regular=False, - only_star=False): + for t in self.neighbor_triangulations(only_fine=False, only_regular=False, only_star=False): diffs.append(t.gkz_phi()-gkz_phi) self._cpl_cone[args_id] = Cone(hyperplanes=diffs) return self._cpl_cone[args_id] - def get_cy(self): + def get_toric_variety(self): """ **Description:** - Returns a CalabiYau object corresponding to the anti-canonical - hypersurface on the toric variety defined by the fine, star, regular + Returns a ToricVariety object corresponding to the fan defined by the triangulation. - :::note - Only Calabi-Yau 3-fold hypersurfaces are fully supported. Other - dimensions require enabling the experimetal features of CYTools in the - [configuration](./configuration). - ::: - **Arguments:** None. **Returns:** - (CalabiYau) The Calabi-Yau arising from the triangulation. + (ToricVariety) The toric variety arising from the triangulation. """ - if self._cy is not None: - return self._cy - if not self._allow_cy: - raise Exception("There is a problem with the data of the " - "triangulation. Constructing a CY is disallowed " - "since computations might produce wrong results.") - if not self._poly.is_favorable(lattice="N"): - raise Exception("Only favorable CYs are currently supported.") - if not self.is_star() or not self.is_fine(): - raise Exception("Triangulation is non-fine or non-star.") - if self._poly.dim() != 4 and not config.enable_experimental_features: - raise Exception("Constructing Calabi-Yaus of dimensions other " - "than 3 is experimental and must be enabled in " - "the configuration.") - self._cy = CalabiYau(self) - return self._cy - - def fan_cones(self, d=None, face_dim=None): + if self._toricvariety is not None: + return self._toricvariety + if not self.is_star(): + raise Exception("Triangulation is non-star.") + self._toricvariety = ToricVariety(self) + return self._toricvariety + + def get_cy(self, nef_partition=None): """ **Description:** - It returns the cones forming a fan defined by a star triangulation of a - reflexive polytope. The dimension of the desired cones can be - specified, and one can also restrict to cones that lie in faces of a - particular dimension. - + Returns a CalabiYau object corresponding to the anti-canonical + hypersurface on the toric variety defined by the fine, star, regular + triangulation. 
If a nef-partition is specified then it returns the + complete intersection Calabi-Yau that it specifies. + :::note + Only Calabi-Yau 3-fold hypersurfaces are fully supported. Other + dimensions require enabling the experimetal features of CYTools in the + [configuration](./configuration). + ::: **Arguments:** - - ```d``` (integer, optional): The dimension of the desired cones. If - not specified, it returns the full-dimensional cones. - - ```face_dim``` (integer, optional): Restricts to cones that lie on - faces of the polytope of a particular dimension. If not specified, - then no restriction is imposed. - + - ```nef_partition``` (list, optional): A list of tuples of indices + specifying a nef-partition of the polytope, and defines a complete + intersection Calabi-Yau. **Returns:** - (list) The list of cones with the specified properties defined by the - star triangulation. + (CalabiYau) The Calabi-Yau arising from the triangulation. """ - if d is None: - d = (self.dim() if face_dim is None else face_dim) - if d not in range(1,self.dim()+1): - raise Exception("Only cones of dimension 1 through d are " - "supported.") - if (d,face_dim) in self._fan_cones: - return self._fan_cones[(d,face_dim)] - if not self.is_star() or not self._poly.is_reflexive(): - raise Exception("Cones can only be obtained from star " - "triangulations of reflexive polytopes.") - pts = self.points() - cones = set() - faces = ([self.points_to_indices(f.points()) - for f in self._poly.faces(face_dim)] - if face_dim is not None else None) - for s in self.simplices(): - for c in combinations(s,d): - if (0 not in c - and (faces is None - or any(all(cc in f for cc in c) for f in faces))): - cones.add(tuple(sorted(c))) - self._fan_cones[(d,face_dim)] = [Cone(pts[list(c)]) for c in cones] - return self._fan_cones[(d,face_dim)] + return self.get_toric_variety().get_cy(nef_partition) def convert_to_star(simplices, facets, star_origin): @@ -980,13 +881,11 @@ def qhull_triangulate(points, heights): for i in range(len(points))] hull = ConvexHull(lifted_points) # We first pick the lower facets of the convex hull - low_fac = [hull.simplices[n] for n,nn in enumerate(hull.equations) - if nn[-2] < 0] # The -2 component is the lifting dimension + low_fac = [hull.simplices[n] for n,nn in enumerate(hull.equations) if nn[-2] < 0] # The -2 component is the lifting dimension # Then we only take the faces that project to full-dimensional simplices # in the original point configuration lifted_points = [pt[:-1] + (1,) for pt in lifted_points] - simp = [s for s in low_fac - if int(round(np.linalg.det([lifted_points[i] for i in s]))) != 0] + simp = [s for s in low_fac if int(round(np.linalg.det([lifted_points[i] for i in s]))) != 0] return np.array(sorted([sorted(s) for s in simp])) @@ -1105,10 +1004,9 @@ def all_triangulations(points, only_fine=False, only_regular=False, topcom_bin = (config.topcom_path + ("topcom-points2finetriangs" if only_fine else "topcom-points2triangs")) - topcom = subprocess.Popen( - (topcom_bin,), - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, universal_newlines=True) + topcom = subprocess.Popen((topcom_bin,), + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) pts_str = str([list(pt)+[1] for pt in points]) topcom_res, topcom_err = topcom.communicate(input=pts_str+"[]") try: @@ -1263,7 +1161,6 @@ def random_triangulations_fair_generator(triang_pts, N=None, n_walk=10, n_flip=1 triang_pts = np.array(triang_pts) num_points = len(triang_pts) 
n_retries = 0 - # Obtain a random Delaunay triangulation by picking a random point as the # origin. rand_ind = np.random.randint(0,len(triang_pts)) @@ -1339,7 +1236,6 @@ def random_triangulations_fair_generator(triang_pts, N=None, n_walk=10, n_flip=1 n_retries = 0 step_per_tri_ctr = 0 yield temp_tri - step_ctr += 1 step_per_tri_ctr += 1 old_pt = new_pt/np.linalg.norm(new_pt) diff --git a/cytools/utils.py b/cytools/utils.py index ccfce1b..fe50ed9 100644 --- a/cytools/utils.py +++ b/cytools/utils.py @@ -162,21 +162,18 @@ def filter_tensor_indices(tensor, indices): convert the tensor of intersection numbers to a given basis. **Arguments:** - - ```tensor``` (list): The input symmetric sparse tensor of the form - [[a,b,...,c,M_ab...c]]. + - ```tensor``` (dict): The input symmetric sparse tensor of the form + of a dictionary {(a,b,...,c):M_ab...c, ...]. - ```indices``` (list): The list of indices that will be preserved. **Returns:** - (list) A matrix describing a tensor in the same format as the input, but + (dict) A dictionary describing a tensor in the same format as the input, but only with the desired indices. """ - dim = len(tensor[0]) - 1 - tensor_filtered = [c for c in tensor - if all(c[i] in indices for i in range(dim))] + tensor_filtered = {k:tensor[k] for k in tensor if all(c in indices for c in k)} indices_dict = {vv:v for v,vv in enumerate(indices)} - tensor_reindexed = sorted([sorted([indices_dict[jj] for jj in ii[:-1]]) - + [ii[-1]] for ii in tensor_filtered]) - return np.array(tensor_reindexed) + tensor_reindexed = {tuple(sorted(indices_dict[c] for c in k)):tensor_filtered[k] for k in tensor_filtered} + return tensor_reindexed def symmetric_sparse_to_dense_in_basis(tensor, basis): diff --git a/scripts/linux/cytools b/scripts/linux/cytools index b12de98..81f9bcf 100755 --- a/scripts/linux/cytools +++ b/scripts/linux/cytools @@ -106,7 +106,7 @@ cat << EOF ░░█████████ █████ █████ ░░██████ ░░██████ █████ ██████ ░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░ ░░░░░░ ░░░░░ ░░░░░░ - Developed by Liam McAllister's Group | Version 0.1.1 + Developed by Liam McAllister's Group | Version 0.2.0 https://cytools.liammcallistergroup.com EOF diff --git a/scripts/macos/cytools b/scripts/macos/cytools index cc58f88..5258b74 100755 --- a/scripts/macos/cytools +++ b/scripts/macos/cytools @@ -100,7 +100,7 @@ cat << EOF ░░█████████ █████ █████ ░░██████ ░░██████ █████ ██████ ░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░ ░░░░░░ ░░░░░ ░░░░░░ - Developed by Liam McAllister's Group | Version 0.1.1 + Developed by Liam McAllister's Group | Version 0.2.0 https://cytools.liammcallistergroup.com EOF diff --git a/scripts/windows/launcher.ps1 b/scripts/windows/launcher.ps1 index dee1b86..3737e30 100644 --- a/scripts/windows/launcher.ps1 +++ b/scripts/windows/launcher.ps1 @@ -38,7 +38,7 @@ $banner=@" ░░█████████ █████ █████ ░░██████ ░░██████ █████ ██████ ░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░ ░░░░░░ ░░░░░ ░░░░░░ - Developed by Liam McAllister's Group | Version 0.1.1 + Developed by Liam McAllister's Group | Version 0.2.0 https://cytools.liammcallistergroup.com "@ diff --git a/setup.py b/setup.py index c1187e7..4226331 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setuptools.setup( name="cytools", - version="0.1.1", + version="0.2.0", author="Liam McAllister Group", author_email="", description="A software package for analyzing Calabi-Yau hypersurfaces in toric varieties.",
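For quick orientation, here is a minimal usage sketch of the reorganized `Triangulation.get_toric_variety()` / `Triangulation.get_cy()` interface introduced in the `cytools/triangulation.py` changes above. The `Polytope` and `triangulate()` calls are assumed from the existing CYTools documentation, and the polytope is just the standard quintic example; treat this as an illustration rather than the definitive API.

```python
# Hypothetical usage of the reorganized API (assumes CYTools >= 0.2.0).
from cytools import Polytope

# The reflexive polytope whose anti-canonical hypersurface is the quintic threefold.
p = Polytope([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[-1,-1,-1,-1]])
t = p.triangulate()                 # fine, regular, star triangulation

v = t.get_toric_variety()           # new: the toric variety defined by the fan
cy = t.get_cy()                     # anti-canonical hypersurface, as before
# cy = t.get_cy(nef_partition=...)  # optionally, a complete intersection CY
```

Similarly, a small illustration of the dictionary-based sparse-tensor format that `filter_tensor_indices` now accepts, per the `cytools/utils.py` change; the keys and values below are made up for illustration.

```python
from cytools.utils import filter_tensor_indices

# {(a,b,...,c): M_ab...c, ...} with sorted index tuples as keys.
tensor = {(0,0,1): 3, (0,1,2): 1, (1,2,3): 5}

# Keep only entries whose indices all lie in [0,1,2], reindexed to positions
# within that list; the (1,2,3) entry is dropped.
filtered = filter_tensor_indices(tensor, [0,1,2])
# filtered == {(0,0,1): 3, (0,1,2): 1}
```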