diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 9f13a85de..73257e7a6 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -58,20 +58,27 @@ jobs: with: python-version: ${{ matrix.python-version }} + - name: Check full Python version + run: | + python --version + python_version=$(python --version 2>&1 | cut -d' ' -f2) + echo "Python version: $python_version" + echo "version=$python_version" >> $GITHUB_ENV + - name: Restore Python environment cache if: env.has_changes == 'true' id: restore-env uses: actions/cache/restore@v4 with: - path: .venv-${{ matrix.python-version }} - key: ${{ runner.os }}-venv-${{ matrix.python-version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} + path: .venv-${{ env.version }} + key: ${{ runner.os }}-venv-${{ env.version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} - name: Set up virtual environment if not restored from cache if: steps.restore-env.outputs.cache-hit != 'true' && env.has_changes == 'true' run: | gh cache list - python -m venv .venv-${{ matrix.python-version }} - source .venv-${{ matrix.python-version }}/bin/activate + python -m venv .venv-${{ env.version }} + source .venv-${{ env.version }}/bin/activate python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt pip install matplotlib==3.9.2 @@ -79,7 +86,7 @@ jobs: - name: Benchmark with pytest-benchmark (PR) if: env.has_changes == 'true' run: | - source .venv-${{ matrix.python-version }}/bin/activate + source .venv-${{ env.version }}/bin/activate pwd lscpu pip list @@ -106,7 +113,7 @@ jobs: - name: Benchmark with pytest-benchmark (MASTER) if: env.has_changes == 'true' run: | - source .venv-${{ matrix.python-version }}/bin/activate + source .venv-${{ env.version }}/bin/activate pwd lscpu pip list @@ -122,7 +129,7 @@ jobs: - name: Put benchmark results in same folder if: env.has_changes == 'true' run: | - source .venv-${{ matrix.python-version }}/bin/activate + source .venv-${{ env.version }}/bin/activate pwd cd tests/benchmarks find .benchmarks/ -type f -printf "%T@ %p\n" | sort -n | cut -d' ' -f 2- | tail -n 1 > temp1 @@ -143,7 +150,7 @@ jobs: - name: Compare latest commit results to the master branch results if: env.has_changes == 'true' run: | - source .venv-${{ matrix.python-version }}/bin/activate + source .venv-${{ env.version }}/bin/activate cd tests/benchmarks pwd python compare_bench_results.py diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml index ae0be3dbb..70ee94290 100644 --- a/.github/workflows/black.yml +++ b/.github/workflows/black.yml @@ -19,25 +19,32 @@ jobs: with: python-version: ${{ matrix.python-version }} + - name: Check full Python version + run: | + python --version + python_version=$(python --version 2>&1 | cut -d' ' -f2) + echo "Python version: $python_version" + echo "version=$python_version" >> $GITHUB_ENV + - name: Restore Python environment cache id: restore-env uses: actions/cache/restore@v4 with: - path: .venv-${{ matrix.python-version }} - key: ${{ runner.os }}-venv-${{ matrix.python-version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} + path: .venv-${{ env.version }} + key: ${{ runner.os }}-venv-${{ env.version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} - name: Set up virtual environment if not restored from cache if: steps.restore-env.outputs.cache-hit != 'true' run: | gh cache list - python -m venv .venv-${{ matrix.python-version }} - source 
.venv-${{ matrix.python-version }}/bin/activate + python -m venv .venv-${{ env.version }} + source .venv-${{ env.version }}/bin/activate python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt - name: Check files using the black formatter run: | - source .venv-${{ matrix.python-version }}/bin/activate + source .venv-${{ env.version }}/bin/activate black --version black --check desc/ tests/ || black_return_code=$? echo "BLACK_RETURN_CODE=$black_return_code" >> $GITHUB_ENV diff --git a/.github/workflows/cache_dependencies.yml b/.github/workflows/cache_dependencies.yml index dd648d78f..8b8fdc9ed 100644 --- a/.github/workflows/cache_dependencies.yml +++ b/.github/workflows/cache_dependencies.yml @@ -24,12 +24,19 @@ jobs: with: python-version: ${{ matrix.python-version }} + - name: Check full Python version + run: | + python --version + python_version=$(python --version 2>&1 | cut -d' ' -f2) + echo "Python version: $python_version" + echo "version=$python_version" >> $GITHUB_ENV + - name: Delete old cached file with same python version run: | echo "Current Cached files list" gh cache list - echo "Deleting cached files with pattern: ${{ runner.os }}-venv-${{ matrix.python-version }}-" - for cache_key in $(gh cache list --json key -q ".[] | select(.key | startswith(\"${{ runner.os }}-venv-${{ matrix.python-version }}-\")) | .key"); do + echo "Deleting cached files with pattern: ${{ runner.os }}-venv-${{ env.version }}-" + for cache_key in $(gh cache list --json key -q ".[] | select(.key | startswith(\"${{ runner.os }}-venv-${{ env.version }}-\")) | .key"); do echo "Deleting cache with key: $cache_key" gh cache delete "$cache_key" done @@ -37,8 +44,8 @@ jobs: # Update the matplotlib version if needed later - name: Set up virtual environment run: | - python -m venv .venv-${{ matrix.python-version }} - source .venv-${{ matrix.python-version }}/bin/activate + python -m venv .venv-${{ env.version }} + source .venv-${{ env.version }}/bin/activate python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt pip install matplotlib==3.9.2 @@ -47,12 +54,13 @@ jobs: id: cache-env uses: actions/cache@v4 with: - path: .venv-${{ matrix.python-version }} - key: ${{ runner.os }}-venv-${{ matrix.python-version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} + path: .venv-${{ env.version }} + key: ${{ runner.os }}-venv-${{ env.version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} - name: Verify virtual environment activation run: | - source .venv-${{ matrix.python-version }}/bin/activate + source .venv-${{ env.version }}/bin/activate + which python python --version pip --version pip list diff --git a/.github/workflows/notebook_tests.yml b/.github/workflows/notebook_tests.yml index 1d1dc1473..35613aae5 100644 --- a/.github/workflows/notebook_tests.yml +++ b/.github/workflows/notebook_tests.yml @@ -51,20 +51,27 @@ jobs: with: python-version: ${{ matrix.python-version }} + - name: Check full Python version + run: | + python --version + python_version=$(python --version 2>&1 | cut -d' ' -f2) + echo "Python version: $python_version" + echo "version=$python_version" >> $GITHUB_ENV + - name: Restore Python environment cache if: env.has_changes == 'true' id: restore-env uses: actions/cache/restore@v4 with: - path: .venv-${{ matrix.python-version }} - key: ${{ runner.os }}-venv-${{ matrix.python-version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} + path: .venv-${{ env.version }} + key: ${{ runner.os 
}}-venv-${{ env.version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} - name: Set up virtual environment if not restored from cache if: steps.restore-env.outputs.cache-hit != 'true' && env.has_changes == 'true' run: | gh cache list - python -m venv .venv-${{ matrix.python-version }} - source .venv-${{ matrix.python-version }}/bin/activate + python -m venv .venv-${{ env.version }} + source .venv-${{ env.version }}/bin/activate python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt pip install matplotlib==3.9.2 @@ -72,7 +79,7 @@ jobs: - name: Test notebooks with pytest and nbmake if: env.has_changes == 'true' run: | - source .venv-${{ matrix.python-version }}/bin/activate + source .venv-${{ env.version }}/bin/activate pwd lscpu pip list diff --git a/.github/workflows/regression_tests.yml b/.github/workflows/regression_tests.yml index 9e648d3ec..19122267c 100644 --- a/.github/workflows/regression_tests.yml +++ b/.github/workflows/regression_tests.yml @@ -50,20 +50,27 @@ jobs: with: python-version: ${{ matrix.python-version }} + - name: Check full Python version + run: | + python --version + python_version=$(python --version 2>&1 | cut -d' ' -f2) + echo "Python version: $python_version" + echo "version=$python_version" >> $GITHUB_ENV + - name: Restore Python environment cache if: env.has_changes == 'true' id: restore-env uses: actions/cache/restore@v4 with: - path: .venv-${{ matrix.python-version }} - key: ${{ runner.os }}-venv-${{ matrix.python-version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} + path: .venv-${{ env.version }} + key: ${{ runner.os }}-venv-${{ env.version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} - name: Set up virtual environment if not restored from cache if: steps.restore-env.outputs.cache-hit != 'true' && env.has_changes == 'true' run: | gh cache list - python -m venv .venv-${{ matrix.python-version }} - source .venv-${{ matrix.python-version }}/bin/activate + python -m venv .venv-${{ env.version }} + source .venv-${{ env.version }}/bin/activate python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt pip install matplotlib==3.9.2 @@ -77,7 +84,7 @@ jobs: - name: Test with pytest if: env.has_changes == 'true' run: | - source .venv-${{ matrix.python-version }}/bin/activate + source .venv-${{ env.version }}/bin/activate pwd lscpu pip list diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 6e8ebc7a6..023e6503b 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -56,20 +56,27 @@ jobs: with: python-version: ${{ matrix.combos.python_version }} + - name: Check full Python version + run: | + python --version + python_version=$(python --version 2>&1 | cut -d' ' -f2) + echo "Python version: $python_version" + echo "version=$python_version" >> $GITHUB_ENV + - name: Restore Python environment cache if: env.has_changes == 'true' id: restore-env uses: actions/cache/restore@v4 with: - path: .venv-${{ matrix.combos.python_version }} - key: ${{ runner.os }}-venv-${{ matrix.combos.python_version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} + path: .venv-${{ env.version }} + key: ${{ runner.os }}-venv-${{ env.version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} - name: Set up virtual environment if not restored from cache if: steps.restore-env.outputs.cache-hit != 'true' && env.has_changes == 'true' run: | gh cache list - python -m 
venv .venv-${{ matrix.combos.python_version }} - source .venv-${{ matrix.combos.python_version }}/bin/activate + python -m venv .venv-${{ env.version }} + source .venv-${{ env.version }}/bin/activate python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt pip install matplotlib==3.9.2 @@ -80,13 +87,20 @@ jobs: with: swap-size-gb: 10 - - name: Test with pytest + - name: Action Details if: env.has_changes == 'true' run: | - source .venv-${{ matrix.combos.python_version }}/bin/activate + source .venv-${{ env.version }}/bin/activate + which python + python --version pwd lscpu pip list + + - name: Test with pytest + if: env.has_changes == 'true' + run: | + source .venv-${{ env.version }}/bin/activate python -m pytest -v -m unit \ --durations=0 \ --cov-report xml:cov.xml \ diff --git a/CHANGELOG.md b/CHANGELOG.md index 50352e2d2..348e970cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,18 +4,20 @@ Changelog New Feature - Adds a new profile class ``PowerProfile`` for raising profiles to a power. +- Add ``desc.objectives.LinkingCurrentConsistency`` for ensuring that coils in a stage 2 or single stage optimization provide the required linking current for a given equilibrium. - Adds an option ``scaled_termination`` (defaults to True) to all of the desc optimizers to measure the norms for ``xtol`` and ``gtol`` in the scaled norm provided by ``x_scale`` (which defaults to using an adaptive scaling based on the Jacobian or Hessian). This should make things more robust when optimizing parameters with widely different magnitudes. The old behavior can be recovered by passing ``options={"scaled_termination": False}``. - ``desc.objectives.Omnigenity`` is now vectorized and able to optimize multiple surfaces at the same time. Previously it was required to use a different objective for each surface. +- Adds a new objective ``desc.objectives.MirrorRatio`` for targeting a particular mirror ratio on each flux surface, for either an ``Equilibrium`` or ``OmnigenousField``. Bug Fixes -- Small bug fix to use the correct normalization length ``a`` in the BallooningStability objective +- Small bug fix to use the correct normalization length ``a`` in the BallooningStability objective. +- Fixed I/O bug when saving/loading ``_Profile`` classes that do not have a ``_params`` attribute. v0.13.0 ------- New Features - - Adds ``from_input_file`` method to ``Equilibrium`` class to generate an ``Equilibrium`` object with boundary, profiles, resolution and flux specified in a given DESC or VMEC input file - Adds function ``solve_regularized_surface_current`` to ``desc.magnetic_fields`` module that implements the REGCOIL algorithm (Landreman, (2017)) for surface current normal field optimization * Can specify the tuple ``current_helicity=(M_coil, N_coil)`` to determine if resulting contours correspond to helical topology (both ``(M_coil, N_coil)`` not equal to 0), modular (``N_coil`` equal to 0 and ``M_coil`` nonzero) or windowpane/saddle (``M_coil`` and ``N_coil`` both zero) @@ -298,7 +300,7 @@ optimization. Set to False by default. non-singular, non-degenerate) coordinate mappings for initial guesses. This is applied automatically when creating a new `Equilibrium` if the default initial guess of scaling the boundary surface produces self-intersecting surfaces. This can be disabled by -passing `ensure_nested=False` when constructing the `Equilibrum`. +passing `ensure_nested=False` when constructing the `Equilibrium`. 
- Adds `loss_function` argument to all `Objective`s for applying one of min/max/mean to objective function values (for targeting the average value of a profile, etc). - `Equilibrium.get_profile` now allows user to choose a profile type (power series, spline, etc) @@ -450,7 +452,7 @@ Breaking changes - Renames ``theta_sfl`` to ``theta_PEST`` in compute functions to avoid confusion with other straight field line coordinate systems. - Makes plotting kwargs a bit more uniform. ``zeta``, ``nzeta``, ``nphi`` have all been -superceded by ``phi`` which can be an integer for equally spaced angles or a float or +superseded by ``phi`` which can be an integer for equally spaced angles or a float or array of float to specify angles manually. Bug fixes @@ -520,7 +522,7 @@ the future all quantities should evaluate correctly at the magnetic axis. Note t evaluating quantities at the axis generally requires higher order derivatives and so can be much more expensive than evaluating at nonsingular points, so during optimization it is not recommended to include a grid point at the axis. Generally a small finite value -such as ``rho = 1e-6`` will avoid the singuarlity with a negligible loss in accuracy for +such as ``rho = 1e-6`` will avoid the singularity with a negligible loss in accuracy for analytic quantities. - Adds new optimizers ``fmin-auglag`` and ``lsq-auglag`` for performing constrained optimization using the augmented Lagrangian method. These generally perform much better @@ -536,7 +538,7 @@ the existing methods ``compute_theta_coordinates`` and ``compute_flux_coordinate but allows mapping between arbitrary coordinates. - Adds calculation of $\nabla \mathbf{B}$ tensor and corresponding $L_{\nabla B}$ metric - Adds objective ``BScaleLength`` for penalizing strong magnetic field curvature. -- Adds objective ``ObjectiveFromUser`` for wrapping an arbitary user defined function. +- Adds objective ``ObjectiveFromUser`` for wrapping an arbitrary user defined function. - Adds utilities ``desc.grid.find_least_rational_surfaces`` and ``desc.grid.find_most_rational_surfaces`` for finding the least/most rational surfaces for a given rotational transform profile. @@ -1072,7 +1074,7 @@ New Features: - Updates `Equilibrium` to make creating them more straightforward. - Instead of a dictionary of arrays and values, init method now takes individual arguments. These can either be objects of the - correct type (ie `Surface` objects for boundary condiitons, + correct type (ie `Surface` objects for boundary conditions, `Profile` for pressure and iota etc,) or ndarrays which will get parsed into objects of the correct type (for backwards compatibility) @@ -1356,7 +1358,7 @@ Major changes: indexing, where only M+1 points were used instead of the correct 2\*M+1 - Rotated concentric grids by 2pi/3M to avoid symmetry plane at - theta=0,pi. Previously, for stellarator symmetic cases, the nodes at + theta=0,pi. Previously, for stellarator symmetric cases, the nodes at theta=0 did not contribute to helical force balance. - Added [L\_grid]{.title-ref} parameter to specify radial resolution of grid nodes directly and making the API more consistent. @@ -1522,7 +1524,7 @@ saved, and objectives getting compiled more often than necessary Major Changes: -- Changes to Equilibium/EquilibriaFamily: +- Changes to Equilibrium/EquilibriaFamily: - general switching to using properties rather than direct attributes when referencing things (ie, `eq.foo`, not `eq._foo`). 
This allows getter methods to have safeguards if diff --git a/desc/coils.py b/desc/coils.py index dcf4eaf34..3af1fde34 100644 --- a/desc/coils.py +++ b/desc/coils.py @@ -1300,6 +1300,15 @@ def current(self, new): for coil, cur in zip(self.coils, new): coil.current = cur + def _all_currents(self, currents=None): + """Return an array of all the currents (including those in virtual coils).""" + if currents is None: + currents = self.current + currents = jnp.asarray(currents) + if self.sym: + currents = jnp.concatenate([currents, -1 * currents[::-1]]) + return jnp.tile(currents, self.NFP) + def _make_arraylike(self, x): if isinstance(x, dict): x = [x] * len(self) @@ -2337,6 +2346,22 @@ def num_coils(self): """int: Number of coils.""" return sum([c.num_coils for c in self]) + def _all_currents(self, currents=None): + """Return an array of all the currents (including those in virtual coils).""" + if currents is None: + currents = jnp.array(flatten_list(self.current)) + all_currents = [] + i = 0 + for coil in self.coils: + if isinstance(coil, CoilSet): + curr = currents[i : i + len(coil)] + all_currents += [coil._all_currents(curr)] + i += len(coil) + else: + all_currents += [jnp.atleast_1d(currents[i])] + i += 1 + return jnp.concatenate(all_currents) + def compute( self, names, diff --git a/desc/compute/_basis_vectors.py b/desc/compute/_basis_vectors.py index dd0b24469..89280fe1a 100644 --- a/desc/compute/_basis_vectors.py +++ b/desc/compute/_basis_vectors.py @@ -34,7 +34,7 @@ def _b(params, transforms, profiles, data, **kwargs): @register_compute_fun( - name="e^rho", # ∇ρ is the same in (ρ,θ,ζ) and (ρ,α,ζ) coordinates. + name="e^rho", # ∇ρ is the same in any coordinate system. label="\\mathbf{e}^{\\rho}", units="m^{-1}", units_long="inverse meters", @@ -927,7 +927,7 @@ def _e_sup_theta_zz(params, transforms, profiles, data, **kwargs): @register_compute_fun( - name="e^zeta", # ∇ζ is the same in (ρ,θ,ζ) and (ρ,α,ζ) coordinates. + name="e^zeta", # ∇ζ is the same in any coordinate system. 
label="\\mathbf{e}^{\\zeta}", units="m^{-1}", units_long="inverse meters", @@ -2449,10 +2449,10 @@ def _e_sub_theta_over_sqrt_g(params, transforms, profiles, data, **kwargs): ) def _e_sub_vartheta_rp(params, transforms, profiles, data, **kwargs): # constant ρ and ϕ - e_vartheta = ( - data["e_theta"].T * data["phi_z"] - data["e_zeta"].T * data["phi_t"] - ) / (data["theta_PEST_t"] * data["phi_z"] - data["theta_PEST_z"] * data["phi_t"]) - data["e_theta_PEST"] = e_vartheta.T + data["e_theta_PEST"] = ( + (data["e_theta"].T * data["phi_z"] - data["e_zeta"].T * data["phi_t"]) + / (data["theta_PEST_t"] * data["phi_z"] - data["theta_PEST_z"] * data["phi_t"]) + ).T return data @@ -2473,11 +2473,13 @@ def _e_sub_vartheta_rp(params, transforms, profiles, data, **kwargs): ) def _e_sub_phi_rv(params, transforms, profiles, data, **kwargs): # constant ρ and ϑ - e_phi = ( - data["e_zeta"].T * data["theta_PEST_t"] - - data["e_theta"].T * data["theta_PEST_z"] - ) / (data["theta_PEST_t"] * data["phi_z"] - data["theta_PEST_z"] * data["phi_t"]) - data["e_phi|r,v"] = e_phi.T + data["e_phi|r,v"] = ( + ( + data["e_zeta"].T * data["theta_PEST_t"] + - data["e_theta"].T * data["theta_PEST_z"] + ) + / (data["theta_PEST_t"] * data["phi_z"] - data["theta_PEST_z"] * data["phi_t"]) + ).T return data @@ -2499,10 +2501,10 @@ def _e_sub_phi_rv(params, transforms, profiles, data, **kwargs): def _e_sub_rho_vp(params, transforms, profiles, data, **kwargs): # constant ϑ and ϕ data["e_rho|v,p"] = ( - data["e_rho"].T - - data["e_vartheta"].T * data["theta_PEST_r"] - - data["e_phi|r,v"].T * data["phi_r"] - ).T + data["e_rho"] + - data["e_vartheta"] * data["theta_PEST_r"][:, jnp.newaxis] + - data["e_phi|r,v"] * data["phi_r"][:, jnp.newaxis] + ) return data @@ -3206,7 +3208,28 @@ def _e_sub_zeta_zz(params, transforms, profiles, data, **kwargs): data["Z_zzz"], ] ).T + return data + +@register_compute_fun( + name="grad(phi)", + label="\\nabla \\phi", + units="m^{-1}", + units_long="Inverse meters", + description="Gradient of cylindrical toroidal angle ϕ.", + dim=3, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["R", "0"], + parameterization=[ + "desc.equilibrium.equilibrium.Equilibrium", + "desc.geometry.surface.FourierRZToroidalSurface", + ], +) +def _grad_phi(params, transforms, profiles, data, **kwargs): + data["grad(phi)"] = jnp.column_stack([data["0"], 1 / data["R"], data["0"]]) return data @@ -3456,7 +3479,7 @@ def _e_sub_theta_rp(params, transforms, profiles, data, **kwargs): label="\\mathbf{e}_{\\rho} |_{\\alpha, \\zeta}", units="m", units_long="meters", - description="Tangent vector along radial field line label", + description="Covariant radial basis vector in (ρ, α, ζ) Clebsch coordinates.", dim=3, params=[], transforms={}, @@ -3477,7 +3500,7 @@ def _e_rho_az(params, transforms, profiles, data, **kwargs): label="\\mathbf{e}_{\\alpha}", units="m", units_long="meters", - description="Tangent vector along poloidal field line label", + description="Covariant poloidal basis vector in (ρ, α, ζ) Clebsch coordinates.", dim=3, params=[], transforms={}, @@ -3496,8 +3519,8 @@ def _e_alpha(params, transforms, profiles, data, **kwargs): label="\\partial_{\\theta} \\mathbf{e}_{\\alpha}", units="m", units_long="meters", - description="Tangent vector along poloidal field line label, derivative wrt" - " DESC poloidal angle", + description="Covariant poloidal basis vector in (ρ, α, ζ) Clebsch coordinates," + " derivative wrt DESC poloidal angle", dim=3, params=[], transforms={}, @@ -3518,8 +3541,8 @@ def 
_e_alpha_t(params, transforms, profiles, data, **kwargs): label="\\partial_{\\zeta} \\mathbf{e}_{\\alpha}", units="m", units_long="meters", - description="Tangent vector along poloidal field line label, " - "derivative wrt DESC toroidal angle", + description="Covariant poloidal basis vector in (ρ, α, ζ) Clebsch coordinates, " + "derivative wrt DESC toroidal angle at fixed ρ,θ.", dim=3, params=[], transforms={}, @@ -3603,7 +3626,7 @@ def _e_zeta_ra_a(params, transforms, profiles, data, **kwargs): units="m", units_long="meters", description="Tangent vector along (collinear to) field line, " - "derivative wrt DESC toroidal angle", + "derivative wrt DESC toroidal angle at fixed ρ,θ.", dim=3, params=[], transforms={}, @@ -3679,3 +3702,43 @@ def _d_ell_d_zeta_z(params, transforms, profiles, data, **kwargs): dot(data["(e_zeta|r,a)_z|r,a"], data["e_zeta|r,a"]) / data["|e_zeta|r,a|"] ) return data + + +@register_compute_fun( + name="e_alpha|r,p", + label="\\mathbf{e}_{\\alpha} |_{\\rho, \\phi}", + units="m", + units_long="meters", + description="Covariant poloidal basis vector in (ρ, α, ϕ) Clebsch coordinates.", + dim=3, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["e_theta", "alpha_t", "e_zeta", "alpha_z", "phi_t", "phi_z"], +) +def _e_alpha_rp(params, transforms, profiles, data, **kwargs): + data["e_alpha|r,p"] = ( + (data["e_theta"].T * data["phi_z"] - data["e_zeta"].T * data["phi_t"]) + / (data["alpha_t"] * data["phi_z"] - data["alpha_z"] * data["phi_t"]) + ).T + return data + + +@register_compute_fun( + name="|e_alpha|r,p|", + label="|\\mathbf{e}_{\\alpha} |_{\\rho, \\phi}|", + units="m", + units_long="meters", + description="Norm of covariant poloidal basis vector in (ρ, α, ϕ) Clebsch " + "coordinates.", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["e_alpha|r,p"], +) +def _e_alpha_rp_norm(params, transforms, profiles, data, **kwargs): + data["|e_alpha|r,p|"] = jnp.linalg.norm(data["e_alpha|r,p"], axis=-1) + return data diff --git a/desc/compute/_core.py b/desc/compute/_core.py index c6651c834..4766c3a73 100644 --- a/desc/compute/_core.py +++ b/desc/compute/_core.py @@ -3153,6 +3153,25 @@ def _theta_PEST_rt(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="theta_PEST_rz", + label="\\partial_{\\rho \\zeta} \\vartheta", + units="rad", + units_long="radians", + description="PEST straight field line poloidal angular coordinate, derivative wrt " + "radial and DESC toroidal coordinate", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["lambda_rz"], +) +def _theta_PEST_rz(params, transforms, profiles, data, **kwargs): + data["theta_PEST_rz"] = data["lambda_rz"] + return data + + @register_compute_fun( name="theta_PEST_rrt", label="\\partial_{\\rho \\rho \\theta} \\vartheta", diff --git a/desc/compute/_field.py b/desc/compute/_field.py index 97cb44515..b7237667c 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -105,6 +105,221 @@ def _B_sup_zeta(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="B^phi", + label="B^{\\phi}", + units="T \\cdot m^{-1}", + units_long="Tesla / meter", + description="Contravariant cylindrical toroidal angle component of magnetic field", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["psi_r/sqrt(g)", "phi_z", "theta_PEST_t", "phi_t", "theta_PEST_z"], +) +def _B_sup_phi(params, transforms, profiles, data, **kwargs): + # Written like this, 
independent of iota, to enable computing without integration. + data["B^phi"] = data["psi_r/sqrt(g)"] * ( + data["phi_z"] * data["theta_PEST_t"] - data["phi_t"] * data["theta_PEST_z"] + ) + return data + + +@register_compute_fun( + name="B^phi_r", + label="\\partial_{\\rho} B^{\\phi} |_{\\theta, \\zeta}", + units="T \\cdot m^{-1}", + units_long="Tesla / meter", + description="Contravariant cylindrical toroidal angle component of magnetic field," + " partial derivative wrt ρ in (ρ, θ, ζ) coordinates.", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=[ + "psi_r/sqrt(g)", + "phi_z", + "theta_PEST_t", + "phi_t", + "theta_PEST_z", + "(psi_r/sqrt(g))_r", + "phi_rz", + "theta_PEST_rt", + "phi_rt", + "theta_PEST_rz", + ], +) +def _B_sup_phi_r(params, transforms, profiles, data, **kwargs): + data["B^phi_r"] = data["(psi_r/sqrt(g))_r"] * ( + data["phi_z"] * data["theta_PEST_t"] - data["phi_t"] * data["theta_PEST_z"] + ) + data["psi_r/sqrt(g)"] * ( + data["phi_rz"] * data["theta_PEST_t"] + + data["phi_z"] * data["theta_PEST_rt"] + - data["phi_rt"] * data["theta_PEST_z"] + - data["phi_t"] * data["theta_PEST_rz"] + ) + return data + + +@register_compute_fun( + name="B^phi_t", + label="\\partial_{\\theta} B^{\\phi} |_{\\rho, \\zeta}", + units="T \\cdot m^{-1}", + units_long="Tesla / meter", + description="Contravariant cylindrical toroidal angle component of magnetic field," + " partial derivative wrt θ in (ρ, θ, ζ) coordinates.", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=[ + "psi_r/sqrt(g)", + "phi_z", + "theta_PEST_t", + "phi_t", + "theta_PEST_z", + "(psi_r/sqrt(g))_t", + "phi_tz", + "theta_PEST_tt", + "phi_tt", + "theta_PEST_tz", + ], +) +def _B_sup_phi_t(params, transforms, profiles, data, **kwargs): + data["B^phi_t"] = data["(psi_r/sqrt(g))_t"] * ( + data["phi_z"] * data["theta_PEST_t"] - data["phi_t"] * data["theta_PEST_z"] + ) + data["psi_r/sqrt(g)"] * ( + data["phi_tz"] * data["theta_PEST_t"] + + data["phi_z"] * data["theta_PEST_tt"] + - data["phi_tt"] * data["theta_PEST_z"] + - data["phi_t"] * data["theta_PEST_tz"] + ) + return data + + +@register_compute_fun( + name="B^phi_z", + label="\\partial_{\\zeta} B^{\\phi} |_{\\rho, \\theta}", + units="T \\cdot m^{-1}", + units_long="Tesla / meter", + description="Contravariant cylindrical toroidal angle component of magnetic field," + " partial derivative wrt ζ in (ρ, θ, ζ) coordinates.", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=[ + "psi_r/sqrt(g)", + "phi_z", + "theta_PEST_t", + "phi_t", + "theta_PEST_z", + "(psi_r/sqrt(g))_z", + "phi_zz", + "theta_PEST_tz", + "phi_tz", + "theta_PEST_zz", + ], +) +def _B_sup_phi_z(params, transforms, profiles, data, **kwargs): + data["B^phi_z"] = data["(psi_r/sqrt(g))_z"] * ( + data["phi_z"] * data["theta_PEST_t"] - data["phi_t"] * data["theta_PEST_z"] + ) + data["psi_r/sqrt(g)"] * ( + data["phi_zz"] * data["theta_PEST_t"] + + data["phi_z"] * data["theta_PEST_tz"] + - data["phi_tz"] * data["theta_PEST_z"] + - data["phi_t"] * data["theta_PEST_zz"] + ) + return data + + +@register_compute_fun( + name="B^phi_v|r,p", + label="\\partial_{\\vartheta} B^{\\phi} |_{\\rho, \\phi}", + units="T \\cdot m^{-1}", + units_long="Tesla / meter", + description="Contravariant cylindrical toroidal angle component of magnetic field," + " partial derivative wrt ϑ in (ρ, ϑ, ϕ) coordinates.", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["B^phi_t", "B^phi_z", "theta_PEST_t", "theta_PEST_z", 
"phi_t", "phi_z"], +) +def _B_sup_phi_v_rp(params, transforms, profiles, data, **kwargs): + data["B^phi_v|r,p"] = ( + data["B^phi_t"] * data["phi_z"] - data["B^phi_z"] * data["phi_t"] + ) / (data["theta_PEST_t"] * data["phi_z"] - data["theta_PEST_z"] * data["phi_t"]) + return data + + +@register_compute_fun( + name="B^phi_p|r,v", + label="\\partial_{\\phi} B^{\\phi} |_{\\rho, \\vartheta}", + units="T \\cdot m^{-1}", + units_long="Tesla / meter", + description="Contravariant cylindrical toroidal angle component of magnetic field," + " partial derivative wrt ϕ in (ρ, ϑ, ϕ) coordinates.", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["B^phi_t", "B^phi_z", "theta_PEST_t", "theta_PEST_z", "phi_t", "phi_z"], +) +def _B_sup_phi_p_rv(params, transforms, profiles, data, **kwargs): + data["B^phi_p|r,v"] = ( + data["B^phi_z"] * data["theta_PEST_t"] - data["B^phi_t"] * data["theta_PEST_z"] + ) / (data["theta_PEST_t"] * data["phi_z"] - data["theta_PEST_z"] * data["phi_t"]) + return data + + +@register_compute_fun( + name="B^phi_r|v,p", + label="\\partial_{\\rho} B^{\\phi} |_{\\vartheta, \\phi}", + units="T \\cdot m^{-1}", + units_long="Tesla / meter", + description="Contravariant cylindrical toroidal angle component of magnetic field," + " partial derivative wrt ρ in (ρ, ϑ, ϕ) coordinates.", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["B^phi_r", "B^phi_v|r,p", "B^phi_p|r,v", "theta_PEST_r", "phi_r"], +) +def _B_sup_phi_r_vp(params, transforms, profiles, data, **kwargs): + data["B^phi_r|v,p"] = ( + data["B^phi_r"] + - data["B^phi_v|r,p"] * data["theta_PEST_r"] + - data["B^phi_p|r,v"] * data["phi_r"] + ) + return data + + +@register_compute_fun( + name="|B|_r|v,p", + label="\\partial_{\\rho} |\\mathbf{B}| |_{\\vartheta, \\phi}", + units="T", + units_long="Tesla", + description="Magnetic field norm, derivative wrt ρ in (ρ, ϑ, ϕ) coordinates.", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["grad(|B|)", "e_rho|v,p"], +) +def _B_norm_r_vp(params, transforms, profiles, data, **kwargs): + data["|B|_r|v,p"] = dot(data["grad(|B|)"], data["e_rho|v,p"]) + return data + + @register_compute_fun( name="B", label="\\mathbf{B}", @@ -127,7 +342,7 @@ def _B(params, transforms, profiles, data, **kwargs): @register_compute_fun( name="B_R", - label="B_{R}", + label="B_{R} = \\mathbf{B} \\cdot \\hat{R}", units="T", units_long="Tesla", description="Radial component of magnetic field in lab frame", @@ -145,7 +360,8 @@ def _B_R(params, transforms, profiles, data, **kwargs): @register_compute_fun( name="B_phi", - label="B_{\\phi}", + label="B_{\\phi} = \\mathbf{B} \\cdot \\hat{\\phi} " + "= \\mathbf{B} \\cdot R^{-1} \\mathbf{e}_{\\phi} |_{R, Z}", units="T", units_long="Tesla", description="Toroidal component of magnetic field in lab frame", @@ -154,16 +370,16 @@ def _B_R(params, transforms, profiles, data, **kwargs): transforms={}, profiles=[], coordinates="rtz", - data=["B"], + data=["B^phi", "R"], ) def _B_phi(params, transforms, profiles, data, **kwargs): - data["B_phi"] = data["B"][:, 1] + data["B_phi"] = data["R"] * data["B^phi"] return data @register_compute_fun( name="B_Z", - label="B_{Z}", + label="B_{Z} = \\mathbf{B} \\cdot \\hat{Z}", units="T", units_long="Tesla", description="Vertical component of magnetic field in lab frame", @@ -2462,6 +2678,7 @@ def _B_mag_alpha(params, transforms, profiles, data, **kwargs): data=["|B|_z", "|B|_a", "alpha_z"], ) def _B_mag_z_constant_rho_alpha(params, transforms, 
profiles, data, **kwargs): + # Same as grad(|B|) dot e_zeta|r,a but avoids radial derivatives. data["|B|_z|r,a"] = data["|B|_z"] - data["|B|_a"] * data["alpha_z"] return data @@ -3394,6 +3611,10 @@ def _B_dot_gradB_z(params, transforms, profiles, data, **kwargs): coordinates="r", data=["|B|"], resolution_requirement="tz", + parameterization=[ + "desc.equilibrium.equilibrium.Equilibrium", + "desc.magnetic_fields._core.OmnigenousField", + ], ) def _min_tz_modB(params, transforms, profiles, data, **kwargs): data["min_tz |B|"] = surface_min(transforms["grid"], data["|B|"]) @@ -3413,6 +3634,10 @@ def _min_tz_modB(params, transforms, profiles, data, **kwargs): coordinates="r", data=["|B|"], resolution_requirement="tz", + parameterization=[ + "desc.equilibrium.equilibrium.Equilibrium", + "desc.magnetic_fields._core.OmnigenousField", + ], ) def _max_tz_modB(params, transforms, profiles, data, **kwargs): data["max_tz |B|"] = surface_max(transforms["grid"], data["|B|"]) @@ -3431,6 +3656,10 @@ def _max_tz_modB(params, transforms, profiles, data, **kwargs): profiles=[], coordinates="r", data=["min_tz |B|", "max_tz |B|"], + parameterization=[ + "desc.equilibrium.equilibrium.Equilibrium", + "desc.magnetic_fields._core.OmnigenousField", + ], ) def _mirror_ratio(params, transforms, profiles, data, **kwargs): data["mirror ratio"] = (data["max_tz |B|"] - data["min_tz |B|"]) / ( diff --git a/desc/compute/_neoclassical.py b/desc/compute/_neoclassical.py index 4461efeea..ea3ba46fb 100644 --- a/desc/compute/_neoclassical.py +++ b/desc/compute/_neoclassical.py @@ -11,13 +11,19 @@ from functools import partial +from orthax.legendre import leggauss from quadax import simpson from desc.backend import imap, jit, jnp from ..integrals.bounce_integral import Bounce1D -from ..integrals.quad_utils import chebgauss2 -from ..utils import safediv +from ..integrals.quad_utils import ( + automorphism_sin, + chebgauss2, + get_quadrature, + grad_automorphism_sin, +) +from ..utils import cross, dot, safediv from .data_index import register_compute_fun _bounce_doc = { @@ -272,3 +278,252 @@ def eps_32(data): def _effective_ripple(params, transforms, profiles, data, **kwargs): data["effective ripple"] = data["effective ripple 3/2"] ** (2 / 3) return data + + +@register_compute_fun( + name="Gamma_c Velasco", + label=( + # Γ_c = π/(8√2) ∫dλ 〈 ∑ⱼ [v τ γ_c²]ⱼ 〉 + "\\Gamma_c = \\frac{\\pi}{8 \\sqrt{2}} " + "\\int d\\lambda \\langle \\sum_j (v \\tau \\gamma_c^2)_j \\rangle" + ), + units="~", + units_long="None", + description="Energetic ion confinement proxy " + "as defined by Velasco et al. (doi:10.1088/1741-4326/ac2994)", + dim=1, + params=[], + transforms={"grid": []}, + profiles=[], + coordinates="r", + data=["min_tz |B|", "max_tz |B|", "cvdrift0", "gbdrift", "fieldline length"] + + Bounce1D.required_names, + source_grid_requirement={"coordinates": "raz", "is_meshgrid": True}, + **_bounce_doc, +) +@partial(jit, static_argnames=["num_quad", "num_pitch", "num_well", "batch"]) +def _Gamma_c_Velasco(params, transforms, profiles, data, **kwargs): + """Energetic ion confinement proxy as defined by Velasco et al. + + A model for the fast evaluation of prompt losses of energetic ions in stellarators. + J.L. Velasco et al. 2021 Nucl. Fusion 61 116059. + https://doi.org/10.1088/1741-4326/ac2994. + Equation 16. 
+ """ + # noqa: unused dependency + if "quad" in kwargs: + quad = kwargs["quad"] + else: + quad = get_quadrature( + leggauss(kwargs.get("num_quad", 32)), + (automorphism_sin, grad_automorphism_sin), + ) + num_well = kwargs.get("num_well", None) + batch = kwargs.get("batch", True) + grid = transforms["grid"].source_grid + + def d_v_tau(data, B, pitch): + return safediv(2.0, jnp.sqrt(jnp.abs(1 - pitch * B))) + + def _cvdrift0(data, B, pitch): + return safediv( + data["cvdrift0"] * (1 - 0.5 * pitch * B), jnp.sqrt(jnp.abs(1 - pitch * B)) + ) + + def _gbdrift(data, B, pitch): + return safediv( + data["gbdrift"] * (1 - 0.5 * pitch * B), jnp.sqrt(jnp.abs(1 - pitch * B)) + ) + + def Gamma_c_Velasco(data): + """∫ dλ ∑ⱼ [v τ γ_c²]ⱼ.""" + bounce = Bounce1D(grid, data, quad, automorphism=None, is_reshaped=True) + points = bounce.points(data["pitch_inv"], num_well=num_well) + v_tau, cvdrift0, gbdrift = bounce.integrate( + [d_v_tau, _cvdrift0, _gbdrift], + data["pitch_inv"], + data, + ["cvdrift0", "gbdrift"], + points=points, + batch=batch, + ) + gamma_c = jnp.arctan(safediv(cvdrift0, gbdrift)) + return (4 / jnp.pi**2) * ( + (v_tau * gamma_c**2).sum(axis=-1) + * data["pitch_inv"] ** (-2) + * data["pitch_inv weight"] + ).sum(axis=-1) + + data["Gamma_c Velasco"] = ( + jnp.pi + / (8 * 2**0.5) + * _compute( + Gamma_c_Velasco, + {"cvdrift0": data["cvdrift0"], "gbdrift": data["gbdrift"]}, + data, + grid, + kwargs.get("num_pitch", 64), + ) + / data["fieldline length"] + ) + return data + + +@register_compute_fun( + name="Gamma_c", + label=( + # Γ_c = π/(8√2) ∫dλ 〈 ∑ⱼ [v τ γ_c²]ⱼ 〉 + "\\Gamma_c = \\frac{\\pi}{8 \\sqrt{2}} " + "\\int d\\lambda \\langle \\sum_j (v \\tau \\gamma_c^2)_j \\rangle" + ), + units="~", + units_long="None", + description="Energetic ion confinement proxy, Nemov et al.", + dim=1, + params=[], + transforms={"grid": []}, + profiles=[], + coordinates="r", + data=[ + "min_tz |B|", + "max_tz |B|", + "B^phi", + "B^phi_r|v,p", + "b", + "|B|_r|v,p", + "iota_r", + "grad(phi)", + "e^rho", + "|grad(rho)|", + "|e_alpha|r,p|", + "kappa_g", + "psi_r", + "fieldline length", + ] + + Bounce1D.required_names, + source_grid_requirement={"coordinates": "raz", "is_meshgrid": True}, + **_bounce_doc, + quad2="Same as ``quad`` for the weak singular integrals in particular.", +) +@partial(jit, static_argnames=["num_quad", "num_pitch", "num_well", "batch"]) +def _Gamma_c(params, transforms, profiles, data, **kwargs): + """Energetic ion confinement proxy as defined by Nemov et al. + + Poloidal motion of trapped particle orbits in real-space coordinates. + V. V. Nemov, S. V. Kasilov, W. Kernbichler, G. O. Leitold. + Phys. Plasmas 1 May 2008; 15 (5): 052501. + https://doi.org/10.1063/1.2912456. + Equation 61. + + The radial electric field has a negligible effect on alpha particle confinement, + so it is assumed to be zero. + """ + # noqa: unused dependency + if "quad" in kwargs: + quad = kwargs["quad"] + else: + quad = get_quadrature( + leggauss(kwargs.get("num_quad", 32)), + (automorphism_sin, grad_automorphism_sin), + ) + quad2 = kwargs["quad2"] if "quad2" in kwargs else chebgauss2(quad[0].size) + num_well = kwargs.get("num_well", None) + batch = kwargs.get("batch", True) + grid = transforms["grid"].source_grid + + # The derivative (∂/∂ψ)|ϑ,ϕ belongs to flux coordinates which satisfy + # α = ϑ − χ(ψ) ϕ where α is the poloidal label of ψ,α Clebsch coordinates. + # Choosing χ = ι implies ϑ, ϕ are PEST angles. 
+ ∂G/∂((λB₀)⁻¹) = λ²B₀ ∫ dℓ (1 − λ|B|/2) / √(1 − λ|B|) ∂|B|/∂ψ / |B| + ∂V/∂((λB₀)⁻¹) = 3/2 λ²B₀ ∫ dℓ √(1 − λ|B|) K / |B| + ∂g/∂((λB₀)⁻¹) = λ²B₀² ∫ dℓ (1 − λ|B|/2) / √(1 − λ|B|) |∇ψ| κ_g / |B| + tan(π/2 γ_c) = + ∫ dℓ (1 − λ|B|/2) / √(1 − λ|B|) |∇ρ| κ_g / |B| + ---------------------------------------------- + (|∇ρ| ‖e_α|ρ,ϕ‖)ᵢ ∫ dℓ [ (1 − λ|B|/2)/√(1 − λ|B|) ∂|B|/∂ψ + √(1 − λ|B|) K ] / |B| + + def d_v_tau(data, B, pitch): + return safediv(2.0, jnp.sqrt(jnp.abs(1 - pitch * B))) + + def drift1(data, B, pitch): + return ( + safediv(1 - 0.5 * pitch * B, jnp.sqrt(jnp.abs(1 - pitch * B))) + * data["|grad(rho)|*kappa_g"] + / B + ) + + def drift2(data, B, pitch): + return ( + safediv(1 - 0.5 * pitch * B, jnp.sqrt(jnp.abs(1 - pitch * B))) + * data["|B|_psi|v,p"] + / B + ) + + def drift3(data, B, pitch): + return jnp.sqrt(jnp.abs(1 - pitch * B)) * data["K"] / B + + def Gamma_c(data): + """∫ dλ ∑ⱼ [v τ γ_c²]ⱼ.""" + # Note v τ = 4λ⁻²B₀⁻¹ ∂I/∂((λB₀)⁻¹) where v is the particle velocity, + # τ is the bounce time, and I is defined in Nemov eq. 36. + bounce = Bounce1D(grid, data, quad, automorphism=None, is_reshaped=True) + points = bounce.points(data["pitch_inv"], num_well=num_well) + v_tau, f1, f2 = bounce.integrate( + [d_v_tau, drift1, drift2], + data["pitch_inv"], + data, + ["|grad(rho)|*kappa_g", "|B|_psi|v,p"], + points=points, + batch=batch, + ) + gamma_c = jnp.arctan( + safediv( + f1, + ( + f2 + + bounce.integrate( + drift3, + data["pitch_inv"], + data, + "K", + points=points, + batch=batch, + quad=quad2, + ) + ) + * bounce.interp_to_argmin(data["|grad(rho)|*|e_alpha|r,p|"], points), + ) + ) + return (4 / jnp.pi**2) * ( + (v_tau * gamma_c**2).sum(axis=-1) + * data["pitch_inv"] ** (-2) + * data["pitch_inv weight"] + ).sum(axis=-1) + + # We rewrite equivalents of Nemov et al.'s expressions using single-valued + # maps of physical coordinates. This avoids the computational issues of + # multivalued maps. It further enables use of more efficient methods, such as + # fast transforms and fixed computational grids throughout optimization, which + # are used in the ``Bounce2D`` operator on a developer branch. + + # It is assumed the grid is sufficiently dense to reconstruct |B|, + # so anything smoother than |B| may be captured accurately as a single + # spline rather than splining each component. + fun_data = { + "|grad(rho)|*kappa_g": data["|grad(rho)|"] * data["kappa_g"], + "|grad(rho)|*|e_alpha|r,p|": data["|grad(rho)|"] * data["|e_alpha|r,p|"], + "|B|_psi|v,p": data["|B|_r|v,p"] / data["psi_r"], + "K": data["iota_r"] * dot(cross(data["e^rho"], data["b"]), data["grad(phi)"]) + # Behaves as ∂log(|B|²/B^ϕ)/∂ψ |B| if one ignores the issue of a log argument + # with units. Smoothness determined by positive lower bound of log argument, + # and hence behaves as ∂log(|B|)/∂ψ |B| = ∂|B|/∂ψ. 
+ - (2 * data["|B|_r|v,p"] - data["|B|"] * data["B^phi_r|v,p"] / data["B^phi"]) + / data["psi_r"], + } + data["Gamma_c"] = ( + jnp.pi + / (8 * 2**0.5) + * _compute(Gamma_c, fun_data, data, grid, kwargs.get("num_pitch", 64)) + / data["fieldline length"] + ) + return data diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index 186049a2f..31e164ace 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -18,7 +18,7 @@ def _periodic(x, period): def _fixup_residual(r, period): r = _periodic(r, period) - # r should be between -period and period + # r should be between -period/2 and period/2 return jnp.where((r > period / 2) & jnp.isfinite(period), -period + r, r) diff --git a/desc/objectives/__init__.py b/desc/objectives/__init__.py index 7a58c7812..8cbd4b4d7 100644 --- a/desc/objectives/__init__.py +++ b/desc/objectives/__init__.py @@ -9,6 +9,7 @@ CoilSetLinkingNumber, CoilSetMinDistance, CoilTorsion, + LinkingCurrentConsistency, PlasmaCoilSetMinDistance, QuadraticFlux, SurfaceCurrentRegularization, @@ -36,11 +37,12 @@ Elongation, GoodCoordinates, MeanCurvature, + MirrorRatio, PlasmaVesselDistance, PrincipalCurvature, Volume, ) -from ._neoclassical import EffectiveRipple +from ._neoclassical import EffectiveRipple, GammaC from ._omnigenity import ( Isodynamicity, Omnigenity, diff --git a/desc/objectives/_coils.py b/desc/objectives/_coils.py index b13e024f0..a4ef477a3 100644 --- a/desc/objectives/_coils.py +++ b/desc/objectives/_coils.py @@ -2,6 +2,7 @@ import warnings import numpy as np +from scipy.constants import mu_0 from desc.backend import ( fori_loop, @@ -15,7 +16,7 @@ from desc.compute.utils import _compute as compute_fun from desc.grid import LinearGrid, _Grid from desc.integrals import compute_B_plasma -from desc.utils import Timer, errorif, safenorm, warnif +from desc.utils import Timer, broadcast_tree, errorif, safenorm, warnif from .normalization import compute_scaling_factors from .objective_funs import _Objective, collect_docs @@ -1762,6 +1763,202 @@ def compute(self, params_1, params_2=None, constants=None): return Psi +class LinkingCurrentConsistency(_Objective): + """Target the self-consistent poloidal linking current between the plasma and coils. + + A self-consistent coil + plasma configuration must have the sum of the signed + currents in the coils that poloidally link the plasma equal to the total poloidal + current required to be linked by the plasma according to the loop integral of its + toroidal magnetic field, given by `G(rho=1)`. This objective computes the difference + between these two quantities, such that a value of zero means the coils create the + correct net poloidal current. + + Assumes the coil topology does not change (ie the linking number with the plasma + is fixed). + + Parameters + ---------- + eq : Equilibrium + Equilibrium that will be optimized to satisfy the Objective. + coil : CoilSet + Coil(s) that are to be optimized. + grid : Grid, optional + Collocation grid containing the nodes to evaluate plasma current at. + Defaults to ``LinearGrid(M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym)``. + eq_fixed : bool + Whether the equilibrium is assumed fixed (should be true for stage 2, false + for single stage). 
+ """ + + __doc__ = __doc__.rstrip() + collect_docs( + target_default="``target=0``.", + bounds_default="``target=0``.", + ) + + _scalar = True + _units = "(A)" + _print_value_fmt = "Linking current error: " + + def __init__( + self, + eq, + coil, + *, + grid=None, + eq_fixed=False, + target=None, + bounds=None, + weight=1, + normalize=True, + normalize_target=True, + loss_function=None, + deriv_mode="auto", + jac_chunk_size=None, + name="linking current", + ): + if target is None and bounds is None: + target = 0 + self._grid = grid + self._eq_fixed = eq_fixed + self._linear = eq_fixed + self._eq = eq + self._coil = coil + + super().__init__( + things=[coil] if eq_fixed else [coil, eq], + target=target, + bounds=bounds, + weight=weight, + normalize=normalize, + normalize_target=normalize_target, + loss_function=loss_function, + deriv_mode=deriv_mode, + jac_chunk_size=jac_chunk_size, + name=name, + ) + + def build(self, use_jit=True, verbose=1): + """Build constant arrays. + + Parameters + ---------- + use_jit : bool, optional + Whether to just-in-time compile the objective and derivatives. + verbose : int, optional + Level of output. + + """ + eq = self._eq + coil = self._coil + grid = self._grid or LinearGrid( + M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym + ) + warnif( + not np.allclose(grid.nodes[:, 0], 1), + UserWarning, + "grid includes interior points, should be rho=1.", + ) + + self._dim_f = 1 + self._data_keys = ["G"] + + all_params = tree_map(lambda dim: np.arange(dim), coil.dimensions) + current_params = tree_map(lambda idx: {"current": idx}, True) + # indices of coil currents + self._indices = tree_leaves(broadcast_tree(current_params, all_params)) + self._num_coils = coil.num_coils + + profiles = get_profiles(self._data_keys, obj=eq, grid=grid) + transforms = get_transforms(self._data_keys, obj=eq, grid=grid) + + # compute linking number of coils with plasma. To do this we add a fake "coil" + # along the magnetic axis and compute the linking number of that coilset + from desc.coils import FourierRZCoil, MixedCoilSet + + axis_coil = FourierRZCoil( + 1.0, + eq.axis.R_n, + eq.axis.Z_n, + eq.axis.R_basis.modes[:, 2], + eq.axis.Z_basis.modes[:, 2], + eq.axis.NFP, + ) + dummy_coilset = MixedCoilSet(axis_coil, coil, check_intersection=False) + # linking number for coils with axis + link = np.round(dummy_coilset._compute_linking_number())[0, 1:] + + self._constants = { + "quad_weights": 1.0, + "link": link, + } + + if self._eq_fixed: + data = compute_fun( + "desc.equilibrium.equilibrium.Equilibrium", + self._data_keys, + params=eq.params_dict, + transforms=transforms, + profiles=profiles, + ) + eq_linking_current = 2 * jnp.pi * data["G"][0] / mu_0 + self._constants["eq_linking_current"] = eq_linking_current + else: + self._constants["profiles"] = profiles + self._constants["transforms"] = transforms + + if self._normalize: + params = tree_leaves( + coil.params_dict, is_leaf=lambda x: isinstance(x, dict) + ) + self._normalization = np.sum([np.abs(param["current"]) for param in params]) + + super().build(use_jit=use_jit, verbose=verbose) + + def compute(self, coil_params, eq_params=None, constants=None): + """Compute linking current error. + + Parameters + ---------- + coil_params : dict + Dictionary of coilset degrees of freedom, eg ``CoilSet.params_dict`` + eq_params : dict + Dictionary of equilibrium degrees of freedom, eg ``Equilibrium.params_dict`` + Only required if eq_fixed=False. + constants : dict + Dictionary of constant data, eg transforms, profiles etc. 
+ Defaults to self._constants. + + Returns + ------- + f : array of floats + Linking current error. + + """ + if constants is None: + constants = self.constants + if self._eq_fixed: + eq_linking_current = constants["eq_linking_current"] + else: + data = compute_fun( + "desc.equilibrium.equilibrium.Equilibrium", + self._data_keys, + params=eq_params, + transforms=constants["transforms"], + profiles=constants["profiles"], + ) + eq_linking_current = 2 * jnp.pi * data["G"][0] / mu_0 + + coil_currents = jnp.concatenate( + [ + jnp.atleast_1d(param[idx]) + for param, idx in zip(tree_leaves(coil_params), self._indices) + ] + ) + coil_currents = self.things[0]._all_currents(coil_currents) + coil_linking_current = jnp.sum(constants["link"] * coil_currents) + return eq_linking_current - coil_linking_current + + class CoilSetLinkingNumber(_Objective): """Prevents coils from becoming interlinked. diff --git a/desc/objectives/_geometry.py b/desc/objectives/_geometry.py index b2d7abc2f..747a3951c 100644 --- a/desc/objectives/_geometry.py +++ b/desc/objectives/_geometry.py @@ -1355,3 +1355,146 @@ def compute(self, params, constants=None): f = data["g_rr"] return jnp.concatenate([g, constants["sigma"] * f]) + + +class MirrorRatio(_Objective): + """Target a particular value of the mirror ratio. + + The mirror ratio is defined as: + + (Bₘₐₓ - Bₘᵢₙ) / (Bₘₐₓ + Bₘᵢₙ) + + Where Bₘₐₓ and Bₘᵢₙ are the maximum and minimum values of ||B|| on a given surface. + Returns one value for each surface in ``grid``. + + Parameters + ---------- + eq : Equilibrium or OmnigenousField + Equilibrium or OmnigenousField that will be optimized to satisfy the Objective. + grid : Grid, optional + Collocation grid containing the nodes to evaluate at. Defaults to + ``LinearGrid(M=eq.M_grid, N=eq.N_grid)`` for ``Equilibrium`` + or ``LinearGrid(theta=2*eq.M_B, N=2*eq.N_x)`` for ``OmnigenousField``. + + """ + + __doc__ = __doc__.rstrip() + collect_docs( + target_default="``target=0.2``.", + bounds_default="``target=0.2``.", + ) + + _coordinates = "r" + _units = "(dimensionless)" + _print_value_fmt = "Mirror ratio: " + + def __init__( + self, + eq, + *, + grid=None, + target=None, + bounds=None, + weight=1, + normalize=True, + normalize_target=True, + loss_function=None, + deriv_mode="auto", + name="mirror ratio", + jac_chunk_size=None, + ): + if target is None and bounds is None: + target = 0.2 + self._grid = grid + super().__init__( + things=eq, + target=target, + bounds=bounds, + weight=weight, + normalize=normalize, + normalize_target=normalize_target, + loss_function=loss_function, + deriv_mode=deriv_mode, + name=name, + jac_chunk_size=jac_chunk_size, + ) + + def build(self, use_jit=True, verbose=1): + """Build constant arrays. + + Parameters + ---------- + use_jit : bool, optional + Whether to just-in-time compile the objective and derivatives. + verbose : int, optional + Level of output. 
+ + """ + eq = self.things[0] + from desc.equilibrium import Equilibrium + from desc.magnetic_fields import OmnigenousField + + if self._grid is None and isinstance(eq, Equilibrium): + grid = LinearGrid( + M=eq.M_grid, + N=eq.N_grid, + NFP=eq.NFP, + sym=eq.sym, + ) + elif self._grid is None and isinstance(eq, OmnigenousField): + grid = LinearGrid( + theta=2 * eq.M_B, + N=2 * eq.N_x, + NFP=eq.NFP, + ) + else: + grid = self._grid + + self._dim_f = grid.num_rho + self._data_keys = ["mirror ratio"] + + timer = Timer() + if verbose > 0: + print("Precomputing transforms") + timer.start("Precomputing transforms") + + profiles = get_profiles(self._data_keys, obj=eq, grid=grid) + transforms = get_transforms(self._data_keys, obj=eq, grid=grid) + self._constants = { + "transforms": transforms, + "profiles": profiles, + } + + timer.stop("Precomputing transforms") + if verbose > 1: + timer.disp("Precomputing transforms") + + super().build(use_jit=use_jit, verbose=verbose) + + def compute(self, params, constants=None): + """Compute mirror ratio. + + Parameters + ---------- + params : dict + Dictionary of equilibrium or field degrees of freedom, + eg Equilibrium.params_dict + constants : dict + Dictionary of constant data, eg transforms, profiles etc. Defaults to + self.constants + + Returns + ------- + M : ndarray + Mirror ratio on each surface. + + """ + if constants is None: + constants = self.constants + data = compute_fun( + self.things[0], + self._data_keys, + params=params, + transforms=constants["transforms"], + profiles=constants["profiles"], + ) + return constants["transforms"]["grid"].compress(data["mirror ratio"]) diff --git a/desc/objectives/_neoclassical.py b/desc/objectives/_neoclassical.py index f171d8126..a2fb53951 100644 --- a/desc/objectives/_neoclassical.py +++ b/desc/objectives/_neoclassical.py @@ -1,16 +1,36 @@ """Objectives for targeting neoclassical transport.""" import numpy as np +from orthax.legendre import leggauss from desc.compute import get_profiles, get_transforms from desc.compute.utils import _compute as compute_fun from desc.grid import LinearGrid from desc.utils import Timer -from ..integrals.quad_utils import chebgauss2 -from .objective_funs import _Objective +from ..integrals.quad_utils import ( + automorphism_sin, + chebgauss2, + get_quadrature, + grad_automorphism_sin, +) +from .objective_funs import _Objective, collect_docs from .utils import _parse_callable_target_bounds +_bounce_overwrite = { + "deriv_mode": """ + deriv_mode : {"auto", "fwd", "rev"} + Specify how to compute Jacobian matrix, either forward mode or reverse mode AD. + ``auto`` selects forward or reverse mode based on the size of the input and + output of the objective. Has no effect on ``self.grad`` or ``self.hess`` which + always use reverse mode and forward over reverse mode respectively. + + Default is ``fwd``. If ``rev`` is chosen, then ``jac_chunk_size=1`` is chosen + by default. In ``rev`` mode, reducing the pitch angle parameter ``batch_size`` + does not reduce memory, so it is recommended to retain the default for that. + """ +} + class EffectiveRipple(_Objective): """The effective ripple is a proxy for neoclassical transport. @@ -33,74 +53,50 @@ class EffectiveRipple(_Objective): Parameters ---------- eq : Equilibrium - Equilibrium that will be optimized to satisfy the Objective. - target : {float, ndarray, callable}, optional - Target value(s) of the objective. Only used if bounds is None. - Must be broadcastable to Objective.dim_f. 
If a callable, should take a - single argument ``rho`` and return the desired value of the profile at those - locations. Defaults to 0. - bounds : tuple of {float, ndarray, callable}, optional - Lower and upper bounds on the objective. Overrides target. - Both bounds must be broadcastable to Objective.dim_f. - If a callable, each should take a single argument ``rho`` and return the - desired bound (lower or upper) of the profile at those locations. - weight : {float, ndarray}, optional - Weighting to apply to the Objective, relative to other Objectives. - Must be broadcastable to Objective.dim_f - normalize : bool, optional - This quantity is already normalized so this parameter is ignored. - Whether to compute the error in physical units or non-dimensionalize. - normalize_target : bool, optional - Whether target and bounds should be normalized before comparing to computed - values. If `normalize` is ``True`` and the target is in physical units, - this should also be set to True. - loss_function : {None, 'mean', 'min', 'max'}, optional - Loss function to apply to the objective values once computed. This loss function - is called on the raw compute value, before any shifting, scaling, or - normalization. - deriv_mode : {"auto", "fwd", "rev"} - Specify how to compute Jacobian matrix, either forward mode or reverse mode AD. - "auto" selects forward or reverse mode based on the size of the input and output - of the objective. Has no effect on self.grad or self.hess which always use - reverse mode and forward over reverse mode respectively. + ``Equilibrium`` to be optimized. rho : ndarray Unique coordinate values specifying flux surfaces to compute on. alpha : ndarray Unique coordinate values specifying field line labels to compute on. + batch : bool + Whether to vectorize part of the computation. Default is true. Y_B : int - Number of points per toroidal transit at which to sample data along field - line. Default is 100. + Desired resolution for algorithm to compute bounce points. + Default is double ``Y``. Something like 100 is usually sufficient. + Currently, this is the number of knots per toroidal transit used + to approximate |B| with cubic splines. num_transit : int Number of toroidal transits to follow field line. For axisymmetric devices, one poloidal transit is sufficient. Otherwise, - more transits will give more accurate result, with diminishing returns. + assuming the surface is not near rational, more transits will + approximate surface averages better, with diminishing returns. + num_well : int + Maximum number of wells to detect for each pitch and field line. + Giving ``None`` will detect all wells but due to current limitations in + JAX this will have worse performance. + Specifying a number that tightly upper bounds the number of wells will + increase performance. In general, an upper bound on the number of wells + per toroidal transit is ``Aι+B`` where ``A``,``B`` are the poloidal and + toroidal Fourier resolution of |B|, respectively, in straight-field line + PEST coordinates, and ι is the rotational transform normalized by 2π. + A tighter upper bound than ``num_well=(Aι+B)*num_transit`` is preferable. + The ``check_points`` or ``plot`` methods in ``desc.integrals.Bounce2D`` + are useful to select a reasonable value. num_quad : int Resolution for quadrature of bounce integrals. Default is 32. num_pitch : int - Resolution for quadrature over velocity coordinate. Default 50. - batch : bool - Whether to vectorize part of the computation. Default is true. 
- num_well : int - Maximum number of wells to detect for each pitch and field line. - Default is to detect all wells, but due to limitations in JAX this option - may consume more memory. Specifying a number that tightly upper bounds - the number of wells will increase performance. - name : str, optional - Name of the objective function. - jac_chunk_size : int , optional - Will calculate the Jacobian for this objective ``jac_chunk_size`` - columns at a time, instead of all at once. The memory usage of the - Jacobian calculation is roughly ``memory usage = m0 + m1*jac_chunk_size``: - the smaller the chunk size, the less memory the Jacobian calculation - will require (with some baseline memory usage). The time to compute the - Jacobian is roughly ``t=t0 +t1/jac_chunk_size``, so the larger the - ``jac_chunk_size``, the faster the calculation takes, at the cost of - requiring more memory. A ``jac_chunk_size`` of 1 corresponds to the least - memory intensive, but slowest method of calculating the Jacobian. - If None, it will use the largest size i.e ``obj.dim_x``. + Resolution for quadrature over velocity coordinate. Default is 50. """ + __doc__ = __doc__.rstrip() + collect_docs( + target_default="``target=0``.", + bounds_default="``target=0``.", + normalize_detail=" Note: Has no effect for this objective.", + normalize_target_detail=" Note: Has no effect for this objective.", + overwrite=_bounce_overwrite, + ) + _coordinates = "r" _units = "~" _print_value_fmt = "Effective ripple ε: " @@ -108,6 +104,7 @@ class EffectiveRipple(_Objective): def __init__( self, eq, + *, target=None, bounds=None, weight=1, @@ -115,14 +112,13 @@ def __init__( normalize_target=True, loss_function=None, deriv_mode="auto", - *, rho=1.0, alpha=0.0, + batch=True, Y_B=100, num_transit=10, num_quad=32, num_pitch=50, - batch=True, num_well=None, name="Effective ripple", jac_chunk_size=None, @@ -141,12 +137,12 @@ def __init__( "R0", # TODO: GitHub PR #1094 ] self._constants = { - "quad_weights": 1, + "quad_weights": 1.0, "rho": rho, "alpha": alpha, "zeta": np.linspace(0, 2 * np.pi * num_transit, Y_B * num_transit), + "quad": chebgauss2(num_quad), } - self._constants["quad x"], self._constants["quad w"] = chebgauss2(num_quad) self._hyperparameters = { "num_pitch": num_pitch, "batch": batch, @@ -224,7 +220,6 @@ def compute(self, params, constants=None): if constants is None: constants = self.constants eq = self.things[0] - # TODO: compute all deps of effective ripple here data = compute_fun( eq, self._keys_1dr, @@ -232,7 +227,6 @@ def compute(self, params, constants=None): constants["transforms_1dr"], constants["profiles"], ) - # TODO: interpolate all deps to this grid with fft utilities from fourier bounce grid = eq._get_rtz_grid( constants["rho"], constants["alpha"], @@ -256,7 +250,242 @@ def compute(self, params, constants=None): get_transforms("effective ripple", eq, grid, jitable=True), constants["profiles"], data=data, - quad=(constants["quad x"], constants["quad w"]), + quad=constants["quad"], **self._hyperparameters, ) return grid.compress(data["effective ripple"]) + + +class GammaC(_Objective): + """Γ_c is a proxy for measuring energetic ion confinement. + + References + ---------- + Poloidal motion of trapped particle orbits in real-space coordinates. + V. V. Nemov, S. V. Kasilov, W. Kernbichler, G. O. Leitold. + Phys. Plasmas 1 May 2008; 15 (5): 052501. + https://doi.org/10.1063/1.2912456. + Equation 61. + + A model for the fast evaluation of prompt losses of energetic ions in stellarators. + J.L. Velasco et al. 
2021 Nucl. Fusion 61 116059. + https://doi.org/10.1088/1741-4326/ac2994. + Equation 16. + + Parameters + ---------- + eq : Equilibrium + ``Equilibrium`` to be optimized. + rho : ndarray + Unique coordinate values specifying flux surfaces to compute on. + alpha : ndarray + Unique coordinate values specifying field line labels to compute on. + batch : bool + Whether to vectorize part of the computation. Default is true. + Y_B : int + Desired resolution for algorithm to compute bounce points. + Default is double ``Y``. Something like 100 is usually sufficient. + Currently, this is the number of knots per toroidal transit over + to approximate |B| with cubic splines. + num_transit : int + Number of toroidal transits to follow field line. + For axisymmetric devices, one poloidal transit is sufficient. Otherwise, + assuming the surface is not near rational, more transits will + approximate surface averages better, with diminishing returns. + num_well : int + Maximum number of wells to detect for each pitch and field line. + Giving ``None`` will detect all wells but due to current limitations in + JAX this will have worse performance. + Specifying a number that tightly upper bounds the number of wells will + increase performance. In general, an upper bound on the number of wells + per toroidal transit is ``Aι+B`` where ``A``,``B`` are the poloidal and + toroidal Fourier resolution of |B|, respectively, in straight-field line + PEST coordinates, and ι is the rotational transform normalized by 2π. + A tighter upper bound than ``num_well=(Aι+B)*num_transit`` is preferable. + The ``check_points`` or ``plot`` methods in ``desc.integrals.Bounce2D`` + are useful to select a reasonable value. + num_quad : int + Resolution for quadrature of bounce integrals. Default is 32. + num_pitch : int + Resolution for quadrature over velocity coordinate. Default is 64. + Nemov : bool + Whether to use the Γ_c as defined by Nemov et al. or Velasco et al. + Default is Nemov. Set to ``False`` to use Velasco's. + + Nemov's Γ_c converges to a finite nonzero value in the infinity limit + of the number of toroidal transits. Velasco's expression has a secular + term that drives the result to zero as the number of toroidal transits + increases if the secular term is not averaged out from the singular + integrals. Currently, an optimization using Velasco's metric may need + to be evaluated by measuring the decrease in Γ_c at a fixed number of toroidal + transits. 
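For a concrete starting point, the ``(Aι+B)*num_transit`` bound described above can be estimated from the equilibrium resolution. A minimal sketch, assuming ``eq.M`` and ``eq.N * eq.NFP`` as rough stand-ins for the poloidal and toroidal Fourier resolution of |B| in PEST coordinates (``estimate_num_well`` is a hypothetical helper, not part of the API):

import numpy as np

def estimate_num_well(eq, iota, num_transit):
    """Rough upper bound on bounce wells along one field line (illustrative only)."""
    # (A*iota + B) wells per toroidal transit, scaled by the number of transits;
    # A, B are stand-ins for the poloidal/toroidal Fourier resolution of |B|.
    A, B = eq.M, eq.N * eq.NFP
    return int(np.ceil((A * abs(iota) + B) * num_transit))

# e.g. GammaC(eq, num_well=estimate_num_well(eq, iota=1.1, num_transit=10), ...)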
+ + """ + + __doc__ = __doc__.rstrip() + collect_docs( + target_default="``target=0``.", + bounds_default="``target=0``.", + normalize_detail=" Note: Has no effect for this objective.", + normalize_target_detail=" Note: Has no effect for this objective.", + overwrite=_bounce_overwrite, + ) + + _coordinates = "r" + _units = "~" + _print_value_fmt = "Γ_c: " + + def __init__( + self, + eq, + *, + target=None, + bounds=None, + weight=1, + normalize=True, + normalize_target=True, + loss_function=None, + deriv_mode="auto", + rho=np.linspace(0.5, 1, 3), + alpha=np.array([0]), + batch=True, + num_transit=10, + Y_B=100, + num_quad=32, + num_pitch=64, + num_well=None, + Nemov=True, + name="Gamma_c", + jac_chunk_size=None, + ): + if target is None and bounds is None: + target = 0.0 + + rho, alpha = np.atleast_1d(rho, alpha) + self._dim_f = rho.size + self._constants = { + "quad_weights": 1.0, + "rho": rho, + "alpha": alpha, + "zeta": np.linspace(0, 2 * np.pi * num_transit, Y_B * num_transit), + } + self._hyperparameters = { + "num_quad": num_quad, + "num_pitch": num_pitch, + "batch": batch, + "num_well": num_well, + } + self._keys_1dr = ["iota", "iota_r", "min_tz |B|", "max_tz |B|"] + self._key = "Gamma_c" if Nemov else "Gamma_c Velasco" + + super().__init__( + things=eq, + target=target, + bounds=bounds, + weight=weight, + normalize=normalize, + normalize_target=normalize_target, + loss_function=loss_function, + deriv_mode=deriv_mode, + name=name, + jac_chunk_size=jac_chunk_size, + ) + + def build(self, use_jit=True, verbose=1): + """Build constant arrays. + + Parameters + ---------- + use_jit : bool, optional + Whether to just-in-time compile the objective and derivatives. + verbose : int, optional + Level of output. + + """ + eq = self.things[0] + self._grid_1dr = LinearGrid( + rho=self._constants["rho"], M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym + ) + num_quad = self._hyperparameters.pop("num_quad") + self._constants["quad"] = get_quadrature( + leggauss(num_quad), + (automorphism_sin, grad_automorphism_sin), + ) + if self._key == "Gamma_c": + self._constants["quad2"] = chebgauss2(num_quad) + self._target, self._bounds = _parse_callable_target_bounds( + self._target, self._bounds, self._constants["rho"] + ) + + timer = Timer() + if verbose > 0: + print("Precomputing transforms") + timer.start("Precomputing transforms") + + self._constants["transforms_1dr"] = get_transforms( + self._keys_1dr, eq, self._grid_1dr + ) + self._constants["profiles"] = get_profiles( + self._keys_1dr + [self._key], eq, self._grid_1dr + ) + + timer.stop("Precomputing transforms") + if verbose > 1: + timer.disp("Precomputing transforms") + + super().build(use_jit=use_jit, verbose=verbose) + + def compute(self, params, constants=None): + """Compute Γ_c. + + Parameters + ---------- + params : dict + Dictionary of equilibrium degrees of freedom, e.g. + ``Equilibrium.params_dict`` + constants : dict + Dictionary of constant data, e.g. transforms, profiles etc. + Defaults to ``self.constants``. + + Returns + ------- + result : ndarray + Γ_c as a function of the flux surface label. 
+ + """ + if constants is None: + constants = self.constants + eq = self.things[0] + data = compute_fun( + eq, + self._keys_1dr, + params, + constants["transforms_1dr"], + constants["profiles"], + ) + grid = eq._get_rtz_grid( + constants["rho"], + constants["alpha"], + constants["zeta"], + coordinates="raz", + iota=self._grid_1dr.compress(data["iota"]), + params=params, + ) + data = { + key: grid.copy_data_from_other(data[key], self._grid_1dr) + for key in self._keys_1dr + } + quad2 = {} + if self._key == "Gamma_c": + quad2["quad2"] = constants["quad2"] + data = compute_fun( + eq, + self._key, + params, + get_transforms(self._key, eq, grid, jitable=True), + constants["profiles"], + data=data, + quad=constants["quad"], + **quad2, + **self._hyperparameters, + ) + return grid.compress(data[self._key]) diff --git a/desc/profiles.py b/desc/profiles.py index ae1684abb..faaa7e3ab 100644 --- a/desc/profiles.py +++ b/desc/profiles.py @@ -35,7 +35,7 @@ class _Profile(IOAble, ABC): Subclasses must also implement getter and setter methods for params """ - _io_attrs_ = ["_name", "_params"] + _io_attrs_ = ["_name"] def __init__(self, name=""): self.name = name @@ -673,7 +673,7 @@ class PowerSeriesProfile(_Profile): """ - _io_attrs_ = _Profile._io_attrs_ + ["_basis"] + _io_attrs_ = _Profile._io_attrs_ + ["_params", "_basis"] def __init__(self, params=None, modes=None, sym="auto", name=""): super().__init__(name) @@ -848,7 +848,7 @@ class TwoPowerProfile(_Profile): """ - _io_attrs_ = _Profile._io_attrs_ + _io_attrs_ = _Profile._io_attrs_ + ["_params"] def __init__(self, params=None, name=""): super().__init__(name) @@ -954,7 +954,7 @@ class SplineProfile(_Profile): """ - _io_attrs_ = _Profile._io_attrs_ + ["_knots", "_method"] + _io_attrs_ = _Profile._io_attrs_ + ["_params", "_knots", "_method"] def __init__(self, values=None, knots=None, method="cubic2", name=""): super().__init__(name) @@ -1048,7 +1048,7 @@ class HermiteSplineProfile(_Profile): """ - _io_attrs_ = _Profile._io_attrs_ + ["_knots", "_params"] + _io_attrs_ = _Profile._io_attrs_ + ["_params", "_knots"] def __init__(self, f, df, knots=None, name=""): super().__init__(name) @@ -1149,6 +1149,8 @@ class MTanhProfile(_Profile): """ + _io_attrs_ = _Profile._io_attrs_ + ["_params"] + def __init__(self, params=None, name=""): super().__init__(name) @@ -1383,7 +1385,7 @@ class FourierZernikeProfile(_Profile): """ - _io_attrs_ = _Profile._io_attrs_ + ["_basis"] + _io_attrs_ = _Profile._io_attrs_ + ["_params", "_basis"] def __init__(self, params=None, modes=None, sym="auto", NFP=1, name=""): super().__init__(name) diff --git a/docs/api.rst b/docs/api.rst index fa32edb86..b6adda31c 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -208,9 +208,11 @@ Objective Functions desc.objectives.HelicalForceBalance desc.objectives.Isodynamicity desc.objectives.LinearObjectiveFromUser + desc.objectives.LinkingCurrentConsistency desc.objectives.MagneticWell desc.objectives.MeanCurvature desc.objectives.MercierStability + desc.objectives.MirrorRatio desc.objectives.ObjectiveFromUser desc.objectives.ObjectiveFunction desc.objectives.Omnigenity diff --git a/docs/api_objectives.rst b/docs/api_objectives.rst index 5fddaaed0..050ca4acb 100644 --- a/docs/api_objectives.rst +++ b/docs/api_objectives.rst @@ -49,6 +49,7 @@ Geometry desc.objectives.PrincipalCurvature desc.objectives.PlasmaVesselDistance desc.objectives.BScaleLength + desc.objectives.MirrorRatio desc.objectives.GoodCoordinates @@ -108,6 +109,7 @@ Coil Optimization desc.objectives.CoilArclengthVariance 
desc.objectives.ToroidalFlux desc.objectives.SurfaceCurrentRegularization + desc.objectives.LinkingCurrentConsistency Profiles diff --git a/requirements.txt b/requirements.txt index 43e74fde8..9e0525e93 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,6 +13,6 @@ plotly >= 5.16, <= 5.24.1 psutil <= 6.1.0 pylatexenc >= 2.0, <= 2.10 quadax >= 0.2.2, <= 0.2.4 -scikit-image <= 0.24.0 +scikit-image <= 0.25.0 scipy >= 1.7.0, <= 1.14.1 termcolor <= 2.5.0 diff --git a/tests/baseline/test_Gamma_c.png b/tests/baseline/test_Gamma_c.png new file mode 100644 index 000000000..451f7c0cf Binary files /dev/null and b/tests/baseline/test_Gamma_c.png differ diff --git a/tests/baseline/test_Gamma_c_Velasco.png b/tests/baseline/test_Gamma_c_Velasco.png new file mode 100644 index 000000000..30f5e7ace Binary files /dev/null and b/tests/baseline/test_Gamma_c_Velasco.png differ diff --git a/tests/baseline/test_effective_ripple.png b/tests/baseline/test_effective_ripple.png index 9bb7b5e23..c61ff3003 100644 Binary files a/tests/baseline/test_effective_ripple.png and b/tests/baseline/test_effective_ripple.png differ diff --git a/tests/inputs/master_compute_data_rpz.pkl b/tests/inputs/master_compute_data_rpz.pkl index 62adc1571..46fcc1210 100644 Binary files a/tests/inputs/master_compute_data_rpz.pkl and b/tests/inputs/master_compute_data_rpz.pkl differ diff --git a/tests/test_compute_funs.py b/tests/test_compute_funs.py index 52bd5b13e..6a5fdd14c 100644 --- a/tests/test_compute_funs.py +++ b/tests/test_compute_funs.py @@ -11,7 +11,7 @@ from desc.geometry import FourierRZToroidalSurface from desc.grid import LinearGrid from desc.io import load -from desc.utils import dot +from desc.utils import cross, dot # convolve kernel is reverse of FD coeffs FD_COEF_1_2 = np.array([-1 / 2, 0, 1 / 2])[::-1] @@ -1541,41 +1541,62 @@ def test_surface_equilibrium_geometry(): @pytest.mark.unit -def test_parallel_grad(): - """Test geometric and physical methods of computing parallel gradients agree.""" - eq = get("W7-X") - with pytest.warns(UserWarning, match="Reducing radial"): - eq.change_resolution(2, 2, 2, 4, 4, 4) - data = eq.compute( - [ - "e_zeta|r,a", - "B", - "B^zeta", - "|B|_z|r,a", - "grad(|B|)", - "|e_zeta|r,a|_z|r,a", - "B^zeta_z|r,a", - "|B|", - "gbdrift (secular)", - "gbdrift (secular)/phi", - "phi", - ], - ) - np.testing.assert_allclose(data["e_zeta|r,a"], (data["B"].T / data["B^zeta"]).T) - np.testing.assert_allclose( - data["|B|_z|r,a"], dot(data["grad(|B|)"], data["e_zeta|r,a"]) - ) - np.testing.assert_allclose( - data["|e_zeta|r,a|_z|r,a"], - data["|B|_z|r,a"] / np.abs(data["B^zeta"]) - - data["|B|"] - * data["B^zeta_z|r,a"] - * np.sign(data["B^zeta"]) - / data["B^zeta"] ** 2, - ) - np.testing.assert_allclose( - data["gbdrift (secular)"], data["gbdrift (secular)/phi"] * data["phi"] - ) +def test_clebsch_sfl_funs(): + """Test geometric and physical methods of computing B agree.""" + + def test(eq): + with pytest.warns(UserWarning, match="Reducing radial"): + eq.change_resolution(2, 2, 2, 4, 4, 4) + data = eq.compute( + [ + "e_zeta|r,a", + "B", + "B^zeta", + "B^phi", + "|B|_z|r,a", + "grad(|B|)", + "|e_zeta|r,a|_z|r,a", + "B^zeta_z|r,a", + "|B|", + "sqrt(g)_Clebsch", + "sqrt(g)_PEST", + "psi_r", + "grad(psi)", + "grad(alpha)", + "grad(phi)", + "B_phi", + "gbdrift (secular)", + "gbdrift (secular)/phi", + "phi", + ], + ) + np.testing.assert_allclose(data["e_zeta|r,a"], (data["B"].T / data["B^zeta"]).T) + np.testing.assert_allclose( + data["|B|_z|r,a"], dot(data["grad(|B|)"], data["e_zeta|r,a"]) + ) + 
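# Identities exercised by the assertions below (standard Clebsch /
# straight-field-line relations, stated here only for reference):
#   B = ∇ψ × ∇α,  B^ζ = (∂ψ/∂ρ) / √g_Clebsch,  B^φ = (∂ψ/∂ρ) / √g_PEST = B · ∇φ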
np.testing.assert_allclose( + data["|e_zeta|r,a|_z|r,a"], + data["|B|_z|r,a"] / np.abs(data["B^zeta"]) + - data["|B|"] + * data["B^zeta_z|r,a"] + * np.sign(data["B^zeta"]) + / data["B^zeta"] ** 2, + ) + np.testing.assert_allclose( + data["B"], cross(data["grad(psi)"], data["grad(alpha)"]) + ) + np.testing.assert_allclose( + data["B^zeta"], data["psi_r"] / data["sqrt(g)_Clebsch"] + ) + np.testing.assert_allclose(data["B^phi"], data["psi_r"] / data["sqrt(g)_PEST"]) + np.testing.assert_allclose(data["B^phi"], dot(data["B"], data["grad(phi)"])) + np.testing.assert_allclose(data["B_phi"], data["B"][:, 1]) + np.testing.assert_allclose( + data["gbdrift (secular)"], data["gbdrift (secular)/phi"] * data["phi"] + ) + + test(get("W7-X")) + test(get("NCSX")) @pytest.mark.unit diff --git a/tests/test_input_output.py b/tests/test_input_output.py index 6ac1d4433..9ad533723 100644 --- a/tests/test_input_output.py +++ b/tests/test_input_output.py @@ -19,6 +19,14 @@ SplineMagneticField, ToroidalMagneticField, ) +from desc.profiles import ( + PowerProfile, + PowerSeriesProfile, + ProductProfile, + ScaledProfile, + SumProfile, + TwoPowerProfile, +) from desc.transform import Transform from desc.utils import equals @@ -666,6 +674,26 @@ def test_save_after_load(tmpdir_factory): assert eq2.equiv(eq) +@pytest.mark.unit +def test_io_profiles(tmpdir_factory): + """Test saving/loading profiles. Test for GH issue #1448.""" + p0 = SumProfile( + TwoPowerProfile(params=[0.6, 2, 1.5]), PowerSeriesProfile(params=[0.4, 0, -0.4]) + ) + n = ScaledProfile(2, PowerProfile(1 / 3, p0)) + T = ScaledProfile(0.5, PowerProfile(2 / 3, p0)) + p1 = ProductProfile(n, T) + + tmpdir = tmpdir_factory.mktemp("profiles") + tmp_path = tmpdir.join("p0.h5") + p1.save(tmp_path) + p2 = load(tmp_path) + + x = np.linspace(0.1, 0.9, 9) + np.testing.assert_allclose(p0(x), p1(x)) + np.testing.assert_allclose(p1(x), p2(x)) + + @pytest.mark.unit def test_io_OmnigenousField(tmpdir_factory): """Test saving/loading an OmnigenousField works (tests dict saving).""" diff --git a/tests/test_neoclassical.py b/tests/test_neoclassical.py index 231b78d9a..6df2e1603 100644 --- a/tests/test_neoclassical.py +++ b/tests/test_neoclassical.py @@ -59,7 +59,9 @@ def test_effective_ripple(): eq = get("W7-X") rho = np.linspace(0, 1, 10) alpha = np.array([0]) - zeta = np.linspace(0, 20 * np.pi, 1000) + Y_B = 100 + num_transit = 10 + zeta = np.linspace(0, 2 * np.pi * num_transit, Y_B * num_transit) grid = get_rtz_grid(eq, rho, alpha, zeta, coordinates="raz") data = eq.compute("effective ripple", grid=grid) assert np.isfinite(data["effective ripple"]).all() @@ -77,6 +79,42 @@ def test_effective_ripple(): return fig +@pytest.mark.unit +@pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) +def test_Gamma_c_Velasco(): + """Test Γ_c with W7-X.""" + eq = get("W7-X") + rho = np.linspace(0, 1, 10) + alpha = np.array([0]) + Y_B = 100 + num_transit = 10 + zeta = np.linspace(0, 2 * np.pi * num_transit, Y_B * num_transit) + grid = eq._get_rtz_grid(rho, alpha, zeta, coordinates="raz") + data = eq.compute("Gamma_c Velasco", grid=grid) + assert np.isfinite(data["Gamma_c Velasco"]).all() + fig, ax = plt.subplots() + ax.plot(rho, grid.compress(data["Gamma_c Velasco"]), marker="o") + return fig + + +@pytest.mark.unit +@pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) +def test_Gamma_c(): + """Test Γ_c Nemov with W7-X.""" + eq = get("W7-X") + rho = np.linspace(0, 1, 10) + alpha = np.array([0]) + Y_B = 100 + num_transit = 10 + zeta = np.linspace(0, 2 * np.pi * 
num_transit, Y_B * num_transit) + grid = eq._get_rtz_grid(rho, alpha, zeta, coordinates="raz") + data = eq.compute("Gamma_c", grid=grid) + assert np.isfinite(data["Gamma_c"]).all() + fig, ax = plt.subplots() + ax.plot(rho, grid.compress(data["Gamma_c"]), marker="o") + return fig + + class NeoIO: """Class to interface with NEO.""" diff --git a/tests/test_objective_funs.py b/tests/test_objective_funs.py index cef402780..9111a60bb 100644 --- a/tests/test_objective_funs.py +++ b/tests/test_objective_funs.py @@ -23,6 +23,7 @@ ) from desc.compute import get_transforms from desc.equilibrium import Equilibrium +from desc.equilibrium.coords import get_rtz_grid from desc.examples import get from desc.geometry import FourierPlanarCurve, FourierRZToroidalSurface, FourierXYZCurve from desc.grid import ConcentricGrid, LinearGrid, QuadratureGrid @@ -56,13 +57,16 @@ ForceBalance, ForceBalanceAnisotropic, FusionPower, + GammaC, GenericObjective, HeatingPowerISS04, Isodynamicity, LinearObjectiveFromUser, + LinkingCurrentConsistency, MagneticWell, MeanCurvature, MercierStability, + MirrorRatio, ObjectiveFromUser, ObjectiveFunction, Omnigenity, @@ -1424,6 +1428,108 @@ def test_signed_plasma_vessel_distance(self): ) obj.build() + @pytest.mark.unit + def test_mirror_ratio_equilibrium(self): + """Test mirror ratio objective for Equilibrium.""" + # axisymmetry, no iota, so B ~ B0/R + eq = Equilibrium(L=8, M=8) + eq.solve() + # R0 = 10, a=1, so Bmax = B0/9, Bmin = B0/11 + mirror_ratio = (1 / 9 - 1 / 11) / (1 / 9 + 1 / 11) + obj = MirrorRatio(eq) + obj.build() + f = obj.compute(eq.params_dict) + # not perfect agreement bc eq is low res, so B isnt exactly B0/R + np.testing.assert_allclose(f, mirror_ratio, rtol=3e-3) + + @pytest.mark.unit + def test_mirror_ratio_omni_field(self): + """Test mirror ratio objective for OmnigenousField.""" + field = OmnigenousField( + L_B=1, + M_B=3, + L_x=1, + M_x=1, + N_x=1, + NFP=1, + helicity=(0, 1), + B_lm=np.array( + [ + # f(r) = B0 + B1*(2r-1) + # f(0) = [0.8, 1.0, 1.2] + # f(1) = [1.0, 1.0, 1.0] + [0.9, 1.0, 1.1], # B0 + [0.1, 0.0, -0.1], # B1 + ] + ).flatten(), + ) + + mirror_ratio_axis = (1.2 - 0.8) / (1.2 + 0.8) + mirror_ratio_edge = 0.0 + grid = LinearGrid(L=5, theta=6, N=2) + rho = grid.nodes[grid.unique_rho_idx, 0] + obj = MirrorRatio(field, grid=grid) + obj.build() + f = obj.compute(field.params_dict) + np.testing.assert_allclose( + f, mirror_ratio_axis * (1 - rho) + mirror_ratio_edge * rho + ) + + @pytest.mark.unit + def test_linking_current(self): + """Test calculation of signed linking current from coils to plasma.""" + eq = Equilibrium() + G = eq.compute("G", grid=LinearGrid(rho=1.0))["G"][0] * 2 * jnp.pi / mu_0 + c = G / 8 + coil1 = FourierPlanarCoil(current=1.5 * c, center=[10, 1, 0]) + coil2 = FourierPlanarCoil(current=0.5 * c, center=[10, 2, 0]) + # explicit symmetry coils + coilset1 = CoilSet.from_symmetry((coil1, coil2), NFP=2, sym=True) + expected_currents = [ + c * 1.5, # these are the 2 actual coils, with different currents + c * 0.5, + -c * 0.5, # these are the stellarator symmetric ones in first field period + -c * 1.5, + c * 1.5, # these next 4 are the ones from the 2nd field period + c * 0.5, + -c * 0.5, + -c * 1.5, + ] + np.testing.assert_allclose(coilset1._all_currents(), expected_currents) + obj = LinkingCurrentConsistency(eq, coilset1) + obj.build() + f = obj.compute(coilset1.params_dict, eq.params_dict) + np.testing.assert_allclose(f, 0) + + # same with virtual coils + coilset2 = CoilSet(coil1, coil2, NFP=2, sym=True) + 
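# Sign convention, inferred from the assertions below: the residual is roughly
#   f ≈ G − Σ_i (linking_number_i * current_i),
# where G (computed above from eq.compute("G"), scaled by 2π/μ0) is the net poloidal
# current the equilibrium requires and the sum runs over all real and virtual coils,
# so a coil set linking twice the required current gives f = −G.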
np.testing.assert_allclose(coilset2._all_currents(), expected_currents) + obj = LinkingCurrentConsistency(eq, coilset2) + obj.build() + f = obj.compute(coilset2.params_dict, eq.params_dict) + np.testing.assert_allclose(f, 0) + + # both coilsets together. These have overlapping coils but it doesn't + # affect the linking number + coilset3 = MixedCoilSet(coilset1, coilset2, check_intersection=False) + np.testing.assert_allclose( + coilset3._all_currents(), expected_currents + expected_currents + ) + obj = LinkingCurrentConsistency(eq, coilset3) + obj.build() + f = obj.compute(coilset3.params_dict, eq.params_dict) + np.testing.assert_allclose(f, -G) # coils provide 2G so error is -G + + # CoilSet + 1 extra coil + coilset4 = MixedCoilSet(coilset1, coil2, check_intersection=False) + np.testing.assert_allclose( + coilset4._all_currents(), expected_currents + [0.5 * G / 8] + ) + obj = LinkingCurrentConsistency(eq, coilset4) + obj.build() + f = obj.compute(coilset4.params_dict, eq.params_dict) + np.testing.assert_allclose(f, -0.5 * G / 8) + @pytest.mark.unit def test_omnigenity_multiple_surfaces(self): """Test omnigenity transform vectorized over multiple surfaces.""" @@ -1529,6 +1635,54 @@ def test(field, grid): np.testing.assert_allclose(result1 * 2, result5) np.testing.assert_allclose(result2, result4) + @pytest.mark.unit + def test_objective_compute(self): + """To avoid issues such as #1424.""" + eq = get("W7-X") + rho = np.linspace(0.1, 1, 3) + alpha = np.array([0]) + Y_B = 50 + num_transit = 4 + num_pitch = 16 + num_quad = 16 + zeta = np.linspace(0, 2 * np.pi * num_transit, Y_B * num_transit) + grid = get_rtz_grid(eq, rho, alpha, zeta, coordinates="raz") + data = eq.compute( + ["effective ripple", "Gamma_c"], + grid=grid, + num_quad=num_quad, + num_pitch=num_pitch, + ) + obj = EffectiveRipple( + eq, + rho=rho, + alpha=alpha, + Y_B=Y_B, + num_transit=num_transit, + num_quad=num_quad, + num_pitch=num_pitch, + ) + obj.build() + # TODO(#1094) + np.testing.assert_allclose( + obj.compute(eq.params_dict), + grid.compress(data["effective ripple"]), + rtol=0.004, + ) + obj = GammaC( + eq, + rho=rho, + alpha=alpha, + Y_B=Y_B, + num_transit=num_transit, + num_quad=num_quad, + num_pitch=num_pitch, + ) + obj.build() + np.testing.assert_allclose( + obj.compute(eq.params_dict), grid.compress(data["Gamma_c"]) + ) + @pytest.mark.regression def test_derivative_modes(): @@ -2479,7 +2633,7 @@ def test_loss_function_asserts(): def _reduced_resolution_objective(eq, objective): """Speed up testing suite by defining rules to reduce objective resolution.""" kwargs = {} - if objective in {EffectiveRipple}: + if objective in {EffectiveRipple, GammaC}: kwargs["Y_B"] = 50 kwargs["num_transit"] = 2 kwargs["num_pitch"] = 25 @@ -2512,6 +2666,7 @@ class TestComputeScalarResolution: FusionPower, GenericObjective, HeatingPowerISS04, + LinkingCurrentConsistency, Omnigenity, PlasmaCoilSetMinDistance, PlasmaVesselDistance, @@ -2944,6 +3099,26 @@ def test_compute_scalar_resolution_coils(self, objective): f[i] = obj.compute_scalar(obj.x()) np.testing.assert_allclose(f, f[-1], rtol=1e-2, atol=1e-12) + @pytest.mark.unit + def test_compute_scalar_resolution_linking_current(self): + """LinkingCurrentConsistency.""" + coil = FourierPlanarCoil(center=[10, 1, 0]) + eq = Equilibrium() + coilset = CoilSet.from_symmetry(coil, NFP=4, sym=True) + f = np.zeros_like(self.res_array, dtype=float) + for i, res in enumerate(self.res_array): + obj = ObjectiveFunction( + LinkingCurrentConsistency( + eq, + coilset, + grid=LinearGrid(M=int(eq.M_grid * 
res), N=int(eq.N_grid * res)), + ), + use_jit=False, + ) + obj.build(verbose=0) + f[i] = obj.compute_scalar(obj.x()) + np.testing.assert_allclose(f, f[-1], rtol=1e-2, atol=1e-12) + class TestObjectiveNaNGrad: """Make sure reverse mode AD works correctly for all objectives.""" @@ -2972,7 +3147,9 @@ class TestObjectiveNaNGrad: EffectiveRipple, ForceBalanceAnisotropic, FusionPower, + GammaC, HeatingPowerISS04, + LinkingCurrentConsistency, Omnigenity, PlasmaCoilSetMinDistance, PlasmaVesselDistance, @@ -3249,6 +3426,17 @@ def test_objective_no_nangrad_effective_ripple(self): g = obj.grad(obj.x()) assert not np.any(np.isnan(g)) + @pytest.mark.unit + def test_objective_no_nangrad_Gamma_c(self): + """Gamma_c.""" + eq = get("ESTELL") + with pytest.warns(UserWarning, match="Reducing radial"): + eq.change_resolution(2, 2, 2, 4, 4, 4) + obj = ObjectiveFunction(_reduced_resolution_objective(eq, GammaC)) + obj.build(verbose=0) + g = obj.grad(obj.x()) + assert not np.any(np.isnan(g)) + @pytest.mark.unit def test_objective_no_nangrad_ballooning(self): """BallooningStability.""" @@ -3258,6 +3446,17 @@ def test_objective_no_nangrad_ballooning(self): g = obj.grad(obj.x()) assert not np.any(np.isnan(g)) + @pytest.mark.unit + def test_objective_no_nangrad_linking_current(self): + """LinkingCurrentConsistency.""" + coil = FourierPlanarCoil(center=[10, 1, 0]) + coilset = CoilSet.from_symmetry(coil, NFP=4, sym=True) + eq = Equilibrium() + obj = ObjectiveFunction(LinkingCurrentConsistency(eq, coilset)) + obj.build() + g = obj.grad(obj.x()) + assert not np.any(np.isnan(g)) + @pytest.mark.unit def test_asymmetric_normalization(): diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py index 29bf66a40..637d247a8 100644 --- a/tests/test_optimizer.py +++ b/tests/test_optimizer.py @@ -33,9 +33,9 @@ FixParameters, FixPressure, FixPsi, - FixSumCoilCurrent, ForceBalance, GenericObjective, + LinkingCurrentConsistency, MagneticWell, MeanCurvature, ObjectiveFunction, @@ -1405,15 +1405,17 @@ def test_optimize_coil_currents(DummyCoilSet): coil.current = current / coils.num_coils objective = ObjectiveFunction(QuadraticFlux(eq=eq, field=coils, vacuum=True)) - constraints = FixSumCoilCurrent(coils) + constraints = LinkingCurrentConsistency(eq, coils, eq_fixed=True) optimizer = Optimizer("lsq-exact") - [coils_opt], _ = optimizer.optimize( - things=coils, - objective=objective, - constraints=constraints, - verbose=2, - copy=True, - ) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", message=".*\n.*\nIncompatible") + [coils_opt], _ = optimizer.optimize( + things=coils, + objective=objective, + constraints=constraints, + verbose=2, + copy=True, + ) # check that optimized coil currents changed by more than 15% from initial values np.testing.assert_array_less( np.asarray(coils.current).mean() * 0.15, diff --git a/tests/test_stability_funs.py b/tests/test_stability_funs.py index 6c72e0c56..bfcee8ac9 100644 --- a/tests/test_stability_funs.py +++ b/tests/test_stability_funs.py @@ -1,4 +1,4 @@ -"""Tests for Mercier stability functions.""" +"""Tests for stability functions.""" import numpy as np import pytest