from desc import set_device

set_device("gpu")

import nvtx
import numpy as np

from desc.continuation import solve_continuation_automatic
from desc.equilibrium import EquilibriaFamily, Equilibrium
from desc.geometry import FourierRZToroidalSurface
from desc.grid import LinearGrid
from desc.objectives import (
    AspectRatio,
    FixBoundaryR,
    FixBoundaryZ,
    FixCurrent,
    FixPressure,
    FixPsi,
    ForceBalance,
    ObjectiveFunction,
    QuasisymmetryTwoTerm,
    Elongation,
    RotationalTransform,
    Volume,
)
from desc.optimize import Optimizer

surf = FourierRZToroidalSurface(
    R_lmn=[1, 0.125, 0.1],
    Z_lmn=[-0.125, -0.1],
    modes_R=[[0, 0], [1, 0], [0, 1]],
    modes_Z=[[-1, 0], [0, -1]],
    NFP=4,
)

# create initial equilibrium. Psi chosen to give B ~ 1 T. Could also give profiles here,
# default is zero pressure and zero current
eq = Equilibrium(M=12, N=12, Psi=0.04, surface=surf)

with nvtx.annotate("solve_continuation_automatic"):
    # this is usually all you need to solve a fixed boundary equilibrium
    eq0 = solve_continuation_automatic(eq, verbose=0)[-1]

grid = LinearGrid(
    M=eq0.M_grid, N=eq0.N_grid, NFP=eq0.NFP, rho=np.array([0.6, 0.8, 1.0]), sym=True
)

objective = ObjectiveFunction(
    (
        # pass in the grid we defined, and don't forget the target helicity!
        QuasisymmetryTwoTerm(eq=eq0, helicity=(1, eq.NFP), grid=grid),
    ),
)
constraints = (
    ForceBalance(eq=eq0),
    # try to keep the aspect ratio between 7 and 9
    AspectRatio(eq=eq0, bounds=(7, 9)),
    # similarly, try to keep it from getting too elongated
    Elongation(eq=eq0, bounds=(0, 3)),
    # keep volume the same as the initial volume
    Volume(eq=eq0, target=eq0.compute("V")["V"]),
    # target for average iota
    RotationalTransform(eq=eq0, target=1.1, loss_function="mean"),
    # fix major radius
    FixBoundaryR(eq=eq0, modes=[0, 0, 0]),
    # fix vacuum profiles
    FixPressure(eq=eq0),
    FixCurrent(eq=eq0),
    FixPsi(eq=eq0),
)
optimizer = Optimizer("lsq-auglag")

with nvtx.annotate("optimize"):
    eqa, history = eq0.optimize(
        objective=objective,
        constraints=constraints,
        optimizer=optimizer,
        # each iteration of the augmented Lagrangian optimizer is cheaper than a step of
        # a proximal optimizer, but it generally requires more iterations to converge
        maxiter=20,
        copy=True,
        verbose=3,
        options={},
    )
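
# Illustrative follow-up, not part of the original script: save the optimized
# equilibrium for later analysis (Equilibrium.save writes an HDF5 file).
# The file name below is an arbitrary placeholder.
eqa.save("eq_qs_optimized.h5")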