From a8f8e07380423f91ebbe651cdf41e57ca6109c82 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Fri, 28 May 2021 00:44:36 +0200 Subject: [PATCH 01/27] Implement AMR with p4est (needs debugging) --- src/callbacks_step/amr.jl | 184 +++++++++++++++++- src/callbacks_step/amr_dg1d.jl | 15 +- src/callbacks_step/amr_dg2d.jl | 24 +-- src/callbacks_step/amr_dg3d.jl | 15 +- src/callbacks_step/analysis.jl | 26 ++- src/mesh/mesh_io.jl | 13 +- src/mesh/p4est_mesh.jl | 195 ++++++++++++++++++- src/solvers/dg_p4est/containers.jl | 298 +++++++++++++++++++++++------ 8 files changed, 661 insertions(+), 109 deletions(-) diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index bbddaf304e8..99329c289fa 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -177,7 +177,7 @@ end # `passive_args` is currently used for Euler with self-gravity to adapt the gravity solver # passively without querying its indicator, based on the assumption that both solvers use # the same mesh. That's a hack and should be improved in the future once we have more examples -# and a better understandin of such a coupling. +# and a better understanding of such a coupling. # `passive_args` is expected to be an iterable of `Tuple`s of the form # `(p_u_ode, p_mesh, p_equations, p_dg, p_cache)`. function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, @@ -189,7 +189,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, u = wrap_array(u_ode, mesh, equations, dg, cache) lambda = @timed timer() "indicator" controller(u, mesh, equations, dg, cache, - t=t, iter=iter) + t=t, iter=iter) if mpi_isparallel() # Collect lambda for all elements @@ -222,10 +222,13 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, # refine mesh refined_original_cells = @timed timer() "mesh" refine!(mesh.tree, to_refine) + # Find all indices of elements whose cell ids are in cells_to_refine + elements_to_refine = findall(cell_id -> cell_id in cells_to_refine, cache.elements.cell_ids) + # refine solver - @timed timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, cache, refined_original_cells) + @timed timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_refine) for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args - @timed timer() "passive solver" refine!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, refined_original_cells) + @timed timer() "passive solver" refine!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, elements_to_refine) end else # If there is nothing to refine, create empty array for later use @@ -280,10 +283,13 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, end end + # Find all indices of elements whose cell ids are in child_cells_to_coarsen + elements_to_remove = findall(cell_id -> cell_id in child_cells_to_coarsen, cache.elements.cell_ids) + # coarsen solver - @timed timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, cache, removed_child_cells) + @timed timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_remove) for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args - @timed timer() "passive solver" coarsen!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, removed_child_cells) + @timed timer() "passive solver" coarsen!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, elements_to_remove) end else # If there is nothing to coarsen, create empty array 
for later use @@ -313,6 +319,99 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, end +# Copy controller values to quad user data storage, will be called below +function copy_to_quad_iter_volume(info, user_data) + # Global trees array + trees = convert_sc_array(p4est_tree_t, info.p4est.trees) + # Quadrant numbering offset of this quadrant, one-based indexing + offset = trees[info.treeid + 1].quadrants_offset + # Global quad ID + quad_id = offset + info.quadid + + # Access user_data = lambda + user_data_ptr = Ptr{Int}(user_data) + # Load controller_value = lambda[quad_id + 1] + controller_value = unsafe_load(user_data_ptr, quad_id + 1) + + # Access quadrant's user data ([global quad ID, controller_value]) + quad_data_ptr = Ptr{Int}(info.quad.p.user_data) + # Save controller value to quadrant's user data. + unsafe_store!(quad_data_ptr, controller_value, 2) + + return nothing +end + +function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, + equations, dg::DG, cache, + t, iter; + only_refine=false, only_coarsen=false, + passive_args=()) + @unpack controller, adaptor = amr_callback + + u = wrap_array(u_ode, mesh, equations, dg, cache) + lambda = @timed timer() "indicator" controller(u, mesh, equations, dg, cache, + t=t, iter=iter) + + @boundscheck begin + @assert axes(lambda) == (Base.OneTo(ncells(mesh)),) ( + "Indicator array (axes = $(axes(lambda))) and mesh cells (axes = $(Base.OneTo(ncells(mesh)))) have different axes" + ) + end + + # Copy controller value of each quad to the quad's user data storage + iter_volume_c = @cfunction(copy_to_quad_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) + + # TODO P4EST Is lambda always a Vector{Int}? + GC.@preserve lambda begin + p4est_iterate(mesh.p4est, + C_NULL, # ghost layer + pointer(lambda), + iter_volume_c, # iter_volume + C_NULL, # iter_face + C_NULL) # iter_corner + end + + @timed timer() "refine" if !only_coarsen + # Refine mesh + refined_original_cells = @timed timer() "mesh" refine!(mesh) + + # Refine solver + @timed timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, cache, refined_original_cells) + for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args + @timed timer() "passive solver" refine!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, refined_original_cells) + end + else + # If there is nothing to refine, create empty array for later use + refined_original_cells = Int[] + end + + @timed timer() "coarsen" if !only_refine + # Coarsen mesh + coarsened_original_cells = @timed timer() "mesh" coarsen!(mesh) + + # coarsen solver + @timed timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, cache, coarsened_original_cells) + for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args + @timed timer() "passive solver" coarsen!(p_u_ode, adaptor, p_mesh, p_equations, + p_dg, p_cache, coarsened_original_cells) + end + else + # If there is nothing to coarsen, create empty array for later use + coarsened_original_cells = Int[] + end + + # Store whether there were any cells coarsened or refined + has_changed = !isempty(refined_original_cells) || !isempty(coarsened_original_cells) + if has_changed # TODO: Taal decide, where shall we set this? 
+ # don't set it to has_changed since there can be changes from earlier calls + mesh.unsaved_changes = true + end + + # Return true if there were any cells coarsened or refined, otherwise false + return has_changed +end + + # After refining cells, shift original cell ids to match new locations # Note: Assumes sorted lists of original and refined cell ids! # Note: `mesh` is only required to extract ndims @@ -464,6 +563,79 @@ function (controller::ControllerThreeLevel)(u::AbstractArray{<:Any}, end +# Loop above (as for TreeMesh) but executed by p4est +function controller_three_level_iter_volume(info, user_data) + # Unpack user_data = [controller, alpha] + ptr = Ptr{Any}(user_data) + data_array = unsafe_wrap(Array, ptr, 2) + controller = data_array[1] + alpha = data_array[2] + + @unpack controller_value = controller.cache + + # Global trees array + trees = convert_sc_array(p4est_tree_t, info.p4est.trees) + # Quadrant numbering offset of this quadrant, one-based indexing + offset = trees[info.treeid + 1].quadrants_offset + # Global quad ID + quad_id = offset + info.quadid + # Julia element ID + element = quad_id + 1 + + current_level = info.quad.level + + # set target level + target_level = current_level + if alpha[element] > controller.max_threshold + target_level = controller.max_level + elseif alpha[element] > controller.med_threshold + if controller.med_level > 0 + target_level = controller.med_level + # otherwise, target_level = current_level + # set med_level = -1 to implicitly use med_level = current_level + end + else + target_level = controller.base_level + end + + # compare target level with actual level to set controller + if current_level < target_level + controller_value[element] = 1 # refine! + elseif current_level > target_level + controller_value[element] = -1 # coarsen! + else + controller_value[element] = 0 # we're good + end + + return nothing +end + +function (controller::ControllerThreeLevel)(u::AbstractArray{<:Any}, + mesh::P4estMesh, equations, dg::DG, cache; + kwargs...) + + @unpack controller_value = controller.cache + resize!(controller_value, nelements(dg, cache)) + + alpha = controller.indicator(u, equations, dg, cache; kwargs...) 
+ + # Let p4est do the same iteration as with TreeMesh above + user_data = [controller, alpha] + iter_volume_c = @cfunction(controller_three_level_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) + + GC.@preserve user_data begin + p4est_iterate(mesh.p4est, + C_NULL, # ghost layer + pointer(user_data), + iter_volume_c, # iter_volume + C_NULL, # iter_face + C_NULL) # iter_corner + end + + return controller_value +end + + """ ControllerThreeLevelCombined(semi, indicator_primary, indicator_secondary; diff --git a/src/callbacks_step/amr_dg1d.jl b/src/callbacks_step/amr_dg1d.jl index 94469272c47..15a9a1aa252 100644 --- a/src/callbacks_step/amr_dg1d.jl +++ b/src/callbacks_step/amr_dg1d.jl @@ -1,18 +1,14 @@ # Refine elements in the DG solver based on a list of cell_ids that should be refined function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1}, - equations, dg::DGSEM, cache, cells_to_refine) + equations, dg::DGSEM, cache, elements_to_refine) # Return early if there is nothing to do - if isempty(cells_to_refine) + if isempty(elements_to_refine) return end # Determine for each existing element whether it needs to be refined needs_refinement = falses(nelements(dg, cache)) - - # The "Ref(...)" is such that we can vectorize the search but not the array that is searched - elements_to_refine = searchsortedfirst.(Ref(cache.elements.cell_ids[1:nelements(dg, cache)]), - cells_to_refine) needs_refinement[elements_to_refine] .= true # Retain current solution data @@ -119,17 +115,14 @@ end # Coarsen elements in the DG solver based on a list of cell_ids that should be removed function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1}, - equations, dg::DGSEM, cache, child_cells_to_coarsen) + equations, dg::DGSEM, cache, elements_to_remove) # Return early if there is nothing to do - if isempty(child_cells_to_coarsen) + if isempty(elements_to_remove) return end # Determine for each old element whether it needs to be removed to_be_removed = falses(nelements(dg, cache)) - # The "Ref(...)" is such that we can vectorize the search but not the array that is searched - elements_to_remove = searchsortedfirst.(Ref(cache.elements.cell_ids[1:nelements(dg, cache)]), - child_cells_to_coarsen) to_be_removed[elements_to_remove] .= true # Retain current solution data diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl index 604eeea6df5..84f8aa45ca4 100644 --- a/src/callbacks_step/amr_dg2d.jl +++ b/src/callbacks_step/amr_dg2d.jl @@ -100,18 +100,15 @@ end # Refine elements in the DG solver based on a list of cell_ids that should be refined -function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{2}, - equations, dg::DGSEM, cache, cells_to_refine) +function refine!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, P4estMesh{2}}, + equations, dg::DGSEM, cache, elements_to_refine) # Return early if there is nothing to do - if isempty(cells_to_refine) + if isempty(elements_to_refine) return end # Determine for each existing element whether it needs to be refined needs_refinement = falses(nelements(dg, cache)) - - # Find all indices of elements whose cell ids are in cells_to_refine - elements_to_refine = findall(cell_id -> cell_id in cells_to_refine, cache.elements.cell_ids) needs_refinement[elements_to_refine] .= true # Retain current solution data @@ -146,7 +143,7 @@ function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{2}, end # GC.@preserve old_u_ode # Sanity check - if isperiodic(mesh.tree) && nmortars(cache.mortars) == 0 && !mpi_isparallel() 
+ if mesh isa TreeMesh && isperiodic(mesh.tree) && nmortars(cache.mortars) == 0 && !mpi_isparallel() @assert ninterfaces(cache.interfaces) == ndims(mesh) * nelements(dg, cache) ("For $(ndims(mesh))D and periodic domains and conforming elements, the number of interfaces must be $(ndims(mesh)) times the number of elements") end @@ -222,18 +219,15 @@ end # Coarsen elements in the DG solver based on a list of cell_ids that should be removed -function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{2}, - equations, dg::DGSEM, cache, child_cells_to_coarsen) +function coarsen!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, P4estMesh{2}}, + equations, dg::DGSEM, cache, elements_to_remove) # Return early if there is nothing to do - if isempty(child_cells_to_coarsen) + if isempty(elements_to_remove) return end # Determine for each old element whether it needs to be removed to_be_removed = falses(nelements(dg, cache)) - - # Find all indices of elements whose cell ids are in child_cells_to_coarsen - elements_to_remove = findall(cell_id -> cell_id in child_cells_to_coarsen, cache.elements.cell_ids) to_be_removed[elements_to_remove] .= true # Retain current solution data @@ -279,7 +273,7 @@ function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{2}, end # GC.@preserve old_u_ode # Sanity check - if isperiodic(mesh.tree) && nmortars(cache.mortars) == 0 && !mpi_isparallel() + if mesh isa TreeMesh && isperiodic(mesh.tree) && nmortars(cache.mortars) == 0 && !mpi_isparallel() @assert ninterfaces(cache.interfaces) == ndims(mesh) * nelements(dg, cache) ("For $(ndims(mesh))D and periodic domains and conforming elements, the number of interfaces must be $(ndims(mesh)) times the number of elements") end @@ -343,7 +337,7 @@ end # this method is called when an `ControllerThreeLevel` is constructed -function create_cache(::Type{ControllerThreeLevel}, mesh::TreeMesh{2}, equations, dg::DG, cache) +function create_cache(::Type{ControllerThreeLevel}, mesh::Union{TreeMesh{2}, P4estMesh{2}}, equations, dg::DG, cache) controller_value = Vector{Int}(undef, nelements(dg, cache)) return (; controller_value) diff --git a/src/callbacks_step/amr_dg3d.jl b/src/callbacks_step/amr_dg3d.jl index 98500a61a00..056e53dccd9 100644 --- a/src/callbacks_step/amr_dg3d.jl +++ b/src/callbacks_step/amr_dg3d.jl @@ -1,18 +1,14 @@ # Refine elements in the DG solver based on a list of cell_ids that should be refined function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{3}, - equations, dg::DGSEM, cache, cells_to_refine) + equations, dg::DGSEM, cache, elements_to_refine) # Return early if there is nothing to do - if isempty(cells_to_refine) + if isempty(elements_to_refine) return end # Determine for each existing element whether it needs to be refined needs_refinement = falses(nelements(dg, cache)) - - # The "Ref(...)" is such that we can vectorize the search but not the array that is searched - elements_to_refine = searchsortedfirst.(Ref(cache.elements.cell_ids[1:nelements(dg, cache)]), - cells_to_refine) needs_refinement[elements_to_refine] .= true # Retain current solution data @@ -159,17 +155,14 @@ end # Coarsen elements in the DG solver based on a list of cell_ids that should be removed function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{3}, - equations, dg::DGSEM, cache, child_cells_to_coarsen) + equations, dg::DGSEM, cache, elements_to_remove) # Return early if there is nothing to do - if isempty(child_cells_to_coarsen) + if isempty(elements_to_remove) return end # Determine for each old 
element whether it needs to be removed to_be_removed = falses(nelements(dg, cache)) - # The "Ref(...)" is such that we can vectorize the search but not the array that is searched - elements_to_remove = searchsortedfirst.(Ref(cache.elements.cell_ids[1:nelements(dg, cache)]), - child_cells_to_coarsen) to_be_removed[elements_to_remove] .= true # Retain current solution data diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl index 243be9c5c13..ecf30386cf5 100644 --- a/src/callbacks_step/analysis.jl +++ b/src/callbacks_step/analysis.jl @@ -354,7 +354,7 @@ end # Print level information only if AMR is enabled -function print_amr_information(callbacks, mesh, solver, cache) +function print_amr_information(callbacks, mesh::TreeMesh, solver, cache) # Return early if there is nothing to print uses_amr(callbacks) || return nothing @@ -378,6 +378,30 @@ function print_amr_information(callbacks, mesh, solver, cache) end +# Print level information only if AMR is enabled +function print_amr_information(callbacks, mesh::P4estMesh, solver, cache) + + # Return early if there is nothing to print + uses_amr(callbacks) || return nothing + + elements_per_level = zeros(P4EST_MAXLEVEL + 1) + + for tree in convert_sc_array(p4est_tree_t, mesh.p4est.trees) + @. elements_per_level += tree.quadrants_per_level + end + + min_level = findfirst(i -> i > 0, elements_per_level) + max_level = findlast(i -> i > 0, elements_per_level) + + for level = max_level:-1:min_level+1 + mpi_println(" ├── level $level: " * @sprintf("% 14d", elements_per_level[level])) + end + mpi_println(" └── level $min_level: " * @sprintf("% 14d", elements_per_level[min_level])) + + return nothing +end + + # Iterate over tuples of analysis integrals in a type-stable way using "lispy tuple programming". function analyze_integrals(analysis_integrals::NTuple{N,Any}, io, du, u, t, semi) where {N} diff --git a/src/mesh/mesh_io.jl b/src/mesh/mesh_io.jl index 95c90e4a1ad..0cbbd08a405 100644 --- a/src/mesh/mesh_io.jl +++ b/src/mesh/mesh_io.jl @@ -136,12 +136,19 @@ end # of the mesh, like its size and the type of boundary mapping function. # Then, within Trixi2Vtk, the P4estMesh and its node coordinates are reconstructured from # these attributes for plotting purposes -function save_mesh_file(mesh::P4estMesh, output_directory) +function save_mesh_file(mesh::P4estMesh, output_directory, timestep=0) # Create output directory (if it does not exist) mkpath(output_directory) - filename = joinpath(output_directory, "mesh.h5") - p4est_filename = "p4est_data" + # Determine file name based on existence of meaningful time step + if timestep > 0 + filename = joinpath(output_directory, @sprintf("mesh_%06d.h5", timestep)) + p4est_filename = @sprintf("p4est_data_%06d", timestep) + else + filename = joinpath(output_directory, "mesh.h5") + p4est_filename = "p4est_data" + end + p4est_file = joinpath(output_directory, p4est_filename) # Save the complete connectivity/p4est data to disk. 
diff --git a/src/mesh/p4est_mesh.jl b/src/mesh/p4est_mesh.jl index ec1c0a575a6..d4e1e774fc6 100644 --- a/src/mesh/p4est_mesh.jl +++ b/src/mesh/p4est_mesh.jl @@ -104,7 +104,8 @@ function P4estMesh(trees_per_dimension; polydeg, # p4est_connectivity_new_brick has trees in Morton order, so use our own function for this conn = connectivity_structured(trees_per_dimension..., periodicity) - p4est = p4est_new_ext(0, conn, 0, initial_refinement_level, true, 0, C_NULL, C_NULL) + # Use Int-Vector of size 2 as quadrant user data + p4est = p4est_new_ext(0, conn, 0, initial_refinement_level, true, 2 * sizeof(Int), C_NULL, C_NULL) ghost = p4est_ghost_new(p4est, P4EST_CONNECT_FACE) p4est_mesh = p4est_mesh_new(p4est, ghost, P4EST_CONNECT_FACE) @@ -199,7 +200,8 @@ function P4estMesh(meshfile::String; n_trees) calc_tree_node_coordinates!(tree_node_coordinates, nodes, mapping, vertices, tree_to_vertex) - p4est = p4est_new_ext(0, conn, 0, initial_refinement_level, true, 0, C_NULL, C_NULL) + # Use Int-Vector of size 2 as quadrant user data + p4est = p4est_new_ext(0, conn, 0, initial_refinement_level, true, 2 * sizeof(Int), C_NULL, C_NULL) ghost = p4est_ghost_new(p4est, P4EST_CONNECT_FACE) p4est_mesh = p4est_mesh_new(p4est, ghost, P4EST_CONNECT_FACE) @@ -397,3 +399,192 @@ end function map_node_coordinates!(node_coordinates::AbstractArray{<:Any, 4}, mapping::Nothing) return node_coordinates end + + +function init_fn(p4est, which_tree, quadrant) + # Unpack quadrant's user data ([global quad ID, controller_value]) + ptr = Ptr{Int}(quadrant.p.user_data) + data = unsafe_wrap(Array, ptr, 2) + + # Initialize quad ID as -1 and controller_value as 0 (don't refine or coarsen) + data[1] = -1 + data[2] = 0 + + return nothing +end + +function refine_fn(p4est, which_tree, quadrant) + # Controller value has been copied to the quadrant's user data storage before + # Unpack quadrant's user data ([global quad ID, controller_value]) + ptr = Ptr{Int}(quadrant.p.user_data) + controller_value = unsafe_load(ptr, 2) + + if controller_value > 0 + # return true (refine) + return Cint(1) + else + # return false (don't refine) + return Cint(0) + end +end + +# Refine marked cells and rebalance forest +# Return a list of all cells that have been refined during refinement or rebalancing +function refine!(mesh::P4estMesh) + original_n_cells = ncells(mesh) + + # Copy original element IDs to quad user data storage + save_original_ids(mesh) + + init_fn_c = @cfunction(init_fn, Cvoid, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) + + # Refine marked cells + refine_fn_c = @cfunction(refine_fn, Cint, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) + @timed timer() "refine_unbalanced" p4est_refine(mesh.p4est, true, refine_fn_c, init_fn_c) + + @timed timer() "rebalance" p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn_c) + # Due to a bug in p4est, the forest needs to be rebalanced twice sometimes + # See https://github.com/cburstedde/p4est/issues/112 + @timed timer() "rebalance" p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn_c) + + return collect_changed_cells(mesh, original_n_cells) +end + + +function check_balanced_iter_volume(info, user_data) + # Global trees array + trees = convert_sc_array(p4est_tree_t, info.p4est.trees) + # Quadrant numbering offset of this quadrant, one-based indexing + offset = trees[info.treeid + 1].quadrants_offset + # Global quad ID + quad_id = offset + info.quadid + + p4est_mesh = Ptr{p4est_mesh_t}(info.p4est.user_pointer) + + quad_to_face = unsafe_wrap(Array, 
p4est_mesh.quad_to_face, (4, quad_id + 1)) + + # quad_to_face will be negative if there are two half-size neighbors in this direction. + # Therefore, coarsening this cell will make the forest unbalanced. + if any(i -> i < 0, quad_to_face[:, quad_id + 1]) + # Unpack quadrant's user data ([global quad ID, controller_value]) + ptr = Ptr{Int}(info.quad.p.user_data) + # Set controller value to 0, don't coarsen! + unsafe_store!(ptr, 0, 2) + end + + return nothing +end + +function coarsen_fn(p4est, which_tree, quadrants_ptr) + quadrants = unsafe_wrap(Array, quadrants_ptr, 4) + + # Controller value has been copied to the quadrant's user data storage before + # Unpack quadrant's user data ([global quad ID, controller_value]) + if all(i -> unsafe_load(Ptr{Int}(quadrants[i].p.user_data), 2) < 0, eachindex(quadrants)) + # return true (coarsen) + return Cint(0) + else + # return false (don't coarsen) + return Cint(0) + end +end + +# Coarsen marked cells if the forest will stay balanced +# Return a list of all cells that have been coarsened +function coarsen!(mesh::P4estMesh) + original_n_cells = ncells(mesh) + + # Copy original element IDs to quad user data storage + save_original_ids(mesh) + + # Mark cells whose coarsening will make the forest unbalanced as "don't coarsen" + # TODO P4EST recursive. A cell could be coarsenable after coarsening another cell first. + mesh.p4est.user_pointer = mesh.p4est_mesh + iter_volume_c = @cfunction(check_balanced_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) + + p4est_iterate(mesh.p4est, + C_NULL, # ghost layer + C_NULL, # user data + iter_volume_c, # iter_volume + C_NULL, # iter_face + C_NULL) # iter_corner + + # Coarsen marked cells + coarsen_fn_c = @cfunction(coarsen_fn, Cint, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{Ptr{p4est_quadrant_t}})) + init_fn_c = @cfunction(init_fn, Cvoid, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) + @timed timer() "coarsen!" 
p4est_coarsen(mesh.p4est, true, coarsen_fn_c, init_fn_c) + + return collect_changed_cells(mesh, original_n_cells) +end + + +# Copy global quad ID to quad's user data storage, will be called below +function save_original_id_iter_volume(info, user_data) + # Global trees array + trees = convert_sc_array(p4est_tree_t, info.p4est.trees) + # Quadrant numbering offset of this quadrant, one-based indexing + offset = trees[info.treeid + 1].quadrants_offset + # Global quad ID + quad_id = offset + info.quadid + + # Unpack quadrant's user data ([global quad ID, controller_value]) + ptr = Ptr{Int}(info.quad.p.user_data) + # Save global quad ID + unsafe_store!(ptr, quad_id, 2) + + return nothing +end + +# Copy old element IDs to each quad's user data storage +function save_original_ids(mesh::P4estMesh) + iter_volume_c = @cfunction(save_original_id_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) + + p4est_iterate(mesh.p4est, + C_NULL, # ghost layer + C_NULL, # user data + iter_volume_c, # iter_volume + C_NULL, # iter_face + C_NULL) # iter_corner +end + + +# Extract information about which cells have been changed +function collect_changed_iter_volume(info, user_data) + # The original element ID has been saved to user_data before + # Unpack quadrant's user data ([global quad ID, controller_value]) + quad_data_ptr = Ptr{Int}(info.quad.p.user_data) + original_id = unsafe_load(quad_data_ptr, 1) + + # original_id of cells that have been newly created during refinement is -1 + if original_id >= 0 + # Unpack user_data = original_cells (we only need the first original_id values) + user_data_ptr = Ptr{Int}(user_data) + + # If quad has an original_id, it existed before refinement, and therefore wasn't changed. + # Mark original_id as "not changed during refinement/coarsening" in original_cells + unsafe_store!(user_data_ptr, 0, original_id + 1) + end + + return nothing +end + +function collect_changed_cells(mesh, original_n_cells) + original_cells = collect(1:original_n_cells) + + # Iterate over all quads and set original cells that haven't been changed to zero + iter_volume_c = @cfunction(collect_changed_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) + + GC.@preserve original_cells begin + p4est_iterate(mesh.p4est, + C_NULL, # ghost layer + pointer(original_cells), + iter_volume_c, # iter_volume + C_NULL, # iter_face + C_NULL) # iter_corner + end + + # Changed cells are all that haven't been set to zero above + changed_original_cells = original_cells[original_cells .> 0] + + return changed_original_cells +end diff --git a/src/solvers/dg_p4est/containers.jl b/src/solvers/dg_p4est/containers.jl index cff8eb2c42d..8efa2a05f49 100644 --- a/src/solvers/dg_p4est/containers.jl +++ b/src/solvers/dg_p4est/containers.jl @@ -1,5 +1,5 @@ # Note: This is an experimental feature and may be changed in future releases without notice. 
-struct ElementContainerP4est{NDIMS, RealT<:Real, uEltype<:Real, NDIMSP1, NDIMSP2, NDIMSP3} +mutable struct ElementContainerP4est{NDIMS, RealT<:Real, uEltype<:Real, NDIMSP1, NDIMSP2, NDIMSP3} <: AbstractContainer # Physical coordinates at each node node_coordinates ::Array{RealT, NDIMSP2} # [orientation, node_i, node_j, node_k, element] # Jacobian matrix of the transformation @@ -11,94 +11,216 @@ struct ElementContainerP4est{NDIMS, RealT<:Real, uEltype<:Real, NDIMSP1, NDIMSP2 inverse_jacobian ::Array{RealT, NDIMSP1} # [node_i, node_j, node_k, element] # Buffer for calculated surface flux surface_flux_values ::Array{uEltype, NDIMSP2} # [variable, i, j, direction, element] + + # internal `resize!`able storage + _node_coordinates ::Vector{RealT} + _jacobian_matrix ::Vector{RealT} + _contravariant_vectors::Vector{RealT} + _inverse_jacobian ::Vector{RealT} + _surface_flux_values ::Vector{uEltype} +end + +@inline nelements(elements::ElementContainerP4est) = size(elements.node_coordinates, ndims(elements) + 2) +@inline Base.ndims(::ElementContainerP4est{NDIMS}) where NDIMS = NDIMS +@inline Base.eltype(::ElementContainerP4est{NDIMS, RealT, uEltype}) where {NDIMS, RealT, uEltype} = uEltype + +# Only one-dimensional `Array`s are `resize!`able in Julia. +# Hence, we use `Vector`s as internal storage and `resize!` +# them whenever needed. Then, we reuse the same memory by +# `unsafe_wrap`ping multi-dimensional `Array`s around the +# internal storage. +function Base.resize!(elements::ElementContainerP4est, capacity) + @unpack _node_coordinates, _jacobian_matrix, _contravariant_vectors, + _inverse_jacobian, _surface_flux_values = elements + + n_dims = ndims(elements) + n_nodes = size(elements.node_coordinates, 2) + n_variables = size(elements.surface_flux_values, 1) + + resize!(_node_coordinates, n_dims * n_nodes^n_dims * capacity) + elements.node_coordinates = unsafe_wrap(Array, pointer(_node_coordinates), + (n_dims, ntuple(_ -> n_nodes, n_dims)..., capacity)) + + resize!(_jacobian_matrix, n_dims^2 * n_nodes^n_dims * capacity) + elements.jacobian_matrix = unsafe_wrap(Array, pointer(_jacobian_matrix), + (n_dims, n_dims, ntuple(_ -> n_nodes, n_dims)..., capacity)) + + resize!(_contravariant_vectors, length(_jacobian_matrix)) + elements.contravariant_vectors = unsafe_wrap(Array, pointer(_contravariant_vectors), + size(elements.jacobian_matrix)) + + resize!(_inverse_jacobian, n_nodes^n_dims * capacity) + elements.inverse_jacobian = unsafe_wrap(Array, pointer(_inverse_jacobian), + (ntuple(_ -> n_nodes, n_dims)..., capacity)) + + resize!(_surface_flux_values, + n_variables * n_nodes^(n_dims-1) * (n_dims*2) * capacity) + elements.surface_flux_values = unsafe_wrap(Array, pointer(_surface_flux_values), + (n_variables, ntuple(_ -> n_nodes, n_dims-1)..., n_dims*2, capacity)) + + return nothing end # Create element container and initialize element data function init_elements(mesh::P4estMesh{NDIMS, RealT}, equations, basis, ::Type{uEltype}) where {NDIMS, RealT<:Real, uEltype<:Real} - nelements = ncells(mesh) - node_coordinates = Array{RealT, NDIMS+2}(undef, NDIMS, ntuple(_ -> nnodes(basis), NDIMS)..., nelements) - jacobian_matrix = Array{RealT, NDIMS+3}(undef, NDIMS, NDIMS, ntuple(_ -> nnodes(basis), NDIMS)..., nelements) - contravariant_vectors = similar(jacobian_matrix) - inverse_jacobian = Array{RealT, NDIMS+1}(undef, ntuple(_ -> nnodes(basis), NDIMS)..., nelements) - surface_flux_values = Array{uEltype, NDIMS+2}(undef, nvariables(equations), - ntuple(_ -> nnodes(basis), NDIMS-1)..., NDIMS*2, nelements) + + 
_node_coordinates = Vector{RealT}(undef, NDIMS * nnodes(basis)^NDIMS * nelements) + node_coordinates = unsafe_wrap(Array, pointer(_node_coordinates), + (NDIMS, ntuple(_ -> nnodes(basis), NDIMS)..., nelements)) + + _jacobian_matrix = Vector{RealT}(undef, NDIMS^2 * nnodes(basis)^NDIMS * nelements) + jacobian_matrix = unsafe_wrap(Array, pointer(_jacobian_matrix), + (NDIMS, NDIMS, ntuple(_ -> nnodes(basis), NDIMS)..., nelements)) + + _contravariant_vectors = similar(_jacobian_matrix) + contravariant_vectors = unsafe_wrap(Array, pointer(_contravariant_vectors), + size(jacobian_matrix)) + + _inverse_jacobian = Vector{RealT}(undef, nnodes(basis)^NDIMS * nelements) + inverse_jacobian = unsafe_wrap(Array, pointer(_inverse_jacobian), + (ntuple(_ -> nnodes(basis), NDIMS)..., nelements)) + + _surface_flux_values = Vector{uEltype}(undef, + nvariables(equations) * nnodes(basis)^(NDIMS-1) * (NDIMS*2) * nelements) + surface_flux_values = unsafe_wrap(Array, pointer(_surface_flux_values), + (nvariables(equations), ntuple(_ -> nnodes(basis), NDIMS-1)..., NDIMS*2, nelements)) elements = ElementContainerP4est{NDIMS, RealT, uEltype, NDIMS+1, NDIMS+2, NDIMS+3}( - node_coordinates, jacobian_matrix, contravariant_vectors, - inverse_jacobian, surface_flux_values) + node_coordinates, jacobian_matrix, contravariant_vectors, + inverse_jacobian, surface_flux_values, + _node_coordinates, _jacobian_matrix, _contravariant_vectors, + _inverse_jacobian, _surface_flux_values) init_elements!(elements, mesh, basis) return elements end -@inline nelements(elements::ElementContainerP4est) = size(elements.node_coordinates, ndims(elements) + 2) -@inline Base.ndims(::ElementContainerP4est{NDIMS}) where NDIMS = NDIMS -Base.eltype(::ElementContainerP4est{NDIMS, RealT, uEltype}) where {NDIMS, RealT, uEltype} = uEltype +mutable struct InterfaceContainerP4est{NDIMS, uEltype<:Real, NDIMSP2} <: AbstractContainer + u ::Array{uEltype, NDIMSP2} # [primary/secondary, variable, i, j, interface] + element_ids ::Matrix{Int} # [primary/secondary, interface] + node_indices ::Matrix{NTuple{NDIMS, Symbol}} # [primary/secondary, interface] + + # internal `resize!`able storage + _u ::Vector{uEltype} + _element_ids ::Vector{Int} + _node_indices ::Vector{NTuple{NDIMS, Symbol}} +end +@inline ninterfaces(interfaces::InterfaceContainerP4est) = size(interfaces.element_ids, 2) +@inline Base.ndims(::InterfaceContainerP4est{NDIMS}) where NDIMS = NDIMS -struct InterfaceContainerP4est{NDIMS, uEltype<:Real, NDIMSP2} <: AbstractContainer - u::Array{uEltype, NDIMSP2} # [primary/secondary, variable, i, j, interface] - element_ids::Array{Int, 2} # [primary/secondary, interface] - node_indices::Array{NTuple{NDIMS, Symbol}, 2} # [primary/secondary, interface] +# See explanation of Base.resize! for the element container +function Base.resize!(interfaces::InterfaceContainerP4est, capacity) + @unpack _u, _element_ids, _node_indices = interfaces + + n_dims = ndims(interfaces) + n_nodes = size(interfaces.u, 3) + n_variables = size(interfaces.u, 2) + + resize!(_u, 2 * n_variables * n_nodes^(n_dims-1) * capacity) + interfaces.u = unsafe_wrap(Array, pointer(_u), + (2, n_variables, ntuple(_ -> n_nodes, n_dims-1)..., capacity)) + + resize!(_element_ids, 2 * capacity) + interfaces.element_ids = unsafe_wrap(Array, pointer(_element_ids), (2, capacity)) + + resize!(_node_indices, 2 * capacity) + interfaces.node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, capacity)) + + return nothing end + # Create interface container and initialize interface data. 
-function init_interfaces(mesh::P4estMesh, equations, basis, - elements::ElementContainerP4est{NDIMS, RealT, uEltype} - ) where {NDIMS, RealT<:Real, uEltype<:Real} +function init_interfaces(mesh::P4estMesh, equations, basis, elements) + NDIMS = ndims(elements) + uEltype = eltype(elements) + # Initialize container n_interfaces = count_required_interfaces(mesh) - u = Array{uEltype, NDIMS+2}(undef, 2, nvariables(equations), - ntuple(_ -> nnodes(basis), NDIMS-1)..., - n_interfaces) - element_ids = Array{Int, 2}(undef, 2, n_interfaces) - node_indices = Array{NTuple{NDIMS, Symbol}, 2}(undef, 2, n_interfaces) + _u = Vector{uEltype}(undef, 2 * nvariables(equations) * nnodes(basis)^(NDIMS-1) * n_interfaces) + u = unsafe_wrap(Array, pointer(_u), + (2, nvariables(equations), ntuple(_ -> nnodes(basis), NDIMS-1)..., n_interfaces)) + + _element_ids = Vector{Int}(undef, 2 * n_interfaces) + element_ids = unsafe_wrap(Array, pointer(_element_ids), (2, n_interfaces)) - interfaces = InterfaceContainerP4est{NDIMS, uEltype, NDIMS+2}(u, element_ids, node_indices) + _node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, 2 * n_interfaces) + node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, n_interfaces)) + + interfaces = InterfaceContainerP4est{NDIMS, uEltype, NDIMS+2}(u, element_ids, node_indices, + _u, _element_ids, _node_indices) init_interfaces!(interfaces, mesh) return interfaces end -@inline ninterfaces(interfaces::InterfaceContainerP4est) = size(interfaces.element_ids, 2) - -struct BoundaryContainerP4est{NDIMS, uEltype<:Real, NDIMSP1} <: AbstractContainer - u::Array{uEltype, NDIMSP1} # [variables, i, j, boundary] - element_ids::Vector{Int} # [boundary] +mutable struct BoundaryContainerP4est{NDIMS, uEltype<:Real, NDIMSP1} <: AbstractContainer + u ::Array{uEltype, NDIMSP1} # [variables, i, j, boundary] + element_ids ::Vector{Int} # [boundary] node_indices::Vector{NTuple{NDIMS, Symbol}} # [boundary] - name::Vector{Symbol} # [boundary] + name ::Vector{Symbol} # [boundary] + + # internal `resize!`able storage + _u ::Vector{uEltype} +end + +@inline nboundaries(boundaries::BoundaryContainerP4est) = length(boundaries.element_ids) +@inline Base.ndims(::BoundaryContainerP4est{NDIMS}) where NDIMS = NDIMS + +# See explanation of Base.resize! for the element container +function Base.resize!(boundaries::BoundaryContainerP4est, capacity) + @unpack _u, element_ids, node_indices, name = boundaries + + n_dims = ndims(boundaries) + n_nodes = size(boundaries.u, 2) + n_variables = size(boundaries.u, 1) + + resize!(_u, n_variables * n_nodes^(n_dims-1) * capacity) + boundaries.u = unsafe_wrap(Array, pointer(_u), + (n_variables, ntuple(_ -> n_nodes, n_dims-1)..., capacity)) + + resize!(element_ids, capacity) + + resize!(node_indices, capacity) + + resize!(name, capacity) + + return nothing end + # Create interface container and initialize interface data in `elements`. 
-function init_boundaries(mesh::P4estMesh, equations, basis, - elements::ElementContainerP4est{NDIMS, RealT, uEltype} - ) where {NDIMS, RealT<:Real, uEltype<:Real} +function init_boundaries(mesh::P4estMesh, equations, basis, elements) + NDIMS = ndims(elements) + uEltype = eltype(elements) + # Initialize container n_boundaries = count_required_boundaries(mesh) - u = Array{uEltype, NDIMS+1}(undef, nvariables(equations), - ntuple(_ -> nnodes(basis), NDIMS-1)..., - n_boundaries) - element_ids = Vector{Int}(undef, n_boundaries) + _u = Vector{uEltype}(undef, nvariables(equations) * nnodes(basis)^(NDIMS-1) * n_boundaries) + u = unsafe_wrap(Array, pointer(_u), + (nvariables(equations), ntuple(_ -> nnodes(basis), NDIMS-1)..., n_boundaries)) + + element_ids = Vector{Int}(undef, n_boundaries) node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, n_boundaries) - names = Vector{Symbol}(undef, n_boundaries) + names = Vector{Symbol}(undef, n_boundaries) - boundaries = BoundaryContainerP4est{NDIMS, uEltype, NDIMS+1}(u, element_ids, node_indices, names) + boundaries = BoundaryContainerP4est{NDIMS, uEltype, NDIMS+1}(u, element_ids, + node_indices, names, _u) init_boundaries!(boundaries, mesh) return boundaries end -@inline nboundaries(boundaries::BoundaryContainerP4est) = length(boundaries.element_ids) - # Container data structure (structure-of-arrays style) for DG L2 mortars # @@ -125,36 +247,92 @@ end # | \---------------------------/ \----------------------------/ # | # ⋅----> ξ -struct MortarContainerP4est{NDIMS, uEltype<:Real, NDIMSP1, NDIMSP3} <: AbstractContainer - u::Array{uEltype, NDIMSP3} # [small/large side, variable, position, i, j, mortar] - element_ids::Array{Int, 2} # [position, mortar] - node_indices::Array{NTuple{NDIMS, Symbol}, 2} # [small/large, mortar] +mutable struct MortarContainerP4est{NDIMS, uEltype<:Real, NDIMSP1, NDIMSP3} <: AbstractContainer + u ::Array{uEltype, NDIMSP3} # [small/large side, variable, position, i, j, mortar] + element_ids ::Matrix{Int} # [position, mortar] + node_indices ::Matrix{NTuple{NDIMS, Symbol}} # [small/large, mortar] + + # internal `resize!`able storage + _u ::Vector{uEltype} + _element_ids ::Vector{Int} + _node_indices ::Vector{NTuple{NDIMS, Symbol}} end +@inline nmortars(mortars::MortarContainerP4est) = size(mortars.element_ids, 2) +@inline Base.ndims(::MortarContainerP4est{NDIMS}) where NDIMS = NDIMS + +# See explanation of Base.resize! for the element container +function Base.resize!(mortars::MortarContainerP4est, capacity) + @unpack _u, _element_ids, _node_indices = mortars + + n_dims = ndims(mortars) + n_nodes = size(mortars.u, 4) + n_variables = size(mortars.u, 2) + + resize!(_u, 2 * n_variables * 2^(n_dims-1) * n_nodes^(n_dims-1) * capacity) + mortars.u = unsafe_wrap(Array, pointer(_u), + (2, n_variables, 2^(n_dims-1), ntuple(_ -> n_nodes, n_dims-1)..., capacity)) + + resize!(_element_ids, (2^(n_dims-1) + 1) * capacity) + mortars.element_ids = unsafe_wrap(Array, pointer(_element_ids), + (2^(n_dims-1) + 1, capacity)) + + resize!(_node_indices, 2 * capacity) + mortars.node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, capacity)) + + return nothing +end + + # Create mortar container and initialize mortar data. 
-function init_mortars(mesh::P4estMesh, equations, basis, - elements::ElementContainerP4est{NDIMS, RealT, uEltype} - ) where {NDIMS, RealT<:Real, uEltype<:Real} +function init_mortars(mesh::P4estMesh, equations, basis, elements) + NDIMS = ndims(elements) + uEltype = eltype(elements) + # Initialize container n_mortars = count_required_mortars(mesh) - u = Array{uEltype, NDIMS+3}(undef, 2, - nvariables(equations), - 2^(NDIMS-1), - ntuple(_ -> nnodes(basis), NDIMS-1)..., - n_mortars) - element_ids = Array{Int, 2}(undef, 2^(NDIMS-1) + 1, n_mortars) - node_indices = Array{NTuple{NDIMS, Symbol}, 2}(undef, 2, n_mortars) + _u = Vector{uEltype}(undef, + 2 * nvariables(equations) * 2^(NDIMS-1) * nnodes(basis)^(NDIMS-1) * n_mortars) + u = unsafe_wrap(Array, pointer(_u), + (2, nvariables(equations), 2^(NDIMS-1), ntuple(_ -> nnodes(basis), NDIMS-1)..., n_mortars)) + + _element_ids = Vector{Int}(undef, (2^(NDIMS-1) + 1) * n_mortars) + element_ids = unsafe_wrap(Array, pointer(_element_ids), (2^(NDIMS-1) + 1, n_mortars)) - mortars = MortarContainerP4est{NDIMS, uEltype, NDIMS+1, NDIMS+3}(u, element_ids, - node_indices) + _node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, 2 * n_mortars) + node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, n_mortars)) + + mortars = MortarContainerP4est{NDIMS, uEltype, NDIMS+1, NDIMS+3}(u, element_ids, node_indices, + _u, _element_ids, _node_indices) init_mortars!(mortars, mesh) return mortars end -@inline nmortars(mortars::MortarContainerP4est) = size(mortars.element_ids, 2) + +function reinitialize_containers!(mesh::P4estMesh, equations, dg::DGSEM, cache) + # Re-initialize elements container + @unpack elements = cache + resize!(elements, ncells(mesh)) + init_elements!(elements, mesh, dg.basis) + + # re-initialize interfaces container + @unpack interfaces = cache + resize!(interfaces, count_required_interfaces(mesh)) + init_interfaces!(interfaces, mesh) + + # re-initialize boundaries container + @unpack boundaries = cache + resize!(boundaries, count_required_boundaries(mesh)) + init_boundaries!(boundaries, mesh) + + # re-initialize mortars container + @unpack mortars = cache + resize!(mortars, count_required_mortars(mesh)) + init_mortars!(mortars, mesh) +end # Iterate over all interfaces and count inner interfaces From 4c3fab2ca77714ac6335edf4adff10d3aa8c6d5f Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Fri, 28 May 2021 00:49:21 +0200 Subject: [PATCH 02/27] Fix TreeMesh AMR --- src/callbacks_step/amr.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 99329c289fa..40669d01c5f 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -222,8 +222,8 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, # refine mesh refined_original_cells = @timed timer() "mesh" refine!(mesh.tree, to_refine) - # Find all indices of elements whose cell ids are in cells_to_refine - elements_to_refine = findall(cell_id -> cell_id in cells_to_refine, cache.elements.cell_ids) + # Find all indices of elements whose cell ids are in refined_original_cells + elements_to_refine = findall(cell_id -> cell_id in refined_original_cells, cache.elements.cell_ids) # refine solver @timed timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_refine) @@ -283,8 +283,8 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, end end - # Find all indices of elements whose cell ids 
are in child_cells_to_coarsen - elements_to_remove = findall(cell_id -> cell_id in child_cells_to_coarsen, cache.elements.cell_ids) + # Find all indices of elements whose cell ids are in removed_child_cells + elements_to_remove = findall(cell_id -> cell_id in removed_child_cells, cache.elements.cell_ids) # coarsen solver @timed timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_remove) From ebd27f6bf0017ec215e103228029649bc61a2d0f Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Fri, 28 May 2021 01:05:46 +0200 Subject: [PATCH 03/27] Fix print_amr_information --- src/callbacks_step/analysis.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl index ecf30386cf5..6e4075f998e 100644 --- a/src/callbacks_step/analysis.jl +++ b/src/callbacks_step/analysis.jl @@ -390,13 +390,13 @@ function print_amr_information(callbacks, mesh::P4estMesh, solver, cache) @. elements_per_level += tree.quadrants_per_level end - min_level = findfirst(i -> i > 0, elements_per_level) - max_level = findlast(i -> i > 0, elements_per_level) + min_level = findfirst(i -> i > 0, elements_per_level) - 1 + max_level = findlast(i -> i > 0, elements_per_level) - 1 for level = max_level:-1:min_level+1 - mpi_println(" ├── level $level: " * @sprintf("% 14d", elements_per_level[level])) + mpi_println(" ├── level $level: " * @sprintf("% 14d", elements_per_level[level + 1])) end - mpi_println(" └── level $min_level: " * @sprintf("% 14d", elements_per_level[min_level])) + mpi_println(" └── level $min_level: " * @sprintf("% 14d", elements_per_level[min_level + 1])) return nothing end From 5350fc58f7ade966ba285f34bc6f2110fbe28073 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Fri, 28 May 2021 01:06:08 +0200 Subject: [PATCH 04/27] Fix refinement --- src/mesh/p4est_mesh.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mesh/p4est_mesh.jl b/src/mesh/p4est_mesh.jl index d4e1e774fc6..4ee1635b12f 100644 --- a/src/mesh/p4est_mesh.jl +++ b/src/mesh/p4est_mesh.jl @@ -530,7 +530,7 @@ function save_original_id_iter_volume(info, user_data) # Unpack quadrant's user data ([global quad ID, controller_value]) ptr = Ptr{Int}(info.quad.p.user_data) # Save global quad ID - unsafe_store!(ptr, quad_id, 2) + unsafe_store!(ptr, quad_id, 1) return nothing end From 28bc847ff00d43b9720b79771881bcee5daf91a2 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Fri, 28 May 2021 01:28:11 +0200 Subject: [PATCH 05/27] Fix CurvedMesh --- src/callbacks_step/analysis.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl index 6e4075f998e..a4931b12969 100644 --- a/src/callbacks_step/analysis.jl +++ b/src/callbacks_step/analysis.jl @@ -377,6 +377,9 @@ function print_amr_information(callbacks, mesh::TreeMesh, solver, cache) return nothing end +function print_amr_information(callbacks, mesh::CurvedMesh, solver, cache) + return nothing +end # Print level information only if AMR is enabled function print_amr_information(callbacks, mesh::P4estMesh, solver, cache) From 7ee5a7ea9e74e158fd9d8e35297bc9cb01984f09 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Sat, 29 May 2021 01:15:03 +0200 Subject: [PATCH 06/27] Fix AMR with p4est --- src/mesh/mesh_io.jl | 4 +- src/mesh/p4est_mesh.jl 
| 139 ++++++++++++++++++++++++++--------------- 2 files changed, 89 insertions(+), 54 deletions(-) diff --git a/src/mesh/mesh_io.jl b/src/mesh/mesh_io.jl index 0cbbd08a405..c19e7d0f648 100644 --- a/src/mesh/mesh_io.jl +++ b/src/mesh/mesh_io.jl @@ -258,10 +258,8 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT) conn_vec = Vector{Ptr{p4est_connectivity_t}}(undef, 1) p4est = p4est_load(p4est_file, C_NULL, 0, false, C_NULL, pointer(conn_vec)) - ghost = p4est_ghost_new(p4est, P4EST_CONNECT_FACE) - p4est_mesh = p4est_mesh_new(p4est, ghost, P4EST_CONNECT_FACE) - mesh = P4estMesh{ndims, RealT, ndims+2}(p4est, p4est_mesh, tree_node_coordinates, + mesh = P4estMesh{ndims, RealT, ndims+2}(p4est, tree_node_coordinates, nodes, boundary_names, "", false) else error("Unknown mesh type!") diff --git a/src/mesh/p4est_mesh.jl b/src/mesh/p4est_mesh.jl index 4ee1635b12f..3b29eecb3da 100644 --- a/src/mesh/p4est_mesh.jl +++ b/src/mesh/p4est_mesh.jl @@ -9,7 +9,6 @@ to manage trees and mesh refinement. """ mutable struct P4estMesh{NDIMS, RealT<:Real, NDIMSP2} <: AbstractMesh{NDIMS} p4est ::Ptr{p4est_t} - p4est_mesh ::Ptr{p4est_mesh_t} # Coordinates at the nodes specified by the tensor product of `nodes` (NDIMS times). # This specifies the geometry interpolation for each tree. tree_node_coordinates ::Array{RealT, NDIMSP2} # [dimension, i, j, k, tree] @@ -107,12 +106,8 @@ function P4estMesh(trees_per_dimension; polydeg, # Use Int-Vector of size 2 as quadrant user data p4est = p4est_new_ext(0, conn, 0, initial_refinement_level, true, 2 * sizeof(Int), C_NULL, C_NULL) - ghost = p4est_ghost_new(p4est, P4EST_CONNECT_FACE) - p4est_mesh = p4est_mesh_new(p4est, ghost, P4EST_CONNECT_FACE) - # Destroy p4est structs at exit of Julia function destroy_p4est_structs() - p4est_mesh_destroy(p4est_mesh) p4est_ghost_destroy(ghost) p4est_destroy(p4est) p4est_connectivity_destroy(conn) @@ -146,7 +141,7 @@ function P4estMesh(trees_per_dimension; polydeg, end end - return P4estMesh{NDIMS, RealT, NDIMS+2}(p4est, p4est_mesh, tree_node_coordinates, nodes, + return P4estMesh{NDIMS, RealT, NDIMS+2}(p4est, tree_node_coordinates, nodes, boundary_names, "", unsaved_changes) end @@ -202,13 +197,11 @@ function P4estMesh(meshfile::String; # Use Int-Vector of size 2 as quadrant user data p4est = p4est_new_ext(0, conn, 0, initial_refinement_level, true, 2 * sizeof(Int), C_NULL, C_NULL) - ghost = p4est_ghost_new(p4est, P4EST_CONNECT_FACE) - p4est_mesh = p4est_mesh_new(p4est, ghost, P4EST_CONNECT_FACE) # There's no simple and generic way to distinguish boundaries. Name all of them :all. 
boundary_names = fill(:all, 4, n_trees) - return P4estMesh{NDIMS, RealT, NDIMS+2}(p4est, p4est_mesh, tree_node_coordinates, nodes, + return P4estMesh{NDIMS, RealT, NDIMS+2}(p4est, tree_node_coordinates, nodes, boundary_names, "", unsaved_changes) end @@ -440,7 +433,7 @@ function refine!(mesh::P4estMesh) # Refine marked cells refine_fn_c = @cfunction(refine_fn, Cint, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) - @timed timer() "refine_unbalanced" p4est_refine(mesh.p4est, true, refine_fn_c, init_fn_c) + @timed timer() "refine_unbalanced" p4est_refine(mesh.p4est, false, refine_fn_c, init_fn_c) @timed timer() "rebalance" p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn_c) # Due to a bug in p4est, the forest needs to be rebalanced twice sometimes @@ -451,30 +444,6 @@ function refine!(mesh::P4estMesh) end -function check_balanced_iter_volume(info, user_data) - # Global trees array - trees = convert_sc_array(p4est_tree_t, info.p4est.trees) - # Quadrant numbering offset of this quadrant, one-based indexing - offset = trees[info.treeid + 1].quadrants_offset - # Global quad ID - quad_id = offset + info.quadid - - p4est_mesh = Ptr{p4est_mesh_t}(info.p4est.user_pointer) - - quad_to_face = unsafe_wrap(Array, p4est_mesh.quad_to_face, (4, quad_id + 1)) - - # quad_to_face will be negative if there are two half-size neighbors in this direction. - # Therefore, coarsening this cell will make the forest unbalanced. - if any(i -> i < 0, quad_to_face[:, quad_id + 1]) - # Unpack quadrant's user data ([global quad ID, controller_value]) - ptr = Ptr{Int}(info.quad.p.user_data) - # Set controller value to 0, don't coarsen! - unsafe_store!(ptr, 0, 2) - end - - return nothing -end - function coarsen_fn(p4est, which_tree, quadrants_ptr) quadrants = unsafe_wrap(Array, quadrants_ptr, 4) @@ -482,7 +451,7 @@ function coarsen_fn(p4est, which_tree, quadrants_ptr) # Unpack quadrant's user data ([global quad ID, controller_value]) if all(i -> unsafe_load(Ptr{Int}(quadrants[i].p.user_data), 2) < 0, eachindex(quadrants)) # return true (coarsen) - return Cint(0) + return Cint(1) else # return false (don't coarsen) return Cint(0) @@ -492,29 +461,49 @@ end # Coarsen marked cells if the forest will stay balanced # Return a list of all cells that have been coarsened function coarsen!(mesh::P4estMesh) + # Copy original element IDs to quad user data storage original_n_cells = ncells(mesh) + save_original_ids(mesh) - # Copy original element IDs to quad user data storage + # Coarsen marked cells + coarsen_fn_c = @cfunction(coarsen_fn, Cint, + (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{Ptr{p4est_quadrant_t}})) + init_fn_c = @cfunction(init_fn, Cvoid, + (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) + + @timed timer() "coarsen!" p4est_coarsen(mesh.p4est, false, coarsen_fn_c, init_fn_c) + + new_cells = collect_new_cells(mesh) + coarsened_cells_vec = collect_changed_cells(mesh, original_n_cells) + # 2^NDIMS changed cells should have been coarsened to one new cell. + # This matrix will store the IDs of all cells that have been coarsened to cell new_cells[i] + # in the i-th column. + coarsened_cells = reshape(coarsened_cells_vec, 2^ndims(mesh), length(new_cells)) + + # Save new original IDs to find out what changed after balancing + intermediate_n_cells = ncells(mesh) save_original_ids(mesh) - # Mark cells whose coarsening will make the forest unbalanced as "don't coarsen" - # TODO P4EST recursive. A cell could be coarsenable after coarsening another cell first. 
- mesh.p4est.user_pointer = mesh.p4est_mesh - iter_volume_c = @cfunction(check_balanced_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) + @timed timer() "rebalance" p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn_c) + # Due to a bug in p4est, the forest needs to be rebalanced twice sometimes + # See https://github.com/cburstedde/p4est/issues/112 + @timed timer() "rebalance" p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn_c) - p4est_iterate(mesh.p4est, - C_NULL, # ghost layer - C_NULL, # user data - iter_volume_c, # iter_volume - C_NULL, # iter_face - C_NULL) # iter_corner + refined_cells = collect_changed_cells(mesh, intermediate_n_cells) - # Coarsen marked cells - coarsen_fn_c = @cfunction(coarsen_fn, Cint, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{Ptr{p4est_quadrant_t}})) - init_fn_c = @cfunction(init_fn, Cvoid, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) - @timed timer() "coarsen!" p4est_coarsen(mesh.p4est, true, coarsen_fn_c, init_fn_c) + # Some cells may have been coarsened even though they unbalanced the forest. + # These cells have now been refined again by p4est_balance. + # Find intermediate ID (ID of coarse cell between coarsening and balancing) + # of each cell that have been coarsened and then refined again. + for refined_cell in refined_cells + # i-th cell of the ones that have created by coarsening has been refined again + i = findfirst(isequal(refined_cell), new_cells) - return collect_changed_cells(mesh, original_n_cells) + # Remove IDs of the 2^NDIMS cells that have been coarsened to this cell + coarsened_cells[:, i] .= -1 + end + + return coarsened_cells_vec[coarsened_cells_vec .>= 0] end @@ -588,3 +577,51 @@ function collect_changed_cells(mesh, original_n_cells) return changed_original_cells end + + +# Extract newly created cells +function collect_new_iter_volume(info, user_data) + # The original element ID has been saved to user_data before + # Unpack quadrant's user data ([global quad ID, controller_value]) + quad_data_ptr = Ptr{Int}(info.quad.p.user_data) + original_id = unsafe_load(quad_data_ptr, 1) + + # original_id of cells that have been newly created during refinement is -1 + if original_id < 0 + # Global trees array + trees = convert_sc_array(p4est_tree_t, info.p4est.trees) + # Quadrant numbering offset of this quadrant, one-based indexing + offset = trees[info.treeid + 1].quadrants_offset + # Global quad ID + quad_id = offset + info.quadid + + # Unpack user_data = original_cells + user_data_ptr = Ptr{Int}(user_data) + + # Mark cell as "newly created during refinement/coarsening/balancing" + unsafe_store!(user_data_ptr, 1, quad_id + 1) + end + + return nothing +end + +function collect_new_cells(mesh) + cell_is_new = zeros(Int, ncells(mesh)) + + # Iterate over all quads and set original cells that haven't been changed to zero + iter_volume_c = @cfunction(collect_new_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) + + GC.@preserve cell_is_new begin + p4est_iterate(mesh.p4est, + C_NULL, # ghost layer + pointer(cell_is_new), + iter_volume_c, # iter_volume + C_NULL, # iter_face + C_NULL) # iter_corner + end + + # Changed cells are all that haven't been set to zero above + new_cells = findall(isequal(1), cell_is_new) + + return new_cells +end From 0f9e1c14031a404badc29d1401fdc216db0544f0 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Sat, 29 May 2021 11:19:55 +0200 Subject: [PATCH 07/27] Add general print_amr_information for non-AMR meshes --- 
src/callbacks_step/analysis.jl | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl index a4931b12969..f6c7bab0c2d 100644 --- a/src/callbacks_step/analysis.jl +++ b/src/callbacks_step/analysis.jl @@ -354,7 +354,7 @@ end # Print level information only if AMR is enabled -function print_amr_information(callbacks, mesh::TreeMesh, solver, cache) +function print_amr_information(callbacks, mesh, solver, cache) # Return early if there is nothing to print uses_amr(callbacks) || return nothing @@ -377,10 +377,6 @@ function print_amr_information(callbacks, mesh::TreeMesh, solver, cache) return nothing end -function print_amr_information(callbacks, mesh::CurvedMesh, solver, cache) - return nothing -end - # Print level information only if AMR is enabled function print_amr_information(callbacks, mesh::P4estMesh, solver, cache) From 1579c17a6ae8b3520904b256b6734a71b9e210f4 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Sat, 29 May 2021 15:00:22 +0200 Subject: [PATCH 08/27] Fix boundary conditions with AMR --- src/solvers/dg_unstructured_quad/dg_2d.jl | 6 +++ .../sort_boundary_conditions.jl | 42 +++++++++++-------- 2 files changed, 31 insertions(+), 17 deletions(-) diff --git a/src/solvers/dg_unstructured_quad/dg_2d.jl b/src/solvers/dg_unstructured_quad/dg_2d.jl index 697d2e6ac05..77ce7afcd7b 100644 --- a/src/solvers/dg_unstructured_quad/dg_2d.jl +++ b/src/solvers/dg_unstructured_quad/dg_2d.jl @@ -307,6 +307,12 @@ end function calc_boundary_flux!(cache, t, boundary_conditions, mesh::Union{UnstructuredQuadMesh, P4estMesh}, equations, dg::DG) + if nboundaries(cache.boundaries) != nboundaries(boundary_conditions) + # When using AMR, the boundary container may have been resized. + # In this case, the boundary types container must be reinitialized as well. + initialize!(boundary_conditions, cache) + end + @unpack boundary_condition_types, boundary_indices = boundary_conditions calc_boundary_flux_by_type!(cache, t, boundary_condition_types, boundary_indices, diff --git a/src/solvers/dg_unstructured_quad/sort_boundary_conditions.jl b/src/solvers/dg_unstructured_quad/sort_boundary_conditions.jl index 09bc0c6a511..e9f63938886 100644 --- a/src/solvers/dg_unstructured_quad/sort_boundary_conditions.jl +++ b/src/solvers/dg_unstructured_quad/sort_boundary_conditions.jl @@ -9,21 +9,38 @@ set by the user in the elixir file is also stored for printing. !!! warning "Experimental code" This boundary condition container is experimental and can change any time. """ -struct UnstructuredQuadSortedBoundaryTypes{N, BCs<:NTuple{N, Any}} +mutable struct UnstructuredQuadSortedBoundaryTypes{N, BCs<:NTuple{N, Any}} boundary_condition_types::BCs # specific boundary condition type(s), e.g. 
BoundaryConditionWall boundary_indices::NTuple{N, Vector{Int}} # integer vectors containing global boundary indices boundary_dictionary::Dict{Symbol, Any} # boundary conditions as set by the user in the elixir file end +@inline nboundaries(container::UnstructuredQuadSortedBoundaryTypes) = sum(container.boundary_indices .|> length) + # constructor that "eats" the original boundary condition dictionary and sorts the information # from the `UnstructuredBoundaryContainer2D` in cache.boundaries according to the boundary types # and stores the associated global boundary indexing in NTuple function UnstructuredQuadSortedBoundaryTypes(boundary_conditions::Dict, cache) + # extract the unique boundary function routines from the dictionary + boundary_condition_types = Tuple(unique(collect(values(boundary_conditions)))) + n_boundary_types = length(boundary_condition_types) + boundary_indices = ntuple(_ -> [], n_boundary_types) + + container = UnstructuredQuadSortedBoundaryTypes{n_boundary_types, typeof(boundary_condition_types)}( + boundary_condition_types, boundary_indices, boundary_conditions) + + initialize!(container, cache) +end + + +function initialize!(boundary_types_container::UnstructuredQuadSortedBoundaryTypes{N}, cache) where N + @unpack boundary_dictionary, boundary_condition_types = boundary_types_container + unique_names = unique(cache.boundaries.name) # Verify that each Dict key is a valid boundary name - for key in keys(boundary_conditions) + for key in keys(boundary_dictionary) if !(key in unique_names) error("Key $(repr(key)) is not a valid boundary name") end @@ -31,20 +48,16 @@ function UnstructuredQuadSortedBoundaryTypes(boundary_conditions::Dict, cache) # Verify that each boundary has a boundary condition for name in unique_names - if name !== Symbol("---") && !haskey(boundary_conditions, name) + if name !== Symbol("---") && !haskey(boundary_dictionary, name) error("No boundary condition specified for boundary $(repr(name))") end end - # extract the unique boundary function routines from the dictionary - boundary_condition_types = Tuple(unique(collect(values(boundary_conditions)))) - n_boundary_types = length(boundary_condition_types) - # pull and sort the indexing for each boundary type - _boundary_indices = Vector{Any}(nothing, n_boundary_types) - for j in 1:n_boundary_types + _boundary_indices = Vector{Any}(nothing, N) + for j in 1:N indices_for_current_type = Int[] - for (test_name, test_condition) in boundary_conditions + for (test_name, test_condition) in boundary_dictionary temp_indices = findall(x->x===test_name, cache.boundaries.name) if test_condition === boundary_condition_types[j] indices_for_current_type = vcat(indices_for_current_type, temp_indices) @@ -54,12 +67,7 @@ function UnstructuredQuadSortedBoundaryTypes(boundary_conditions::Dict, cache) end # convert the work array with the boundary indices into a tuple - boundary_indices = Tuple(_boundary_indices) - - boundary_dictionary = boundary_conditions + boundary_types_container.boundary_indices = Tuple(_boundary_indices) - return UnstructuredQuadSortedBoundaryTypes{n_boundary_types, typeof(boundary_condition_types)}( - boundary_condition_types, - boundary_indices, - boundary_dictionary) + return boundary_types_container end From af196c66b557e1c881604fef794248589eafad69 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Mon, 31 May 2021 01:51:35 +0200 Subject: [PATCH 09/27] Fix AMR on unstructured meshes --- src/solvers/dg_p4est/containers_2d.jl | 8 +++---- 
src/solvers/dg_p4est/dg_2d.jl | 32 ++++++++++++++++++--------- 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/src/solvers/dg_p4est/containers_2d.jl b/src/solvers/dg_p4est/containers_2d.jl index 3f29cfe42b0..0937cabebe7 100644 --- a/src/solvers/dg_p4est/containers_2d.jl +++ b/src/solvers/dg_p4est/containers_2d.jl @@ -246,7 +246,7 @@ function init_mortars_iter_face(info, user_data) trees[sides[2].treeid + 1].quadrants_offset] if sides[1].is_hanging == true - # Left is small, right is large + # Left is small (1), right is large (2) small_large = [1, 2] local_small_quad_ids = sides[1].is.hanging.quadid @@ -257,7 +257,7 @@ function init_mortars_iter_face(info, user_data) @assert sides[2].is_hanging == false large_quad_id = offsets[2] + sides[2].is.full.quadid else # sides[2].is_hanging == true - # Left is large, right is small + # Left is large (2), right is small (1) small_large = [2, 1] local_small_quad_ids = sides[2].is.hanging.quadid @@ -288,10 +288,10 @@ function init_mortars_iter_face(info, user_data) # For orientation == 1, the large side needs to be indexed backwards # relative to the mortar. if small_large[side] == 1 || orientation == 0 - # Forward indexing + # Forward indexing for small side or orientation == 0 i = :i else - # Backward indexing + # Backward indexing for large side with reversed orientation i = :i_backwards end diff --git a/src/solvers/dg_p4est/dg_2d.jl b/src/solvers/dg_p4est/dg_2d.jl index 445d94965d0..91ee359740e 100644 --- a/src/solvers/dg_p4est/dg_2d.jl +++ b/src/solvers/dg_p4est/dg_2d.jl @@ -165,15 +165,15 @@ function prolong2mortars!(cache, u, size_ = (nnodes(dg), nnodes(dg)) @threaded for mortar in eachmortar(dg, cache) - small_node_indices = node_indices[1, mortar] - large_node_indices = node_indices[2, mortar] + small_indices = node_indices[1, mortar] + large_indices = node_indices[2, mortar] # Copy solution small to small for pos in 1:2, i in eachnode(dg), v in eachvariable(equations) # Use Tuple `node_indices` and `evaluate_index` to copy values # from the correct face and in the correct orientation - cache.mortars.u[1, v, pos, i, mortar] = u[v, evaluate_index(small_node_indices, size_, 1, i), - evaluate_index(small_node_indices, size_, 2, i), + cache.mortars.u[1, v, pos, i, mortar] = u[v, evaluate_index(small_indices, size_, 1, i), + evaluate_index(small_indices, size_, 2, i), element_ids[pos, mortar]] end @@ -185,8 +185,8 @@ function prolong2mortars!(cache, u, for i in eachnode(dg), v in eachvariable(equations) # Use Tuple `node_indices` and `evaluate_index` to copy values # from the correct face and in the correct orientation - u_buffer[v, i] = u[v, evaluate_index(large_node_indices, size_, 1, i), - evaluate_index(large_node_indices, size_, 2, i), + u_buffer[v, i] = u[v, evaluate_index(large_indices, size_, 1, i), + evaluate_index(large_indices, size_, 2, i), element_ids[3, mortar]] end @@ -237,9 +237,13 @@ function calc_mortar_flux!(surface_flux_values, set_node_vars!(fstar[pos], flux_, equations, dg, i) end + # Buffer to interpolate flux values of the large element to before copying + # in the correct orientation + u_buffer = cache.u_threaded[Threads.threadid()] + mortar_fluxes_to_elements!(surface_flux_values, mesh, equations, mortar_l2, dg, cache, - mortar, fstar) + mortar, fstar, u_buffer) end return nothing @@ -249,7 +253,7 @@ end @inline function mortar_fluxes_to_elements!(surface_flux_values, mesh::P4estMesh{2}, equations, mortar_l2::LobattoLegendreMortarL2, - dg::DGSEM, cache, mortar, fstar) + dg::DGSEM, cache, mortar, fstar, 
u_buffer) @unpack element_ids, node_indices = cache.mortars small_indices = node_indices[1, mortar] @@ -264,7 +268,7 @@ end for pos in 1:2, i in eachnode(dg), v in eachvariable(equations) # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux # to left and right element storage in the correct orientation - surface_index = evaluate_index_surface(node_indices[pos, mortar], size_, 1, i) + surface_index = evaluate_index_surface(small_indices, size_, 1, i) surface_flux_values[v, surface_index, small_direction, element_ids[pos, mortar]] = fstar[pos][v, i] @@ -282,10 +286,18 @@ end # to obtain the flux of the large element. # # TODO: Taal performance, see comment in dg_tree/dg_2d.jl - multiply_dimensionwise!(view(surface_flux_values, :, :, large_direction, large_element), + multiply_dimensionwise!(u_buffer, mortar_l2.reverse_upper, -2 * fstar[2], mortar_l2.reverse_lower, -2 * fstar[1]) + # Copy interpolated flux values from buffer to large element face in the correct orientation + for i in eachnode(dg), v in eachvariable(equations) + # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux + # to surface flux storage in the correct orientation + surface_index = evaluate_index_surface(large_indices, size_, 1, i) + surface_flux_values[v, surface_index, large_direction, large_element] = u_buffer[v, i] + end + return nothing end From 190ac3e5398c05dc1817dc2384d57a560d1c8282 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Tue, 1 Jun 2021 12:42:10 +0200 Subject: [PATCH 10/27] Add tests for AMR with p4est --- ...r_advection_amr_p4est_unstructured_flag.jl | 92 +++++++++++ ...dvection_amr_solution_independent_p4est.jl | 149 ++++++++++++++++++ ...ir_advection_p4est_non_conforming_flag.jl} | 2 +- ..._p4est_non_conforming_flag_unstructured.jl | 95 +++++++++++ test/test_examples_2d_p4est.jl | 22 ++- 5 files changed, 357 insertions(+), 3 deletions(-) create mode 100644 examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl create mode 100644 examples/2d/elixir_advection_amr_solution_independent_p4est.jl rename examples/2d/{elixir_advection_p4est_non_conforming.jl => elixir_advection_p4est_non_conforming_flag.jl} (98%) create mode 100644 examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl diff --git a/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl new file mode 100644 index 00000000000..cda6bb40f9b --- /dev/null +++ b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl @@ -0,0 +1,92 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advectionvelocity = (1.0, 1.0) +# advectionvelocity = (0.2, -0.3) +equations = LinearScalarAdvectionEquation2D(advectionvelocity) + +initial_condition = initial_condition_gauss + +boundary_condition = BoundaryConditionDirichlet(initial_condition) +boundary_conditions = Dict( + :all => boundary_condition +) + +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) + +# Deformed rectangle that looks like a waving flag, +# lower and upper faces are sinus curves, left and right are vertical lines. 
+f1(s) = SVector(-5.0, 5 * s - 5.0) +f2(s) = SVector( 5.0, 5 * s + 5.0) +f3(s) = SVector(5 * s, -5.0 + 5 * sin(0.5 * pi * s)) +f4(s) = SVector(5 * s, 5.0 + 5 * sin(0.5 * pi * s)) +faces = (f1, f2, f3, f4) + +Trixi.validate_faces(faces) +mapping_flag = Trixi.transfinite_mapping(faces) + +# Unstructured mesh with 24 cells of the square domain [-1, 1]^n +mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") +isfile(mesh_file) || download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + mesh_file) + +mesh = P4estMesh(mesh_file, polydeg=3, + mapping=mapping_flag, + initial_refinement_level=1) + + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions=boundary_conditions) + + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 10.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval=analysis_interval, + extra_analysis_integrals=(entropy,)) + +alive_callback = AliveCallback(analysis_interval=analysis_interval) + +save_restart = SaveRestartCallback(interval=100, + save_final_restart=true) + +save_solution = SaveSolutionCallback(interval=1, + save_initial_solution=true, + save_final_solution=true, + solution_variables=cons2prim) + +amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable=first), + base_level=1, + med_level=2, med_threshold=0.1, + max_level=3, max_threshold=0.6) +amr_callback = AMRCallback(semi, amr_controller, + interval=5, + adapt_initial_condition=true, + adapt_initial_condition_only_refine=true) + +stepsize_callback = StepsizeCallback(cfl=1.6) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + save_restart, save_solution, + amr_callback, stepsize_callback); + + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); + +summary_callback() # print the timer summary diff --git a/examples/2d/elixir_advection_amr_solution_independent_p4est.jl b/examples/2d/elixir_advection_amr_solution_independent_p4est.jl new file mode 100644 index 00000000000..73c60d621b5 --- /dev/null +++ b/examples/2d/elixir_advection_amr_solution_independent_p4est.jl @@ -0,0 +1,149 @@ + +using OrdinaryDiffEq +using Trixi + +# define new structs inside a module to allow re-evaluating the file +module TrixiExtension + +using Trixi + +struct IndicatorSolutionIndependent{Cache<:NamedTuple} <: Trixi.AbstractIndicator + cache::Cache +end +function IndicatorSolutionIndependent(semi) + basis = semi.solver.basis + alpha = Vector{real(basis)}() + cache = (; semi.mesh, alpha) + return IndicatorSolutionIndependent{typeof(cache)}(cache) +end +function (indicator::IndicatorSolutionIndependent)(u::AbstractArray{<:Any,4}, + equations, dg, cache; + t, kwargs...) + + mesh = indicator.cache.mesh + alpha = indicator.cache.alpha + resize!(alpha, nelements(dg, cache)) + + #Predict the theoretical center. 
+ advectionvelocity = (1.0, 1.0) + center = t.*advectionvelocity + + inner_distance = 1 + outer_distance = 1.85 + + #Iterate over all elements + for element in 1:length(alpha) + # Calculate periodic distance between cell and center. + # This requires an uncurved mesh! + coordinates = [0.5 * (cache.elements.node_coordinates[1, 1, 1, element] + + cache.elements.node_coordinates[1, end, 1, element]), + 0.5 * (cache.elements.node_coordinates[2, 1, 1, element] + + cache.elements.node_coordinates[2, 1, end, element])] + + #The geometric shape of the amr should be preserved when the base_level is increased. + #This is done by looking at the original coordinates of each cell. + cell_coordinates = original_coordinates(coordinates, 5/8) + cell_distance = periodic_distance_2d(cell_coordinates, center, 10) + if cell_distance < (inner_distance+outer_distance)/2 + cell_coordinates = original_coordinates(coordinates, 5/16) + cell_distance = periodic_distance_2d(cell_coordinates, center, 10) + end + + #Set alpha according to cells position inside the circles. + target_level = (cell_distance < inner_distance) + (cell_distance < outer_distance) + alpha[element] = target_level/2 + end + return alpha +end + +# For periodic domains, distance between two points must take into account +# periodic extensions of the domain +function periodic_distance_2d(coordinates, center, domain_length) + dx = coordinates .- center + dx_shifted = abs.(dx .% domain_length) + dx_periodic = min.(dx_shifted, domain_length .- dx_shifted) + return sqrt(sum(dx_periodic.^2)) +end + +#This takes a cells coordinates and transforms them into the coordinates of a parent-cell it originally refined from. +#It does it so that the parent-cell has given cell_length. +function original_coordinates(coordinates, cell_length) + offset = coordinates .% cell_length + offset_sign = sign.(offset) + border = coordinates - offset + center = border + (offset_sign .* cell_length/2) + return center +end + +end # module TrixiExtension + +import .TrixiExtension +############################################################################### +# semidiscretization of the linear advection equation + +advectionvelocity = (1.0, 1.0) +# advectionvelocity = (0.2, -0.3) +equations = LinearScalarAdvectionEquation2D(advectionvelocity) + +initial_condition = initial_condition_gauss + +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) + +coordinates_min = (-5, -5) +coordinates_max = ( 5, 5) + +mesh = P4estMesh((1, 1), polydeg=3, + coordinates_min=coordinates_min, coordinates_max=coordinates_max, + initial_refinement_level=4) + + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 10.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval=analysis_interval, + extra_analysis_integrals=(entropy,)) + +alive_callback = AliveCallback(analysis_interval=analysis_interval) + +save_restart = SaveRestartCallback(interval=100, + save_final_restart=true) + +save_solution = SaveSolutionCallback(interval=100, + save_initial_solution=true, + save_final_solution=true, + solution_variables=cons2prim) + +amr_controller = ControllerThreeLevel(semi, TrixiExtension.IndicatorSolutionIndependent(semi), + base_level=4, + med_level=5, med_threshold=0.1, + max_level=6, max_threshold=0.6) + +amr_callback = AMRCallback(semi, amr_controller, + interval=5, + adapt_initial_condition=true, + adapt_initial_condition_only_refine=true) + +stepsize_callback = StepsizeCallback(cfl=1.6) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + save_restart, save_solution, + amr_callback, stepsize_callback); + + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); +summary_callback() # print the timer summary diff --git a/examples/2d/elixir_advection_p4est_non_conforming.jl b/examples/2d/elixir_advection_p4est_non_conforming_flag.jl similarity index 98% rename from examples/2d/elixir_advection_p4est_non_conforming.jl rename to examples/2d/elixir_advection_p4est_non_conforming_flag.jl index 8b5195761c7..317bda62b81 100644 --- a/examples/2d/elixir_advection_p4est_non_conforming.jl +++ b/examples/2d/elixir_advection_p4est_non_conforming_flag.jl @@ -21,7 +21,7 @@ f4(s) = SVector(s, 1.0 + sin(0.5 * pi * s)) trees_per_dimension = (3, 2) -# Create P4estMesh with 8 x 8 trees and 16 x 16 elements +# Create P4estMesh with 3 x 2 trees and 6 x 4 elements mesh = P4estMesh(trees_per_dimension, polydeg=3, faces=(f1, f2, f3, f4), initial_refinement_level=1) diff --git a/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl b/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl new file mode 100644 index 00000000000..809a3eabc02 --- /dev/null +++ b/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl @@ -0,0 +1,95 @@ + +using OrdinaryDiffEq +using Trixi + + +############################################################################### +# semidiscretization of the linear advection equation + +advectionvelocity = (1.0, 1.0) +equations = LinearScalarAdvectionEquation2D(advectionvelocity) + +initial_condition = initial_condition_convergence_test + +boundary_condition = BoundaryConditionDirichlet(initial_condition) +boundary_conditions = Dict( + :all => boundary_condition +) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) + +# Deformed rectangle that looks like a waving flag, +# lower and upper faces are sinus curves, left and right are vertical lines. 
+f1(s) = SVector(-1.0, s - 1.0) +f2(s) = SVector( 1.0, s + 1.0) +f3(s) = SVector(s, -1.0 + sin(0.5 * pi * s)) +f4(s) = SVector(s, 1.0 + sin(0.5 * pi * s)) +faces = (f1, f2, f3, f4) + +Trixi.validate_faces(faces) +mapping_flag = Trixi.transfinite_mapping(faces) + +# Unstructured mesh with 24 cells of the square domain [-1, 1]^n +mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") +isfile(mesh_file) || download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + mesh_file) + +mesh = P4estMesh(mesh_file, polydeg=3, + mapping=mapping_flag, + initial_refinement_level=0) + +# Refine bottom left quadrant of each forest to level 3 +function refine_fn(p4est, which_tree, quadrant) + if quadrant.x == 0 && quadrant.y == 0 && quadrant.level < 3 + # return true (refine) + return Cint(1) + else + # return false (don't refine) + return Cint(0) + end +end + +# Refine recursively until each bottom left quadrant of a forest has level 4 +# The mesh will be rebalanced before the simulation starts +refine_fn_c = @cfunction(refine_fn, Cint, (Ptr{Trixi.p4est_t}, Ptr{Trixi.p4est_topidx_t}, Ptr{Trixi.p4est_quadrant_t})) +Trixi.p4est_refine(mesh.p4est, true, refine_fn_c, C_NULL) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions=boundary_conditions) + + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 1.0 +ode = semidiscretize(semi, (0.0, 0.2)); + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_callback = AnalysisCallback(semi, interval=100) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval=100, + solution_variables=cons2prim) + +# The StepsizeCallback handles the re-calculcation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl=1.6) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, stepsize_callback) + + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); + +# Print the timer summary +summary_callback() diff --git a/test/test_examples_2d_p4est.jl b/test/test_examples_2d_p4est.jl index 9fc2e6ea66a..8099e84d85f 100644 --- a/test/test_examples_2d_p4est.jl +++ b/test/test_examples_2d_p4est.jl @@ -15,12 +15,30 @@ EXAMPLES_DIR = joinpath(pathof(Trixi) |> dirname |> dirname, "examples", "2d") linf = [6.437440532947036e-5]) end - @testset "elixir_advection_p4est_non_conforming.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_p4est_non_conforming.jl"), + @testset "elixir_advection_p4est_non_conforming_flag.jl" begin + 
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_p4est_non_conforming_flag.jl"), l2 = [4.634288969205318e-4], linf = [4.740692055057893e-3]) end + @testset "elixir_advection_p4est_non_conforming_flag_unstructured.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_p4est_non_conforming_flag_unstructured.jl"), + l2 = [3.038384623519386e-3], + linf = [6.324792487776842e-2]) + end + + @testset "elixir_advection_amr_solution_independent_p4est.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_solution_independent_p4est.jl"), + l2 = [7.945073295691892e-5], + linf = [0.0007454287896710293]) + end + + @testset "elixir_advection_amr_p4est_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_p4est_unstructured_flag.jl"), + l2 = [1.535436496965521e-3], + linf = [4.400105886102593e-2]) + end + @testset "elixir_advection_restart_p4est.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart_p4est.jl"), l2 = [6.398955192910044e-6], From 3c94bdb8f8ca3b356f95daa1b04bf61aac0f4df9 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Tue, 1 Jun 2021 14:28:09 +0200 Subject: [PATCH 11/27] Use Downloads: download --- examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl | 1 + .../elixir_advection_p4est_non_conforming_flag_unstructured.jl | 1 + 2 files changed, 2 insertions(+) diff --git a/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl index cda6bb40f9b..1020ebddb54 100644 --- a/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl +++ b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl @@ -1,4 +1,5 @@ +using Downloads: download using OrdinaryDiffEq using Trixi diff --git a/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl b/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl index 809a3eabc02..8dee9cbccf4 100644 --- a/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl +++ b/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl @@ -1,4 +1,5 @@ +using Downloads: download using OrdinaryDiffEq using Trixi From 78f671abe0bb8f6f8969fc84b8d21392014ec143 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Tue, 1 Jun 2021 16:28:59 +0200 Subject: [PATCH 12/27] Improve performance of P4estMesh --- ...dvection_amr_solution_independent_p4est.jl | 8 ++--- src/solvers/dg_p4est/containers.jl | 9 +++++ src/solvers/dg_p4est/containers_2d.jl | 34 +++++++++++++++---- src/solvers/dg_p4est/dg_2d.jl | 15 ++++---- 4 files changed, 49 insertions(+), 17 deletions(-) diff --git a/examples/2d/elixir_advection_amr_solution_independent_p4est.jl b/examples/2d/elixir_advection_amr_solution_independent_p4est.jl index 73c60d621b5..648aa4e4fb4 100644 --- a/examples/2d/elixir_advection_amr_solution_independent_p4est.jl +++ b/examples/2d/elixir_advection_amr_solution_independent_p4est.jl @@ -35,10 +35,10 @@ function (indicator::IndicatorSolutionIndependent)(u::AbstractArray{<:Any,4}, for element in 1:length(alpha) # Calculate periodic distance between cell and center. # This requires an uncurved mesh! 
- coordinates = [0.5 * (cache.elements.node_coordinates[1, 1, 1, element] + - cache.elements.node_coordinates[1, end, 1, element]), - 0.5 * (cache.elements.node_coordinates[2, 1, 1, element] + - cache.elements.node_coordinates[2, 1, end, element])] + coordinates = SVector(0.5 * (cache.elements.node_coordinates[1, 1, 1, element] + + cache.elements.node_coordinates[1, end, 1, element]), + 0.5 * (cache.elements.node_coordinates[2, 1, 1, element] + + cache.elements.node_coordinates[2, 1, end, element])) #The geometric shape of the amr should be preserved when the base_level is increased. #This is done by looking at the original coordinates of each cell. diff --git a/src/solvers/dg_p4est/containers.jl b/src/solvers/dg_p4est/containers.jl index 8efa2a05f49..a7c8b77b823 100644 --- a/src/solvers/dg_p4est/containers.jl +++ b/src/solvers/dg_p4est/containers.jl @@ -440,6 +440,15 @@ function convert_sc_array(::Type{T}, sc_array) where T end +function load_sc_array(::Type{T}, sc_array, i=1) where T + element_size = sc_array.elem_size + + @assert element_size == sizeof(T) + + return unsafe_wrap(T, sc_array.array + element_size * (i - 1)) +end + + # Convert Tuple node_indices to actual indices. # E.g., (:one, :i, :j) will be (1, i, j) for some i and j, # (:i, :end) will be (i, size[2]), diff --git a/src/solvers/dg_p4est/containers_2d.jl b/src/solvers/dg_p4est/containers_2d.jl index 0937cabebe7..5bb444b2ed5 100644 --- a/src/solvers/dg_p4est/containers_2d.jl +++ b/src/solvers/dg_p4est/containers_2d.jl @@ -69,7 +69,8 @@ function init_interfaces_iter_face(info, user_data) return nothing end - sides = convert_sc_array(p4est_iter_face_side_t, info.sides) + sides = (load_sc_array(p4est_iter_face_side_t, info.sides, 1), + load_sc_array(p4est_iter_face_side_t, info.sides, 2)) if sides[1].is_hanging == true || sides[2].is_hanging == true # Mortar, no normal interface @@ -81,16 +82,23 @@ function init_interfaces_iter_face(info, user_data) data_array = unsafe_wrap(Array, ptr, 3) interfaces = data_array[1] interface_id = data_array[2] - data_array[2] += 1 + data_array[2] = interface_id + 1 mesh = data_array[3] + # Function barrier because the unpacked user_data above is type-unstable + init_interfaces_iter_face_inner(info, sides, interfaces, interface_id, mesh) +end + +# Function barrier for type stability +function init_interfaces_iter_face_inner(info, sides, interfaces, interface_id, mesh) # Global trees array - trees = convert_sc_array(p4est_tree_t, mesh.p4est.trees) + trees = (load_sc_array(p4est_tree_t, mesh.p4est.trees, sides[1].treeid + 1), + load_sc_array(p4est_tree_t, mesh.p4est.trees, sides[2].treeid + 1)) # Quadrant numbering offsets of the quadrants at this interface, one-based indexing - offsets = [trees[sides[1].treeid + 1].quadrants_offset, - trees[sides[2].treeid + 1].quadrants_offset] + offsets = SVector(trees[1].quadrants_offset, + trees[2].quadrants_offset) - local_quad_ids = [sides[1].is.full.quadid, sides[2].is.full.quadid] + local_quad_ids = SVector(sides[1].is.full.quadid, sides[2].is.full.quadid) # Global IDs of the neighboring quads quad_ids = offsets + local_quad_ids @@ -100,7 +108,7 @@ function init_interfaces_iter_face(info, user_data) interfaces.element_ids[2, interface_id] = quad_ids[2] + 1 # Face at which the interface lies - faces = [sides[1].face, sides[2].face] + faces = (sides[1].face, sides[2].face) # Relative orientation of the two cell faces, # 0 for aligned coordinates, 1 for reversed coordinates. 
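# Note on the function barrier pattern introduced in this file (illustrative sketch
# only; `process` and `process_inner` are made-up names, not part of this diff):
# `user_data` arrives as a raw pointer, so the values unpacked from it have types the
# compiler cannot infer. Forwarding them to an inner function lets Julia compile a
# specialized, type-stable method for the concrete runtime types.
function process(user_data::Vector{Any})   # element types unknown to the compiler
    a = user_data[1]                       # inferred as `Any`
    b = user_data[2]                       # inferred as `Any`
    return process_inner(a, b)             # barrier: `process_inner` specializes here
end
process_inner(a, b) = a .+ b               # compiled for the concrete argument types

process(Any[1, 2.0])  # == 3.0
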
@@ -164,6 +172,12 @@ function init_boundaries_iter_face(info, user_data) data_array[2] += 1 mesh = data_array[3] + # Function barrier because the unpacked user_data above is type-unstable + init_boundaries_iter_face_inner(info, boundaries, boundary_id, mesh) +end + +# Function barrier for type stability +function init_boundaries_iter_face_inner(info, boundaries, boundary_id, mesh) # Extract boundary data sides = convert_sc_array(p4est_iter_face_side_t, info.sides) # Global trees array @@ -239,6 +253,12 @@ function init_mortars_iter_face(info, user_data) data_array[2] += 1 mesh = data_array[3] + # Function barrier because the unpacked user_data above is type-unstable + init_mortars_iter_face_inner(info, sides, mortars, mortar_id, mesh) +end + +# Function barrier for type stability +function init_mortars_iter_face_inner(info, sides, mortars, mortar_id, mesh) # Global trees array trees = convert_sc_array(p4est_tree_t, mesh.p4est.trees) # Quadrant numbering offsets of the quadrants at this interface, one-based indexing diff --git a/src/solvers/dg_p4est/dg_2d.jl b/src/solvers/dg_p4est/dg_2d.jl index 91ee359740e..0b01d46b61b 100644 --- a/src/solvers/dg_p4est/dg_2d.jl +++ b/src/solvers/dg_p4est/dg_2d.jl @@ -2,7 +2,9 @@ # and called from the basic `create_cache` method at the top. function create_cache(mesh::P4estMesh{2}, equations, mortar_l2::LobattoLegendreMortarL2, uEltype) # TODO: Taal performance using different types - MA2d = MArray{Tuple{nvariables(equations), nnodes(mortar_l2)}, uEltype, 2} + MA2d = MArray{Tuple{nvariables(equations), nnodes(mortar_l2)}, + uEltype, 2, + nvariables(equations) * nnodes(mortar_l2)} fstar_upper_threaded = MA2d[MA2d(undef) for _ in 1:Threads.nthreads()] fstar_lower_threaded = MA2d[MA2d(undef) for _ in 1:Threads.nthreads()] u_threaded = MA2d[MA2d(undef) for _ in 1:Threads.nthreads()] @@ -277,6 +279,11 @@ end large_element = element_ids[3, mortar] # Project small fluxes to large element. + # TODO: Taal performance, see comment in dg_tree/dg_2d.jl + multiply_dimensionwise!(u_buffer, + mortar_l2.reverse_upper, fstar[2], + mortar_l2.reverse_lower, fstar[1]) + # The flux is calculated in the outward direction of the small elements, # so the sign must be switched to get the flux in outward direction # of the large element. @@ -284,11 +291,7 @@ end # of the large element as well) are twice as large as the contravariant vectors # of the small elements. Therefore, the flux need to be scaled by a factor of 2 # to obtain the flux of the large element. 
- # - # TODO: Taal performance, see comment in dg_tree/dg_2d.jl - multiply_dimensionwise!(u_buffer, - mortar_l2.reverse_upper, -2 * fstar[2], - mortar_l2.reverse_lower, -2 * fstar[1]) + u_buffer .*= -2 # Copy interpolated flux values from buffer to large element face in the correct orientation for i in eachnode(dg), v in eachvariable(equations) From 22798064df10a2bdb1d99ec0c49ad0c37c9c210a Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Tue, 1 Jun 2021 21:42:40 +0200 Subject: [PATCH 13/27] Prepare PR --- ...ixir_advection_amr_solution_independent.jl | 4 +- ...dvection_amr_solution_independent_p4est.jl | 6 +- ...xir_advection_p4est_non_conforming_flag.jl | 4 +- ..._p4est_non_conforming_flag_unstructured.jl | 4 +- src/Trixi.jl | 2 + src/auxiliary/auxiliary.jl | 17 ++++ src/callbacks_step/amr.jl | 27 +++--- src/callbacks_step/analysis.jl | 2 +- src/mesh/p4est_mesh.jl | 84 +++++++++++-------- src/solvers/dg_p4est/dg_2d.jl | 4 +- 10 files changed, 94 insertions(+), 60 deletions(-) diff --git a/examples/2d/elixir_advection_amr_solution_independent.jl b/examples/2d/elixir_advection_amr_solution_independent.jl index ea7c9dbb4e0..4bee8793997 100644 --- a/examples/2d/elixir_advection_amr_solution_independent.jl +++ b/examples/2d/elixir_advection_amr_solution_independent.jl @@ -17,8 +17,8 @@ function IndicatorSolutionIndependent(semi) return IndicatorSolutionIndependent{typeof(cache)}(cache) end function (indicator::IndicatorSolutionIndependent)(u::AbstractArray{<:Any,4}, - equations, dg, cache; - t, kwargs...) + equations, dg, cache; + t, kwargs...) mesh = indicator.cache.mesh alpha = indicator.cache.alpha diff --git a/examples/2d/elixir_advection_amr_solution_independent_p4est.jl b/examples/2d/elixir_advection_amr_solution_independent_p4est.jl index 648aa4e4fb4..0e914ce5378 100644 --- a/examples/2d/elixir_advection_amr_solution_independent_p4est.jl +++ b/examples/2d/elixir_advection_amr_solution_independent_p4est.jl @@ -10,15 +10,17 @@ using Trixi struct IndicatorSolutionIndependent{Cache<:NamedTuple} <: Trixi.AbstractIndicator cache::Cache end + function IndicatorSolutionIndependent(semi) basis = semi.solver.basis alpha = Vector{real(basis)}() cache = (; semi.mesh, alpha) return IndicatorSolutionIndependent{typeof(cache)}(cache) end + function (indicator::IndicatorSolutionIndependent)(u::AbstractArray{<:Any,4}, - equations, dg, cache; - t, kwargs...) + equations, dg, cache; + t, kwargs...) 
mesh = indicator.cache.mesh alpha = indicator.cache.alpha diff --git a/examples/2d/elixir_advection_p4est_non_conforming_flag.jl b/examples/2d/elixir_advection_p4est_non_conforming_flag.jl index 317bda62b81..c4297debfec 100644 --- a/examples/2d/elixir_advection_p4est_non_conforming_flag.jl +++ b/examples/2d/elixir_advection_p4est_non_conforming_flag.jl @@ -26,7 +26,7 @@ mesh = P4estMesh(trees_per_dimension, polydeg=3, faces=(f1, f2, f3, f4), initial_refinement_level=1) -# Refine bottom left quadrant of each forest to level 4 +# Refine bottom left quadrant of each tree to level 4 function refine_fn(p4est, which_tree, quadrant) if quadrant.x == 0 && quadrant.y == 0 && quadrant.level < 4 # return true (refine) @@ -37,7 +37,7 @@ function refine_fn(p4est, which_tree, quadrant) end end -# Refine recursively until each bottom left quadrant of a forest has level 4 +# Refine recursively until each bottom left quadrant of a tree has level 4 # The mesh will be rebalanced before the simulation starts refine_fn_c = @cfunction(refine_fn, Cint, (Ptr{Trixi.p4est_t}, Ptr{Trixi.p4est_topidx_t}, Ptr{Trixi.p4est_quadrant_t})) Trixi.p4est_refine(mesh.p4est, true, refine_fn_c, C_NULL) diff --git a/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl b/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl index 8dee9cbccf4..1c58f9d4f6a 100644 --- a/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl +++ b/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl @@ -40,7 +40,7 @@ mesh = P4estMesh(mesh_file, polydeg=3, mapping=mapping_flag, initial_refinement_level=0) -# Refine bottom left quadrant of each forest to level 3 +# Refine bottom left quadrant of each tree to level 3 function refine_fn(p4est, which_tree, quadrant) if quadrant.x == 0 && quadrant.y == 0 && quadrant.level < 3 # return true (refine) @@ -51,7 +51,7 @@ function refine_fn(p4est, which_tree, quadrant) end end -# Refine recursively until each bottom left quadrant of a forest has level 4 +# Refine recursively until each bottom left quadrant of a tree has level 4 # The mesh will be rebalanced before the simulation starts refine_fn_c = @cfunction(refine_fn, Cint, (Ptr{Trixi.p4est_t}, Ptr{Trixi.p4est_topidx_t}, Ptr{Trixi.p4est_quadrant_t})) Trixi.p4est_refine(mesh.p4est, true, refine_fn_c, C_NULL) diff --git a/src/Trixi.jl b/src/Trixi.jl index 77050111f43..74269a6590a 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -176,6 +176,8 @@ export PlotData1D, PlotData2D, getmesh, adapt_to_mesh_level!, adapt_to_mesh_leve function __init__() init_mpi() + init_p4est() + # Enable features that depend on the availability of the Plots package @require Plots="91a5bcdd-55d7-5caf-9e0b-520d859cae80" begin using .Plots: plot, plot!, savefig diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl index 7b98ae42719..8318ba9d6bd 100644 --- a/src/auxiliary/auxiliary.jl +++ b/src/auxiliary/auxiliary.jl @@ -212,3 +212,20 @@ macro timed(timer_output, label, expr) val end end + + +""" + init_p4est() + +Initialize p4est by calling `p4est_init` and setting the log level to `SC_LP_ERROR`. +This function will check if p4est is already initialized +and if yes, do nothing, thus it is safe to call it multiple times. 
+""" +function init_p4est() + if p4est_package_id()[] >= 0 + return nothing + end + + # Initialize p4est with log level ERROR to prevent a lot of output in AMR simulations + p4est_init(C_NULL, SC_LP_ERROR) +end diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 40669d01c5f..638afc53657 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -321,14 +321,15 @@ end # Copy controller values to quad user data storage, will be called below function copy_to_quad_iter_volume(info, user_data) - # Global trees array - trees = convert_sc_array(p4est_tree_t, info.p4est.trees) - # Quadrant numbering offset of this quadrant, one-based indexing - offset = trees[info.treeid + 1].quadrants_offset + # Global trees array, one-based indexing + tree = load_sc_array(p4est_tree_t, info.p4est.trees, info.treeid + 1) + # Quadrant numbering offset of this quadrant + offset = tree.quadrants_offset # Global quad ID quad_id = offset + info.quadid # Access user_data = lambda + # TODO P4EST Is lambda always a Vector{Int}? user_data_ptr = Ptr{Int}(user_data) # Load controller_value = lambda[quad_id + 1] controller_value = unsafe_load(user_data_ptr, quad_id + 1) @@ -361,7 +362,6 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, # Copy controller value of each quad to the quad's user data storage iter_volume_c = @cfunction(copy_to_quad_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) - # TODO P4EST Is lambda always a Vector{Int}? GC.@preserve lambda begin p4est_iterate(mesh.p4est, C_NULL, # ghost layer @@ -376,9 +376,11 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, refined_original_cells = @timed timer() "mesh" refine!(mesh) # Refine solver - @timed timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, cache, refined_original_cells) + @timed timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, cache, + refined_original_cells) for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args - @timed timer() "passive solver" refine!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, refined_original_cells) + @timed timer() "passive solver" refine!(p_u_ode, adaptor, p_mesh, p_equations, + p_dg, p_cache, refined_original_cells) end else # If there is nothing to refine, create empty array for later use @@ -390,7 +392,8 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, coarsened_original_cells = @timed timer() "mesh" coarsen!(mesh) # coarsen solver - @timed timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, cache, coarsened_original_cells) + @timed timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, cache, + coarsened_original_cells) for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args @timed timer() "passive solver" coarsen!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, coarsened_original_cells) @@ -573,10 +576,10 @@ function controller_three_level_iter_volume(info, user_data) @unpack controller_value = controller.cache - # Global trees array - trees = convert_sc_array(p4est_tree_t, info.p4est.trees) - # Quadrant numbering offset of this quadrant, one-based indexing - offset = trees[info.treeid + 1].quadrants_offset + # Global trees array, one-based indexing + tree = load_sc_array(p4est_tree_t, info.p4est.trees, info.treeid + 1) + # Quadrant numbering offset of this quadrant + offset = tree.quadrants_offset # Global quad ID quad_id = offset + info.quadid # Julia element ID diff --git a/src/callbacks_step/analysis.jl 
b/src/callbacks_step/analysis.jl index f6c7bab0c2d..43943bde1eb 100644 --- a/src/callbacks_step/analysis.jl +++ b/src/callbacks_step/analysis.jl @@ -386,7 +386,7 @@ function print_amr_information(callbacks, mesh::P4estMesh, solver, cache) elements_per_level = zeros(P4EST_MAXLEVEL + 1) for tree in convert_sc_array(p4est_tree_t, mesh.p4est.trees) - @. elements_per_level += tree.quadrants_per_level + elements_per_level .+= tree.quadrants_per_level end min_level = findfirst(i -> i > 0, elements_per_level) - 1 diff --git a/src/mesh/p4est_mesh.jl b/src/mesh/p4est_mesh.jl index 3b29eecb3da..2e2dac1ce5e 100644 --- a/src/mesh/p4est_mesh.jl +++ b/src/mesh/p4est_mesh.jl @@ -108,7 +108,6 @@ function P4estMesh(trees_per_dimension; polydeg, # Destroy p4est structs at exit of Julia function destroy_p4est_structs() - p4est_ghost_destroy(ghost) p4est_destroy(p4est) p4est_connectivity_destroy(conn) end @@ -198,6 +197,14 @@ function P4estMesh(meshfile::String; # Use Int-Vector of size 2 as quadrant user data p4est = p4est_new_ext(0, conn, 0, initial_refinement_level, true, 2 * sizeof(Int), C_NULL, C_NULL) + # Destroy p4est structs at exit of Julia + function destroy_p4est_structs() + p4est_destroy(p4est) + p4est_connectivity_destroy(conn) + end + + atexit(destroy_p4est_structs) + # There's no simple and generic way to distinguish boundaries. Name all of them :all. boundary_names = fill(:all, 4, n_trees) @@ -397,18 +404,17 @@ end function init_fn(p4est, which_tree, quadrant) # Unpack quadrant's user data ([global quad ID, controller_value]) ptr = Ptr{Int}(quadrant.p.user_data) - data = unsafe_wrap(Array, ptr, 2) # Initialize quad ID as -1 and controller_value as 0 (don't refine or coarsen) - data[1] = -1 - data[2] = 0 + unsafe_store!(ptr, -1, 1) + unsafe_store!(ptr, 0, 2) return nothing end function refine_fn(p4est, which_tree, quadrant) - # Controller value has been copied to the quadrant's user data storage before - # Unpack quadrant's user data ([global quad ID, controller_value]) + # Controller value has been copied to the quadrant's user data storage before. + # Unpack quadrant's user data ([global quad ID, controller_value]). ptr = Ptr{Int}(quadrant.p.user_data) controller_value = unsafe_load(ptr, 2) @@ -421,19 +427,20 @@ function refine_fn(p4est, which_tree, quadrant) end end -# Refine marked cells and rebalance forest -# Return a list of all cells that have been refined during refinement or rebalancing +# Refine marked cells and rebalance forest. +# Return a list of all cells that have been refined during refinement or rebalancing. 
function refine!(mesh::P4estMesh) - original_n_cells = ncells(mesh) - # Copy original element IDs to quad user data storage + original_n_cells = ncells(mesh) save_original_ids(mesh) - init_fn_c = @cfunction(init_fn, Cvoid, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) + init_fn_c = @cfunction(init_fn, Cvoid, + (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) # Refine marked cells - refine_fn_c = @cfunction(refine_fn, Cint, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) - @timed timer() "refine_unbalanced" p4est_refine(mesh.p4est, false, refine_fn_c, init_fn_c) + refine_fn_c = @cfunction(refine_fn, Cint, + (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) + @timed timer() "refine" p4est_refine(mesh.p4est, false, refine_fn_c, init_fn_c) @timed timer() "rebalance" p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn_c) # Due to a bug in p4est, the forest needs to be rebalanced twice sometimes @@ -447,7 +454,7 @@ end function coarsen_fn(p4est, which_tree, quadrants_ptr) quadrants = unsafe_wrap(Array, quadrants_ptr, 4) - # Controller value has been copied to the quadrant's user data storage before + # Controller value has been copied to the quadrant's user data storage before. # Unpack quadrant's user data ([global quad ID, controller_value]) if all(i -> unsafe_load(Ptr{Int}(quadrants[i].p.user_data), 2) < 0, eachindex(quadrants)) # return true (coarsen) @@ -458,8 +465,8 @@ function coarsen_fn(p4est, which_tree, quadrants_ptr) end end -# Coarsen marked cells if the forest will stay balanced -# Return a list of all cells that have been coarsened +# Coarsen marked cells if the forest will stay balanced. +# Return a list of all cells that have been coarsened. function coarsen!(mesh::P4estMesh) # Copy original element IDs to quad user data storage original_n_cells = ncells(mesh) @@ -493,26 +500,28 @@ function coarsen!(mesh::P4estMesh) # Some cells may have been coarsened even though they unbalanced the forest. # These cells have now been refined again by p4est_balance. - # Find intermediate ID (ID of coarse cell between coarsening and balancing) - # of each cell that have been coarsened and then refined again. + # refined_cells contains the intermediate IDs (ID of coarse cell + # between coarsening and balancing) of these cells. + # Find original ID of each cell that has been coarsened and then refined again. 
for refined_cell in refined_cells - # i-th cell of the ones that have created by coarsening has been refined again + # i-th cell of the ones that have been created by coarsening has been refined again i = findfirst(isequal(refined_cell), new_cells) # Remove IDs of the 2^NDIMS cells that have been coarsened to this cell coarsened_cells[:, i] .= -1 end + # Return all IDs of cells that have been coarsened but not refined again by balancing return coarsened_cells_vec[coarsened_cells_vec .>= 0] end # Copy global quad ID to quad's user data storage, will be called below function save_original_id_iter_volume(info, user_data) - # Global trees array - trees = convert_sc_array(p4est_tree_t, info.p4est.trees) - # Quadrant numbering offset of this quadrant, one-based indexing - offset = trees[info.treeid + 1].quadrants_offset + # Global trees array, one-based indexing + tree = load_sc_array(p4est_tree_t, info.p4est.trees, info.treeid + 1) + # Quadrant numbering offset of this quadrant + offset = tree.quadrants_offset # Global quad ID quad_id = offset + info.quadid @@ -539,17 +548,18 @@ end # Extract information about which cells have been changed function collect_changed_iter_volume(info, user_data) - # The original element ID has been saved to user_data before - # Unpack quadrant's user data ([global quad ID, controller_value]) + # The original element ID has been saved to user_data before. + # Unpack quadrant's user data ([global quad ID, controller_value]). quad_data_ptr = Ptr{Int}(info.quad.p.user_data) original_id = unsafe_load(quad_data_ptr, 1) - # original_id of cells that have been newly created during refinement is -1 + # original_id of cells that have been newly created is -1 if original_id >= 0 - # Unpack user_data = original_cells (we only need the first original_id values) + # Unpack user_data = original_cells user_data_ptr = Ptr{Int}(user_data) - # If quad has an original_id, it existed before refinement, and therefore wasn't changed. + # If quad has an original_id, it existed before refinement/coarsening, + # and therefore wasn't changed. # Mark original_id as "not changed during refinement/coarsening" in original_cells unsafe_store!(user_data_ptr, 0, original_id + 1) end @@ -581,17 +591,17 @@ end # Extract newly created cells function collect_new_iter_volume(info, user_data) - # The original element ID has been saved to user_data before - # Unpack quadrant's user data ([global quad ID, controller_value]) + # The original element ID has been saved to user_data before. + # Unpack quadrant's user data ([global quad ID, controller_value]). 
quad_data_ptr = Ptr{Int}(info.quad.p.user_data) original_id = unsafe_load(quad_data_ptr, 1) - # original_id of cells that have been newly created during refinement is -1 + # original_id of cells that have been newly created is -1 if original_id < 0 - # Global trees array - trees = convert_sc_array(p4est_tree_t, info.p4est.trees) - # Quadrant numbering offset of this quadrant, one-based indexing - offset = trees[info.treeid + 1].quadrants_offset + # Global trees array, one-based indexing + tree = load_sc_array(p4est_tree_t, info.p4est.trees, info.treeid + 1) + # Quadrant numbering offset of this quadrant + offset = tree.quadrants_offset # Global quad ID quad_id = offset + info.quadid @@ -608,7 +618,7 @@ end function collect_new_cells(mesh) cell_is_new = zeros(Int, ncells(mesh)) - # Iterate over all quads and set original cells that haven't been changed to zero + # Iterate over all quads and set original cells that have been changed to one iter_volume_c = @cfunction(collect_new_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) GC.@preserve cell_is_new begin @@ -621,7 +631,7 @@ function collect_new_cells(mesh) end # Changed cells are all that haven't been set to zero above - new_cells = findall(isequal(1), cell_is_new) + new_cells = cell_is_new[cell_is_new .> 0] return new_cells end diff --git a/src/solvers/dg_p4est/dg_2d.jl b/src/solvers/dg_p4est/dg_2d.jl index 0b01d46b61b..625763176e9 100644 --- a/src/solvers/dg_p4est/dg_2d.jl +++ b/src/solvers/dg_p4est/dg_2d.jl @@ -167,8 +167,8 @@ function prolong2mortars!(cache, u, size_ = (nnodes(dg), nnodes(dg)) @threaded for mortar in eachmortar(dg, cache) - small_indices = node_indices[1, mortar] - large_indices = node_indices[2, mortar] + small_indices = node_indices[1, mortar] + large_indices = node_indices[2, mortar] # Copy solution small to small for pos in 1:2, i in eachnode(dg), v in eachvariable(equations) From 3cabfe913716fb10b025bd71618c533fe7c5e46b Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Tue, 1 Jun 2021 23:44:58 +0200 Subject: [PATCH 14/27] Fix 2279806 --- src/mesh/p4est_mesh.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mesh/p4est_mesh.jl b/src/mesh/p4est_mesh.jl index 2e2dac1ce5e..b2522c66df0 100644 --- a/src/mesh/p4est_mesh.jl +++ b/src/mesh/p4est_mesh.jl @@ -631,7 +631,7 @@ function collect_new_cells(mesh) end # Changed cells are all that haven't been set to zero above - new_cells = cell_is_new[cell_is_new .> 0] + new_cells = findall(isequal(1), cell_is_new) return new_cells end From 262e27ff1b011f126479e22e7d7b438f7c638dcf Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Wed, 2 Jun 2021 12:06:39 +0200 Subject: [PATCH 15/27] Remove some allocations from count_required functions --- src/solvers/dg_p4est/containers.jl | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/solvers/dg_p4est/containers.jl b/src/solvers/dg_p4est/containers.jl index a7c8b77b823..341c4ebfafa 100644 --- a/src/solvers/dg_p4est/containers.jl +++ b/src/solvers/dg_p4est/containers.jl @@ -339,14 +339,15 @@ end function count_interfaces_iter_face(info, user_data) if info.sides.elem_count == 2 # Extract interface data - sides = convert_sc_array(p4est_iter_face_side_t, info.sides) + sides = (load_sc_array(p4est_iter_face_side_t, info.sides, 1), + load_sc_array(p4est_iter_face_side_t, info.sides, 2)) if sides[1].is_hanging == 0 && sides[2].is_hanging == 0 # No hanging nodes => normal 
interface # Unpack user_data = [interface_count] and increment interface_count ptr = Ptr{Int}(user_data) - data_array = unsafe_wrap(Array, ptr, 1) - data_array[1] += 1 + id = unsafe_load(ptr) + unsafe_store!(ptr, id + 1) end end @@ -358,8 +359,8 @@ function count_boundaries_iter_face(info, user_data) if info.sides.elem_count == 1 # Unpack user_data = [boundary_count] and increment boundary_count ptr = Ptr{Int}(user_data) - data_array = unsafe_wrap(Array, ptr, 1) - data_array[1] += 1 + id = unsafe_load(ptr) + unsafe_store!(ptr, id + 1) end return nothing @@ -369,14 +370,15 @@ end function count_mortars_iter_face(info, user_data) if info.sides.elem_count == 2 # Extract interface data - sides = convert_sc_array(p4est_iter_face_side_t, info.sides) + sides = (load_sc_array(p4est_iter_face_side_t, info.sides, 1), + load_sc_array(p4est_iter_face_side_t, info.sides, 2)) if sides[1].is_hanging != 0 || sides[2].is_hanging != 0 # Hanging nodes => mortar # Unpack user_data = [mortar_count] and increment mortar_count ptr = Ptr{Int}(user_data) - data_array = unsafe_wrap(Array, ptr, 1) - data_array[1] += 1 + id = unsafe_load(ptr) + unsafe_store!(ptr, id + 1) end end From 018da92a52e7a1a6d81eb1385736ccb7673afe8f Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Wed, 2 Jun 2021 20:51:00 +0200 Subject: [PATCH 16/27] Remove more allocations --- src/solvers/dg_p4est/containers_2d.jl | 36 ++++++++++++++------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/src/solvers/dg_p4est/containers_2d.jl b/src/solvers/dg_p4est/containers_2d.jl index 5bb444b2ed5..232583bc884 100644 --- a/src/solvers/dg_p4est/containers_2d.jl +++ b/src/solvers/dg_p4est/containers_2d.jl @@ -91,10 +91,10 @@ end # Function barrier for type stability function init_interfaces_iter_face_inner(info, sides, interfaces, interface_id, mesh) - # Global trees array + # Global trees array, one-based indexing trees = (load_sc_array(p4est_tree_t, mesh.p4est.trees, sides[1].treeid + 1), load_sc_array(p4est_tree_t, mesh.p4est.trees, sides[2].treeid + 1)) - # Quadrant numbering offsets of the quadrants at this interface, one-based indexing + # Quadrant numbering offsets of the quadrants at this interface offsets = SVector(trees[1].quadrants_offset, trees[2].quadrants_offset) @@ -179,16 +179,16 @@ end # Function barrier for type stability function init_boundaries_iter_face_inner(info, boundaries, boundary_id, mesh) # Extract boundary data - sides = convert_sc_array(p4est_iter_face_side_t, info.sides) - # Global trees array - trees = convert_sc_array(p4est_tree_t, mesh.p4est.trees) - # Quadrant numbering offset of this quadrant, one-based indexing - offset = trees[sides[1].treeid + 1].quadrants_offset + side = load_sc_array(p4est_iter_face_side_t, info.sides) + # Global trees array, one-based indexing + tree = load_sc_array(p4est_tree_t, mesh.p4est.trees, side.treeid + 1) + # Quadrant numbering offset of this quadrant + offset = tree.quadrants_offset # Verify before accessing is.full, but this should never happen - @assert sides[1].is_hanging == false + @assert side.is_hanging == false - local_quad_id = sides[1].is.full.quadid + local_quad_id = side.is.full.quadid # Global ID of this quad quad_id = offset + local_quad_id @@ -197,7 +197,7 @@ function init_boundaries_iter_face_inner(info, boundaries, boundary_id, mesh) boundaries.element_ids[boundary_id] = quad_id + 1 # Face at which the boundary lies - face = sides[1].face + face = side.face if face == 0 # Index face in negative x-direction 
@@ -214,7 +214,7 @@ function init_boundaries_iter_face_inner(info, boundaries, boundary_id, mesh) end # One-based indexing - boundaries.name[boundary_id] = mesh.boundary_names[face + 1, sides[1].treeid + 1] + boundaries.name[boundary_id] = mesh.boundary_names[face + 1, side.treeid + 1] return nothing end @@ -238,7 +238,8 @@ function init_mortars_iter_face(info, user_data) return nothing end - sides = convert_sc_array(p4est_iter_face_side_t, info.sides) + sides = (load_sc_array(p4est_iter_face_side_t, info.sides, 1), + load_sc_array(p4est_iter_face_side_t, info.sides, 2)) if sides[1].is_hanging == false && sides[2].is_hanging == false # Normal interface, no mortar @@ -259,11 +260,12 @@ end # Function barrier for type stability function init_mortars_iter_face_inner(info, sides, mortars, mortar_id, mesh) - # Global trees array - trees = convert_sc_array(p4est_tree_t, mesh.p4est.trees) - # Quadrant numbering offsets of the quadrants at this interface, one-based indexing - offsets = [trees[sides[1].treeid + 1].quadrants_offset, - trees[sides[2].treeid + 1].quadrants_offset] + # Global trees array, one-based indexing + trees = (load_sc_array(p4est_tree_t, mesh.p4est.trees, sides[1].treeid + 1), + load_sc_array(p4est_tree_t, mesh.p4est.trees, sides[2].treeid + 1)) + # Quadrant numbering offsets of the quadrants at this interface + offsets = SVector(trees[1].quadrants_offset, + trees[2].quadrants_offset) if sides[1].is_hanging == true # Left is small (1), right is large (2) From 8b7c0ab17c68f3f1b08cf6f23cd6626b3ad5c8ec Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Sat, 5 Jun 2021 16:23:45 +0200 Subject: [PATCH 17/27] Implement suggestions --- .../2d/elixir_advection_amr_p4est_unstructured_flag.jl | 6 +++--- examples/2d/elixir_advection_amr_solution_independent.jl | 4 ++-- .../2d/elixir_advection_amr_solution_independent_p4est.jl | 8 +++++--- examples/2d/elixir_advection_p4est_non_conforming_flag.jl | 2 +- src/callbacks_step/amr.jl | 4 ++-- src/callbacks_step/analysis.jl | 4 ++-- src/mesh/p4est_mesh.jl | 4 ++-- .../dg_unstructured_quad/sort_boundary_conditions.jl | 2 +- test/test_examples_2d_p4est.jl | 4 ++-- 9 files changed, 20 insertions(+), 18 deletions(-) diff --git a/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl index 1020ebddb54..c55bded0f5b 100644 --- a/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl +++ b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl @@ -6,8 +6,8 @@ using Trixi ############################################################################### # semidiscretization of the linear advection equation -advectionvelocity = (1.0, 1.0) -# advectionvelocity = (0.2, -0.3) +# advectionvelocity = (1.0, 1.0) +advectionvelocity = (0.2, -0.3) equations = LinearScalarAdvectionEquation2D(advectionvelocity) initial_condition = initial_condition_gauss @@ -75,7 +75,7 @@ amr_callback = AMRCallback(semi, amr_controller, adapt_initial_condition=true, adapt_initial_condition_only_refine=true) -stepsize_callback = StepsizeCallback(cfl=1.6) +stepsize_callback = StepsizeCallback(cfl=0.7) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, diff --git a/examples/2d/elixir_advection_amr_solution_independent.jl b/examples/2d/elixir_advection_amr_solution_independent.jl index 4bee8793997..039b4209b4f 100644 --- a/examples/2d/elixir_advection_amr_solution_independent.jl +++ b/examples/2d/elixir_advection_amr_solution_independent.jl 
@@ -86,8 +86,8 @@ initial_condition = initial_condition_gauss solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) -coordinates_min = (-5, -5) -coordinates_max = ( 5, 5) +coordinates_min = (-5.0, -5.0) +coordinates_max = ( 5.0, 5.0) mesh = TreeMesh(coordinates_min, coordinates_max, initial_refinement_level=4, n_cells_max=30_000) diff --git a/examples/2d/elixir_advection_amr_solution_independent_p4est.jl b/examples/2d/elixir_advection_amr_solution_independent_p4est.jl index 0e914ce5378..1b879637f4d 100644 --- a/examples/2d/elixir_advection_amr_solution_independent_p4est.jl +++ b/examples/2d/elixir_advection_amr_solution_independent_p4est.jl @@ -91,10 +91,12 @@ initial_condition = initial_condition_gauss solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) -coordinates_min = (-5, -5) -coordinates_max = ( 5, 5) +coordinates_min = (-5.0, -5.0) +coordinates_max = ( 5.0, 5.0) -mesh = P4estMesh((1, 1), polydeg=3, +trees_per_dimension = (1, 1) + +mesh = P4estMesh(trees_per_dimension, polydeg=3, coordinates_min=coordinates_min, coordinates_max=coordinates_max, initial_refinement_level=4) diff --git a/examples/2d/elixir_advection_p4est_non_conforming_flag.jl b/examples/2d/elixir_advection_p4est_non_conforming_flag.jl index c4297debfec..78ae922a0e0 100644 --- a/examples/2d/elixir_advection_p4est_non_conforming_flag.jl +++ b/examples/2d/elixir_advection_p4est_non_conforming_flag.jl @@ -19,9 +19,9 @@ f2(s) = SVector( 1.0, s + 1.0) f3(s) = SVector(s, -1.0 + sin(0.5 * pi * s)) f4(s) = SVector(s, 1.0 + sin(0.5 * pi * s)) +# Create P4estMesh with 3 x 2 trees and 6 x 4 elements trees_per_dimension = (3, 2) -# Create P4estMesh with 3 x 2 trees and 6 x 4 elements mesh = P4estMesh(trees_per_dimension, polydeg=3, faces=(f1, f2, f3, f4), initial_refinement_level=1) diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 638afc53657..a790c0b757b 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -223,7 +223,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, refined_original_cells = @timed timer() "mesh" refine!(mesh.tree, to_refine) # Find all indices of elements whose cell ids are in refined_original_cells - elements_to_refine = findall(cell_id -> cell_id in refined_original_cells, cache.elements.cell_ids) + elements_to_refine = findall(in(refined_original_cells), cache.elements.cell_ids) # refine solver @timed timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_refine) @@ -284,7 +284,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, end # Find all indices of elements whose cell ids are in removed_child_cells - elements_to_remove = findall(cell_id -> cell_id in removed_child_cells, cache.elements.cell_ids) + elements_to_remove = findall(in(removed_child_cells), cache.elements.cell_ids) # coarsen solver @timed timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_remove) diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl index 43943bde1eb..80c95d28e73 100644 --- a/src/callbacks_step/analysis.jl +++ b/src/callbacks_step/analysis.jl @@ -370,9 +370,9 @@ function print_amr_information(callbacks, mesh, solver, cache) end for level = max_level:-1:min_level+1 - mpi_println(" ├── level $level: " * @sprintf("% 14d", count(isequal(level), levels))) + mpi_println(" ├── level $level: " * @sprintf("% 14d", count(==(level), levels))) end - mpi_println(" └── level $min_level: " * @sprintf("% 14d", count(isequal(min_level), levels))) 
+ mpi_println(" └── level $min_level: " * @sprintf("% 14d", count(==(min_level), levels))) return nothing end diff --git a/src/mesh/p4est_mesh.jl b/src/mesh/p4est_mesh.jl index b2522c66df0..e71e28cb6b9 100644 --- a/src/mesh/p4est_mesh.jl +++ b/src/mesh/p4est_mesh.jl @@ -505,7 +505,7 @@ function coarsen!(mesh::P4estMesh) # Find original ID of each cell that has been coarsened and then refined again. for refined_cell in refined_cells # i-th cell of the ones that have been created by coarsening has been refined again - i = findfirst(isequal(refined_cell), new_cells) + i = findfirst(==(refined_cell), new_cells) # Remove IDs of the 2^NDIMS cells that have been coarsened to this cell coarsened_cells[:, i] .= -1 @@ -631,7 +631,7 @@ function collect_new_cells(mesh) end # Changed cells are all that haven't been set to zero above - new_cells = findall(isequal(1), cell_is_new) + new_cells = findall(==(1), cell_is_new) return new_cells end diff --git a/src/solvers/dg_unstructured_quad/sort_boundary_conditions.jl b/src/solvers/dg_unstructured_quad/sort_boundary_conditions.jl index e9f63938886..09fe76baf8e 100644 --- a/src/solvers/dg_unstructured_quad/sort_boundary_conditions.jl +++ b/src/solvers/dg_unstructured_quad/sort_boundary_conditions.jl @@ -15,7 +15,7 @@ mutable struct UnstructuredQuadSortedBoundaryTypes{N, BCs<:NTuple{N, Any}} boundary_dictionary::Dict{Symbol, Any} # boundary conditions as set by the user in the elixir file end -@inline nboundaries(container::UnstructuredQuadSortedBoundaryTypes) = sum(container.boundary_indices .|> length) +@inline nboundaries(container::UnstructuredQuadSortedBoundaryTypes) = sum(length, container.boundary_indices) # constructor that "eats" the original boundary condition dictionary and sorts the information diff --git a/test/test_examples_2d_p4est.jl b/test/test_examples_2d_p4est.jl index 8099e84d85f..2ffeff09c46 100644 --- a/test/test_examples_2d_p4est.jl +++ b/test/test_examples_2d_p4est.jl @@ -35,8 +35,8 @@ EXAMPLES_DIR = joinpath(pathof(Trixi) |> dirname |> dirname, "examples", "2d") @testset "elixir_advection_amr_p4est_unstructured_flag.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_p4est_unstructured_flag.jl"), - l2 = [1.535436496965521e-3], - linf = [4.400105886102593e-2]) + l2 = [1.29561551e-03], + linf = [3.92061383e-02]) end @testset "elixir_advection_restart_p4est.jl" begin From be749ff770a1c8d97f6a6f216230eee2c60ebbb4 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Sun, 6 Jun 2021 15:42:34 +0200 Subject: [PATCH 18/27] Implement more suggestions --- ...r_advection_amr_p4est_unstructured_flag.jl | 5 ++- src/mesh/p4est_mesh.jl | 38 ++++++++++--------- 2 files changed, 24 insertions(+), 19 deletions(-) diff --git a/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl index c55bded0f5b..4bcc2a00537 100644 --- a/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl +++ b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl @@ -27,6 +27,9 @@ f3(s) = SVector(5 * s, -5.0 + 5 * sin(0.5 * pi * s)) f4(s) = SVector(5 * s, 5.0 + 5 * sin(0.5 * pi * s)) faces = (f1, f2, f3, f4) +# This creates a mapping that transforms [-1, 1]^2 to the domain with the faces defined above. +# It generally doesn't work for meshes loaded from mesh files because these can be meshes +# of arbitrary domains, but the mesh below is specifically built on the domain [-1, 1]^2. 
Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) @@ -61,7 +64,7 @@ alive_callback = AliveCallback(analysis_interval=analysis_interval) save_restart = SaveRestartCallback(interval=100, save_final_restart=true) -save_solution = SaveSolutionCallback(interval=1, +save_solution = SaveSolutionCallback(interval=100, save_initial_solution=true, save_final_solution=true, solution_variables=cons2prim) diff --git a/src/mesh/p4est_mesh.jl b/src/mesh/p4est_mesh.jl index e71e28cb6b9..89d7ce8bdc1 100644 --- a/src/mesh/p4est_mesh.jl +++ b/src/mesh/p4est_mesh.jl @@ -106,14 +106,6 @@ function P4estMesh(trees_per_dimension; polydeg, # Use Int-Vector of size 2 as quadrant user data p4est = p4est_new_ext(0, conn, 0, initial_refinement_level, true, 2 * sizeof(Int), C_NULL, C_NULL) - # Destroy p4est structs at exit of Julia - function destroy_p4est_structs() - p4est_destroy(p4est) - p4est_connectivity_destroy(conn) - end - - atexit(destroy_p4est_structs) - # Non-periodic boundaries boundary_names = fill(Symbol("---"), 4, prod(trees_per_dimension)) linear_indices = LinearIndices(trees_per_dimension) @@ -140,8 +132,17 @@ function P4estMesh(trees_per_dimension; polydeg, end end - return P4estMesh{NDIMS, RealT, NDIMS+2}(p4est, tree_node_coordinates, nodes, + mesh = P4estMesh{NDIMS, RealT, NDIMS+2}(p4est, tree_node_coordinates, nodes, boundary_names, "", unsaved_changes) + + # Destroy p4est structs when the mesh is garbage collected + finalizer(mesh) do + conn = mesh.p4est.connectivity + p4est_destroy(mesh.p4est) + p4est_connectivity_destroy(conn) + end + + return mesh end @@ -197,19 +198,20 @@ function P4estMesh(meshfile::String; # Use Int-Vector of size 2 as quadrant user data p4est = p4est_new_ext(0, conn, 0, initial_refinement_level, true, 2 * sizeof(Int), C_NULL, C_NULL) - # Destroy p4est structs at exit of Julia - function destroy_p4est_structs() - p4est_destroy(p4est) - p4est_connectivity_destroy(conn) - end - - atexit(destroy_p4est_structs) - # There's no simple and generic way to distinguish boundaries. Name all of them :all. 
boundary_names = fill(:all, 4, n_trees) - return P4estMesh{NDIMS, RealT, NDIMS+2}(p4est, tree_node_coordinates, nodes, + mesh = P4estMesh{NDIMS, RealT, NDIMS+2}(p4est, tree_node_coordinates, nodes, boundary_names, "", unsaved_changes) + + # Destroy p4est structs when the mesh is garbage collected + finalizer(mesh) do + conn = mesh.p4est.connectivity + p4est_destroy(mesh.p4est) + p4est_connectivity_destroy(conn) + end + + return mesh end From a6ee5860a79ecf64b3c657fdf8464e50cbd3c330 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Mon, 7 Jun 2021 12:14:39 +0200 Subject: [PATCH 19/27] Fix polydeg in show(P4estMesh) --- src/mesh/p4est_mesh.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mesh/p4est_mesh.jl b/src/mesh/p4est_mesh.jl index 89d7ce8bdc1..d1b76717f59 100644 --- a/src/mesh/p4est_mesh.jl +++ b/src/mesh/p4est_mesh.jl @@ -322,7 +322,7 @@ function Base.show(io::IO, ::MIME"text/plain", mesh::P4estMesh) setup = [ "#trees" => ntrees(mesh), "current #cells" => ncells(mesh), - "polydeg" => length(mesh.nodes), + "polydeg" => length(mesh.nodes) - 1, ] summary_box(io, "P4estMesh{" * string(ndims(mesh)) * ", " * string(real(mesh)) * "}", setup) end From fbaf92eb4ae4a45dfcc47f7d1f39d7d0677c0250 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Tue, 8 Jun 2021 13:59:18 +0200 Subject: [PATCH 20/27] Destroy p4est data structures in inner constructor --- src/mesh/mesh_io.jl | 4 ++-- src/mesh/p4est_mesh.jl | 44 +++++++++++++++++++++--------------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/mesh/mesh_io.jl b/src/mesh/mesh_io.jl index c19e7d0f648..788de17424c 100644 --- a/src/mesh/mesh_io.jl +++ b/src/mesh/mesh_io.jl @@ -259,8 +259,8 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT) conn_vec = Vector{Ptr{p4est_connectivity_t}}(undef, 1) p4est = p4est_load(p4est_file, C_NULL, 0, false, C_NULL, pointer(conn_vec)) - mesh = P4estMesh{ndims, RealT, ndims+2}(p4est, tree_node_coordinates, - nodes, boundary_names, "", false) + mesh = P4estMesh{ndims}(p4est, tree_node_coordinates, + nodes, boundary_names, "", false) else error("Unknown mesh type!") end diff --git a/src/mesh/p4est_mesh.jl b/src/mesh/p4est_mesh.jl index d1b76717f59..e19bfa73513 100644 --- a/src/mesh/p4est_mesh.jl +++ b/src/mesh/p4est_mesh.jl @@ -16,6 +16,24 @@ mutable struct P4estMesh{NDIMS, RealT<:Real, NDIMSP2} <: AbstractMesh{NDIMS} boundary_names ::Array{Symbol, 2} # [face direction, tree] current_filename ::String unsaved_changes ::Bool + + function P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, boundary_names, + current_filename, unsaved_changes) where NDIMS + mesh = new{NDIMS, eltype(tree_node_coordinates), NDIMS+2}(p4est, tree_node_coordinates, + nodes, boundary_names, current_filename, unsaved_changes) + + # Destroy p4est structs when the mesh is garbage collected + finalizer(destroy_mesh, mesh) + + return mesh + end +end + + +function destroy_mesh(mesh) + conn = mesh.p4est.connectivity + p4est_destroy(mesh.p4est) + p4est_connectivity_destroy(conn) end @@ -132,17 +150,8 @@ function P4estMesh(trees_per_dimension; polydeg, end end - mesh = P4estMesh{NDIMS, RealT, NDIMS+2}(p4est, tree_node_coordinates, nodes, - boundary_names, "", unsaved_changes) - - # Destroy p4est structs when the mesh is garbage collected - finalizer(mesh) do - conn = mesh.p4est.connectivity - p4est_destroy(mesh.p4est) - p4est_connectivity_destroy(conn) - end - - return mesh + return 
P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, + boundary_names, "", unsaved_changes) end @@ -201,17 +210,8 @@ function P4estMesh(meshfile::String; # There's no simple and generic way to distinguish boundaries. Name all of them :all. boundary_names = fill(:all, 4, n_trees) - mesh = P4estMesh{NDIMS, RealT, NDIMS+2}(p4est, tree_node_coordinates, nodes, - boundary_names, "", unsaved_changes) - - # Destroy p4est structs when the mesh is garbage collected - finalizer(mesh) do - conn = mesh.p4est.connectivity - p4est_destroy(mesh.p4est) - p4est_connectivity_destroy(conn) - end - - return mesh + return P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, + boundary_names, "", unsaved_changes) end From ea4d7ff6f75b221f31e63f677982c1a4d15b500a Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Tue, 8 Jun 2021 14:00:37 +0200 Subject: [PATCH 21/27] Update examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl Co-authored-by: Hendrik Ranocha --- examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl index 4bcc2a00537..f7947f89dcc 100644 --- a/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl +++ b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl @@ -6,7 +6,6 @@ using Trixi ############################################################################### # semidiscretization of the linear advection equation -# advectionvelocity = (1.0, 1.0) advectionvelocity = (0.2, -0.3) equations = LinearScalarAdvectionEquation2D(advectionvelocity) From 975e7ef1fd75f86775867705acc5b431ab09619b Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Tue, 8 Jun 2021 14:00:43 +0200 Subject: [PATCH 22/27] Update examples/2d/elixir_advection_p4est_non_conforming_flag.jl Co-authored-by: Hendrik Ranocha --- examples/2d/elixir_advection_p4est_non_conforming_flag.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/2d/elixir_advection_p4est_non_conforming_flag.jl b/examples/2d/elixir_advection_p4est_non_conforming_flag.jl index 78ae922a0e0..d316be7bd4c 100644 --- a/examples/2d/elixir_advection_p4est_non_conforming_flag.jl +++ b/examples/2d/elixir_advection_p4est_non_conforming_flag.jl @@ -21,7 +21,6 @@ f4(s) = SVector(s, 1.0 + sin(0.5 * pi * s)) # Create P4estMesh with 3 x 2 trees and 6 x 4 elements trees_per_dimension = (3, 2) - mesh = P4estMesh(trees_per_dimension, polydeg=3, faces=(f1, f2, f3, f4), initial_refinement_level=1) From 8ab220ebace0361812ae6089e70bc916bec41e67 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Wed, 9 Jun 2021 16:32:30 +0200 Subject: [PATCH 23/27] Make AMR controllers independent of the mesh --- src/callbacks_step/amr.jl | 132 +++++++++++++++----------------------- 1 file changed, 51 insertions(+), 81 deletions(-) diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index a790c0b757b..55ea59bfe93 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -321,7 +321,7 @@ end # Copy controller values to quad user data storage, will be called below function copy_to_quad_iter_volume(info, user_data) - # Global trees array, one-based indexing + # Load tree from global trees array, one-based indexing tree = load_sc_array(p4est_tree_t, info.p4est.trees, info.treeid + 1) # Quadrant numbering offset of this quadrant offset = 
tree.quadrants_offset @@ -522,21 +522,64 @@ function get_element_variables!(element_variables, indicator::AbstractIndicator, end +function current_element_levels(mesh::TreeMesh, solver, cache) + cell_ids = cache.elements.cell_ids[eachelement(solver, cache)] + + return mesh.tree.levels[cell_ids] +end + + +function extract_levels_iter_volume(info, user_data) + # Load tree from global trees array, one-based indexing + tree = load_sc_array(p4est_tree_t, info.p4est.trees, info.treeid + 1) + # Quadrant numbering offset of this quadrant + offset = tree.quadrants_offset + # Global quad ID + quad_id = offset + info.quadid + # Julia element ID + element_id = quad_id + 1 + + current_level = info.quad.level + + # Unpack user_data = current_levels and save current element level + ptr = Ptr{Int}(user_data) + unsafe_store!(ptr, current_level, element_id) + + return nothing +end + +function current_element_levels(mesh::P4estMesh, solver, cache) + current_levels = Vector{Int}(undef, nelements(solver, cache)) + iter_volume_c = @cfunction(extract_levels_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) + + GC.@preserve current_levels begin + p4est_iterate(mesh.p4est, + C_NULL, # ghost layer + pointer(current_levels), # user_data + iter_volume_c, # iter_volume + C_NULL, # iter_face + C_NULL) # iter_corner + end + + return current_levels +end + + # TODO: Taal refactor, merge the two loops of ControllerThreeLevel and IndicatorLöhner etc.? # But that would remove the simplest possibility to write that stuff to a file... # We could of course implement some additional logic and workarounds, but is it worth the effort? function (controller::ControllerThreeLevel)(u::AbstractArray{<:Any}, - mesh::TreeMesh, equations, dg::DG, cache; + mesh, equations, dg::DG, cache; kwargs...) @unpack controller_value = controller.cache resize!(controller_value, nelements(dg, cache)) alpha = controller.indicator(u, equations, dg, cache; kwargs...) 
+ current_levels = current_element_levels(mesh, dg, cache) @threaded for element in eachelement(dg, cache) - cell_id = cache.elements.cell_ids[element] - current_level = mesh.tree.levels[cell_id] + current_level = current_levels[element] # set target level target_level = current_level @@ -566,80 +609,6 @@ function (controller::ControllerThreeLevel)(u::AbstractArray{<:Any}, end -# Loop above (as for TreeMesh) but executed by p4est -function controller_three_level_iter_volume(info, user_data) - # Unpack user_data = [controller, alpha] - ptr = Ptr{Any}(user_data) - data_array = unsafe_wrap(Array, ptr, 2) - controller = data_array[1] - alpha = data_array[2] - - @unpack controller_value = controller.cache - - # Global trees array, one-based indexing - tree = load_sc_array(p4est_tree_t, info.p4est.trees, info.treeid + 1) - # Quadrant numbering offset of this quadrant - offset = tree.quadrants_offset - # Global quad ID - quad_id = offset + info.quadid - # Julia element ID - element = quad_id + 1 - - current_level = info.quad.level - - # set target level - target_level = current_level - if alpha[element] > controller.max_threshold - target_level = controller.max_level - elseif alpha[element] > controller.med_threshold - if controller.med_level > 0 - target_level = controller.med_level - # otherwise, target_level = current_level - # set med_level = -1 to implicitly use med_level = current_level - end - else - target_level = controller.base_level - end - - # compare target level with actual level to set controller - if current_level < target_level - controller_value[element] = 1 # refine! - elseif current_level > target_level - controller_value[element] = -1 # coarsen! - else - controller_value[element] = 0 # we're good - end - - return nothing -end - -function (controller::ControllerThreeLevel)(u::AbstractArray{<:Any}, - mesh::P4estMesh, equations, dg::DG, cache; - kwargs...) - - @unpack controller_value = controller.cache - resize!(controller_value, nelements(dg, cache)) - - alpha = controller.indicator(u, equations, dg, cache; kwargs...) - - # Let p4est do the same iteration as with TreeMesh above - user_data = [controller, alpha] - iter_volume_c = @cfunction(controller_three_level_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) - - GC.@preserve user_data begin - p4est_iterate(mesh.p4est, - C_NULL, # ghost layer - pointer(user_data), - iter_volume_c, # iter_volume - C_NULL, # iter_face - C_NULL) # iter_corner - end - - return controller_value -end - - - """ ControllerThreeLevelCombined(semi, indicator_primary, indicator_secondary; base_level=1, @@ -728,7 +697,7 @@ end function (controller::ControllerThreeLevelCombined)(u::AbstractArray{<:Any}, - mesh::TreeMesh, equations, dg::DG, cache; + mesh, equations, dg::DG, cache; kwargs...) @unpack controller_value = controller.cache @@ -737,9 +706,10 @@ function (controller::ControllerThreeLevelCombined)(u::AbstractArray{<:Any}, alpha = controller.indicator_primary(u, equations, dg, cache; kwargs...) 
alpha_secondary = controller.indicator_secondary(u, equations, dg, cache) + current_levels = current_element_levels(mesh, dg, cache) + @threaded for element in eachelement(dg, cache) - cell_id = cache.elements.cell_ids[element] - current_level = mesh.tree.levels[cell_id] + current_level = current_levels[element] # set target level target_level = current_level From e71913fb37fa7493c41c39c497a9287d51372ba4 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Wed, 9 Jun 2021 18:08:31 +0200 Subject: [PATCH 24/27] Implement suggestions --- src/auxiliary/auxiliary.jl | 2 ++ src/callbacks_step/amr.jl | 19 +++++++++----- src/callbacks_step/analysis.jl | 2 +- src/mesh/p4est_mesh.jl | 22 +++++++++++------ src/solvers/dg_p4est/containers.jl | 15 ++++++------ src/solvers/dg_p4est/containers_2d.jl | 30 +++++++++++------------ src/solvers/dg_unstructured_quad/dg_2d.jl | 6 ----- 7 files changed, 53 insertions(+), 43 deletions(-) diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl index 8318ba9d6bd..65f8a2f3d33 100644 --- a/src/auxiliary/auxiliary.jl +++ b/src/auxiliary/auxiliary.jl @@ -228,4 +228,6 @@ function init_p4est() # Initialize p4est with log level ERROR to prevent a lot of output in AMR simulations p4est_init(C_NULL, SC_LP_ERROR) + + return nothing end diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 55ea59bfe93..156c2b32dd7 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -169,7 +169,7 @@ end kwargs...) # Note that we don't `wrap_array` the vector `u_ode` to be able to `resize!` # it when doing AMR while still dispatching on the `mesh` etc. - amr_callback(u_ode, mesh_equations_solver_cache(semi)..., t, iter; kwargs...) + amr_callback(u_ode, mesh_equations_solver_cache(semi)..., semi, t, iter; kwargs...) end @@ -181,7 +181,7 @@ end # `passive_args` is expected to be an iterable of `Tuple`s of the form # `(p_u_ode, p_mesh, p_equations, p_dg, p_cache)`. function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, - equations, dg::DG, cache, + equations, dg::DG, cache, semi, t, iter; only_refine=false, only_coarsen=false, passive_args=()) @@ -322,14 +322,13 @@ end # Copy controller values to quad user data storage, will be called below function copy_to_quad_iter_volume(info, user_data) # Load tree from global trees array, one-based indexing - tree = load_sc_array(p4est_tree_t, info.p4est.trees, info.treeid + 1) + tree = unsafe_load_sc(p4est_tree_t, info.p4est.trees, info.treeid + 1) # Quadrant numbering offset of this quadrant offset = tree.quadrants_offset # Global quad ID quad_id = offset + info.quadid # Access user_data = lambda - # TODO P4EST Is lambda always a Vector{Int}? 
user_data_ptr = Ptr{Int}(user_data) # Load controller_value = lambda[quad_id + 1] controller_value = unsafe_load(user_data_ptr, quad_id + 1) @@ -343,7 +342,7 @@ function copy_to_quad_iter_volume(info, user_data) end function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, - equations, dg::DG, cache, + equations, dg::DG, cache, semi, t, iter; only_refine=false, only_coarsen=false, passive_args=()) @@ -362,6 +361,9 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, # Copy controller value of each quad to the quad's user data storage iter_volume_c = @cfunction(copy_to_quad_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) + # The pointer to lambda will be interpreted as Ptr{Int} above + @assert lambda isa Vector{Int} + GC.@preserve lambda begin p4est_iterate(mesh.p4est, C_NULL, # ghost layer @@ -408,6 +410,11 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, if has_changed # TODO: Taal decide, where shall we set this? # don't set it to has_changed since there can be changes from earlier calls mesh.unsaved_changes = true + + if semi.boundary_conditions isa UnstructuredQuadSortedBoundaryTypes + # Reinitialize boundary types container because boundaries may have changed. + initialize!(boundary_conditions, cache) + end end # Return true if there were any cells coarsened or refined, otherwise false @@ -531,7 +538,7 @@ end function extract_levels_iter_volume(info, user_data) # Load tree from global trees array, one-based indexing - tree = load_sc_array(p4est_tree_t, info.p4est.trees, info.treeid + 1) + tree = unsafe_load_sc(p4est_tree_t, info.p4est.trees, info.treeid + 1) # Quadrant numbering offset of this quadrant offset = tree.quadrants_offset # Global quad ID diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl index 80c95d28e73..c306d965c67 100644 --- a/src/callbacks_step/analysis.jl +++ b/src/callbacks_step/analysis.jl @@ -385,7 +385,7 @@ function print_amr_information(callbacks, mesh::P4estMesh, solver, cache) elements_per_level = zeros(P4EST_MAXLEVEL + 1) - for tree in convert_sc_array(p4est_tree_t, mesh.p4est.trees) + for tree in unsafe_wrap_sc(p4est_tree_t, mesh.p4est.trees) elements_per_level .+= tree.quadrants_per_level end diff --git a/src/mesh/p4est_mesh.jl b/src/mesh/p4est_mesh.jl index e19bfa73513..42156cdbbc7 100644 --- a/src/mesh/p4est_mesh.jl +++ b/src/mesh/p4est_mesh.jl @@ -457,8 +457,12 @@ function coarsen_fn(p4est, which_tree, quadrants_ptr) quadrants = unsafe_wrap(Array, quadrants_ptr, 4) # Controller value has been copied to the quadrant's user data storage before. - # Unpack quadrant's user data ([global quad ID, controller_value]) - if all(i -> unsafe_load(Ptr{Int}(quadrants[i].p.user_data), 2) < 0, eachindex(quadrants)) + # Load controller value from quadrant's user data ([global quad ID, controller_value]). + controller_value(i) = unsafe_load(Ptr{Int}(quadrants[i].p.user_data), 2) + + # p4est calls this function for each 2^ndims quads that could be coarsened to a single one. + # Only coarsen if all these 2^ndims quads have been marked for coarsening. + if all(i -> controller_value(i) < 0, eachindex(quadrants)) # return true (coarsen) return Cint(1) else @@ -482,9 +486,11 @@ function coarsen!(mesh::P4estMesh) @timed timer() "coarsen!" 
p4est_coarsen(mesh.p4est, false, coarsen_fn_c, init_fn_c) + # IDs of newly created cells (one-based) new_cells = collect_new_cells(mesh) + # Old IDs of cells that have been coarsened (one-based) coarsened_cells_vec = collect_changed_cells(mesh, original_n_cells) - # 2^NDIMS changed cells should have been coarsened to one new cell. + # 2^ndims changed cells should have been coarsened to one new cell. # This matrix will store the IDs of all cells that have been coarsened to cell new_cells[i] # in the i-th column. coarsened_cells = reshape(coarsened_cells_vec, 2^ndims(mesh), length(new_cells)) @@ -520,8 +526,8 @@ end # Copy global quad ID to quad's user data storage, will be called below function save_original_id_iter_volume(info, user_data) - # Global trees array, one-based indexing - tree = load_sc_array(p4est_tree_t, info.p4est.trees, info.treeid + 1) + # Load tree from global trees array, one-based indexing + tree = unsafe_load_sc(p4est_tree_t, info.p4est.trees, info.treeid + 1) # Quadrant numbering offset of this quadrant offset = tree.quadrants_offset # Global quad ID @@ -551,7 +557,7 @@ end # Extract information about which cells have been changed function collect_changed_iter_volume(info, user_data) # The original element ID has been saved to user_data before. - # Unpack quadrant's user data ([global quad ID, controller_value]). + # Load original quad ID from quad's user data ([global quad ID, controller_value]). quad_data_ptr = Ptr{Int}(info.quad.p.user_data) original_id = unsafe_load(quad_data_ptr, 1) @@ -600,8 +606,8 @@ function collect_new_iter_volume(info, user_data) # original_id of cells that have been newly created is -1 if original_id < 0 - # Global trees array, one-based indexing - tree = load_sc_array(p4est_tree_t, info.p4est.trees, info.treeid + 1) + # Load tree from global trees array, one-based indexing + tree = unsafe_load_sc(p4est_tree_t, info.p4est.trees, info.treeid + 1) # Quadrant numbering offset of this quadrant offset = tree.quadrants_offset # Global quad ID diff --git a/src/solvers/dg_p4est/containers.jl b/src/solvers/dg_p4est/containers.jl index 341c4ebfafa..de24bb03569 100644 --- a/src/solvers/dg_p4est/containers.jl +++ b/src/solvers/dg_p4est/containers.jl @@ -339,8 +339,8 @@ end function count_interfaces_iter_face(info, user_data) if info.sides.elem_count == 2 # Extract interface data - sides = (load_sc_array(p4est_iter_face_side_t, info.sides, 1), - load_sc_array(p4est_iter_face_side_t, info.sides, 2)) + sides = (unsafe_load_sc(p4est_iter_face_side_t, info.sides, 1), + unsafe_load_sc(p4est_iter_face_side_t, info.sides, 2)) if sides[1].is_hanging == 0 && sides[2].is_hanging == 0 # No hanging nodes => normal interface @@ -370,8 +370,8 @@ end function count_mortars_iter_face(info, user_data) if info.sides.elem_count == 2 # Extract interface data - sides = (load_sc_array(p4est_iter_face_side_t, info.sides, 1), - load_sc_array(p4est_iter_face_side_t, info.sides, 2)) + sides = (unsafe_load_sc(p4est_iter_face_side_t, info.sides, 1), + unsafe_load_sc(p4est_iter_face_side_t, info.sides, 2)) if sides[1].is_hanging != 0 || sides[2].is_hanging != 0 # Hanging nodes => mortar @@ -431,8 +431,8 @@ function iterate_faces(mesh::P4estMesh, iter_face_c, user_data) end -# Convert sc_array to Julia array of the specified type -function convert_sc_array(::Type{T}, sc_array) where T +# Convert sc_array of type T to Julia array +function unsafe_wrap_sc(::Type{T}, sc_array) where T element_count = sc_array.elem_count element_size = sc_array.elem_size @@ -442,7 +442,8 @@ function 
convert_sc_array(::Type{T}, sc_array) where T end -function load_sc_array(::Type{T}, sc_array, i=1) where T +# Load the ith element (1-indexed) of an sc array of type T +function unsafe_load_sc(::Type{T}, sc_array, i=1) where T element_size = sc_array.elem_size @assert element_size == sizeof(T) diff --git a/src/solvers/dg_p4est/containers_2d.jl b/src/solvers/dg_p4est/containers_2d.jl index 232583bc884..23cc4111fda 100644 --- a/src/solvers/dg_p4est/containers_2d.jl +++ b/src/solvers/dg_p4est/containers_2d.jl @@ -31,11 +31,11 @@ function calc_node_coordinates!(node_coordinates, p4est_root_len = 1 << P4EST_MAXLEVEL p4est_quadrant_len(l) = 1 << (P4EST_MAXLEVEL - l) - trees = convert_sc_array(p4est_tree_t, mesh.p4est.trees) + trees = unsafe_wrap_sc(p4est_tree_t, mesh.p4est.trees) for tree in eachindex(trees) offset = trees[tree].quadrants_offset - quadrants = convert_sc_array(p4est_quadrant_t, trees[tree].quadrants) + quadrants = unsafe_wrap_sc(p4est_quadrant_t, trees[tree].quadrants) for i in eachindex(quadrants) element = offset + i @@ -69,8 +69,8 @@ function init_interfaces_iter_face(info, user_data) return nothing end - sides = (load_sc_array(p4est_iter_face_side_t, info.sides, 1), - load_sc_array(p4est_iter_face_side_t, info.sides, 2)) + sides = (unsafe_load_sc(p4est_iter_face_side_t, info.sides, 1), + unsafe_load_sc(p4est_iter_face_side_t, info.sides, 2)) if sides[1].is_hanging == true || sides[2].is_hanging == true # Mortar, no normal interface @@ -91,9 +91,9 @@ end # Function barrier for type stability function init_interfaces_iter_face_inner(info, sides, interfaces, interface_id, mesh) - # Global trees array, one-based indexing - trees = (load_sc_array(p4est_tree_t, mesh.p4est.trees, sides[1].treeid + 1), - load_sc_array(p4est_tree_t, mesh.p4est.trees, sides[2].treeid + 1)) + # Load local trees from global trees array, one-based indexing + trees = (unsafe_load_sc(p4est_tree_t, mesh.p4est.trees, sides[1].treeid + 1), + unsafe_load_sc(p4est_tree_t, mesh.p4est.trees, sides[2].treeid + 1)) # Quadrant numbering offsets of the quadrants at this interface offsets = SVector(trees[1].quadrants_offset, trees[2].quadrants_offset) @@ -179,9 +179,9 @@ end # Function barrier for type stability function init_boundaries_iter_face_inner(info, boundaries, boundary_id, mesh) # Extract boundary data - side = load_sc_array(p4est_iter_face_side_t, info.sides) - # Global trees array, one-based indexing - tree = load_sc_array(p4est_tree_t, mesh.p4est.trees, side.treeid + 1) + side = unsafe_load_sc(p4est_iter_face_side_t, info.sides) + # Load tree from global trees array, one-based indexing + tree = unsafe_load_sc(p4est_tree_t, mesh.p4est.trees, side.treeid + 1) # Quadrant numbering offset of this quadrant offset = tree.quadrants_offset @@ -238,8 +238,8 @@ function init_mortars_iter_face(info, user_data) return nothing end - sides = (load_sc_array(p4est_iter_face_side_t, info.sides, 1), - load_sc_array(p4est_iter_face_side_t, info.sides, 2)) + sides = (unsafe_load_sc(p4est_iter_face_side_t, info.sides, 1), + unsafe_load_sc(p4est_iter_face_side_t, info.sides, 2)) if sides[1].is_hanging == false && sides[2].is_hanging == false # Normal interface, no mortar @@ -260,9 +260,9 @@ end # Function barrier for type stability function init_mortars_iter_face_inner(info, sides, mortars, mortar_id, mesh) - # Global trees array, one-based indexing - trees = (load_sc_array(p4est_tree_t, mesh.p4est.trees, sides[1].treeid + 1), - load_sc_array(p4est_tree_t, mesh.p4est.trees, sides[2].treeid + 1)) + # Load local trees from 
global trees array, one-based indexing + trees = (unsafe_load_sc(p4est_tree_t, mesh.p4est.trees, sides[1].treeid + 1), + unsafe_load_sc(p4est_tree_t, mesh.p4est.trees, sides[2].treeid + 1)) # Quadrant numbering offsets of the quadrants at this interface offsets = SVector(trees[1].quadrants_offset, trees[2].quadrants_offset) diff --git a/src/solvers/dg_unstructured_quad/dg_2d.jl b/src/solvers/dg_unstructured_quad/dg_2d.jl index 77ce7afcd7b..697d2e6ac05 100644 --- a/src/solvers/dg_unstructured_quad/dg_2d.jl +++ b/src/solvers/dg_unstructured_quad/dg_2d.jl @@ -307,12 +307,6 @@ end function calc_boundary_flux!(cache, t, boundary_conditions, mesh::Union{UnstructuredQuadMesh, P4estMesh}, equations, dg::DG) - if nboundaries(cache.boundaries) != nboundaries(boundary_conditions) - # When using AMR, the boundary container may have been resized. - # In this case, the boundary types container must be reinitialized as well. - initialize!(boundary_conditions, cache) - end - @unpack boundary_condition_types, boundary_indices = boundary_conditions calc_boundary_flux_by_type!(cache, t, boundary_condition_types, boundary_indices, From 5caaa569b9bc8d98054f5c64405964f08332b5ea Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Wed, 9 Jun 2021 18:09:09 +0200 Subject: [PATCH 25/27] Fix e71913f --- src/callbacks_step/amr.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 156c2b32dd7..a3059abda01 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -413,7 +413,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, if semi.boundary_conditions isa UnstructuredQuadSortedBoundaryTypes # Reinitialize boundary types container because boundaries may have changed. - initialize!(boundary_conditions, cache) + initialize!(semi.boundary_conditions, cache) end end From 6a6a183de50d70d4f7eebba62b61adb8246a5ebd Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Wed, 9 Jun 2021 18:19:34 +0200 Subject: [PATCH 26/27] Implement suggestions --- src/callbacks_step/amr.jl | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index a3059abda01..52cbfd9c929 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -411,16 +411,22 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, # don't set it to has_changed since there can be changes from earlier calls mesh.unsaved_changes = true - if semi.boundary_conditions isa UnstructuredQuadSortedBoundaryTypes - # Reinitialize boundary types container because boundaries may have changed. - initialize!(semi.boundary_conditions, cache) - end + reinitialize_boundaries!(semi.boundary_conditions, cache) end # Return true if there were any cells coarsened or refined, otherwise false return has_changed end +function reinitialize_boundaries!(boundary_conditions::UnstructuredQuadSortedBoundaryTypes, cache) + # Reinitialize boundary types container because boundaries may have changed. + initialize!(boundary_conditions, cache) +end + +function reinitialize_boundaries!(boundary_conditions, cache) + return boundary_conditions +end + # After refining cells, shift original cell ids to match new locations # Note: Assumes sorted lists of original and refined cell ids! 
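
For illustration only, here is a minimal, self-contained sketch of the multiple-dispatch pattern that `reinitialize_boundaries!` uses in the patch above: a specialized method for sorted boundary containers plus a no-op fallback, so the AMR callback can call it unconditionally after the mesh changed. The two container types and the printed message are hypothetical stand-ins, not Trixi's actual types.

    # Hypothetical stand-in types; only the dispatch structure mirrors the patch above.
    struct SortedBoundaryTypes end     # plays the role of UnstructuredQuadSortedBoundaryTypes
    struct OtherBoundaryConditions end

    # Specialized method: sorted boundary containers must be rebuilt after AMR.
    reinitialize_boundaries!(bc::SortedBoundaryTypes, cache) = (println("rebuilding boundary indices"); bc)

    # Generic fallback: every other boundary condition container needs no work.
    reinitialize_boundaries!(bc, cache) = bc

    # The AMR callback can now call this unconditionally instead of checking `isa`:
    reinitialize_boundaries!(SortedBoundaryTypes(), nothing)      # rebuilds
    reinitialize_boundaries!(OtherBoundaryConditions(), nothing)  # no-op

The same idea is what lets `current_element_levels` earlier in this series dispatch on `TreeMesh` versus `P4estMesh` without any mesh-type checks inside the controllers.
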
From f1e342fb0bc3a59e77b3476c21b3852e8cd089a4 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Wed, 9 Jun 2021 22:57:58 +0200 Subject: [PATCH 27/27] Fix 6a6a183 --- src/semidiscretization/semidiscretization_euler_gravity.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/semidiscretization/semidiscretization_euler_gravity.jl b/src/semidiscretization/semidiscretization_euler_gravity.jl index 4e15e5bdf7b..0d3e8b0e7b6 100644 --- a/src/semidiscretization/semidiscretization_euler_gravity.jl +++ b/src/semidiscretization/semidiscretization_euler_gravity.jl @@ -435,6 +435,6 @@ end semi::SemidiscretizationEulerGravity, t, iter; kwargs...) passive_args = ((semi.cache.u_ode, mesh_equations_solver_cache(semi.semi_gravity)...),) - amr_callback(u_ode, mesh_equations_solver_cache(semi.semi_euler)..., t, iter; + amr_callback(u_ode, mesh_equations_solver_cache(semi.semi_euler)..., semi, t, iter; kwargs..., passive_args=passive_args) end
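
A closing note on the `unsafe_load_sc` and `unsafe_wrap_sc` helpers renamed in patch 24: the allocation savings come from loading a single element of a C-owned `sc_array` directly through its pointer instead of wrapping the whole buffer in a Julia `Array` first. The sketch below only illustrates that idea; the `FakeScArray` struct is a made-up stand-in, not p4est's real `sc_array_t` layout.

    struct FakeScArray            # hypothetical stand-in for p4est's sc_array_t
        elem_size::Csize_t        # size of one element in bytes
        elem_count::Csize_t       # number of elements
        array::Ptr{Cvoid}         # pointer to the contiguous element buffer
    end

    # Wrap the whole buffer as a Julia Array (allocates an Array descriptor)
    wrap_all(::Type{T}, sc::FakeScArray) where T =
        unsafe_wrap(Array, Ptr{T}(sc.array), Int(sc.elem_count))

    # Load only the i-th element (one-based), without allocating an Array
    load_ith(::Type{T}, sc::FakeScArray, i=1) where T =
        unsafe_load(Ptr{T}(sc.array), i)

    data = Cdouble[1.0, 2.0, 3.0]  # stands in for memory owned by the C library
    GC.@preserve data begin
        sc = FakeScArray(sizeof(Cdouble), length(data), pointer(data))
        @assert load_ith(Cdouble, sc, 2) == 2.0
        @assert wrap_all(Cdouble, sc)[3] == 3.0
    end

Loading one element at a time is why the face and volume iteration callbacks in this series switched from `convert_sc_array` to the single-element loads in their hot paths.
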