template <int dim, int spacedim, class LacType>
LinearProblem<dim, spacedim, LacType>::LinearProblem(
  const std::string &component_names,
  const std::string &problem_name)
  // ...
  , component_names(component_names)
  // ...
  , problem_name(problem_name)
  , section_name(problem_name == "" ? "" : "/" + problem_name)
  , mpi_communicator(MPI_COMM_WORLD)
  , mpi_rank(Utilities::MPI::this_mpi_process(mpi_communicator))
  , mpi_size(Utilities::MPI::n_mpi_processes(mpi_communicator))
  , pcout(std::cout, mpi_rank == 0)
  // ...
  , grid_generator(section_name + "/Grid")
  , grid_refinement(section_name + "/Grid/Refinement", /* ... */)
  // ...
  , finite_element(section_name,
                   /* ... */
                   "FESystem[FE_Q(1)^" + std::to_string(n_components) + "]")
  , dof_handler(triangulation)
  // ...
  , preconditioner(section_name + "/Solver/System AMG preconditioner")
  , mass_inverse_operator(section_name + "/Solver/Mass")
  , mass_preconditioner(section_name + "/Solver/Mass AMG preconditioner")
  , forcing_term(section_name + "/Functions", /* ... */)
  // ...
  , exact_solution(section_name + "/Functions", /* ... */)
  // ...
  , initial_value(section_name + "/Functions", /* ... */)
  // ...
  , boundary_conditions(section_name + "/Boundary conditions", /* ... */)
  // ...
  , error_table(section_name + "/Error",
                /* ... */
                std::vector<std::set<VectorTools::NormType>>(/* ... */)
                /* ... */)
  , data_out(section_name + "/Output")
  , ark_ode_data(section_name + "/ARKode")
{
  add_parameter("n_threads",
                number_of_threads,
                "Fix number of threads during the execution");
  add_parameter("verbosity",
                /* ... */
                "Verbosity level used with deallog");
  add_parameter("evolution type",
                evolution_type,
                "The type of time evolution to use in the linear problem.");
  enter_subsection("Quasi-static");
  add_parameter("start time", start_time, "Start time of the simulation");
  add_parameter("end time", end_time, "End time of the simulation");
  add_parameter("initial time step",
                desired_start_step_size,
                "Initial time step of the simulation");
  leave_subsection();
  // ...
  advance_time_call_back.connect(
    [&](const auto &time, const auto &, const auto &) {
      boundary_conditions.set_time(time);
      forcing_term.set_time(time);
      exact_solution.set_time(time);
      // ...
    });
}
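All of the string arguments above are parameter-file section names, and the add_parameter()/enter_subsection() calls follow deal.II's ParameterAcceptor pattern. The stand-alone sketch below (with a hypothetical PoissonParameters class and poisson.prm file name) shows that pattern in isolation; it is an illustration of the deal.II API, not part of this class:

#include <deal.II/base/parameter_acceptor.h>

// Hypothetical parameter holder, for illustration only.
class PoissonParameters : public dealii::ParameterAcceptor
{
public:
  PoissonParameters()
    : dealii::ParameterAcceptor("/Poisson")
  {
    // Each call ties a member variable to an entry in the "/Poisson" section.
    add_parameter("diffusion coefficient",
                  diffusion_coefficient,
                  "Constant diffusion coefficient");
    add_parameter("output frequency",
                  output_frequency,
                  "Write output every n steps");
  }

  double       diffusion_coefficient = 1.0;
  unsigned int output_frequency      = 1;
};

int main()
{
  PoissonParameters prm;
  // Parse the given parameter file and fill all registered variables.
  dealii::ParameterAcceptor::initialize("poisson.prm");
  return 0;
}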
template <int dim, int spacedim, class LacType>
void LinearProblem<dim, spacedim, LacType>::setup_system()
{
  deallog << "System setup" << std::endl;
  // ...
  AssertThrow(ref_cells.size() == 1,
              ExcMessage(
                "This program does not support mixed simplex/hex grid types."));
  // ...
  AssertThrow(/* ... */,
              ExcMessage("The finite element must be defined on the same "
                         "cell type as the grid."));
  // ...
  dof_handler.distribute_dofs(finite_element);
  // ...
  const auto [block_names, block_multiplicities] = /* ... */;
  // ...
  dof_handler.locally_owned_dofs().split_by_block(dofs_per_block);
  // ...
  IndexSet non_blocked_locally_relevant_dofs;
  /* ... */ (non_blocked_locally_relevant_dofs);
  locally_relevant_dofs = /* ... */;
  // ...
  deallog << "Number of degrees of freedom [total / ("
          /* ... */
          << dof_handler.n_dofs() << " / ("
          /* ... */;
  // ...
  constraints.reinit(non_blocked_locally_relevant_dofs);
  // ...
  boundary_conditions.check_consistency(triangulation);
  // ...
  boundary_conditions.apply_essential_boundary_conditions(dof_handler,
                                                          /* ... */);
  // ...
  add_constraints_call_back();
  // ...
  /* ... */
  locally_relevant_dofs,
  /* ... */
  initializer(sparsity, dof_handler, constraints, coupling);
  initializer(sparsity, matrix);
  // ...
  initializer(sparsity, mass_matrix);
  // ...
  initializer(solution);
  // ...
  initializer.ghosted(locally_relevant_solution);
  // ...
  boundary_conditions.apply_natural_boundary_conditions(
    *mapping, dof_handler, constraints, matrix, rhs);
  // ...
  exact_solution.update_constants({});
  forcing_term.update_constants({});
  // ...
  /* ... */(triangulation, finite_element().tensor_degree() + 1);
  // ...
  /* ... */(triangulation, finite_element().tensor_degree() + 1);
  // ...
  setup_system_call_back();
}
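setup_system() follows the usual deal.II workflow: distribute degrees of freedom, extract locally relevant DoFs, build constraints, then size matrices and vectors through the initializer helper. The first of those steps, reduced to a minimal serial program with a hypothetical Q1 discretization on a square, looks like this:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/affine_constraints.h>

#include <iostream>

using namespace dealii;

int main()
{
  Triangulation<2> tria;
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);

  FE_Q<2>       fe(1);
  DoFHandler<2> dof_handler(tria);
  dof_handler.distribute_dofs(fe);

  // Constraints collect hanging-node (and later boundary) conditions; they
  // are closed once and then used during assembly.
  AffineConstraints<double> constraints;
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  constraints.close();

  std::cout << "Number of degrees of freedom: " << dof_handler.n_dofs()
            << std::endl;
}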
template <int dim, int spacedim, class LacType>
/* ... */
template <int dim, int spacedim, class LacType>
/* ... */(dealii::Vector<float> &) const
{
  /* ... */
}
template <int dim, int spacedim, class LacType>
void LinearProblem<dim, spacedim, LacType>::copy_one_cell(const CopyData &copy)
{
  constraints.distribute_local_to_global(copy.matrices[0],
                                         /* ... */
                                         copy.local_dof_indices[0],
                                         /* ... */);
}
template <int dim, int spacedim, class LacType>
void LinearProblem<dim, spacedim, LacType>::assemble_system()
{
  // ...
  CopyData copy(finite_element().n_dofs_per_cell());
  // ...
  auto worker = [&](const auto &cell, auto &scratch, auto &copy) {
    assemble_system_one_cell(cell, scratch, copy);
  };
  // ...
  auto copier = [&](const auto &copy) { copy_one_cell(copy); };
  // ...
  /* ... */ (dof_handler.begin_active()),
  // ...

  CopyData copy(finite_element().n_dofs_per_cell());
  // ...
  auto worker = [&](const auto &cell, auto &scratch, auto &copy) {
    const auto &fev = scratch.reinit(cell);
    copy.matrices[0] = 0;
    cell->get_dof_indices(copy.local_dof_indices[0]);
    for (const auto &q : fev.quadrature_point_indices())
      for (const auto &i : fev.dof_indices())
        for (const auto &j : fev.dof_indices())
          if (finite_element().system_to_component_index(i).first ==
              finite_element().system_to_component_index(j).first)
            copy.matrices[0](i, j) +=
              fev.shape_value(i, q) * fev.shape_value(j, q) * fev.JxW(q);
  };
  // ...
  auto copier = [&](const auto &copy) {
    constraints.distribute_local_to_global(copy.matrices[0],
                                           copy.local_dof_indices[0],
                                           /* ... */);
  };
  // ...
  /* ... */ (dof_handler.begin_active()),
  // ...
  assemble_system_call_back();
}
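The two loops above use the project's ScratchData/CopyData helpers with WorkStream/MeshWorker-style worker and copier lambdas. The same mass-matrix assembly written as a plain cell loop with FEValues, as a self-contained sketch rather than the class's own code:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>

using namespace dealii;

int main()
{
  Triangulation<2> tria;
  GridGenerator::hyper_cube(tria);
  tria.refine_global(2);

  FE_Q<2>       fe(1);
  DoFHandler<2> dof_handler(tria);
  dof_handler.distribute_dofs(fe);

  AffineConstraints<double> constraints;
  constraints.close();

  DynamicSparsityPattern dsp(dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
  SparsityPattern sparsity;
  sparsity.copy_from(dsp);
  SparseMatrix<double> mass_matrix(sparsity);

  const QGauss<2> quadrature(fe.degree + 1);
  FEValues<2> fe_values(fe, quadrature, update_values | update_JxW_values);

  FullMatrix<double> cell_matrix(fe.n_dofs_per_cell(), fe.n_dofs_per_cell());
  std::vector<types::global_dof_index> dof_indices(fe.n_dofs_per_cell());

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      cell_matrix = 0;
      cell->get_dof_indices(dof_indices);
      // Local mass matrix: integral of phi_i * phi_j on this cell.
      for (const auto q : fe_values.quadrature_point_indices())
        for (const auto i : fe_values.dof_indices())
          for (const auto j : fe_values.dof_indices())
            cell_matrix(i, j) += fe_values.shape_value(i, q) *
                                 fe_values.shape_value(j, q) *
                                 fe_values.JxW(q);
      // Constraints are resolved while scattering into the global matrix.
      constraints.distribute_local_to_global(cell_matrix,
                                             dof_indices,
                                             mass_matrix);
    }
}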
template <int dim, int spacedim, class LacType>
/* ... */
template <int dim, int spacedim, class LacType>
void LinearProblem<dim, spacedim, LacType>::estimate(
  Vector<float> &error_per_cell) const
{
  // ...
  grid_refinement.estimate_error(*mapping,
                                 /* ... */
                                 locally_relevant_solution,
                                 /* ... */);
  // ...
  error_table.error_from_exact(*mapping,
                               /* ... */
                               locally_relevant_solution,
                               /* ... */);
  // ...
}
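estimate() delegates to the grid refinement and error table helpers shown above. In plain deal.II, a cell-wise indicator is typically computed with KellyErrorEstimator and then fed to a marking strategy; a minimal, self-contained sketch follows (the zero solution vector is only there to make the program run):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_refinement.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/error_estimator.h>

using namespace dealii;

int main()
{
  Triangulation<2> tria;
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);

  FE_Q<2>       fe(1);
  DoFHandler<2> dof_handler(tria);
  dof_handler.distribute_dofs(fe);

  // Placeholder solution; in practice this is the computed field.
  Vector<double> solution(dof_handler.n_dofs());

  Vector<float> error_per_cell(tria.n_active_cells());
  KellyErrorEstimator<2>::estimate(dof_handler,
                                   QGauss<1>(fe.degree + 1),
                                   {},
                                   solution,
                                   error_per_cell);

  // Mark 30% of the cells with the largest indicator for refinement,
  // 3% for coarsening, then adapt the mesh.
  GridRefinement::refine_and_coarsen_fixed_number(tria, error_per_cell, 0.3, 0.03);
  tria.execute_coarsening_and_refinement();
}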
template <int dim, int spacedim, class LacType>
/* ... */

template <int dim, int spacedim, class LacType>
/* ... */
template <int dim, int spacedim, class LacType>
void LinearProblem<dim, spacedim, LacType>::output_results(
  const unsigned cycle) const
{
  deallog << "Output results (" << component_names << ")" << std::endl;
  // ...
  /* ... */ grid_refinement.get_n_refinement_cycles()));
  data_out.attach_dof_handler(dof_handler, suffix);
  data_out.add_data_vector(locally_relevant_solution,
                           /* ... */
                           dealii::DataOut<dim, spacedim>::type_dof_data);
  // ...
  add_data_vector(data_out);
  data_out.write_data_and_clear(*mapping);
  // ...
  output_results_call_back();
}
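The data_out member used above is a wrapper object around deal.II's DataOut facilities (attach_dof_handler with a file suffix, write_data_and_clear). A bare-bones serial equivalent using deal.II's own DataOut class, writing a single VTU file, might look like this:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/data_out.h>

#include <fstream>

using namespace dealii;

int main()
{
  Triangulation<2> tria;
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);

  FE_Q<2>       fe(1);
  DoFHandler<2> dof_handler(tria);
  dof_handler.distribute_dofs(fe);

  Vector<double> solution(dof_handler.n_dofs());

  DataOut<2> data_out;
  data_out.attach_dof_handler(dof_handler);
  // The second argument is the field name seen by ParaView/VisIt.
  data_out.add_data_vector(solution, "solution");
  data_out.build_patches();

  std::ofstream out("solution_000.vtu");
  data_out.write_vtu(out);
}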
template <int dim, int spacedim, class LacType>
void LinearProblem<dim, spacedim, LacType>::print_system_info() const
{
  if (number_of_threads != -1 && number_of_threads > 0)
    MultithreadInfo::set_thread_limit(
      static_cast<unsigned int>(number_of_threads));
  // ...
  deallog << "Running " << problem_name << std::endl
          /* ... */
          << "Number of MPI processes : " << mpi_size << std::endl
          << "MPI rank of this process: " << mpi_rank << std::endl;
  // ...
  AssertThrow((lac_is_dealii && mpi_size == 1) || (lac_is_dealii == false),
              ExcMessage("deal.II LAC only works in serial"));
}
template <int dim, int spacedim, class LacType>
void LinearProblem<dim, spacedim, LacType>::run()
{
  // ...
  switch (evolution_type)
    /* ... */
}
template <int dim, int spacedim, class LacType>
void LinearProblem<dim, spacedim, LacType>::run_quasi_static()
{
  // ...
  deallog << "Solving quasi-static problem" << std::endl;
  // ...
  DiscreteTime time(start_time, end_time, desired_start_step_size);
  unsigned int output_cycle = 0;
  // ...
  deallog << "Timestep " << cycle << ", current time = " << t
          << ", step size = " << dt << std::endl;
  // ...
  advance_time_call_back(t, dt, cycle);
  // ...
  if (cycle % output_frequency == 0)
    output_results(output_cycle++);
  // ...
  output_results(output_cycle++);
  // ...
}
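The loop above is driven by deal.II's DiscreteTime, which chooses step sizes so that the end time is reached exactly. A minimal sketch of the same loop structure, with hard-coded start/end times instead of the class's parameters:

#include <deal.II/base/discrete_time.h>

#include <iostream>

int main()
{
  using namespace dealii;

  // Start time, end time, desired step size (the last step may be shortened).
  DiscreteTime time(0.0, 1.0, 0.1);
  while (time.is_at_end() == false)
    {
      time.advance_time();
      std::cout << "step " << time.get_step_number()
                << ", t = " << time.get_current_time()
                << ", dt = " << time.get_previous_step_size() << std::endl;
    }
}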
template <int dim, int spacedim, class LacType>
void LinearProblem<dim, spacedim, LacType>::setup_transient(ARKode &arkode)
{
  // ...
  /* ... */ = [&](const double, const auto &vector, const auto step) {
    locally_relevant_solution = vector;
    output_results(step);
    // ...
  };
  // ...
  arkode.implicit_function = [&](const double t, const auto &y, auto &res) {
    deallog << "Evaluation at time " << t << std::endl;
    advance_time_call_back(t, 0.0, 0);
    matrix.vmult(res, y);
    res.sadd(-1.0, 1.0, rhs);
    // ...
  };
  // ...
  arkode.mass_times_vector = [&](const double, const auto &src, auto &dst) {
    mass_matrix.vmult(dst, src);
    // ...
  };
  // ...
  arkode.jacobian_times_vector =
    [&](const auto &v, auto &Jv, double, const auto &, const auto &) {
      // ...
    };
  // ...
  /* ... */ =
    [&](auto &op, auto &prec, auto &dst, const auto &src, double tol) -> int {
      // ...
      deallog << "Solving mass system" << std::endl;
      mass_inverse_operator.solve(op, prec, src, dst, tol);
      // ...
    };
  // ...
  arkode.solve_linearized_system =
    [&](auto &op, auto &prec, auto &dst, const auto &src, double tol) -> int {
      // ...
      deallog << "Solving linearized system" << std::endl;
      // ...
    };
  // ...
}
template <int dim, int spacedim, class LacType>
void LinearProblem<dim, spacedim, LacType>::run_transient()
{
  // ...
  deallog << "Solving transient problem" << std::endl;
  // ...
  ARKode arkode(ark_ode_data, mpi_communicator);
  setup_transient(arkode);
  setup_arkode_call_back(arkode);
  // ...
  auto res = arkode.solve_ode(solution);
  // ...
  AssertThrow(/* ... */,
              ExcMessage("ARKode solver failed with error code " +
                         std::to_string(res)));
  // ...
}
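run_transient() hands the problem to deal.II's SUNDIALS::ARKode wrapper and calls solve_ode(). Detached from this class, the wrapper can be used as in the sketch below, which integrates the scalar ODE y' = -y with an explicit method; this assumes a deal.II build with SUNDIALS enabled, and callback signatures have changed slightly across deal.II releases:

#include <deal.II/base/mpi.h>
#include <deal.II/lac/vector.h>
#include <deal.II/sundials/arkode.h>

using namespace dealii;

int main(int argc, char **argv)
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv);

  using VectorType = Vector<double>;

  SUNDIALS::ARKode<VectorType>::AdditionalData data;
  data.initial_time      = 0.0;
  data.final_time        = 1.0;
  data.initial_step_size = 1e-2;
  data.output_period     = 0.1;

  SUNDIALS::ARKode<VectorType> ode(data);

  // Right-hand side of y' = -y, integrated with an explicit Runge-Kutta scheme.
  ode.explicit_function =
    [](const double /*t*/, const VectorType &y, VectorType &ydot) {
      ydot = y;
      ydot *= -1.0;
    };

  VectorType y(1);
  y[0] = 1.0;
  ode.solve_ode(y); // y now approximates exp(-1).
}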
template <int dim, int spacedim, class LacType>
void LinearProblem<dim, spacedim, LacType>::run_steady_state()
{
  // ...
  deallog << "Solving steady state problem" << std::endl;
  // ...
  for (const auto &cycle : grid_refinement.get_refinement_cycles())
    {
      deallog << "Cycle " << cycle << std::endl;
      // ...
      estimate(error_per_cell);
      output_results(cycle);
      if (cycle < grid_refinement.get_n_refinement_cycles() - 1)
        {
          // ...
          mark(error_per_cell);
          // ...
        }
    }
  // ...
  if (this->mpi_rank == 0)
    error_table.output_table(std::cout);
}
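run_steady_state() closes by printing the error table on rank 0. The error_table member behaves like deal.II's ParsedConvergenceTable, which is what the following compact sketch uses; it interpolates a known function instead of solving a PDE, only to keep the example short:

#include <deal.II/base/function_lib.h>
#include <deal.II/base/parsed_convergence_table.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/vector_tools.h>

#include <iostream>

using namespace dealii;

int main()
{
  Triangulation<2> tria;
  GridGenerator::hyper_cube(tria);
  tria.refine_global(2);

  FE_Q<2>       fe(1);
  DoFHandler<2> dof_handler(tria);

  Functions::CosineFunction<2> exact;
  ParsedConvergenceTable       error_table;

  for (unsigned int cycle = 0; cycle < 4; ++cycle)
    {
      dof_handler.distribute_dofs(fe);
      // Interpolating the exact solution stands in for an actual solve here.
      Vector<double> solution(dof_handler.n_dofs());
      VectorTools::interpolate(dof_handler, exact, solution);
      error_table.error_from_exact(dof_handler, solution, exact);
      tria.refine_global(1);
    }
  error_table.output_table(std::cout);
}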
General class, used to initialize different types of block vectors, block matrices and block sparsity ...
Construct a LinearProblem.
void run_steady_state()
Solve a steady state problem.
virtual void run()
Main entry point of the problem.
void refine()
Refine the grid.
virtual void setup_system()
Initial setup: distribute degrees of freedom, make all vectors and matrices of the right size,...
virtual void output_results(const unsigned cycle) const
Output the solution and the grid in a format that can be read by Paraview or Visit.
virtual void assemble_system()
Actually loop over cells, and assemble the global system.
virtual void print_system_info() const
Print some information about the current processes/MPI ranks, etc.
void run_quasi_static()
Solve a quasi-static problem.
virtual void copy_one_cell(const CopyData &copy)
Distribute the data that has been assembled by assemble_system_one_cell() to the global matrix and rhs...
virtual void custom_estimator(Vector< float > &error_per_cell) const
Override this function to use a custom error estimator in the mesh refinement process.
virtual void setup_transient(ARKode &arkode)
Set up the transient problem.
virtual void solve()
Solve the global system.
virtual void assemble_system_one_cell(const typename DoFHandler< dim, spacedim >::active_cell_iterator &cell, ScratchData &scratch, CopyData &copy)
Assemble the local system matrix on cell, using scratch for FEValues and other expensive scratch obje...
void run_transient()
Solve a dynamic problem.
virtual void estimate(Vector< float > &error_per_cell) const
Perform a posteriori error estimation, and store the results in the error_per_cell vector.
typename SUNDIALS::ARKode< typename LacType::BlockVector > ARKode
SUNDIALS time integrator.
void mark(const Vector< float > &error_per_cell)
According to the chosen strategy, mark some cells for refinement, possibly using the error_per_cell ...