diff --git a/.gitattributes b/.gitattributes
deleted file mode 100644
index 910b5161..00000000
--- a/.gitattributes
+++ /dev/null
@@ -1,11 +0,0 @@
-test/HD/SedovBlastWave/python/1Dsolution/data.0001.vtk filter=lfs diff=lfs merge=lfs -text
-test/HD/ViscousFlowPastCylinder/python/data.0001.ref.vtk filter=lfs diff=lfs merge=lfs -text
-test/MHD/AxisFluxTube/python/data.0001.ref.vtk filter=lfs diff=lfs merge=lfs -text
-test/MHD/sod-iso/python/data.ref.vtk filter=lfs diff=lfs merge=lfs -text
-test/MHD/OrszagTang/python/data.0001.ref.vtk filter=lfs diff=lfs merge=lfs -text
-test/HD/FargoPlanet/python/data.0001.ref.vtk filter=lfs diff=lfs merge=lfs -text
-test/MHD/AxisFluxTube/python/data.0001.ref-coarsening.vtk filter=lfs diff=lfs merge=lfs -text
-test/MHD/OrszagTang3D/python/data.0001.ref.vtk filter=lfs diff=lfs merge=lfs -text
-test/MHD/sod/python/data.ref.vtk filter=lfs diff=lfs merge=lfs -text
-test/MHD/FargoMHDSpherical/python/data.0001.ref.vtk filter=lfs diff=lfs merge=lfs -text
-test/HD/MachReflection/python/data.0001.ref.vtk filter=lfs diff=lfs merge=lfs -text
diff --git a/.github/workflows/idefix-ci.yml b/.github/workflows/idefix-ci.yml
index 250aa4af..61e7d293 100644
--- a/.github/workflows/idefix-ci.yml
+++ b/.github/workflows/idefix-ci.yml
@@ -4,6 +4,7 @@ on:
push:
branches:
- master
+ - v2.0
pull_request:
paths-ignore:
- '.github/ISSUE_TEMPLATE/*'
@@ -16,17 +17,17 @@ concurrency:
cancel-in-progress: true
env:
- TEST_OPTIONS: -DKokkos_ENABLE_CUDA=ON
+ TESTME_OPTIONS: -cuda -Werror
+ PYTHONPATH: ${{ github.workspace }}
+ IDEFIX_DIR: ${{ github.workspace }}
jobs:
Linter:
# Don't do this in forks
- if: github.repository == 'idefix-code/idefix'
+ if: ${{ github.repository == 'idefix-code/idefix' || github.repository == 'glesur/idefix' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- with:
- lfs: false
- uses: actions/setup-python@v4
with:
python-version: 3.x
@@ -34,114 +35,253 @@ jobs:
- uses: pre-commit-ci/lite-action@v1.0.0
if: always()
- Hydrodynamics:
+ ShocksHydro:
+ needs: Linter
+ runs-on: self-hosted
+ steps:
+ - name: Check out repo
+ uses: actions/checkout@v3
+ with:
+ submodules: recursive
+ - name: Sod test
+ run: |
+ cd $IDEFIX_DIR/test/HD/sod
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Isothermal Sod test
+ run: |
+ cd $IDEFIX_DIR/test/HD/sod-iso
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Mach reflection test
+ run: |
+          cd $IDEFIX_DIR/test/HD/MachReflection
+ ./testme.py -all $TESTME_OPTIONS
+
+ ParabolicHydro:
needs: Linter
runs-on: self-hosted
steps:
- name: Check out repo
uses: actions/checkout@v3
with:
- lfs: true
submodules: recursive
- # Manually do a git LFS https://github.com/actions/checkout/issues/270
- - run: git lfs pull
- - name: Run Hydro test
- run: cd test && ./checks_hydro.sh $TEST_OPTIONS
+ - name: Viscous flow past cylinder
+ run: |
+ cd $IDEFIX_DIR/test/HD/ViscousFlowPastCylinder
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Viscous disk
+ run: |
+ cd $IDEFIX_DIR/test/HD/ViscousDisk
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Thermal diffusion
+ run: |
+ cd $IDEFIX_DIR/test/HD/thermalDiffusion
+ ./testme.py -all $TESTME_OPTIONS
- Magneto-Hydrodynamics:
+ ShocksMHD:
needs: Linter
runs-on: self-hosted
steps:
- name: Check out repo
uses: actions/checkout@v3
with:
- lfs: true
submodules: recursive
- # Manually do a git LFS https://github.com/actions/checkout/issues/270
- - run: git lfs pull
- - name: Run MHD test
- run: cd test && ./checks_mhd.sh $TEST_OPTIONS
+ - name: MHD Sod test
+ run: |
+ cd $IDEFIX_DIR/test/MHD/sod
+ ./testme.py -all $TESTME_OPTIONS
+ - name: MHD Isothermal Sod test
+ run: |
+ cd $IDEFIX_DIR/test/MHD/sod-iso
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Orszag Tang
+ run: |
+ cd $IDEFIX_DIR/test/MHD/OrszagTang
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Orszag Tang 3D+restart tests
+ run: |
+ cd $IDEFIX_DIR/test/MHD/OrszagTang3D
+ ./testme.py -all $TESTME_OPTIONS
+
- MPI:
+ ParabolicMHD:
needs: Linter
runs-on: self-hosted
steps:
- name: Check out repo
uses: actions/checkout@v3
with:
- lfs: true
submodules: recursive
- # Manually do a git LFS https://github.com/actions/checkout/issues/270
- - run: git lfs pull
- - name: Run MPI test
- run: cd test && ./checks_mpi.sh $TEST_OPTIONS
+ - name: Ambipolar C Shock
+ run: |
+ cd $IDEFIX_DIR/test/MHD/AmbipolarCshock
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Ambipolar C Shock 3D
+ run: |
+ cd $IDEFIX_DIR/test/MHD/AmbipolarCshock3D
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Resistive Alfvén wave
+ run: |
+ cd $IDEFIX_DIR/test/MHD/ResistiveAlfvenWave
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Hall whistler waves
+ run: |
+ cd $IDEFIX_DIR/test/MHD/HallWhistler
+ ./testme.py -all $TESTME_OPTIONS
+
+ Fargo:
+ needs: [Linter, ShocksHydro, ParabolicHydro, ShocksMHD, ParabolicMHD]
+ runs-on: self-hosted
+ steps:
+ - name: Check out repo
+ uses: actions/checkout@v3
+ with:
+ submodules: recursive
+ - name: Fargo + planet
+ run: |
+ cd $IDEFIX_DIR/test/HD/FargoPlanet
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Fargo MHD spherical
+ run: |
+ cd $IDEFIX_DIR/test/MHD/FargoMHDSpherical
+ ./testme.py -all $TESTME_OPTIONS
+
+ ShearingBox:
+ needs: [Linter, ShocksHydro, ParabolicHydro, ShocksMHD, ParabolicMHD]
+ runs-on: self-hosted
+ steps:
+ - name: Check out repo
+ uses: actions/checkout@v3
+ with:
+ submodules: recursive
+ - name: Hydro shearing box
+ run: |
+ cd $IDEFIX_DIR/test/HD/ShearingBox
+ ./testme.py -all $TESTME_OPTIONS
+ - name: MHD shearing box
+ run: |
+ cd $IDEFIX_DIR/test/MHD/ShearingBox
+ ./testme.py -all $TESTME_OPTIONS
+
+ SelfGravity:
+ needs: [Linter, ShocksHydro, ParabolicHydro, ShocksMHD, ParabolicMHD]
+ runs-on: self-hosted
+ steps:
+ - name: Check out repo
+ uses: actions/checkout@v3
+ with:
+ submodules: recursive
+ - name: Jeans Instability
+ run: |
+ cd $IDEFIX_DIR/test/SelfGravity/JeansInstability
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Random sphere spherical
+ run: |
+ cd $IDEFIX_DIR/test/SelfGravity/RandomSphere
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Random sphere cartesian
+ run: |
+ cd $IDEFIX_DIR/test/SelfGravity/RandomSphereCartesian
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Uniform spherical collapse
+ run: |
+ cd $IDEFIX_DIR/test/SelfGravity/UniformCollapse
+ ./testme.py -all $TESTME_OPTIONS
- VectorPotential:
- needs: [Linter, Hydrodynamics, Magneto-Hydrodynamics, MPI]
+ Planet:
+ needs: [Linter, ShocksHydro, ParabolicHydro, ShocksMHD, ParabolicMHD]
runs-on: self-hosted
steps:
- name: Check out repo
uses: actions/checkout@v3
with:
- lfs: true
submodules: recursive
- # Manually do a git LFS https://github.com/actions/checkout/issues/270
- - run: git lfs pull
- - name: Run Vector Potential test
- run: cd test && ./checks_vector_potential.sh $TEST_OPTIONS
+ - name: 3 body
+ run: |
+ cd $IDEFIX_DIR/test/Planet/Planet3Body
+ ./testme.py -all $TESTME_OPTIONS
+ - name: migration
+ run: |
+ cd $IDEFIX_DIR/test/Planet/PlanetMigration2D
+ ./testme.py -all $TESTME_OPTIONS
+ - name: planet-planet
+ run: |
+ cd $IDEFIX_DIR/test/Planet/PlanetPlanetRK42D
+ ./testme.py -all $TESTME_OPTIONS
+ - name: spiral wake
+ run: |
+ cd $IDEFIX_DIR/test/Planet/PlanetSpiral2D
+ ./testme.py -all $TESTME_OPTIONS
+ - name: torques
+ run: |
+ cd $IDEFIX_DIR/test/Planet/PlanetTorque3D
+ ./testme.py -all $TESTME_OPTIONS
+ - name: RK5
+ run: |
+ cd $IDEFIX_DIR/test/Planet/PlanetsIsActiveRK52D
+ ./testme.py -all $TESTME_OPTIONS
- HighOrder:
- needs: [Linter, Hydrodynamics, Magneto-Hydrodynamics, MPI]
+ Dust:
+ needs: [Linter, ShocksHydro, ParabolicHydro, ShocksMHD, ParabolicMHD]
runs-on: self-hosted
steps:
- name: Check out repo
uses: actions/checkout@v3
with:
- lfs: true
submodules: recursive
- # Manually do a git LFS https://github.com/actions/checkout/issues/270
- - run: git lfs pull
- - name: Run high order test
- run: cd test && ./checks_highorder.sh $TEST_OPTIONS
+ - name: Energy conservation
+ run: |
+ cd $IDEFIX_DIR/test/Dust/DustEnergy
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Dusty wave
+ run: |
+ cd $IDEFIX_DIR/test/Dust/DustyWave
+ ./testme.py -all $TESTME_OPTIONS
- SinglePrecision:
- needs: [Linter, Hydrodynamics, Magneto-Hydrodynamics, MPI]
+ Braginskii:
+ needs: [Linter, ShocksHydro, ParabolicHydro, ShocksMHD, ParabolicMHD]
runs-on: self-hosted
steps:
- name: Check out repo
uses: actions/checkout@v3
with:
- lfs: true
submodules: recursive
- # Manually do a git LFS https://github.com/actions/checkout/issues/270
- - run: git lfs pull
- - name: Run single precision test
- run: cd test && ./checks_singleprecision.sh $TEST_OPTIONS
+ - name: MTI
+ run: |
+ cd $IDEFIX_DIR/test/MHD/MTI
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Spherical anisotropic diffusion
+ run: |
+ cd $IDEFIX_DIR/test/MHD/sphBragTDiffusion
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Spherical anisotropic viscosity
+ run: |
+ cd $IDEFIX_DIR/test/MHD/sphBragViscosity
+ ./testme.py -all $TESTME_OPTIONS
Examples:
- needs: [VectorPotential, HighOrder,SinglePrecision]
+ needs: [Fargo, Dust, Planet, ShearingBox, SelfGravity]
runs-on: self-hosted
steps:
- name: Check out repo
uses: actions/checkout@v3
with:
- lfs: true
submodules: recursive
- # Manually do a git LFS https://github.com/actions/checkout/issues/270
- - run: git lfs pull
- name: Run examples test
run: cd test && ./checks_examples.sh $TEST_OPTIONS
Utils:
- needs: [VectorPotential, HighOrder,SinglePrecision]
+ needs: [Fargo, Dust, Planet, ShearingBox, SelfGravity]
runs-on: self-hosted
steps:
- name: Check out repo
uses: actions/checkout@v3
with:
- lfs: true
submodules: recursive
- # Manually do a git LFS https://github.com/actions/checkout/issues/270
- - run: git lfs pull
- - name: Run utils test
- run: cd test && ./checks_utils.sh $TEST_OPTIONS
+ - name: Lookup table
+ run: |
+ cd $IDEFIX_DIR/test/utils/lookupTable
+ ./testme.py -all $TESTME_OPTIONS
+ - name: Dump Image
+ run: |
+ cd $IDEFIX_DIR/test/utils/dumpImage
+ ./testme.py -all $TESTME_OPTIONS
diff --git a/.gitignore b/.gitignore
index 213b33e6..422647c3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,7 +12,7 @@ doc/source/xml/*
# generated files
*.log
-src/gitversion.hpp
+src/version.hpp
**/libkokkos.a
**/KokkosCore_config.h
build
@@ -34,6 +34,7 @@ test/**/Makefile
test/**/KokkosCore*
test/**/*.csv
test/**/*.pyc
+test/**/*.dat
# machine specific cache and hidden files
.*
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 0ebafac3..c184917d 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -20,52 +20,127 @@ lint:
- pre-commit install
- pre-commit run --all-files
-HD tests:
+Shocks Hydro:
stage: base_checks
image: lesurg/idefix:latest
script:
- - cd test
- - ./checks_hydro.sh $TEST_OPTIONS
+ - export IDEFIX_DIR=${PWD}
+ - cd $IDEFIX_DIR/test/HD/sod
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/HD/sod-iso
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/HD/MachReflection
+ - ./testme.py -all $TESTME_OPTIONS
-MHD tests:
+Parabolic Hydro:
stage: base_checks
image: lesurg/idefix:latest
script:
- - cd test
- - ./checks_mhd.sh $TEST_OPTIONS
+ - export IDEFIX_DIR=${PWD}
+ - cd $IDEFIX_DIR/test/HD/ViscousFlowPastCylinder
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/HD/ViscousDisk
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/HD/thermalDiffusion
+ - ./testme.py -all $TESTME_OPTIONS
-MPI tests:
+Shocks MHD:
stage: base_checks
image: lesurg/idefix:latest
script:
- - cd test
- - ./checks_mpi.sh $TEST_OPTIONS
+ - export IDEFIX_DIR=${PWD}
+ - cd $IDEFIX_DIR/test/MHD/sod
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/MHD/sod-iso
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/MHD/OrszagTang
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/MHD/OrszagTang3D
+ - ./testme.py -all $TESTME_OPTIONS
-Vector potential tests:
- stage: base_checks
+Parabolic MHD:
+ stage: base_checks
+ image: lesurg/idefix:latest
+ script:
+ - export IDEFIX_DIR=${PWD}
+ - cd $IDEFIX_DIR/test/MHD/AmbipolarCshock
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/MHD/AmbipolarCshock3D
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/MHD/ResistiveAlfvenWave
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/MHD/HallWhistler
+ - ./testme.py -all $TESTME_OPTIONS
+
+Fargo:
+ stage: advanced_checks
image: lesurg/idefix:latest
script:
- - cd test
- - ./checks_vector_potential.sh $TEST_OPTIONS
+ - export IDEFIX_DIR=${PWD}
+ - cd $IDEFIX_DIR/test/HD/FargoPlanet
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/MHD/FargoMHDSpherical
+ - ./testme.py -all $TESTME_OPTIONS
-HighOrder tests:
+ShearingBox:
stage: advanced_checks
image: lesurg/idefix:latest
script:
- - cd test
- - ./checks_highorder.sh $TEST_OPTIONS
+ - export IDEFIX_DIR=${PWD}
+ - cd $IDEFIX_DIR/test/HD/ShearingBox
+ - ./testme.py -all $TESTME_OPTIONS
+    - cd $IDEFIX_DIR/test/MHD/ShearingBox
+ - ./testme.py -all $TESTME_OPTIONS
-SinglePrecision tests:
+SelfGravity:
stage: advanced_checks
image: lesurg/idefix:latest
script:
- - cd test
- - ./checks_singleprecision.sh $TEST_OPTIONS
+ - export IDEFIX_DIR=${PWD}
+ - cd $IDEFIX_DIR/test/SelfGravity/JeansInstability
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/SelfGravity/RandomSphere
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/SelfGravity/RandomSphereCartesian
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/SelfGravity/UniformCollapse
+ - ./testme.py -all $TESTME_OPTIONS
+
+
+Planet:
+ stage: advanced_checks
+ image: lesurg/idefix:latest
+ script:
+ - export IDEFIX_DIR=${PWD}
+ - cd $IDEFIX_DIR/test/Planet/Planet3Body
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/Planet/PlanetMigration2D
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/Planet/PlanetPlanetRK42D
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/Planet/PlanetSpiral2D
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/Planet/PlanetTorque3D
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/Planet/PlanetsIsActiveRK52D
+ - ./testme.py -all $TESTME_OPTIONS
+
+
+Dust:
+ stage: advanced_checks
+ image: lesurg/idefix:latest
+ script:
+ - export IDEFIX_DIR=${PWD}
+ - cd $IDEFIX_DIR/test/Dust/DustEnergy
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/Dust/DustyWave
+ - ./testme.py -all $TESTME_OPTIONS
Examples tests:
stage: advanced_checks
image: lesurg/idefix:latest
script:
+ - export IDEFIX_DIR=${PWD}
- cd test
- ./checks_examples.sh $TEST_OPTIONS
@@ -73,8 +148,11 @@ Utils tests:
stage: advanced_checks
image: lesurg/idefix:latest
script:
- - cd test
- - ./checks_utils.sh $TEST_OPTIONS
+ - export IDEFIX_DIR=${PWD}
+ - cd $IDEFIX_DIR/test/utils/lookupTable
+ - ./testme.py -all $TESTME_OPTIONS
+ - cd $IDEFIX_DIR/test/utils/dumpImage
+ - ./testme.py -all $TESTME_OPTIONS
pages:
image: lesurg/idefix-documentation:latest
diff --git a/.gitmodules b/.gitmodules
index 6c4fddec..83fb2456 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,6 @@
[submodule "src/kokkos"]
path = src/kokkos
url = https://github.com/kokkos/kokkos.git
+[submodule "reference"]
+ path = reference
+ url = https://github.com/idefix-code/reference
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 11b0176a..06fe7210 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -12,6 +12,8 @@ repos:
- id: check-yaml
- id: check-executables-have-shebangs
- id: check-shebang-scripts-are-executable
+ - id: check-added-large-files
+        args: ['--maxkb=100'] ## prevent files larger than 100kB from being committed (exclude git lfs files)
- repo: https://github.com/Lucas-C/pre-commit-hooks-nodejs
rev: v1.1.2
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a4c6ec5e..2ce42182 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,32 +4,60 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## Upcoming
+## [2.0.0] 2023-10-23
### Changed
+- Reorganisation of class instances embedded in datablocks to allow for multi-fluid problems using a generalised template class "Fluid" that replaces "Hydro". Most instances (like data.hydro.Vc) have been replaced by pointers (e.g data.hydro->Vc) (!307).
- RKL scheme now correctly takes into account grid coarsening when estimating the timestep of parabolic terms (!254)
- fixed a bug in the evaluation of gravitational forces from the gradient of the potential in regions where grid coarsening is enabled (!267)
+- The CI tests now include a "non-regression" test that validates the code outputs *at machine precision*. This comes in addition to the "standard" test that validates the code against known analytical solutions (at truncation precision). Each test now contains a testme.py script that does the full validation (documentation will come, for now use testme -help). (!311)
+- use PPM reconstruction for 2D Riemann solvers in Emf Computation when PPM is required.
+- the MPI routines have been refactored to guarantee reproducibility at machine precision independently of the domain decomposition. This implies that normal B field components are now exchanged between neighbouring processes. (!308)
+- allow the user to specify a particular output directory for vtk and dmp files (!339)
+- the -restart option without any number now loads the latest generated dump file, and not the last dump in the directory (!353)
- fixed a bug that resulted in erroneous momentum and energy fluxes when using the combination of Fargo and Viscosity in non-cartesian geometries. (!267)
- fixed a bug in shock flattening that resulted in loss of conservative properties when using periodic boundaries and/or MPI domain decomposition (!275)
+- fixed a bug due to a missing curvature term in the viscosity stress tensor in spherical geometry (!343)
+- fixed a bug that led to an incorrect heating rate when both fargo and viscosity were enabled (!333)
- fixed a bug in emergency vtk outputs that could lead to an MPI deadlock when the user did not enable VTK outputs (!274)
- fixed a bug that could result in MPI deadlocks when an exception is thrown by a single MPI process in the integration loop (!266)
- fixed a bug in shock flattening that could lead to the breakup of conservation properties (!275)
- fixed a bug in LookupTable that could lead to incorrect interpolation (!286)
- fixed a bug that prevented to compile on HIP backend (!291)
-
-
+- fixed a bug that prevented idefix with vector potential to restart from dumps created with vector potential (!306)
+- fixed a bug that led to race condition when using GPU offloading, axis boundary condition and domain decomposition along X3 (!309)
+- fixed a bug that led to inconsistent results with MPI and UCT_HLLx EMF reconstruction schemes (!310)
+- fixed a bug that could result in .dmp file duplication on restart (!354)
+- Moving development onto github.com. Some references to merge requests will likely be lost.
+- fixed a bug in v2.0 development branch that resulted in broken dump files when MHD was enabled with DIMENSIONS<3 (#174)
+- IDEFIX_Debug now automatically introduces Kokkos::fence at the end of each idefix_for, enforcing synchronisation between host and device when debugging (#188)
+- Do not use git lfs anymore due to bandwidth restrictions imposed by github (#183)
+- use proper cell centroid PLM reconstruction when using non-cartesian coordinates (#196)
+- fixed a bug that could lead to low (O(1e-10)) leakage of conserved quantities along the axis (#196)
+- changed radial pressure curvature term so that machine precision balance is achieved for constant pressure flows in every geometry (#196)
### Added
- Self-gravity (!186)
+- Multi-dust species as pressureless fluids (!336)
+- Passive tracers (!341)
+- Planet module (planet migration, planet-planet integration) (!278)
+- Custom equation of states (#185)
+- Sliced VTK files (beta version) (#195)
- Check that the MPI library is GPU-aware when using a GPU backend (!262)
- An optional user-defined Setup destructor can now be defined (!260)
- performance improvement on CPUs by cleaning loops and rewriting EMF reconstruction (!281)
- The tolerance on div(B) allowed by the code can now be set at runtime (!292)
- Nan detection is now performed every 100 integration loops by default so as mitigate performance impact on CPUs (!292)
+- It is now possible to build a stretch grid from a logarithmic grid section, and not only a uniform grid section (!304)
+- Shock flattening can now be used in combination with LimO3 slope limiter (!312)
+- XDMF output format (optional, requires HDF5 library on the host machine) (#13)
+- new -profile option to perform on-the-fly profiling without requiring Kokkos Tools (#188)
+- -v and -h options to show version and list of accepted arguments
### Removed
- auto-tuning was removed as it was preventing auto-vectorisation on Intel compilers. Loop tuning are now set at compile time (!281)
+
## [1.1.0] 2022-09-07
### Changed
- use buffers for mpi axis exchanges to improve performances on GPUs (!195)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f5b0fb54..7777bbde 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,14 +2,23 @@ cmake_minimum_required(VERSION 3.16)
set(CMAKE_BUILD_TYPE Release)
set (CMAKE_CXX_STANDARD 17)
+set(Idefix_VERSION_MAJOR 2)
+set(Idefix_VERSION_MINOR 0)
+set(Idefix_VERSION_PATCH 00)
+
project (idefix VERSION 1.0.0)
option(Idefix_MHD "enable MHD" OFF)
option(Idefix_MPI "enable Message Passing Interface parallelisation" OFF)
option(Idefix_HIGH_ORDER_FARGO "Force Fargo to use a PPM reconstruction scheme" OFF)
option(Idefix_DEBUG "Enable Idefix debug features (makes the code very slow)" OFF)
+option(Idefix_RUNTIME_CHECKS "Enable runtime sanity checks" OFF)
option(Idefix_WERROR "Treat compiler warnings as errors" OFF)
set(Idefix_CXX_FLAGS "" CACHE STRING "Additional compiler/linker flag")
set(Idefix_DEFS "definitions.hpp" CACHE FILEPATH "Problem definition header file")
+option(Idefix_CUSTOM_EOS "Use custom equation of state" OFF)
+if(Idefix_CUSTOM_EOS)
+ set(Idefix_CUSTOM_EOS_FILE "eos_custom.hpp" CACHE FILEPATH "Custom equation of state source file")
+endif()
set(Idefix_RECONSTRUCTION "Linear" CACHE STRING "Type of cell reconstruction scheme")
option(Idefix_HDF5 "Enable HDF5 I/O (requires HDF5 library)" OFF)
if(Idefix_MHD)
@@ -70,7 +79,6 @@ if(EXISTS ${PROJECT_BINARY_DIR}/CMakeLists.txt)
endif()
-
if(Idefix_MHD)
add_compile_definitions("MHD=YES")
else()
@@ -109,6 +117,10 @@ if(Idefix_DEBUG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g")
endif()
+if(Idefix_RUNTIME_CHECKS)
+ add_compile_definitions("RUNTIME_CHECKS")
+endif()
+
if(Idefix_HIGH_ORDER_FARGO)
add_compile_definitions("HIGH_ORDER_FARGO")
endif()
@@ -116,14 +128,24 @@ endif()
if(Idefix_EVOLVE_VECTOR_POTENTIAL)
add_compile_definitions("EVOLVE_VECTOR_POTENTIAL")
endif()
-#update gitversion.hpp if possible
+#update version.hpp if possible
git_describe(GIT_SHA1)
-write_file(${CMAKE_SOURCE_DIR}/src/gitversion.hpp "#define GITVERSION \"${GIT_SHA1}\"")
+set(Idefix_VERSION ${Idefix_VERSION_MAJOR}.${Idefix_VERSION_MINOR}.${Idefix_VERSION_PATCH}-${GIT_SHA1})
+file(WRITE ${CMAKE_SOURCE_DIR}/src/version.hpp "#define IDEFIX_GIT_COMMIT \"${GIT_SHA1}\"\n")
+file(APPEND ${CMAKE_SOURCE_DIR}/src/version.hpp "#define IDEFIX_VERSION_MAJOR \"${Idefix_VERSION_MAJOR}\"\n")
+file(APPEND ${CMAKE_SOURCE_DIR}/src/version.hpp "#define IDEFIX_VERSION_MINOR \"${Idefix_VERSION_MINOR}\"\n")
+file(APPEND ${CMAKE_SOURCE_DIR}/src/version.hpp "#define IDEFIX_VERSION_PATCH \"${Idefix_VERSION_PATCH}\"\n")
+file(APPEND ${CMAKE_SOURCE_DIR}/src/version.hpp "#define IDEFIX_VERSION \"${Idefix_VERSION}\"\n")
+
if(NOT ${Idefix_DEFS} STREQUAL "definitions.hpp")
add_compile_definitions("DEFINITIONS_FILE=\"${Idefix_DEFS}\"")
endif()
+if(Idefix_CUSTOM_EOS)
+ add_compile_definitions("EOS_FILE=\"${Idefix_CUSTOM_EOS_FILE}\"")
+endif()
+
# Order of the scheme
if(${Idefix_RECONSTRUCTION} STREQUAL "Constant")
add_compile_definitions("ORDER=1")
@@ -172,15 +194,22 @@ target_include_directories(idefix PUBLIC
target_include_directories(idefix PUBLIC
src/kokkos/core/src
src/dataBlock
- src/hydro
- src/hydro/boundary
- src/hydro/electromotiveforce
- src/hydro/HDsolvers
- src/hydro/MHDsolvers
+ src/dataBlock/planetarySystem
+ src/fluid
+ src/fluid/boundary
+ src/fluid/braginskii
+ src/fluid/constrainedTransport
+ src/fluid/eos
+ src/fluid/RiemannSolver
+ src/fluid/RiemannSolver/HDsolvers
+ src/fluid/RiemannSolver/MHDsolvers
+ src/fluid/RiemannSolver/Dustsolvers
+ src/fluid/tracer
src/output
src/rkl
src/gravity
src/utils
+ src/utils/iterativesolver
src
)
@@ -196,5 +225,8 @@ message(STATUS " MPI: ${Idefix_MPI}")
message(STATUS " HDF5: ${Idefix_HDF5}")
message(STATUS " Reconstruction: ${Idefix_RECONSTRUCTION}")
message(STATUS " Precision: ${Idefix_PRECISION}")
-message(STATUS " Version: ${GIT_SHA1}")
+message(STATUS " Version: ${Idefix_VERSION}")
message(STATUS " Problem definitions: '${Idefix_DEFS}'")
+if(Idefix_CUSTOM_EOS)
+ message(STATUS " EOS: Custom file '${Idefix_CUSTOM_EOS_FILE}'")
+endif()
diff --git a/README.md b/README.md
index 7853cd00..a005f297 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@
* [With MPI (gpu)](#with-mpi-gpu)
- [Profiling](#profiling)
- [Debugging](#debugging)
-- [Running tests](#running-tests)
+- [Code Validation](#code-validation)
- [Contributing](#contributing)
@@ -20,14 +20,16 @@
Download:
---------
-using either ssh or https url as `
`
+Assuming you want to use https to get idefix (easiest option):
+
```shell
-git clone idefix
+git clone https://github.com/idefix-code/idefix.git idefix
cd idefix
git submodule init
git submodule update
```
+
Installation:
-------------
@@ -100,31 +102,39 @@ mpirun -np 8 ./idefix -dec 1 2 4 --kokkos-num-devices=4
Profiling
-------------------
-use the kokkos profiler tool, which can be downloaded from kokkos-tools (on github)
-Then set the environement variable:
+use the embedded profiling tool by adding "-profile" when calling idefix (no need to recompile)
```shell
-export KOKKOS_PROFILE_LIBRARY=kokkos-tools/src/tools/space-time-stack/kp_space_time_stack.so
-````
-
-and then simply run the code (no need to recompile)
+./idefix -profile
+```
Debugging
-------------------
Add `-DIdefix_DEBUG=ON` when calling cmake, or activate the `Idefix_DEBUG` option in ccmake, and recompile.
Note that this option triggers a lot of outputs and memory access checks which significantly slow down the code.
-Running tests
--------------------
-Tests require Python 3 along with some third party dependencies to be installed.
-To install those deps, run
+Code Validation
+---------------
+
+Most of the tests provided in the `test/` directory can be validated against an analytical solution (standard test)
+and/or pre-computed solutions (non-regression tests). Note that the validation relies on large reference
+files that are stored in the separate `idefix-code/reference` repository that is cloned as a submodule.
+
+Ensure that reference files
+were properly downloaded (in the reference/ directory of the root of idefix) before attempting to validate the code.
+
+In order to do a full validation of a particular test
+(with all of the possible combination of algorithms), use the script `testme.py`
+with the `-all` option, as in e.g.:
```shell
-pip install -r test/python_requirements.txt
+cd $IDEFIX_DIR/test/HD/sod
+./testme.py -all
```
-The test suite itself is then run with
+Tests require Python 3 along with some third party dependencies to be installed.
+To install those deps, run
```shell
-bash test/checks.sh
+pip install -r test/python_requirements.txt
```
Contributing
diff --git a/cmake/GetGitRevisionDescription.cmake b/cmake/GetGitRevisionDescription.cmake
index 31ea28e3..81b5844b 100644
--- a/cmake/GetGitRevisionDescription.cmake
+++ b/cmake/GetGitRevisionDescription.cmake
@@ -201,7 +201,8 @@ function(git_describe _var)
#message(STATUS "Arguments to execute_process: ${ARGN}")
execute_process(
- COMMAND "${GIT_EXECUTABLE}" describe --tags --always ${hash} ${ARGN}
+ #COMMAND "${GIT_EXECUTABLE}" describe --tags --always ${hash} ${ARGN}
+ COMMAND "${GIT_EXECUTABLE}" rev-parse --short HEAD
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
RESULT_VARIABLE res
OUTPUT_VARIABLE out
@@ -210,6 +211,8 @@ function(git_describe _var)
set(out "${out}-${res}-NOTFOUND")
endif()
+ #set(out "${hash}")
+
set(${_var}
"${out}"
PARENT_SCOPE)
diff --git a/doc/python_requirements.txt b/doc/python_requirements.txt
index f26ddd60..b0acf406 100644
--- a/doc/python_requirements.txt
+++ b/doc/python_requirements.txt
@@ -12,3 +12,4 @@ sphinx_git==11.0.0
breathe==4.34.0
exhale==0.3.6
m2r2==0.3.2
+sphinx-copybutton==0.5.2
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 634077a5..362da4da 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -23,7 +23,7 @@
author = 'Geoffroy Lesur'
# The full version, including alpha/beta/rc tags
-release = '1.0'
+release = '2.0.0'
@@ -37,7 +37,8 @@
'sphinx_git',
"breathe",
"exhale",
- "m2r2"
+ "m2r2",
+ "sphinx_copybutton"
]
source_suffix = [".rst", ".md"]
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index dc3d3170..f7d5461a 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -24,6 +24,9 @@ How do I use my favourite XXXXX compiler?
I have a complex setup, and have written some functions in separate .cpp files. How can I add these files to *Idefix* build tree?
Add a ``CmakeLists.txt`` in your problem directory and use the function `add_idefix_source` (see :ref:`customSourceFiles`).
+I want to run on the GPUs of xxx machine, how do I proceed?
+ Check the examples in :ref:`setupExamples`
+
Compilation
-----------
@@ -33,6 +36,9 @@ Is there a way to see explicitely the compilation commands with ``make``?
The compilation stops while compiling Kokkos with ``/usr/include/stdlib.h(58): error: expected a ";"``
This happens on Gricad machines when LIBDL is activated (wrong glibc). Simply disable LIBDL.
+When using the Intel compiler on a Mac Intel, I get a linking error involving the ``SharedAllocationRecordIvvE18t_tracking_enabledE`` symbol.
+ This is a known bug of the Intel Mac compiler with Kokkos. Apparently Intel has decided not to fix it. Check the issue on the `Kokkos git page `_.
+
Execution
---------
@@ -44,11 +50,12 @@ How can I stop the code without loosing the current calculation?
I'm doing performance measures. How do I disable all outputs in *Idefix*?
Add ``-nowrite`` when you call *Idefix* executable.
+
Developement
------------
I have a serious bug (e.g. segmentation fault), in my setup, how do I proceed?
- Add ``-DIdefix_DEBUG=ON`` to ``cmake`` and recompile to find out exactly where the code crashes.
+ Add ``-DIdefix_DEBUG=ON`` to ``cmake`` and recompile to find out exactly where the code crashes (see :ref:`debugging`).
I want to test a modification to *Idefix* source code specific to my problem without modifying files in `$IDEFIX_DIR`. How can I proceed?
Add a ``CmakeLists.txt`` in your problem directory and use the function `replace_idefix_source` (see :ref:`customSourceFiles`).
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 18762cd6..c0d96f12 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -20,18 +20,42 @@ algorithm is essentially the same (but with some major modification to its struc
================
Requirements
================
-*Idefix* is written is standard C++17 and does not rely on any external library in serial (non MPI). *Idefix* requires a C++17 compatible compiler. It has been tested successfully with GCC (>8), Intel compiler suite (>2018) and
-Clang on both Intel and AMD CPUs. *Idefix* has also been tested on NVIDIA GPUs (Pascal, Volta and Ampere architectures) using the nvcc (>10) compiler, and on AMD GPUs (Radeon Mi50) using the hipcc compiler.
-*Idefix* relies internally on the `Kokkos `_ library, which is bundled with *Idefix* as a git submodule and compiled on the fly, hence no external installation is required.
-When using MPI parallelisation, *Idefix* relies on an external MPI library. *Idefix* has been tested successfully with OpenMPI and IntelMPI libraries. When used on GPU architectures, *Idefix* assumes that
-the MPI library is GPU-Aware. If unsure, check this last point with your system administrator.
+*Idefix* is written in standard C++17 and does not rely on any external library in serial (non MPI).
+
+Compiler
+ *Idefix* requires a C++17 compatible compiler. It has been tested successfully with GCC (>8), Intel compiler suite (>2018) and
+ Clang on both Intel and AMD CPUs. *Idefix* has also been tested on NVIDIA GPUs (Pascal, Volta and Ampere architectures) using the nvcc (>10) compiler, and on AMD GPUs (Radeon Mi50, Mi210, Mi250) using the hipcc compiler.
+
+Kokkos library
+ *Idefix* relies internally on the `Kokkos `_ library, which is bundled with *Idefix* as a git submodule and compiled on the fly, hence no external installation is required.
+
+MPI library
+ When using MPI parallelisation, *Idefix* relies on an external MPI library. *Idefix* has been tested successfully with OpenMPI and IntelMPI libraries. When used on GPU architectures, *Idefix* assumes that
+ the MPI library is GPU-Aware. If unsure, check this last point with your system administrator.
+
+================
+Features
+================
+* Compressible hydrodynamics in 1D, 2D, 3D
+* Compressible magnetohydrodynamics using constrained transport in 1D, 2D, 3D
+* Multiple geometries (cartesian, polar, spherical)
+* Variable mesh spacing
+* Multiple parallelisation strategies (OpenMP, MPI, GPU offloading, etc...)
+* Full non-ideal MHD (Ohmic, ambipolar, Hall)
+* Viscosity and thermal diffusion
+* Super-timestepping for all parabolic terms
+* Orbital advection (Fargo-like)
+* Self-gravity
+* Multi dust species modelled as pressureless fluids
+* Multiple planet interactions
===========================
Terms and condition of Use
===========================
*Idefix* is distributed freely under the `CeCILL license `_, a free software license adapted to both international and French legal matters, in the spirit of and retaining
-compatibility with the GNU General Public License (GPL). We expect *Idefix* to be referenced and acknowledeged by authors in their publications.
+compatibility with the GNU General Public License (GPL). We expect *Idefix* to be referenced and acknowledged by authors in their publications. At the minimum, the authors
+should cite the *Idefix* `method paper `_.
*Idefix* data structure and algorithm are derived from Andrea Mignone's `PLUTO code `_, released under the GPL license.
*Idefix* also relies on the `Kokkos `_ performance portability programming ecosystem released under the terms
@@ -84,6 +108,7 @@ under the European Union Horizon 2020 research and innovation programme (Grant a
reference
modules
programmingguide
+ kokkos
contributing
faq
changelog
diff --git a/doc/source/kokkos.rst b/doc/source/kokkos.rst
new file mode 100644
index 00000000..3faf1550
--- /dev/null
+++ b/doc/source/kokkos.rst
@@ -0,0 +1,10 @@
+=====================
+Kokkos
+=====================
+
+*Idefix* comes with the `Kokkos `_ library bundled as a git submodule. The bundled version of the Kokkos
+library is known to work for the version of Idefix associated to it, and is compiled on the fly when
+compiling Idefix. Hence, there is no need to download or compile Kokkos separately.
+
+For technical details on Kokkos, advanced users are encouraged to read the `Kokkos documentation `_ and watch the
+`Kokkos lecture notes `_.
diff --git a/doc/source/modules.rst b/doc/source/modules.rst
index 41ec159f..13287ece 100644
--- a/doc/source/modules.rst
+++ b/doc/source/modules.rst
@@ -8,6 +8,23 @@ In this section, you will find a more detailed documentation about each module t
:ref:`fargoModule`
The orbital advection module, speeds up the computation of flows dominated by an azimuthal motion (such as discs).
+:ref:`planetModule`
+ The planet module, which treats the planet-disk interaction and planet-planet interaction.
+
+:ref:`dustModule`
+ The dust module, modeling dust grains as a zero-pressure gas.
+
+:ref:`eosModule`
+  The custom equation of state module, allowing the user to define their own equation of state.
+
+:ref:`selfGravityModule`
+  The self-gravity computation module, which handles the impact of the gas distribution on its own dynamics when massive
+  enough (as in a core collapse).
+
+:ref:`braginskiiModule`
+ The Braginskii module, models the anisotropic flux of heat and momentum
+ taking place in weakly collisional, magnetised plasma (like the intracluster medium).
+
:ref:`gridCoarseningModule`
The grid coarsening module, that allows to derefine the grid in particular locations to speed up the computation.
@@ -17,4 +34,9 @@ In this section, you will find a more detailed documentation about each module t
:caption: Contents:
modules/fargo.rst
+ modules/planet.rst
+ modules/dust.rst
+ modules/eos.rst
+ modules/selfGravity.rst
+ modules/braginskii.rst
modules/gridCoarsening.rst
diff --git a/doc/source/modules/braginskii.rst b/doc/source/modules/braginskii.rst
new file mode 100644
index 00000000..e657cedd
--- /dev/null
+++ b/doc/source/modules/braginskii.rst
@@ -0,0 +1,121 @@
+.. _braginskiiModule:
+
+Braginskii module
+===================
+
+Equations solved and methods
+---------------------------
+
+The ``Braginskii`` module implements the anisotropic heat and momentum fluxes specific
+to weakly collisional, magnetised plasma like the intracluster medium
+for which this module was originally designed.
+In practice, it computes the modified Fourier law :math:`q_\mathrm{B}` and
+the modified viscous stress tensor :math:`\Pi_\mathrm{B}` (Latter & Kunz 2012):
+
+:math:`q_\mathrm{B} = -\kappa_\parallel \left(\hat{b}\cdot \nabla T\right) \hat{b}`
+
+:math:`\Pi_\mathrm{B} = - \left( p_\perp - p_\parallel \right) \left( \hat{b} \hat{b} - \frac{1}{3} I \right)`
+
+where
+:math:`T` and :math:`v` are respectively the temperature and the velocity of the fluid,
+:math:`p_\parallel` and :math:`p_\perp` the pressure in the direction parallel and
+perpendicular to the local magnetic field.
+:math:`\kappa_\parallel` and :math:`\mu_\parallel` are the parallel thermal conductivity
+and dynamic viscosity respectively, while
+:math:`\hat{b}` is the unit vector in the direction of the local magnetic field
+and :math:`I` the identity.
+The pressure anisotropy can be computed from the following closure:
+:math:`p_\perp - p_\parallel = 3\mu_\mathrm{B} \left(\hat{b}\hat{b}:\nabla v - \frac{1}{3} \nabla\cdot v \right)` (Schekochihin et al. 2010).
+
+The anisotropic heat flux from the ``Braginskii`` module is implemented in *Idefix*
+according to the centered asymmetric scheme described in Sharma & Hammett (2007, Section 2.1).
+The Braginskii viscous stress tensor is implemented with the same scheme,
+though adapted to vector quantities.
+
+.. note::
+ By default, the Braginskii module computes the transverse heat flux terms at the right
+ cell interface by a simple arithmetic average
+ (Eq. (6)-(7) from Sharma & Hammett 2007).
+ However in the same paper, the authors showed that this implementation can lead to
+ unphysical heat flux from high to low temperature regions.
+ So we implemented slope limiters for the computation of these transverse heat fluxes,
+ as described in Eq. (17) from Sharma & Hammett (2007).
+ Only the van Leer and the Monotonized Central (MC) limiters are available
+ since the minmod limiter has been found to be too diffusive.
+ Transverse viscous fluxes are limited in the same manner,
+ as described by ZuHone et al. (2015) in their fifth footnote.
+
+.. note::
+ Perpendicular heat flux
+ :math:`q_\perp = -\kappa_\perp \left[ \nabla T - \left(\hat{b}\cdot \nabla T\right) \hat{b}\right]`
+ can also be taken into account
+ (see section :ref:`braginskiiParameterSection`),
+ but perpendicular viscous flux cannot in the current implementation.
+ However, both Braginskii operators can be used *in addition* to the classical
+ Fourier law and viscosity. Modeling both isotropic and anisotropic diffusion
+ should be equivalent to having different
+ parallel and perpendicular diffusivities.
+
+The main output of the ``Braginskii`` module is the addition of the anisotropic heat and/or
+momentum flux terms to the other various fluxes.
+The computational cost of the ``Braginskii`` module is therefore one
+of a parabolic term, meaning that
+the associated Courant-Friedrichs-Lewy (CFL)
+condition can be stiff in case of very diffusive plasma.
+However, like other parabolic operators in *Idefix*, a Runge-Kutta-Legendre super time-stepping
+scheme is available to speed-up the computation (with respect to a fully explicit integration)
+of the Braginskii heat flux and viscosity.
+
+..
+  Please refer to Section 2.8 from Lesur et al.
+  for more details on this super time-stepping scheme.
+
+.. _braginskiiParameterSection:
+
+Main parameters of the module
+-----------------------------
+
+The ``Braginskii`` module can be enabled by adding one or two lines in the ``[Hydro]`` section
+starting with the keyword
+``bragTDiffusion`` and/or ``bragViscosity``. The following table summarises the different options
+associated to the activation of the Braginskii module:
+
++--------+-----------------------+-------------------------+---------------------------------------------------------------------------------------+
+| Column | Entry name | Parameter type | Comment |
++========+=======================+=========================+=======================================================================================+
+| 0 | bragModule | string | | Activates Braginskii diffusion. Can be ``bragTDiffusion`` or ``bragViscosity``. |
++--------+-----------------------+-------------------------+---------------------------------------------------------------------------------------+
+| 1 | integration | string | | Specifies the type of scheme to be used to integrate the parabolic term. |
+| | | | | Can be ``rkl`` or ``explicit``. |
++--------+-----------------------+-------------------------+---------------------------------------------------------------------------------------+
+| 2 | slope limiter | string | | Choose the type of limiter to be used to compute anisotropic transverse flux terms. |
+| | | | | Can be ``mc``, ``vanleer`` or ``nolimiter``. |
++--------+-----------------------+-------------------------+---------------------------------------------------------------------------------------+
+| 3 | diffusivity type | string | | Specifies the type of diffusivity wanted. Can be ``constant`` or ``userdef``. |
++--------+-----------------------+-------------------------+---------------------------------------------------------------------------------------+
+| 4 | parallel diffusivity | real | | Mandatory if the diffusivity type is ``constant``. Not needed otherwise. |
+| | | | | Value of the parallel diffusivity. Should be a real number. |
++--------+-----------------------+-------------------------+---------------------------------------------------------------------------------------+
+| 5 | normal diffusivity | real | | When bragModule ``bragTDiffusion`` and diffusivity type ``constant``, |
+| | | | | value of the normal diffusivity. Should be a real number. |
++--------+-----------------------+-------------------------+---------------------------------------------------------------------------------------+
+
+Numerical checks
+---------------
+
+In Cartesian geometry, the ``Braginskii`` module has been tested with many setups
+and in all configurations of magnetic polarisation:
+growth rates of local instabilities (see the MTI test inspired from Parrish et al. 2012)
+and damped rates of eigenmodes of the corresponding Braginskii operator (tests not included).
+In Cylindrical/Polar geometry, only the anisotropic heat conduction has been tested
+with numerical measurements of the damped rates of its eigenmodes, in all directions
+(tests not included).
+In Spherical geometry, both Braginskii operators have been validated by measuring the damped rates
+of their eigenmodes for a purely radial and purely azimuthal magnetic polarisation
+(tests included except the viscosity with an azimuthal magnetic field).
+
+In conclusion, the ``Braginskii`` operators have been thoroughly tested in Cartesian geometry.
+The same goes for the anisotropic heat flux in Cylindrical/Polar geometry while
+the anisotropic viscosity has *never* been tested in this geometry.
+In spherical geometry, both ``Braginskii`` operators have been partially validated
+(diffusion along the polar axis has not been directly tested).
diff --git a/doc/source/modules/dust.rst b/doc/source/modules/dust.rst
new file mode 100644
index 00000000..ca371dad
--- /dev/null
+++ b/doc/source/modules/dust.rst
@@ -0,0 +1,135 @@
+.. _dustModule:
+
+Dust fluid module
+=========================
+
+Equations
+---------
+The dust module is designed to treat dust grains as a zero-pressure gas coupled to the gas by a linear drag force on the velocity.
+*Idefix* can handle as many dust species as one wants, each with different coupling constants. For each dust species :math:`i`, the code solves the dust evolution equation
+
+.. math::
+
+   \partial_t \rho_i+\mathbf{\nabla}\cdot \rho_i \mathbf{v}_i&=0
+
+   \partial_t \rho_i \mathbf{v}_i + \nabla\cdot \rho_i \mathbf{v}_i\mathbf{v}_i&=\gamma_i \rho_i \rho (\mathbf{v}-\mathbf{v}_i)-\rho_i\mathbf{\nabla}\psi_G-\rho_i\mathbf{g}
+
+
+where :math:`\rho_i` and :math:`\rho` are the dust and gas densities, :math:`\mathbf{v}_i` and :math:`\mathbf{v}` are the dust and gas velocities and :math:`\gamma_i` is the drag coefficient
+between the dust and the gas. Note that by construction, all of the source terms (gravity, rotation, shearing box) of the gas are also automatically applied to each dust species.
+
+When the gas follows an ideal or user-defined equation of state, the dust-gas drag leads to friction heating. Because the dust component does not possess an internal energy, *Idefix*
+assumes that all of the friction heating is deposited in the gas internal energy as an additional heating term :math:`Q_d`:
+
+.. math::
+
+ Q_d = \sum_i \gamma_i \rho_i \rho (\mathbf{v}-\mathbf{v}_i)\cdot\mathbf{v}_i
+
+When the drag feedback is enabled (default behaviour), the gas is subject to an additional drag force
+
+.. math::
+
+ \mathbf{f}=\sum_i \gamma_i \rho_i \rho (\mathbf{v}_i-\mathbf{v})
+
+which guarantees total momentum conservation.
+
+.. note::
+
+ The heating associated to the feedback does not require any additional source term in the energy equation as *Idefix* conserves the total gas energy by design.
+
+
+
+Drag CFL condition
+-------------------
+*Idefix* computes the drag terms with a time-explicit scheme. Hence, an additional CFL constraint arises because of the drag:
+
+.. math::
+
+ dt < \min(\frac{1}{\sum_i\gamma_i(\rho_i+\rho)})
+
+*Idefix* automatically adjusts the CFL to satisfy this inequality, in addition to the usual CFL condition.
+
+Dust parameters
+---------------
+
+The dust module can be enabled by adding a block `[Dust]` in your input .ini file. The parameters are as follows:
+
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| Entry name | Parameter type | Comment |
++================+=========================+=============================================================================================+
+| nSpecies | integer | | Number of dust species to solve |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| drag | string, float, ... | | The first parameter describe the drag type. Possible values are: ``gamma``, ``tau``, |
+| | | | ``size`` and ``userdef``. |
+| | | | The remaining parameters gives the drag parameter :math:`\beta_i` for each dust specie. |
+| | | | (see below). *Idefix* expect to have as many drag parameters as there are dust species. |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| drag_feedback | bool | | (optionnal) whether the gas feedback is enabled (default true). |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+
+The drag parameter :math:`\beta_i` above sets the functional form of :math:`\gamma_i(\rho, \rho_i, c_s)` depending on the drag type:
+
+``gamma``:
+ This sets :math:`\gamma_i=\beta_i`
+``tau``:
+ This sets :math:`\gamma_i=1/(\rho \beta_i)` so that :math:`\beta_i` is the (constant) stopping time :math:`\tau_i` of the dust grains.
+``size``:
+  This sets :math:`\gamma_i=c_s/\beta_i` so that :math:`\tau_i=(\rho \gamma_i)^{-1}=\beta_i/(\rho c_s)` where :math:`c_s` is the gas sound speed.
+  It is designed to reproduce the behaviour of a fixed size dust grain of constant density that follows either Epstein or Stokes drag law:
+
+ Epstein drag:
+ In the Epstein regime, :math:`\beta_i=\rho_s a` where :math:`\rho_s` is the solid density and :math:`a` is the solid size.
+ Stokes drag:
+ In the Stokes regime, :math:`\beta_i=4\rho_s a^2/(9\lambda_\mathrm{mfp})` where :math:`\lambda_\mathrm{mfp}` is the gas mean free path.
+
+``userdef``:
+ This allows the user to define a specialized drag law, that is a function :math:`\gamma_i(\rho, \rho_i, c_s, \beta_i)`. In this case, a user-defined
+ function should be enrolled by each drag instance (see the example in `test/Dust/FargoPlanet`). Note that the entry ``drag`` in your
+ input file should still contain a list of :math:`\beta_i`.
+
+
+.. warning::
+ The drag force assumes that the gas density field ``hydro->Vc(RHO...)`` is a volumic density. For 2D problems assuming
+ a razor-thin geometry, this assumption is incorrect since ``hydro->Vc(RHO...)`` is a surface density. In this case,
+ the user has to define a specific drag law since *Idefix* has no way to guess how to convert the surface density to
+ the volumic density (see example in `test/Dust/FargoPlanet`).
+
+
+
+Using the dust module
+---------------------
+
+Several examples are provided in the :file:`test/Dust` directory. Each dust species is considered in *Idefix* as an instance of the `Fluid` class, hence
+one can apply the techniques used for the gas to each dust species. Because *Idefix* can handle an arbitrary number of dust species, each species is held
+in an instance of `Fluid`, and these instances are stored in a container (:code:`std::vector dust`) in the `DataBlock`. The same is true for the mirror `DataBlockHost`: the
+dust primitive variables are all stored in :code:`std::vector dustVc`. For instance, initialising
+a single dust species is done as follows:
+
+.. code-block:: c++
+
+
+ void Setup::InitFlow(DataBlock &data) {
+ // Create a host copy
+ DataBlockHost d(data);
+
+ for(int k = 0; k < d.np_tot[KDIR] ; k++) {
+ for(int j = 0; j < d.np_tot[JDIR] ; j++) {
+ for(int i = 0; i < d.np_tot[IDIR] ; i++) {
+
+ d.Vc(RHO,k,j,i) = 1.0; // Set the gas density to 1
+ d.dustVc[0](RHO,k,j,i) = 1.0; // Set first dust specie density to 1
+
+ d.Vc(VX1,k,j,i) = 1; // Set the gas velocity to 1
+ d.dustVc[0](VX1,k,j,i) = 0.0; // Set the dust velocity to 0
+
+ }
+ }
+ }
+
+ // Send it all, if needed
+ d.SyncToDevice();
+ }
+
+
+
+All of the dust fields are automatically written in the dump and vtk outputs created by *Idefix*.
diff --git a/doc/source/modules/eos.rst b/doc/source/modules/eos.rst
new file mode 100644
index 00000000..041a378d
--- /dev/null
+++ b/doc/source/modules/eos.rst
@@ -0,0 +1,55 @@
+.. _eosModule:
+
+Equation of state module
+=========================
+
+About
+---------
+
+By default, *Idefix* can handle either isothermal equation of states (in which case the pressure is never computed, and the code
+works with a prescribed sound speed function), or an ideal adiabatic equation of state (assuming a constant adiabatic exponent :math:`\gamma`).
+
+If one wants to compute the dynamics of more complex fluids (e.g. multiphase flows, partial ionisation, etc.), then the ideal adiabatic equation of
+state is not sufficient and one needs to code a *custom* equation of state. This is done by implementing the class ``EquationOfState`` with the functions
+required by *Idefix* algorithm.
+
+Functions needed
+-----------------
+
+*Idefix* requires the definition of 3 functions that are called during the integration loop: a function to compute the first adiabatic exponent :math:`\Gamma_1`, and
+the functions needed to convert pressure to internal energy and *vice-versa*. The definition for these functions is:
+
+.. code-block:: c++
+
+ class EquationOfState {
+ // First adiabatic exponent.
+ KOKKOS_INLINE_FUNCTION real GetGamma(real P , real rho ) const {
+ real gamma;
+ // Compute gamma (needed for sound speed estimations, 5/3 would work in general)
+ return gamma;
+ }
+
+ // Compute the internal energy from pressure and density
+ KOKKOS_INLINE_FUNCTION
+ real GetInternalEnergy(real P, real rho) const {
+ real eint; // = ...
+ return eint;
+ }
+
+ // Compute the pressure from internal energy and density
+ KOKKOS_INLINE_FUNCTION
+ real GetPressure(real Eint, real rho) const {
+ real P; // = ...
+ return P;
+ }
+ }
+
+
+How to use a custom EOS
+-----------------------
+
+#. Copy the template file ``eos_template.hpp`` (in src/fluid/eos) in your problem directory and rename it (e.g. ``my_eos.hpp``)
+#. Make sure that you have not enabled the ISOTHERMAL approximation in your ``definitions.hpp``
+#. Implement your EOS in ``my_eos.hpp``, and in particular the 3 EOS functions required.
+#. in cmake, enable ``Idefix_CUSTOM_EOS`` and set ``Idefix_CUSTOM_EOS_FILE`` to ``my_eos.hpp`` (or the filename you have chosen in #1)
+#. Compile and run
diff --git a/doc/source/modules/fargo.rst b/doc/source/modules/fargo.rst
index 60489ef2..07f98336 100644
--- a/doc/source/modules/fargo.rst
+++ b/doc/source/modules/fargo.rst
@@ -1,7 +1,7 @@
.. _fargoModule:
-The FARGO module
-================
+Orbital advection (aka "FARGO") module
+=======================================
The Fargo module implements the orbital advection algorithm, to accelerate the computation of flows
subject to a dominant axisymmetric azimuthal velocity component. This is in particular the case
diff --git a/doc/source/modules/gridCoarsening.rst b/doc/source/modules/gridCoarsening.rst
index be0d5f6a..987a74ed 100644
--- a/doc/source/modules/gridCoarsening.rst
+++ b/doc/source/modules/gridCoarsening.rst
@@ -1,6 +1,6 @@
.. _gridCoarseningModule:
-The Grid coarsening module
+Grid coarsening module
==========================
About grid coarsening
diff --git a/doc/source/modules/planet.rst b/doc/source/modules/planet.rst
new file mode 100644
index 00000000..ac8f510d
--- /dev/null
+++ b/doc/source/modules/planet.rst
@@ -0,0 +1,91 @@
+.. _planetModule:
+
+Planet module
+==============
+
+The planet module allows one to model one or several planets interacting with the gas and other planets. One can force the planet to be on a fixed orbit (in which case
+migration and planet-planet interaction are ignored) or on the contrary integrate the equations of motion of the planet(s) simultaneously with the gas. All of the parameters
+used by the Planet module are set in the ``[Planet]`` block of the input file and read at runtime.
+
+Common parameters shared by all planets:
+
++------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------+
+| Entry name | Parameter type | Comment |
++========================+=======================+===========================================================================================================+
+| smoothing | string, float, float | | Expression of the planet potential (``plummer`` or ``polynomial``) |
+| | | | and smoothing length for the regularisation of the planet potential around the planet location. |
+| | | | Mandatory |
++------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------+
+| feelDisk | bool | | Whether the planet feels the gravitational forces due to the gas. This parameter enables migration. |
+| | | | Mandatory |
++------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------+
+| feelPlanets | bool | | Whether the planet interacts gravitationnaly with other planets. |
+| | | | Mandatory |
++------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------+
+| integrator | (string) | | Type of time integrator. Can be ``analytical`` (planets are on fixed orbits) or ``rk4`` (numerical |
+| | | | integration). |
+| | | | default: ``rk4`` |
++------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------+
+| indirectPlanets | (bool) | | Include indirect planet term arising from the acceleration of the star by the planet. |
+| | | | default: ``true`` |
++------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------+
+| torqueNormalization | (float) | | Add a multiplicative factor in the gravitational torque computation |
+| | | | when updating the velocity components of the planet(s) if feelDisk=``true``. |
+| | | | default: ``1.0`` |
++------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------+
+| hillCut | (bool) | | Exclude Hill's sphere (with radius :math:`r_\mathrm{h}`) when computing the gravitational torque |
+| | | | exerted by the gas on the planet. More specifically, we extract the material below |
+| | | | :math:`0.5r_\mathrm{h}` and use a :math:`\mathrm{sin}^2` transition until :math:`r_\mathrm{h}`. |
+| | | | default: ``false`` |
++------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------+
+| masstaper | (float) | | Whether the planet masses are progressively increased or set initially to their values. If set to ``0``,|
+| | | | the masstaper is disabled (default). Otherwise, the value sets the time needed to reach the final |
+| | | | planets mass. |
++------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------+
+
+
+Parameters specific to each planet (each entry is followed by a list of values for each planet):
+
++-----------------------+---------------------+----------------------------------------------------------------------------------------------------------------+
+| Entry name | Parameter type | Comment |
++=======================+=====================+================================================================================================================+
+| planetToPrimary | float, (float, ...) | | Mass ratio between the planet and the central star. |
+| | | | mandatory |
++-----------------------+---------------------+----------------------------------------------------------------------------------------------------------------+
+| initialDistance | float, (float, ...) | | Planet initial distance :math:`\displaystyle\frac{x_\mathrm{p}}{1+\mathrm{initialEccentricity}}`. |
+| | | | mandatory |
++-----------------------+---------------------+----------------------------------------------------------------------------------------------------------------+
+| initialEccentricity | (float, float, ...) | | Planet initial eccentricity. |
+| | | | default: ``0`` |
++-----------------------+---------------------+----------------------------------------------------------------------------------------------------------------+
+| initialInclination | (float, float, ...) | | Planet initial inclination. |
+| | | | default: ``0`` |
++-----------------------+---------------------+----------------------------------------------------------------------------------------------------------------+
+| tOffset | (float, float, ...) | | Are the planet(s) directly put in the simulations at t = 0 (default)? |
+| | | | Otherwise, the value sets the time to wait before introducing the planet(s). |
+| | | | default: ``0`` |
++-----------------------+---------------------+----------------------------------------------------------------------------------------------------------------+
+
+.. note::
+ Note that ``integrator=analytical`` is incompatible with ``feelDisk=true``, ``feelPlanet=true``, ``initialEccentricity!=0``, ``initialInclination!=0``.
+
+``smoothing`` takes 3 values. The first one gives the shape of the planet potential. The last two values (noted here ``a`` and ``b``) give the expression of the smoothing length, of the form :math:`\epsilon=ax_1^b`. In planet-disk interaction modeling, we usually choose ``a = aspect_ratio*thickness_smoothing`` and ``b = flaring_index``, with ``thickness_smoothing = 0.6`` typically in 2D (see, e.g., Masset & Benitez-Llambay, ApJ 817, 19 (2016)).
+
+* A ``plummer`` potential has the form:
+.. math::
+ \Phi_\mathrm{p}=\displaystyle-\frac{\mathrm{planetToPrimary}}{\sqrt{D_\mathrm{p}^2+\epsilon^2}}
+in cartesian coordinates :math:`(x,y,z)`, for a planet located at :math:`(x_\mathrm{p},y_\mathrm{p},z_\mathrm{p})` and with :math:`D_\mathrm{p}=\sqrt{(x-x_\mathrm{p})^2+(y-y_\mathrm{p})^2+(z-z_\mathrm{p})^2}`.
+
+* A ``polynomial`` potential has the form:
+.. math::
+ \Phi_\mathrm{p}=\left\{
+ \begin{array}{ll}
+ \displaystyle-\mathrm{planetToPrimary}(\frac{D_\mathrm{p}}{\epsilon}^4 - 2\frac{D_\mathrm{p}}{\epsilon}^3 + 2\frac{D_\mathrm{p}}{\epsilon}) & \mbox{if}~D_\mathrm{p}<\epsilon \\
+ \displaystyle-\frac{\mathrm{planetToPrimary}}{D_\mathrm{p}} & \mbox{else}
+ \end{array}
+ \right.
+
+.. note::
+  In 3D, it is possible to simulate only half of the disk by setting one of the boundaries in X3 to 0 (in polar coordinates) or X2 to :math:`\pi/2` (in spherical coordinates).
+  In this case, we correct the computation of the gravitational torque exerted by the gas onto the planet(s) by removing the contribution of the vertical component of the torque and doubling
+  the horizontal contributions (hence assuming midplane symmetry).
diff --git a/doc/source/modules/selfGravity.rst b/doc/source/modules/selfGravity.rst
new file mode 100644
index 00000000..6254caa8
--- /dev/null
+++ b/doc/source/modules/selfGravity.rst
@@ -0,0 +1,101 @@
+.. _selfGravityModule:
+
+Self Gravity module
+===================
+
+Equations solved and method
+---------------------------
+
+The ``SelfGravity`` module implements the computation of a self-gravitational potential generated
+by the gas distribution. Physically, it solves for the potential :math:`\psi_{SG}` that satisfies the
+Poisson equation
+
+:math:`\Delta \psi_{SG}=4\pi G_c\,\rho` (where :math:`G_c` is the gravitational constant in code
+units)
+
+This computation becomes relevant when the gas distribution
+is massive enough to influence its own dynamics. This is the case for example in young protoplanetary
+discs, which are subject to gravitational instabilities and for which this module was originally designed.
+
+The ``SelfGravity`` module implemented in *Idefix* follows the algorithm of the BIConjugate Gradient
+STABilized (BICGSTAB) method, a common iterative method designed to solve sparse linear systems (see for example
+Saad, Y. (2003). Iterative methods for sparse linear systems. Society for Industrial and Applied Mathematics.).
+This specific method makes it possible to solve the Poisson equation in any number of dimensions and in any geometry.
+
+.. note::
+ By default, the selfGravity module uses the BICGSTAB method to invert the Poisson equation.
+ However, an optional preconditioning feature (PBICGSTAB) is
+ implemented to handle particularly irregular and inhomogeneous grids, that may prevent the
+ convergence of the classic BICGSTAB. Note also that a simpler (yet very slow) Jacobi method
+ has been left for debug purpose. The user can also try the conjugate gradient and minimal residual
+ methods which have been tested successfully and are faster than BICGSTAB for some problems/grids.
+
+The main output of the ``SelfGravity`` module is the addition of the self-gravitational potential inferred from the
+gas distribution to the various sources of gravitational potential. At the beginning of every (M)HD step, the module is called to compute
+the potential due to the mass distribution at the given time. The potential computed by the ``SelfGravity`` module
+is then added to the other potential sources by the ``Gravity`` class (planets, central and user-defined potential).
+The computational cost of one selfGravity computation depends on the complexity of the
+gravitational field and the speed at which the density field evolves.
+
+Main parameters of the module
+-----------------------------
+
+The ``SelfGravity`` module is a submodule of the ``Gravity`` module to compute the gravitational potential. Hence, it is enabled
+by adding ``selfgravity`` to the ``potential`` in the ``[Gravity]`` block (see :ref:`gravitySection`). The parameters specific to self gravity are to be
+set in a dedicated ``[SelfGravity]`` block:
+
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| Entry name | Parameter type | Comment |
++================+=========================+=============================================================================================+
+| solver | string | | Specifies which solver should be used. Can be ``Jacobi``, ``CG``, ``MINRES``, ``BICGSTAB``|
+| | | | which corresponds to Jacobi, conjugate gradient, minimal residual or bi-conjugate |
+| | | | stabilised method. Note that a preconditioned version is available by adding a ``P`` to |
+| | | | the solver name (e.g. ``PCG`` or ``PBICGSTAB``). |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| targetError | real | | Set the error allowed in the residual :math:`r=\Delta\psi_{SG}/(4\pi G_c)-\rho`. The error|
+| | | | computation is based on a L2 norm. Default is 1e-2. |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| maxIter | int | | Set the maximum number of iterations allowed to the solver to reach convergence. Default |
+| | | | is 1000. |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| skip | int | | Set the number of integration cycles between each computation of self-gravity potential. |
+| | | | Default is 1 (i.e. self-gravity is computed at every cycle). |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+
+
+Boundary conditions on self-gravitating potential
+--------------------------------------------------
+
+Solving for the self-gravitating potential requires the definition of a boundary condition for said potential. This is done with the entries
+``boundary-Xn-dir`` of the ``[SelfGravity]`` block, where ``n`` can be 1, 2 or 3 and is the direction for the boundary condition and ``dir`` can be ``beg`` or ``end`` and
+indicates the side of the boundary.
+
+The boundary conditions can be one of the following:
+
++-----------------------+------------------------------------------------------------------------------------------------------------------+
+| ``boundary-Xn-dir`` | Comment |
++=======================+==================================================================================================================+
+| nullpot | Zero potential boundary conditions. The potential is set to zero in the ghost cells. |
++-----------------------+------------------------------------------------------------------------------------------------------------------+
+| nullgrad | Zero gradient boundary conditions. The potential in the last active cell is copied in the ghost cells. |
++-----------------------+------------------------------------------------------------------------------------------------------------------+
+| periodic | Periodic boundary conditions. The potential is copied between beg and end sides of the boundary. |
++-----------------------+------------------------------------------------------------------------------------------------------------------+
+| axis | | Axis boundary condition. Should be used in spherical coordinates in the X2 direction when the domain starts/stops|
+| | | on the axis. |
++-----------------------+------------------------------------------------------------------------------------------------------------------+
+| origin | | In spherical coordinates, artificially extends the grid used to compute the potential close to R=0. |
+| | | should only be used in X1-beg direction. |
++-----------------------+------------------------------------------------------------------------------------------------------------------+
+| userdef | |User-defined boundary conditions. The boundary condition function should be enrolled in the setup constructor |
+| | | (see :ref:`userdefBoundaries`). |
++-----------------------+------------------------------------------------------------------------------------------------------------------+
+
+.. note::
+ The method in fully periodic setups requires the removal of the mean gas density
+ before solving the Poisson equation. This is done automatically if all of the self-gravity boundaries are set to ``periodic``.
+ Hence, make sure to specify all self-gravity boundary conditions as periodic for such setups, otherwise the solver will
+ fail to converge.
+
+The selfGravity module in *Idefix* is fully parallelised. This means that one can have a MPI domain decomposition in any spatial direction
+either on CPU or GPU.
diff --git a/doc/source/programmingguide.rst b/doc/source/programmingguide.rst
index 7a54a2be..18795b50 100644
--- a/doc/source/programmingguide.rst
+++ b/doc/source/programmingguide.rst
@@ -13,9 +13,10 @@ Data types
Because *Idefix* can run on GPUs, and since GPUs experience a significant speedup when working
with single precision arithmetic, a specific ``real`` datatype is used for all floating point
-operations in *Idefix*. This is by default aliased to ``double`` in `idefix.hpp`, but it can easily be modified
-to ``float`` for specific problems. Note however that this loss of precision might have a strong
-impact on the quality of the solution and is therefore not recommended.
+operations in *Idefix*. This is by default aliased to ``double`` in `idefix.hpp`, but it can easily be converted
+to ``single`` with the cmake ``Idefix_PRECISION`` property. Single precision arithmetic can lead to very significant speedups
+on some GPU architectures, but is not recommended for production runs as it can have an impact on the precision or even
+convergence of the solution.
Host and device
===============
@@ -162,9 +163,8 @@ For instance, a sum over all of the elements would be done through:
In the above example, ``localSum`` is the temporary variable *on the device* over which portions of the reduction
are performed, while ``mySum`` is the final variable, *on the host* where the result is stored.
-It is possible to do other reduction operations
-like findining minimum, maximum etc (see
-`Kokkos custom reductions `_
+It is possible to do other reduction operations like finding minimum, maximum etc (see
+`Kokkos custom reductions `_
for a list). For instance, the minimum value is obtained with the following code
snippet:
@@ -293,9 +293,9 @@ specific to this subprocess, in contrast to ``Grid``). Here is the full API for
.. _hydroClass:
-``Hydro`` class
+``Fluid`` class
---------------------
-The ``Hydro`` class (and its sub-classes) contains all of the fields and methods specific to (magneto) hydrodynamics. While
+The ``Fluid`` class (and its sub-classes) contains all of the fields and methods specific to (magneto) hydrodynamics. While
interested users may want to read in details the implementation of this class, we provide below a list of the most important
members
@@ -326,6 +326,21 @@ Because the code uses contrained transport, the field defined on cell faces is s
array. Just like for ``Vc``, there are aliases, with "s" suffixes defined to simplify the adressing
of the magnetic field components, as ``Vs(BX2s,k,j,i)``.
+It is important to realise that the ``Fluid`` class is a class template, that depends on the type of
+fluid that is modelled (encoded in a ``Physics`` class). By default, *Idefix* always instantiates
+one "default" fluid that contains the "default" physics requested by the user.
+This default fluid is, for compatibility reasons with *Idefix* v1, called `hydro` and is accessible
+from the ``dataBlock`` class as a pointer. It is defined as
+
+.. code-block:: c++
+
+ Fluid hydro;
+
+
+Additional fluids can be instantiated by *Idefix* for some problems, such as pressureless fluids
+to model dust grains (see :ref:`dustModule`).
+
+
.. _datablockhostClass:
``DataBlockHost`` class
@@ -390,7 +405,7 @@ The ``DumpImage`` class definition is
class DumpImage {
public:
- DumpImage(std::string, Output &); // constructor with dump filename and output object as parameters
+ DumpImage(std::string, DataBlock *); // constructor with dump filename and output object as parameters
int np_int[3]; // number of points in each direction
int geometry; // geometry of the dump
@@ -403,7 +418,7 @@ The ``DumpImage`` class definition is
};
-Typically, a ``DumpImage`` object is constructed invoking the ``DumpImage(filename, output)`` constructor,
+Typically, a ``DumpImage`` object is constructed invoking the ``DumpImage(filename, data)`` constructor,
which essentially opens, allocate and load the dump file in memory (when running with MPI, each processor
have access to the full domain covered by the dump, so try to avoid loading very large dumps!).
The user can then have access to the dump content using the variable members of the object
@@ -422,7 +437,7 @@ finished working with it. An example is provided in :ref:`setupInitDump`.
.. _LookupTableClass:
``LookupTable`` class
------------------
+---------------------
The ``LookupTable`` class allows one to read and interpolate elements from a comma-separated value (CSV) file or a numpy file
(generated from ``numpy.save`` in python).
@@ -524,36 +539,111 @@ The ``Get`` and ``GetHost`` functions expect a C array of size ``nDim`` and retu
.. note::
Usage examples are provided in `test/utils/lookupTable`.
+
+.. _debugging:
+
Debugging and profiling
=======================
-The easiest way to trigger debugging in ``Idefix`` is to switch on ``Idefix_DEBUG`` in cmake (for instance
+The easiest way to trigger debugging in *Idefix* is to switch on ``Idefix_DEBUG`` in cmake (for instance
adding ``-DIdefix_DEBUG=ON`` when calling cmake). This forces the code to log each time a function is called or
returned (this is achieved thanks to the ``idfx::pushRegion(std::string)`` and ``idfx::popRegion()`` which are
-found at the beginning and end of each function). In addition, ``Idefix_DEBUG`` enables Kokkos array bound checks, which
-will throw an error each time one tries to access an array out of its allocated memory space. Note that all of these
+found at the beginning and end of each function). In addition, ``Idefix_DEBUG`` will force *Idefix* to wait for each
+``idefix_for`` to finish before continuing to the next instruction (otherwise, ``idefix_for`` are asynchronous when using
+an accelerator). This simplifies the detection and identification of bugs in ``idefix_for`` loops. Note that all of these
debugging features induce a large overhead, and should therefore not be activated in production runs.
-It is also possible to use `Kokkos-tools `_ to debug and profile the code.
-For instance, on the fly profiling, can be enabled with the Kokkos ``space-time-stack`` module. To use it, simply clone
-``Kokkos-tools`` to the directory of your choice (say ``$KOKKOS_TOOLS``), then ``cd`` to
-``$KOKKOS_TOOLS/profiling/space-time-stack`` and compile the module with ``make``.
+If you suspect an out-of-bound access, it may be worth additionally enabling ``Kokkos_ENABLE_BOUNDS_CHECK`` that will check
+that you are not trying to access an array outside of its bounds.
-Once the profiling module is compiled, you can use it by setting the environement variable ``KOKKOS_PROFILE_LIBRARY``.
-For instance, in bash:
+If you want to profile the code, the simplest way is to use the embedded profiling tool in *Idefix*, adding ``-profile`` to the command line
+when calling the code. This will produce a simplified profiling report when *Idefix* finishes.
+
+It is also possible to use `Kokkos-tools `_ for more advanced profiling/debugging. To use it,
+you must compile Kokkos tools in the directory of your choice and enable your favourite tool
+by setting the environment variable ``KOKKOS_TOOLS_LIBS`` to the tool path, for instance:
.. code-block:: bash
- export KOKKOS_PROFILE_LIBRARY=$KOKKOS_TOOLS/profiling/space-time-stack/kp_space_time_stack.so
+ export KOKKOS_TOOLS_LIBS=$KOKKOS_TOOLS/profiling/space-time-stack/libkp_space_time_stack.so
-Once this environement variable is set, *Idefix* automatically logs profiling informations when it ends (recompilation of *Idefix*
-is not needed).
-.. tip::
+.. _defensiveProgramming:
+
+Defensive Programming
+---------------------
+
+``idefix.hpp`` defines useful function-like macros to program defensively and create
+informative error messages and warnings.
+
+First and foremost, ``IDEFIX_ERROR`` and ``IDEFIX_WARNING`` can be used in host space.
+
+.. code-block:: c++
+
+ #include "idefix.hpp"
+
+ #ifdef HAVE_ENERGY
+ IDEFIX_WARNING("This setup is not tested with HAVE_ENERGY defined");
+ #endif
+
+ real boxSize {-1};
+
+ // ... determine boxSize at runtime from parameter file
+
+ if(boxSize<1e-5) {
+ IDEFIX_ERROR("This setup requires a minimal box size of 1e-5");
+ }
+
+
+``idefix.hpp`` also defines the more advanced ``RUNTIME_CHECK_HOST`` and
+``RUNTIME_CHECK_KERNEL`` macros, which are useful to define arbitrary sanity checks at
+runtime in host space and within kernels respectively, together with a nicely formatted
+error message.
+
+Both macros take a condition (a boolean expression that *should* evaluate to ``true`` at
+runtime), and an error message. Additional arguments may be supplemented to the error
+message using string interpolation. Note however that this only works on CPU, so
+``RUNTIME_CHECK_KERNEL`` also expects a default error message that'll be used when
+running on GPUs.
+
+As an illustrative example, here's how they can be used to verify some assumptions at
+runtime.
+
+.. code-block:: c++
- By default, ``Kokkos-tools`` assumes the user code is using MPI. If one wants to perform profiling in serial, one should disable MPI before
- compling the ``space-time-stack`` module. This is done by editing the makefile in ``$KOKKOS_TOOLS/profiling/space-time-stack``
- changing the compiler ``CXX`` to a valid serial compiler, and adding ``-DUSE_MPI=0`` to ``CFLAGS``.
+ #include "idefix.hpp"
+
+ const int MAX_NPARTICLES = 1024;
+ const int NPARTICLES = 128;
+ const real lightSpeed = 1.0;
+ auto particleSpeed = IdefixArray1D("particleSpeed", NPARTICLES);
+
+ RUNTIME_CHECK_HOST(
+ NPARTICLES<=MAX_NPARTICLES,
+ "The number of particles requested (%i) is too high (max is %i)",
+ NPARTICLES, MAX_NPARTICLES
+ );
+
+ idefix_for("check particle speeds",
+ 0, NPARTICLES,
+ KOKKOS_LAMBDA(int idx) {
+ RUNTIME_CHECK_KERNEL(
+ particleSpeed(idx) < lightSpeed,
+ "Speeding particle(s) detected !", // this default error message is used on GPUs
+ "Particle at index %i has speed %.16e, which is greater than light speed (%.16e)",
+ idx, particleSpeed(idx), lightSpeed
+ );
+ }
+ );
+
+
+``RUNTIME_CHECK_HOST`` and ``RUNTIME_CHECK_KERNEL`` are considered debug-only tools, so
+are by default excluded at compilation, and do not impact performance in production.
+To enable them, use the ``-D Idefix_RUNTIME_CHECKS=ON`` configuration flag.
+
+It may also be useful to implement debug-only safeguards with custom logic that doesn't
+fit ``RUNTIME_CHECK_*`` macros. This can be achieved by using the compiler directive
+``#ifdef RUNTIME_CHECKS`` directly.
Minimal skeleton
================
diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst
index 44c56562..870072d8 100644
--- a/doc/source/quickstart.rst
+++ b/doc/source/quickstart.rst
@@ -8,7 +8,7 @@ Download and install *Idefix*
One first need to download *Idefix* from the public git. Say you want to install *Idefix* in the directory ``~/src/idefix``, you need to run::
cd ~/src
- git clone https:// idefix
+ git clone https://github.com/idefix-code/idefix.git idefix
cd idefix
git submodule init
git submodule update
@@ -29,11 +29,11 @@ install directory of *Idefix*. We therefore conclude the installation with::
Configure and run the SOD tube test problem
===========================================
-The test problem are all located in the ``$IDEFIX_DIR/test`` directory of the distribution. To access the Sod shock tube test, one go to::
+The test problems are all located in the ``$IDEFIX_DIR/test`` directory of the distribution. To access the Sod shock tube test, one goes to::
cd $IDEFIX_DIR/test/HD/sod
-From there, one sees 3 files and a directory:
+From there, one sees 4 files and a directory:
``definitions.hpp``
The configuration file. It contains C preprocessor directives for configuration options which require a re-compilation of the code. These are mostly
@@ -46,11 +46,15 @@ From there, one sees 3 files and a directory:
``idefix.ini``
The input file. This file is read at runtime *only*. It defines most of the properties of the code: resolution, integrator, output, physical modules.
-``python`` directory
- This directory is provided with most of the tests. Its content allows one to check that the code output is consistent with what is expected.
+``testme.py``
+ The validation script. It uses the python class ``idfx_test.py`` to validate the code outputs for that particular test. Use ``testme.py -help`` to find out the options.
+
+``python``
+ Directory that contains the standard test validation: i.e. comparison against an analytical solution (when it exists)
+
For the time being, the files are already set up for the Sod test problem. The only thing lacking is a ``makefile`` to actually compile the code.
-In *Idefix* the makefile is created by `Cmake `_ ,a tool to control code generation on diverse platforms. To configure *Idefix*,
+In *Idefix* the makefile is created by `cmake `_, a tool to control code generation on diverse platforms. To configure *Idefix*,
you need Cmake version >=3.16 installed on your machine. For this quickstart, let us configure the code to run on
the cpu in serial (default behaviour). Assuming a ``cmake`` is in the PATH, we simply type in::
@@ -66,14 +70,19 @@ Finally, we compile and run the code::
make -j 8
./idefix
-This test being one dimensional, it runs very fast. We can check that the final solution match the prediction of the shock tube problem. To this end, we go to the ``python``
-subdirectory and run the test::
+This test being one dimensional, it runs very fast. We can check that the final solution matches the prediction of the shock tube problem. To this end, we
+use the ``testme.py`` script:
- cd python
- python3 ./testidefix.py
+ ./testme.py -check
-If everything goes well, ``testidefix.py`` will load the latest output produced by idefix, display it, compare it with an analytical solution and tell you
-whether the error is acceptable or not.
+The ``-check`` option tells the test suite that the simulation has already run and we just need a validation of the result. Note that it's also
+possible to validate all of the possible combinations of algorithms using the ``-all`` option. In that case, the script automatically
+configures, compiles and validates a series of tests with varying combinations of algorithms.
+
+.. warning::
+ Note that the validation relies on large reference
+ files that are stored in the separate `idefix-code/reference` repository that is automatically
+ cloned as a submodule along with Kokkos if you follow the above procedure.
Configure and run the Orszag-Tang test problem
@@ -87,8 +96,9 @@ test with::
cmake $IDEFIX_DIR
-Once the code is configured, it can be ran::
+Once the code is configured, it can be compiled and run::
+ make -j 8
./idefix
This test can take much more time that the sod test since it is 2D. In the end, you can visualize the results, which are written as VTK files using
diff --git a/doc/source/reference.rst b/doc/source/reference.rst
index 1c08c970..d4447b70 100644
--- a/doc/source/reference.rst
+++ b/doc/source/reference.rst
@@ -1,5 +1,5 @@
=====================
-Reference guide
+User guide
=====================
First, it is good practice to set the environment variable ``IDEFIX_DIR`` to the root path of your *Idefix* distribution, as it is needed at several stages. Setting up *Idefix* for a particular problem implies editing several files, some of which are automatically generated.
To set up a particular *Idefix* setup, 5 steps are required.
diff --git a/doc/source/reference/commandline.rst b/doc/source/reference/commandline.rst
index 105f1569..9feef769 100644
--- a/doc/source/reference/commandline.rst
+++ b/doc/source/reference/commandline.rst
@@ -21,10 +21,16 @@ Several options can be provided at command line when running the code. These are
+--------------------+-------------------------------------------------------------------------------------------------------------------------+
| -maxcycles n | stops when the code has performed ``n`` integration cycles |
+--------------------+-------------------------------------------------------------------------------------------------------------------------+
+| -force_init | | call initial conditions before reading dump file (this has no effect if -restart is not also passed). |
+| | | This option is useful when more physics is enabled when restarting from a dump (e.g. switching on MHD or dust) |
+| | | as it initializes from the initial conditions the quantities that are absent from the restart dump |
++--------------------+-------------------------------------------------------------------------------------------------------------------------+
| -nolog | disable log files |
+--------------------+-------------------------------------------------------------------------------------------------------------------------+
| -nowrite | disable all writes (useful for raw performance measures or for tests). This option implies ``-nolog`` |
+--------------------+-------------------------------------------------------------------------------------------------------------------------+
+| -profile | Enable on-the-fly performance profiling (a final text report is automatically generated). |
++--------------------+-------------------------------------------------------------------------------------------------------------------------+
| -Werror | warning messages are considered as errors and stop the code with a non-zero exit code. |
+--------------------+-------------------------------------------------------------------------------------------------------------------------+
diff --git a/doc/source/reference/idefix.ini.rst b/doc/source/reference/idefix.ini.rst
index ada46ff6..ae2b8e5a 100644
--- a/doc/source/reference/idefix.ini.rst
+++ b/doc/source/reference/idefix.ini.rst
@@ -128,7 +128,11 @@ This section is used by the hydrodynamics class of *Idefix*. It defines the hydr
| | | | to be enrolled with ``EnrollIsoSoundSpeed(IsoSoundSpeedFunc)`` |
| | | | (see :ref:`functionEnrollment`). In this case, the second parameter is not used. |
+----------------+-------------------------+---------------------------------------------------------------------------------------------+
-| gamma | float | Adiabatic index when ISOTHERMAL is not defined. Default to 5/3 if not set. |
+| gamma | float | | Adiabatic index when ISOTHERMAL is not defined. Default to 5/3 if not set. |
+| | | | NB: this parameter is used only by the default equation of state implemented in *Idefix* |
+| | | | Custom equation of states (:ref:`eosModule`) ignore this parameter |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| tracer | integer | Number of passive tracers associated to the fluid. Default to 0 if not set. |
+----------------+-------------------------+---------------------------------------------------------------------------------------------+
| resistivity | string, string, (float) | | Switches on Ohmic diffusion. |
| | | | The first parameter can be ``explicit`` or ``rkl``. When ``explicit``, diffusion is |
@@ -193,6 +197,9 @@ This section is used by the hydrodynamics class of *Idefix*. It defines the hydr
| | | | limiter when strong shocks are detected. The entry parameter is the threshold above which |
| | | | a shock is considered "strong", in units of :math:`|\nabla P /P|`. A low value hence tends|
| | | | to increase the code numerical diffusivity. Typical values are 1 to 10. |
+| | | | Note that it is possible to enroll a user-defined function to flag specific cells for |
+| | | | shock flattening, in addition to the default flag. This user function can be enrolled |
+| | | | with ``Hydro.shockFlattening.EnrollUserShockFlag(UserShockFunc)`` . |
+----------------+-------------------------+---------------------------------------------------------------------------------------------+
@@ -227,10 +234,19 @@ This section enables the orbital advection algorithm provided in *Idefix*. More
| | | | Default: 10 |
+----------------+-------------------------+---------------------------------------------------------------------------------------------+
+.. _gravitySection:
+
``Gravity`` section
--------------------
-This section enables gravity in the form of a gravitational potential and/or an acceleration vector
+This section enables gravity in the form of a gravitational potential and/or an acceleration vector. The gravitational potential used by the code
+reads
+
+:math:`\psi=-G_c M_{\rm central}/R+\psi_{SG}+\psi_{\rm userdef}`
+
+where :math:`G_c` is the gravitational constant, :math:`M_{\rm central}` is the mass of a central object, :math:`\psi_{SG}` is the
+self-gravitational potential and :math:`\psi_{\rm userdef}` is a user-defined potential. Each term can be enabled individually in the gravity
+section as follows:
+----------------+-------------------------+---------------------------------------------------------------------------------------------+
| Entry name | Parameter type | Comment |
@@ -239,21 +255,62 @@ This section enables gravity in the form of a gravitational potential and/or an
| | | | total potential used by *Idefix*. |
| | | | |
| | | | * ``userdef`` allows the user to give *Idefix* a user-defined potential function. In this |
-| | | | ``Gravity`` class expects a user-defined potential function to be enrolled with |
+| | | | case, ``Gravity`` class expects a user-defined potential function to be enrolled with |
| | | | ``Gavity::EnrollPotential(GravPotentialFunc)`` (see :ref:`functionEnrollment`) |
| | | | * ``central`` allows the user to automatically add the potential of a central point mass. |
| | | | In this case, the central mass is assumed to be 1 in code units. This can be modified |
| | | | using the Mcentral parameter, or using the ``Gravity::SetCentralMass(real)`` method. |
| | | | * ``selfgravity`` enables the potential computed from solving Poisson equation with the |
-| | | | density distribution |
+| | | | density distribution (see :ref:`selfGravitySection` and :ref:`selfGravityModule`). |
+----------------+-------------------------+---------------------------------------------------------------------------------------------+
| Mcentral | real | | Mass of the central object when a central potential is enabled (see above). Default is 1. |
+----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| gravCst | real | | Set the value of the gravitational constant :math:`G_c` used by the central |
+| | | | mass potential and self-gravitational potential (when enabled). Default is 1. |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
| bodyForce | string | | Adds an acceleration vector to each cell of the domain. The only value allowed |
| | | | is ``userdef``. The ``Gravity`` class then expects a user-defined bodyforce function to |
| | | | be enrolled via ``Gavity::EnrollBodyForce(BodyForceFunc)`` (see :ref:`functionEnrollment`)|
| | | | See the shearing box tests for examples of using bodyForce. |
+----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| skip | int | | Set the number of integration cycles between each computation of the gravity potential. |
+| | | | Default is 1 (i.e. gravity is computed at every cycle). |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+
+
+
+.. _selfGravitySection:
+
+``SelfGravity`` section
+-----------------------
+
+This section describes the method used to compute the self-gravitating potential :math:`\psi_{SG}`. More details on the algorithm may be found in the dedicated
+:ref:`selfGravityModule` documentation. For this module to be used, self-gravity must be enabled as a source
+of gravitational potential in the ``Gravity`` section (see :ref:`gravitySection` above).
+
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| Entry name | Parameter type | Comment |
++================+=========================+=============================================================================================+
+| solver | string | | Specifies which solver should be used. Can be ``Jacobi``, ``BICGSTAB`` or ``PBICGSTAB`` |
+| | | | for the left-preconditioned BICGSTAB solver. |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| targetError | real | | Set the error allowed in the residual :math:`r=\Delta\psi_G/(4\pi G_c)-\rho`. The error |
+| | | | computation is based on a L2 norm. Default is 1e-2. |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| maxIter | int | | Set the maximum number of iterations allowed to the solver to reach convergence. Default |
+| | | | is 1000. |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| boundary-Xn-dir| string | | Boundary condition applied to the potential field computed by self-gravity |
+| | | | ``n`` can be 1, 2 or 3 and is the direction for the boundary condition. ``dir`` can be |
+| | | | ``beg`` or ``end`` and indicates the side of the boundary. |
+| | | | The boundary conditions allowed by the self-gravity solver are described in |
+| | | | :ref:`selfGravityModule` |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| skip | int | | Set the number of integration cycles between each computation of self-gravity potential. |
+| | | | Default is 1 (i.e. self-gravity is computed at every cycle). |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+
+
``RKL`` section
------------------
@@ -266,6 +323,8 @@ this block is simply ignored.
+================+====================+===========================================================================================================+
| cfl | float | CFL number for the RKL sub-step. Should be <0.5 for stability. Set by default to 0.5 if not provided |
+----------------+--------------------+-----------------------------------------------------------------------------------------------------------+
+| rmax_par | float | Maximum ratio between the hyperbolic timestep and the parabolic (RKL) timestep. Set to 100.0 by default. |
++----------------+--------------------+-----------------------------------------------------------------------------------------------------------+
| check_nan | bool | Whether RKL should check the solution when running. This option affects performances. Default false. |
+----------------+--------------------+-----------------------------------------------------------------------------------------------------------+
@@ -314,9 +373,32 @@ This section describes the outputs *Idefix* produces. For more details about eac
| dmp | float | | Time interval between dump outputs, in code units. |
| | | | If negative, periodic dump outputs are disabled. |
+----------------+-------------------------+--------------------------------------------------------------------------------------------------+
+| dmp_dir | string | | directory for dump file outputs. Default to "./" |
+| | | | The directory is automatically created if it does not exist. |
++----------------+-------------------------+--------------------------------------------------------------------------------------------------+
| vtk | float | | Time interval between vtk outputs, in code units. |
| | | | If negative, periodic vtk outputs are disabled. |
+----------------+-------------------------+--------------------------------------------------------------------------------------------------+
+| vtk_dir | string | | directory for vtk file outputs. Default to "./" |
+| | | | The directory is automatically created if it does not exist. |
++----------------+-------------------------+--------------------------------------------------------------------------------------------------+
+| vtk_sliceN | float, int, float, | | Create VTK files that contain a slice (cut or average) of the full domain. |
+| | string | | the "N" of the entry name is an integer that identifies each slice, starting from n=1 |
+| | | | 1st parameter: Time interval between each slice vtk file |
+| | | | 2nd parameter: plane of the slice. 0=(x2,x3) slice, 1=(x1,x3), 2=(x1,x2) |
+| | | | 3rd parameter: localisation of the slice (when the slice is an average, this parameter only |
+| | | | affects the localisation of the slice in the produced vtk file) |
+| | | | 4th parameter: slice type. Can be "cut" (for a slice of the full domain) or "average" (for an |
+| | | | average along the direction given by the second parameter). NB: "average" performs a naive |
+| | | | point average, without any consideration on the cell volumes/areas. |
+| | | | NB2: this feature is in beta, and sometimes fails with some MPI implementations. |
++----------------+-------------------------+--------------------------------------------------------------------------------------------------+
+| xdmf | float | | Time interval between xdmf outputs, in code units (requires Idefix to be configured with HDF5) |
+| | | | If negative, periodic xdmf outputs are disabled. |
++----------------+-------------------------+--------------------------------------------------------------------------------------------------+
+| xdmf_dir | string | | directory for xdmf file outputs. Default to "./" |
+| | | | The directory is automatically created if it does not exist. |
++----------------+-------------------------+--------------------------------------------------------------------------------------------------+
| analysis | float | | Time interval between analysis outputs, in code units. |
| | | | If negative, periodic analysis outputs are disabled. |
| | | | When this entry is set, *Idefix* expects a user-defined analysis function to be |
@@ -326,9 +408,31 @@ This section describes the outputs *Idefix* produces. For more details about eac
| | | | When this list is present in the input file, *Idefix* expects a user-defined |
| | | | function to be enrolled with ``Output::EnrollUserDefVariables(UserDefVariablesFunc)`` |
| | | | (see :ref:`functionEnrollment`). The user-defined variables defined by this function |
-| | | | are then written as new variables in vtk outputs. |
+| | | | are then written as new variables in vtk and/or xdmf outputs. |
+----------------+-------------------------+--------------------------------------------------------------------------------------------------+
.. note::
Even if dumps are not mentionned in your input file (and are therefore disabled), dump files are still produced when *Idefix* captures a signal
(see :ref:`signalHandling`) or when ``max_runtime`` is set and reached.
+
+
+.. _dustSection:
+
+``Dust`` section
+----------------------
+
+This section describes the dust fields computed using a zero pressure gas approximation (see :ref:`dustModule`).
+
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| Entry name | Parameter type | Comment |
++================+=========================+=============================================================================================+
+| nSpecies | integer | | Number of dust species to solve |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| drag | string, float, ... | | The first parameter describe the drag type. Possible values are: ``gamma``, ``tau``, |
+| | | | ``size`` and ``userdef``. |
+| | | | The remaining parameters give the drag parameter :math:`\beta_i` for each dust species. |
+| | | | (see :ref:`dustModule`). *Idefix* expects to have as many drag parameters as there are |
+| | | | dust species. |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
+| drag_feedback | bool | | (optional) whether the gas feedback is enabled (default true). |
++----------------+-------------------------+---------------------------------------------------------------------------------------------+
diff --git a/doc/source/reference/makefile.rst b/doc/source/reference/makefile.rst
index 7995e05b..8e53c224 100644
--- a/doc/source/reference/makefile.rst
+++ b/doc/source/reference/makefile.rst
@@ -40,6 +40,13 @@ Several options can be enabled from the command line (or are accessible with ``c
Enable debug options in *Idefix*. This triggers a lot of outputs, and automatic bound checks of array accesses. As a result, this
option makes the code very slow.
+``-D Idefix_RUNTIME_CHECKS=ON``
+ Include (potentially expensive) runtime sanity checks implemented with ``RUNTIME_CHECK_HOST`` and ``RUNTIME_CHECK_KERNEL``.
+ See :ref:`defensiveProgramming`.
+
+``-D Idefix_HDF5=ON``
+ Enable HDF5 outputs. Requires the HDF5 library on the target system. Required for *Idefix* XDMF outputs.
+
``-D Idefix_RECONSTRUCTION=x``
Specify the type of reconstruction scheme (replaces the old "ORDER" parameter in ``definitions.hpp``). Accepted values for ``x`` are:
+ ``Constant``: first order, donor cell reconstruction,
@@ -88,6 +95,65 @@ Several options can be enabled from the command line (or are accessible with ``c
option to explictely tell ``cmake`` a path to a build=*Idefix* problem directory.
+.. _setupExamples:
+
+Configuration examples for selected clusters
+++++++++++++++++++++++++++++++++++++++++++++
+
+
+AdAstra at CINES, AMD Mi250X GPUs
+---------------------------------
+
+We recommend the following modules and environment variables on AdAstra:
+
+.. code-block:: bash
+
+ module load PrgEnv-cray-amd
+ module load cray-mpich
+ module load craype-network-ofi
+ module load cce
+ module load cpe
+ module load rocm/5.2.0
+ export LDFLAGS="-L${ROCM_PATH}/lib -lamdhip64 -lstdc++fs"
+
+The last line being there to guarantee the link to the HIP library and the access to specific
+C++17 functions.
+
+Finally, *Idefix* can be configured to run on Mi250 by enabling HIP and the desired architecture with the following options to ccmake:
+
+.. code-block:: bash
+
+ -DKokkos_ENABLE_HIP=ON -DKokkos_ENABLE_HIP_MULTIPLE_KERNEL_INSTANTIATION=ON -DKokkos_ARCH_VEGA90A=ON
+
+
+MPI (multi-GPU) can be enabled by adding ``-DIdefix_MPI=ON`` as usual.
+
+Jean Zay at IDRIS, Nvidia V100 and A100 GPUs
+--------------------------------------------
+
+We recommend the following modules and environment variables on Jean Zay:
+
+.. code-block:: bash
+
+ module load cuda/12.1.0
+ module load gcc/12.2.0
+ module load openmpi/4.1.1-cuda
+ module load cmake/3.18.0
+
+*Idefix* can then be configured to run on Nvidia V100 with the following options to ccmake:
+
+.. code-block:: bash
+
+ -DKokkos_ENABLE_CUDA=ON -DKokkos_ENABLE_VOLTA70=ON
+
+While Ampere A100 GPUs are enabled with
+
+.. code-block:: bash
+
+ -DKokkos_ENABLE_CUDA=ON -DKokkos_ENABLE_AMPERE80=ON
+
+MPI (multi-GPU) can be enabled by adding ``-DIdefix_MPI=ON`` as usual.
+
.. _setupSpecificOptions:
Setup-specific options
diff --git a/doc/source/reference/outputs.rst b/doc/source/reference/outputs.rst
index 6fe2e64a..d531b497 100644
--- a/doc/source/reference/outputs.rst
+++ b/doc/source/reference/outputs.rst
@@ -7,7 +7,7 @@ Output formats
--------------
*Idefix* uses several types of outputs you may want for your setup. By default, *Idefix* allows
-for 3 kinds of outputs:
+for 4 kinds of outputs:
* logs which essentially tells the user what *Idefix* is currently doing. When running in serial, logs are sent to stdout, but when
MPI is enabled, only the logs of the rank 0 process is sent to stdout, and each process (including rank 0) simultaneously writes a
@@ -17,6 +17,8 @@ for 3 kinds of outputs:
* VTK files (.vtk) are Visualation Toolkit files, which are easily readable by visualisation softwares such as `Paraview `_
or `Visit `_. A set of python methods is also provided to read vtk file from your
python scripts in the `pytools` directory.
+* XDMF files (eXtensible Data Model and Format) is a common format used in many HPC codes, which is easily readable by visualisation softwares such as `Paraview `_
+ or `Visit `_. The XDMF format relies on the HDF5 format and therefore requires *Idefix* to be configured with HDF5 support.
* user-defined analysis files. These are totally left to the user. They usually consist of ascii tables defined by the user, but they can
be anything.
diff --git a/doc/source/reference/setup.cpp.rst b/doc/source/reference/setup.cpp.rst
index e7f4554a..5c0691db 100644
--- a/doc/source/reference/setup.cpp.rst
+++ b/doc/source/reference/setup.cpp.rst
@@ -48,7 +48,7 @@ the input file, and for user-defined outputs. This can be seen as a way to link
setup to the main code at runtime, and avoid the need to pre-define tens of empty functions. Function enrollment
is achieved by calling one of the ``EnrollXXX`` function of the class associated to it.
-For instance, the ``Hydro`` class provide the following list of enrollment functions (declared in hydro.hpp):
+For instance, the ``Fluid`` class provides the following list of enrollment functions (declared in hydro.hpp):
.. code-block:: c++
@@ -72,22 +72,46 @@ For instance, the ``Hydro`` class provide the following list of enrollment funct
void EnrollIsoSoundSpeed(IsoSoundSpeedFunc);
When called, these function expects the address of the user-defined function. These user-defined
-function should have the following signatures (declared in hydro_defs.hpp):
+functions should have the following signatures (declared in fluid_defs.hpp and boundary.hpp):
.. code-block:: c++
- using UserDefBoundaryFunc = void (*) (DataBlock &, int dir, BoundarySide side,
+ using UserDefBoundaryFunc = void (*) (Fluid *, int dir, BoundarySide side,
const real t);
using GravPotentialFunc = void (*) (DataBlock &, const real t, IdefixArray1D&,
IdefixArray1D&, IdefixArray1D&,
IdefixArray3D &);
- using SrcTermFunc = void (*) (DataBlock &, const real t, const real dt);
- using InternalBoundaryFunc = void (*) (DataBlock &, const real t);
+ using SrcTermFunc = void (*) (Fluid *, const real t, const real dt);
+ using InternalBoundaryFunc = void (*) (Fluid*, const real t);
using EmfBoundaryFunc = void (*) (DataBlock &, const real t);
using DiffusivityFunc = void (*) (DataBlock &, const real t, IdefixArray3D &);
using IsoSoundSpeedFunc = void (*) (DataBlock &, const real t, IdefixArray3D &);
+
+Note that some of these functions involve the template class ``Fluid``. The ``Fluid`` class
+is indeed capable of handling several types of fluids (described by the template parameter ``Phys``):
+MHD, HD, pressureless, etc... Hence, depending on the type of fluid to which the user-defined
+function applies, the signature of the function would be different. For instance, a User-defined
+boundary condition for a system that would solve for a gas+dust mixture would read
+
+.. code-block:: c++
+
+ void MyBoundary(Fluid * fluid, int dir, BoundarySide side, const real t) {
+ // Here comes the code for the Gas boundary condition
+ }
+
+ void MyBoundaryDust(Fluid * fluid, int dir, BoundarySide side, const real t) {
+ // Here comes the code for the dust boundary condition
+ }
+
+Note that *Idefix* defines an alias for the default fluid which is often found in the example provided:
+
+.. code-block:: c++
+
+ using Hydro = Fluid;
+
+
Example
*******
@@ -114,7 +138,7 @@ constructor which reads a parameter from the .ini file and enroll the user-defin
Mass = input.Get("Setup","mass",0);
// Enroll the user-defined potential
- data.hydro.EnrollGravPotential(&Potential);
+ data.gravity->EnrollGravPotential(&Potential);
}
@@ -126,18 +150,18 @@ User-defined boundaries
If one (or several) boundaries are set to ``userdef`` in the input file, the user needs to
enroll a user-defined boundary function in the ``Setup`` constructor as for the other user-def functions (see :ref:`functionEnrollment`).
Note that even if several boundaries are ``userdef`` in the input file, only one user-defined function
-is required. When *Idefix* calls the user defined boundary function, it sets the direction of the boundary (``dir=IDIR``, ``JDIR``,
+is required per fluid type. When *Idefix* calls the user defined boundary function, it sets the direction of the boundary (``dir=IDIR``, ``JDIR``,
or ``KDIR``) and the side of the bondary (``side=left`` or ``side=right``). If conveninent, one can use
the ``BoundaryFor`` wrapper functions to automatically loop on the boundary specified by ``dir`` and ``side``.
A typical user-defined boundary condition function looks like this:
.. code-block:: c++
- void UserdefBoundary(DataBlock& data, int dir, BoundarySide side, real t) {
- IdefixArray4D Vc = data.hydro.Vc;
- IdefixArray4D Vs = data.hydro.Vs;
+ void UserdefBoundary(Hydro *hydro, int dir, BoundarySide side, real t) {
+ IdefixArray4D Vc = hydro->Vc;
+ IdefixArray4D Vs = hydro->Vs;
if(dir==IDIR) {
- data.hydro.boundary.BoundaryFor("UserDefBoundary", dir, side,
+ hydro->boundary->BoundaryFor("UserDefBoundary", dir, side,
KOKKOS_LAMBDA (int k, int j, int i) {
Vc(RHO,k,j,i) = 1.0;
Vc(VX1,k,j,i) = 0.0;
@@ -147,17 +171,23 @@ A typical user-defined boundary condition function looks like this:
// For magnetic field (defined on cell sides), we need specific wrapper functions
// Note that we don't need to initialise the field component parallel to dir, as it is
// automatically reconstructed from the solenoidal condition and the tangential components
- data.hydro.boundary.BoundaryForX2s("UserDefBoundaryBX2s", dir, side,
+ hydro->boundary->BoundaryForX2s("UserDefBoundaryBX2s", dir, side,
KOKKOS_LAMBDA (int k, int j, int i) {
Vs(BX2s,k,j,i) = 0.0;
});
- data.hydro.boundary.BoundaryForX3s("UserDefBoundaryBX3s", dir, side,
+ hydro->boundary->BoundaryForX3s("UserDefBoundaryBX3s", dir, side,
KOKKOS_LAMBDA (int k, int j, int i) {
Vs(BX3s,k,j,i) = 0.0;
});
}
}
+.. warning::
+
+ Only the tangential field components should be initialised by user-defined boundary conditions.
+ *Idefix* automatically reconstructs (and overwrites!) the normal field component from the
+ divergence-free condition on B and the user-defined tangential magnetic field components.
+
.. _setupInitflow:
@@ -258,6 +288,36 @@ can be automatically derived using ``DataBlockHost::MakeVsFromAmag`` as in the e
dataHost.SyncToDevice();
}
+Initialising passive tracers
+********************************
+
+Idefix provides the possibility to use passive tracers, that are scalars passively advected by
+the associated fluid. Tracers are enabled by setting a non-zero integer to the parameter ``tracer`` in
+a ``[Hydro]`` (for hydro tracers) or a ``[Dust]`` block in your input file, so that the tracers
+will follow the main fluid or the dust fluids, with the given number of tracers.
+
+Each tracer can be accessed in the dataBlockHost as a particular field in the array Vc as in the example below
+
+.. code-block:: c++
+
+ void Setup::InitFlow(DataBlock &data) {
+ // Create a host copy of the DataBlock given in argument
+ DataBlockHost dataHost(data);
+
+ for(int k = 0; k < dataHost.np_tot[KDIR] ; k++) {
+ for(int j = 0; j < dataHost.np_tot[JDIR] ; j++) {
+ for(int i = 0; i < dataHost.np_tot[IDIR] ; i++) {
+ real x = dataHost.x[IDIR](i);
+ real y = dataHost.x[JDIR](j);
+ // First tracer
+ dataHost.Vc(TRG,k,j,i) = (x < 0 ? 0 : 1); // TRG for gas tracers
+ // second tracer
+ dataHost.Vc(TRG+1,k,j,i) = (y < 0 ? 0 : 1); // For gas tracers
+
+.. note::
+
+ Note that when using dust tracers, one should use the field ``TRD`` instead of ``TRG``.
+
.. _setupInitDump:
Initialising from a restart dump
@@ -270,18 +330,13 @@ a new initial condition by extrapolating or extanding a restart dump (such as in
test or a dimension change). In this case, one should use the ``DumpImage`` class which provides
all the tools needed to read a restart dump (see also :ref:`dumpImageClass`).
-One typically first construct an instance of ``DumpImage`` in the ``Setup`` constructor, and then
-use this instance to initialise the flow in ``Setup::InitFlow``. The procedure is examplified below,
+One typically first constructs an instance of ``DumpImage`` in ``Setup::InitFlow``, and then
+use this instance to initialise the flow. The procedure is exemplified below,
assuming we want to create a dump from ``mydump.dmp``:
.. code-block:: c++
- DumpImage *image; // Global pointer to our DumpImage
-
- // Setup constructor
- Setup::Setup(Input &input, Grid &grid, DataBlock &data, Output &output) {
- image = new DumpImage("mydump.dmp",output); // load the dump file and store it in a DumpImage
- }
+ #include "dumpImage.hpp"
// Flow initialisation, read directly from the DumpImage
void Setup::InitFlow(DataBlock &data) {
@@ -289,6 +344,8 @@ assuming we want to create a dump from ``mydump.dmp``:
// Create a host copy
DataBlockHost d(data);
+ DumpImage image("mydump.dmp", &data);
+
for(int k = d.beg[KDIR]; k < d.end[KDIR] ; k++) {
for(int j = d.beg[JDIR]; j < d.end[JDIR] ; j++) {
for(int i = d.beg[IDIR]; i < d.end[IDIR] ; i++) {
@@ -299,9 +356,9 @@ assuming we want to create a dump from ``mydump.dmp``:
int jglob=j-2*d.beg[JDIR]+d.gbeg[JDIR];
int kglob=k-2*d.beg[KDIR]+d.gbeg[KDIR];
- d.Vc(RHO,k,j,i) = image->arrays["Vc-RHO"](kglob,jglob,iglob);
- d.Vc(PRS,k,j,i) = image->arrays["Vc-PRS"](kglob,jglob,iglob);
- d.Vc(VX1,k,j,i) = image->arrays["Vc-VX1"](kglob,jglob,iglob);
+ d.Vc(RHO,k,j,i) = image.arrays["Vc-RHO"](kglob,jglob,iglob);
+ d.Vc(PRS,k,j,i) = image.arrays["Vc-PRS"](kglob,jglob,iglob);
+ d.Vc(VX1,k,j,i) = image.arrays["Vc-VX1"](kglob,jglob,iglob);
}}}
// For magnetic variable, we should fill the entire active domain, hence an additional
diff --git a/make_tarballs.py b/make_tarballs.py
index f062e749..dfc1b55f 100644
--- a/make_tarballs.py
+++ b/make_tarballs.py
@@ -17,10 +17,8 @@
KOKKOS_EXCLUDE_LIST = [
".*",
- "example",
"appveyor.yml",
"scripts",
- "*test*",
]
@@ -97,6 +95,14 @@ def _make_self_contained_tarball(output_dir, *, suffix="", exclude_list=None):
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument("output_dir", default=".", nargs="?")
+ parser.add_argument(
+ "--allow-dirty",
+ action="store_true",
+ help=(
+ "allow to build tarballs with uncommited changes. "
+ "Useful to test this script"
+ ),
+ )
args = parser.parse_args(argv)
with pushd(IDEFIX_DIR):
@@ -105,7 +111,7 @@ def main(argv=None):
# break if local copy is dirty
stdout = subprocess.check_output(["git", "diff", "--name-only"]).decode()
- if stdout.strip() != "":
+ if not args.allow_dirty and stdout.strip() != "":
print(
"ERROR: Local copy is dirty. Commit or stash changes before publishing.",
file=sys.stderr,
diff --git a/pytools/idfx_test.py b/pytools/idfx_test.py
new file mode 100644
index 00000000..ce718f37
--- /dev/null
+++ b/pytools/idfx_test.py
@@ -0,0 +1,448 @@
+import argparse
+import os
+import shutil
+import subprocess
+import sys
+import re
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from .dump_io import readDump
+
+class bcolors:
+ HEADER = '\033[95m'
+ OKBLUE = '\033[94m'
+ OKCYAN = '\033[96m'
+ OKGREEN = '\033[92m'
+ WARNING = '\033[93m'
+ FAIL = '\033[91m'
+ ENDC = '\033[0m'
+ BOLD = '\033[1m'
+ UNDERLINE = '\033[4m'
+
+class idfxTest:
+ def __init__ (self):
+ parser = argparse.ArgumentParser()
+
+ idefix_dir_env = os.getenv("IDEFIX_DIR")
+
+ parser.add_argument("-noplot",
+ dest="noplot",
+ help="disable plotting in standard tests",
+ action="store_false")
+
+ parser.add_argument("-ploterr",
+ help="Enable plotting on error in regression tests",
+ action="store_true")
+
+ parser.add_argument("-cmake",
+ default=[],
+ help="CMake options",
+ nargs='+')
+
+ parser.add_argument("-definitions",
+ default="",
+ help="definitions.hpp file")
+
+ parser.add_argument("-dec",
+ default="",
+ help="MPI domain decomposition",
+ nargs='+')
+
+ parser.add_argument("-check",
+ help="Only perform regression tests without compilation",
+ action="store_true")
+
+ parser.add_argument("-cuda",
+ help="Test on Nvidia GPU using CUDA",
+ action="store_true")
+
+ parser.add_argument("-hip",
+ help="Test on AMD GPU using HIP",
+ action="store_true")
+
+ parser.add_argument("-single",
+ help="Enable single precision",
+ action="store_true")
+
+ parser.add_argument("-vectPot",
+ help="Enable vector potential formulation",
+ action="store_true")
+
+ parser.add_argument("-reconstruction",
+ type=int,
+ default=2,
+ help="set reconstruction scheme (2=PLM, 3=LimO3, 4=PPM)")
+
+ parser.add_argument("-idefixDir",
+ default=idefix_dir_env,
+ help="Set directory for idefix source files (default $IDEFIX_DIR)")
+
+ parser.add_argument("-mpi",
+ help="Enable MPI",
+ action="store_true")
+
+ parser.add_argument("-all",
+ help="Do all test suite (otherwise, just do the test with the current configuration)",
+ action="store_true")
+
+ parser.add_argument("-init",
+ help="Reinit reference files for non-regression tests (dangerous!)",
+ action="store_true")
+
+ parser.add_argument("-Werror",
+ help="Consider warnings as errors",
+ action="store_true")
+
+
+ args, unknown=parser.parse_known_args()
+
+ # transform all arguments from args into attributes of this instance
+ self.__dict__.update(vars(args))
+ self.referenceDirectory = os.path.join(idefix_dir_env,"reference")
+ # current directory relative to $IDEFIX_DIR/test (used to retrieve the path to reference files)
+ self.testDir=os.path.relpath(os.curdir,os.path.join(idefix_dir_env,"test"))
+
+ def configure(self,definitionFile=""):
+ comm=["cmake"]
+ # add source directory
+ comm.append(self.idefixDir)
+ # add specific options
+ for opt in self.cmake:
+ comm.append("-D"+opt)
+
+ if self.cuda:
+ comm.append("-DKokkos_ENABLE_CUDA=ON")
+ # disable fmad operations on Cuda to make it compatible with CPU arithmetics
+ comm.append("-DIdefix_CXX_FLAGS=--fmad=false")
+
+ if self.hip:
+ comm.append("-DKokkos_ENABLE_HIP=ON")
+ # disable fmad operations on HIP to make it compatible with CPU arithmetics
+ comm.append("-DIdefix_CXX_FLAGS=-ffp-contract=off")
+
+ #if we use single precision
+ if(self.single):
+ comm.append("-DIdefix_PRECISION=Single")
+ else:
+ comm.append("-DIdefix_PRECISION=Double")
+
+
+ if(self.vectPot):
+ comm.append("-DIdefix_EVOLVE_VECTOR_POTENTIAL=ON")
+ else:
+ comm.append("-DIdefix_EVOLVE_VECTOR_POTENTIAL=OFF")
+
+ if(self.Werror):
+ comm.append("-DIdefix_WERROR=ON")
+
+ # add a definition file if provided
+ if(definitionFile):
+ self.definitions=definitionFile
+ else:
+ self.definitions="definitions.hpp"
+
+ comm.append("-DIdefix_DEFS="+self.definitions)
+
+ if(self.mpi):
+ comm.append("-DIdefix_MPI=ON")
+ else:
+ comm.append("-DIdefix_MPI=OFF")
+
+ if(self.reconstruction == 2):
+ comm.append("-DIdefix_RECONSTRUCTION=Linear")
+ elif(self.reconstruction==3):
+ comm.append("-DIdefix_RECONSTRUCTION=LimO3")
+ elif(self.reconstruction==4):
+ comm.append("-DIdefix_RECONSTRUCTION=Parabolic")
+
+
+ try:
+ cmake=subprocess.run(comm)
+ cmake.check_returncode()
+ except subprocess.CalledProcessError as e:
+ print(bcolors.FAIL+"***************************************************")
+ print("Cmake failed")
+ print("***************************************************"+bcolors.ENDC)
+ raise e
+
+ def compile(self,jobs=8):
+ try:
+ make=subprocess.run(["make","-j"+str(jobs)])
+ make.check_returncode()
+ except subprocess.CalledProcessError as e:
+ print(bcolors.FAIL+"***************************************************")
+ print("Compilation failed")
+ print("***************************************************"+bcolors.ENDC)
+ raise e
+
+ def run(self, inputFile="", np=2, nowrite=False, restart=-1):
+ comm=["./idefix"]
+ if inputFile:
+ comm.append("-i")
+ comm.append(inputFile)
+ if self.mpi:
+ if self.dec:
+ np=1
+ for n in range(len(self.dec)):
+ np=np*int(self.dec[n])
+
+ comm.insert(0,"mpirun")
+ comm.insert(1,"-np")
+ comm.insert(2,str(np))
+ if self.dec:
+ comm.append("-dec")
+ for n in range(len(self.dec)):
+ comm.append(str(self.dec[n]))
+
+ if nowrite:
+ comm.append("-nowrite")
+
+ if self.Werror:
+ comm.append("-Werror")
+
+ if restart>=0:
+ comm.append("-restart")
+ comm.append(str(restart))
+
+ try:
+ make=subprocess.run(comm)
+ make.check_returncode()
+ except subprocess.CalledProcessError as e:
+ print(bcolors.FAIL+"***************************************************")
+ print("Execution failed")
+ print("***************************************************"+bcolors.ENDC)
+ raise e
+
+ self._readLog()
+
+ def _readLog(self):
+ if not os.path.exists('./idefix.0.log'):
+ # When no idefix file is produced, we leave
+ return
+
+ with open('./idefix.0.log','r') as file:
+ log = file.read()
+
+ if "SINGLE PRECISION" in log:
+ self.single = True
+ else:
+ self.single = False
+
+ if "Kokkos CUDA target ENABLED" in log:
+ self.cuda = True
+ else:
+ self.cuda = False
+
+ if "Kokkos HIP target ENABLED" in log:
+ self.hip = True
+ else:
+ self.hip = False
+
+
+ self.reconstruction = 2
+ if "3rd order (LimO3)" in log:
+ self.reconstruction = 3
+
+ if "4th order (PPM)" in log:
+ self.reconstruction = 4
+
+ self.mpi=False
+ if "MPI ENABLED" in log:
+ self.mpi=True
+
+
+ # Get input file from log
+ line = re.search('(?<=Input Parameters using input file )(.*)', log)
+ self.inifile=line.group(0)[:-1]
+
+ # Get performances from log
+ line = re.search('Main: Perfs are (.*) cell', log)
+ self.perf=float(line.group(1))
+
+ def checkOnly(self, filename, tolerance=0):
+ # Assumes the code has been run manually using some configuration, so we simply
+ # do the test suite without configure/compile/run
+ self._readLog()
+ self._showConfig()
+ if self.cuda or self.hip:
+ print(bcolors.WARNING+"***********************************************")
+ print("WARNING: Idefix guarantees floating point arithmetic accuracy")
+ print("ONLY when fmad instruction are explicitely disabled at compilation.")
+ print("Otheriwse, this test will likely fail.")
+ print("***********************************************"+bcolors.ENDC)
+ self.standardTest()
+ self.nonRegressionTest(filename, tolerance)
+
+ def standardTest(self):
+ if os.path.exists(os.path.join('python', 'testidefix.py')):
+ os.chdir("python")
+ comm = [sys.executable, "testidefix.py"]
+ if self.noplot:
+ comm.append("-noplot")
+
+ print(bcolors.OKCYAN+"Running standard test...")
+ try:
+ make=subprocess.run(comm)
+ make.check_returncode()
+ except subprocess.CalledProcessError as e:
+ print(bcolors.FAIL+"***************************************************")
+ print("Standard test execution failed")
+ print("***************************************************"+bcolors.ENDC)
+ raise e
+ print(bcolors.OKCYAN+"Standard test succeeded"+bcolors.ENDC)
+ os.chdir("..")
+ else:
+ print(bcolors.WARNING+"No standard testidefix.py for this test"+bcolors.ENDC)
+ sys.stdout.flush()
+
+ def nonRegressionTest(self, filename,tolerance=0):
+
+ fileref=os.path.join(self.referenceDirectory, self.testDir, self._getReferenceFilename())
+ if not(os.path.exists(fileref)):
+ raise Exception("Reference file "+fileref+ " doesn't exist")
+
+ filetest=filename
+ if not(os.path.exists(filetest)):
+ raise Exception("Test file "+filetest+ " doesn't exist")
+
+ Vref=readDump(fileref)
+ Vtest=readDump(filetest)
+ error=self._computeError(Vref,Vtest)
+ if error > tolerance:
+ print(bcolors.FAIL+"Non-Regression test failed!")
+ self._showConfig()
+ print(bcolors.ENDC)
+ if self.ploterr:
+ self._plotDiff(Vref,Vtest)
+ assert error <= tolerance, bcolors.FAIL+"Error (%e) above tolerance (%e)"%(error,tolerance)+bcolors.ENDC
+ print(bcolors.OKGREEN+"Non-regression test succeeded with error=%e"%error+bcolors.ENDC)
+ sys.stdout.flush()
+
+ def compareDump(self, file1, file2,tolerance=0):
+ Vref=readDump(file1)
+ Vtest=readDump(file2)
+ error=self._computeError(Vref,Vtest)
+ if error > tolerance:
+ print(bcolors.FAIL+"Files are different !")
+ print(bcolors.ENDC)
+
+ self._plotDiff(Vref,Vtest)
+ assert error <= tolerance, bcolors.FAIL+"Error (%e) above tolerance (%e)"%(error,tolerance)+bcolors.ENDC
+ print(bcolors.OKGREEN+"Files are identical up to error=%e"%error+bcolors.ENDC)
+ sys.stdout.flush()
+
+
+ def makeReference(self,filename):
+ self._readLog()
+ targetDir = os.path.join(self.referenceDirectory,self.testDir)
+ if not os.path.exists(targetDir):
+ print("Creating reference directory")
+ os.makedirs(targetDir, exist_ok=True)
+ fileout = os.path.join(targetDir, self._getReferenceFilename())
+ if(os.path.exists(fileout)):
+ ans=input(bcolors.WARNING+"This will overwrite already existing reference file:\n"+fileout+"\nDo you confirm? (type yes to continue): "+bcolors.ENDC)
+ if(ans != "yes"):
+ print(bcolors.WARNING+"Reference creation aborted"+bcolors.ENDC)
+ return
+
+ shutil.copy(filename,fileout)
+ print(bcolors.OKGREEN+"Reference file "+fileout+" created"+bcolors.ENDC)
+ sys.stdout.flush()
+
+ def _showConfig(self):
+ print("**************************************************************")
+ if self.cuda:
+ print("Nvidia Cuda enabled.")
+ if self.hip:
+ print("AMD HIP enabled.")
+ print("CMake Opts: " +" ".join(self.cmake))
+ print("Definitions file:"+self.definitions)
+ print("Input File: "+self.inifile)
+ if(self.single):
+ print("Precision: Single")
+ else:
+ print("Precision: Double")
+ if(self.reconstruction==2):
+ print("Reconstruction: PLM")
+ elif(self.reconstruction==3):
+ print("Reconstruction: LimO3")
+ elif(self.reconstruction==4):
+ print("Reconstruction: PPM")
+ if(self.vectPot):
+ print("Vector Potential: ON")
+ else:
+ print("Vector Potential: OFF")
+ if self.mpi:
+ print("MPI: ON")
+ else:
+ print("MPI: OFF")
+
+ print("**************************************************************")
+
+ def _getReferenceFilename(self):
+ strReconstruction="plm"
+ if self.reconstruction == 3:
+ strReconstruction = "limo3"
+ if self.reconstruction == 4:
+ strReconstruction= "ppm"
+
+ strPrecision="double"
+ if self.single:
+ strPrecision="single"
+
+ fileref='dump.ref.'+strPrecision+"."+strReconstruction+"."+self.inifile
+ if self.vectPot:
+ fileref=fileref+".vectPot"
+
+ fileref=fileref+'.dmp'
+ return(fileref)
+
+ def _computeError(self,Vref,Vtest):
+ ntested=0
+ error=0
+ for fld in Vtest.data.keys():
+ if(Vtest.data[fld].ndim==3):
+ if fld in Vref.data.keys():
+ #print("error in "+fld+" = "+str(np.sqrt(np.mean((Vref.data[fld]-Vtest.data[fld])**2))))
+ error = error+np.sqrt(np.mean((Vref.data[fld]-Vtest.data[fld])**2))
+ ntested=ntested+1
+
+ if ntested==0:
+ raise Exception(bcolors.FAIL+"There is no common field between the reference and current file"+bcolors.ENDC)
+
+ error=error/ntested
+ return(error)
+
+ def _plotDiff(self,Vref,Vtest):
+
+ for fld in Vtest.data.keys():
+ if(Vtest.data[fld].ndim==3):
+ if fld in Vref.data.keys():
+ plt.figure()
+ plt.title(fld)
+ x1=Vref.x1
+ if Vref.data[fld].shape[0] == Vref.x1.size+1:
+ x1=np.zeros(Vref.data[fld].shape[0])
+ x1[:-1]=Vref.x1l
+ x1[-1]=Vref.x1r[-1]
+ x2=Vref.x2
+ if Vref.data[fld].shape[1] == Vref.x2.size+1:
+ x2=np.zeros(Vref.data[fld].shape[1])
+ x2[:-1]=Vref.x2l
+ x2[-1]=Vref.x2r[-1]
+ x3=Vref.x3
+ if Vref.data[fld].shape[2] == Vref.x3.size+1:
+ x3=np.zeros(Vref.data[fld].shape[2])
+ x3[:-1]=Vref.x3l
+ x3[-1]=Vref.x3r[-1]
+ if Vref.data[fld].shape[1]>1:
+ plt.pcolor(x1, x2, Vref.data[fld][:,:,0].T-Vtest.data[fld][:,:,0].T,cmap='seismic')
+ plt.xlabel("x1")
+ plt.ylabel("x2")
+ plt.colorbar()
+ else:
+ plt.plot(x1, Vref.data[fld][:,0,0]-Vtest.data[fld][:,0,0])
+ plt.xlabel("x1")
+ plt.show()
diff --git a/reference b/reference
new file mode 160000
index 00000000..7f26b7ed
--- /dev/null
+++ b/reference
@@ -0,0 +1 @@
+Subproject commit 7f26b7ed94671e875decbf3407f397bae674e4e5
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 5ab15369..727f2150 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,5 +1,5 @@
-add_subdirectory(hydro)
+add_subdirectory(fluid)
add_subdirectory(dataBlock)
add_subdirectory(output)
add_subdirectory(rkl)
diff --git a/src/dataBlock/CMakeLists.txt b/src/dataBlock/CMakeLists.txt
index 67a06575..c3410f41 100644
--- a/src/dataBlock/CMakeLists.txt
+++ b/src/dataBlock/CMakeLists.txt
@@ -1,3 +1,5 @@
+add_subdirectory(planetarySystem)
+
target_sources(idefix
PUBLIC ${CMAKE_CURRENT_LIST_DIR}/coarsen.cpp
PUBLIC ${CMAKE_CURRENT_LIST_DIR}/dataBlock.cpp
diff --git a/src/dataBlock/coarsen.cpp b/src/dataBlock/coarsen.cpp
index 0ee7cd52..2da3049c 100644
--- a/src/dataBlock/coarsen.cpp
+++ b/src/dataBlock/coarsen.cpp
@@ -9,6 +9,7 @@
#include "../idefix.hpp"
#include "dataBlock.hpp"
#include "dataBlockHost.hpp"
+#include "fluid.hpp"
void DataBlock::Coarsen() {
if(!haveGridCoarsening) {
@@ -16,9 +17,9 @@ void DataBlock::Coarsen() {
}
ComputeGridCoarseningLevels();
// This routine coarsen the *conservative* variables
- hydro.CoarsenFlow(hydro.Uc);
+ hydro->CoarsenFlow(hydro->Uc);
#if MHD==YES
- hydro.CoarsenMagField(hydro.Vs);
+ hydro->CoarsenMagField(hydro->Vs);
#endif
}
diff --git a/src/dataBlock/dataBlock.cpp b/src/dataBlock/dataBlock.cpp
index c11d5178..0414bb13 100644
--- a/src/dataBlock/dataBlock.cpp
+++ b/src/dataBlock/dataBlock.cpp
@@ -5,11 +5,20 @@
// Licensed under CeCILL 2.1 License, see COPYING for more information
// ***********************************************************************************
-#include "../idefix.hpp"
+#include
+#include "idefix.hpp"
#include "dataBlock.hpp"
+#include "fluid.hpp"
+#include "gravity.hpp"
+#include "planetarySystem.hpp"
+#include "vtk.hpp"
+#include "dump.hpp"
+#ifdef WITH_HDF5
+#include "xdmf.hpp"
+#endif
-void DataBlock::InitFromGrid(Grid &grid, Input &input) {
- idfx::pushRegion("DataBlock::InitFromGrid");
+DataBlock::DataBlock(Grid &grid, Input &input) {
+ idfx::pushRegion("DataBlock::DataBlock");
this->mygrid=&grid;
@@ -17,18 +26,6 @@ void DataBlock::InitFromGrid(Grid &grid, Input &input) {
GridHost gridHost(grid);
gridHost.SyncFromDevice();
- nghost = std::vector(3);
- np_int = std::vector(3);
- np_tot = std::vector(3);
- lbound = std::vector(3);
- rbound = std::vector(3);
- beg = std::vector(3);
- end = std::vector(3);
- gbeg = std::vector(3);
- gend = std::vector(3);
-
- xbeg = std::vector(3);
- xend = std::vector(3);
// Get the number of points from the parent grid object
for(int dir = 0 ; dir < 3 ; dir++) {
@@ -70,23 +67,23 @@ void DataBlock::InitFromGrid(Grid &grid, Input &input) {
std::string label;
for(int dir = 0 ; dir < 3 ; dir++) {
label = "DataBlock_x" + std::to_string(dir);
- x.push_back(IdefixArray1D(label, np_tot[dir]));
+ x[dir] = IdefixArray1D(label, np_tot[dir]);
label = "DataBlock_xr" + std::to_string(dir);
- xr.push_back(IdefixArray1D(label,np_tot[dir]));
+ xr[dir] = IdefixArray1D(label,np_tot[dir]);
label = "DataBlock_xl" + std::to_string(dir);
- xl.push_back(IdefixArray1D(label,np_tot[dir]));
+ xl[dir] = IdefixArray1D(label,np_tot[dir]);
label = "DataBlock_dx" + std::to_string(dir);
- dx.push_back(IdefixArray1D(label,np_tot[dir]));
+ dx[dir] = IdefixArray1D(label,np_tot[dir]);
label = "DataBlock_xgc" + std::to_string(dir);
- xgc.push_back(IdefixArray1D(label,np_tot[dir]));
+ xgc[dir] = IdefixArray1D(label,np_tot[dir]);
label = "DataBlock_A" + std::to_string(dir);
- A.push_back(IdefixArray3D(label,
- np_tot[KDIR]+KOFFSET, np_tot[JDIR]+JOFFSET, np_tot[IDIR]+IOFFSET));
+ A[dir] = IdefixArray3D(label,
+ np_tot[KDIR]+KOFFSET, np_tot[JDIR]+JOFFSET, np_tot[IDIR]+IOFFSET);
}
dV = IdefixArray3D("DataBlock_dV",np_tot[KDIR],np_tot[JDIR],np_tot[IDIR]);
@@ -100,59 +97,10 @@ void DataBlock::InitFromGrid(Grid &grid, Input &input) {
dmu = IdefixArray1D("DataBlock_dmu",np_tot[JDIR]);
#endif
+ // Initialize our sub-domain
+ this->ExtractSubdomain();
-
- // Copy the relevant part of the coordinate system to the datablock
- for(int dir = 0 ; dir < 3 ; dir++) {
- int offset=gbeg[dir]-beg[dir];
-
- IdefixArray1D x_input = grid.x[dir];
- IdefixArray1D x_output= x[dir];
- IdefixArray1D xr_input = grid.xr[dir];
- IdefixArray1D xr_output= xr[dir];
- IdefixArray1D xl_input = grid.xl[dir];
- IdefixArray1D xl_output= xl[dir];
- IdefixArray1D dx_input = grid.dx[dir];
- IdefixArray1D dx_output= dx[dir];
-
- idefix_for("coordinates",0,np_tot[dir],
- KOKKOS_LAMBDA (int i) {
- x_output(i) = x_input(i+offset);
- xr_output(i) = xr_input(i+offset);
- xl_output(i) = xl_input(i+offset);
- dx_output(i) = dx_input(i+offset);
- }
- );
- }
-
- // Initialize grid coarsening if needed
- if(grid.haveGridCoarsening != GridCoarsening::disabled) {
- this->haveGridCoarsening = grid.haveGridCoarsening;
- this->coarseningDirection = grid.coarseningDirection;
- this->coarseningLevel = std::vector>(3);
-
- for(int dir = 0 ; dir < 3 ; dir++) {
- if(coarseningDirection[dir]) {
- const int Xt = (dir == IDIR ? JDIR : IDIR);
- const int Xb = (dir == KDIR ? JDIR : KDIR);
-
- // Allocate coarsening level arrays
- coarseningLevel[dir] = IdefixArray2D(
- "DataBlock_corseLevel",
- np_tot[Xb],
- np_tot[Xt]);
- // Make a local reference
- IdefixArray2D coarseInit = coarseningLevel[dir];
- // Init coarsening level array to one everywhere
- idefix_for("init_coarsening", 0, np_tot[Xb], 0, np_tot[Xt],
- KOKKOS_LAMBDA(int j, int i) {
- coarseInit(j,i) = 1;
- });
- }
- }
- }
-
- // Iniaitlize the geometry
+ // Initialize the geometry
this->MakeGeometry();
// Initialise the state containers
@@ -161,38 +109,189 @@ void DataBlock::InitFromGrid(Grid &grid, Input &input) {
this->states["current"] = StateContainer();
+ // Initialize the Dump object
+ this->dump = std::make_unique(input, this);
+
+ // Initialize the VTK object
+ this->vtk = std::make_unique(input, this, "data");
+
+ // Init XDMF objects for HDF5 outputs
+ #ifdef WITH_HDF5
+ this->xdmf= std::make_unique(input,this);
+ #endif
+
+
// Initialize the hydro object attached to this datablock
- this->hydro.Init(input, grid, this);
+ this->hydro = std::make_unique>(grid, input, this);
// Initialise Fargo if needed
if(input.CheckBlock("Fargo")) {
- fargo.Init(input, this);
+ this->fargo = std::make_unique(input, DefaultPhysics::nvar, this);
this->haveFargo = true;
}
- // Initialise gravity if needed
- if(input.CheckBlock("Gravity")) {
- gravity.Init(input, this);
+ // initialise planets if needed
+ if(input.CheckBlock("Planet")) {
+ this->planetarySystem = std::make_unique(input, this);
+ this->haveplanetarySystem = true;
+ }
+
+ // Initialise gravity if needed (automatically if planets are present)
+ if(input.CheckBlock("Gravity") || haveplanetarySystem) {
+ this->gravity = std::make_unique(input, this);
this->haveGravity = true;
}
+ // Initialise dust grains if needed
+ if(input.CheckBlock("Dust")) {
+ haveDust = true;
+ int nSpecies = input.Get("Dust","nSpecies",0);
+ for(int i = 0 ; i < nSpecies ; i++) {
+ dust.emplace_back(std::make_unique>(grid, input, this, i));
+ }
+ }
+ // Register variables that need to be saved in case of restart dump
+ dump->RegisterVariable(&t, "time");
+ dump->RegisterVariable(&dt, "dt");
+
+ idfx::popRegion();
+}
+
+/**
+ * @brief Construct a new Data Block as a subgrid
+ *
+ * @param subgrid : subgrid from which the local datablock should be extracted from
+ */
+DataBlock::DataBlock(SubGrid *subgrid) {
+  idfx::pushRegion("DataBlock::DataBlock(SubGrid)");
+ Grid *grid = subgrid->parentGrid;
+ this->mygrid = subgrid->grid.get();
+
+ // Make a local copy of the grid for future usage.
+ GridHost gridHost(*grid);
+ gridHost.SyncFromDevice();
+
+
+ // Get the number of points from the parent grid object
+ for(int dir = 0 ; dir < 3 ; dir++) {
+ nghost[dir] = grid->nghost[dir];
+ // Domain decomposition: decompose the full domain size in grid by the number of processes
+ // in that direction
+ np_int[dir] = grid->np_int[dir]/grid->nproc[dir];
+ np_tot[dir] = np_int[dir]+2*nghost[dir];
+
+ // Boundary conditions
+ if (grid->xproc[dir]==0) {
+ lbound[dir] = grid->lbound[dir];
+ if(lbound[dir]==axis) this->haveAxis = true;
+ } else {
+ lbound[dir] = internal;
+ }
+
+ if (grid->xproc[dir] == grid->nproc[dir]-1) {
+ rbound[dir] = grid->rbound[dir];
+ if(rbound[dir]==axis) this->haveAxis = true;
+ } else {
+ rbound[dir] = internal;
+ }
+
+ beg[dir] = grid->nghost[dir];
+ end[dir] = grid->nghost[dir]+np_int[dir];
+
+ // Where does this datablock starts and end in the grid?
+ // This assumes even distribution of points between procs
+ gbeg[dir] = grid->nghost[dir] + grid->xproc[dir]*np_int[dir];
+ gend[dir] = grid->nghost[dir] + (grid->xproc[dir]+1)*np_int[dir];
+
+ // Local start and end of current datablock
+ xbeg[dir] = gridHost.xl[dir](gbeg[dir]);
+ xend[dir] = gridHost.xr[dir](gend[dir]-1);
+ }
+
+  // Next we erase the info in the slice direction
+ int refdir = subgrid->direction;
+ nghost[refdir] = 0;
+ np_int[refdir] = 1;
+ np_tot[refdir] = 1;
+ beg[refdir] = 0;
+ end[refdir] = 1;
+ gbeg[refdir] = subgrid->index;
+ gend[refdir] = subgrid->index+1;
+ xbeg[refdir] = gridHost.x[refdir](subgrid->index);
+ xend[refdir] = gridHost.x[refdir](subgrid->index);
+
+ // Allocate the required fields (only a limited set for a datablock from a subgrid)
+ std::string label;
+ for(int dir = 0 ; dir < 3 ; dir++) {
+ label = "DataBlock_x" + std::to_string(dir);
+ x[dir] = IdefixArray1D(label, np_tot[dir]);
+
+ label = "DataBlock_xr" + std::to_string(dir);
+ xr[dir] = IdefixArray1D(label,np_tot[dir]);
+
+ label = "DataBlock_xl" + std::to_string(dir);
+ xl[dir] = IdefixArray1D(label,np_tot[dir]);
+
+ label = "DataBlock_dx" + std::to_string(dir);
+ dx[dir] = IdefixArray1D(label,np_tot[dir]);
+ }
+
+ // Initialize our sub-domain
+ this->ExtractSubdomain();
+
+ // Reset gbeg/gend so that they don't refer anymore to the parent grid
+ gbeg[refdir] = 0;
+ gend[refdir] = 1;
idfx::popRegion();
}
void DataBlock::ResetStage() {
- this->hydro.ResetStage();
+ this->hydro->ResetStage();
+ if(haveDust) {
+ for(int i = 0 ; i < dust.size() ; i++) {
+ dust[i]->ResetStage();
+ }
+ }
+}
+
+void DataBlock::ConsToPrim() {
+ this->hydro->ConvertConsToPrim();
+ if(haveDust) {
+ for(int i = 0 ; i < dust.size() ; i++) {
+ dust[i]->ConvertConsToPrim();
+ }
+ }
+}
+
+void DataBlock::PrimToCons() {
+ this->hydro->ConvertPrimToCons();
+ if(haveDust) {
+ for(int i = 0 ; i < dust.size() ; i++) {
+ dust[i]->ConvertPrimToCons();
+ }
+ }
}
// Set the boundaries of the data structures in this datablock
void DataBlock::SetBoundaries() {
if(haveGridCoarsening) {
ComputeGridCoarseningLevels();
- hydro.CoarsenFlow(hydro.Vc);
+ hydro->CoarsenFlow(hydro->Vc);
#if MHD==YES
- hydro.CoarsenMagField(hydro.Vs);
+ hydro->CoarsenMagField(hydro->Vs);
#endif
+ if(haveDust) {
+ for(int i = 0 ; i < dust.size() ; i++) {
+ dust[i]->CoarsenFlow(dust[i]->Vc);
+ }
+ }
}
- hydro.boundary.SetBoundaries(t);
+ if(haveDust) {
+ for(int i = 0 ; i < dust.size() ; i++) {
+ dust[i]->boundary->SetBoundaries(t);
+ }
+ }
+ hydro->boundary->SetBoundaries(t);
}
@@ -206,9 +305,23 @@ void DataBlock::ShowConfig() {
<< "...." << xend[dir] << std::endl;
}
}
- hydro.ShowConfig();
- if(haveFargo) fargo.ShowConfig();
- if(haveGravity) gravity.ShowConfig();
+ hydro->ShowConfig();
+ if(haveFargo) fargo->ShowConfig();
+ if(haveplanetarySystem) planetarySystem->ShowConfig();
+ if(haveGravity) gravity->ShowConfig();
+ if(haveUserStepFirst) idfx::cout << "DataBlock: User's first step has been enrolled."
+ << std::endl;
+ if(haveUserStepLast) idfx::cout << "DataBlock: User's last step has been enrolled."
+ << std::endl;
+ if(haveDust) {
+ idfx::cout << "DataBlock: evolving " << dust.size() << " dust species." << std::endl;
+ // Only show the config the first dust specie
+ dust[0]->ShowConfig();
+ /*
+ for(int i = 0 ; i < dust.size() ; i++) {
+ dust[i]->ShowConfig();
+ }*/
+ }
}
@@ -216,7 +329,7 @@ real DataBlock::ComputeTimestep() {
// Compute the timestep using all of the enabled modules in the current dataBlock
// First with the hydro block
- auto InvDt = hydro.InvDt;
+ auto InvDt = hydro->InvDt;
real dt;
idefix_reduce("Timestep_reduction",
beg[KDIR], end[KDIR],
@@ -226,6 +339,62 @@ real DataBlock::ComputeTimestep() {
dtmin=FMIN(ONE_F/InvDt(k,j,i),dtmin);
},
Kokkos::Min(dt));
+ if(haveDust) {
+ for(int n = 0 ; n < dust.size() ; n++) {
+ real dtDust;
+ auto InvDt = dust[n]->InvDt;
+ idefix_reduce("Timestep_reduction_dust",
+ beg[KDIR], end[KDIR],
+ beg[JDIR], end[JDIR],
+ beg[IDIR], end[IDIR],
+ KOKKOS_LAMBDA (int k, int j, int i, real &dtmin) {
+ dtmin=FMIN(ONE_F/InvDt(k,j,i),dtmin);
+ },
+ Kokkos::Min(dtDust));
+ dt = std::min(dt,dtDust);
+ }
+ }
Kokkos::fence();
return(dt);
}
+
+// Recompute magnetic fields from vector potential in dedicated fluids
+void DataBlock::DeriveVectorPotential() {
+ if constexpr(DefaultPhysics::mhd) {
+ #ifdef EVOLVE_VECTOR_POTENTIAL
+ hydro->emf->ComputeMagFieldFromA(hydro->Ve, hydro->Vs);
+ #endif
+ }
+}
+
+void DataBlock::LaunchUserStepLast() {
+ if(haveUserStepLast) {
+ idfx::pushRegion("User::UserStepLast");
+ if(userStepLast != nullptr)
+ userStepLast(*this, this->t, this->dt);
+ else
+ IDEFIX_ERROR("UserStepLast not properly initialized");
+ idfx::popRegion();
+ }
+}
+
+void DataBlock::LaunchUserStepFirst() {
+ if(haveUserStepFirst) {
+ idfx::pushRegion("User::UserStepFirst");
+ if(userStepFirst != nullptr)
+ userStepFirst(*this, this->t, this->dt);
+ else
+      IDEFIX_ERROR("UserStepFirst not properly initialized");
+ idfx::popRegion();
+ }
+}
+
+void DataBlock::EnrollUserStepLast(StepFunc func) {
+ haveUserStepLast = true;
+ userStepLast = func;
+}
+
+void DataBlock::EnrollUserStepFirst(StepFunc func) {
+ haveUserStepFirst = true;
+ userStepFirst = func;
+}
diff --git a/src/dataBlock/dataBlock.hpp b/src/dataBlock/dataBlock.hpp
index 01c9a166..12f0847e 100644
--- a/src/dataBlock/dataBlock.hpp
+++ b/src/dataBlock/dataBlock.hpp
@@ -11,11 +11,12 @@
#include
#include
#include